blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
08c2245d10891da893f461f1cd1ef301cf2c059a
|
Shell
|
just-john/prototype
|
/build.sh
|
UTF-8
| 268
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Configure, build and install the project with CMake + Ninja.
# Expects ./profile to export PROJECT_ROOT, PROJECT_BIN_DIR and
# PROJECT_BUILD_DIR.

# Abort on the first failing command instead of continuing with a
# half-configured tree.
set -e

source ./profile

# Fail loudly if ./profile did not define the directories we are about to
# mkdir/cd into (previously an unset var silently became "/bld" etc.).
: "${PROJECT_ROOT:?PROJECT_ROOT not set by ./profile}"
: "${PROJECT_BIN_DIR:?PROJECT_BIN_DIR not set by ./profile}"
: "${PROJECT_BUILD_DIR:?PROJECT_BUILD_DIR not set by ./profile}"

mkdir -p "${PROJECT_ROOT}/bld"
mkdir -p "${PROJECT_BIN_DIR}/lib"
mkdir -p "${PROJECT_BUILD_DIR}"

cd "${PROJECT_BUILD_DIR}"
cmake -G Ninja \
    -DCMAKE_INSTALL_PREFIX="${PROJECT_BIN_DIR}" \
    ../
ninja
ninja install
cd "${PROJECT_ROOT}"
| true
|
a83fe8447f323c11c3df4854ed0a7945979a70c6
|
Shell
|
5l1v3r1/flytrap
|
/rc/flytrap.init.in
|
UTF-8
| 2,057
| 3.578125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# chkconfig: 2345 20 80
# description: Network scanning detector and mitigator
# processname: flytrap
# config: /etc/sysconfig/flytrap
### BEGIN INIT INFO
# Provides: flytrap
# Required-Start: $syslog $time $remote_fs
# Required-Stop: $syslog $time $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Network scanning detector and mitigator
# Description: Flytrap listens for traffic to unused IP addresses
# on your network, logs it, and sends bogus responses
# designed to slow down the attacker.
### END INIT INFO
PATH=/bin:/usr/bin:/sbin:/usr/sbin
NAME=Flytrap
SERVICE=flytrap
DAEMON=@sbindir@/$SERVICE
# Pull in the Red Hat init helpers (daemon, killproc, action, ...).
. /etc/rc.d/init.d/functions
# Allow local overrides of CSVFILE/PIDFILE/LCKFILE/OPTIONS/INTERFACE.
if [ -f /etc/sysconfig/$SERVICE ] ; then
. /etc/sysconfig/$SERVICE
fi
: ${CSVFILE:=@FT_CSVFILE@}
# BUG FIX: $DAEMON is a full path (@sbindir@/flytrap), which previously
# produced nonsense defaults like /var/run/@sbindir@/flytrap.pid and
# /var/lock/subsys/@sbindir@/flytrap.  Use the bare service name.
: ${PIDFILE:=/var/run/$SERVICE.pid}
: ${LCKFILE:=/var/lock/subsys/$SERVICE}
# Preconditions for managing the service, with LSB exit codes:
# 4 = insufficient privileges, 5 = program not installed/executable.
check() {
	if [ "$(id -u)" != 0 ] ; then
		exit 4
	fi
	if [ ! -x $DAEMON ] ; then
		exit 5
	fi
}
# Start the daemon via the init library's action/daemon helpers; on success
# drop a subsys lock so the rc system knows the service is up.
start() {
check
action "Starting $NAME daemon" \
daemon $DAEMON -t $CSVFILE -p $PIDFILE $OPTIONS $INTERFACE
RETVAL=$?
if [ $RETVAL = 0 ] ; then
touch $LCKFILE
fi
return $RETVAL
}
# Stop the daemon with killproc and remove the subsys lock on success.
stop() {
action "Stopping $NAME daemon" \
killproc -p $PIDFILE $DAEMON
RETVAL=$?
if [ $RETVAL = 0 ] ; then
rm -f $LCKFILE
fi
return $RETVAL
}
# Ask the running daemon to re-read its configuration by delivering SIGHUP
# through killproc; RETVAL is left set for callers, matching start/stop.
reload() {
	action "Reloading $NAME daemon " \
	    killproc -p "$PIDFILE" "$DAEMON" -HUP
	RETVAL=$?
	return $RETVAL
}
# Full bounce: stop (result ignored) then start again.
restart() {
stop
start
}
# Report whether the daemon is running, using LSB-ish exit codes
# (0 running, 1 dead but pidfile exists, 3 stopped).
#
# BUG FIX: this function shadows the "status" helper sourced from
# /etc/rc.d/init.d/functions, so the old body ("status -p ...") called
# ITSELF and recursed forever.  Check the pid file directly instead.
status() {
	if [ -f "$PIDFILE" ] && kill -0 "$(cat "$PIDFILE")" 2>/dev/null ; then
		echo "$SERVICE (pid $(cat "$PIDFILE")) is running..."
		return 0
	elif [ -f "$PIDFILE" ] ; then
		echo "$SERVICE dead but pid file exists"
		return 1
	else
		echo "$SERVICE is stopped"
		return 3
	fi
}
# Dispatch on the rc verb passed by the init system.
case "$1" in
start)
	start
	;;
stop)
	stop
	;;
reload)
	reload
	;;
force-reload|restart)
	restart
	;;
condrestart)
	# Restart only if the service is currently running.
	if status > /dev/null; then
		restart
	fi
	;;
status)
	status
	;;
*)
	# BUG FIX: the usage line named the wrong service ("tsdfx", copied from
	# another init script) and omitted several supported verbs.
	echo "Usage: /etc/init.d/flytrap {start|stop|reload|force-reload|restart|condrestart|status}"
	exit 1
	;;
esac
exit 0
| true
|
72f6b4caf99ed782e3fb7320dc16af119142b18a
|
Shell
|
nallagondu/scripts-for-linux
|
/process.sh
|
UTF-8
| 291
| 3.0625
| 3
|
[] |
no_license
|
### cat process.sh
#!/bin/bash
# Watch a process; once it disappears, log the event, restart the service
# and notify root by mail.
PROCESS=httpd
COUNTER=0
# pgrep -x replaces the fragile "ps aux | grep X | grep -v grep" pipeline
# (which could match unrelated command lines containing the name).
while pgrep -x "$PROCESS" > /dev/null
do
	COUNTER=$((COUNTER+1))
	sleep 1
	echo "COUNTER is $COUNTER"
done
logger "PROCESSMONITOR: $PROCESS stopped at $(date)"
service "$PROCESS" start
# BUG FIX: mail(1) with no body blocks reading stdin; feed it a one-line
# message instead.
echo "$PROCESS stopped at $(date)" | mail -s "$PROCESS stopped" root
| true
|
404704472c48cbc84d8350da847288c9a53f7eb4
|
Shell
|
rbray89/hassos
|
/buildroot/support/scripts/hardlink-or-copy
|
UTF-8
| 924
| 4.375
| 4
|
[
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Try to hardlink a file into a directory, fallback to copy on failure.
#
# Hardlink-or-copy the source file in the first argument into the
# destination directory in the second argument, using the basename in
# the third argument as basename for the destination file. If the third
# argument is missing, use the basename of the source file as basename
# for the destination file.
#
# In either case, remove the destination prior to doing the
# hardlink-or-copy.
#
# Note that this is NOT an atomic operation.
# Abort immediately if any step below (mkdir/rm/ln/cp) fails.
set -e
# Hardlink ${1} into directory ${2}, falling back to a plain copy when the
# link fails (e.g. the destination is on another filesystem).  An optional
# ${3} renames the target; otherwise the source basename is reused.  Any
# pre-existing destination file is removed first (NOT atomic).
main() {
	local source="${1}"
	local target_dir="${2}"
	local target_name="${3:-${source##*/}}"
	local target="${target_dir}/${target_name}"

	mkdir -p "${target_dir}"
	rm -f "${target}"
	ln -f "${source}" "${target}" 2>/dev/null \
		|| cp -f "${source}" "${target}"
}
# Entry point: forward all command-line arguments unchanged.
main "${@}"
| true
|
ab433209062e2991e01009c9fe134d8321b49d23
|
Shell
|
Dwyane-Xu/dotfiles
|
/scripts/install.sh
|
UTF-8
| 4,233
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Installer (安装) for these dotfiles.
# BUG FIX: the shebang was /bin/sh, but the script uses bashisms
# ([[ ... =~ ... ]] below, arrays later on); run it under bash explicitly.
source "$PWD/lib/echo.sh"
source "$PWD/lib/prompt.sh"
source "$PWD/lib/mac_version.sh"
cat "$PWD/assets/ascii.txt"
system_type=$(uname -s)
# macOS only.
if [ "$system_type" != "Darwin" ]; then
error "仅适应于 macOS 系统。"
exit;
fi
# BUG FIX: this compatibility warning used to be issued twice in a row
# (copy-pasted block); warn once and let the user decide whether to go on.
if [ "$CODENAME" != "Catalina" ]; then
warn "此配置需要 macOS Catalina,你的版本部分配置有兼容性问题。"
read -r -p "${YELLOW}? 你确定要继续吗? [y|N] ${END}" response
if [[ $response =~ (n|N) ]]; then
exit;
fi
fi
###############################################################################
# Xcode
###############################################################################
# Install the command-line tools if xcode-select has no active path yet.
if ! xcode-select --print-path &> /dev/null; then
info "ensuring build/install tools are available"
xcode-select --install &> /dev/null;
fi
###############################################################################
# Git
###############################################################################
source "$PWD/config/git.sh"
###############################################################################
# Oh My Zsh
###############################################################################
source "$PWD/config/zsh.sh"
###############################################################################
# Homebrew
###############################################################################
# Bootstrap Homebrew when absent, from either the Tsinghua mirror (choice 0)
# or the official installer.
if test ! "$(which brew)"; then
ask "请选择镜像源"
options=("清华大学" "Homebrew 官网")
select_option "${options[@]}"
# BUG FIX: the selection was stored in a misspelled variable ("chioice"),
# so $choice below was always empty and the mirror branch was picked by
# accident rather than by the user's answer.
choice=$?
if [[ $choice -eq 0 ]]; then
/usr/bin/ruby "$PWD/lib/brew_install.rb"
git clone https://mirrors.tuna.tsinghua.edu.cn/git/homebrew/homebrew-core.git /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core --depth=1
git clone https://mirrors.tuna.tsinghua.edu.cn/git/homebrew/homebrew-cask.git /usr/local/Homebrew/Library/Taps/homebrew/homebrew-cask --depth=1
git -C "$(brew --repo homebrew/core)" remote set-url origin https://mirrors.tuna.tsinghua.edu.cn/git/homebrew/homebrew-core.git
git -C "$(brew --repo homebrew/cask)" remote set-url origin https://mirrors.tuna.tsinghua.edu.cn/git/homebrew/homebrew-cask.git
export HOMEBREW_BOTTLE_DOMAIN=https://mirrors.tuna.tsinghua.edu.cn/homebrew-bottles
brew update
brew doctor
export HOMEBREW_NO_AUTO_UPDATE=true
else
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
brew tap homebrew/cask
fi
fi
###############################################################################
# Install
###############################################################################
# mas = Mac App Store CLI; the Brewfile drives the bulk of the installs.
brew install mas
brew bundle --file $HOME/.dotfiles/install/Brewfile
source $PWD/install/npm.sh
source $PWD/install/rust.sh
###############################################################################
# Clear
###############################################################################
info "清理系统自带 Emacs、Vim、Nano."
# Removal of the system editors is intentionally left commented out — on
# modern macOS the system volume is read-only and these deletions would fail.
# sudo rm /usr/bin/vim
# sudo rm -rf /usr/share/vim
# sudo rm /usr/bin/emacs
# sudo rm -rf /usr/share/emacs
# sudo rm /usr/bin/nano
# sudo rm -rf /usr/share/nano
###############################################################################
# Fonts
###############################################################################
info "添加 SF Mono."
# Remount the (normally read-only) system volume read-write so the font can
# be copied into /System/Library/Fonts; abort the script if the copy fails.
sudo mount -uw /
sudo cp /System/Applications/Utilities/Terminal.app/Contents/Resources/Fonts/SFMono-*.otf /System/Library/Fonts/ || exit 1
###############################################################################
# Config
###############################################################################
# NOTE(review): cd changes $PWD here, so the config scripts below are loaded
# from ~/.dotfiles regardless of where the installer was started.
cd $HOME/.dotfiles
source $PWD/config/vim.sh
source $PWD/config/tmux.sh
source $PWD/config/osx.sh
# Entry point: currently just primes the sudo credential cache.
main() {
	ask_for_sudo
}

# Validate sudo up front, then keep the cached credentials fresh in a
# background loop for as long as this script ($$) is alive.
ask_for_sudo() {
	info "Prompting for sudo password."
	if ! sudo --validate; then
		error "Sudo password update failed."
		exit 1
	fi
	# Refresh the timestamp every 10s; the loop exits on its own once the
	# parent process disappears.
	while true; do
		sudo --non-interactive true
		sleep 10
		kill -0 "$$" || exit
	done 2>/dev/null &
	success "Sudo password updated."
}

main "$@"
| true
|
3030e212675a262f0b34c8f949abd94be6f58641
|
Shell
|
straywithsmile/work_conf
|
/bin/gen_create_str.sh
|
UTF-8
| 2,508
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build and print the "./create_simulate.sh ..." command line for creating a
# server environment from SVN branch names.
date_str=`date "+%Y_%m_%d"`
svn_head="https://svn-dhxy2.gz.netease.com/products/xy2/develop/server/"
# Documentation strings listing the accepted values for args 1 and 2; they
# are overwritten by the actual CLI values below.
create_env="env|release|simulate"
create_branch="branch|release|test|shiwan|other"
help()
{
echo "./gen_create_str.sh create_env create_branch logic release [date]"
}
# Old positional-argument scheme, kept for reference.
#if [ $# -eq 1 ];then
# date_str=`date "+%Y_%m_%d"`
# logic_svn_branch=$1
# engine_svn_branch=$1
#elif [ $# -eq 2 ];then
# date_str=`date "+%Y_%m_%d"`
# logic_svn_branch=$1
# engine_svn_branch=$2
#elif [ $# -eq 3 ];then
# logic_svn_branch=$1
# engine_svn_branch=$2
# date_str=$3
#elif [ $# -eq 4 ];then
# logic_svn_branch=$1
# engine_svn_branch=$2
# date_str=$3
# create_env=$4
#elif [ $# -eq 5 ];then
# logic_svn_branch=$1
# engine_svn_branch=$2
# date_str=$3
# create_env=$4
# create_branch=$5
#else
# help
# exit 2
#fi
# Current scheme: env branch logic engine [date]; the date defaults to today.
if [ $# -eq 4 ];then
create_env=$1
create_branch=$2
logic_svn_branch=$3
engine_svn_branch=$4
elif [ $# -eq 5 ];then
create_env=$1
create_branch=$2
logic_svn_branch=$3
engine_svn_branch=$4
date_str=$5
else
help
exit 2
fi
#echo $#
echo "$logic_svn_branch"
# Map the logic branch spec onto the SVN directory layout:
# release -> logic/release/rel_<date>, test* -> logic/test/<name>_<date>,
# trunk -> logic/trunk, anything else -> logic/branch/<name>_<date>.
if [ "$logic_svn_branch" = "release" ];then
	logic_dir=${svn_head}logic/release/rel_${date_str}
elif [ "${logic_svn_branch:0:4}" = "test" ];then
	logic_dir=${svn_head}logic/test/${logic_svn_branch}_${date_str}
elif [ "$logic_svn_branch" = "trunk" ];then
	logic_dir=${svn_head}logic/trunk
else
	logic_dir=${svn_head}logic/branch/${logic_svn_branch}_${date_str}
fi
echo "$logic_svn_branch"
echo "$engine_svn_branch"
# Same mapping for the engine ("os") tree.
if [ "$engine_svn_branch" = "release" ];then
	engine_dir=${svn_head}os/release/rel_${date_str}
elif [ "${engine_svn_branch:0:4}" = "test" ];then
	engine_dir=${svn_head}os/test/${engine_svn_branch}_${date_str}
elif [ "$engine_svn_branch" = "trunk" ];then
	engine_dir=${svn_head}os/trunk
else
	engine_dir=${svn_head}os/branch/${engine_svn_branch}_${date_str}
fi
echo "$engine_svn_branch"
IAM=$(whoami)
# Warn (but keep going) when either path does not exist in the repository.
if ! svn info "$logic_dir" ;then
	echo "LOGIC_PATH ${logic_dir} does NOT_EXIST"
fi
# Rewrite the scheme from https:// to svn:// (drop the 5-char "https").
logic_dir="svn${logic_dir:5:100}"
if ! svn info "$engine_dir" ;then
	echo "ENGINE_PATH ${engine_dir} does NOT_EXIST"
fi
engine_dir="svn${engine_dir:5:100}"
# NOTE(review): logic_state/engine_state are never assigned anywhere in this
# script; this echo prints an empty line — presumably leftover debug output.
echo ${logic_state} ${engine_state}
#example: ./create_simulate.sh simulate https://svn-dhxy2.gz.netease.com/products/xy2/develop/server/logic/trunk trunk https://svn-dhxy2.gz.netease.com/products/xy2/develop/server/os/trunk ~tx2/osd/release/release/osd
echo "./create_simulate.sh ${create_env} ${logic_dir} ${create_branch} ${engine_dir} ~tx2/osd/release/release/osd"
exit 0
| true
|
1754f0a1ef8711112d5e7e526c5b2fe67fcfb3ea
|
Shell
|
nabiuddin6/scripts-1
|
/GTD/wtoggle
|
UTF-8
| 893
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
#####################################################################
# Toggle watson tracking. #
#####################################################################
# Flash a 3-second desktop notification titled "Watson" with the given body.
notify() {
	notify-send -t 3000 "Watson" "$1"
}
# Debian's notification daemon needs an explicit close before toggling.
if [[ "$(uname -a)" == *"Debian"* ]]; then
debian-close-notifications
fi
lock_file=/tmp/"$(basename "$0")"
/bin/cp -f /home/bryan/.config/get-shit-done.ini /tmp/sites.ini
# Serialize concurrent invocations: fd 200 holds an exclusive flock on
# $lock_file for the duration of the subshell.
(
flock 200 || exit 1
cd /tmp || exit 1
# Stop any running idle-watcher before changing tracking state.
if [[ -f /run/user/1000/xtaskidle/pid ]]; then
kill "$(cat /run/user/1000/xtaskidle/pid)"
fi
# "watson status" starts with "No " when nothing is being tracked.
if [[ "$(watson status)" == "No "* ]]; then
watson start Study
notify "Starting Watson Tracker"
xtaskidle -d &> /dev/null &
else
watson stop
notify "Stopping Watson Tracker"
fi
) 200> "${lock_file}"
# NOTE(review): deleting the lock file re-opens a race window between
# back-to-back invocations; flock works fine with a persistent lock file.
rm -rf "${lock_file}"
| true
|
4c7d7acb86af899417a9ea225d118a0632aaa212
|
Shell
|
e-COSI/docker-nginx-certbot
|
/scripts/entrypoint.sh
|
UTF-8
| 790
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Container entrypoint: enable nginx configs, start nginx, obtain certs,
# run one-shot startup hooks, then babysit nginx/cron until killed.
# When we get killed, kill all our children
trap "exit" INT TERM
trap "kill 0" EXIT
# Source in util.sh so we can have our nice tools
. "$(cd "$(dirname "$0")"; pwd)/util.sh"
# Immediately run auto_enable_configs so that nginx is in a runnable state
auto_enable_configs
# Start up nginx
nginx -g "daemon off;" &
# Next, run certbot to request all the ssl certs we can find
/scripts/run_certbot.sh
# Lastly, run startup scripts
for f in /scripts/startup/*.sh; do
	# BUG FIX: [[ ... ]] is a bashism; this script runs under /bin/sh,
	# which is dash on Debian-based images and has no [[ builtin.
	if [ -x "$f" ]; then
		echo "Running startup script $f"
		"$f"
	fi
done
echo "Done with startup"
# Run `cron -f &` so that it's a background job owned by bash and then `wait`.
# This allows SIGINT (e.g. CTRL-C) to kill cron gracefully, due to our `trap`.
cron -f &
wait "$(cat /var/run/nginx.pid)"
| true
|
b9cd58707043878438128808ba9b81cfe6cd3e57
|
Shell
|
brki/drifter
|
/install.sh
|
UTF-8
| 1,874
| 4.34375
| 4
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# this script is used to install the virtualization capabilities in a new project
# Modify with care as the README tells users to run the latest version of the script
# but we checkout the latest tag, so you might run in issues when you try to copy
# or use files that are not yet tagged.
# Remember where the project root is so we can come back after the checkout.
BASE=$(pwd)
VIRTDIR="virtualization"
REPODIR="drifter"
# ANSI colors for progress/error output.
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
rollback()
{
	# Undo a partially-completed install: drop the cloned repo and, if
	# that leaves the virtualization directory empty, remove it too.
	echo -e "${RED}An error occured. Aborting.${NC}"
	# The :? guards abort instead of expanding to "rm -Rf /" should the
	# variables ever be unset or empty.
	rm -Rf "${VIRTDIR:?}/${REPODIR:?}"
	# this might lead to some confusing error output, but if we didn't reach
	# the file copying stage it will leave the directory in a cleaner state.
	if [ -d "$VIRTDIR" ]; then
		rmdir --ignore-fail-on-non-empty "$VIRTDIR"
	fi
}
# Any exit (trap on 0) before the trap is cleared triggers a rollback.
trap 'rollback' 0
# exit on first error
set -e
mkdir "$VIRTDIR"
echo -n -e "Cloning Drifter into $VIRTDIR/$REPODIR : ${RED}"
git submodule add -q https://github.com/liip/drifter.git "$VIRTDIR/$REPODIR" > /dev/null
echo -e "${GREEN}OK${NC}."
cd "$VIRTDIR/$REPODIR"
# Pin the submodule to its most recent tag rather than the branch head.
LATEST_COMMIT=$(git rev-list --tags --max-count=1)
LATEST=$(git describe --tags $LATEST_COMMIT)
echo -n -e "Using version $LATEST : ${RED}"
git checkout -q $LATEST > /dev/null
echo -e "${GREEN}OK${NC}."
cd "$BASE"
# Seed the project with the distributed default configuration files.
echo -n -e "Copying default configuration inside the project : ${RED}"
cp "$VIRTDIR/$REPODIR/provisioning/playbook.yml.dist" "$VIRTDIR/playbook.yml"
cp "$VIRTDIR/$REPODIR/parameters.yml.dist" "$VIRTDIR/parameters.yml"
cp "$VIRTDIR/$REPODIR/ansible.cfg.dist" "ansible.cfg"
cp "$VIRTDIR/$REPODIR/Vagrantfile.dist" "Vagrantfile"
echo -e "${GREEN}OK${NC}."
echo
echo -e "You can now configure Drifter by modifying the following files : "
echo -e "\t * ${VIRTDIR}/playbook.yml : to configure what needs to be installed inside the box"
echo -e "\t * ${VIRTDIR}/parameters.yml : for project parameters"
# remove error handler
trap : 0
| true
|
af39e1c53111bdabaadc9b9d29c796dabd9d4b1c
|
Shell
|
conjuring/conjuring
|
/src/conda.sh
|
UTF-8
| 379
| 3.65625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
## Usage: conda.sh <cmd> ...
# Where <cmd> is any valid conda command, or `path_exec` (which will prefix
# the base conda bin path before running the remaining command)
#
# Fixes over the original: the deprecated/ambiguous `-a` test operator is
# replaced with two [ ] tests; `$@` is quoted so arguments containing spaces
# survive; and the `which conda` probe no longer leaks the conda path onto
# this wrapper's stdout (it used `which` for its exit status only).
if [ "${#}" -gt 1 ] && [ "$1" = path_exec ]; then
	# Re-invoke ourselves to resolve the base env, then run the rest of
	# the command line with conda's bin directory prepended to PATH.
	PATH="$($0 info --base)/bin:$PATH" "${@:2}"
else
	if command -v conda > /dev/null 2>&1; then
		conda "$@"
	else
		# Fall back to the conventional container install location.
		/opt/conda/bin/conda "$@"
	fi
fi
| true
|
734346ad7de93fb12c65b4b77a1504f7eb1a2537
|
Shell
|
cvignal/aur
|
/marktext/PKGBUILD
|
UTF-8
| 1,879
| 2.625
| 3
|
[] |
no_license
|
# Maintainer: Gabriel Saillard (GitSquared) <gabriel@saillard.dev>
# Maintainer: Caleb Maclennan <caleb@alerque.com>
# Contributor: David Birks <david@tellus.space>
# Contributor: Simon Doppler (dopsi) <dop.simon@gmail.com>
# Contributor: dpeukert
pkgname=marktext
pkgver=0.16.1
pkgrel=3
pkgdesc='A simple and elegant open-source markdown editor that focused on speed and usability'
arch=('x86_64')
url='https://marktext.app'
license=('MIT')
# Runtime: system electron plus the native modules' shared libraries.
depends=('electron'
'libxkbfile'
'libsecret')
makedepends=('nodejs'
'node-gyp'
'yarn')
# Upstream source tarball plus the local electron launcher wrapper.
source=("$pkgname-$pkgver.tar.gz::https://github.com/marktext/marktext/archive/v${pkgver}.tar.gz"
"$pkgname.sh")
sha256sums=('a00aa0caf26ab6e24e6cd5fef2a2a03e2ef46d0bf185c6971d9f00207223633e'
'5716d0879a683d390caf8c90a9b373cc536256821d80498d0f983a1ac0f364ab')
# Fetch JS dependencies reproducibly (lockfile pinned, lifecycle scripts off).
prepare() {
cd "$pkgname-$pkgver"
yarn --cache-folder "$srcdir" install --frozen-lockfile --ignore-scripts
}
# Produce the unpacked electron build under build/linux-unpacked/.
build() {
cd "$pkgname-$pkgver"
yarn --cache-folder "$srcdir" run build:bin
}
# Install the app bundle, launcher, desktop integration files and docs.
package() {
	cd "$pkgname-$pkgver"
	install -Dm755 "../$pkgname.sh" "$pkgdir/usr/bin/$pkgname"
	install -Dm644 -t "$pkgdir/usr/lib/$pkgname/resources/" build/linux-unpacked/resources/app.asar
	cp -a build/linux-unpacked/resources/{app.asar.unpacked,hunspell_dictionaries} "$pkgdir/usr/lib/$pkgname/resources/"
	install -Dm755 -t "${pkgdir}/usr/share/applications/" resources/linux/marktext.desktop
	install -Dm755 -t "${pkgdir}/usr/share/metainfo/" resources/linux/marktext.appdata.xml
	install -Dm644 resources/icons/icon.png "${pkgdir}/usr/share/pixmaps/marktext.png"
	install -Dm644 -t "$pkgdir/usr/share/licenses/$pkgname/" LICENSE
	install -Dm644 -t "$pkgdir/usr/share/doc/$pkgname/" README.md
	cp -a docs "$pkgdir/usr/share/doc/$pkgname/"
	pushd "resources/icons"
	# BUG FIX: the pattern was misspelled "maktext.png", so no hicolor
	# icons were ever installed.  The {} from find carries the relative
	# size subdirectory (e.g. ./48x48/marktext.png) into the destination.
	find . -name 'marktext.png' -exec install -Dm644 {} "$pkgdir/usr/share/icons/hicolor/{}" \;
	popd
}
| true
|
0f069fbbb10f9b978f16b0c5e1acdb1c0fd1bab0
|
Shell
|
sruizcarmona/scripts
|
/BIOINFORMATICS/CBDD/DOCKING/VINA/vina_screen_local.sh
|
UTF-8
| 211
| 2.828125
| 3
|
[] |
no_license
|
#! /bin/bash
# Dock every ligand in ligs_pdbqt/ with AutoDock Vina, one at a time,
# writing each result and its log under results/.
for f in ligs_pdbqt/*lig*.pdbqt; do
	# If the glob matched nothing, skip the literal pattern string.
	[ -e "$f" ] || continue
	b=$(basename "$f" .pdbqt)
	echo "Processing ligand $b"
	vina --config vina_conf.txt --ligand "$f" --out "results/${b}_out.pdbqt" > "results/logs/${b}_log.txt"
done
| true
|
5cba8e519443f1b6f48e5b2f7d83d6b255618c2d
|
Shell
|
mastarink/masbash
|
/path_util/set_path
|
UTF-8
| 2,811
| 3.5
| 4
|
[] |
no_license
|
# Rebuild MAS_PATH from the current $PATH: dump each PATH entry (prefixed
# with '+') into a temp file, let the external mas_get_paths helper decide
# ordering/placement, then reassemble a deduplicated PATH keeping only
# absolute paths to directories that actually exist.
# Entries returned by mas_get_paths may be prefixed '+' (append) or
# '-' (prepend); bare absolute paths are treated as '+'.
function mas_set_path ( )
{
local s p show opath order px rest first tempf saved_trap result rpath
show='no'
#PATH=
opath=$PATH
# echo "opath: $PATH" >&2
# Per-process temp file in tmpfs; cleaned up explicitly at the end.
tempf="/dev/shm/$$.path.tmp"
#NO: trap "rm $tempf" EXIT
# Preserve any pre-existing RETURN trap rather than clobbering it.
saved_trap=$(trap -p RETURN)
# if [ -n "$saved_trap" ] ; then
# saved_trap="$saved_trap ; if [ -f '$tempf' ] ; then mas_logger ' RM $tempf' ; rm -f $tempf ; fi"
# else
# saved_trap="if [ -f '$tempf' ] ; then mas_logger ' RM $saved_trap' ; rm -f $tempf ; fi"
# fi
trap "$saved_trap" RETURN
> $tempf
# rex1: "-path" (prepend), rex2: "+path" (append), rex3: bare "/path".
local rex1 rex2 rex3
rex1='^\-(.*)$'
rex2='^\+(.*)$'
rex3='^\/(.*)$'
# while [[ "$opath" ]] ; do
# first=`echo $rest|/bin/cut -d: -f -1`
# rest=`echo $opath|/bin/cut -d: -f 2-`
# echo "first: $first" >&2
# echo "rest: $rest" >&2
# result=$?
# if [ "$opath" == "$rest" ] ; then
# rest=''
# fi
# echo "+$first" >> $tempf
# # if [ "x$show" == 'xyes' ] ; then echo "PRESET '$first'" ; fi
# opath=$rest
# done
# NOTE(review): splitting $PATH with tr assumes no entry contains spaces.
for first in `echo $PATH|/bin/tr ':' ' '` ; do
# echo "f: $first" >&2
echo "+$first" >> $tempf
done
# mas_loadlib_if_not mas_get_paths path
mas_get_lib_ifnot path mas_get_paths
# rpath=`$MAS_CONF_DIR_TERM/path/get_paths common $tempf`
# mas_get_paths is defined elsewhere; presumably it merges the dumped
# entries with a site-wide "common" list — TODO confirm.
rpath=$( mas_get_paths common $tempf)
# echo ":: rpath:'$rpath'" >&2
#echo $rpath >&2
# if [ "x$show" == 'xyes' ] ; then echo "rpath:$rpath {common $tempf}" ; fi
MAS_PATH=''
for p in $rpath
do
if [[ $p =~ $rex1 ]] ; then
order='-'
px=${BASH_REMATCH[1]}
elif [[ $p =~ $rex2 ]] ; then
order='+'
px=${BASH_REMATCH[1]}
elif [[ $p =~ $rex3 ]] ; then
order='+'
px=$p
fi
# Only absolute paths are considered at all.
if [[ $px =~ ^\/ ]] ; then
if [ -z "$MAS_PATH" ] ; then
MAS_PATH="$px"
# elif [ -z "$px" ] ; then
# if [ "x$show" == 'xyes' ] ; then mas_logger "px is empty" ; fi
# Skip duplicates; only add directories that exist, honoring the
# requested prepend/append order.
elif ! echo $MAS_PATH | /bin/egrep -q "(^|:)$px($|:)"; then
if [[ -d "$px" ]] ; then
if [ "x$order" == 'x+' ] ; then
MAS_PATH="$MAS_PATH:$px"
elif [ "x$order" == 'x-' ] ; then
MAS_PATH="$px:$MAS_PATH"
fi
# else
# .......... "PATH : directory $px not exists"
fi
fi
fi
done
export MAS_PATH
# if [ -n "$tempf" -a -s "$tempf" ] ; then
# rm $tempf
# fi
[ -n "$tempf" -a -s "$tempf" ] && rm -f "$tempf"
}
# Dev helper: recompute MAS_PATH from scratch and dump the result to stderr.
mas_set_path_test() {
	unset MAS_PATH
	mas_set_path
	echo "MAS_PATH: $MAS_PATH" >&2
}
# Compute MAS_PATH once per shell (skip if already exported) and make it
# the effective PATH.
if [ -z "$MAS_PATH" ] ; then
mas_set_path
# mas_set_path_test
fi
#unset mas_set_path
PATH=$MAS_PATH
export PATH
# vi: ft=sh
| true
|
3075b30d502d20f35d8a263d3599c6e55c239d06
|
Shell
|
sidereux/vimrc
|
/install.sh
|
UTF-8
| 1,559
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a vim setup: install pathogen, optionally the system gtags.vim,
# then clone every plugin URL listed in the vimrc into ~/.vim/bundle.
# cp vrapperrc ~/.vrapperrc
# install pathogen
mkdir -p ~/.vim/{autoload,bundle}
wget https://raw.githubusercontent.com/tpope/vim-pathogen/master/autoload/pathogen.vim -O ~/.vim/autoload/pathogen.vim
# install gtags
# Copy the system-wide gtags.vim into ~/.vim/plugin when it is present;
# silently do nothing otherwise.
install_gtags() {
	local gtags_vim='/usr/local/share/gtags/gtags.vim'
	if [ -r "$gtags_vim" ]; then
		echo use gtags
		mkdir -p ~/.vim/plugin
		cp "$gtags_vim" ~/.vim/plugin/
	fi
}
install_gtags
# download plugins
# Shallow-clone a repo, or fast-forward it when it is already checked out.
#   $1 - git URL
#   $2 - target directory name
function clone
{
	local url=$1
	local filename=$2
	if [ -e "$filename" ] && [ -d "$filename/.git" ]
	then
		# Already cloned: update in place.  git -C replaces the old
		# cd/pull/cd .. dance, which would run "git pull" in the WRONG
		# directory whenever the cd failed.
		git -C "$filename" pull
	else
		git clone --depth=1 "$url"
	fi
}
# Plugin URLs live in vimrc comment lines of the form:  " name | https://...
urls=$(sed -n '/^" .* | https/p' vimrc | awk -F'|' '{print $2}')
cd bundle
for url in $urls
do
if [[ $url == 'https://github.com/'* ]]
then
# filename=$(echo $url | ark -F'/' '{print $NF}')
# {print $NF} may not work when url has a trailing '/' character
# Extract the repo name (second path component after github.com/).
filename=$(echo $url | awk -F"github.com/" '{print $2}' | awk -F'/' '{print $2}')
echo
echo clone $filename
clone $url $filename
# if [[ $filename == 'vimproc.vim' ]]
# then
# # compile vimproc
# echo
# echo compile vimproc
# cd vimproc.vim
# make
# cd ..
# fi
fi
done
cd ..
| true
|
f06df51bce2424502b4e6b6a27d874bc19ad0018
|
Shell
|
mvala/zmq_strela
|
/scripts/install.sh
|
UTF-8
| 647
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build libzmq + czmq from git and install them under this checkout's prefix.
# MYZMQ_DIR = grandparent directory of this script (scripts/install.sh).
MYZMQ_DIR="$(dirname $(dirname $(readlink -m $0)))"
# BUG FIX: the variable below was misspelled ($YZMQ_DIR), so with an unset
# variable the etc/ directory was created at "/etc" instead of under the
# project prefix.
[ -d "$MYZMQ_DIR/etc" ] || mkdir -p "$MYZMQ_DIR/etc"
export PATH="$MYZMQ_DIR/bin:$PATH"
export LD_LIBRARY_PATH="$MYZMQ_DIR/lib:$LD_LIBRARY_PATH"
# Reset, update, and autotools-build the repo in the CURRENT directory,
# installing into $MYZMQ_DIR.  Extra arguments are forwarded to ./configure.
# Exit codes distinguish the failing stage: 1=configure, 2=make, 3=install.
function MyMake() {
git reset --hard
git pull
./autogen.sh
./configure --prefix=$MYZMQ_DIR $* || exit 1
make -j3 || exit 2
make install || exit 3
}
# NOTE(review): the existence checks test $HOME/git/<repo> but the cd below
# is relative to the current directory — these only agree when the script is
# run from $HOME/git.  TODO confirm the intended working directory.
[ -d $HOME/git/libzmq ] || git clone git://github.com/zeromq/libzmq.git
cd libzmq
MyMake --with-relaxed
cd ..
[ -d $HOME/git/czmq ] || git clone git://github.com/zeromq/czmq.git
cd czmq
# czmq links against the libzmq we just installed under the same prefix.
MyMake --with-libzmq=$MYZMQ_DIR --with-relaxed
cd ..
cd $MYZMQ_DIR
scripts/make.sh install
| true
|
1fa31cfef0ab062f052169df232a5106130b41a4
|
Shell
|
tkyashiro/pi-scripts
|
/ir/living.sh
|
UTF-8
| 515
| 3.78125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Control the living-room light over IR via pigpio's irrp.py sample.
# Usage: living.sh <on|off|max>
if [ $# -ne 1 ]; then
	echo "living.sh <on|off|max>"
	exit 1
fi
command=$1
base=$(dirname "$(realpath "$0")")

# Transmit one named IR code from the shared codes file on GPIO 12.
send_code() {
	python3 "${base}/irrp.py" -p -g12 -f "${base}/codes" "living:$1"
}

case "$command" in
on)
	echo "Turning ON the living light"
	send_code on
	;;
off)
	echo "Turning OFF the living light"
	send_code off
	;;
max)
	echo "Turning ON(max) the living light"
	send_code max
	;;
*)
	# Previously an unknown command was silently ignored (exit 0);
	# report it and fail instead.
	echo "living.sh <on|off|max>" >&2
	exit 1
	;;
esac
| true
|
8dd6ba9a6b57f1796542c58e0d8e0603825f6ffd
|
Shell
|
LISHUAI0000/Processor-Simulator
|
/Version 1/simulate.sh
|
UTF-8
| 347
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
# Script to simulate input assembly given in $1
# NOTE(review): despite the header above, the assembly is actually read from
# STDIN (cat <&0 below); $1 is never used — confirm which is intended.
# Change to binary directory
cd bin
# Convert assembly to binary IR
cat <&0 >input
java PreProcess > binary_prog
rm input
# Load binary IR and simulate execution
./ProcessorSimulator.o binary_prog ../data_dump
rm -f binary_prog
# Output the memory dump
cat ../data_dump
| true
|
b2419a87ed5882d7eb5e983c56590ee24ae23127
|
Shell
|
jbro885/dd-utility
|
/source/ddutility-osx-1.0.sh
|
UTF-8
| 9,881
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# dd Utility version 1.0 - Mac OSX
#
# By The Fan Club 2015
# http://www.thefanclub.co.za
#
### BEGIN LICENSE
# Copyright (c) 2015, The Fan Club <info@thefanclub.co.za>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
# Vars
apptitle="dd Utility"
version="1.0 beta"
# Set Icon directory and file
iconfile="/System/Library/Extensions/IOStorageFamily.kext/Contents/Resources/Removable.icns"
# Select Backup or Restore
response=$(osascript -e 'tell app "System Events" to display dialog "Select Backup to create an image file from a memory card or disk.\n\nSelect Restore to copy an image file to a memory card or disk. Supported formats: \nimg, zip, gzip, xz\n\nSelect Cancel to Quit" buttons {"Cancel", "Backup", "Restore"} default button 3 with title "'"$apptitle"' '"$version"'" with icon POSIX file "'"$iconfile"'" ')
# osascript returns e.g. "button returned:Backup"; keep the part after ':'.
action=$(echo $response | cut -d ':' -f2)
# Exit if Canceled
if [ ! "$action" ] ; then
osascript -e 'display notification "Program closing" with title "'"$apptitle"'" subtitle "Program cancelled"'
exit 0
fi
### BACKUP : Select inputfile and outputfile
if [ "$action" == "Backup" ] ; then
# Get input folder of memcard disk - NOTE funny quotes ` not '
memcardpath=`/usr/bin/osascript << EOT
tell application "Finder"
activate
set folderpath to choose folder default location "/Volumes" with prompt "Select your memory card location"
end tell
return (posix path of folderpath)
EOT`
# Cancel is user selects Cancel
if [ ! "$memcardpath" ] ; then
osascript -e 'display notification "Program closing" with title "'"$apptitle"'" subtitle "Program cancelled"'
exit 0
fi
# Get output folder for backup image
imagepath=`/usr/bin/osascript << EOT
tell application "Finder"
activate
set folderpath to choose folder default location (path to desktop folder) with prompt "Select backup folder for memory card image file"
end tell
return (posix path of folderpath)
EOT`
# Cancel is user selects Cancel
if [ ! "$imagepath" ] ; then
osascript -e 'display notification "Program closing" with title "'"$apptitle"'" subtitle "Program cancelled"'
exit 0
fi
# Get filename for for backup image
response=$(osascript -e 'tell app "System Events" to display dialog "Please enter filename for disk image:" default answer "imagefile" with title "'"$apptitle"' '"$version"'" ' )
# Dialog result looks like "button returned:OK, text returned:<name>".
getfilename=$( echo $response | cut -d "," -f2 | cut -d ":" -f2 )
# Strip path if given
filename=$(basename "$getfilename")
# Cancel is user selects Cancel
if [ ! "$response" ] ; then
osascript -e 'display notification "Program closing" with title "'"$apptitle"'" subtitle "Program cancelled"'
exit 0
fi
# Ask for compression
response=$(osascript -e 'tell app "System Events" to display dialog "Compress the Backup image file? \n\nThis can significantly reduce the space used by the backup.\n\nSelect Cancel to Quit" buttons {"No", "Yes"} default button 2 with title "'"$apptitle"' '"$version"'" with icon POSIX file "'"$iconfile"'" ')
compression=$(echo $response | cut -d ':' -f2)
# Parse vars for dd
outputfile="$imagepath$filename.img"
if [ "$compression" == "Yes" ] ; then
outputfile="$outputfile.zip"
fi
# Check if image file exists
if [ -f "$outputfile" ] ; then
response=$(osascript -e 'tell app "System Events" to display dialog "The file '"$outputfile"' already exist.\n\nSelect Continue to overwrite the file.\n\nSelect Cancel to Quit" buttons {"Cancel", "Continue"} default button 2 with title "'"$apptitle"' '"$version"'" with icon POSIX file "'"$iconfile"'" ')
# Cancel is user selects Cancel
if [ ! "$response" ] ; then
osascript -e 'display notification "Program closing" with title "'"$apptitle"'" subtitle "Program cancelled"'
exit 0
fi
# Delete the file if exists
rm $outputfile
fi
fi
### RESTORE : Select image file and memcard location
if [ "$action" == "Restore" ] ; then
# Get image file location
imagepath=`/usr/bin/osascript << EOT
tell application "Finder"
activate
set imagefilepath to choose file of type {"img", "gz", "zip", "xz"} default location (path to desktop folder) with prompt "Select image file to restore to memory card or disk. Supported file formats : IMG, ZIP, GZ, XZ"
end tell
return (posix path of imagefilepath)
EOT`
# Cancel is user selects Cancel
if [ ! "$imagepath" ] ; then
osascript -e 'display notification "Program closing" with title "'"$apptitle"'" subtitle "Program cancelled"'
exit 0
fi
# Get input folder of memcard disk - NOTE funny quotes ` not '
memcardpath=`/usr/bin/osascript << EOT
tell application "Finder"
activate
set folderpath to choose folder default location "/Volumes" with prompt "Select your memory card location"
end tell
return (posix path of folderpath)
EOT`
# Cancel is user selects Cancel
if [ ! "$memcardpath" ] ; then
osascript -e 'display notification "Program closing" with title "'"$apptitle"'" subtitle "Program cancelled"'
exit 0
fi
# Parse vars for dd
inputfile=$imagepath
# Check if Compressed from extension
extension="${inputfile##*.}"
if [ "$extension" == "gz" ] || [ "$extension" == "zip" ] || [ "$extension" == "xz" ]; then
compression="Yes"
else
compression="No"
fi
fi
# Parse memcard disk volume Goodies
# Derive the volume name, then look up its disk number via diskutil.
memcard=$( echo $memcardpath | awk -F '\/Volumes\/' '{print $2}' | cut -d '/' -f1 )
disknum=$( diskutil list | grep "$memcard" | awk -F 'disk' '{print $2}' | cut -d 's' -f1 )
devdisk="/dev/disk$disknum"
# use rdisk for faster copy
devdiskr="/dev/rdisk$disknum"
# Get Drive size
drivesize=$( diskutil list | grep "disk$disknum" | grep "0\:" | cut -d "*" -f2 | awk '{print $1 " " $2}' )
# Set output option
# "check" is the file whose size is reported at the end: the image we wrote
# for Backup, or the image we read for Restore.
if [ "$action" == "Backup" ] ; then
inputfile=$devdiskr
source="$drivesize $memcard (disk$disknum)"
dest=$outputfile
check=$dest
fi
if [ "$action" == "Restore" ] ; then
source=$inputfile
dest="$drivesize $memcard (disk$disknum)"
outputfile=$devdiskr
check=$source
fi
# Confirmation Dialog
response=$(osascript -e 'tell app "System Events" to display dialog "Please confirm settings and click Start\n\nSource: \n'"$source"' \n\nDestination: \n'"$dest"' \n\n\nNOTE: All Data on the Destination will be deleted and overwritten" buttons {"Cancel", "Start"} default button 2 with title "'"$apptitle"' '"$version"'" with icon POSIX file "'"$iconfile"'" ')
answer=$(echo $response | grep "Start")
# Cancel is user does not select Start
if [ ! "$answer" ] ; then
osascript -e 'display notification "Program closing" with title "'"$apptitle"'" subtitle "Program cancelled"'
exit 0
fi
# Unmount Volume
# dd needs the raw device; the volume must not be mounted during the copy.
response=$(diskutil unmountDisk $devdisk)
answer=$(echo $response | grep "successful")
# Cancel if unable to unmount
if [ ! "$answer" ] ; then
osascript -e 'display notification "Program closing" with title "'"$apptitle"'" subtitle "Cannot Unmount '"$memcard"'"'
exit 0
fi
# Start dd copy
## Todo - delete image file if it exists already
osascript -e 'display notification "Please be patient...." with title "'"$apptitle"'" subtitle "'"$drivesize"' Disk '$action' Started"'
# All dd invocations run through osascript so macOS prompts for admin rights.
if [ "$compression" == "No" ] ; then
osascript -e 'do shell script "dd if=\"'"$inputfile"'\" of=\"'"$outputfile"'\" bs=1m" with administrator privileges'
fi
# Compression Backup and Restore
if [ "$compression" == "Yes" ] ; then
# Compressed Backup to ZIP file
if [ "$action" == "Backup" ] ; then
osascript -e 'do shell script "dd if=\"'"$inputfile"'\" bs=1m | zip > \"'"$outputfile"'\"" with administrator privileges'
fi
# Compressed Restore
if [ "$action" == "Restore" ] ; then
# GZ files
if [ "$extension" == "gz" ] ; then
osascript -e 'do shell script "gzip -dc \"'"$inputfile"'\" | dd of=\"'"$outputfile"'\" bs=1m" with administrator privileges'
fi
# ZIP files
if [ "$extension" == "zip" ] ; then
osascript -e 'do shell script "unzip -p \"'"$inputfile"'\" | dd of=\"'"$outputfile"'\" bs=1m" with administrator privileges'
fi
# XZ files - OSX 10.10 only I think
if [ "$extension" == "xz" ] ; then
osascript -e 'do shell script "tar -xJOf \"'"$inputfile"'\" | dd of=\"'"$outputfile"'\" bs=1m" with administrator privileges'
fi
fi
fi
# Check filesize the OSX way 1Kb = 1000 bytes
filesize=$(stat -f%z "$check")
if [ "$filesize" -gt 1000000000000 ] ; then
fsize="$( echo "scale=2; $filesize/1000000000000" | bc ) TB"
elif [ "$filesize" -gt 1000000000 ] ; then
fsize="$( echo "scale=2; $filesize/1000000000" | bc ) GB"
elif [ "$filesize" -gt 1000000 ] ; then
fsize="$( echo "scale=2; $filesize/1000000" | bc ) MB"
elif [ "$filesize" -gt 1000 ] ; then
fsize="$( echo "scale=2; $filesize/1000" | bc ) KB"
fi
# Get Filename for display
fname=$(basename "$check")
# Display Notifications
osascript -e 'display notification "'"$drivesize"' Drive '$action' Complete " with title "'"$apptitle"'" subtitle " '"$fname"' "'
response=$(osascript -e 'tell app "System Events" to display dialog "'"$drivesize"' Disk '$action' Complete\n\nFile '"$fname"'\n\nSize '"$fsize"' " buttons {"Done"} default button 1 with title "'"$apptitle"' '"$version"'" with icon POSIX file "'"$iconfile"'" ')
exit 0
# END
| true
|
f20fbe0eee95a870b152aab69710e1c24d7e1878
|
Shell
|
ksyasuda/dotfiles
|
/.config/polybar/sblocks/scripts/music-controls.sh
|
UTF-8
| 430
| 3.625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Polybar helper: drive the mpd player through mpc.
#
#   -n  skip to the next track
#   -p  go back to the previous track
#   -t  toggle play/pause
#   -v  verbose mpc output (quiet by default)
#
# The three actions are mutually exclusive; they are checked in the
# order next > prev > toggle and exactly one mpc call is made.
set -euo pipefail

do_next=0
do_prev=0
do_toggle=0
mpc_flags='-q'

while getopts nptv opt; do
	case "$opt" in
		n) do_next=1 ;;
		p) do_prev=1 ;;
		t) do_toggle=1 ;;
		v) mpc_flags='-v' ;;
		?)
			echo 'something went wrong'
			exit 1
			;;
	esac
done

# $mpc_flags is deliberately unquoted: it carries a single option word.
if (( do_next == 1 )); then
	mpc next $mpc_flags
elif (( do_prev == 1 )); then
	mpc prev $mpc_flags
elif (( do_toggle == 1 )); then
	mpc toggle $mpc_flags
fi
| true
|
147761d356e0235f3b4f1401ee1d631592506c84
|
Shell
|
mk-fg/systemd-password-agent
|
/systemd_password_cache_cleanup.sh
|
UTF-8
| 3,565
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Rotate the RSA-signed seed used to derive the LUKS passphrase at boot.
#
# The kernel command line carries rd.rsa=<dev>-<offset>/<key-id> (see the
# parameter expansions below): a raw 256-byte seed lives at <offset> on
# <dev>, and the LUKS key is the hex dump of the smartcard's RSA signature
# over that seed.  With no arguments the script performs the automated
# post-boot rotation; the option parser at the bottom provides manual
# modes (see --help / rewrite).
umask 077
chmod 700 /run/initramfs || exit 2
pw_cache=/run/initramfs/.password.cache
pw_seed=/run/initramfs/.password.seed
# Parse rd.rsa=<dev>-<offset>/<key-id> from the kernel command line.
rsa_info=$(grep -o 'rd.rsa=[^[:space:]]\+' /proc/cmdline)
rsa_info=${rsa_info#*=}
rsa_src=${rsa_info%/*}
rsa_dev=${rsa_src%-*}
rsa_offset=${rsa_src#*-}
rsa_key=${rsa_info#*/}
# Optional alternate key material for luksKillSlot/luksAddKey,
# set via --key-file in manual mode.
key_file=
# process_seed — derive the key for the CURRENT seed into ${pw_cache}.old,
# then generate a fresh random seed and derive its key into
# ${pw_cache}.new.  Keys are hex dumps of pkcs15-crypt RSA signatures.
# Returns 1 if signing fails or either cache file ends up empty.
process_seed() {
 # Create old/new seed/keys
 pkcs15-crypt -r0 -k "$rsa_key" --sign -i "$pw_seed" -R |
 hexdump -e '"%x"' >"${pw_cache}.old"
 [[ $? -ne 0 || ${PIPESTATUS[0]} -ne 0 || ! -s "${pw_cache}.old" ]] && return 1
 dd if=/dev/urandom of="$pw_seed" bs=256 count=1 status=noxfer 2>/dev/null \
 && pkcs15-crypt -r0 -k "$rsa_key" --sign -i "$pw_seed" -R |
 hexdump -e '"%x"' >"${pw_cache}.new"
 [[ $? -ne 0 || ${PIPESTATUS[0]} -ne 0 || ! -s "${pw_cache}.new" ]] && return 1
 return 0
}
# update_seed [SLOT...] — roll the LUKS keys on every luks-* device listed
# in /etc/crypttab: optionally kill the given slots, add the new key,
# remove the old one (only when no explicit slots were given), and finally
# write the new seed back to the raw device at the configured offset.
# Returns 1 when the operation failed on ALL devices.
update_seed() {
 slots=( "$@" )
 # Devices to process
 devs=( $(awk 'match($1, /luks-(.*)/, a) {system("blkid -U " a[1])}' /etc/crypttab) )
 [[ ${#devs[@]} -eq 0 ]] && return 0
 # Key material
 key=( -d "${pw_cache}.old" )
 [[ -n "$key_file" ]] && {
  [[ ! -e "$key_file" ]] && { echo >&2 "Keyfile not found: $key_file"; return 1; }
  key=( -d "$key_file" )
 }
 # Kill old slots
 [[ ${#slots[@]} -gt 0 ]] && {
  for dev in ${devs[@]}; do for slot in ${slots[@]}; do
   cryptsetup -q "${key[@]}" luksKillSlot "$dev" "$slot"
  done; done
 }
 # Add new key, counting failures
 failures=0
 for dev in ${devs[@]}; do
  cryptsetup -q -i100 "${key[@]}" luksAddKey "$dev" "${pw_cache}.new"
  [[ "$?" -ne 0 ]] && (( failures += 1 ))
 done
 [[ $failures -gt 0 ]] && echo >&2 "*** Failed to add new key to $failures devices ***"
 [[ $failures -eq ${#devs[@]} ]] && return 1
 # Remove old keys
 [[ ${#slots[@]} -eq 0 ]] && {
  failures=0
  for dev in ${devs[@]}; do
   cryptsetup -q luksRemoveKey "$dev" "${pw_cache}.old"
   [[ "$?" -ne 0 ]] && (( failures += 1 ))
  done
  [[ $failures -gt 0 ]] && echo >&2 "*** Failed to remove old key from $failures devices ***"
  [[ $failures -eq ${#devs[@]} ]] && return 1
 }
 # Update original seed
 dd if="$pw_seed" of=/dev/"$rsa_dev"\
  bs=256 seek="$rsa_offset" count=1 status=noxfer 2>/dev/null \
  || return 1
 return 0
}
### Automated start with no arguments
[[ -z "$1" ]] && {
 # Do the thing only if dracut has created a seed file
 err=
 [[ -f "$pw_seed" && -n "$rsa_info" ]] && {
  [[ -z "$err" ]] && process_seed\
   || { echo >&2 "Failed to process rsa seed"; err=true; }
  [[ -z "$err" ]] && update_seed\
   || { echo >&2 "Failed to update rsa seed"; err=true; }
 }
 # Always scrub the seed and key caches, success or not.
 rm -f "$pw_seed" "$pw_cache"{,.old,.new}
 [[ -z "$err" ]] && exit 0 || exit 1
}
### Manual start
# The -h case below self-inspects this script with awk to build the
# usage line from the case arms; do not reformat the case statement.
while [[ -n "$1" ]]; do
 case "$1" in
  -h|--help) echo "Usage: $0 $(awk '
   func print_arg() {
    if (!ac) ac=""; else ac=sprintf(" ...(%s)", ac)
    if (ap) printf("[ %s%s ] ", ap, ac) }
   /^\s+case\>/ {parse=1; next}
   /^\s+esac\>/ {print_arg(); exit}
   !parse {next}
   match($0, /^\s*([\-|a-z]+)\)/, a) { print_arg(); ap=a[1]; ac=0 }
   !match(ap,/\<-h|--help\>/)\
    {for (i=1;i<NF;i++) if (match($i, /\<shift\>/)) ac++}'\
   $0)"
   exit 0 ;;
  -d|--debug) set -x ;;
  --key-file) shift; key_file="$1" ;;
  rewrite)
   shift
   slots=( "$@" )
   dd if=/dev/urandom of="$pw_seed" bs=256 count=1 status=noxfer 2>/dev/null
   process_seed || { echo >&2 "Failed to process rsa seed"; exit 1; }
   update_seed "${slots[@]}" || { echo >&2 "Failed to rewrite luks slots"; exit 1; }
   rm -f "$pw_seed" "$pw_cache"{,.old,.new}
   exit 0 ;;
  *) echo >&2 "Unknown arg/option: $1" && exit 1 ;;
 esac
 shift
done
| true
|
b2cec9fc2e453d8c1e46affad0d59ee818ae14bf
|
Shell
|
devops-workflow/jenkins-tools
|
/bin/static-analysis-docker-image-clair.sh
|
UTF-8
| 5,474
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Docker image testing
#
# Scan $dockerImage with CoreOS Clair: bring up postgres + Clair
# containers if absent, install the hyperclair CLI once, then push,
# analyse and report on the image, finally converting the JSON report
# into Jenkins Warnings-plugin format via a jq filter.
# Expects $WORKSPACE (Jenkins) and $dockerImage to be set by the caller.
tmpdir=tmp
#dockerDir=.
#dockerFile=${dockerDir}/Dockerfile
imageDir=output-images
reportsDir=reports
versionFile=${tmpdir}/version
GIT_URL="${GIT_URL%.git}"
mkdir -p ${tmpdir} ${reportsDir}
###
### Clair
###
# Install
# use docker-compose or manual and can have central Clair server
if [ $(docker ps --format '{{.ID}}' --filter name=clair_postgres | wc -l) -eq 0 ]; then
 ###
 ### Create postgres container
 ###
 # NOTE(review): hardcoded throwaway DB password, local-only container.
 #docker pull postgres:latest
 docker run -d --name clair_postgres -e POSTGRES_PASSWORD=password postgres:latest
fi
if [ $(docker ps --format '{{.ID}}' --filter name=clair_clair | wc -l) -eq 0 ]; then
 ###
 ### Create Clair container
 ###
 # Do not try to use latest with config from master
 clair_version=v1.2.3
 clair_config_dir="${WORKSPACE%%/workspace*}/clair_config"
 TMPDIR=
 mkdir -p ${clair_config_dir}
 curl -L https://raw.githubusercontent.com/coreos/clair/${clair_version}/config.example.yaml -o ${clair_config_dir}/config.yaml
 sed -i '/ source:/ s#:.*#: postgresql://postgres:password@postgres:5432?sslmode=disable#' ${clair_config_dir}/config.yaml
 #docker pull quay.io/coreos/clair:${clair_version}
 docker run -d --name clair_clair -p 6060-6061:6060-6061 --link clair_postgres:postgres -v /tmp:/tmp -v ${clair_config_dir}:/config quay.io/coreos/clair:${clair_version} -config=/config/config.yaml
fi
# hyperclair is installed once per Jenkins node, outside the workspace.
install_dir="${WORKSPACE%%/workspace*}/bin"
if [ ! -x ${install_dir}/hyperclair ]; then
 ###
 ### Install hyperclair
 ###
 # write code to determine and get latest
 # Could eventually change to clairctl in clair
 hyperclair_version=0.5.2
 mkdir -p ${install_dir}
 # sudo curl -L -o ${install_dir}/hyperclair https://github.com/wemanity-belgium/hyperclair/releases/download/0.5.0/hyperclair-{OS}-{ARCH}
 curl -L -o ${install_dir}/hyperclair https://github.com/wemanity-belgium/hyperclair/releases/download/${hyperclair_version}/hyperclair-linux-amd64
 chmod +x ${install_dir}/hyperclair
 # Create config file (optional)
 # cat <<HYPERCLAIR > ${install_dir}/../.hyperclair.yml
 #clair:
 # port: 6060
 # healthPort: 6061
 # uri: http://127.0.0.1
 # priority: Low
 # report:
 # path: ./reports
 # format: html
 #HYPERCLAIR
fi
# Run hyperclair - analyse image and generate report - Have jenkins consume report (formats?)
# Config file setup ??
${install_dir}/hyperclair version
${install_dir}/hyperclair health
# If clair is NOT healthy, wait for a while
#${install_dir}/hyperclair -h
echo
printf "=%.s" {1..3}
echo -e "Running Clair CLI hyperclair...\n"
echo "CMD: hyperclair push|analyse|report ${dockerImage} --local"
${install_dir}/hyperclair push ${dockerImage} --local
${install_dir}/hyperclair analyse ${dockerImage} --local
${install_dir}/hyperclair report ${dockerImage} --local --format html
# Create json output and parse to get into Jenkins GUI
${install_dir}/hyperclair report ${dockerImage} --local --format json
# Report at reports/html/analyse-<image name>-<tag|latest>.html
#--config ${install_dir}/../.hyperclair.yml
# Can query json with jq
# Number of vulnerabilities found. List of all the CVEs
#jq '.Layers[].Layer.Features[].Vulnerabilities[].Name' analysis-intel-fenix-0.0-636-3edcab1.json 2>/dev/null | sort -u | wc -l
#jq '.Layers[].Layer.Features[].Vulnerabilities[].Severity' analysis-intel-fenix-0.0-636-3edcab1.json 2>/dev/null | wc -l
# List of all package names
#jq '.Layers[].Layer.Features[].Name' | sort -u
# Create jq parser for creating Jenkins Warning plugin output
parserFile=parse-hyperclair.jq
cat <<"PARSER" >${tmpdir}/${parserFile}
#
# jq filter for parsing hyperclair output into 1 liners that Jenkins Warning plugin can parse
#
# Written for jq 1.5
# Author: Steven Nemetz
#
# Output format:
# filename;line number;category;type;priority;message
#
# Set to variable, then reference after if
# Got lost because of piping a lower level
#.filename = "\(.ImageName):\(.Tag)"
# | (.Layers[].Layer.Features[]
#
# First line is bad. So pipe to
# Will create duplicate and bad lines
# Need to pipe output to cleanup or figure out better way to do this
# | sort -u | tail -n+2
"\(.ImageName):\(.Tag);0;" +
(.Layers[].Layer.Features[]
 | if .Vulnerabilities then
 "\(.Name) - \(.Version);" +
 (.Vulnerabilities[] | "\(.Name);\(.Severity);" +
 if .Message then
 "\(.Message) "
 else
 ""
 end +
 "Reference: \(.Link)")
 else
 ""
 end
)
PARSER
# reformat dockerImage - x/y:ver -> x-y-ver - s/[/:]/-/g
filenameBase="analysis-$(echo ${dockerImage} | sed 's#[/:]#-#g')"
# sort -u | tail -n+2 drops the duplicate/bad lines the filter emits
# (see the filter's own comments above).
jq -f ${tmpdir}/${parserFile} ${reportsDir}/json/${filenameBase}.json | sort -u | tail -n+2 > ${reportsDir}/${filenameBase}.warnings
# Drop an empty warnings file so Jenkins does not flag a clean image.
if [ ! -s ${reportsDir}/${filenameBase}.warnings ]; then
 rm -f ${reportsDir}/${filenameBase}.warnings
fi
#export GODIR="${WORKSPACE%%/workspace*}/go"
#export GOPATH=$GODIR:/usr/lib/go-1.6
#if [ ! -x ${GODIR}/bin/analyze-local-images ]; then
# ###
# ### Install analyze-local-images
# ###
# #export GOBIN=
# /usr/lib/go-1.6/bin/go get -u github.com/coreos/clair/contrib/analyze-local-images
# $GODIR/bin/analyze-local-images -h || true
#fi
## Run analyze-local-images
#echo
#printf "=%.s" {1..3}
#echo -e "Running Clair CLI analyze-local-images...\n"
#echo "CMD: analyze-local-images ${dockerImage}"
#$GODIR/bin/analyze-local-images ${dockerImage}
## Write Jenkins Warning parser for output if decide to continue with this cli tool
# Can also write own tool to talk with Clair API
| true
|
4c95afacd2b38e86e33b203db8f44df09f05b62e
|
Shell
|
mdkcore0/dotbins
|
/show-ansi-colors.sh
|
UTF-8
| 412
| 3.75
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#original: https://gist.github.com/eliranmal/b373abbe1c21e991b394bdffb0c8a6cf
#
# Print the first <n> ANSI background colors, one swatch per line.
#
# Shebang fixed to bash: the script uses (( )), `local` and a C-style
# for loop, none of which POSIX sh guarantees (under dash, `(( ... ))`
# is parsed as nested subshells and the script breaks).

usage() {
	echo "show-ansi-colors <n>"
	exit 0
}

(( $# < 1 )) && usage

# show_ansi_colors N — print an indexed background swatch for colors 0..N-1.
show_ansi_colors() {
	local colors=$1
	echo "showing $colors ansi colors:"
	# Start at 0 (the original `n=-0` was a typo; -0 == 0 numerically).
	for (( n=0; n < colors; n++ )); do
		printf " [%d]\t$(tput setab "$n")%s$(tput sgr0)\n" "$n" " "
	done
	echo
}

show_ansi_colors "$@"
| true
|
2bc3418db93e81ad95b695cce83fc18b70890c8b
|
Shell
|
kchodnicki/.zshrc
|
/.zshrc
|
UTF-8
| 9,390
| 2.75
| 3
|
[] |
no_license
|
# If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH="/Users/chodnicki/.oh-my-zsh"
# Set name of the theme to load. Optionally, if you set this to "random"
# it'll load a random theme each time that oh-my-zsh is loaded.
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
ZSH_THEME="powerlevel9k/powerlevel9k"
# Set list of themes to load
# Setting this variable when ZSH_THEME=random
# cause zsh load theme from this variable instead of
# looking in ~/.oh-my-zsh/themes/
# An empty array have no effect
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion. Case
# sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# You can set one of the optional three formats:
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# or set a custom format using the strftime function format specifications,
# see 'man strftime' for details.
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(
git
zsh-autosuggestions
)
source $ZSH/oh-my-zsh.sh
source /usr/local/share/zsh-autosuggestions/zsh-autosuggestions.zsh
source /usr/local/share/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
# User configuration
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# ssh
# export SSH_KEY_PATH="~/.ssh/rsa_id"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# Shell aliases.
alias ll="ls -laG"
alias vimgo="vim -u ~/.vimrc.go"
# POWERLEVEL9k config
POWERLEVEL9K_LEFT_PROMPT_ELEMENTS=(time dir vcs)
POWERLEVEL9K_RIGHT_PROMPT_ELEMENTS=(status root_indicator history)
POWERLEVEL9K_PROMPT_ON_NEWLINE=true
# Add a space in the first prompt
POWERLEVEL9K_PROMPT_ADD_NEWLINE=true
# Go workspace layout; GOBIN/GOPATH are appended to PATH below.
export GOPATH='/Users/chodnicki/go-workspace'
export GOBIN='/Users/chodnicki/go-workspace/bin'
export PATH=$PATH:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$GOBIN:$GOPATH
# Default cluster credentials, (re)applied on every new shell.
export KUBECONFIG=/Users/chodnicki/my_kubernetes/config-development
kubectl config set-cluster development --server=https://10.145.66.30:6443 --certificate-authority=/Users/chodnicki/my_kubernetes/ca.pem
kubectl config set-credentials developer --server=https://10.145.66.30:6443 --certificate-authority=/Users/chodnicki/my_kubernetes/ca.pem --client-key=/Users/chodnicki/my_kubernetes/admin-kubernetes-master-0-key.pem --client-certificate=/Users/chodnicki/my_kubernetes/admin-kubernetes-master-0.pem
# NOTE(review): SECURITY — live-looking AWS credentials are hardcoded
# below (and committed to a dotfiles repo). Rotate these keys and load
# them from ~/.aws/credentials or the environment instead.
export AWS_ACCESS_KEY_ID=AKIAILIL2AO3V4MSBIBQ
export AWS_SECRET_KEY=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
gwurl(){
	# gwurl PATH — GET https://<server>/<PATH> on the gateway using the
	# blue-admin client certificate (ca.pem / blue-admin*.pem in the
	# current directory) and pretty-print the JSON reply with jq.
	# The server address is field 2 of the "server:" line in ./f5go_config.
	addr=$(grep server: f5go_config | cut -d " " -f 2)
	# Fix: the original ran `curl curl --fail ...` — the duplicated word
	# made curl treat "curl" as a URL and fail before the real request.
	r=$(curl --fail --silent -k --cacert ca.pem --key blue-admin-key.pem --cert blue-admin.pem https://"$addr"/"$1" )
	echo $r | jq
}
# kubeaws — fetch the API-server and blue-admin certs from the master
# listed in ./f5go_config (via scp with the key in ./secrets/*_rsa),
# build a fresh kubeconfig in ~/tmp, and switch kubectl to the "aws"
# context.  Side effects: exports KUBECONFIG and leaves certs in ~/tmp
# and the current directory.
kubeaws()
{
 loc=$(pwd)
 mkdir ~/tmp
 addr=$(cat f5go_config | grep server: | cut -d " " -f 2)
 pushd ~/tmp
 scp -i "$loc"/secrets/*_rsa centos@$addr:/etc/kubernetes/ssl/kube-apiserver-key.pem .
 scp -i "$loc"/secrets/*_rsa centos@$addr:/etc/kubernetes/ssl/kube-apiserver.pem .
 scp -i "$loc"/secrets/*_rsa centos@$addr:/etc/blue/ssl/blue-user/blue-admin-key.pem "$loc"/blue-admin-key.pem
 scp -i "$loc"/secrets/*_rsa centos@$addr:/etc/blue/ssl/blue-user/blue-admin.pem "$loc"/blue-admin.pem
 scp -i "$loc"/secrets/*_rsa centos@$addr:/etc/blue/ssl/blue-user/ca.pem "$loc"/ca.pem
 touch kubeconfig-aws.yaml
 # Point kubectl at the freshly written kubeconfig for this shell.
 export KUBECONFIG=`pwd`/kubeconfig-aws.yaml
 kubectl config set-cluster aws-cluster --insecure-skip-tls-verify --server https://$addr:6443
 kubectl config set-credentials aws-cluster-admin --client-certificate kube-apiserver.pem --client-key kube-apiserver-key.pem
 kubectl config set-context aws --cluster aws-cluster --user aws-cluster-admin
 kubectl config use-context aws
 popd
}
# kubeCft HOST_OR_IP — resolve the target master, scp its kube-apiserver
# and blue-admin certs into ~/tmp, write a kubeconfig there, switch to
# the "aws" context and self-check the connection.
# NOTE(review): the spaceless `{echo ...; return 1}` groups are
# zsh-specific syntax; this function will not parse under bash.
kubeCft(){
	loc=$(pwd)
	mkdir ~/tmp >/dev/null 2>/dev/null
	addrraw=$1
	echo "Trying to resolve DNS: \033[0;32m$addrraw \033[0m"
	# If the argument already contains an IP, or DNS lookup yields none,
	# fall back to using the argument verbatim as the address.
	ipInRaw=`echo $addrraw | grep -ohE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b"`
	addr=`host $addrraw | grep -ohE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b"`
	if [[ -z "$addr" ]] || ! [[ -z "$ipInRaw" ]] ; then
		echo "Could not look up IP address for $addrraw. Trying it as address."
		addr=$1
	fi
	echo "IP: \033[0;32m$addr \033[0m"
	pushd ~/tmp >/dev/null 2>/dev/null
	# The two apiserver certs are mandatory (return 1); the blue-admin
	# certs are best-effort (warning only).
	scp centos@$addr:/etc/kubernetes/ssl/kube-apiserver-key.pem . >/dev/null 2>/dev/null || {echo "\033[0;31mCommand failed\033[0m at downloading kube-apiserver-key.pem"; return 1}
	scp centos@$addr:/etc/kubernetes/ssl/kube-apiserver.pem . >/dev/null 2>/dev/null || {echo "\033[0;31mCommand failed\033[0m at downloading kube-apiserver.pem"; return 1}
	scp centos@$addr:/etc/blue/ssl/blue-user/blue-admin-key.pem . >/dev/null 2>/dev/null || echo "\033[0;31mCommand failed\033[0m at downloading blue-admin-key.pem"
	scp centos@$addr:/etc/blue/ssl/blue-user/blue-admin.pem . >/dev/null 2>/dev/null || echo "\033[0;31mCommand failed\033[0m at downloading blue-admin.pem"
	scp centos@$addr:/etc/blue/ssl/blue-user/ca.pem . >/dev/null 2>/dev/null || echo "\033[0;31mCommand failed\033[0m at downloading ca.pem"
	touch kubeconfig-aws.yaml
	export KUBECONFIG=`pwd`/kubeconfig-aws.yaml
	kubectl config set-cluster aws-cluster --insecure-skip-tls-verify --server https://$addr:6443 >/dev/null
	kubectl config set-credentials aws-cluster-admin --client-certificate kube-apiserver.pem --client-key kube-apiserver-key.pem >/dev/null
	kubectl config set-context aws --cluster aws-cluster --user aws-cluster-admin >/dev/null
	kubectl config use-context aws >/dev/null
	popd >/dev/null 2>/dev/null
	cd $loc
	# Self-check: the new context must answer queries and point at $addr.
	kubectl get namespace >/dev/null 2>/dev/null || {echo "Self check after the script \033[0;31mFAILED\033[0m (Could not get namespaces). Sorry :(" && return 1 }
	{ kubectl cluster-info | grep $addr >/dev/null 2>/dev/null} || {echo "Self check after the script \033[0;31mFAILED\033[0m (Cluster set to wrong IP). Sorry :(" && return 1 }
	echo "\033[0;32mScript succeeded, cluster set to IP $addr\033[0m"
}
curlCftGw(){
	# curlCftGw PATH — GET https://<master>/<PATH> on the CFT gateway
	# using the client certs kubeCft fetched into ~/tmp earlier.
	# Extract the API-server host from `kubectl cluster-info`.
	gw_host=$(kubectl cluster-info | grep master | grep -oh https://.\* | awk -v FS="(https://|:)" '{print $2}')
	gw_reply=$(curl -k --cacert ~/tmp/ca.pem --key ~/tmp/blue-admin-key.pem --cert ~/tmp/blue-admin.pem https://"$gw_host"/"$1" )
	echo $gw_reply
}
# changeLogLevel PORT CAPABILITY POD LEVEL — fetch a capability's logger
# config from inside the pod, rewrite every WARN/DEBUG/ERROR/INFO entry
# to LEVEL, and PUT the result back; prints the reply via jq.
changeLogLevel(){
	port=$1
	capability=$2
	pod=$3
	level=$4
	API_VERSION_COMMON=v1alpha3
	# NOTE(review): $service is never set in this function — the GET uses
	# --resolve $service:... while getLogLevel resolves $capability
	# instead.  Looks like a bug; confirm which name is intended.
	command="kubectl exec -it $pod -- curl -s -k --key etc/blue/ssl/machine-svc-key --cert etc/blue/ssl/machine-svc-cert --resolve $service:$port:127.0.0.1 https://$capability:$port/$API_VERSION_COMMON/config"
	loggers=`eval $command`
	# Bulk-replace every log level in the JSON blob with the new one.
	replacedLoggers=$(echo $loggers | sed -E "s/WARN|DEBUG|ERROR|INFO/$level/g")
	command2="curl -s -k --key etc/blue/ssl/machine-svc-key --cert etc/blue/ssl/machine-svc-cert -X PUT -H \"Content-Type: application/json\" --data '$replacedLoggers' --resolve $capability:$port:127.0.0.1 https://$capability:$port/$API_VERSION_COMMON/config"
	echo "\033[0;32mResult: \033[0m"
	eval "kubectl exec -it $pod -- $command2" | jq
}
getLogLevel(){
	# getLogLevel PORT CAPABILITY POD — dump a capability's logger
	# configuration as pretty-printed JSON.
	port=$1
	capability=$2
	pod=$3
	API_VERSION_COMMON=v1alpha3
	# Run curl inside the pod against the capability's loopback-resolved
	# config endpoint, authenticated with the machine service certs.
	cfg_cmd="kubectl exec -it $pod -- curl -s -k --key etc/blue/ssl/machine-svc-key --cert etc/blue/ssl/machine-svc-cert --resolve $capability:$port:127.0.0.1 https://$capability:$port/$API_VERSION_COMMON/config"
	loggers=$(eval $cfg_cmd)
	echo $loggers | jq
}
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
| true
|
6fa2489de917607501f0801f9d438050590dfa3a
|
Shell
|
myArchivedProjects/special-octo-sniffle
|
/cloud_init_LVM_on_ephemeral_disks.sh
|
UTF-8
| 2,602
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
# Adapted from:
# https://gist.githubusercontent.com/joemiller/6049831/raw/f2a458c3afcb3b57cb7a6d9cb3d9534482982086/raid_ephemeral.sh
#
# this script will attempt to detect any ephemeral drives on an EC2 node and create a LVM mounted /opt
# It should be run early on the first boot of the system.
#
# Beware, This script is NOT fully idempotent.
#
# WARNING: destructive — zeroes the detected ephemeral disks before
# creating the volume group.
METADATA_URL_BASE="http://169.254.169.254/2012-01-12"
yum -y -d0 install curl
# Take into account xvdb or sdb
# NOTE(review): `grep -v grep` is a no-op in this pipeline (nothing here
# matches "grep"); row 2 of `df -h` is assumed to be the root filesystem.
root_drive=`df -h | grep -v grep | awk 'NR==2{print $1}'`
if [ "$root_drive" == "/dev/xvda1" ]; then
 echo "Detected 'xvd' drive naming scheme (root: $root_drive)"
 DRIVE_SCHEME='xvd'
else
 echo "Detected 'sd' drive naming scheme (root: $root_drive)"
 DRIVE_SCHEME='sd'
fi
# figure out how many ephemerals we have by querying the metadata API, and then:
# - convert the drive name returned from the API to the hosts DRIVE_SCHEME, if necessary
# - verify a matching device is available in /dev/
drives=""
ephemeral_count=0
ephemerals=$(curl --silent $METADATA_URL_BASE/meta-data/block-device-mapping/ | grep ephemeral)
for e in $ephemerals; do
 echo "Probing $e .."
 device_name=$(curl --silent $METADATA_URL_BASE/meta-data/block-device-mapping/$e)
 # might have to convert 'sdb' -> 'xvdb'
 device_name=$(echo $device_name | sed "s/sd/$DRIVE_SCHEME/")
 device_path="/dev/$device_name"
 # test that the device actually exists since you can request more ephemeral drives than are available
 # for an instance type and the meta-data API will happily tell you it exists when it really does not.
 if [ -b $device_path ]; then
  echo "Detected ephemeral disk: $device_path"
  drives="$drives $device_path"
  ephemeral_count=$((ephemeral_count + 1 ))
 else
  echo "Ephemeral disk $e, $device_path is not present. skipping"
 fi
done
if [ "$ephemeral_count" = 0 ]; then
 echo "No ephemeral disk detected. exiting"
 exit 0
fi
# ephemeral0 is typically mounted for us already. umount it here
umount /mnt
umount /media/*
# overwrite first few blocks in case there is a filesystem
for drive in $drives; do
 dd if=/dev/zero of=$drive bs=4096 count=1024
done
partprobe
# Build one volume group spanning all ephemeral disks, with a single
# logical volume filling it, formatted ext4 and mounted at /opt.
for drive in $drives
do
 pvcreate $drive
done
vgcreate /dev/vg01 $drives
lvcreate -l 100%FREE -n lvol_opt vg01
mkfs -t ext4 /dev/vg01/lvol_opt
mkdir -p /opt
mount -t ext4 -o noatime /dev/vg01/lvol_opt /opt
# Remove xvdb/sdb from fstab
# NOTE(review): chmod 777 makes /etc/fstab world-writable; 644 is the
# conventional mode and would suffice for the sed edits below.
chmod 777 /etc/fstab
sed -i "/${DRIVE_SCHEME}b/d" /etc/fstab
sed -i "/ephemeral/d" /etc/fstab
# Make /opt appear on reboot
echo "/dev/vg01/lvol_opt /opt ext4 noatime 0 0" | tee -a /etc/fstab
| true
|
64a0b539c98ea68c05855b2e915e15fa2338af45
|
Shell
|
bivkarki/ShellScripting-Practice
|
/greater_num.sh
|
UTF-8
| 469
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Read three whitespace-separated numbers from one line of input and
# report the largest one.

# greatest A B C — print the largest of three integers to stdout.
# Factored out of the original copy-pasted if/elif chain so the
# comparison is reusable and testable; quoting added so empty/odd input
# fails loudly instead of breaking the test expression silently.
greatest() {
	if [ "$1" -ge "$2" ] && [ "$1" -ge "$3" ]; then
		echo "$1"
	elif [ "$2" -ge "$3" ]; then
		echo "$2"
	else
		echo "$3"
	fi
}

echo "Enter any 3 Numbers: "
read -a nums # whitespace-separated values land in an array
# ${#nums[*]} is the element count; message typos/casing normalised.
echo "You entered ${#nums[*]} numbers"
echo "The three numbers are: ${nums[0]},${nums[1]},${nums[2]}"
echo "The greater number is: $(greatest "${nums[0]}" "${nums[1]}" "${nums[2]}")"
| true
|
301fb6646fb8425173af01c069ec001cf74e658e
|
Shell
|
thalesfsp/insanehash
|
/test.sh
|
UTF-8
| 3,918
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
### Tests for the CLI version of insanehash
# Author: Thales Pinheiro
# Since: 07/28/2015
# Basic usage:
# sh test
# ./test
#
# Copyright (c) 2015
# Thales Pinheiro. All rights reserved.
#
# Copyright terms:
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
#    must display the following acknowledgement:
#    This product includes software developed by Thales Pinheiro
# 4. The names of the author cannot be used to endorse or promote products
#    derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.

# One fixture string is hashed twice per algorithm — once via the short
# CLI flag and once via the long option — and the two results must match.
# The original spelled this out as 7 x 3 nearly identical stanzas; the
# tables below collapse that duplication.  Exit status is unchanged:
# 0 when every pair matches, 1 on the first mismatch (column alignment of
# the progress output is normalised via printf).
readonly INPUT='aáâã09!?.,<>'
algorithms=(blake32 bmw cubehash skein halfskein shabal keccak)
flags=(b m c s k a e)

# run_hash OPTION — hash $INPUT via the CLI with the given option word.
run_hash() {
  node bin/insanehash.js "$1" "$INPUT"
}

declare -a short_out long_out

echo "Using short version of options and hashing \"$INPUT\":"
for i in "${!algorithms[@]}"; do
  short_out[i]=$(run_hash "-${flags[i]}")
  printf '%-11s%s\n' "${algorithms[i]}:" "${short_out[i]}"
done

echo && echo "Using long version of options and hashing \"$INPUT\":"
for i in "${!algorithms[@]}"; do
  long_out[i]=$(run_hash "--${algorithms[i]}")
  printf '%-11s%s\n' "${algorithms[i]}:" "${long_out[i]}"
done

# Compare short vs long output pairwise; fail fast on the first mismatch.
echo && echo 'Comparing the two versions:'
for i in "${!algorithms[@]}"; do
  if [ "${short_out[i]}" == "${long_out[i]}" ]; then
    echo "${algorithms[i]}: OK"
  else
    echo "${algorithms[i]}: NOK"
    exit 1
  fi
done
| true
|
371fc809d3457a75c5358fd47f1b09cd9fbb8688
|
Shell
|
Etersoft/anyservice
|
/test/start-stop-tests/test_process_exec
|
UTF-8
| 240
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Test fixture: record this process's identity (user, pid, cwd, home)
# into files inside the directory given as $1, then exec into a 10s
# sleep so the harness can inspect a live process with that pid.

mkdir "$1"
# Guard the cd (SC2164): without it, a failed mkdir/cd made the script
# scribble its marker files into whatever the current directory was.
cd "$1" || exit 1

myuser="my.user"
mypid="my.pid"
mydir="my.dir"
myhome="my.home"

echo "Started"
echo "$USER" > "$myuser"
echo $$ > "$mypid"
pwd > "$mydir"
echo "$HOME" > "$myhome"

# exec replaces the shell, so the recorded $$ is the sleeping process.
exec sleep 10
#rm -f $myuser $mypid $mydir $myhome
| true
|
ef32f97388b52fb0ea8f76e200cec7f35378d1c7
|
Shell
|
RUGSoftEng/CloudVisualizer
|
/cloudwatch/runcrawler.sh
|
UTF-8
| 876
| 2.734375
| 3
|
[] |
no_license
|
# Dev runner: install JS deps, map cwdev.nl to the docker bridge, start
# the cloudwatch container with chrome/mongo inside it, then run the app
# server in the foreground and stop the container on exit.
# NOTE(review): no shebang — relies on being invoked via a shell.
npm install
# Map the dev hostname onto the docker bridge gateway exactly once.
if ! grep -Fxq "172.17.0.1 cwdev.nl" /etc/hosts
then
	sudo -- sh -c "echo 172.17.0.1 cwdev.nl >> /etc/hosts"
fi
sudo docker run -d -v $PWD:/host -p 8000:3000 -it rvanbuijtenen/cloudwatch:compressed /bin/bash
# NOTE(review): `docker ps -q` lists ALL running containers — this
# assumes the one started above is the only one running; verify.
IP=$(sudo docker inspect --format '{{ .NetworkSettings.IPAddress }}' $(sudo docker ps -q))
# Headless chrome + mongo run detached inside the container.
sudo docker exec -d $(sudo docker ps -q) bash -c "google-chrome --no-sandbox --headless --disable-gpu --remote-debugging-port=9222 &"
sudo docker exec -d $(sudo docker ps -q) bash -c "service mongodb start"
sudo docker exec -d $(sudo docker ps -q) bash -c "mongod --dbpath /host/db"
echo "Starting server on cwdev.nl (172.17.0.1)"
echo "Access VM directly on "$IP":3000"
echo "Press CTRL-C to exit"
# Foreground server; returning from this (CTRL-C) falls through to stop.
sudo docker exec -it $(sudo docker ps -q) bash -c "cd host && npm run startserver -- --no-seed"
echo "Shutting down VM..."
sudo docker stop $(sudo docker ps -q)
| true
|
422a3cf658b48a91a4d2fe87725252e41cb74a35
|
Shell
|
danikron/dotfiles
|
/sysscripts/scratchtoggle.sh
|
UTF-8
| 361
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Toggle the bspwm scratchpad: verify the cached window id really is the
# scratchpad, pull it onto the focused monitor, then unhide and focus it.
#
# Shebang fixed to bash: the script uses [[ ]], which the original
# '#! /bin/sh' shebang breaks on systems where sh is dash.

ID=$(cat /tmp/scratchid)

# Check for proper scratchpad
[[ $(xprop -id "$ID" WM_CLASS | cut -d'"' -f2) = "scratchpad" ]] || exit 1

# Make sure scratchpad is on the focused monitor
[[ $(bspc query -m focused -N | grep "$ID") ]] || bspc node "$ID" --flag sticky=off -m focused --flag sticky=on

# Show and focus scratchpad
bspc node "$ID" --flag hidden -f
| true
|
d75a826290baf54a41130f0f22d31aafcb87ad11
|
Shell
|
dan-kruk/scrap
|
/flib
|
UTF-8
| 266
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive job-control helpers, meant to be sourced into a shell.

# thread — print "<pid> <count> <date>" forever, with each tick aligned
# to a 5-second wall-clock boundary (sleeps the remainder of the current
# 5s window).  Run in the background, e.g. `thread &`.
thread ()
{
	local i;
	date;
	# ((++i)) is always true once i >= 1, so this loops until killed.
	while ((++i)); do
		sleep $((5-$(date +%s)%5));
		echo "$BASHPID $i $(date)";
	done
}
# jkill — kill every background job of the current shell, addressing
# them by jobspec (%N) so the shell reaps them properly.
jkill ()
{
	local jx
	# Extract the job numbers from the bracketed `jobs` listing.
	for j in $(jobs|sed 's/^\[\([0-9]*\).*/\1/g')
	do
		jx+="%$j "
	done
	# $jx is deliberately unquoted: one word per jobspec.
	kill $jx
}
| true
|
d5aaee96501fa0b184cdb2dd763e19c898a49d9b
|
Shell
|
FauxFaux/debian-control
|
/o/oss4/oss4-dev_4.2-build2010-5_all/postrm
|
UTF-8
| 199
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# Debian maintainer script (postrm) for oss4-dev: on removal, undo the
# dpkg diversion of the kernel soundcard header so the original file
# comes back.  $1 is the action dpkg invokes us with; every other action
# (upgrade, failed-upgrade, ...) is a deliberate no-op.
set -e
case "$1" in
	remove|purge|disappear)
		dpkg-divert --package oss4-dev --rename --remove "/usr/include/linux/soundcard.h"
	;;
	*)
		exit 0
	;;
esac
| true
|
262cfc9c57da13d76a4cf2a9e07b142f41b9c90b
|
Shell
|
sidhant-gupta-004/OS-tuhdo
|
/.legacy/run_bootloader.sh
|
UTF-8
| 470
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# Boot disk.img (as a floppy) in QEMU; pass -gdb to wait for a debugger.

if [ "$1" = "-gdb" ]; then
    # Expose a gdb stub on localhost:4444 and freeze the VM (-S) until
    # the debugger connects — lets you debug the bootloader from gdb.
    qemu-system-i386 -machine q35 -drive file=disk.img,format=raw,index=0,if=floppy -gdb tcp::4444 -S
else
    qemu-system-i386 -machine q35 -drive file=disk.img,format=raw,index=0,if=floppy
fi
| true
|
5aca9f1c907937c967ad45b1c3e7beeb4061fc9c
|
Shell
|
epsiro/timelog-core
|
/scripts/timelog-core-uninstall.bash
|
UTF-8
| 1,614
| 3.953125
| 4
|
[
"ISC"
] |
permissive
|
#!/usr/bin/env bash
if [ "$#" -eq "1" ] && [ "$1" == "--drop" ] ; then
drop=true
elif [ "$#" -ne "0" ] ; then
echo "Usage: $0 [--drop]" 1>&2
exit 1
else
unset drop
fi
if [ "$( id -u )" -ne "0" ] ; then
echo "Need root privileges to run." 1>&2
exit 2
fi
sudo -u nobody bash $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/timelog-core-install.bash --version-check-only || exit 3
function uninstall_timelog {
    # Interactive teardown: stops the service, removes unit/user/files
    # and, when --drop was given, drops the timelog database and role.
    read -p "Stop timelog service and kill all timelog user owned processes? [y/N] " -n 1 -r
    # With -n 1 a bare Enter leaves REPLY empty and no character was
    # echoed; only emit the newline when something was typed.
    # Fix: the original `[ ! $REPLY == "" ]` left $REPLY unquoted.
    if [ -n "$REPLY" ] ; then
        echo
    fi
    if [[ ! $REPLY =~ ^[Yy]$ ]]
    then
        echo "Abort." 1>&2
        exit 1
    fi
    echo "Stopping timelog service. This might take a while." 1>&2
    systemctl stop timelog.service
    pkill -u timelog
    read -p "Really remove timelog service, user and files? [y/N] " -n 1 -r
    if [ -n "$REPLY" ] ; then
        echo
    fi
    if [[ ! $REPLY =~ ^[Yy]$ ]]
    then
        echo "Abort." 1>&2
        exit 1
    fi
    # Remove the systemd unit, nginx site, user and data.
    systemctl disable timelog.service
    rm /etc/systemd/system/timelog.service
    systemctl daemon-reload
    rm /etc/nginx/sites-available/timelog
    userdel timelog
    apt-get -y remove timelog-core-avahi
    apt-get -y remove timelog-core-base
    rm -rf /var/lib/timelog
    if [ "$drop" == "true" ] ; then
        read -p "Really drop all timelog data from DB? [y/N] " -n 1 -r
        if [ -n "$REPLY" ] ; then
            echo
        fi
        if [[ $REPLY =~ ^[Yy]$ ]]
        then
            sudo -u postgres -i psql <<EOF
DROP DATABASE timelog;
DROP USER timelog;
EOF
        else
            echo "Abort." 1>&2
            exit 1
        fi
    fi
    echo "Done uninstalling timelog." 1>&2
}

uninstall_timelog
| true
|
68e8d7a7c87613f395758a695c0377d1a9afe7d8
|
Shell
|
TedHaley/TraderBot
|
/web_app/run.sh
|
UTF-8
| 254
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
#export SECRETS=/deploy/secrets.env

# Compose file and docker network used by the stack.
COMPOSE="docker-compose.yml"
NETWORK="trader-network"

# Create the network once; report when it already exists.
if ! docker network create "$NETWORK"; then
    echo "network already exists: $NETWORK"
fi

echo "starting compose: $COMPOSE"
docker-compose -f "$COMPOSE" up --build
| true
|
960b3d0a51ed6a3828043e3ba9df02101d8237e8
|
Shell
|
jacob-brown/EquSeq
|
/scripts/listErrors.sh
|
UTF-8
| 295
| 3.375
| 3
|
[] |
no_license
|
# usage
# list errors in file with specific grep string
# sh listErrors.sh fastqDownload.sh closed

FILENAME=$1
errorGrep="$2"

# Fix: collect the error files with a glob instead of word-splitting
# `ls` output (which breaks on unusual file names).
files=( "$FILENAME".e* )

echo "printing problem files with grep: " "$errorGrep"
for i in "${files[@]}"
do
    # When nothing matches the glob, the literal pattern remains; skip it.
    [ -e "$i" ] || continue
    # -q: only the exit status matters; -- protects patterns starting
    # with a dash.
    if grep -q -- "$errorGrep" "$i"; then
        echo "$i"
    fi
done
| true
|
a4c122994a9ad6f1f078808b6ff55890142b3cf0
|
Shell
|
abdulghanimohammed/comcast-test
|
/Task1.sh
|
UTF-8
| 337
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
#Usuage bash key.sh key.pem
#FIle variable contain pem key"
FILE="$1"
LOCATION="/home/user/.ssh/authorized_keys"

# Verify that the key file exists before touching any server.
check()
{
    if [ -f "$FILE" ]
    then
        echo "Key File found,preparing to transfer"
    else
        echo "File not found "
        exit 0
    fi
}

# Fix: check() was defined but never invoked, so the script looped
# over the servers even when the key file was missing.
check

# One server per line in ips.txt; -r keeps backslashes literal.
while read -r server; do
    echo "$server"
    #scp -p "$FILE" "$server":"$LOCATION"
done < ips.txt
| true
|
9713a1365cc711ddf956905f1e62eb71788caa1b
|
Shell
|
tom-haines/schemaspy-mssql-github-pages
|
/task_serve.sh
|
UTF-8
| 416
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Serve ${DIR} (defined in task_config.sh) over HTTP for local preview.

source task_config.sh

REQUIRED_TOOLS=(
  "python"
)

HOST="127.0.0.1"
PORT="8000"

# Fix: quote the array expansion and every variable so tool names or
# config values containing spaces survive intact.
for tool in "${REQUIRED_TOOLS[@]}"; do
  if ! command -v "${tool}" >/dev/null; then
    echo "${tool} is required ..."
    exit 1
  fi
done

echo "Preview dir ${DIR} at http://${HOST}:${PORT}"

# See https://docs.python.org/3/library/http.server.html
python -m http.server "${PORT}" --bind "${HOST}" --directory "${DIR}"
| true
|
09377f4d61770a37427429700e704025c5b832f3
|
Shell
|
jason-work/system-config
|
/bin/update-android-mirror
|
UTF-8
| 1,400
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Refresh the local Android mirror: the Linux kernel mirror, the
# repo-managed Android trees (vendor + AOSP manifest branches), and the
# git-repo tool itself.
set -ex
. ~/system-config/.bashrc
. ~/system-config/bin/set-ssh-agent
(
    # Kernel mirror: skip quietly if the clone does not exist here.
    cd ~/src/android-mirror/linux-2.6.git || exit 0
    git fetch https://github.com/mirrors/linux
)
(
    cd ~/src/android-mirror/ || exit 0
    (
        # Vendor (Qualcomm) manifests; any failure in this subshell is
        # tolerated (`|| true` below).
        repo init -u s:qualcomm/platform/manifest.git -b sanfrancisco -m sfo-rom.xml
        repo sync -j8 -f -m icesky-rom.xml
        repo sync -j8 -f -m sfo-rom.xml
        cd ~/src/android-mirror/kernel/linux.git
        git fetch linus
    ) || true
    # AOSP traffic is routed through a local proxy.
    export http_proxy=http://localhost:8888
    export https_proxy=http://localhost:8888
    # Branch list: the newest android-N branch found in the manifest
    # repo, plus the branches recorded in .repo/update-android-branches
    # (seeded with "master" on first run).
    for branch in $(
        ( cd .repo/manifests >/dev/null 2>&1
          git branch -a | grep -P -e '/android-\d' | sort -n | tail -n 1 | perl -npe 's,.*/,,'
        )
        if test ! -e .repo/update-android-branches; then
            echo master > .repo/update-android-branches
        fi
        cat .repo/update-android-branches 2>/dev/null || true
    );
    do
        # Switch the checkout to each branch and sync; sync failures are
        # tolerated so remaining branches still get updated.
        if repo-switch -u https://android.googlesource.com/platform/manifest -b $branch -m default.xml; then
            repo sync -j1 || true
        fi
    done
)
#git clone --mirror --bare git://android.git.kernel.org/tools/repo.git
cd ~/src/android-mirror/git-repo.git
git fetch
echo ok, repo update complete
| true
|
f0e9fa73c7f0ac1337f03cfb65e34e1cf8694378
|
Shell
|
divine-dotfiles/divine-dotfiles
|
/lib/helpers/link-queue.hlp.sh
|
UTF-8
| 8,760
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#:title: Divine Bash deployment helpers: link-queue
#:author: Grove Pyree
#:email: grayarea@protonmail.ch
#:revdate: 2019.12.11
#:revremark: Re-arrange dependencies in spec queue helpers
#:created_at: 2019.04.02
## Part of Divine.dotfiles <https://github.com/divine-dotfiles/divine-dotfiles>
#
## Helper functions for deployments based on template 'link-queue.dpl.sh'
#
## Replaces arbitrary files (e.g., config files) with symlinks to provided
#. replacements. Creates backup of each replaced file. Restores original set-up
#. on removal.
#
# Marker and dependencies
readonly D__HLP_LINK_QUEUE=loaded  # load guard: marks this helper as loaded
d__load procedure prep-md5  # provides d__md5 (used for stash keys below)
d__load util workflow  # provides d__context / d__notify used throughout
d__load util stash  # provides d__stash used throughout
d__link_queue_check()
{
  # Map the generic queue hooks onto the link-queue implementations,
  # then delegate to the shared queue check driver.
  d_queue_pre_check() { d__link_queue_pre_check; }
  d_item_pre_check() { d_link_item_pre_check; }
  d_item_check() { d__link_item_check; }
  d_item_post_check() { d_link_item_post_check; }
  d_queue_post_check() { d__link_queue_post_check; }
  d__queue_check
}
d__link_queue_install()
{
  # Map the generic queue hooks onto the link-queue implementations,
  # then delegate to the shared queue install driver.
  d_queue_pre_install() { d__link_queue_pre_install; }
  d_item_pre_install() { d_link_item_pre_install; }
  d_item_install() { d__link_item_install; }
  d_item_post_install() { d_link_item_post_install; }
  d_queue_post_install() { d__link_queue_post_install; }
  d__queue_install
}
d__link_queue_remove()
{
  # Map the generic queue hooks onto the link-queue implementations,
  # then delegate to the shared queue remove driver.
  d_queue_pre_remove() { d__link_queue_pre_remove; }
  d_item_pre_remove() { d_link_item_pre_remove; }
  d_item_remove() { d__link_item_remove; }
  d_item_post_remove() { d_link_item_post_remove; }
  d_queue_post_remove() { d__link_queue_post_remove; }
  d__queue_remove
}
d__link_queue_pre_check()
{
  # Validate the queue arrays, run the deployment's optional pre-check
  # hook, and provide no-op item hooks when none are defined.
  # Switch context; prepare stash
  d__context -- push 'Preparing link-queue for checking'
  d__stash -- ready || return 1

  # Ensure the required arrays are continuous at the given section
  local d__i; for ((d__i=$D__QUEUE_SECTMIN;d__i<$D__QUEUE_SECTMAX;++d__i)); do
    if [ -z ${D_QUEUE_ASSETS[$d__i]+isset} ]; then
      d__notify -lxht 'Link-queue failed' -- \
        'Array $D_QUEUE_ASSETS is not continuous in the given section'
      return 1
    fi
    if [ -z ${D_QUEUE_TARGETS[$d__i]+isset} ]; then
      d__notify -lxht 'Link-queue failed' -- \
        'Array $D_QUEUE_TARGETS is not continuous in the given section'
      return 1
    fi
  done

  # Run queue pre-processing, if implemented; the hook is unset after
  # use so it cannot fire twice.
  local d__rtc=0; if declare -f d_link_queue_pre_check &>/dev/null
  then d_link_queue_pre_check; d__rtc=$?; unset -f d_link_queue_pre_check; fi

  # If item check hooks are not implemented, implement dummies
  if ! declare -f d_link_item_pre_check &>/dev/null
  then d_link_item_pre_check() { :; }; fi
  if ! declare -f d_link_item_post_check &>/dev/null
  then d_link_item_post_check() { :; }; fi

  d__context -- pop; return $d__rtc
}
d__link_item_check()
{
  # Check one queue item: is the target path a symlink to the asset,
  # and does the stash record agree?
  # Init storage variables; switch context
  local d__lqei="$D__ITEM_NUM" d__lqrtc d__lqer=()
  local d__lqea="${D_QUEUE_ASSETS[$d__lqei]}"
  local d__lqet="${D_QUEUE_TARGETS[$d__lqei]}"
  local d__lqesk="link_$( d__md5 -s "$d__lqet" )"
  local d__lqeb="$D__DPL_BACKUP_DIR/$d__lqesk"
  local d__ddsl=false
  d__context -- push "Checking if linked at: '$d__lqet'"

  # Do sanity checks
  [ -n "$d__lqea" ] || d__lqer+=( -i- '- asset path is empty' )
  [ -n "$d__lqet" ] || d__lqer+=( -i- '- target path is empty' )
  if ((${#d__lqer[@]})); then
    d__notify -lxh -- 'Invalid link-queue item:' "${d__lqer[@]}"
    d__context -- pop; return 3
  fi

  # Do the actual checking; check if source is readable
  # A dead symlink at the target is surfaced as a warning and a prompt.
  if [ ! -e "$d__lqet" -a -L "$d__lqet" ]; then
    D_ADDST_WARNING+=("Dead symlink at: $d__lqet")
    D_ADDST_PROMPT=true
    d__ddsl=true
  fi
  # NOTE(review): the numeric check codes (1/2/3/6/7) follow the
  # framework's queue semantics, defined outside this file.
  if [ -e "$d__lqet" ] || $d__ddsl; then
    if [ -L "$d__lqet" -a "$d__lqet" -ef "$d__lqea" ]
    then d__stash -s -- has $d__lqesk && d__lqrtc=1 || d__lqrtc=7
    else d__stash -s -- has $d__lqesk && d__lqrtc=6 || d__lqrtc=2; fi
  else d__stash -s -- has $d__lqesk && d__lqrtc=6 || d__lqrtc=2; fi
  if ! [ $d__lqrtc = 1 ] && [ -e "$d__lqeb" ];
  then d__notify -l!h -- "Orphaned backup at: $d__lqeb"; fi
  if ! [ -r "$d__lqea" ]; then
    d__notify -lxh -- "Unreadable asset at: $d__lqea"
    [ "$D__REQ_ROUTINE" = install ] && d__lqrtc=3
  fi

  # Switch context and return
  d__context -qq -- pop "Check code is '$d__lqrtc'"; return $d__lqrtc
}
d__link_queue_post_check()
{
  # Tear down the per-item check hooks and run the deployment's
  # optional post-check hook.
  # Switch context; unset check hooks
  d__context -- push 'Tidying up after checking link-queue'
  unset -f d_link_item_pre_check d_link_item_post_check

  # Run queue post-processing, if implemented
  local d__rtc=0; if declare -f d_link_queue_post_check &>/dev/null
  then d_link_queue_post_check; d__rtc=$?; unset -f d_link_queue_post_check; fi

  d__context -- pop; return $d__rtc
}
d__link_queue_pre_install()
{
  # Run the deployment's optional pre-install hook and provide no-op
  # item hooks when none are defined.
  # Switch context; run queue pre-processing, if implemented
  d__context -- push 'Preparing link-queue for installing'
  local d__rtc=0; if declare -f d_link_queue_pre_install &>/dev/null
  then
    d_link_queue_pre_install; d__rtc=$?; unset -f d_link_queue_pre_install
  fi

  # If item install hooks are not implemented, implement dummies
  if ! declare -f d_link_item_pre_install &>/dev/null
  then d_link_item_pre_install() { :; }; fi
  if ! declare -f d_link_item_post_install &>/dev/null
  then d_link_item_post_install() { :; }; fi

  d__context -- pop; return $d__rtc
}
d__link_item_install()
{
  # Install one item: back up whatever sits at the target, then symlink
  # the target to the asset (escalating to sudo when the directory is
  # not writable).
  # Init storage variables; switch context
  local d__lqei="$D__ITEM_NUM" d__lqrtc d__lqcmd
  local d__lqea="${D_QUEUE_ASSETS[$d__lqei]}"
  local d__lqet="${D_QUEUE_TARGETS[$d__lqei]}"
  local d__lqesk="link_$( d__md5 -s "$d__lqet" )"
  local d__lqeb="$D__DPL_BACKUP_DIR/$d__lqesk"
  d__context -- push "Installing a link at: '$d__lqet'"

  # Do the actual installing
  if d__push_backup -- "$d__lqet" "$d__lqeb"; then
    d__lqcmd=ln; d__require_wdir "$d__lqet" || d__lqcmd='sudo ln'
    $d__lqcmd -s &>/dev/null -- "$d__lqea" "$d__lqet" \
      && d__lqrtc=0 || d__lqrtc=1
  else d__lqrtc=1; fi
  if [ $d__lqrtc -eq 0 ]; then
    # Record the link in the stash for fresh installs, and remember the
    # original path next to the backup if one was made.
    case $D__ITEM_CHECK_CODE in
      2|7) d__stash -s -- set $d__lqesk "$d__lqet" || d__lqrtc=1;;
    esac
    [ -e "$d__lqeb" ] && printf '%s\n' "$d__lqet" >"$d__lqeb.path"
  fi

  # Switch context and return
  d__context -qq -- pop "Install code is '$d__lqrtc'"; return $d__lqrtc
}
d__link_queue_post_install()
{
  # Tear down the per-item install hooks and run the deployment's
  # optional post-install hook.
  # Switch context; unset install hooks
  d__context -- push 'Tidying up after installing link-queue'
  unset -f d_link_item_pre_install d_link_item_post_install

  # Run queue post-processing, if implemented
  local d__rtc=0; if declare -f d_link_queue_post_install &>/dev/null
  then
    d_link_queue_post_install; d__rtc=$?; unset -f d_link_queue_post_install
  fi

  d__context -- pop; return $d__rtc
}
d__link_queue_pre_remove()
{
  # Run the deployment's optional pre-remove hook and provide no-op
  # item hooks when none are defined.
  # Switch context; run queue pre-processing, if implemented
  d__context -- push 'Preparing link-queue for removing'
  local d__rtc=0; if declare -f d_link_queue_pre_remove &>/dev/null
  then
    d_link_queue_pre_remove; d__rtc=$?; unset -f d_link_queue_pre_remove
  fi

  # If item remove hooks are not implemented, implement dummies
  if ! declare -f d_link_item_pre_remove &>/dev/null
  then d_link_item_pre_remove() { :; }; fi
  if ! declare -f d_link_item_post_remove &>/dev/null
  then d_link_item_post_remove() { :; }; fi

  d__context -- pop; return $d__rtc
}
d__link_item_remove()
{
  # Remove one item: restore the backed-up original over the link and
  # clear the stash record.
  # Init storage variables; switch context
  local d__lqei="$D__ITEM_NUM" d__lqrtc d__lqeo
  local d__lqea="${D_QUEUE_ASSETS[$d__lqei]}"
  local d__lqet="${D_QUEUE_TARGETS[$d__lqei]}"
  local d__lqesk="link_$( d__md5 -s "$d__lqet" )"
  local d__lqeb="$D__DPL_BACKUP_DIR/$d__lqesk"
  d__context -- push "Undoing link at: '$d__lqet'"

  # Do the actual removing; '-ed' is used when obliterating or when the
  # item was recorded as installed (check code 1).
  d__lqeo='-e'; if $D__OPT_OBLITERATE || [ "$D__ITEM_CHECK_CODE" -eq 1 ]
  then d__lqeo='-ed'; fi
  d__pop_backup $d__lqeo -- "$d__lqet" "$d__lqeb" && d__lqrtc=0 || d__lqrtc=1
  if [ $d__lqrtc -eq 0 ]; then
    case $D__ITEM_CHECK_CODE in
      1|6) d__stash -s -- unset $d__lqesk || d__lqrtc=1;;
    esac
    if [ -e "$d__lqeb" ]
    then d__notify -l!h -- "An older backup remains at: $d__lqeb"
    else rm -f -- "$d__lqeb.path"; fi
  fi

  # Switch context and return
  d__context -qq -- pop "Remove code is '$d__lqrtc'"; return $d__lqrtc
}
d__link_queue_post_remove()
{
  # Tear down the per-item remove hooks and run the deployment's
  # optional post-remove hook.
  # Switch context; unset remove hooks
  d__context -- push 'Tidying up after removing link-queue'
  unset -f d_link_item_pre_remove d_link_item_post_remove

  # Run queue post-processing, if implemented.
  # Bug fix: this previously looked up and ran d_copy_queue_post_remove
  # (a copy-paste from the copy-queue helper), so a deployment's
  # d_link_queue_post_remove hook was silently ignored.
  local d__rtc=0; if declare -f d_link_queue_post_remove &>/dev/null
  then
    d_link_queue_post_remove; d__rtc=$?; unset -f d_link_queue_post_remove
  fi

  d__context -- pop; return $d__rtc
}
| true
|
88ccb42c6d5fa03eb60b83ab696e1e67fa7566b2
|
Shell
|
SofiMarcos/recovering_host_genomes
|
/1_pipeline/2_variant_calling-reference_samples.sh
|
UTF-8
| 4,732
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash

####
# Custom reference panel design
####
# Template pipeline: the quoted values below ('path/to/directory',
# 'type_of_dataset', ...) are placeholders to be filled in per run.
# NOTE(review): ${dataset} and ${panel} are used but never assigned in
# this file — presumably exported by the caller; verify before running.

# paths
workdir='path/to/directory'

# directories
mkdir "${workdir}"/RefPanel/{06_Variants,07_PanelDesign,08_Phased}
mkdir "${workdir}"/RefPanel/06_Variants/"${dataset}"
mkdir "${workdir}"/RefPanel/07_PanelDesign/"${panel}"
mkdir "${workdir}"/RefPanel/08_Phased/"${panel}"

# parameters
threads='40'

# 06 - variant calling
### A - Internal reference samples
##### paths
dataset1='type_of_dataset' # internal reference samples
ref='path/to/reference'
sample='sample_codes'
chrom='chromosome_name'

##### divide internal reference samples if needed
samtools view -h -b ${workdir}/PrePro/${dataset}/03_MapToRef/${sample}_map2host.bam ${chrom} > ${workdir}/PrePro/${dataset}/03_MapToRef/${sample}_${chrom}.bam
samtools index ${workdir}/PrePro/${dataset}/03_MapToRef/${sample}_${chrom}.bam

##### variant calling
find ${workdir}/PrePro/${dataset}/03_MapToRef/*${chrom}.bam > ${workdir}/PrePro/${dataset}/03_MapToRef/${chrom}.txt
# NOTE(review): 'find' above already writes absolute paths, and this sed
# targets "${chrom}.txt" in the current directory rather than the file
# just written — looks like a leftover from an `ls`-based version; verify.
sed -i -e "s#^#${workdir}/PrePro/${dataset}/03_MapToRef/#" ${chrom}.txt
bcftools mpileup -b ${workdir}/PrePro/${dataset}/03_MapToRef/${chrom}.txt -C 50 -q 30 -Q 20 -Ou -f ${ref} -r ${chrom} | bcftools call -m -v -Ou | bcftools filter -o ${workdir}/RefPanel/06_Variants/${dataset1}/${chrom}_all.vcf.gz -s LowQual -e '%QUAL<30 || DP<(AVG(DP)*3)' -Oz --threads ${threads}
bcftools view ${workdir}/RefPanel/06_Variants/${dataset1}/${chrom}_all.vcf.gz -o ${workdir}/RefPanel/06_Variants/${dataset1}/${chrom}_snps.vcf.gz -m2 -M2 -v snps -Oz --threads ${threads}
rm *${chrom}.bam

### B - External reference samples
##### paths
dataset2='type_of_dataset' # external reference samples
ref='path/to/reference'
chrom='chromosome_name'

##### download from database
module load anaconda3/4.4.0 enabrowsertools/1.6
enaDataGet -f fastq -d ${workdir}/PrePro/00_RawData/${sample}
rm ${workdir}/PrePro/00_RawData/${sample}.fastq.gz

#### alignment
module load samtools/1.11 bwa/0.7.16a
# Bug fix: output name used undefined ${SAMPLE} (wrong case) — shell
# variables are case-sensitive, so the BAM was written to ".bam".
bwa mem -t 8 -R "@RG\\tID:tID\\tCN:tCN\\tDS:tDS\\tPL:tPL\\tSM:${sample}" ${ref} ${workdir}/PrePro/00_RawData/${sample}_1.fastq.gz ${workdir}/PrePro/00_RawData/${sample}_2.fastq.gz | samtools sort -o ${workdir}/PrePro/${dataset}/03_MapToRef/${sample}.bam --threads ${threads}

##### genotyping
module load java/1.8.0 gatk/4.0.8.1
find ${workdir}/PrePro/${dataset}/03_MapToRef/*.bam > ${workdir}/PrePro/${dataset}/03_MapToRef/samplefile.txt
# NOTE(review): same concern as above — find already emits full paths,
# and this sed edits "samplefile.txt" in the current directory; verify.
sed -i -e "s#^#${workdir}/PrePro/${dataset}/03_MapToRef/#" samplefile.txt
# Bug fix: GATK tool names are case-sensitive — "IndexFeaturefile" is
# not a tool; the correct name is IndexFeatureFile.
gatk IndexFeatureFile -F ${workdir}/RefPanel/06_Variants/${dataset2}/${chrom}_snps.vcf.gz
gatk HaplotypeCaller --java-options "-Xmx180g" -I ${workdir}/PrePro/${dataset}/03_MapToRef/samplefile.txt --output ${workdir}/RefPanel/06_Variants/${dataset2}/${chrom}_all.vcf.gz --reference ${ref} --alleles ${workdir}/RefPanel/int/06_Variants/${chrom}_snps.vcf.gz --intervals ${workdir}/RefPanel/int/06_Variants/${chrom}_snps.vcf.gz --sample-ploidy 2 --min-base-quality-score 20 --standard-min-confidence-threshold-for-calling 30.0
gatk SelectVariants -R ${ref} -V ${workdir}/RefPanel/06_Variants/${dataset2}/${chrom}_all.vcf.gz -O ${workdir}/RefPanel/06_Variants/${dataset2}/${chrom}_snps.vcf.gz --select-type-to-include SNP

# 07 - combining datasets to design different reference panels (diverse panel as an example)
module load plink2/1.90beta6.17 bcftools/1.11
bcftools merge ${workdir}/RefPanel/06_Variants/${dataset1}/${chrom}_snps.vcf.gz ${workdir}/RefPanel/06_Variants/${dataset2}/${chrom}_snps.vcf.gz -o ${workdir}/RefPanel/07_PanelDesign/${panel}/${chrom}_snps.vcf.gz -Oz --threads ${threads}
bcftools index ${workdir}/RefPanel/07_PanelDesign/${panel}/${chrom}_snps.vcf.gz
plink --vcf ${workdir}/RefPanel/07_PanelDesign/${panel}/${chrom}_snps.vcf.gz --out ${workdir}/RefPanel/07_PanelDesign/${panel}/${chrom}_snps --double-id --make-bed --allow-extra-chr --keep-allele-order --real-ref-alleles --set-missing-var-ids '@:#\$1,\$2'
plink --bfile ${workdir}/RefPanel/07_PanelDesign/${panel}/${chrom}_snps --out ${workdir}/RefPanel/07_PanelDesign/${panel}/${chrom}_snps_filt --double-id --allow-extra-chr --keep-allele-order --real-ref-alleles --geno 0 --recode vcf-iid bgz
find ${workdir}/RefPanel/07_PanelDesign/${panel}/ -name "*nosex" -delete

# 08 - phasing
## paths
panel='some' # internal, external, combined or diverse
module load tabix/1.2.1 shapeit4/4.1.3
tabix ${workdir}/RefPanel/07_PanelDesign/${panel}/${chrom}_snps_filt.vcf.gz
shapeit4 --input ${workdir}/RefPanel/07_PanelDesign/${panel}/${chrom}_snps_filt.vcf.gz --output ${workdir}/RefPanel/08_Phased/${panel}/${chrom}_phased.vcf.gz --region ${chrom}
tabix ${workdir}/RefPanel/08_Phased/${panel}/${chrom}_phased.vcf.gz
| true
|
17d7db32f35260d146d74eb67bda7b4aed660914
|
Shell
|
DanielGunna/OkSSE-Poc
|
/Server/workload_test.sh
|
UTF-8
| 144
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Fire send_data.sh <total> times, optionally sleeping <delay> seconds
# between calls.  Usage: ./workload_test.sh <total> [delay]
# Fixes: added the missing shebang and quoted the expansions.
chmod +x ./send_data.sh
total=$1
for ((n=0;n<total;n++)); do
    ./send_data.sh "$n"
    # Optional pacing between requests ($2 defaults to 0 = no sleep).
    if [[ ${2:-0} -ne 0 ]] ; then
        sleep "$2"
    fi
done
| true
|
45caa2ed2a8d131ef0f8ff87e7414587b93aabca
|
Shell
|
mutsys/node-performance-analysis
|
/run-with-no-tuning.sh
|
UTF-8
| 320
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/sh
# Restart the app without tuning: stop running grunt / collector
# processes, clear old metrics, relaunch the collector and the app.

# Fix: pkill -f matches the full command line, replacing the fragile
# "ps | grep | tr | cut" pipeline (whose column index could shift and
# which needed the grep -v grep self-exclusion).
pkill -f grunt
pkill -f collect-node-process-data

# -f: do not fail when no previous CSV metrics exist.
rm -f *csv

nohup ./collect-node-process-data.sh &

NODE_ENV=production node app.js
| true
|
eb8a0b13812e36af311fa8bd2bd7c94ee045a63d
|
Shell
|
pgohier/Windshaft-cartodb
|
/tools/show_style
|
UTF-8
| 368
| 3.296875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# TODO: port to node, if you really need it
# Print the CartoDB map style stored in redis for <username> <tablename>.

REDIS_PORT=6379 # default port

if test -z "$2"; then
    echo "Usage: $0 <username> <tablename>" >&2
    exit 1
fi

username="$1"
tabname="$2"

# Look up the user's database name (redis db 5), then fetch the style
# and un-escape the stored \n / backslash sequences.
# Fix: $(...) instead of backticks; quote all expansions.
dbname=$(redis-cli -p "${REDIS_PORT}" -n 5 hget "rails:users:${username}" "database_name")

redis-cli get "map_style|${dbname}|${tabname}" | sed -e 's/\\n/\n/g' -e 's/\\//g'
| true
|
9481f0775f31cdfcbb97cf92f050d0e44df28857
|
Shell
|
awschef-code/Docker-setup
|
/images/mysql-backup/healthcheck
|
UTF-8
| 663
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Container healthcheck: exit 0 only when this MySQL node reports ONLINE
# in the group-replication member table AND the local web endpoint on
# port 80 answers; otherwise exit 2.

# This node's advertised host name; the tail/sed pair strips the
# "Value:" label from the \G-formatted mysql output.
hostname=$(mysql -u${MYSQL_USER:-admin} -p${MYSQL_PASSWORD:-x0cloud} --socket=${MYSQL_VOLUME:-/var/lib/mysql}/mysql.sock -e "show variables like 'report_host'\G" 2>/dev/null | tail -n 1 | sed -e"s/.*Value: //")

# Group-replication state for this host (e.g. ONLINE, RECOVERING).
state=$(mysql -u${MYSQL_USER:-admin} -p${MYSQL_PASSWORD:-x0cloud} --socket=${MYSQL_VOLUME:-/var/lib/mysql}/mysql.sock -e "select member_state from performance_schema.replication_group_members where member_host='${hostname}'\G" 2>/dev/null | tail -n 1 | sed -e"s/.*member_state: //")

# The local HTTP endpoint must answer as well.
curl -s http://localhost:80 > /dev/null
apacheStatus=$?

if [ "$state" == "ONLINE" ] && [ $apacheStatus -eq 0 ]
then
    exit 0
else
    exit 2
fi;
|
9a27e5c10d1e46d5099299505d733347d0a6db01
|
Shell
|
tanobar/S.O.L.-project
|
/analisi.sh
|
UTF-8
| 207
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Wait for the 'supermercato' process to terminate, then print the
# final report from resoconto.log line by line.

while pgrep -x supermercato >/dev/null; do sleep 1s; done

if [ -s "resoconto.log" ]; then
    # IFS= / -r preserve leading whitespace and backslashes; quoting
    # the line keeps internal spacing intact (the old unquoted
    # `echo $linea` collapsed runs of whitespace and expanded globs).
    # Reading via stdin redirection also drops the fd-3 gymnastics.
    while IFS= read -r linea; do
        printf '%s\n' "$linea"
    done < resoconto.log
else
    echo "Errore in $0" 1>&2
fi
| true
|
87d249c533a2ddd0046ed4c60391e132d0d2fec2
|
Shell
|
hm1365166/opencsw
|
/csw/mgar/pkg/cswclassutils/trunk/files/CSWcswclassutils.i.cswtexhash
|
UTF-8
| 640
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# i.cswtexhash - Class action script
# for registration of TeX packages
#
# Written and maintained by Peter Felecan
#
# 2012-06-25 Initial release

echo "Installing class <cswtexhash> ..."

# Class-action convention: "src dest" pairs arrive on stdin; copy each
# file into place, aborting the install (exit 2) on any copy failure.
while read src dest; do
    echo $dest
    /usr/bin/cp $src $dest || exit 2
done

# Rebuild the TeX filename database when TeX is already installed.
if [ -x ${PKG_INSTALL_ROOT}/opt/csw/bin/mktexlsr ]; then
    echo "Registering TeX entries. This may take some time..."
    /usr/sbin/chroot ${PKG_INSTALL_ROOT:-/} /usr/bin/bash /opt/csw/bin/mktexlsr ||
        echo "Registering Tex entries failed! You need to run mktexlsr again"
else
    # NOTE(review): $dest here is whatever the last loop iteration left
    # behind — presumably intentional (echo the last file), but verify.
    echo "$dest (TeX will be registered when CSWtexlive-binaries is installed)"
fi
exit 0
| true
|
aaea05812ca73dfdbcd7167a39b2b55457e159a1
|
Shell
|
akpotter/bin
|
/._versionning-utils/github-issues
|
UTF-8
| 708
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
# List open github repo issues from url
set -e

# -- Prepare things...
url="https://api.github.com/repos/$(github-repo-from-url "${1?missing url argument}")/issues?state=open&sort=created&direction=asc"
# Page count from the pagination "Link" header.
# Fix: match case-insensitively — HTTP/2 responses lowercase header
# names, so '^Link' found nothing and pagination silently collapsed
# to a single page.  -s silences curl's progress meter.
pages=$(curl -sI "$url" | grep -i '^link' | grep -oP '(?<=page=)[0-9]+' | sort -nr | head -1)
[ -z "$pages" ] && pages=1
rm -f ISSUES

# -- Get the issues...
for page in $(seq 1 "$pages"); do
  echo " [+] Processing page #$page ..."
  curl -s "${url}&page=$page" | jq -r '.[] | "[#\(.number)][\(.created_at[0:10])] \(.title) \(if(.labels|length>0) then " -- [#"+(.labels|map(.name)|join(", #"))+"]" else "" end)"' | tee -a ISSUES
  sleep 2
done

#---
# API Doc: https://developer.github.com/v3/issues/
| true
|
48371fdacc6a1450f29f7ea3428aa7ca0e3ae9f3
|
Shell
|
Warbo/chriswarbo-net
|
/tests/have_readmes
|
UTF-8
| 724
| 3.859375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
shopt -s nullglob

# Make sure READMEs get included into repo pages
# For every rendered repo page that claims "No README found", look in
# the matching local git repo; if it actually tracks README* files,
# report them and exit non-zero.
ERR=0

# $rendered is expected in the environment (hence the shellcheck waiver).
# shellcheck disable=SC2154
for REPO in "$rendered"/projects/repos/*.html
do
    NAME=$(basename "$REPO" .html)
    [[ "x$NAME" = "xindex" ]] && continue
    if grep "No README found" "$REPO" > /dev/null
    then
        for DIR in /home/chris/Programming/repos/$NAME.git
        do
            pushd "$DIR" > /dev/null || continue
            # Any tracked file named README* counts.
            READMES=$(git ls-files | grep "^README")
            if [[ -n "$READMES" ]]
            then
                ERR=1
                echo "'$NAME' has no README, but found '$DIR/$READMES'" 1>&2
            fi
            popd > /dev/null || exit 1
        done
    fi
done
exit "$ERR"
| true
|
48eb0b29f9392a908b41fb1943d3edc84953602b
|
Shell
|
XSWgrzh/i5gmc_install
|
/i5gmc_config.sh
|
UTF-8
| 863
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Deployment configuration for the i5gmc installer scripts.

# Credentials used on the target hosts.
export HOST_USER=test
export HOST_PASS=test

# Docker registries: local mirror and upstream.
export DOCKER_LOCAL_REGISTRY=p1:5000
export DOCKER_REMOTE_REGISTRY=docker.com:5000

# Cluster leader plus all nodes as "name:ip" entries.
export HOST_LEADER=p1
export HOST_ARRAY=(
"p1:172.16.169.153"
"p2:172.16.169.154"
"p3:172.16.169.155"
)

# Middleware images as "image:tag"; commented entries are disabled.
export DOCKER_MIDWARE_ARRAY=(
#"i5gmc_nginx:1.19.7.1"
"i5gmc_mongo:4.4.4.1"
#"i5gmc_redis:5.0.0.1"
#"i5gmc_sentinel:5.0.0.1"
#"i5gmc_mysql:5.7.32.2"
#"i5gmc_nacos:1.3.0.2"
)

# Base JDK image.
export DOCKET_I5GMC_JDK=(
"i5gmc_jdk:8.2.11.1"
)

# Application services to start, in order.
export START_APP=(
"i5gmc_robot"
"i5gmc_maapgw"
"i5gmc_auth"
"i5gmc_config"
"i5gmc_devcenter"
"i5gmc_devmgr"
"i5gmc_imserver"
"i5gmc_openapi"
"i5gmc_oss"
"i5gmc_task"
"i5gmc_imserver"
"i5gmc_h5gw"
)
function get_hosts() {
    # Print the host-name part (before the first ':') of every
    # "name:ip" entry in HOST_ARRAY, one per line.
    # Fix: quote the array expansion so entries survive intact, and
    # keep the loop variable local.
    local h
    for h in "${HOST_ARRAY[@]}"
    do
        echo "${h%%:*}"
    done
}
get_midwares() {
    # Print the image-name part (before the first ':') of every
    # "image:tag" entry in DOCKER_MIDWARE_ARRAY, one per line.
    # Fix: quote the array expansion; keep the loop variable local.
    local h
    for h in "${DOCKER_MIDWARE_ARRAY[@]}"
    do
        echo "${h%%:*}"
    done
}
| true
|
81131a8a13f9667576d22cbe0b44a1d3ffc4475e
|
Shell
|
abdullahnegm/Database---Bash-Script
|
/del_Table
|
UTF-8
| 493
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactively delete a table (a file) from the database directory $1.
# Fixes: quoted "$1" and "$name" (paths with spaces previously broke),
# read -r so backslashes are kept literal.
while true
do
    clear >$(tty)
    # Check if Database is empty
    if [ -n "$(ls -- "$1")" ]; then
        tput setaf 6;
        read -r -p "Name of Table to be Deleted ? " name
        # Check if Table name exists
        if [ -f "$1/$name" ]; then
            rm -- "$1/$name"
            echo "$name has been deleted!"
            sleep 1.5
            break
        # If Table name Doesn't exist
        else
            echo This name Doesn\'t exist!
            sleep 1.5
        fi
    else
        echo Database is empty !
        sleep 1.5
        break
    fi
done
| true
|
96dd8cb93b5e22910790d10716bb0073da807453
|
Shell
|
MaxOsef/Docker_Perso
|
/webdev_tmux.sh
|
UTF-8
| 1,905
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
# Open a tmux "Webdev" session for a project: devilbox container logs,
# vim, a project shell, and a mysql client window.
# Usage: ./webdev_tmux.sh <project-name>
# NOTE(review): $project is expanded unquoted below — names containing
# spaces will misbehave; verify callers only pass simple names.

session="Webdev"
project=$1

# set up tmux
tmux start-server

# create a new tmux session and windows.
tmux new-session -d -s $session -n logs # default window #1
tmux new-window -t $session:2 -n vim
tmux new-window -t $session:3 -n "$project"
tmux new-window -t $session:4 -n mysql

# Window 1
tmux select-window -t $session:1
tmux selectp -t 1
# First, starting the docker container
tmux send-keys "cd /home/osef/.local/devilbox/" C-m C-l
tmux send-keys "webdocker" C-m C-l
sleep 1
# Then, setting up environment
#Split first window
tmux splitw -h -p 50 #split first pane horizontaly
tmux splitw -v -p 50 #split second pane verticaly
tmux selectp -t 1
tmux splitw -v -p 50 #split first pane verticaly
# Showing logs (one devilbox service per pane)
tmux selectp -t 2
tmux send-keys "cd /home/osef/.local/devilbox/" C-m C-l
sleep 1
tmux send-keys "docker-compose logs -f mysql" C-m C-l
tmux selectp -t 3
tmux send-keys "cd /home/osef/.local/devilbox/" C-m C-l
sleep 1
tmux send-keys "docker-compose logs -f httpd" C-m C-l
tmux selectp -t 4
tmux send-keys "cd /home/osef/.local/devilbox/" C-m C-l
sleep 1
tmux send-keys "docker-compose logs -f php" C-m C-l

# Window 2
# Start vim
tmux select-window -t $session:2
tmux selectp -t 1
tmux send-keys "mkdir /home/osef/Dev_Web/$project/" C-m C-l
tmux send-keys "cd /home/osef/Dev_Web/$project/;vim" C-m

# Window 3
# Moving to the project directory
tmux select-window -t $session:3
tmux selectp -t 1
tmux send-keys "cd /home/osef/Dev_Web/$project/" C-m C-l

# Window 4
# Connecting to Database
tmux select-window -t $session:4
tmux selectp -t 1
tmux send-keys "cd /home/osef/.local/devilbox/" C-m C-l
sleep 1
tmux send-keys "./shell.sh" C-m C-l
sleep 4
tmux send-keys "mysql -u root -h 127.0.0.1 -P 3306 $project" C-m

# return to main vim window
tmux select-window -t $session:2

# Finished setup, attach to the tmux session!
tmux attach-session -t $session
| true
|
794b720c26fe6ed5ee01bd4b65930f10b1043abb
|
Shell
|
samisalkosuo/udpipe-server-baltic-sea
|
/scripts/get_conllu.sh
|
UTF-8
| 403
| 2.875
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/usr/bin/env bash
#parse given text file

# Require both the language/model id and the input file name.
if [[ -z $1 || -z $2 ]]; then
    echo 'Usage ' $0 ' <LANG_ID> <FILE_NAME>'
    exit 1
fi

__model=$1
__file=$2
__udpipe_server=127.0.0.1:8080

# POST the file to the local UDPipe REST endpoint (tokenizer, tagger and
# parser all enabled) and print the 'result' field of the JSON reply.
curl -F data=@${__file} -F model=${__model} -F tokenizer= -F tagger= -F parser= http://${__udpipe_server}/process | PYTHONIOENCODING=utf-8 python -c "import sys,json; sys.stdout.write(json.load(sys.stdin)['result'])"
| true
|
287f662137a2c15dbe618c65575919f47ad479a1
|
Shell
|
rolfn/relayServer
|
/install
|
UTF-8
| 668
| 3.390625
| 3
|
[] |
no_license
|
#! /bin/bash
# Rolf Niepraschk (Rolf.Niepraschk@ptb.de), 2020-05-26
# Preparation and start of the relayServer process.

R_PATH=/usr/local/share/relayServer/ # Path to relayServer installation

# Distribution id, lowercased (e.g. "opensuse", "ubuntu").
# NOTE(review): tr '[A-Z]' '[a-z]' happens to work, but the portable
# spelling is tr '[:upper:]' '[:lower:]'.
dist=$(lsb_release -is | cut -d " " -f 1 | tr '[A-Z]' '[a-z]')

# The unprivileged group name differs between distribution families.
case "$dist" in
  opensuse)
    chown -R nobody:nobody ${R_PATH} ;;
  ubuntu|raspbian|debian)
    chown -R nobody:nogroup ${R_PATH} ;;
esac

cd ${R_PATH}
ln -sf $PWD/vlLogging /usr/local/bin/vlLogging

# (Re)register and start the systemd unit.
systemctl daemon-reload # if already running
systemctl link $PWD/relayServer.service
systemctl restart relayServer.service
systemctl enable relayServer.service
systemctl status relayServer.service
exit
|
417005d4a5434465578f47b59ea936c677ccc51b
|
Shell
|
roverdotcom/django-haystack
|
/tests/run_all_tests.sh
|
UTF-8
| 1,583
| 3.359375
| 3
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/bin/bash
# Run the haystack test suite against every backend, accumulating
# failures so one broken backend does not hide the others.
set -e

if [ "$1" == "--help" ]; then
    echo "Runs the test suite for all backends"
    echo
    echo "See docs/running_tests.rst for instructions on installing test"
    echo "search engine instances"
    echo
    echo "Use $0 --with-coverage to execute tests using coverage.py"
    echo
    exit 0
elif [ "$1" == "--with-coverage" ]; then
    TEST_RUNNER="coverage run --source=$(realpath "$(dirname "$0")/../haystack") -- $(which django-admin.py)"
else
    TEST_RUNNER=django-admin.py
fi

export FAIL=0

# run_suite <label> <app> <settings-module>
# Prints the banner, runs one suite, records any failure in FAIL.
# $TEST_RUNNER is deliberately unquoted: it is a command string that
# needs word-splitting (e.g. "coverage run ... django-admin.py").
run_suite() {
    echo "** $1 **"
    $TEST_RUNNER test "$2" --settings="$3" $TEST_RUNNER_ARGS || FAIL=1
    echo ""
}

# Refactor: the ten copy-pasted banner/run/echo stanzas collapsed into
# one table of run_suite calls.
run_suite "CORE" core settings
run_suite "DISCOVERY" discovery discovery_settings
run_suite "OVERRIDES" overrides overrides_settings
run_suite "SIMPLE" simple_tests simple_settings
run_suite "SOLR" solr_tests solr_settings
run_suite "Elasticsearch" elasticsearch_tests elasticsearch_settings
run_suite "WHOOSH" whoosh_tests whoosh_settings
run_suite "MULTIPLE INDEX" multipleindex multipleindex_settings
run_suite "SPATIAL" spatial spatial_settings

exit $FAIL
| true
|
f841b721d60d28d16303d1eabc499f66a36ba886
|
Shell
|
sosobiao/IRMP_STM32
|
/STM32F103/scripts/prepare.sh
|
UTF-8
| 5,438
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
mkdir -p ext_src
[[ -e ./ext_src/prepared ]] && exit 0
cd ./ext_src
for i in 32010 32121; do
if [[ ! -s stsw-stm$i.zip ]]; then
wget "http://www.st.com/st-web-ui/static/active/en/st_prod_software_internet/resource/technical/software/firmware/stsw-stm$i.zip"
fi
done
if [[ ! -s irmp.tar.gz ]]; then
wget "http://www.mikrocontroller.net/svnbrowser/irmp/?view=tar" -O irmp.tar.gz
fi
cd ..
ar2='../../ext_src/stsw-stm32121.zip'
ar1='../ext_src/stsw-stm32121.zip'
ver='4.0.0'
path="STM32_USB-FS-Device_Lib_V$ver"
mkdir -p cmsis_boot
cd cmsis_boot
unzip -j $ar1 $path/Libraries/CMSIS/Device/ST/STM32F10x/Include/stm32f10x.h
unzip -j $ar1 $path/Libraries/CMSIS/Device/ST/STM32F10x/Include/system_stm32f10x.h
unzip -j $ar1 $path/Projects/Custom_HID/src/system_stm32f10x.c
unzip -j $ar1 $path/Projects/Custom_HID/inc/stm32f10x_conf.h
mkdir -p startup
cd startup
unzip -j $ar2 $path/Libraries/CMSIS/Device/ST/STM32F10x/Source/Templates/gcc_ride7/startup_stm32f10x_md.s
cd ../..
mkdir -p cmsis
cd cmsis
unzip -j $ar1 $path/Libraries/CMSIS/Include/core_cm3.h
unzip -j $ar1 $path/Libraries/CMSIS/Include/core_cmFunc.h
unzip -j $ar1 $path/Libraries/CMSIS/Include/core_cmInstr.h
cd ..
mkdir -p stm_lib
cd stm_lib
mkdir -p inc
cd inc
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/inc/misc.h
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/inc/stm32f10x_exti.h
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/inc/stm32f10x_flash.h
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/inc/stm32f10x_gpio.h
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/inc/stm32f10x_pwr.h
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/inc/stm32f10x_rcc.h
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/inc/stm32f10x_tim.h
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/inc/stm32f10x_usart.h
cd ..
mkdir -p src
cd src
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/src/misc.c
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/src/stm32f10x_exti.c
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/src/stm32f10x_flash.c
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/src/stm32f10x_gpio.c
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/src/stm32f10x_pwr.c
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/src/stm32f10x_rcc.c
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/src/stm32f10x_tim.c
unzip -j $ar2 $path/Libraries/STM32F10x_StdPeriph_Driver/src/stm32f10x_usart.c
cd ../..
mkdir -p usb_hid
cd usb_hid
mkdir -p inc
cd inc
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/inc/usb_core.h
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/inc/usb_def.h
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/inc/usb_init.h
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/inc/usb_int.h
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/inc/usb_lib.h
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/inc/usb_mem.h
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/inc/usb_regs.h
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/inc/usb_sil.h
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/inc/usb_type.h
unzip -j $ar2 $path/Projects/Custom_HID/inc/platform_config.h
unzip -j $ar2 $path/Projects/Custom_HID/inc/hw_config.h
unzip -j $ar2 $path/Projects/Custom_HID/inc/stm32_it.h
unzip -j $ar2 $path/Projects/Custom_HID/inc/usb_conf.h
unzip -j $ar2 $path/Projects/Custom_HID/inc/usb_desc.h
unzip -j $ar2 $path/Projects/Custom_HID/inc/usb_istr.h
unzip -j $ar2 $path/Projects/Custom_HID/inc/usb_prop.h
unzip -j $ar2 $path/Projects/Custom_HID/inc/usb_pwr.h
cd ..
mkdir -p src
cd src
unzip -j $ar2 $path/Projects/Custom_HID/src/hw_config.c
unzip -j $ar2 $path/Projects/Custom_HID/src/stm32_it.c
unzip -j $ar2 $path/Projects/Custom_HID/src/usb_desc.c
unzip -j $ar2 $path/Projects/Custom_HID/src/usb_endp.c
unzip -j $ar2 $path/Projects/Custom_HID/src/usb_istr.c
unzip -j $ar2 $path/Projects/Custom_HID/src/usb_prop.c
unzip -j $ar2 $path/Projects/Custom_HID/src/usb_pwr.c
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/src/usb_core.c
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/src/usb_init.c
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/src/usb_int.c
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/src/usb_mem.c
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/src/usb_regs.c
unzip -j $ar2 $path/Libraries/STM32_USB-FS-Device_Driver/src/usb_sil.c
ar='../../ext_src/stsw-stm32010.zip'
ver='3.1.0'
path="STM32F10x_AN2594_FW_V$ver"
cd ../..
cd stm_lib
cd inc
unzip -j $ar $path/Project/EEPROM_Emulation/inc/eeprom.h
cd ..
cd src
unzip -j $ar $path/Project/EEPROM_Emulation/src/eeprom.c
cd ../..
pwd
cd usb_hid
pwd
patch -p1 -i ../patches/usb_hid.patch
cd ..
cd stm_lib
patch -p1 -i ../patches/eeprom.patch
cd ..
cd cmsis_boot
patch -p1 -i ../patches/stm32f10x_conf.patch
cd ..
ar='../ext_src/irmp.tar.gz'
path="irmp"
mkdir -p irmp
cd irmp
tar -xvf $ar --strip-components=1 $path/irmp.c
tar -xvf $ar --strip-components=1 $path/irmp.h
tar -xvf $ar --strip-components=1 $path/irmpconfig.h
tar -xvf $ar --strip-components=1 $path/irmpprotocols.h
tar -xvf $ar --strip-components=1 $path/irmpsystem.h
tar -xvf $ar --strip-components=1 $path/irsnd.c
tar -xvf $ar --strip-components=1 $path/irsnd.h
tar -xvf $ar --strip-components=1 $path/irsndconfig.h
patch -p1 -i ../patches/irmp.patch
touch ../ext_src/prepared
| true
|
38f2dfa409fff6ca15add961a61245e5f01b93be
|
Shell
|
av-maramzin/LLVM
|
/debug-build.sh
|
UTF-8
| 875
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
SCRIPT_DIR="$(dirname "$(realpath $0)")"
LLVM_PROJECT_DIR="$SCRIPT_DIR/llvm-project/llvm"
BUILD_DIR="$SCRIPT_DIR/build"
DEBUG_BUILD_DIR="$SCRIPT_DIR/debug-build"
if [ ! -d "$LLVM_PROJECT_DIR" ]; then
echo "Error: LLVM git repository has not been found!"
exit 1
fi
echo "LLVM folder: $SCRIPT_DIR"
echo "LLVM repository: $LLVM_PROJECT_DIR"
echo "LLVM debug build directory: $DEBUG_BUILD_DIR"
echo ""
if [ -d "$DEBUG_BUILD_DIR" ]; then
echo "Removing existent build directory"
rm -rf $DEBUG_BUILD_DIR
fi
mkdir $DEBUG_BUILD_DIR
cd $DEBUG_BUILD_DIR
cmake -G "Ninja"\
-DCMAKE_BUILD_TYPE=Debug\
-DLLVM_ENABLE_PROJECTS="clang"\
-DLLVM_TARGETS_TO_BUILD="X86"\
-DLLVM_ENABLE_CXX1Y=ON\
-DBUILD_SHARED_LIBS=ON\
-DLLVM_ENABLE_ASSERTIONS=ON\
-DCMAKE_CXX_FLAGS="-std=c++14"\
$LLVM_PROJECT_DIR
cmake --build .
cd $SCRIPT_DIR
| true
|
f387cec66337aa9bc86911d228b953ad9da67a2e
|
Shell
|
cloudfoundry-incubator/kubecf-tools
|
/top/memory-max.sh
|
UTF-8
| 601
| 4.0625
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
## Given the path to a file generated by `top-iterated.sh` it
## determines the max memory usage per job and returns the relevant
## information to stdout.
log="${1}" ; shift
if test -z "${log}"
then
echo 1>&2 "Usage: $0 log"
exit 1
fi
# Split the report across group+jobs.
while read epoch group job cpu memory
do
echo "${group}" "${job}" "${memory}" >> "$$.${group}.${job}"
done < "${log}"
# Retrieve and report max memory per group+job combination
for gjobfile in $$.*
do
sort -u < "${gjobfile}" | sort -k 3,4 -n | tail -1
done | sort
# Clear temp
rm -f $$.*
exit
| true
|
4c03ee7e0dce22f2a6ba72f58b421a9c40ca2d5b
|
Shell
|
ShakingCode/Shareplex
|
/shared_scripts/shareplex_install_postgres_config.sh
|
UTF-8
| 3,987
| 2.71875
| 3
|
[] |
no_license
|
#mkdir -p /u01/app/quest
#rm -fR /u01/app/quest/shareplex9.2/ ; rm -fR /u01/app/quest/vardir/
mkdir -p /u01/app/quest/shareplex9.2/ ; mkdir -p /u01/app/quest/vardir/2100/
echo "******************************************************************************"
echo "Shareplex installation." `date`
echo "******************************************************************************"
#rm -Rf /u01/app/quest/vardir/2100/
cd /vagrant_software
licence_key=`cat /vagrant_software/shareplex_licence_key.txt`
customer_name=`cat /vagrant_software/shareplex_customer_name.txt`
#SP_SYS_HOST_NAME=ol7-postgresql106-splex3
. /vagrant_config/install.env
SP_SYS_HOST_NAME=${NODE3_HOSTNAME}
#root
#echo -e "postgres\n/u01/app/quest/shareplex9.2/\n/u01/app/quest/vardir/2100/\n\n\n\n\n\n\n${licence_key}\n${customer_name}" | ./SPX-9.2.0-b42-oracle110-rh-40-amd64-m64.tpm
echo -e "/u01/app/quest/shareplex9.2/\n/u01/app/quest/vardir/2100/\n\n\n\n\n${licence_key}\n${customer_name}" | ./SPX-9.2.0-b42-oracle110-rh-40-amd64-m64.tpm
echo "******************************************************************************"
echo "PostgreSQL configuration." `date`
echo "******************************************************************************"
cd /opt/PostgreSQL/10/bin
#PGPASSWORD=postgres ./psql -c "drop database testdb;"
PGPASSWORD=postgres ./psql -c "create database testdb;"
#PGPASSWORD=postgres ./psql -d testdb -c "drop user test;"
PGPASSWORD=postgres ./psql -d testdb -c "create user test with encrypted password 'test';"
PGPASSWORD=postgres ./psql -d testdb -c "grant all privileges on database testdb to test;"
#PGPASSWORD=test ./psql -d testdb -U test -c "drop schema test;"
PGPASSWORD=test ./psql -d testdb -U test -c "create schema test;"
PGPASSWORD=test ./psql -d testdb -U test -c "create table test.test (id numeric not null, constraint pk_test primary key (id));"
PGPASSWORD=test ./psql -d testdb -U test -c "select * from test.test;"
echo "******************************************************************************"
echo "ODBC configuration." `date`
echo "******************************************************************************"
cat >> /u01/app/quest/vardir/2100/odbc/odbc.ini <<EOF
[postgres]
Driver = PostgreSQL
Server = localhost
Port = 5432
EOF
cat >> /u01/app/quest/vardir/2100/odbc/odbcinst.ini <<EOF
[PostgreSQL]
Description = PostgreSQL ODBC driver
Driver = /usr/lib/odbc/psqlodbca.so
Setup = /usr/lib/odbc/libodbcpsqlS.so
Driver64 = /usr/lib64/psqlodbcw.so
Setup64 = /usr/lib64/libodbcpsqlS.so
EOF
echo "******************************************************************************"
echo "Quest Shareplex configuration." `date`
echo "******************************************************************************"
cd /u01/app/quest/shareplex9.2/bin
echo -e "postgres\npostgres\npostgres\n\ntestdb\n\nsplex\nsplex\nsplex\n" | ./pg_setup
echo "******************************************************************************"
echo "Quest Shareplex start process." `date`
echo "******************************************************************************"
cd /u01/app/quest/shareplex9.2/bin
./sp_cop -u2100 &
sleep 5
echo "******************************************************************************"
echo "Quest Shareplex show configuration." `date`
echo "******************************************************************************"
echo -e ""
echo -e "show\nstatus" | /u01/app/quest/shareplex9.2/bin/sp_ctrl
echo -e "connection r.postgres set user=postgres" | ./sp_ctrl
echo -e "connection r.postgres set password=postgres" | ./sp_ctrl
echo -e "connection r.postgres set port=5432" | ./sp_ctrl
echo -e "connection r.postgres set server=localhost" | ./sp_ctrl
#echo -e "connection r.postgres set driver=/database/ODBC/lib/databasedriver.so" | ./sp_ctrl
echo -e "connection r.postgres set driver=/opt/PostgreSQL/10/lib/postgresql/postgres_fdw.so" | ./sp_ctrl
echo -e "connection r.postgres set driver=/usr/lib64/libodbcpsqlS.so" | ./sp_ctrl
| true
|
04e420118d0769b61f475eac35eab7febebaced1
|
Shell
|
tw-hygieia/hygieia-deploy-teamcity-collector
|
/docker/properties-builder.sh
|
UTF-8
| 4,679
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
if [ "$SKIP_PROPERTIES_BUILDER" = true ]; then
echo "Skipping properties builder"
exit 0
fi
# mongo container provides the HOST/PORT
# api container provided DB Name, ID & PWD
if [ "$TEST_SCRIPT" != "" ]
then
#for testing locally
PROP_FILE=application.properties
else
PROP_FILE=/hygieia/config/application.properties
fi
# if [ "$MONGO_PORT" != "" ]; then
# # Sample: MONGO_PORT=tcp://172.17.0.20:27017
# MONGODB_HOST=`echo $MONGO_PORT|sed 's;.*://\([^:]*\):\(.*\);\1;'`
# MONGODB_PORT=`echo $MONGO_PORT|sed 's;.*://\([^:]*\):\(.*\);\2;'`
# else
# env
# echo "ERROR: MONGO_PORT not defined"
# exit 1
# fi
echo "MONGODB_HOST: $MONGODB_HOST"
echo "MONGODB_PORT: $MONGODB_PORT"
#update local host to bridge ip if used for a URL
# DOCKER_LOCALHOST=
# echo $TEAMCITY_MASTER|egrep localhost >>/dev/null
# if [ $? -ne 1 ]
# then
# #this seems to give a access to the VM of the docker-machine
# #LOCALHOST=`ip route|egrep '^default via'|cut -f3 -d' '`
# #see http://superuser.com/questions/144453/virtualbox-guest-os-accessing-local-server-on-host-os
# DOCKER_LOCALHOST=10.0.2.2
# MAPPED_URL=`echo "$TEAMCITY_MASTER"|sed "s|localhost|$DOCKER_LOCALHOST|"`
# echo "Mapping localhost -> $MAPPED_URL"
# TEAMCITY_MASTER=$MAPPED_URL
# fi
# echo $TEAMCITY_OP_CENTER|egrep localhost >>/dev/null
# if [ $? -ne 1 ]
# then
# #this seems to give a access to the VM of the docker-machine
# #LOCALHOST=`ip route|egrep '^default via'|cut -f3 -d' '`
# #see http://superuser.com/questions/144453/virtualbox-guest-os-accessing-local-server-on-host-os
# LOCALHOST=10.0.2.2
# MAPPED_URL=`echo "$TEAMCITY_OP_CENTER"|sed "s|localhost|$LOCALHOST|"`
# echo "Mapping localhost -> $MAPPED_URL"
# TEAMCITY_OP_CENTER=$MAPPED_URL
# fi
cat > $PROP_FILE <<EOF
#Database Name
dbname=${HYGIEIA_API_ENV_SPRING_DATA_MONGODB_DATABASE:-dashboarddb}
#Database HostName - default is localhost
dbhost=${MONGODB_HOST:-10.0.1.1}
#Database Port - default is 27017
dbport=${MONGODB_PORT:-27017}
#Database Username - default is blank
dbusername=${HYGIEIA_API_ENV_SPRING_DATA_MONGODB_USERNAME:-dashboarduser}
#Database Password - default is blank
dbpassword=${HYGIEIA_API_ENV_SPRING_DATA_MONGODB_PASSWORD:-dbpassword}
#Collector schedule (required)
teamcity.cron=${TEAMCITY_CRON:-0 0/5 * * * *}
#A comma seperated list of teamcity projectids
teamcity.projectIds=${TEAMCITY_DEPLOYMENT_PROJECT_IDS}
# The folder depth - default is 10
teamcity.folderDepth=${TEAMCITY_FOLDER_DEPTH:-10}
#Teamcity server (required) - Can provide multiple
teamcity.servers[0]=${TEAMCITY_SERVER}
#teamcity.niceNames[0]=[YourTeamcity]
#teamcity.environments[0]=[DEV,QA,INT,PERF,PROD]
teamcity.apiKeys[0]=${TEAMCITY_API_KEY}
teamcity.branchMatcher=${TEAMCITY_BRANCH_REGEX:master}
teamcity.pipelineIgnoreMatcher=${TEAMCITY_PIPELINE_IGNORE_REGEX:ignore}
# Search criteria enabled via properties (max search criteria = 2)
teamcity.searchFields[0]= options.jobName
teamcity.searchFields[1]= niceName
EOF
# # find how many teamcity urls are configured
# max=$(wc -w <<< "${!TEAMCITY_MASTER*}")
# # loop over and output the url, username, apiKey and niceName
# i=0
# while [ $i -lt $max ]
# do
# if [ $i -eq 0 ]
# then
# server="TEAMCITY_MASTER"
# username="TEAMCITY_USERNAME"
# apiKey="TEAMCITY_API_KEY"
# niceName="TEAMCITY_NAME"
# else
# server="TEAMCITY_MASTER$i"
# username="TEAMCITY_USERNAME$i"
# apiKey="TEAMCITY_API_KEY$i"
# niceName="TEAMCITY_NAME$i"
# fi
# cat >> $PROP_FILE <<EOF
# teamcity.servers[${i}]=${!server}
# teamcity.usernames[${i}]=${!username}
# teamcity.apiKeys[${i}]=${!apiKey}
# teamcity.niceNames[${i}]=${!niceName}
# EOF
# i=$(($i+1))
# done
# cat >> $PROP_FILE <<EOF
# #Determines if build console log is collected - defaults to false
# teamcity.saveLog=${TEAMCITY_SAVE_LOG:-true}
# #map the entry localhost so URLS in teamcity resolve properly
# # Docker NATs the real host localhost to 10.0.2.2 when running in docker
# # as localhost is stored in the JSON payload from teamcity we need
# # this hack to fix the addresses
# teamcity.dockerLocalHostIP=${DOCKER_LOCALHOST}
# EOF
# if [ "$TEAMCITY_OP_CENTER" != "" ]
# then
# cat >> $PROP_FILE <<EOF
# #If using username/token for api authentication (required for Cloudbees Teamcity Ops Center) see sample
# #teamcity.servers[${max}]=${TEAMCITY_OP_CENTER:-http://username:token@teamcity.company.com}
# teamcity.servers[${max}]=${TEAMCITY_OP_CENTER}
# EOF
# fi
echo "
===========================================
Properties file created `date`: $PROP_FILE
Note: passwords & apiKey hidden
===========================================
`cat $PROP_FILE |egrep -vi 'password|apiKey'`
"
exit 0
| true
|
10862f225b1a393bc46595590625253e45225a09
|
Shell
|
entn-at/LCLDA
|
/egs/sre10/v1/local/lcplda_plda_scoring.sh
|
UTF-8
| 4,732
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Copyright 2015 David Snyder
# Apache 2.0.
#
# This script trains an LDA transform, applies it to the enroll and
# test i-vectors and does cosine scoring.
use_existing_models=false
simple_length_norm=false # If true, replace the default length normalization
# performed in PLDA by an alternative that
# normalizes the length of the iVectors to be equal
# to the square root of the iVector dimension.
lda_dim=150
# covar_factor=0.1
covar_factor=0.0
beta=1.0
nj=16
echo "$0 $@" # Print the command line for logging
if [ -f path.sh ]; then . ./path.sh; fi
. parse_options.sh || exit 1;
if [ $# != 8 ]; then
echo "Usage: $0 <lda-data-dir> <enroll-data-dir> <test-data-dir> <lda-ivec-dir> <enroll-ivec-dir> <test-ivec-dir> <trials-file> <scores-dir>"
fi
plda_data_dir=$1 #data/sre
enroll_data_dir=$2 #data/sre10_train
test_data_dir=$3 # nouse: data/sre10_test
plda_ivec_dir=$4 #sub_sre_path_cs
enroll_ivec_dir=$5 #sub_sre_train_path_cs
test_ivec_dir=$6 #sub_sre_test_path_cs
trials=$7
scores_dir=$8 # exp/cs_score
if [ "$use_existing_models" == "true" ]; then
for f in ${test_ivec_dir}/gmm_pdf.mat ${plda_ivec_dir}/transform_lcp.ark ${plda_ivec_dir}/spk_mean.ark ${plda_ivec_dir}/lcplda_plda_set.ark ; do
[ ! -f $f ] && echo "No such file $f" && exit 1;
done
else
mkdir -p ${plda_ivec_dir}/log
run.pl ${plda_ivec_dir}/log/lcplda_plda1.log \
ivector-compute-lcplda-pdf --nj=$nj \
--pdf=${test_ivec_dir}/gmm_pdf.mat \
--dim=$lda_dim --total-covariance-factor=$covar_factor \
"ark:ivector-normalize-length scp:${plda_ivec_dir}/ivector.scp ark:- |" \
ark:${plda_data_dir}/utt2spk \
ark:${plda_ivec_dir}/transform_lcp.ark \
ark,t:${plda_ivec_dir}/spk_mean.ark || exit 1;
run.pl ${plda_ivec_dir}/log/spk_mean_ln.log \
ivector-normalize-length ark:${plda_ivec_dir}/spk_mean.ark ark:${plda_ivec_dir}/spk_mean_ln.ark || exit 1;
# # Train a PLDA model. using transform-vec-lclda function
# # apply spk_mean_ln here, use cosine distance to find nearest spker
# $train_cmd $plda_ivec_dir/log/lcplda_plda2.log \
# ivector-compute-plda ark:$plda_data_dir/spk2utt \
# "ark:ivector-normalize-length scp:$plda_ivec_dir/ivector.scp ark:- |
# transform-vec-lclda --development=true ark:${plda_ivec_dir}/transform_lcp.ark ark:${plda_ivec_dir}/spk_mean_ln.ark ark:- ark:- | ivector-normalize-length ark:- ark:- |" \
# $plda_ivec_dir/lcplda_plda || exit 1;
# Train a PLDA set model. using transform-vec-lclda function
# apply spk_mean_ln here, use cosine distance to find nearest spker
$train_cmd $plda_ivec_dir/log/lcplda_plda2.log \
ivector-compute-plda-set --nj=$nj \
--pdf=${test_ivec_dir}/gmm_pdf.mat \
--spk-mean=ark:${plda_ivec_dir}/spk_mean_ln.ark \
ark:$plda_data_dir/spk2utt \
ark:${plda_ivec_dir}/transform_lcp.ark \
"ark:ivector-normalize-length scp:$plda_ivec_dir/ivector.scp ark:- |" \
ark:$plda_ivec_dir/lcplda_plda_set.ark || exit 1;
fi
mkdir -p $scores_dir/log
echo "do lcplda_plda_scoring!"
# # Guess: ivector-subtract-global-mean is due to transform-vec add mean
# $train_cmd $scores_dir/log/eval_scoring.log \
# ivector-lclda-plda-scoring2 --normalize-length=true \
# --simple-length-normalization=$simple_length_norm \
# --num-utts=ark:$enroll_ivec_dir/num_utts.ark \
# --transform=ark:${plda_ivec_dir}/transform_lcp.ark \
# --spk-mean=ark:${plda_ivec_dir}/spk_mean_ln.ark \
# "ivector-copy-plda --smoothing=0.0 $plda_ivec_dir/lcplda_plda - |" \
# ark:"ivector-mean ark:$enroll_data_dir/spk2utt scp:$enroll_ivec_dir/ivector.scp ark:- | ivector-subtract-global-mean --subtract-mean=false $plda_ivec_dir/mean.vec ark:- ark:- |" \
# ark:"ivector-subtract-global-mean --subtract-mean=false $plda_ivec_dir/mean.vec scp:$test_ivec_dir/ivector.scp ark:- |" \
# "cat '$trials' | cut -d\ --fields=1,2 |" \
# $scores_dir/eval_scores || exit 1;
$train_cmd $scores_dir/log/eval_scoring.log \
ivector-lclda-pldaset-scoring --normalize-length=true \
--simple-length-normalization=$simple_length_norm \
--num-utts=ark:$enroll_ivec_dir/num_utts.ark \
--transform=ark:${plda_ivec_dir}/transform_lcp.ark \
--spk-mean=ark:${plda_ivec_dir}/spk_mean_ln.ark \
ark:"$plda_ivec_dir/lcplda_plda_set.ark" \
ark:"ivector-mean ark:$enroll_data_dir/spk2utt scp:$enroll_ivec_dir/ivector.scp ark:- | ivector-subtract-global-mean --subtract-mean=false $plda_ivec_dir/mean.vec ark:- ark:- |" \
ark:"ivector-subtract-global-mean --subtract-mean=false $plda_ivec_dir/mean.vec scp:$test_ivec_dir/ivector.scp ark:- |" \
"cat '$trials' | cut -d\ --fields=1,2 |" \
$scores_dir/eval_scores || exit 1;
| true
|
c6caf63ebe70b016e80206b6fa4d38ccacfada2a
|
Shell
|
sixteenmillimeter/mcopy
|
/scripts/branch_name.sh
|
UTF-8
| 145
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
branch_name=$(git symbolic-ref -q HEAD)
branch_name=${branch_name##refs/heads/}
branch_name=${branch_name:-HEAD}
echo $branch_name
| true
|
b69c862666885f3fd53d8388562131c44b79afa6
|
Shell
|
scelesticsiva/classification_of_fashion_category
|
/train.sh
|
UTF-8
| 665
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Author - Sivaramakrishnan
#
#The main script to test the pre trained model
VGG_PRETRAINED_WEIGHTS="https://www.dropbox.com/s/o0t8wuqbu64eazh/vgg16.npy?dl=1"
VGG_FILE="vgg16.npy"
echo "Downloading VGG16 weights file"
curl -L -o vgg16.npy $VGG_PRETRAINED_WEIGHTS
echo "Running the training script"
#tail +3 "$1" > "$1" #to remove the first two rows in the text file which contains number of images and the string "image category label"
if [ "$2" == 'use_gpu' ] ; then
python3 train.py --train_labels "$1" --run_in_gpu True --vgg_pretrained_weights $VGG_FILE
else
python3 train.py --train_labels "$1" --vgg_pretrained_weights $VGG_FILE
fi
| true
|
be5ad9810713e03c02aa3581ec3ff7a218fd9149
|
Shell
|
kjsubbu/dockerprojects
|
/arap-workflow-prescan/multiJvmRun.sh
|
UTF-8
| 284
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run 4 PreScan JVMs
# TODO: Modify to support N JVMS for any N.
PSbucket=$1
PSWFC=`expr $PSbucket - 1`
for i in `seq 0 $PSWFC`;
do
echo "Starting JVM $i"
./run.sh $i $PSbucket &
done
#for ind in 0 1 2 3
#do
# echo "Starting JVM $ind"
# ./run.sh $ind 4 &
#done
| true
|
5907027ce3a3d66f05e4f7edd2fb6d55d91fac78
|
Shell
|
ivanpierre/makes
|
/src/args/make-ssl-certificate/builder.sh
|
UTF-8
| 436
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
# shellcheck shell=bash
function main {
source "${envOptions}"/template local options
mkdir "${out}" \
&& openssl req \
-days "${envDays}" \
-keyout "${out}/cert.key" \
-new \
-newkey "${envKeyType}" \
-nodes \
-out "${out}/cert.crt" \
"${options[@]}" \
-x509 \
&& openssl x509 \
-in "${out}/cert.crt" \
-inform 'pem' \
-noout \
-text
}
main "${@}"
| true
|
1d77fd4a9b2231061dd687bdce5e59df2d9f2b8c
|
Shell
|
atul-ram/step10
|
/roles/zabbix/files/script/hardening.sh
|
UTF-8
| 7,045
| 3.28125
| 3
|
[] |
no_license
|
###### Maximum password Days ##################
maxdayspass=`cat /etc/login.defs |grep "PASS_MAX_DAYS 30" |wc -l`
if [ $maxdayspass == 1 ] ; then
echo "PASSWORD Max days is OK"
else
echo "PASSWORD Max days PROBLEM"
fi
###### Minimum Password days #################
mindayspass=`cat /etc/login.defs |grep "PASS_MIN_DAYS 1" |wc -l`
if [ $mindayspass == 1 ] ; then
echo "PASSWORD Min days is OK"
else
echo "PASSWORD Min days PROBLEMK"
fi
##### Minimum password length ###############
minpasslength=`cat /etc/login.defs |grep "PASS_MIN_LEN 14" |wc -l`
if [ $minpasslength == 1 ] ; then
echo "PASSWORD Min Length is OK"
else
echo "PASSWORD Min Length PROBLEM"
fi
##### Password warning days ################
passwarndate=`cat /etc/login.defs |grep "PASS_WARN_AGE 7" | wc -l`
if [ $passwarndate == 1 ] ; then
echo "PASSWORD warn date is OK"
else
echo "PASSWORD warn date PROBLEM"
fi
###### PASSWORD EXPIRY Days "
pwdexpiry=`sudo cat /etc/default/useradd |grep EXPIRE=30 |wc -l`
if [ $pwdexpiry == 1 ] ; then
echo "PASSWORD Expiry date is OK"
else
echo "PASSWORD Expiry date PROBLEM"
fi
#### Server run level #####################
srvrrunlevel=`cat /etc/inittab |grep "id:3" |wc -l`
if [ $srvrrunlevel == 1 ] ; then
echo "SERVER run level is OK"
else
echo "SERVER run level PROBLEM"
fi
#### SELinux Status #######################
selinuxstatus=`getenforce |grep -i enf |wc -l`
if [ $selinuxstatus == 1 ] ; then
echo "SELINUX in enforcing mode OK"
else
echo "SELINUX in enforcing mode PROBLEM"
fi
#### Password algorithm ##################
shaalgo=`cat /etc/login.defs |grep "SHA256" |wc -l`
if [ $shaalgo == 1 ] ; then
echo "SHA256 password algorithm OK"
else
echo "SHA256 password algorithm PROBLEM"
fi
###### SYSCTL Permission ###############
sysctlperm=`ls -l /etc/sysctl.conf | awk '{print $1}' |grep "rw-------" |wc -l`
if [ $sysctlperm == 1 ] ; then
echo "SYSCTL permission is OK"
else
echo "SYSCTL permission PROBLEM"
fi
##### Sysctl attribute
sysctlatr=`sudo lsattr /etc/sysctl.conf |grep "i--------e" |wc -l`
if [ $sysctlatr == 1 ] ; then
echo "SYSCTL attribute permission OK"
else
echo "SYSCTL attribute permission PROBLEM"
fi
##### NTP Configureation status #########
ntpconf=`cat /etc/ntp.conf |grep 172.17. |wc -l`
if [ $ntpconf == 2 ] ; then
echo "NTP Configuration OK"
else
echo "NTP Configuration PROBLEM"
fi
##### Root dir permission ##############
rootdir=`sudo ls -l -ld /root/ |grep drwx------ |wc -l`
if [ $rootdir == 1 ] ; then
echo "ROOT dir permission is OK"
else
echo "ROOT dir permission PROBLEM"
fi
##### Banner configuration ##############
bannerstat=` cat /etc/issue |grep time.Unauthorized |wc -l`
if [ $bannerstat == 1 ] ; then
echo "TCS Banner OK"
else
echo "TCS Banner PROBLEM"
fi
###### Host .Allow permission ###########
hostallow=`ls -l /etc/hosts.allow |grep rw------- |wc -l`
if [ $hostallow == 1 ] ; then
echo "HOST ALLOW permission OK"
else
echo "Host ALLOW permission PROBLEM"
fi
##### HOST ALLOW ATTRIBUTES ############
hostallowatr=`sudo lsattr /etc/hosts.allow |grep "i--------e" |wc -l`
if [ $hostallowatr == 1 ] ; then
echo "HOST ALLOW attribute permission OK"
else
echo "HOST ALLOW attribute permission PROBLEM"
fi
##### HOST DENY PERMISSION ##############
hostdeny=`ls -l /etc/hosts.deny |grep rw------- |wc -l`
if [ $hostdeny == 1 ] ; then
echo "HOST DENY permission OK"
else
echo "HOST DENY permission PROBLEM"
fi
#####
hostsdenyatr=`sudo lsattr /etc/hosts.deny |grep "i--------e" |wc -l`
if [ $hostsdenyatr == 1 ] ; then
echo "HOST DENY attribute permission OK"
else
echo "HOST DENY attribute permission PROBLEM"
fi
#####NET IPV4 ICMP########################
netipv4ignore=`sudo cat /etc/sysctl.conf |grep "net.ipv4.icmp_ignore_bogus_error_responses = 1" |wc -l`
if [ $netipv4ignore == 1 ] ; then
echo "KERNEL bogus_error OK"
else
echo "KERNEL bogus_error PROBLEM"
fi
#####net.ipv4.conf.all.log_martians########################
netipmartians=`sudo cat /etc/sysctl.conf |grep "net.ipv4.conf.all.log_martians = 1" |wc -l`
if [ $netipv4ignore == 1 ] ; then
echo "KERNEL Martians OK"
else
echo "KERNEL Martians PROBLEM"
fi
#####net.ipv4.icmp_echo_ignore_broadcasts = 1 ########
netbroadcasts=`sudo cat /etc/sysctl.conf |grep "net.ipv4.icmp_echo_ignore_broadcasts = 1" |wc -l`
if [ $netbroadcasts == 1 ] ; then
echo "KERNEL broadcasts OK"
else
echo "KERNEL broadcasts PROBLEM"
fi
######
historyformat=`cat /etc/profile |grep -i HISTTIMEFORMAT |wc -l`
if [ $historyformat == 1 ] ; then
echo "HISTORY FORMAT OK"
else
echo "HISTORY FORMAT PROBLEM"
fi
####
userprivilegesep=`sudo cat /etc/ssh/sshd_config |grep -i "UsePrivilegeSeparation yes" |grep -v '#' |wc -l`
if [ $userprivilegesep == 1 ] ; then
echo "SSHD User Privilege Seperation OK"
else
echo "SSHD User Privilege Seperation PROBLEM"
fi
####AllowTcpForwarding no ####
tcpforwarding=`sudo cat /etc/ssh/sshd_config |grep -i "AllowTcpForwarding no" |grep -v '#' |wc -l`
if [ $tcpforwarding == 1 ] ; then
echo "SSHD TCP forwarding OK"
else
echo "SSHD TCP forwarding PROBLEM"
fi
##### X11Forwarding #####
x11forwarding=`sudo cat /etc/ssh/sshd_config |grep -i "X11Forwarding no" |grep -v '#' |wc -l`
if [ $x11forwarding == 1 ] ; then
echo "SSHD X11 Forwarding OK"
else
echo "SSHD X11 Forwarding PROBLEM"
fi
##### Protocol 2
protocol2=`sudo cat /etc/ssh/sshd_config |grep -i "Protocol 2" |grep -v '#' |wc -l`
if [ $protocol2 == 1 ] ; then
echo "SSHD Protocol OK"
else
echo "SSHD Protocol PROBLEM"
fi
#### LogLevel verbose
loglevelv=`sudo cat /etc/ssh/sshd_config |grep -i "LogLevel verbose" |grep -v '#' |wc -l`
if [ $loglevelv == 1 ] ; then
echo "SSHD Loglevel OK"
else
echo "SSHD loglevel PROBLEM"
fi
#### StrictModes yes
strictmode=`sudo cat /etc/ssh/sshd_config |grep -i "StrictModes yes" |grep -v '#' |wc -l`
if [ $strictmode == 1 ] ; then
echo "SSHD StrictMode OK"
else
echo "SSHD StrictMode PROBLEM"
fi
######
grubpwd=`sudo cat /etc/grub.conf |grep -i "password --md5" |wc -l`
if [ $grubpwd == 1 ] ; then
echo "GRUB password protected OK"
else
echo "GRUB password protected PROBLEM"
fi
#####
deny3auth=`sudo cat /etc/pam.d/system-auth |grep -i "deny=3 onerr=fail" | wc -l`
if [ $deny3auth == 1 ] ; then
echo "Three wrong password attempt auth OK"
else
echo "Three wrong password attempt auth PROBLEM"
fi
####
deny3acc=`sudo cat /etc/pam.d/system-auth |grep "deny=3 no_magic_root" |wc -l`
if [ $deny3acc == 1 ] ; then
echo "Three wrong password attempt account OK"
else
echo "Three wrong password attempt account PROBLEM"
fi
####
denypwd=`sudo cat /etc/pam.d/system-auth |grep "minlen=14 lcredit=-1" |wc -l`
if [ $denypwd == 1 ] ; then
echo "Three wrong password attempt password OK"
else
echo "Three wrong password attempt password PROBLEM"
fi
######
numopenfiles=`sudo cat /etc/security/limits.conf |grep "hard nofile" |wc -l`
if [ $numopenfiles == 1 ] ; then
echo "NUMBER of open files set OK"
else
echo "NUMBER of open files PROBLEM"
fi
#### UAL Configuration checking ############
ualconf=`cat /etc/bashrc |grep substitutions |wc -l`
if [ $ualconf == 1 ] ; then
echo "UAL Configuration OK"
else
echo "UAL Configuration PROBLEM"
fi
| true
|
7669c1c621858f59fbb1c3fe9e24cb598d400478
|
Shell
|
Dumstruck/zen-securenode-docker
|
/secnodetracker/entrypoint.sh
|
UTF-8
| 552
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint for the secure-node tracker: verify that the node
# configuration exists, fix file ownership, then exec the requested
# command as the unprivileged "node" user.
# link the secure node tracker config, bail if not present
#if [ -f "/mnt/zen/secnode/stakeaddr" ]; then
#echo "Secure node config found OK - linking..."
#ln -s /mnt/zen/secnode /home/node/secnodetracker/config > /dev/null 2>&1 || true
#else
#echo "No secure node config found. exiting"
#exit 1
#fi
# Abort if the application directory is missing (the cd was previously
# unchecked, so the config test could silently run in the wrong directory).
cd /home/node/secnodetracker || exit 1
if [ -f ./config/stakeaddr ]; then
echo "Secure node config found OK"
else
echo "No secure node config found. Exiting"
exit 1
fi
# Fix permissions of files
chown -R node:node .
# su-exec replaces this shell with "$@" running as user "node".
su-exec node "$@"
| true
|
d872b712f0255a2d209e7b881f09f41ba515c075
|
Shell
|
bitmap/dotfiles.archive
|
/macos/bootstrap.sh
|
UTF-8
| 1,126
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/zsh
# One-shot macOS bootstrap: installs Xcode CLT, Homebrew, NVM, Rust and
# the Brewfile apps, then links dotfiles and iTerm preferences.
# Must be run from the dotfiles checkout so the relative paths resolve.
local DOTS="$HOME/.dotfiles"
if [[ "$PWD" != "$HOME/.dotfiles" ]]; then
print -P "%F{1}Error!%f Run script %F{4}./setup-macos%f from %F{3}$DOTS%f"
exit 1
fi
# Prime sudo so the keep-alive loop below can refresh it non-interactively.
sudo -v
# Keep-alive sudo
# Refresh the sudo timestamp every minute until this script exits
# (kill -0 "$$" fails once the parent shell is gone, ending the loop).
while true; do
sudo -n true
sleep 60
kill -0 "$$" || exit
done 2>/dev/null &
echo "\nInstalling Xcode command line tools"
xcode-select --install
echo "\nInstalling Homebrew..."
# The leading "echo |" answers the installer's "press RETURN" prompt.
echo | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
echo "\nInstalling NVM"
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.36.0/install.sh | bash
echo "\nInstalling Apps and dev tools..."
brew bundle --file ./macos/Brewfile
echo "\nInstalling Rust..."
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
echo "\nSymlinking settings..."
source ./setup.sh -f
echo "\nLinking iTerm preferences..."
defaults write com.googlecode.iterm2.plist PrefsCustomFolder -string "$DOTS/macos/iterm"
defaults write com.googlecode.iterm2.plist LoadPrefsFromCustomFolder -bool true
# macOS defaults
source ./macos/defaults.sh
echo "\nFinished install. Please restart."
| true
|
be6671132b1b903b79a408655002420a3b80ecd9
|
Shell
|
softcake/softcake.gradle-java-template
|
/travis-build.sh
|
UTF-8
| 3,208
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script will build the project.
# CI dispatcher for Travis: chooses the Gradle invocation depending on
# whether the build is a pull request, a snapshot branch, or a tagged
# release, running SonarQube analysis where credentials are available.
git fsck --full
EXIT_STATUS=0
# Print a visually separated section header on stdout.
function strongEcho {
echo ""
echo "================ $1 ================="
}
# Secure env vars are only exposed on trusted builds; decrypt the GPG
# keyring used for artifact signing when they are present.
if [ "$TRAVIS_SECURE_ENV_VARS" == "true" ]; then
strongEcho 'Decrypt secret key file'
openssl aes-256-cbc -pass pass:$SIGNING_PASSPHRASE -in secring.gpg.enc -out secring.gpg -d
gpg --keyserver keyserver.ubuntu.com --recv-key $SIGNING_KEY
fi
# Case 1: pull request with credentials -> build + SonarQube PR analysis.
if [ "$TRAVIS_PULL_REQUEST" != "false" ] && [ "$TRAVIS_SECURE_ENV_VARS" == "true" ]; then
strongEcho 'Build and analyze pull request'
./gradlew build check sonarqube \
-Dsonar.analysis.mode=issues \
-Dsonar.github.pullRequest=$TRAVIS_PULL_REQUEST \
-Dsonar.github.repository=$TRAVIS_REPO_SLUG \
-Dsonar.github.oauth=$SONAR_GITHUB_OAUTH \
-Dsonar.host.url=$SONAR_HOST_URL \
-Dsonar.login=$SONAR_LOGIN || EXIT_STATUS=$?
# Case 2: non-master branch push without a tag -> snapshot release.
elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" == "" ]&& [ "$TRAVIS_BRANCH" != "master" ]; then
strongEcho 'Build Branch with Snapshot => Branch ['$TRAVIS_BRANCH']'
# for snapshots we upload to Sonatype OSS
./gradlew snapshot uploadArchives sonarqube --info \
-Dsonar.host.url=$SONAR_HOST_URL \
-Dsonar.login=$SONAR_LOGIN \
-Prelease.travisci=true \
-Dsonar.projectVersion=$TRAVIS_BRANCH \
-Psigning.keyId="$SIGNING_KEY" \
-Psigning.password="$SIGNING_PASSPHRASE" \
-Psigning.secretKeyRingFile="${TRAVIS_BUILD_DIR}/secring.gpg" || EXIT_STATUS=$?
# Case 3: tagged push -> final release, then commit an updated changelog.
elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" != "" ]; then
strongEcho 'Build Branch for Release => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG']'
# for snapshots we upload to Bintray and Sonatype OSS
./gradlew final uploadArchives sonarqube --info \
-PbintrayNoDryRun \
-Dsonar.host.url=$SONAR_HOST_URL \
-Dsonar.login=$SONAR_LOGIN \
-Prelease.travisci=true \
-Prelease.useLastTag=true \
-Dsonar.projectVersion=$TRAVIS_BRANCH \
-Psigning.keyId="$SIGNING_KEY" \
-Psigning.password="$SIGNING_PASSPHRASE" \
-Psigning.secretKeyRingFile="${TRAVIS_BUILD_DIR}/secring.gpg" || EXIT_STATUS=$?
# Only touch the changelog when the release build itself succeeded.
if [[ $EXIT_STATUS -eq 0 ]]; then
strongEcho "Update Changelog"
git config --global user.name "$GIT_NAME"
git config --global user.email "$GIT_EMAIL"
git config --global credential.helper "store --file=~/.git-credentials"
echo "https://$GH_TOKEN:@github.com" > ~/.git-credentials
git clone --depth=50 --branch=master https://${GH_TOKEN}@github.com/softcake/softcake.gradle-java-template.git softcake/softcake.gradle-java-template
cd softcake/softcake.gradle-java-template
git branch --all
git checkout master
echo "Current branch is - $(git rev-parse HEAD)"
./gradlew gitChangelogTask
git add CHANGELOG.md
git commit -a -m "Updating Changelog for release '$TRAVIS_TAG'"
git push origin master
fi
# Fallback: plain build without analysis or upload.
else
strongEcho 'Build, no analysis => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG'] Pull Request ['$TRAVIS_PULL_REQUEST']'
# Build branch, without any analysis
./gradlew build check -Prelease.useLastTag=true || EXIT_STATUS=$?
fi
if [[ $EXIT_STATUS -eq 0 ]]; then
strongEcho "Successful"
fi
exit $EXIT_STATUS
| true
|
64fe6354f83edbf129646c5d0c24431aab2e9329
|
Shell
|
h4ck3rm1k3/awips2
|
/tools/devAutoDeploy/devCave/qpid-auto-shutdown.sh
|
UTF-8
| 894
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/sh
#----------------------------------------------------------------------
# Auto shutdown script for a qpid server.
#----------------------------------------------------------------------
# Seconds to wait after issuing the stop before reporting status.
PAUSE=3
# Capture the daemon PID *before* stopping it, so we can report whether
# the daemon was actually running when shutdown was requested.
PID=`/sbin/pidof qpidd`
DATE=`date`
# Count of problems observed (was previously used uninitialized, and
# incremented with the bash-only "let" builtin under #!/bin/sh).
ERROR_COUNT=0
echo "**************************************************************************************"
echo "Stopping QPID - $DATE"
echo "**************************************************************************************"
service edex_qpid stop
sleep ${PAUSE}
if [ -z "${PID}" ]; then
echo " WARNING: Qpid daemon not running"
ERROR_COUNT=$((ERROR_COUNT + 1))
else
echo " Qpid daemon running"
fi
DATE=`date`
echo "**************************************************************************************"
echo "QPID Stopped At $DATE"
echo "**************************************************************************************"
echo ""
exit
| true
|
5a15ddf4c61f2d372603079929833d966cceb08c
|
Shell
|
OpenVnmrJ/OpenVnmrJ
|
/src/scripts/ovjcraft.sh
|
UTF-8
| 1,969
| 3.390625
| 3
|
[
"Apache-2.0",
"GPL-3.0-only"
] |
permissive
|
#!/bin/bash
# Launcher for the CRAFT application in OpenVnmrJ: prints help text or
# forwards a keyword to a running Vnmrbg via the sendMessage macro.
# Looks for sendMessage macro in systemdir or userdir
# Default the VnmrJ system directory when not already set.
if [ "x$vnmrsystem" = "x" ]
then
vnmrsystem=/vnmr
fi
# Source the per-user environment when the user directory is unknown.
if [ "x$vnmruser" = "x" ]
then
source $vnmrsystem/user_templates/.vnmrenvsh
fi
export DISPLAY=:0.0
# Default to the help keyword when invoked without arguments.
if [ $# -lt 1 ]; then
Arg="help"
else
Arg="$1"
fi
if [ "x$Arg" = "xhelp" ]; then
# We should use the display.jar
if [ -f "$vnmruser/manual/ovjcraft" ]; then
cat "$vnmruser/manual/ovjcraft"
elif [ -f "$vnmruser/CPpatch/CRAFTpatch/manual/ovjcraft" ]; then
cat "$vnmruser/CPpatch/CRAFTpatch/manual/ovjcraft"
elif [ -f "$vnmrsystem/manual/ovjcraft" ]; then
# BUGFIX: this branch tested $vnmrsystem/manual/ovjcraft but then
# cat'ed $vnmrsystem/vnmrsys/manual/ovjcraft; cat the tested file.
cat "$vnmrsystem/manual/ovjcraft"
else
echo "Supports 5 keyword arguments -"
echo " submit - displays submit2craft popup"
echo " admin - displays craft queue manager"
echo " qest - displays craftQnmr options"
echo " pref - displays craftPref options"
echo " exit - exits craft application"
echo " Any other argument is sent to Vnmrbg as is"
fi
elif [ "x$Arg" = "xfg" ]; then
# Foreground mode: start the full VnmrJ UI with the CRAFT splash screen.
vnmrj -exec craftv5 -splash $vnmrsystem/iconlib/Splash_CRAFT.png
else
# Background mode: dispatch the keyword to Vnmrbg via sendMessage.
if [ "x$Arg" = "xsubmit" ]; then
Vnmrbg -mback -n- -s sendMessage "submit2craft('window') s2cparbg='bg'" "-splash $vnmrsystem/iconlib/Splash_CRAFT.png"
elif [ "x$Arg" = "xexit" ]; then
Vnmrbg -mback -n0 -s sendMessage "exitComm"
elif [ "x$Arg" = "xadmin" ]; then
Vnmrbg -mback -n- -s sendMessage "craftQueueManage('window') cQMpar2='bg'" "-splash $vnmrsystem/iconlib/Splash_CRAFT.png"
elif [ "x$Arg" = "xqest" ]; then
Vnmrbg -mback -n- -s sendMessage "craftQnmr('window')" "-splash $vnmrsystem/iconlib/Splash_CRAFT.png"
elif [ "x$Arg" = "xpref" ]; then
Vnmrbg -mback -n- -s sendMessage "craftPref('window') cpparbg='bg'" "-splash $vnmrsystem/iconlib/Splash_CRAFT.png"
else
Vnmrbg -mback -n- -s sendMessage "$Arg" "-splash $vnmrsystem/iconlib/Splash_CRAFT.png"
fi
fi
| true
|
fb28f44749da889ca0ea64b24991601ca8944e5d
|
Shell
|
conormag94/xkcd-downloader
|
/xkcd.sh
|
UTF-8
| 651
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# Download the most recent XKCD comic image as <num>-xkcd.png.
# Pull XKCD's most recent comic, saving JSON result to temp file
echo "Searching XKCD..."
curl -G -s http://xkcd.com/info.0.json | json_pp > res.json
# Extract latest comic number from json file (a number followed by a comma)
# BUGFIX: '\d' is not a portable ERE class (GNU grep -E treats it as a
# literal "d"); use POSIX [0-9] and keep only the first match.
grep -E -o '[0-9]+,' res.json | head -n 1 > num.txt
TEMP=`cat num.txt`
# Remove comma found by regex
COMIC_NUM=${TEMP%?}
echo "Current comic: #$COMIC_NUM"
# Extract url for latest XKCD and save to temp file
grep -o http:\/\/\[a-z\.\/_\]\* res.json > url.txt
# Download png from the url listed in the temp file
cat url.txt | xargs curl -G -s -o "$COMIC_NUM-xkcd.png"
# Clean up all temp files (num.txt was previously left behind).
rm url.txt
rm res.json
rm num.txt
echo "XKCD #$COMIC_NUM saved!"
| true
|
be2f12e880ad1bf248327c2d042a4a5c1d0f850a
|
Shell
|
toddmcbrearty/myvagrant2
|
/welcome.sh
|
UTF-8
| 1,880
| 2.84375
| 3
|
[] |
no_license
|
# Post-provision welcome banner for the Vagrant box: prints an ASCII-art
# logo, next steps, and a cheat sheet of common Vagrant commands.
echo ""
echo ""
echo ""
echo ""
echo " __ __ .__ ___________ __ "
echo " / \ / \ ____ | | ____ ____ _____ ____ \_ _____/__ __ ____ _____/ |______ ___ _______ ___________ ______"
echo " \ \/\/ // __ \| | _/ ___\/ _ \ / \_/ __ \ | __)_\ \/ // __ \ / \ __\__ \\ \/ /\__ \ _/ __ \_ __ \/ ___/"
echo " \ /\ ___/| |_\ \__( <_> ) Y Y \ ___/ | \\ /\ ___/| | \ | / __ \\ / / __ \\ ___/| | \/\___ \ "
echo " \__/\ / \___ >____/\___ >____/|__|_| /\___ > /_______ / \_/ \___ >___| /__| (____ /\_/ (____ /\___ >__| /____ >"
echo " \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ "
echo ""
echo ""
echo "You're all set!"
echo "Drop the sites you generated server blocks for in your www folder within this vagrant project."
echo ""
# Cheat sheet of everyday Vagrant lifecycle commands.
echo "Some Vagrant commands:"
echo ""
echo "Command Description"
echo "vagrant up Start the virtual machine. This will reload any configuration changes you may have made."
echo "vagrant halt Shut down the virtual machine completely. This does not save state."
echo "vagrant suspend Suspend the virtual machine. This will save state."
echo "vagrant reload Reload vagrant configuration. The equivalent of doing vagrant halt followed by vagrant up."
echo "vagrant destroy This will delete your virtual machine. You can use vagrant up to bring it back"
echo " but it will take a while to complete."
echo "Eat well and be merry."
echo "I HOLD ABSOLUTELY NOT RESPONSIBILITY FOR ANYTHING LOST OR STOLEN"
echo ""
echo ""
| true
|
ad1cbd9167cf4008e022a84cad008ec78123c07d
|
Shell
|
ICP-Foundation/sdk
|
/e2e/tests-replica/deploy.bash
|
UTF-8
| 2,523
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bats
# Bats end-to-end tests for "dfx deploy": dependency resolution,
# install-vs-upgrade behavior, and argument passing against a local
# replica. Helper functions come from ../utils/_ (not shown here).
load ../utils/_
# Fresh project/replica state before every test.
setup() {
standard_setup
}
# Stop the replica and clean the workspace after every test.
teardown() {
dfx_stop
standard_teardown
}
@test "deploy from a fresh project" {
dfx_new hello
dfx_start
install_asset greet
assert_command dfx deploy
assert_command dfx canister call hello greet '("Banzai")'
assert_eq '("Hello, Banzai!")'
}
# Deploying a single canister must not drag in unrelated canisters.
@test "deploy a canister without dependencies" {
dfx_new hello
dfx_start
install_asset greet
assert_command dfx deploy hello
assert_match 'Deploying: hello'
assert_not_match 'hello_assets'
}
@test "deploy a canister with dependencies" {
dfx_new hello
dfx_start
install_asset greet
assert_command dfx deploy hello_assets
assert_match 'Deploying: hello hello_assets'
}
@test "deploy a canister with non-circular shared dependencies" {
install_asset transitive_deps_canisters
dfx_start
assert_command dfx deploy canister_f
assert_match 'Deploying: canister_a canister_f canister_g canister_h'
}
# Dependency cycles must be detected and reported, not deployed.
@test "report an error on attempt to deploy a canister with circular dependencies" {
install_asset transitive_deps_canisters
dfx_start
assert_command_fail dfx deploy canister_d
assert_match 'canister_d -> canister_e -> canister_d'
}
@test "deploy with InstallMode::Install on an empty canister" {
dfx_new hello
install_asset greet
dfx_start
assert_command dfx canister create --all
assert_command dfx deploy
assert_match 'Installing code for canister'
}
@test "dfx deploy supports arguments" {
dfx_new hello
install_asset greet_arg
dfx_start
assert_command dfx canister create --all
assert_command dfx deploy --argument '("World")'
assert_command dfx canister call hello greet
assert_match 'Hello, World'
}
@test "dfx deploy with InstallMode::Install on first invocation, InstallMode::Upgrade on second" {
dfx_new hello
install_asset greet
dfx_start
# In the normal case, whether for an initial install or a subsequent install,
# dfx deploy does the right thing, so it doesn't need to retry.
# Therefore, there is no "attempting (install|upgrade)" message.
assert_command dfx deploy hello
assert_match 'Installing code for canister'
assert_command dfx canister call hello greet '("First")'
assert_eq '("Hello, First!")'
assert_command dfx deploy hello
assert_match 'Upgrading code for canister'
assert_command dfx canister call hello greet '("Second")'
assert_eq '("Hello, Second!")'
}
| true
|
e64d34ec18b687ffcec0426c7e5beb7fc889b7fe
|
Shell
|
souvik-de/azhpc-images
|
/centos/common/hpc-tuning.sh
|
UTF-8
| 1,652
| 3.234375
| 3
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
#!/bin/bash
# Azure HPC image tuning: disable unneeded services, raise resource
# limits, enable NUMA reclaim, and replace the stock Linux agent with a
# pinned WALinuxAgent build configured for RDMA.
# Disable some unneeded services by default (administrators can re-enable if desired)
systemctl disable firewalld
# Update memory limits
cat << EOF >> /etc/security/limits.conf
*               hard    memlock         unlimited
*               soft    memlock         unlimited
*               hard    nofile          65535
*               soft    nofile          65535
*               hard    stack           unlimited
*               soft    stack           unlimited
EOF
# Enable reclaim mode
echo "vm.zone_reclaim_mode = 1" >> /etc/sysctl.conf
sysctl -p
# Uninstall WALinuxAgent from base image
rpm -e --nodeps WALinuxAgent
# Install Custom WALinuxAgent
# Pinned to v2.3.1.1; bump the URL and the pushd path together.
WALINUXAGENT_DOWNLOAD_URL=https://github.com/Azure/WALinuxAgent/archive/refs/tags/v2.3.1.1.tar.gz
TARBALL=$(basename ${WALINUXAGENT_DOWNLOAD_URL})
wget $WALINUXAGENT_DOWNLOAD_URL
tar zxvf $TARBALL
pushd WALinuxAgent-2.3.1.1
python setup.py install --register-service
popd
# Configure WALinuxAgent
# Enable RDMA and shorten several agent polling periods.
sudo sed -i -e 's/# OS.EnableRDMA=y/OS.EnableRDMA=y/g' /etc/waagent.conf
echo "Extensions.GoalStatePeriod=120" | sudo tee -a /etc/waagent.conf
echo "OS.EnableFirewallPeriod=300" | sudo tee -a /etc/waagent.conf
echo "OS.RemovePersistentNetRulesPeriod=300" | sudo tee -a /etc/waagent.conf
echo "OS.RootDeviceScsiTimeoutPeriod=300" | sudo tee -a /etc/waagent.conf
echo "OS.MonitorDhcpClientRestartPeriod=60" | sudo tee -a /etc/waagent.conf
echo "Provisioning.MonitorHostNamePeriod=60" | sudo tee -a /etc/waagent.conf
sudo systemctl restart waagent
# Record the installed agent version for the image manifest.
$COMMON_DIR/write_component_version.sh "WAAGENT" $(python /usr/sbin/waagent --version | grep -o "[0-9].[0-9].[0-9].[0-9]" | head -n 1)
| true
|
b1ed9a9590c05ae42dff767bc44892a166570160
|
Shell
|
Mullefa/tech-time-talk
|
/0-setup/execute_command.sh
|
UTF-8
| 350
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Utility function for making it transparent commands that are getting executed in bash scripts.
# Prompts before running the command and again after it completes.
green='\033[0;32m'
blue='\033[01;36m'
reset='\033[0m' # No Color
# Join all arguments into one command string. "$*" is the correct form
# for a scalar assignment (the previous COMMAND="$@" trips ShellCheck
# SC2124 and has unspecified behavior).
COMMAND="$*"
# Pause so the user can read what is about to run, run it, then pause again.
read -p "$(echo -e "${green}executing command: ${blue} ${COMMAND} ${reset}")"
eval "${COMMAND}"
read -p "$(echo -e "${green}command complete ${reset}")"
| true
|
a97c6ddec7468f66f52be76d6eb963e94023df22
|
Shell
|
itspriddle/bash-project-template
|
/bin/APP
|
UTF-8
| 902
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Usage: APP
#
# NAME
#   APP -- FIXME
#
# SYNOPSIS
#   APP
#
# DESCRIPTION
#   FIXME
#
# OPTIONS
#   FIXME
#
# EXAMPLES
#   FIXME
#
# SEE ALSO
#   FIXME
# Call this script with DEBUG=1 to add some debugging output
if [[ "$DEBUG" ]]; then
  # NOTE: the comment block above doubles as the runtime help text --
  # print_help parses this file's leading comments, so keep edits there
  # user-facing.
  # PS4 prefixes each xtrace line with "script:lineno" for readability.
  export PS4='+ [${BASH_SOURCE##*/}:${LINENO}] '
  set -x
fi

# Abort on the first unhandled command failure.
set -e
# Echoes given args to STDERR
#
# Uses printf instead of echo so arguments that look like echo options
# (e.g. "-n" or "-e") are printed literally rather than interpreted.
#
# $@ - args to print (joined with spaces, followed by a newline)
warn() {
  printf '%s\n' "$*" >&2
}
# Print the help text for this program
#
# Reads this script's own leading comment block: sed stops at the first
# non-comment line (/^#/!q), normalizes bare "#" lines, and strips the
# "# " prefix; awk then emits either the whole block (full help) or just
# the paragraph starting at "Usage:" (usage only) depending on $1.
#
# $1 - if set, show full help, otherwise just usage
print_help() {
  sed -ne '/^#/!q;s/^#$/# /;/^# /s/^# //p' < "$0" |
    awk -v f="$1" '!f && /^Usage:/ || u { u=!/^\s*$/; if (!u) exit } u || f'
}
# Main program: consume leading options, stopping at the first
# non-option argument or an explicit "--" separator.
main() {
  while (( $# > 0 )); do
    case "$1" in
      -h | --help)
        # "${1#-h}" is "" for -h (usage only) and "--help" for the
        # long form (full help text).
        print_help "${1#-h}"
        return 0
        ;;
      --)
        shift
        break
        ;;
      -*)
        warn "Invalid option '$1'"
        return 1
        ;;
      *)
        break
        ;;
    esac
  done
}

main "$@"
| true
|
fb4c8bde122fb75af2d8ac0f7d5e8a571bed91b6
|
Shell
|
tompipen/aubit4gl_code
|
/aubit4gltest/branches/AubitStable/incl/multiple_test_inc.sh
|
UTF-8
| 5,667
| 3.5625
| 4
|
[] |
no_license
|
##############################################################################
# Runing multiple configuration of tests (-alltests)
##############################################################################
# Included by the test driver: when -alltests is requested, iterates over
# every supported UI/packer and database configuration, invoking run_tests
# once per configuration and accumulating results in ./alldb.log plus the
# pass/fail counter files shared with the called script.
if test "$ALL_TESTS_NONDB" = "1" || test "$ALL_TESTS_DB" = "1"; then
#Running all tests - initialise results file,defaults and counters:
echo "0" > ./fail.cnt
echo "0" > ./pass.cnt
echo "" > ./alldb.log
echo "===================== Test results ========================" >> ./alldb.log
echo "" >> ./alldb.log
#echo "Platform: $PLATFORM" >> ./alldb.log
echo "Flags: $FLAGS Platform: $PLATFORM Date:$DATE" >> ./alldb.log
echo " PWD=$CURR_DIR" >> ./alldb.log
echo "" >> ./alldb.log
#set the common flags we will use to run tests - we will add test
#configuration specific ones later, as needed
#-alldbrun = is to indicate to called script that it was called from 'alltests' loop
#-defaults = apply platform specific defaults
#-short = show only short summary
#-noecho = Don't show any non-critical messages while running tests
#-silent = don't show output of programs execution
COMMON_FLAGS="-defaults -short -alldbrun -silent -noecho"
fi
if test "$ALL_TESTS_NONDB" = "1"; then
#Running all tests - run non-db tests
######################
#First non-db tests with different packers (cant test PACKER_PERL -
#output only) and UI's plus pcode:
#This is not 100% what we want, since at leaset some db tests will use
#packers (forms) too, and pcode/EC should be tested on everything, but
#this would force us to run EVERYTHING in the loop, taking too much
#ime - maybe one day...
NODB_LIST="-xml -packed -xdr -console -pcode"
for a in $NODB_LIST; do
sh run_tests -nodb $a $COMMON_FLAGS $RUN_ONE
RET=$?
if test "$RET" != "0"; then
echo "EXIT with error $RET on $a (non-db)" >> ./alldb.log
if test "$STOP_ON_EXIT"; then
echo "Stop after EXIT"
exit 44
fi
fi
done
fi
if test "$ALL_TESTS_DB" = "1"; then
#Running all tests - run all db tests
#for debugging - you can turn on/off particular class of db tests here:
ODBC_U=1
ODBC_I=1
DB_NATIVE=1
ODBC_DIRECT=1
DB_EC=1
######################
#native plug-ins and directly linked ODBC plug-ins
if test "$DB_NATIVE" = "1"; then
DB_PLUGIN="-sqlite -esqli -pg"
fi
#Directly-linked ODBC drivers are only available on UNIX builds.
if test "$PLATFORM" = "UNIX" && test "$ODBC_DIRECT" = "1"; then
DB_PLUGIN="$DB_PLUGIN -ifxodbc -sqliteodbc -pgodbc -sapodbc"
fi
for a in $DB_PLUGIN; do
sh run_tests -onlydb $a $COMMON_FLAGS $RUN_ONE
RET=$?
if test "$RET" != "0"; then
echo "EXIT with error $RET on $a (db native)" >> ./alldb.log
if test "$STOP_ON_EXIT"; then
echo "Stop after EXIT"
exit 44
fi
fi
done
#######################
#ODBC managers, for each available backend:
DB_LIST="-odbcdb-ifx -odbcdb-pg -odbcdb-sqlite -odbcdb-sap"
if test "$PLATFORM" != "UNIX"; then
#we are on Windows - only ODBC manager is native Windows odbc32
for a in $DB_LIST; do
sh run_tests -onlydb -winodbc $a $COMMON_FLAGS $RUN_ONE
RET=$?
if test "$RET" != "0"; then
echo "EXIT with error $RET on $a (ODBC manager-Windows)" >> ./alldb.log
if test "$STOP_ON_EXIT"; then
echo "Stop after EXIT"
exit 44
fi
fi
done
else
#we are on UNIX
#Try each backend through both unixODBC and iODBC managers.
for a in $DB_LIST; do
#UnixODBC
if test "$ODBC_U" = "1"; then
#echo "---------------------------------------"
sh run_tests -onlydb -unixodbc $a $COMMON_FLAGS $RUN_ONE
RET=$?
if test "$RET" != "0"; then
echo "EXIT with error $RET on $a (unixODBC)" >> ./alldb.log
if test "$STOP_ON_EXIT"; then
echo "Stop after EXIT"
exit 44
fi
fi
fi
#iODBC
if test "$ODBC_I" = "1"; then
sh run_tests -onlydb -iodbc $a $COMMON_FLAGS $RUN_ONE
RET=$?
if test "$RET" != "0"; then
echo "EXIT with error $RET on $a (iODBC)" >> ./alldb.log
if test "$STOP_ON_EXIT"; then
echo "Stop after EXIT"
exit 44
fi
fi
fi
done
fi
#######################
#now EC output:
if test "$DB_EC" = "1"; then
EC_LIST="-eci -ecs -ecp -ecq -ecg"
for a in $EC_LIST; do
sh run_tests -onlydb $a $COMMON_FLAGS $RUN_ONE
RET=$?
if test "$RET" != "0"; then
echo "EXIT with error $RET on $a (EC)" >> ./alldb.log
if test "$STOP_ON_EXIT"; then
echo "Stop after EXIT"
exit 44
fi
fi
done
fi
fi
if test "$ALL_TESTS_NONDB" = "1" || test "$ALL_TESTS_DB" = "1"; then
#Running all tests - finish results file and exit
#Counter files are incremented by the called run_tests script.
FAIL_CNT_TOT=`cat ./fail.cnt`
PASS_CNT_TOT=`cat ./pass.cnt`
echo "Total passed=$PASS_CNT_TOT Total failed=$FAIL_CNT_TOT" >> ./alldb.log
echo "" >> ./alldb.log
echo "===================== End of Test results ===================" >> ./alldb.log
echo "" >> ./alldb.log
rm -f ./fail.cnt
rm -f ./pass.cnt
cat ./alldb.log
#exit this script - we are done
exit
fi
| true
|
03bdd7cb9db0eadab339ea461b22421f5056af95
|
Shell
|
snprpc/shadowsocket-manager-script
|
/check_dir.sh
|
UTF-8
| 1,577
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Monitors peer directories on an NFS share and records ping statistics
# for this host into per-peer JSON files.
# NFS-backed directory containing one sub-directory per peer IP address.
root_directory="/mnt/nfs-data/ShadowSocket/NetStatus"
# This host's public IP; replaced at startup from /root/ip.txt.
ip_address="0.0.0.0"
# Query an external service for this host's public IP (printed to stdout).
function get_ip () {
curl ip.6655.com/ip.aspx
}
# Initialize the environment: cache the public IP to /root/ip.txt and
# verify the NFS root directory exists. Always returns 0.
function sh_init () {
get_ip > /root/ip.txt
if [ -d "$root_directory" ]; then
#echo "The root_directory is exist"
return 0
else
echo "The root_directory is not exist"
# If the directory does not exist, mount it locally from the NFS server
# (mount step intentionally left unimplemented below).
#mount
return 0
fi
}
# Ping a peer four times and write min/max round-trip times as JSON.
#
# $1 - peer IP/hostname to ping
# $2 - output path for the JSON result
#
# The awk program reads ping reply lines 2-5, strips the "time=" prefix
# from field 7 and sorts the values numerically, so netstatus[0] is the
# minimum and netstatus[3] the maximum latency.
# NOTE(review): assumes "time=X" is always field 7 of the ping output --
# this varies between ping implementations; confirm on the target OS.
# NOTE(review): the emitted JSON has a trailing comma before "}", which
# strict parsers reject.
function check_netstatus () {
#echo $1
#echo $2
netstatus=($(ping -c4 -W1 $1 | awk '
NR==2,NR==5{
gsub(/time=/,"",$7);
package_num++;
netstatus[package_num]=$7;
} END {
for (i in netstatus)
print netstatus[i] | "sort -n";
}'))
min=${netstatus[0]}
max=${netstatus[3]}
echo "{
\"ip\":\"$ip_address\",
\"packages\":4,
\"max_network_delay\":\"$max ms\",
\"min_network_delay\":\"$min ms\",
}" > $2
}
# Scan the NFS root for peer directories and, for every peer that does
# not yet have a JSON result for this host, create it and launch a
# background ping check.
#
# $1 - root directory whose sub-directories are named after peer IPs
function read_the_dir () {
ip_address_dirs=($(ls -l $1 | awk '/^d/ {
print $NF;
}'))
# echo "Number of directories: "${#ip_address_dirs[@]}
for ip_dir in ${ip_address_dirs[*]}
do
if [ -f "$root_directory/$ip_dir/$ip_address.json" ]; then
continue
else
filepath="$root_directory/$ip_dir/$ip_address.json"
touch $filepath
# Run the ping probe asynchronously so one slow peer doesn't block the scan.
check_netstatus $ip_dir $filepath &
fi
done
}
# Initialize the script environment
sh_init
# Read the host's public IP address
if [ -f /root/ip.txt ]; then
ip_address=$( cat /root/ip.txt )
fi
#echo $ip_address
while [ 1 ]
do
# Read the NFS server directory
# NOTE(review): $? here reflects the previous command -- the [ -f ... ]
# test on the first pass, then "sleep 1" on later passes -- so this
# condition looks unintentional; confirm what was meant to be checked.
if [ $? == 0 ]; then
read_the_dir $root_directory
fi
sleep 1
done
| true
|
f10c88c32c0cbd6f0b3b14d1da1664dfd0261dfd
|
Shell
|
aswinsegu/edge-trg-demo
|
/deploy.sh
|
UTF-8
| 241
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Deploy the Apigee Edge proxy named by the first argument, using the
# bundle directory of the same name next to this script.
# Requires EDGE_USERNAME and EDGE_PASSWORD in the environment.
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
USAGE="Usage: deploy [proxy name]"
ORG="org"
ENV="test"
set -eu
# Validate the required proxy-name argument (USAGE was defined but never
# printed before; set -u alone gave an unhelpful "unbound variable" error).
if [ "$#" -lt 1 ]; then
  echo "$USAGE" >&2
  exit 1
fi
cd "$BASEDIR"
apigeetool deployproxy -u "$EDGE_USERNAME" -p "$EDGE_PASSWORD" -o "$ORG" -e "$ENV" -n "$1" -d "./$1"
| true
|
3964612d1da3103cf8360814e0a40cbf70c9f15b
|
Shell
|
cha63506/holist
|
/holist-api-server/install.sh
|
UTF-8
| 1,079
| 3.46875
| 3
|
[] |
no_license
|
#! /bin/sh
# Rebuild and (re)launch the holist node.js + MongoDB container pair,
# persisting Mongo data and logs on the host filesystem.
# Constants
readonly SCRIPT=$(cd "$(dirname "$0")"; pwd)
readonly USERNAME=holist
# Stop and remove the holist-nodejs container.
docker stop holistNodejs holistMongodb
docker rm holistNodejs holistMongodb
# Create a new directory to save and persist the data outside the MongoDB container.
sudo mkdir -p /var/db/holist-mongodb
# Create a new directory to save and persist the
# MongoDB log files outside the container.
sudo mkdir -p /var/log/holist-mongodb
# Remove the mongod.lock file if existing.
sudo rm -f /var/db/holist-nodejs/mongod.lock
# Build the holist-nodejs container.
docker build -t $USERNAME/holist-nodejs $SCRIPT/holist-nodejs
# Build the holist-mongodb container.
docker build -t $USERNAME/holist-mongodb $SCRIPT/holist-mongodb
# Run the MongoDB container.
# For exposing the port add '-p 27017'.
docker run -itd -p 49101:27017 -v /var/db/holist-mongodb:/data/db -v /var/log/holist-mongodb:/data/log --name holistMongodb $USERNAME/holist-mongodb
# Run the node.js container.
docker run -itd -p 49100:8080 --name holistNodejs --link holistMongodb:holistMongodb $USERNAME/holist-nodejs
| true
|
182d02d3676d6dca94f09ef0854c3f2985394998
|
Shell
|
sakuyacatcat/django-unit-and-gui-test
|
/scripts/entrypoint.sh
|
UTF-8
| 1,046
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: install Python dependencies for the selected
# Django settings module, prepare the database, then start the server.
# Set setting env, please exchange the comment out line to switch for the other env
export DJANGO_SETTINGS_MODULE="config.settings.dev"
# export DJANGO_SETTINGS_MODULE="config.settings.prod"
# Install packages for dev or prod, about which packages will be installed, please decide by changing above env value
if [ $DJANGO_SETTINGS_MODULE = "config.settings.dev" ]; then
pip3 install -r requirements/dev.txt
else
pip3 install -r requirements/prod.txt
mkdir -p /var/log/app && touch /var/log/app/app.log
chmod +w /var/log/app/app.log
fi
# Prepare database
python3 manage.py makemigrations
python3 manage.py migrate
# NOTE(review): superuser credentials are hardcoded ('admin'/'adminpass');
# acceptable only for local development -- never ship this to production.
python3 manage.py shell -c "from django.contrib.auth import get_user_model; get_user_model().objects.create_superuser('admin', 'admin@example.com', 'adminpass');"
# Collect Staticfiles
python3 manage.py collectstatic --noinput
# Set server(gunicorn)
# mkdir -p /var/run/gunicorn
# gunicorn config.wsgi --bind=unix:/var/run/gunicorn/gunicorn.sock
# Set server(Django)
python3 manage.py runserver 0.0.0.0:8000
| true
|
5f813c4071f7599b7570ad60aa41156aa8773ea7
|
Shell
|
onceknown/ledger-graph
|
/bin/prep-gcloud
|
UTF-8
| 403
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# add gcloud and kubectl to PATH so its available with sudo on circleci
# NOTE(review): the guard below fires when CIRCLECI is *unset* (-z),
# which contradicts the comment above -- confirm whether -n was intended.
if [ -z "$CIRCLECI" ]; then
echo "export PATH=$PATH:/opt/google-cloud-sdk/bin
$(cat /root/.bashrc)" > /root/.bashrc
source /root/.bashrc
fi
# Pin compute zone and project, then make sure kubectl is installed and
# the SDK components are current.
gcloud config set compute/zone us-central1-a
gcloud config set project $GCLOUD_PROJECT
gcloud --quiet components install kubectl
gcloud --quiet components update
| true
|
26d9a8dc62e5a6e1097982fa3de2cebdee8ee314
|
Shell
|
h3abionet/refpanels
|
/v4/03_filter.sh
|
UTF-8
| 1,038
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#SBATCH --job-name='03_filter.sh'
#SBATCH --cpus-per-task=32
#SBATCH --mem=232GB
#SBATCH --output=./logs/out.log
#SBATCH --error=./logs/err.log
#SBATCH --time=96:00:00
# Filter per-chromosome VCFs: keep biallelic SNPs/indels with AC >= 3.
# With no arguments the script fans itself out over chromosomes 1-22 via
# GNU parallel; with two arguments it filters a single input file.
JOBLOG="./logs/03_filter.log"
# BUGFIX: "~" does not expand inside double quotes, so the previous
# "~/scratch/..." patterns reached bcftools as literal paths; use $HOME.
INPATTERN="$HOME/scratch/v4/02_rsids/v4.chr{}.rs.vcf.gz"
OUTPATTERN="$HOME/scratch/v4/03_filtered/v4.chr{}.sn.vcf.gz"
SCRIPTNAME="03_filter.sh"
if [[ $(hostname -s) = slurm-login ]]; then
echo "don't run on headnode!"
echo "${SCRIPTNAME}"
exit 1
elif (( $# == 0 )); then
>&1 echo "0 parameters.. running parallel"
echo "Launching SLURM job on $HOSTNAME" >> $JOBLOG
# Re-invoke this script once per chromosome, substituting {} in the patterns.
seq 1 22 | parallel ./${SCRIPTNAME} $INPATTERN $OUTPATTERN
elif (( $# == 2 )); then
MSG="inner script: $1 $2"
INFILE=$1
OUTFILE=$2
echo $MSG >> $JOBLOG
# Keep SNPs and indels, biallelic sites only, allele count >= 3.
bcftools view \
-v snps,indels \
--min-ac 3 \
--max-alleles 2 \
$INFILE \
-Oz -o $OUTFILE
# Index the compressed output for downstream random access.
tabix $OUTFILE
else
>&2 echo "unknown number of parameters"
fi
: '
# This is a multiline comment
# copy/paste the following to launch parallel
'
| true
|
290ebdc605f4b402db836a6a24a0cd300a56ec83
|
Shell
|
quickbooks2018/aws
|
/kubernetes-kind
|
UTF-8
| 2,365
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Purpose: Kafka Cluster In Kubernetes
# Maintainer DevOps <Muhammad Asim <quickbooks2018@gmail.com>
# Installs kubectl, helm and kind, then creates a local kind cluster.
# Kubectl Installation
curl -# -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl"
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl
kubectl version --client
# Helm Installation
# https://helm.sh/docs/intro/install/
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh
cp /usr/local/bin/helm /usr/bin/helm
rm -f get_helm.sh
helm version
# Kind Installation
# Pinned to kind v0.20.0; alternatives kept below for reference.
curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-amd64
# Latest Version
# ###########################################################################
# curl -Lo ./kind "https://kind.sigs.k8s.io/dl/v0.9.0/kind-$(uname)-amd64"
#############################################################################
# curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.9.0/kind-linux-amd64
# curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.8.1/kind-linux-amd64
chmod +x ./kind
mv ./kind /usr/local/bin
# Cluster Creation
kind create cluster --name cloudgeeks-ca --image kindest/node:v1.19.0
kubectl cluster-info
###################
# Create Deployment
###################
# kubectl create deployment <Deplyment-Name> --image=<Container-Image>
# kubectl create deployment blue-deployment --image=quickbooks2018/blue
######################
# Scaling a Deployment
######################
# kubectl scale --replicas=3 deployment/<Deployment-Name>
# kubectl scale --replicas=3 deployment/blue-deployment
#################################
# Expose Deployment as a Service
#################################
# kubectl expose deployment <Deployment-Name> --type=NodePort --port=80 --target-port=80 --name=<Service-Name-To-Be-Created>
# kubectl expose deployment blue-deployment --type=NodePort --port=80 --target-port=80 --name=blue-service
###############
# Port Forward
###############
# kubectl port-forward service/blue-service --address 0.0.0.0 11000:80 -n default
#END
# kind create cluster --config hack/kind-config.yaml --image=kindest/node:v1.17.2
# NOTE(review): this config file is written *after* the cluster was
# created above and is never passed to kind -- confirm intended usage.
cat << EOF > kind-config.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
  apiServerAddress: "0.0.0.0"
  apiServerPort: 7443
EOF
| true
|
18fb4a4d05ab5a604ca91a9f7c6a7e09a6d4da7e
|
Shell
|
kumar130/shellscripts
|
/evalbuild.sh
|
UTF-8
| 3,628
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Evaluate which build needs to be triggered
# Runs cc_build.sh for the given tag, collects the logs, and fails with
# exit 16 (appending details to Fullpatch.log) on any build error.
# Print the expected invocation and a concrete example.
function script_usage
{
# echo " Usage : $0 <$TAG> <"$MAJ_MIN"_"$SP"PatchScript> <Radia_"$MAJ_MIN"_"$SP"> "
echo " Usage : $0 <TAG> <"MAJ_MIN"_"SP"PatchScript> <"Radia_MAJ_MIN"_"SP"> "
echo " Example : $0 HPCA7_90_7_AGENT_PATCH 7_90_9PatchScript Radia_7_90_9"
echo " To test: $0 HPCA7_90_7_AGENT_PATCH 7.9.7PatchScript Radia_797 "
}
# Require exactly three arguments: tag, patch-script dir, Radia dir.
if [ ! $# -eq 3 ]
then
echo " Please read the below usage. "
script_usage
exit 16
else
TAG=$1
PDIR=$2
RDIR=$3
echo " Entered the following attributes $TAG $PDIR $RDIR "
fi
PHERE=`pwd`
PRT=$PHERE/private
echo " Begin the build "
echo " Performing build for $TAG output will be in $PRT/$TAG "
# Archive any previous output directory for this tag with a timestamp.
if [ -d $PRT/$TAG ]
then
DIRNAME=`date +%F_%T`
echo "moving $PRT/$TAG to $PRT/$TAG-$DIRNAME "
mv $PRT/$TAG $PRT/$TAG-$DIRNAME
fi
if [ ! -d /mnt/ftcshare/nfs_src/client/$RDIR ]
then
echo " Creating /mnt/ftcshare/nfs_src/client/$RDIR since its not present "
sudo mkdir -p /mnt/ftcshare/nfs_src/client/$RDIR
else
echo " /mnt/ftcshare/nfs_src/client/$RDIR is available "
fi
if [ ! -d /Users/caagent/$PDIR ]
then
echo " /Users/caagent/$PDIR does not exist "
echo " Error /Users/caagent/$PDIR does not exist " >> $PHERE/Fullpatch.log
exit 16
else
cd /Users/caagent/$PDIR
echo " We are currently in `pwd` location "
fi
echo " setting the mac environment "
/mnt/ftcshare/nfs_src/client/mac_svn_co_scripts/svnenv.sh
#echo " cc_build.sh $TAG $PRT /mnt/ftcshare/nfs_src/client/$RDIR 1> $TAG.log 2> $TAG.log "
echo " cc_build.sh $TAG $PRT /mnt/ftcshare/nfs_src/client/$RDIR | tee $PRT/$TAG/$TAG.log "
#cc_build.sh $TAG $PRT /mnt/ftcshare/nfs_src/client/$RDIR 1> $TAG.log 2> $TAG.log
cc_build.sh $TAG $PRT /mnt/ftcshare/nfs_src/client/$RDIR 2>&1 | tee -a $PRT/$TAG.log
sudo mv $PRT/$TAG.log $PRT/$TAG/$TAG.log
echo " cc_build.sh $TAG $PRT /mnt/ftcshare/nfs_src/client/$RDIR 2>&1 | tee -a $PRT/$TAG/$TAG.log "
echo " ls -ltr $PRT/$TAG/CA-Build/content/agents/macx86/ram >> $PRT/$TAG/buildlist.log "
ls -ltr $PRT/$TAG/CA-Build/content/agents/macx86/ram >> $PRT/$TAG/buildlist.log
# Collect plain errors, then fatal ones ("Error 1" alone is ignorable).
echo "grep Error $PRT/$TAG/$TAG.log >> $PRT/$TAG/error.log"
grep Error $PRT/$TAG/$TAG.log >> $PRT/$TAG/error.log
echo " grep FAILED $PRT/$TAG/$TAG.log >> $PRT/$TAG/fatalerror.log "
grep FAILED $PRT/$TAG/$TAG.log >> $PRT/$TAG/fatalerror.log
echo " grep -v "Error 1" error.log >> $PRT/$TAG/fatalerror.log "
grep -v "Error 1" $PRT/$TAG/error.log >> $PRT/$TAG/fatalerror.log
if [ -s $PRT/$TAG/fatalerror.log ]
then
echo " Please verify the Error in error.msg log located in `pwd` location " >> $PHERE/Fullpatch.log
echo " You can Ignore " "Error 1" "or" "Error 1 (Ignored)" >> $PHERE/Fullpatch.log
echo " Also verify the buildlist.log to check if the binaries have been built correctly " >> $PHERE/Fullpatch.log
exit 16
# else
# echo " The build completed successfully "
fi
# An empty buildlist.log means no binaries were produced at all.
if [ -s $PRT/$TAG/buildlist.log ]
then
echo " The build completed successfully "
else
echo " Error in build, hence Build Failed " >> $PHERE/Fullpatch.log
echo " Please verify the log at $PRT/$TAG/$TAG.log " >> $PHERE/Fullpatch.log
exit 16
fi
cd $PHERE
if [ -s $PRT/$TAG/fatalerror.log ]
then
# BUGFIX: these two lines used "Echo" (capital E), which is "command
# not found" on case-sensitive systems, so nothing was ever logged here.
echo " The build has failed " >> $PHERE/Fullpatch.log
echo " Verify the build logs for Error and fix the issue " >> $PHERE/Fullpatch.log
exit 16
# else
# echo " Build has completed " >> $PHERE/Fullpatch.log
fi
echo " We are now at $PHERE location "
|
71be1262e33a3abb942688130c83fedc88ac2946
|
Shell
|
ccho-mongodb/miscellaneous-tools
|
/php_docs_deploy.sh
|
UTF-8
| 827
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# BUG FIX: this script uses bash-only features (arrays, [[ ]], =~ and
# BASH_SOURCE) but was previously declared as #!/bin/sh; under dash those
# are syntax errors. The shebang now matches the code.

# Build and deploy the PHP driver docs for a chosen subset of version
# branches. Prompts interactively for each branch and again before deploy.

# version branch names in descending order
VERSIONS=(master v1.15 v1.13 v1.12 v1.11 v1.10 v1.9 v1.8 v1.7 v1.6 v1.5 v1.4 v1.3 v1.2 v1.1)

# clean
git checkout master

# Ask which branches to include in this run.
TODEPLOY=()
for i in "${VERSIONS[@]}"; do
    # -r keeps backslashes in the answer literal
    read -r -p "add branch [$i]? [y/n] " REPLY
    if [[ $REPLY =~ ^[Yy]$ ]]; then
        TODEPLOY+=("$i")
    fi
done

# build each version in order
for i in "${TODEPLOY[@]}"; do
    echo "Branch [$i]: Generating HTML and local directories"
    git checkout "$i" && git pull && make html publish
done

# Prompt in case errors encountered
read -p "Proceed with deploy? " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
    # exit when executed, return when sourced
    [[ "$0" = "$BASH_SOURCE" ]] && exit 1 || return 1
fi

# deploy
for i in "${TODEPLOY[@]}"; do
    echo "Deploying [$i]"
    git checkout "$i" && yes | make deploy
done

echo "Deployment complete!"
| true
|
51ba11f51cee0bc0baaa0cae5f9eacea3c90f838
|
Shell
|
victoria168/gezi-blood-pressure-django
|
/install_db.sh
|
UTF-8
| 1,340
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Provision the PostgreSQL database and role used by the Django application:
# create the database and user, set the connection parameters Django expects,
# and grant the user full privileges on the database.
#
# Usage: edit the config section, run './install_db.sh', then reboot the
# instance from the AWS console if everything succeeded.

#################################### Config ####################################
# Database name, application username and password
DATABASENAME=gezi_db
DBUSERNAME=geziadmin
DBPASSWORD=secret

################################## Configuration ###############################
# Execute one SQL statement as the postgres superuser.
run_sql() {
    sudo -u postgres psql -c "$1"
}

# Create database and user for Django
run_sql "CREATE DATABASE $DATABASENAME;"
run_sql "CREATE USER $DBUSERNAME WITH PASSWORD '$DBPASSWORD';"

# Connection parameters required by Django
run_sql "ALTER ROLE $DBUSERNAME SET client_encoding TO 'utf8';"
run_sql "ALTER ROLE $DBUSERNAME SET default_transaction_isolation TO 'read committed';"
run_sql "ALTER ROLE $DBUSERNAME SET timezone to 'UTC';"

# Grant the application user access to the database
run_sql "GRANT ALL PRIVILEGES ON DATABASE $DATABASENAME TO $DBUSERNAME;"
| true
|
4a8541a0bc8a8344955328ca8434a71ffc1c8280
|
Shell
|
imarin2/FlatCAM2FABtotum
|
/post_process.sh
|
UTF-8
| 438
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# $1 => file_in (FlatCAM-generated gcode)
# $2 => rpm (spindle speed inserted after M03)
#
# Post-processes FlatCAM gcode for the FABtotum: appends feed rates to rapid
# (G00) and cutting (G01) moves, strips unsupported codes (G21/G94/F/G04 P1),
# inserts spindle spin-up/spin-down dwells around M03/M05, spaces out Y/Z
# words, drops blank lines and normalises G00/G01 to G0/G1.
# Output goes next to the input as *_postprocessed.gcode.
feed_rapids='F10000'
# Cutting feed: the F line(s) found in the input. NOTE(review): if the file
# contains several F lines this captures all of them, exactly as the original
# did -- confirm inputs carry a single feed line.
feed_tool=$(grep '^F' "$1")
sed "/^G00/ s/$/ $feed_rapids/" "$1" | sed "/^G01/ s/$/ $feed_tool/" | sed "/^G21/ d" | sed "/^G94/ d" | sed "/^F/ d" | sed "/^G04 P1/ d" | sed "s/M03/M03 S$2\nG4 S3/g" | sed "s/M05/G4 S5\nM05/g" | sed 's|Y| Y|g' | sed 's|Z| Z|g' | sed -e '/^$/ d' | sed 's/G00/G0/g' | sed 's/G01/G1/g' > "$(echo "$1" | sed 's/.gcode/_postprocessed.gcode/g')"
| true
|
6b663cffa24e24ec1a7c9720e312d0f2216e2018
|
Shell
|
alloy-d/dotfiles
|
/.gnupg/switch-gpg-smart-card.sh
|
UTF-8
| 1,596
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
set -uexo pipefail
# GnuPG home directory (overridable via the environment).
GNUPGHOME=${GNUPGHOME-"${HOME}/.gnupg"}
# $1: operation (clear/save/restore); $2: card name, defaults to NFC.
OPERATION="${1-}"
CARD="${2-NFC}"
# Keygrips whose stub files are managed, and the directory where gpg
# keeps the per-key stub files.
KEYGRIPS="AA834B25B3B0F86AE7D7AD04C17C60EBA883461B 8DF7BE7931EB269AD310EBD70817BA7DC310ABFD 8F001E2055E5D202AB32A7D3216499FF133331AE"
KEY_DIR="${GNUPGHOME}/private-keys-v1.d"
# Print the help text to stdout (tracing is turned off first so the
# `set -x` from the prelude does not clutter the output).
usage() {
  set +x
  cat <<EOF
This is a utility for managing key stubs used by GPG.
It is useful when using the same keys with multiple smart cards.

Usage: ${0} operation [card]

Examples:

  ${0} clear
    clears cached key stubs

  ${0} save NFC
    saves the current key stubs to name 'NFC'

  ${0} restore NFC
    restores the key stubs with name 'NFC'

EOF
}
# An operation argument is mandatory; show help and fail without one.
if [ "${OPERATION:-empty}" = "empty" ]; then
  usage
  exit 1
fi
# Abort the script unless $CARD names one of the known smart cards.
validate_card() {
  case "$CARD" in
    nano|NFC|5C-NFC|5C-nano)
      ;;
    *)
      echo "Sorry, never heard of card ${CARD}."
      exit 1
      ;;
  esac
}
if [ "${OPERATION}" = "clear" ]; then
  # Drop the cached stubs so gpg re-learns them from the inserted card.
  for grip in $KEYGRIPS; do
    rm "${KEY_DIR}/${grip}.key"
  done
elif [ "${OPERATION}" = "save" ]; then
  validate_card
  echo "Saving cached keys for ${CARD}..."
  for grip in $KEYGRIPS; do
    cp "${KEY_DIR}/${grip}.key" "${KEY_DIR}/${grip}.key.${CARD}"
  done
elif [ "${OPERATION}" = "restore" ]; then
  validate_card
  echo "Restoring cached keys for $CARD..."
  for grip in $KEYGRIPS; do
    cp "${KEY_DIR}/${grip}.key.${CARD}" "${KEY_DIR}/${grip}.key"
  done
else
  # BUG FIX: an unknown operation previously printed a message and exited 0,
  # so callers could not detect the typo. Report to stderr and fail.
  echo "I don't know how to do operation '${OPERATION}'." >&2
  usage
  exit 1
fi
| true
|
b9504eb8618e1fa00c7719991eb1229046ce6d9b
|
Shell
|
toxicafunk/s2i-scala-container
|
/2.12.10/s2i/bin/run
|
UTF-8
| 737
| 3.765625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
source /opt/app-root/etc/generate_container_user

set -e

# Jar to launch: honor $APP_FILE when set, otherwise auto-detect below.
application_jar_file="$APP_FILE"

if [ -z "$application_jar_file" ]
then
  # if no app file has been specified, look to see if there is a single
  # jar file in the app root. if there is a single jar file, it gets run
  # otherwise raise an error and exit the run script.
  # BUG FIX: the -name pattern must be quoted; an unquoted *.jar is
  # glob-expanded by the shell when jar files exist in the current
  # directory, corrupting the find expression.
  # NOTE(review): word-splitting into the array still breaks on jar names
  # containing whitespace, as in the original -- confirm that cannot occur.
  jar_files=( $(find . -maxdepth 1 -name '*.jar') )
  if [ ${#jar_files[*]} = 1 ]
  then
    application_jar_file=${jar_files[0]}
  else
    echo "No single application jar file could be detected and APP_FILE was not specified."
    echo "--> Exiting application ..."
    exit 1
  fi
fi

echo "--> Starting application ..."
# APP_ARGS is intentionally unquoted so it word-splits into several args.
java -jar $application_jar_file $APP_ARGS
| true
|
ac9436d4a7297e2c9816ef6997ecdab049bf78a4
|
Shell
|
thomasgransden/ml4pg
|
/test/runner.sh
|
UTF-8
| 338
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# Run the ML4PG test suites under Emacs from $ML4PG_HOME.
# TESTS may be overridden by the caller; default to both suites.
: "${TESTS:=coq ssreflect}"

cd "$ML4PG_HOME" || {
    echo "Couldn't cd to '$ML4PG_HOME'" >> /dev/stderr
    exit 1
}

for TEST_SUITE in $TESTS; do
    export TEST_SUITE
    echo "Running $TEST_SUITE tests"
    # Filter out Emacs's noisy "Loading ..." progress lines.
    emacs --quick --debug-init --script test/runner.el 2>&1 | grep -v "^Loading.*\.\.\.$"
done
| true
|
a72f04cab7ee9c783766edbe09f0f33810c55a8b
|
Shell
|
millerlogic/cluster-deploy
|
/roles/hosts/service.sh
|
UTF-8
| 571
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#ROLE_USAGE: auto # Adds your cluster's config/hosts file to the remote /etc/hosts file.

# Directory containing this script (kept for parity with sibling role scripts).
SDIR=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")

if [ "$1" = "install" ]; then
  (
    # Keep every line that is not part of a previous auto-generated block,
    # then re-append the cluster's hosts, each tagged with #auto#.
    fgrep -v '#auto#' /etc/hosts
    echo "# BEGIN - Do not edit this block of hosts or the #auto# comments"
    # BUG FIX: IFS= and -r keep leading whitespace and backslashes in host
    # entries intact; the bare `read line` mangled both.
    while IFS= read -r line; do
      echo "$line #auto#"
    done < "$CLUSTER_CONFIG_DIR/hosts"
    echo "# END - Do not edit this block of hosts or the #auto# comments"
  ) > "$LOCAL_DATA_DIR/newhosts"
  # Keep a copy of the previous hosts file before swapping in the new one.
  cp -f /etc/hosts "$LOCAL_DATA_DIR/oldhosts"
  cp -f "$LOCAL_DATA_DIR/newhosts" /etc/hosts
fi
| true
|
dfbfee383cc1a7e32dcceb5c29abc800356b3abf
|
Shell
|
lasanjin/dotfiles
|
/.scripts/gs
|
UTF-8
| 336
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
#git status -$@ for all repositories in $repdir
# Run `git status <flags>` for every git repository directly under $repdir,
# printing each repository's name in blue first.
gs() {
    # No flags given: print usage and succeed (matches original behaviour).
    if [ -z "$1" ]; then
        echo "gs [-s|-v|...]"
        return 0
    fi
    # BUG FIX: "$@" and "$repdir" are quoted so flags and paths containing
    # whitespace survive word splitting (the originals were unquoted).
    find "$repdir" -maxdepth 2 -name ".git" \
        -execdir sh -c '(echo "\033[94m"${PWD##*/}"\033[0m")' \; \
        -execdir git status "$@" \; \
        -exec echo \; 2>/dev/null
}
| true
|
fb48446b86b7fbd53fd8694cb0b76142ebcb28e8
|
Shell
|
cms-sw/cmssw
|
/CondCore/SiPixelPlugins/test/testSiPixelTemplateDBObject.sh
|
UTF-8
| 3,391
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash

# Render the SiPixel template payload-inspector plots for a fixed IOV
# (run 326083) into $W_DIR/plots_Template. Requires the CMSSW environment
# available on /afs.

# Save current working dir so img can be outputted there later
W_DIR=$(pwd)

# Set SCRAM architecture var
SCRAM_ARCH=slc7_amd64_gcc900
export SCRAM_ARCH

source /afs/cern.ch/cms/cmsset_default.sh
eval $(scram run -sh)

# Go back to original working directory
cd "$W_DIR"

# Start from an empty output directory
if [ -d "$W_DIR/plots_Template" ]; then
    rm -fr "$W_DIR/plots_Template"
fi
mkdir "$W_DIR/plots_Template"

# Run one payload-inspector plot. When a target file name is given, the
# produced png is moved into the output directory under that name; the
# first (smoke-test) plot passes an empty target and leaves its png behind,
# exactly like the original script.
plot_and_save() {
    local plugin=$1
    local plot=$2
    local tag=$3
    local target=$4

    getPayloadData.py \
        --plugin "$plugin" \
        --plot "$plot" \
        --tag "$tag" \
        --time_type Run \
        --iovs '{"start_iov": "326083", "end_iov": "326083"}' \
        --db Prod \
        --test

    if [ -n "$target" ]; then
        mv *.png "$W_DIR/plots_Template/$target"
    fi
}

plot_and_save pluginSiPixelTemplateDBObject_PayloadInspector plot_SiPixelTemplateDBObjectTest   SiPixelTemplateDBObject38Tv3_express ""
plot_and_save pluginSiPixelTemplateDBObject_PayloadInspector plot_SiPixelTemplateIDsBPixMap     SiPixelTemplateDBObject38Tv3_express IDsBPixMap.png
plot_and_save pluginSiPixelTemplateDBObject_PayloadInspector plot_SiPixelTemplateIDsFPixMap     SiPixelTemplateDBObject38Tv3_express IDsFPixMap.png
plot_and_save pluginSiPixelTemplateDBObject_PayloadInspector plot_SiPixelTemplateLABPixMap      SiPixelTemplateDBObject38Tv3_express LABPixMap.png
plot_and_save pluginSiPixelTemplateDBObject_PayloadInspector plot_SiPixelTemplateLAFPixMap      SiPixelTemplateDBObject38Tv3_express LAFPixMap.png
plot_and_save pluginSiPixelTemplateDBObject_PayloadInspector plot_SiPixelTemplateHeaderTable    SiPixelTemplateDBObject38Tv3_express HeaderTable.png
plot_and_save pluginSiPixelTemplateDBObject_PayloadInspector plot_SiPixelTemplateTitles_Display SiPixelTemplateDBObject38Tv3_express HeaderTitles.png
plot_and_save pluginSiPixelTemplateDBObject_PayloadInspector plot_SiPixelTemplateQScaleMap      SiPixelTemplateDBObject38Tv3_express QScaleMap.png
plot_and_save pluginSiPixel2DTemplateDBObject_PayloadInspector plot_SiPixel2DTemplateHeaderTable SiPixel2DTemplateDBObject_38T_v1_express 2DHeaderTable.png
| true
|
0b6092aa0a077e09f21bc6a46a3259c06c7d89b9
|
Shell
|
abahturin/rsha
|
/mysql-server_install.sh
|
UTF-8
| 316
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install mysql-server on Debian/Ubuntu unless it is already present.
# dpkg-query reports the package status; grep -c yields 1 when installed,
# 0 otherwise (including when the package is unknown).
pkg_installed=$(dpkg-query -W -f='${Status}' 'mysql-server' 2>/dev/null | grep -c "ok installed")
case "$pkg_installed" in
  0)
    echo "Paigaldame mysql-serveri"
    apt-get install -y mysql-server
    ;;
  1)
    echo "mysql-server on paigaldatud"
    # service mysql-server status
    ;;
esac
| true
|
59ead21ebf28c15f84d094a59b69751381af7933
|
Shell
|
Confucius-Mencius/third_party
|
/build/build_aws_sdk_cpp.sh
|
UTF-8
| 1,881
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
###############################################################################
# author: BrentHuang (guang11cheng@qq.com)
###############################################################################
# Build and install the AWS SDK for C++ (s3, transfer and sts clients only).
# Relies on variables defined by the sourced helper scripts below, e.g.
# BUILD_TYPE, CMAKE_GENERATOR, CMAKE_BUILD_TYPE_VALUE, AWS_SDK_CPP_SRC_DIR,
# AWS_SDK_CPP_INSTALL_DIR, RUN_MAKE_CMD and RUN_INSTALL_CMD.
SCRIPT_PATH=$(cd `dirname $0`; pwd)
. ${SCRIPT_PATH}/build_type.sh
. ${SCRIPT_PATH}/common.sh
echo "build aws sdk cpp..."
cd ${AWS_SDK_CPP_SRC_DIR}
# Drop -Werror from the SDK's warning flags so warnings do not abort the build.
sed -i -e 's!list(APPEND AWS_COMPILER_WARNINGS "-Wall" "-Werror" "-pedantic" "-Wextra")!list(APPEND AWS_COMPILER_WARNINGS "-Wall" "-pedantic" "-Wextra")!' ./cmake/compiler_settings.cmake
# Always start from a clean per-build-type build directory.
sudo rm -rf ${BUILD_TYPE}_build
mkdir ${BUILD_TYPE}_build
cd ${BUILD_TYPE}_build
# Headers are installed during the cmake step, so sudo is required; the
# subsequent make must be run with sudo as well.
sudo cmake -G "${CMAKE_GENERATOR}" -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE_VALUE} \
-DTARGET_ARCH=LINUX -DBUILD_ONLY="s3;transfer;sts" \
-DCMAKE_INSTALL_PREFIX=${AWS_SDK_CPP_INSTALL_DIR} ..
# Alternative invocations with explicitly pinned openssl/curl, kept for
# reference:
# sudo cmake -G "${CMAKE_GENERATOR}" -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE_VALUE} \
# -DOPENSSL_CRYPTO_LIBRARY=${OPENSSL_INSTALL_DIR}/lib/libcrypto.so \
# -DOPENSSL_SSL_LIBRARY=${OPENSSL_INSTALL_DIR}/lib/libssl.so \
# -DOPENSSL_INCLUDE_DIR=${OPENSSL_INSTALL_DIR}/include \
# -DCURL_LIBRARY=${CURL_INSTALL_DIR}/lib/libcurl.so \
# -DCURL_INCLUDE_DIR=${CURL_INSTALL_DIR}/include \
# -DBUILD_ONLY="s3;transfer;sts" -DENABLE_TESTING=OFF -DAUTORUN_UNIT_TESTS=OFF \
# -DCMAKE_INSTALL_PREFIX=${AWS_SDK_CPP_INSTALL_DIR} ..
# sudo cmake -G "${CMAKE_GENERATOR}" -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE_VALUE} -DOPENSSL_ROOT_DIR=${OPENSSL_INSTALL_DIR} \
# -DCURL_INCLUDE_DIR=${CURL_INSTALL_DIR}/include -DCURL_LIBRARY=${CURL_INSTALL_DIR}/lib \
# -DBUILD_ONLY="s3;transfer;sts" -DENABLE_TESTING=OFF -DAUTORUN_UNIT_TESTS=OFF \
# -DTARGET_ARCH=LINUX -DCMAKE_INSTALL_PREFIX=${AWS_SDK_CPP_INSTALL_DIR} ..
sudo ${RUN_MAKE_CMD}
${RUN_INSTALL_CMD}
| true
|
0d25fc757820f746a4b2e40f19dc90bf7070802e
|
Shell
|
miroslavvidovic/tmux-configuration
|
/setup-tmux.sh
|
UTF-8
| 1,826
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# -----------------------------------------------------------------------------
# Info:
# author: Miroslav Vidovic
# file: setup-tmux.sh
# created: 01.04.2016.-11:45:17
# revision: 20.08.2017.
# version: 1.3
# -----------------------------------------------------------------------------
# Requirements:
# tmux, git
# Description:
# Set up tmux, tmuxinator and tmux package manager
# - copy .tmux.conf to $HOME/.tmux.conf
# - copy .tmuxniator dir to $HOME/.tmuxinator
# - clone tpm repository to $HOME/.tmux/plugins/tpm
# Usage:
# setup-tmux.sh
# -----------------------------------------------------------------------------
# Script:
# Check if installed
# Return 0 when command $1 is available, 1 otherwise.
# BUG FIX: the original only silenced stderr, so `type` leaked lines like
# "tmux is /usr/bin/tmux" onto stdout for every successful check.
is_installed() {
  if ! type "$1" > /dev/null 2>&1; then
    return 1
  else
    return 0
  fi
}
# Install the tmux configuration file, provided tmux itself is present.
set_up_tmux() {
  if ! is_installed tmux; then
    echo "Tmux not installed. Install tmux first."
    return
  fi
  echo " => tmux configuration"
  # -i prompts before overwriting an existing ~/.tmux.conf
  cp -i tmux.conf ~/.tmux.conf
}
# Copy the tmuxinator project definitions into place, provided tmuxinator
# itself is present.
set_up_tmuxinator() {
  if ! is_installed tmuxinator; then
    echo "Tmuxinator not installed. Install tmuxinator first."
    return
  fi
  mkdir -p "$HOME"/.tmuxinator
  echo " => tmuxinator configuration"
  cp -r tmuxinator/* "$HOME"/.tmuxinator
}
# Clone or update the tmux package manager
# Clone the tmux package manager, or pull the latest changes when the
# repository is already present. Requires git.
set_up_tpm() {
  local tpm_repo="$HOME/.tmux/plugins/tpm"
  if ! is_installed git; then
    echo "Git is not installed install git first."
    return
  fi
  if cd "$tpm_repo"; then
    echo " => updating tpm"
    git pull
  else
    echo " => installing tpm"
    git clone https://github.com/tmux-plugins/tpm "$tpm_repo"
  fi
}
# Copy the bundled theme files into ~/.tmux/themes.
set_up_themes() {
  local theme_dir="$HOME/.tmux/themes"
  echo " =>installing themes"
  mkdir -p "$theme_dir"
  cp -r themes/* "$theme_dir"
}
# Run every setup step in order and report completion.
main() {
set_up_tmux
set_up_tmuxinator
set_up_themes
set_up_tpm
echo "Done"
}
main
exit 0
| true
|
dd9500baddf0018202af6a3f8c3ad1c73afafc42
|
Shell
|
priestd09/linuxScripts
|
/postgresql/postgreBackup.sh
|
UTF-8
| 800
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# Nightly PostgreSQL backup: gzip-dump every database into $backupDir and
# prune backups older than $numberOfDaysToKeep days.
backupDir="/backup/postgresql"
numberOfDaysToKeep=90
# Dump every non-template database and prune old backups.
function main()
{
# Enumerate databases: first column of `psql -l`, whitespace and blank
# lines stripped, running as the postgres system user. Word-splitting of
# $databases in the for-loop below is intentional.
databases=`su --login --command "psql -l -t | cut -d'|' -f1 | sed -e 's/ //g' -e '/^$/d'" postgres`
for database in $databases; do
# Skip the two template databases; everything else is dumped.
if [ "$database" != "template0" ] && [ "$database" != "template1" ]; then
makeFileName
echo Dumping $database to $fileName
backup
deleteOldBackup
fi
done
}
# Dump the current $database and gzip it as the postgres user; tee writes
# $fileName with this shell's privileges (postgres cannot write $backupDir)
# while discarding stdout.
function backup()
{
mkdir -p $backupDir
su --login --command "pg_dump $database | gzip " postgres | tee $fileName > /dev/null
}
# Remove backup files older than the retention window ($numberOfDaysToKeep
# days). Expansions are quoted so paths with whitespace cannot word-split
# (the original relied on unquoted expansion).
function deleteOldBackup()
{
    find "$backupDir" -type f -prune -mtime "+$numberOfDaysToKeep" -exec rm -f {} \;
}
function makeFileName()
{
fileName=$backupDir/postgresql.`hostname`.`date +%Y-%m-%d+%H.%m`.$database.gz
}
main
| true
|
ea971ce368d74d98292545ba4656848ba70b8153
|
Shell
|
NealSCarffery/repo
|
/pypy-hg-beaker/PKGBUILD
|
UTF-8
| 1,547
| 2.734375
| 3
|
[] |
no_license
|
# Maintainer: Yichao Yu <yyc1992@gmail.com>
# Shared name/ABI tags for the pypy and pypy3 variants built from one base.
_pypyname=pypy-hg
_pypyabi=26
_pypy3name=pypy3-hg
_pypy3abi=25
pkgbase=${_pypyname}-beaker
pkgname=(${_pypy3name}-beaker ${_pypyname}-beaker)
pkgver=1.6.4
pkgrel=7
arch=('any')
license=('custom')
pkgdesc="Caching and sessions WSGI middleware for use with web applications and stand-alone Python scripts and applications, pypy version."
url="http://beaker.groovie.org/"
makedepends=(pypy-setuptools pypy3-${_pypy3abi}-setuptools sqlite
pypy3-${_pypy3abi})
source=("http://cheeseshop.python.org/packages/source/B/Beaker/Beaker-${pkgver}.tar.gz")
md5sums=('c2e102870ed4c53104dec48ceadf8e9d')
build() {
# Keep two copies of the source: Beaker-$pkgver for pypy3 and
# python2-Beaker-$pkgver for pypy.
cp -r Beaker-${pkgver} python2-Beaker-${pkgver}
(cd Beaker-${pkgver}
# Repoint the pbkdf2 helper's shebang at the matching interpreter.
sed -i "s#/usr/bin/python#/usr/bin/pypy3#" beaker/crypto/pbkdf2.py
pypy3 setup.py build)
(cd python2-Beaker-${pkgver}
sed -i "s#/usr/bin/python#/usr/bin/pypy#" beaker/crypto/pbkdf2.py
pypy setup.py build)
}
# Package the pypy3 variant; provides/conflicts with the non-hg package names.
package_pypy3-hg-beaker() {
depends=(pypy3-${_pypy3abi})
provides=(pypy3-beaker=${pkgver}
pypy3-${_pypy3abi}-beaker=${pkgver})
conflicts=(pypy3-beaker)
cd Beaker-${pkgver}
# Install into the package root and ship the license file.
pypy3 setup.py install --root="${pkgdir}" --optimize=1
install -D -m644 LICENSE "${pkgdir}/usr/share/licenses/pypy3-beaker/LICENSE"
}
# Package the pypy (python2) variant from its own source copy.
package_pypy-hg-beaker() {
depends=(pypy)
provides=(pypy-beaker=${pkgver})
conflicts=(pypy-beaker)
cd python2-Beaker-${pkgver}
# Install into the package root and ship the license file.
pypy setup.py install --root="${pkgdir}" --optimize=1
install -D -m644 LICENSE "${pkgdir}/usr/share/licenses/pypy-beaker/LICENSE"
}
| true
|
944fea73d717cff0a7814cbc2c7d32fa146fb5c4
|
Shell
|
VietHTran/BashLibrary
|
/QuickTest.sh
|
UTF-8
| 344
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Quick way to compile and check error of simple C program:
# compiles $1 to ./test, runs it, and memchecks it under valgrind.
if [[ -z "$1" ]]; then
    echo "Please provide filename as argument in order to perform test"
    exit
fi
# BUG FIX: -f so a missing previous binary does not print an error.
rm -f test
# BUG FIX: success is now detected from gcc's exit status instead of
# parsing `ls` output, and "$1" is quoted so filenames with spaces work.
if gcc "$1" -o test; then
    echo "Compile Successfully"
    ./test
    valgrind --tool=memcheck ./test
else
    echo "Compile Failed"
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.