blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9deb7644a6541940fc2eba00972dc674aa12a82d
|
Shell
|
4a256b6b3e7t3e8b7t9q7t/samba4-ads
|
/home/$USER/.bash_aliases
|
UTF-8
| 1,305
| 3.40625
| 3
|
[] |
no_license
|
# Bash aliases/functions for working inside GNU screen, LXC containers and
# sudo shells. Every function is marked readonly so it cannot be shadowed.

# acl [path] [getfacl-args...] - show the POSIX ACL of a path (default: cwd).
acl(){ /usr/bin/getfacl "${1-.}" "${@:2}";}; readonly -f acl
# Emit a 'screen -S "main" -X screen <args> --' command prefix when a usable
# "main" session exists; callers eval the output so their command runs in a
# new window of that session (or runs plainly when no session is found).
inscreen(){
screen -q -ls
# with -q, `screen -ls` encodes session information in its exit status
# (see screen(1)); > 10 is treated here as "a session exists".
if [ $? -gt 10 ] && screen -S "main" -X select . 2> /dev/null 1>&2 ; then
printf 'screen -S "main" -X screen'
printf ' "%s"' "$@"
printf ' --'
fi
}; readonly -f inscreen
alias lld='ls -ld'
# xat [container] [cmd...] - attach to an LXC container (default dc1) in a
# new screen window; the escaped quotes defer expansion to the eval'd text.
xat(){
eval $(inscreen -t "LXC:$*") sudo lxc-attach -n \"\${1:-dc1}\" -- sudo -i \"\${@:2}\"
}; readonly -f xat
alias xsc='screen -aDR "main"'
# xsu [user] - open a login shell as the given user, or a root shell that
# keeps the current PATH when no user is named.
xsu(){
if [ "$1" ]; then
eval $(inscreen -t "\\\$ |shell($1):") sudo -u "\$1" -i
else
eval $(inscreen -t "# |sudo($USER):") sudo PATH="\$PATH" -Es
fi
}; readonly -f xsu
# Shortcut: shell as the virtualbox user.
xvb(){ xsu virtualbox;}; readonly -f xvb
# Install __bash_prompt as PROMPT_COMMAND, saving the original PS1 and
# PROMPT_COMMAND once so the hook can re-apply them on every prompt.
__set_prompt()
{
test "$PS_ORIG" || PS_ORIG="$PS1"
test "$PC_ORIG" || eval "PC_ORIG='$PROMPT_COMMAND'"
PROMPT_COMMAND=__bash_prompt
}; readonly -f __set_prompt
# Prompt hook: prefix PS1 with the last command's exit code (in red) when it
# was non-zero, then run the saved original PROMPT_COMMAND.
__bash_prompt()
{
local ERRORLEVEL=$?
if [ $ERRORLEVEL != 0 ]; then
ERRORLEVEL="\n[$(tput setaf 1)$ERRORLEVEL$(tput sgr0)]"
else
ERRORLEVEL=
fi
eval "$PC_ORIG"
[ "$PS_ORIG" ] && PS1="$ERRORLEVEL$PS_ORIG"
}; readonly -f __bash_prompt
| true
|
bec34aba3f02de49e1e4374ff5d85274d28e53de
|
Shell
|
andreyvpng/dot
|
/i3wm/.config/i3/scripts/i3-get-color
|
UTF-8
| 100
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# i3-get-color - print the value of the X resource whose name matches $1.
# Usage: i3-get-color <resource-name-pattern>
get_color() {
# Fix: the pattern was unquoted (word-splitting/globbing, SC2086) and the
# pipeline was wrapped in a useless echo "$(...)" (SC2005); run it directly
# and pass the pattern after -- so patterns starting with '-' are safe.
xrdb -query -all | grep -- "$1" | awk '{print $2}'
}
get_color "$1"
| true
|
5e4012f4b7e0b18dd34eb10a5fa46b06e76c7e1f
|
Shell
|
dkorunic/ibm-ds-rrdgraph
|
/update-rrd.sh
|
UTF-8
| 2,028
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/sh
# constants and other
SAN_NAME="my-DS3300-SAN"
DS_CMD="SMcli -n $SAN_NAME -c \"show allLogicalDrives performanceStats;\""
RRD_FILE_PATH="rrd"
AWK_CMD='BEGIN { FS="," } /CONTROLLER|Logical Drive/ { gsub(/\"/, ""); gsub(/[ \t]+/, "_"); gsub(/CONTROLLER_IN_SLOT/, "CTRL"); gsub(/Logical_Drive/, "LUN"); print $1, $3, $4, $5, $7 }'
# time constants (don't touch pls)
HEARTBEAT=300
EPOCH=$(date +%s)
# Round the timestamp down to the heartbeat boundary. POSIX arithmetic
# expansion replaces the two expr(1) forks of the original.
UPD=$(( EPOCH / HEARTBEAT * HEARTBEAT ))
UPD_PREV=$(( UPD - HEARTBEAT ))
# Create the RRD database for one array/LUN, started one step in the past so
# the first update (at $UPD) is accepted.
# $1 = file
# $2 = calculated secs since epoch
create_rrd() {
# Four gauges, one per SAN metric; the percentages are bounded 0-100, the
# throughput gauges unbounded (U). RRAs keep averaged and peak values at
# increasing consolidation intervals.
rrdtool create "$1" \
--start "$2" \
--step $HEARTBEAT \
"DS:read_percent:GAUGE:$HEARTBEAT:0:100" \
"DS:cache_hit_percent:GAUGE:$HEARTBEAT:0:100" \
"DS:current_kbps:GAUGE:$HEARTBEAT:0:U" \
"DS:current_iops:GAUGE:$HEARTBEAT:0:U" \
"RRA:AVERAGE:0.5:1:300" \
"RRA:AVERAGE:0.5:6:700" \
"RRA:AVERAGE:0.5:24:775" \
"RRA:AVERAGE:0.5:288:797" \
"RRA:MAX:0.5:1:300" \
"RRA:MAX:0.5:6:700" \
"RRA:MAX:0.5:24:775" \
"RRA:MAX:0.5:288:797"
return $?
}
# Update (creating on demand) the RRD for one array/LUN.
# $1 = file
# $2 = absolute value (gauge) for read percentage
# $3 = absolute value (gauge) for cache hit percentage
# $4 = absolute value (gauge) for current kb/s
# $5 = absolute value (gauge) for current io/s
update_rrd() {
# First sighting of this device: create the database dated one heartbeat
# back so the update below is not rejected as too early.
if [ ! -e "$1" ]; then
if ! create_rrd "$1" $UPD_PREV; then
echo "FATAL: Cannot create RRD file $1. Exiting."
exit 1
fi
fi
if ! rrdtool update "$1" "$UPD:$2:$3:$4:$5"; then
echo "FATAL: Error updating RRD file $1. Exiting."
exit 1
fi
}
# Run SMcli and normalize its CSV output with $AWK_CMD into the
# whitespace-separated fields consumed by the read loop below.
fetch_data() {
if ! eval $DS_CMD | awk "$AWK_CMD"; then
echo "FATAL: SMcli returned error. Exiting."
exit 1
fi
}
# Main: one RRD file per controller/LUN reported by the SAN.
# NOTE(review): the while body runs in a pipeline subshell, so an `exit 1`
# inside update_rrd only terminates the subshell - confirm this is intended.
fetch_data | while read arry_name read_percent cache_hit_percent \
current_kbps current_iops; do
update_rrd "$RRD_FILE_PATH/$SAN_NAME-$arry_name.rrd" \
$read_percent $cache_hit_percent $current_kbps $current_iops
done
| true
|
b50d7bb437ca245044c701be9954cbcc9c5e406d
|
Shell
|
Bergem93/linux_scripts
|
/sysinfo_page
|
UTF-8
| 852
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# sysinfo_page - A script that generates an HTML system-information page.
#### Constants
TITLE="My System Information for $HOSTNAME"
RIGHT_NOW=$(date +"%x %r %z")
TIME_STAMP="Updated on $RIGHT_NOW by $USER"
#### FUNCTIONS
# Placeholder section for system release information.
system_info()
{
echo "<h2>System release info</h2>"
echo "<p>Function not yet implemented</p>"
}
# Section: output of uptime(1) wrapped in <pre> tags.
show_uptime()
{
# Fix: the heading previously read "System upime".
echo "<h2>System uptime</h2>"
echo "<pre>"
uptime
echo "</pre>"
}
# Section: filesystem usage from df(1).
drive_space()
{
echo "<h2>Filesystem space</h2>"
echo "<pre>"
df
echo "</pre>"
}
# Section: per-user home directory usage, largest first.
home_space()
{
echo "<h2>Home directory space by user</h2>"
echo "<pre>"
echo "Bytes Directory"
du -s /home/* | sort -nr
echo "</pre>"
}
#### Main
# Emit the complete HTML page on stdout. Variables and the $(...) section
# functions expand inside the here-doc; the <<- form only strips leading TAB
# characters, so the body lines must stay flush left (or be tab-indented).
cat <<- _EOF_
<html>
<head>
<title>
$TITLE
</title>
</head>
<body>
<h1>$TITLE</h1>
<p>$TIME_STAMP</p>
$(system_info)
$(show_uptime)
$(drive_space)
$(home_space)
</body>
</html>
_EOF_
| true
|
62a5a4d7e6ba8e5694b12532fd0a5cc9e071efdf
|
Shell
|
nravid/netscaler
|
/ciphers.sh
|
UTF-8
| 564
| 3.75
| 4
|
[] |
no_license
|
#Usage ./ciphers.sh <server>:<port>
# Probe the target with every cipher OpenSSL knows and report which ones the
# server accepts.
SERVER=$1
DELAY=1
# openssl prints a ':'-separated list; turn it into whitespace-separated
# words. Note this is a plain string, not an array.
ciphers=$(openssl ciphers 'ALL:eNULL' | sed -e 's/:/ /g')
echo "Obtaining cipher list from $(openssl version)."
for cipher in $ciphers
do
printf 'Testing %s...' "$cipher"
# s_client exits after the empty stdin; stderr is captured too so
# handshake errors can be parsed below.
result=$(echo | openssl s_client -cipher "$cipher" -connect "$SERVER" 2>&1)
if [[ "$result" =~ ":error" ]] ; then
error=$(echo -n "$result" | cut -d':' -f6)
echo "NO ($error)"
else
if [[ "$result" =~ "Cipher is ${cipher}" || "$result" =~ "Cipher :" ]] ; then
echo YES
else
# Fix: message previously read "UNKNONW RESPONSE".
echo "UNKNOWN RESPONSE"
echo "$result"
fi
fi
sleep "$DELAY"
done
| true
|
1e334decf1621988e9e6be94fea51f41ccf2815f
|
Shell
|
bejayoharen/heroku-buildpack-go-revel
|
/bin/compile
|
UTF-8
| 7,717
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# usage: bin/compile <build-dir> <cache-dir> <env-dir>
set -eo pipefail
echo '---- > Downloading and installing ffmpeg'
# Prefix every stdin line with spaces for buildpack-style log output;
# -u keeps sed unbuffered so progress appears live.
indent() {
sed -u 's/^/       /'
}
BUILD_DIR=$1
CACHE_DIR=$2
ENV_DIR=$3
VENDOR_BIN_DIR="$BUILD_DIR/vendor/bin"
PROFILE_PATH="$BUILD_DIR/.profile.d/ffmpeg.sh"
PROFILE_PATH1="$BUILD_DIR/.profile.d/ffmpeg-10bit.sh"
PROFILE_PATH2="$BUILD_DIR/.profile.d/ffprobe.sh"
PROFILE_PATH3="$BUILD_DIR/.profile.d/ffserver.sh"
PROFILE_PATH4="$BUILD_DIR/.profile.d/qt-faststart.sh"
FFMPEG_BIN_URL="https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz"
if [ -f "$ENV_DIR/FFMPEG_BIN_URL" ]; then
FFMPEG_BIN_URL=$(cat $ENV_DIR/FFMPEG_BIN_URL)
fi
FFMPEG_FILE=${FFMPEG_BIN_URL##*/}
mkdir="mkdir -p"
case ${FFMPEG_FILE##*.} in
bz2) tar="tar xj" ;;
gz) tar="tar xz" ;;
xz) tar="tar x" ;;
*)
echo "Fail to uncompress $FFMPEG_FILE because only xz, gzip, bzip2 are supported." | indent
exit 1
;;
esac
download="curl -L -s --create-dirs -o"
extract="${tar} -C $VENDOR_BIN_DIR --strip-components 1 --wildcards -f"
echo "-----> Installing ffmpeg, ffmpeg-10bit, ffprobe, ffserver and qt-faststart ..."
$mkdir $VENDOR_BIN_DIR
$mkdir ${PROFILE_PATH%/*}
$mkdir ${PROFILE_PATH1%/*}
$mkdir ${PROFILE_PATH2%/*}
$mkdir ${PROFILE_PATH3%/*}
$mkdir ${PROFILE_PATH4%/*}
if [ ! -f "$CACHE_DIR/$FFMPEG_FILE" ]; then
echo "downloading ffmpeg: $FFMPEG_BIN_URL" | indent
$download $CACHE_DIR/$FFMPEG_FILE $FFMPEG_BIN_URL
#TODO sha256sum $CACHE_DIR/$FFMPEG_FILE
fi
echo "extacting ffmpeg: $FFMPEG_BIN_URL" | indent
echo "$extract $CACHE_DIR/$FFMPEG_FILE" | indent
$extract $CACHE_DIR/$FFMPEG_FILE
echo "exporting PATH" | indent
echo 'export PATH="$PATH:$HOME/vendor/bin"' >> $PROFILE_PATH
echo 'export PATH="$PATH:$HOME/vendor/bin"' >> $PROFILE_PATH1
echo 'export PATH="$PATH:$HOME/vendor/bin"' >> $PROFILE_PATH2
echo 'export PATH="$PATH:$HOME/vendor/bin"' >> $PROFILE_PATH3
echo 'export PATH="$PATH:$HOME/vendor/bin"' >> $PROFILE_PATH4
echo "-----> Downloading and Installing libsndfile"
# change to the the BUILD_DIR ($1)
## download the binary (-O) silently (-s)
#curl https://s3-eu-west-1.amazonaws.com/soundtracktor-public/libsndfile-1.0.28.tar.gz -s -O
## make a directory to untar (like unzip) the binary
#mkdir -p vendor/libsndfile
## untar the binary to the directory we want
#tar -C vendor/libsndfile -xvf libsndfile-1.0.28.tar.gz
#CFLAGS="$CFLAGS -I ${1}/vendor/libsndfile/lib/pkgconfig"
#export CFLAGS
#PKG_CONFIG_PATH="${1}"/vendor/libsndfile/lib/pkgconfig:"${PKG_CONFIG_PATH}"
#export PKG_CONFIG_PATH
#popd
echo "---- > Installing Go"
cd $1
pushd .
# Go releases for Darwin beginning with 1.2rc1
# have included more than one build, depending
# on the specific version of Mac OS X. Try to
# account for that, but don't try too hard.
# This doesn't affect Heroku builds, it's only
# for testing on Darwin systems.
# Print the platform suffix for a Go download filename.
# Darwin builds of Go from 1.2rc1 onward shipped per-OS-X-version archives;
# older releases (and every non-Darwin host) get no suffix.
platext() {
case $1 in
go1.0*|go1.1beta*|go1.1rc*|go1.1|go1.1.*)
# Pre-1.2 releases never had a platform suffix.
return
;;
esac
if [ "$(uname|tr A-Z a-z)" = darwin ]; then
printf %s -osx10.8
fi
}
# Go releases have moved to a new URL scheme
# starting with Go version 1.2.2. Return the old
# location for known old versions and the new
# location otherwise.
# Resolve the download URL for a Go release tarball.
# Releases up to 1.2.1 lived on Google Code; everything newer is served
# from storage.googleapis.com.
urlfor() {
ver=$1
file=$2
case $ver in
go1.0*|go1.1beta*|go1.1rc*|go1.1|go1.1.*|go1.2beta*|go1.2rc*|go1.2|go1.2.1)
printf '%s\n' "http://go.googlecode.com/files/$file"
;;
*)
printf '%s\n' "https://storage.googleapis.com/golang/$file"
;;
esac
}
mkdir -p "$1" "$2"
build=$(cd "$1/" && pwd)
cache=$(cd "$2/" && pwd)
buildpack=$(dirname $(dirname $0))
arch=$(uname -m|tr A-Z a-z)
if test $arch = x86_64
then arch=amd64
fi
plat=$(uname|tr A-Z a-z)-$arch
# Python
venv=$cache/venv
mkdir -p $cache/pip
python=python2.7
PATH=$buildpack/$plat/bin:$venv/bin:$PATH
virtualenv() {
python "$buildpack/vendor/virtualenv-1.11.6/virtualenv.py" "$@"
}
if test -f $build/.godir
then
name=$(cat $build/.godir)
ver=go$(cat $build/goversion)
else
echo >&2 " ! A .godir is required. For instructions:"
echo >&2 " ! http://mmcgrana.github.io/2012/09/getting-started-with-go-on-heroku"
exit 1
fi
echo $ver
file=${GOFILE:-$ver.$(uname|tr A-Z a-z)-amd64$(platext $ver).tar.gz}
url=${GOURL:-$(urlfor $ver $file)}
echo $url
if test -e $build/bin && ! test -d $build/bin
then
echo >&2 " ! File bin exists and is not a directory."
exit 1
fi
# Download and unpack the requested Go toolchain into the cache unless a
# previous build already left it there.
if test -d "$cache/$ver/go"
then
echo "-----> Using $ver"
else
rm -rf "$cache"/* # be sure not to build up cruft
mkdir -p "$cache/$ver"
# Fix: the original ran a bare `pushd` (which merely swaps the top two
# directory-stack entries and fails under `set -eo pipefail` with an
# empty stack) followed by a separate cd; push the target directly.
pushd "$cache/$ver"
echo -n "-----> Installing $ver..."
curl -sO "$url"
tar zxf "$file"
rm -f "$file"
echo " done"
popd
fi
mkdir -p $build/bin
mkdir -p $build/.go
cp -R $cache/$ver/go $build/.goroot
# echo gobin: $GOBIN
# echo goroot: $GOROOT
# echo gopath: $GOPATH
GOBIN=$build/bin export GOBIN
GOROOT=$build/.goroot export GOROOT
GOPATH=$build/.go export GOPATH
PATH=$GOROOT/bin:$PATH
# echo debugging...
# for i in $build $GOBIN $GOROOT $GOPATH
# do
# echo
# echo
# echo $i
# echo
# find $i
# done
# if ! (which hg >/dev/null && which bzr >/dev/null)
# then
# echo -n " Installing Virtualenv..."
# virtualenv --python $python --distribute --never-download --prompt='(venv) ' $venv > /dev/null 2>&1
# . $venv/bin/activate > /dev/null 2>&1
# echo " done"
# #echo -n " Installing Mercurial..."
# #pip install mercurial > /dev/null 2>&1
# #echo " done"
# #echo -n " Installing Bazaar..."
# #pip install bzr > /dev/null 2>&1
# #echo " done"
# fi
p=$GOPATH/src/$name
mkdir -p $p
cp -R $build/* $p
# allow apps to specify cgo flags and set up /app symlink so things like CGO_CFLAGS=-I/app/... work
env_dir="$3"
if [ -d "$env_dir" ]
then
ln -sfn $build /app/code
for cgo_opt in CGO_CFLAGS CGO_CPPFLAGS CGO_CXXFLAGS CGO_LDFLAGS
do
if [ -f "$env_dir/$cgo_opt" ]
then
export "$cgo_opt=$(cat "$env_dir/$cgo_opt")"
fi
done
fi
FLAGS=(-tags heroku)
if test -f "$env_dir/GO_GIT_DESCRIBE_SYMBOL"
then
git_describe=$(git describe --tags --always)
git_describe_symbol=$(cat "$env_dir/GO_GIT_DESCRIBE_SYMBOL")
FLAGS=(${FLAGS[@]} -ldflags "-X $git_describe_symbol $git_describe")
fi
#update godep
#echo "-----> updating/installing godep"
#go get -u github.com/golang/dep/cmd/dep
#export GO15VENDOREXPERIMENT=1
unset GIT_DIR # unset git dir or it will mess with goinstall
cd $p
echo '=== debug'
pwd
find .
echo '=== debug'
if test -e $build/vendor
then
echo "-----> Copying workspace"
#cp -R $(godep path)/* $GOPATH
#cp -R vendor/* $GOPATH/src
mv vendor/* $GOPATH/src
rm -r vendor
echo "-----> Running: go install -v ${FLAGS[@]} ./..."
go install -v "${FLAGS[@]}" ./...
else
echo "-----> Running: go get ${FLAGS[@]} ./..."
go get "${FLAGS[@]}" ./...
fi
echo "-----> adding ~/bin to path"
mkdir -p $build/.profile.d
echo 'PATH=$PATH:$HOME/bin' > $build/.profile.d/go.sh
echo "-----> installing revel"
mkdir $GOPATH/bin
go get github.com/revel/cmd/revel
echo "-----> building executable"
echo " this is just to launch faster than running revel run mashday prod $PORT"
echo " but it might take a few minutes now..."
if [ -d ./exec ] ; then
rm -r ./exec >> /dev/null
fi
if [ -d /tmp/mashday ] ; then
rm -r /tmp/mashday >> /dev/null
fi
$GOBIN/revel build mashday /tmp/mashday prod
mv /tmp/mashday $build/exec
echo "-----> Now we want to delete everything else big"
cd $1
rm -r .go/src
rm -r ./public
rm -r ./test-scripts
echo "-----> Displaying top 100 files and dirs with sizes"
find . -exec du -sk {} \; | sort -n | tail -100
| true
|
18493658f5a21a2e79e2c5fe4e6ce3102f33e1b2
|
Shell
|
froskos/vagrant-mean
|
/env/bootstrap/list.sh
|
UTF-8
| 247
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Announce the start of provisioning (blank line before and after).
printf '\n%s\n\n' "BUILD STARTS"
# Directory holding the per-package install scripts.
BUILD_PATH="/vagrant/env/build"
# Run each installer in turn: git, node, then mongodb.
for installer in git node mongodb; do
"$BUILD_PATH/$installer.sh"
done
| true
|
b6d9bc27acca65346259156683f5da053bea4293
|
Shell
|
Ghardo/shutdown-fsck
|
/fsck_shutdown
|
UTF-8
| 867
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# fsck_shutdown - compare each device's mount count against its maximum and
# run fsck when the maximum has been exceeded.
DEVICES="/dev/sda4 /dev/sdb4"
LOGFILE="testlog"
# Append a timestamped message to $LOGFILE.
function log2file {
echo "$(date +\[%a,%F\ %T\]) $1" >> "$LOGFILE"
}
for DEVICE in $DEVICES; do
log2file "### Checking ${DEVICE} ###"
MOUNTCOUNT=$(tune2fs -l "$DEVICE" | grep "Mount count" | awk '{print $3}')
MAXMOUNTCOUNT=$(tune2fs -l "$DEVICE" | grep "Maximum mount count" | awk '{print $4}')
# Fix: the original compared the counts with -eq here, which aborted
# exactly when mount count reached the maximum; the message shows it
# meant to detect tune2fs returning no data at all.
if [ -z "$MOUNTCOUNT" ] || [ -z "$MAXMOUNTCOUNT" ]; then
log2file "no information found for ${DEVICE}. aborted"
continue
fi
log2file "Mount count: ${MOUNTCOUNT}"
log2file "Max mount count: ${MAXMOUNTCOUNT}"
if [ "$MOUNTCOUNT" -gt "$MAXMOUNTCOUNT" ]; then
log2file "fsck needed on ${DEVICE}"
# touch succeeding is used as a proxy for "root fs mounted read-write";
# fsck on a mounted filesystem would be unsafe.
touch /etc/mtab
if [ $? -eq 0 ]; then
log2file "root fs is mounted. fsck aborted"
else
fsck -C0 -y -f "$DEVICE"
log2file "fsck done on ${DEVICE}"
fi
else
log2file "no fsck needed on ${DEVICE}"
fi
done
| true
|
028e707eec6b4ddb8a19244012aa3cdc430c9b42
|
Shell
|
guziakas/docker-coverity-scan-maven
|
/run-coverity.sh
|
UTF-8
| 1,647
| 3.734375
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
set -e
# Print usage information for this entry-point script.
printHelp() {
printf '%s\n' \
"Runs Coverity Scan for Maven project." \
"Usage: docker run onenashev/coverity-scan-maven <organization> <project> <scm_tag> <email> <token>" \
""
}
#TODO: Good parameter processing
if [ $# -eq 0 ]; then
printHelp
exit 0
fi
if [ $# -ne 5 ]; then
echo "ERROR: wrong number of arguments" >&2
printHelp
# Fix: the script previously fell through and kept running with the wrong
# argument count (set -e does not catch this); bail out instead.
exit 1
fi
ORGANIZATION=$1
PROJECT=$2
SCM_TAG=$3
EMAIL=$4
TOKEN=$5
# Verify parameters
if [ -z "${EMAIL}" ] ; then
echo "EMAIL is not specified"
exit 1;
fi
if [ -z "${TOKEN}" ] ; then
echo "TOKEN is not specified"
exit 1;
fi
if [ -z "${ORGANIZATION}" ] ; then
echo "ORGANIZATION is not specified"
exit 1;
fi
if [ -z "${PROJECT}" ] ; then
echo "PROJECT is not specified"
exit 1;
fi
if [ -z "${SCM_TAG}" ] ; then
echo "SCM_TAG is not specified"
exit 1;
fi
# Prepare build dir
mkdir build
cd build
# Checkout
# TODO: always determine commit and put it to Description
# TODO: better processing of changesets
git clone https://github.com/${ORGANIZATION}/${PROJECT}.git
cd ${PROJECT}
git checkout ${SCM_TAG}
# Run build with maven
cov-build --dir cov-int mvn -DskipTests=true -Dfindbugs.skip=true compile
# Prepare the submission archive
echo "Archiving cov-int.tgz"
tar czvf cov-int.tgz cov-int
# Upload to coverity
DESTINATION_URL="https://scan.coverity.com/builds?project=${ORGANIZATION}%2F${PROJECT}"
echo "Uploading results to ${DESTINATION_URL}"
curl --form token="${TOKEN}" \
--form email="${EMAIL}" \
--form file=@cov-int.tgz \
--form version="${SCM_TAG}" \
--form description="Automatic Coverity Scan build for ${SCM_TAG}" \
${DESTINATION_URL}
| true
|
87160c94a4d7aca4dcbf0a882744b3b7671bab8f
|
Shell
|
Epivalent/ampify
|
/environ/ampdev.sh
|
UTF-8
| 3,731
| 3.828125
| 4
|
[
"LicenseRef-scancode-public-domain",
"MS-PL",
"CC0-1.0",
"BSD-3-Clause"
] |
permissive
|
#! /bin/sh
# No Copyright (-) 2010 The Ampify Authors. This file is under the
# Public Domain license that can be found in the root LICENSE file.
# NOTE: This script has only been tested in the context of a modern Bash Shell
# on Ubuntu Linux and OS X. Any patches to make it work under alternative Unix
# shells, versions and platforms are very welcome!
if [[ "x$BASH_SOURCE" == "x" ]]; then
echo "Sorry, this only works under Bash shells atm. Patches welcome... =)"
exit
fi
_OS_NAME=$(uname -s | tr [[:upper:]] [[:lower:]])
# ------------------------------------------------------------------------------
# exit if we're not sourced and echo usage example if possible
# ------------------------------------------------------------------------------
if [ "x$0" == "x$BASH_SOURCE" ]; then
LSOF=$(lsof -p $$ 2> /dev/null | grep -E "/"$(basename $0)"$")
case $_OS_NAME in
darwin)
__FILE=$(echo $LSOF | sed -E s/'^([^\/]+)\/'/'\/'/1 2>/dev/null);;
linux)
__FILE=$(echo $LSOF | sed -r s/'^([^\/]+)\/'/'\/'/1 2>/dev/null);;
freebsd)
__FILE=$(echo $LSOF | sed -E s/'^([^\/]+)\/'/'\/'/1 2>/dev/null);;
*)
echo "ERROR: You need to source this script and not run it directly!";
exit
esac
echo
echo "Usage:"
echo
echo " source $__FILE"
echo
echo "You might want to add the above line to your .bashrc or equivalent."
echo
exit
fi
# ------------------------------------------------------------------------------
# try to determine the absolute path of the enclosing startup + root directory
# ------------------------------------------------------------------------------
cd "$(dirname $BASH_SOURCE)" || return $?
export AMPIFY_STARTUP_DIRECTORY=`pwd -P 2> /dev/null` || return $?
cd $OLDPWD || return $?
export AMPIFY_ROOT=$(dirname $AMPIFY_STARTUP_DIRECTORY)
# ------------------------------------------------------------------------------
# exit if $AMPIFY_ROOT is not set
# ------------------------------------------------------------------------------
if [ "x$AMPIFY_ROOT" == "x" ]; then
echo "ERROR: Sorry, couldn't detect the Ampify Root Directory."
return
fi
# ------------------------------------------------------------------------------
# extend the PATH
# ------------------------------------------------------------------------------
if [ "x$PATH" != "x" ]; then
export PRE_AMPDEV_PATH=$PATH
fi
export PATH=$AMPIFY_ROOT/environ:$AMPIFY_ROOT/src/codereview:$PATH
# ------------------------------------------------------------------------------
# try to figure out if we are inside an interactive shell or not
# ------------------------------------------------------------------------------
test "$PS1" && _INTERACTIVE_SHELL=true;
# ------------------------------------------------------------------------------
# the auto-completer for optcomplete used by the amp runner
# ------------------------------------------------------------------------------
# Bash programmable-completion hook for the optcomplete-based `amp` runner:
# re-invokes the command ($1) with OPTPARSE_AUTO_COMPLETE=1 and the current
# completion context exported, capturing its suggestions into COMPREPLY.
_amp_completion() {
COMPREPLY=( $( \
COMP_LINE=$COMP_LINE COMP_POINT=$COMP_POINT \
COMP_WORDS="${COMP_WORDS[*]}" COMP_CWORD=$COMP_CWORD \
OPTPARSE_AUTO_COMPLETE=1 $1 ) )
}
# ------------------------------------------------------------------------------
# set us up the bash completion!
# ------------------------------------------------------------------------------
if [ "x$_INTERACTIVE_SHELL" == "xtrue" ]; then
# first, turn on the extended globbing and programmable completion
shopt -s extglob progcomp
# register completers
complete -o default -F _amp_completion amp
complete -o default -F _amp_completion optcomplete-commands
# and finally, register files with specific commands
complete -f -X '!*.go' 5g 6g 8g
complete -f -X '!*.5' 5l
complete -f -X '!*.6' 6l
complete -f -X '!*.8' 8l
fi
| true
|
b8c0553fb1cd67552854ec9a49be213ad3665d1c
|
Shell
|
dream1986/you-get-ui
|
/you-get-ui
|
UTF-8
| 758
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
### * dream1986 * 20151216
### zenity-based GUI front-end for the you-get online-video downloader.
### Requires zenity, you-get and lxterminal (any other terminal also works).
### Bind this script to a keyboard shortcut: copy a video URL, press the
### shortcut to open window 1, paste the URL and press OK to download.
### Download target directory
dir=/home/dream/视频/
### Window 1: prompt for the video URL; OK starts the download, Cancel exits.
url=$(zenity --entry --title="you-get 视频下载" --text="输入在线视频地址:" --width=500)
if [[ $? == 1 ]] ;
then
exit
fi
### Open a new lxterminal window to show you-get's download progress.
lxterminal --title="you-get 正在下载视频:" -e you-get -o ${dir} ${url}
| true
|
228a0c6e9a442b99a7919459003eb899da293762
|
Shell
|
morimekta/generate_workspace
|
/generate_workspace.sh
|
UTF-8
| 3,956
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2016, Stein Eldar Johnsen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
location="${PWD}"
bazel=$(which bazel)
if [[ -z "${bazel}" ]]
then
echo "you need install bazel and make it available in the path before"
echo "running the ./generate_workspace.sh command. The bazel binary must"
echo "also reside within the bazel workspace (may be symlinked)."
exit 1
fi
if [[ -s ${bazel} ]]
then
bazel=$(readlink -f $bazel)
fi
bazel_workspace=${bazel%/bazel}
while [[ ! -f "${bazel_workspace}/WORKSPACE" ]]
do
bazel_workspace=${bazel_workspace%/*}
if [[ -z "$bazel_workspace" ]]
then
echo "ERROR: The bazel binary must be located within the bazel workspace."
echo
echo "run # ./compile.sh compile"
echo
echo "from within your bazel workspace, and make sure it is available via the"
echo "path. Symlinking the binary to a /bin folder is fine."
exit 1
fi
done
TMP_file=$(mktemp -t 'generate_workspace.XXXXXXXXXX')
POMS=( $(find | grep -v '^\./pom.xml' | grep '/pom.xml' | sed -e 's:/pom.xml$::' -e "s:^./:-m ${location}/:") )
echo "-- log file ${TMP_file}"
echo "## generate_workspace ${POMS[@]}"
cd ${bazel_workspace}
bazel run //src/tools/generate_workspace -- ${POMS[@]} 1> ${TMP_file} 2>&1 || exit 1
echo "-- done"
echo
# generate_workspace prints the produced WORKSPACE and BUILD paths as its
# last two lines; read them with tail directly (no useless cat).
WS=$(tail -n 2 "${TMP_file}" | head -n 1)
BUILD=$(tail -n 1 "${TMP_file}")
cd ${location}
if [[ ! -d third-party ]]
then
mkdir third-party
fi
echo "-- writing WORKSPACE"
if [[ -f third-party/common.WORKSPACE ]]
then
cat third-party/common.WORKSPACE > WORKSPACE
echo >> WORKSPACE
echo "# --- generated dependencies below this line ---" >> WORKSPACE
echo >> WORKSPACE
else
echo "# --- create a third-party/common.WORKSPACE file to add non-managed" > WORKSPACE
echo "# --- third-party dependencies to this file." >> WORKSPACE
echo >> WORKSPACE
fi
cat ${WS} | sed "s:${location}/::" >> WORKSPACE
echo "-- writing third-party/BUILD"
if [[ -f third-party/common.BUILD ]]
then
cat third-party/common.BUILD > third-party/BUILD
echo >> third-party/BUILD
echo "# --- generated dependencies below this line ---" >> third-party/BUILD
echo >> third-party/BUILD
else
echo "# --- create a third-party/common.BUILD file to add non-managed" > third-party/BUILD
echo "# --- third-party dependencies to this file." >> third-party/BUILD
echo >> third-party/BUILD
fi
cat ${BUILD} | sed "s:${location}/::" >> third-party/BUILD
exit 0
| true
|
a417a604ba98586e7e2c6f034b73cb49a1b27337
|
Shell
|
zergioz/crypto
|
/overclock.sh
|
UTF-8
| 819
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Apply fan, clock-offset and power-limit settings to up to five NVIDIA GPUs.
MY_WATT="120"
MY_CLOCK="150"
MY_MEM="800"
MY_FAN="85"
export DISPLAY=:0
# Graphics card 1 to 5; indices without a card are skipped.
for MY_DEVICE in 0 1 2 3 4
do
# Check if card exists; guard clause replaces the original nested if.
if ! nvidia-smi -i "$MY_DEVICE" >> /dev/null 2>&1; then
continue
fi
nvidia-settings -c :0 -a "[gpu:$MY_DEVICE]/GPUPowerMizerMode=1"
# Fan speed
nvidia-settings -c :0 -a "[gpu:$MY_DEVICE]/GPUFanControlState=1"
nvidia-settings -c :0 -a "[fan:$MY_DEVICE]/GPUTargetFanSpeed=$MY_FAN"
# Graphics clock
nvidia-settings -c :0 -a "[gpu:$MY_DEVICE]/GPUGraphicsClockOffset[3]=$MY_CLOCK"
# Memory clock
nvidia-settings -c :0 -a "[gpu:$MY_DEVICE]/GPUMemoryTransferRateOffset[3]=$MY_MEM"
# Set watt/powerlimit. This is also set in miner.sh at autostart.
sudo nvidia-smi -i "$MY_DEVICE" -pl "$MY_WATT"
done
printf '\n%s\n\n' "Done"
| true
|
2f9ef248ebca9219f3bfe589d7b51221d3bef37e
|
Shell
|
Ste74/aur-alucryd
|
/unsupported/gens-gs-ii-git/PKGBUILD
|
UTF-8
| 1,110
| 2.734375
| 3
|
[] |
no_license
|
# Maintainer: Maxime Gauduin <alucryd@archlinux.org>
# PKGBUILD for the git HEAD of Gens/GS II (Sega Genesis/CD/32X emulator).
pkgname=gens-gs-ii-git
pkgver=r2642.8454ae5
pkgrel=1
pkgdesc='A Sega Genesis/CD/32X emulator'
arch=('i686' 'x86_64')
url='http://segaretro.org/Gens/GS_II'
license=('GPL')
depends_i686=('glew' 'minizip' 'portaudio' 'qt4')
depends_x86_64=('lib32-glew' 'lib32-portaudio' 'lib32-qt4')
makedepends=('cmake' 'git' 'nasm' 'python')
makedepends_x86_64=('gcc-multilib')
conflicts=('gens-gs' 'gens-gs-ii')
source=('git+git://dusers.drexel.edu/srv/git/~korth/gens-gs-ii.git')
sha256sums=('SKIP')
# Version string: revision count plus short hash of the checked-out HEAD.
pkgver() {
cd gens-gs-ii
echo "r$(git rev-list --count HEAD).$(git rev-parse --short HEAD)"
}
build() {
cd gens-gs-ii
# Always configure from a clean build tree.
if [[ -d build ]]; then
rm -rf build
fi
mkdir build && cd build
# On x86_64 the package depends on lib32 libraries, so force a 32-bit build.
if [[ $CARCH == x86_64 ]]; then
export CC='gcc -m32'
export CXX='g++ -m32'
export PKG_CONFIG_PATH='/usr/lib32/pkgconfig'
fi
cmake .. \
-DCMAKE_BUILD_TYPE='Release' \
-DCMAKE_INSTALL_PREFIX='/usr'
make
}
# Install only the qt4 frontend binary.
package() {
cd gens-gs-ii/build
install -dm 755 "${pkgdir}"/usr/bin
install -m 755 src/gens-qt4/gens-qt4 "${pkgdir}"/usr/bin/
}
# vim: ts=2 sw=2 et:
| true
|
ab7f64ea3fa57e7742df9926e8fdeca12d169ba5
|
Shell
|
reorx/dotfiles
|
/bin/frgi
|
UTF-8
| 743
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# frgi - interactive ripgrep search front-end built on fzf.
if [[ "$1" == "-h" ]]; then
echo "Interactive rg search"
echo "Usage: frgi [PATTERN [PATH]]"
exit
fi
# Preview command; FZF_PREVIEW_CONTEXT_CMD / FZF_PREVIEW_CMD are expected to
# come from the environment (set elsewhere in these dotfiles).
declare preview="$FZF_PREVIEW_CONTEXT_CMD"
# -l: list matching files only (and use the plain preview command).
while getopts ':l' x; do
case "$x" in
l) list_files=1
preview="$FZF_PREVIEW_CMD"
;;
esac
done
shift $(( OPTIND - 1 ))
unset x OPTARG OPTIND
# Seed fzf with an initial rg run when a pattern was given; the "change"
# binding re-runs rg on every query edit, and Enter opens the selection in
# $EDITOR at the matched line ({1}=file, {2}=line via the ':' delimiter).
if [ -n "$1" ]; then rg --color=always -n -H ${list_files:+-l} "$1"; fi 2> /dev/null | fzf -d: \
--ansi \
--height 100% \
--query="$1" \
--color "hl:-1:underline,hl+:-1:underline:reverse" \
--phony \
--bind="change:reload:if [ -n {q} ]; then rg --color=always -n ${list_files:+-l} {q}; fi" \
--bind="enter:execute:$EDITOR +{2} {1}" \
--preview="[[ -n {1} ]] && $preview"
| true
|
6e2ea434144be427be8178a3089f1881bcc1eea6
|
Shell
|
xuerenlv/hihoCoder
|
/c++_interview/MyImplement/make_run.sh
|
UTF-8
| 91
| 2.734375
| 3
|
[] |
no_license
|
# make_run.sh - compile a single C++ source file ($1) into ./xhj, run it,
# then remove the binary.
cur_path=$PWD
# Remove a stale binary left over from a previous run.
if [ -f "$cur_path/xhj" ]
then
rm ./xhj
fi
# Fix: the original ran (and removed) ./xhj even when compilation failed,
# executing a stale or missing binary; gate the run on g++ succeeding.
if g++ "$1" -o xhj; then
./xhj
rm ./xhj
else
echo "compilation failed" >&2
exit 1
fi
| true
|
26834cf040b6519eb2e3fd3c3720ffb9967db7d7
|
Shell
|
lenik/stack
|
/deployment/node/sdu/sdu-security-util/hw
|
UTF-8
| 343
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Report the size of every local user's bash history, read as root.
echo Bash history size of users:
# NOTE(review): inside the single-quoted script below, the '^#' around the
# grep pattern actually closes and reopens the OUTER quote, leaving ^# as an
# unquoted word in the inner script; bash treats it literally so this works,
# but the quoting is fragile - confirm before editing.
sudo bash -c '
while IFS=: read username password uid gid fullname home shell; do
if [ -r "$home/.bash_history" ]; then
histsize=`grep -v '^#' "$home/.bash_history" | wc -l`
histsize="${histsize%% *}"
printf "%10d %s\n" $histsize $username
fi
done </etc/passwd
'
| true
|
07c46734ed23898e046a34c098dd8f51b1e8a229
|
Shell
|
yukishinonome/dotfiles
|
/.zshrc
|
UTF-8
| 1,757
| 2.875
| 3
|
[] |
no_license
|
# ~/.zshrc - aliases, git-aware prompt, history settings and peco search.
alias zshrc='vim ~/.zshrc'
alias szshrc='source ~/.zshrc'
alias vimrc='vim ~/.vimrc'
alias gs='git status'
alias gsd='git switch -d'
alias gc='git checkout'
alias gcp='git cherry-pick'
alias gb='git branch'
alias hg='history | grep'
alias b='bundle'
alias be='bundle exec'
alias k='kubectl'
alias ke='kubectl exec'
alias kcg='kubectl config get-contexts'
alias kgp='kubectl get pods'
alias cppwd='pwd | tr -d "\n" | pbcopy'
alias de='docker exec'
alias dpf='docker ps --format "table {{.ID}} {{.Names}}\t{{.Image}}\t{{.Status}}\t{{.Ports}}"'
# Prompt display (git-aware via git-prompt.sh / git-completion.zsh)
fpath=(~/.zsh $fpath)
if [ -f ${HOME}/.zsh/git-completion.zsh ]; then
zstyle ':completion:*:*:git:*' script ~/.zsh/git-completion.zsh
fi
if [ -f ${HOME}/.zsh/git-prompt.sh ]; then
source ${HOME}/.zsh/git-prompt.sh
fi
GIT_PS1_SHOWDIRTYSTATE=true
GIT_PS1_SHOWUNTRACKEDFILES=true
GIT_PS1_SHOWSTASHSTATE=true
GIT_PS1_SHOWUPSTREAM=auto
setopt PROMPT_SUBST ; PS1='%F{green}%~%f%F{red}$(__git_ps1 " (%s)")%f
\$ '
# Where the history file is saved
export HISTFILE=${HOME}/.zsh_history
# Number of history entries kept in memory
export HISTSIZE=1000
# Number of history entries saved to the history file
export SAVEHIST=100000
# Do not record duplicate entries
setopt hist_ignore_all_dups
# Record start and end timestamps
setopt EXTENDED_HISTORY
# Incremental history search with peco (bound to Ctrl-R below)
function select-history() {
local tac
if which tac > /dev/null; then
tac="tac"
else
tac="tail -r"
fi
BUFFER=$(fc -l -n 1 | eval $tac | peco --query "$LBUFFER")
CURSOR=$#BUFFER
zle -R -c
}
zle -N select-history
bindkey '^r' select-history
# Enable command autosuggestions
source ~/.zsh/zsh-autosuggestions/zsh-autosuggestions.zsh
|
eac7ad70ab65f57ddb5bdcd68a077aa6332747ac
|
Shell
|
codebam/dotfiles-old
|
/bin/spectro
|
UTF-8
| 6,579
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Abort the whole script when any command fails (ERR trap instead of set -e).
trap 'exit' ERR
# Unmatched globs expand to nothing rather than a literal pattern.
shopt -s nullglob
# spectro: creates forum code with spectrograms of FLAC/MP3 files
# Dependencies: sox, optipng and curl (lame optional)
# Usage: spectro location/of/album/ or spectro file1 file2...
# Check for dependencies
command -v sox >/dev/null 2>&1 || { echo >&2 "This script requires SoX but it's not installed. Aborting."; exit 1; }
command -v curl >/dev/null 2>&1 || { echo >&2 "This script requires curl but it's not installed. Aborting."; exit 1; }
# Public key: (anonymous imgur client id used for uploads)
apikey="6bf8cf819aff12a"
# Set optipng=1 if you wish to use it by default
# Can be forced with the -o, --optipng flag
optipng=
# Local directory where files will be stored (should be inside your public_html folder) when using -l, --local:
# Do NOT use a trailing slash
localdir="${HOME}"/www/"$(whoami)"."$(hostname -f)"/public_html/spectro
# URL format (example: http://yoursite.com/spectro/) for the -l, --local flag
# Do NOT use a trailing slash
urlformat=https://"$(hostname -f)"/"$(whoami)"/spectro
# Print the usage/flags summary to stdout and return.
show_help () {
    echo
    echo "Usage:"
    echo "  spectro <input>"
    echo "Input can be individual files or a directory."
    echo
    echo "Options:"
    echo "  -d, --double     Take both zoomed in and normal image for each file"
    echo "  -h, --help       Show this help and exit"
    echo "  -l, --local      Create the spectrograms locally"
    echo "  -o, --optipng    Force optipng usage"
    echo "  -p, --parallel   Play nicely with parallel"
    echo "  -z, --zoom       Create zoomed in screenshot (3 seconds)"
    echo
    echo "Example: spectro -ol DirectoryWithMusic/"
    echo
    echo "To process a directory with GNU parallel, first cd into it and then run"
    echo 'printf "[hide=Spectrograms]" && ls | parallel -k spectro -p && echo "[/hide]"'
    echo
}
# Default sox invocation: one array element holding the whole option string;
# it is deliberately expanded unquoted later so it word-splits into args.
sox_arguments=("-n remix 1 spectrogram -x 3000 -y 513 -z 120 -w Kaiser")
# Hand-rolled option parser: handles long options, single short options,
# and combined short flags like -ol (the -?* arm walks each character).
while :; do
    case $1 in
        -h|-\?|--help)   # Call a "show_help" function to display a synopsis, then exit.
            show_help
            exit
            ;;
        -d|--double)
            # Add a second, zoomed-in pass in addition to the default one.
            sox_arguments+=( "-n remix 1 spectrogram -x 500 -y 1025 -z 120 -w Kaiser -S 0:25 -d 0:04" )
            ;;
        -o|--optipng)
            optipng=1
            ;;
        -p|--parallel)
            parallel=1
            ;;
        -l|--local)
            offline=1
            ;;
        -z|--zoom)
            # Replace (not extend) the default pass with the zoomed-in one.
            sox_arguments=("-n remix 1 spectrogram -x 500 -y 1025 -z 120 -w Kaiser -S 0:25 -d 0:04")
            ;;
        -?*)
            # Combined short flags, e.g. -ol: iterate over each character.
            if [[ "${#1}" -ge "3" && ${1:1:1} != "-" ]]; then
                for (( i=1; i<${#1}; i++ )); do
                    if [[ "${1:$i:1}" == "d" ]]; then
                        sox_arguments+=( "-n remix 1 spectrogram -x 500 -y 1025 -z 120 -w Kaiser -S 0:25 -d 0:04" )
                        continue
                    fi
                    if [[ "${1:$i:1}" == "o" ]]; then
                        optipng=1
                        continue
                    fi
                    if [[ "${1:$i:1}" == "p" ]]; then
                        parallel=1
                        continue
                    fi
                    if [[ "${1:$i:1}" == "l" ]]; then
                        offline=1
                        continue
                    fi
                    if [[ "${1:$i:1}" == "z" ]]; then
                        sox_arguments=("-n remix 1 spectrogram -x 500 -y 1025 -z 120 -w Kaiser -S 0:25 -d 0:04")
                        continue
                    fi
                    # Anything unrecognised is collected and warned about once.
                    ignored_options="$ignored_options""${1:$i:1}"
                    continue
                done
                if [[ -n "$ignored_options" ]]; then
                    printf 'WARN: Unknown options (ignored): %s\n' "$ignored_options" >&2
                fi
            else
                printf 'WARN: Unknown option (ignored): %s\n' "$1" >&2
            fi
            shift 1
            continue
            ;;
        --)              # End of all options.
            shift
            break
            ;;
        *)               # Default case: If no more options then break out of the loop.
            break
    esac
    shift
done
if [[ "$optipng" = "1" ]]; then
    command -v optipng >/dev/null 2>&1 || { echo >&2 "You enabled optipng but it's not installed. Aborting."; exit 1; }
fi
# Check there's at least one argument
if [ "$#" -lt 1 ]; then
    show_help
    exit 1
fi
# Create one spectrogram PNG for a single audio file.
#   $1 - a sox option string (word-split on purpose via unquoted $sox_arg)
#   $2 - the audio file path
# In online mode the PNG is uploaded to imgur and its URL echoed; in
# -l/--local mode it is written under $localdir and a $urlformat URL echoed.
# MP3s are first decoded to WAV with lame because sox's spectrogram effect
# is fed PCM here.
create_spectrogram () {
    if [ -z "$offline" ]; then
        f="${2}"
        # Replace commas for the image name, or curl may cause problems
        output_name="${2//,/_}"
        # Append 'spectrogram' to image name, and replace spaces for underscores
        output_name="${output_name// /_}spectrogram.png"
        if [[ "$f" == *.mp3 ]]; then
            mp3=1
            command -v lame >/dev/null 2>&1 || { echo >&2 "Checking mp3 spectrals requires lame, which isn't installed. Aborting."; exit 1; }
            lame --quiet --decode "$f" "$f.spectrogram.wav"
            f="$2.spectrogram.wav"
            output_name="${f//,/_}"
            output_name="${output_name// /_}.png"
        fi
        # $sox_arg is intentionally unquoted: it is one string of many options.
        sox "$f" $sox_arg -t "${PWD##*/}/$f" -o "$output_name"
        if [[ "$optipng" = "1" ]]; then optipng -quiet "$output_name"; fi
        # Upload to imgur's anonymous API; force https in the returned links.
        response="$(curl -H "Authorization: CLIENT-ID $apikey" -F "image=@$output_name" \
        https://api.imgur.com/3/image.xml 2>/dev/null | tail -n +2 | sed 's/http:/https:/g' )"
        url="$(echo "$response" | sed -r 's/.*<link>(.*)<\/link>.*/\1/')"
        rm -f "$output_name"
        if [ "$mp3" = 1 ]; then
            # Remove the temporary decoded WAV.
            rm -f "$f"
        fi
        echo "$url"
    elif [ "$offline" = "1" ]; then
        # NOTE(review): this branch reads $f set by the *caller's* loop, not
        # $2 — it relies on f being a global; confirm before refactoring.
        # Truncate spaces for underscores
        output_name="${f// /_}"
        output_name="${output_name//,/_}.png"
        # Create the spectrogram
        sox "$f" $sox_arg -t "${PWD##*/}/$f" -o "$localdir"/"$output_name"
        # Optimise the png
        if [[ "$optipng" = "1" ]]; then optipng -quiet "$localdir"/"$output_name"; fi
        # Provide url
        url="$urlformat"/"$output_name"
        echo "$url"
    fi
}
# Main dispatch: individual files vs. a whole directory.
if [[ ! -d "$1" ]]; then
    if [[ -f "$1" ]]; then
        # Wrap output in forum [hide] tags unless running under parallel.
        if ! [[ "$parallel" = "1" ]]; then printf "[hide=Spectrograms]"; fi
        for f in "$@"; do
            # Skip anything that is not audio/flac or audio/mpeg.
            content="$(file --mime-type "$f")"
            content="${content##* }"
            if ! [[ "${content##* }" == *@(flac|mpeg)* ]]; then
                continue
            fi
            echo "${PWD##*/}/$f:"
            for sox_arg in "${sox_arguments[@]}"; do
                echo "[img]$(create_spectrogram "$sox_arg" "$f")[/img]"
            done
            # NOTE(review): this branch looks dead — $double is never set
            # anywhere, and `shift -1` is not valid bash (shift takes a
            # non-negative count). Confirm intent before removing.
            if [[ "$double" = "1" ]]; then
                shift -1
                zoom=$((!zoom))
            fi
        done
        if ! [[ "$parallel" = "1" ]]; then echo "[/hide]"; fi
        exit
    else
        echo "Invalid input."
        show_help
        exit 1
    fi
else
    # Input exists and is a directory
    if ! cd "$1"; then
        echo "Couldn't access directory. Aborting."
        exit 1
    else
        if ! [[ "$parallel" = "1" ]]; then printf "[hide=Spectrograms]"; fi
        # nullglob (set at the top) makes unmatched patterns vanish; the
        # **/ patterns only recurse if globstar is enabled by the caller.
        for f in *.{flac,mp3} **/*.{flac,mp3}; do
            echo "${PWD##*/}/$f:"
            for sox_arg in "${sox_arguments[@]}"; do
                echo "[img]$(create_spectrogram "$sox_arg" "$f")[/img]"
            done
        done
        if ! [[ "$parallel" = "1" ]]; then echo "[/hide]"; fi
    fi
fi
| true
|
b0146c3cae727713c43e8cb9f5df097d269b0de4
|
Shell
|
FauxFaux/debian-control
|
/r/rainloop/rainloop_1.11.1-1_all/postinst
|
UTF-8
| 488
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/sh -e
# Debian postinst: install the bundled sample vhost configuration for every
# web server that is present, never overwriting an existing site file.
# Nginx sample site.
if [ -d /etc/nginx/sites-available/ ] && [ ! -f /etc/nginx/sites-available/rainloop ]; then
    cp /usr/share/doc/rainloop/rainloop.nginx.conf /etc/nginx/sites-available/rainloop
fi
# Apache2 sample site.
if [ -d /etc/apache2/sites-available/ ] && [ ! -f /etc/apache2/sites-available/rainloop.conf ]; then
    cp /usr/share/doc/rainloop/rainloop.apache.conf /etc/apache2/sites-available/rainloop.conf
fi
| true
|
53232c61306318447b3d261cacc3c4c821fe2051
|
Shell
|
NavidHeydari/OpenWhisk_actions
|
/taskExecutor.sh
|
UTF-8
| 414
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Fire 199 sequential requests at an OpenWhisk web action and append the
# "result" lines of each JSON response to cpuStress_res.csv.
# NOTE: the shebang must be bash — `for ((...))` is a bashism that fails
# under /bin/sh on Debian-style systems (the original said `#! /bin/sh`).
module load parallel
echo "starting"
for ((count = 1; count < 200; count++)); do
    #curl https://openwhisk.ng.bluemix.net/api/v1/web/navidh2%40uw.edu_dev/default/openWhiskTester.json?number=20 | grep result > cpuStress_res${count}.csv
    # Quote the URL (it contains ?) and append so all iterations accumulate.
    curl "https://openwhisk.ng.bluemix.net/api/v1/web/navidh2%40uw.edu_dev/default/openWhiskTester.json?number=20" | grep result >> cpuStress_res.csv
done
echo "finished"
exit 0
| true
|
1e982885ab524c5e5e0bf76d885340b81444e108
|
Shell
|
ilscipio/scipio-erp
|
/themes/base/webapp/base/bower_components/codemirror/component-tools/build.sh
|
UTF-8
| 599
| 3.84375
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-jdom",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"MPL-1.1",
"CPL-1.0",
"GFDL-1.1-or-later",
"MPL-2.0",
"CC-BY-2.5",
"SPL-1.0",
"LicenseRef-scancode-proprietary-license",
"CDDL-1.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/usr/bin/env bash
# Build and tag a release of the CodeMirror bower component: fetch
# codemirror@TAG from npm, mirror it into this repo, commit and git-tag it.
set -e
if [[ -z "$1" ]]; then
  echo "please specify a tag to build"
  # exit takes 0-255; the original's `exit -1` is non-portable.
  exit 1
fi
TAG="$1"
# Refuse to overwrite an existing git tag.
if [[ -n "$(git tag | grep "^$TAG\$")" ]]; then
  echo "tag $1 already exists"
  exit 1
fi
# Repo root is the parent of the directory holding this script; quote the
# nested command substitutions so paths with spaces survive.
repo="$(dirname "$(dirname "$0")")"
# Sanity check: this script must live in <repo>/component-tools/.
test -f "$repo/component-tools/$(basename "$0")"
td="$(mktemp -d)"
cd "$td"
npm install "codemirror@$TAG"
cd "$repo"
# Mirror the npm package into the repo, preserving .git and our tooling.
rsync -ar --delete --exclude .git --exclude component-tools "$td/node_modules/codemirror/" "$repo/"
cp component-tools/bower.json "$repo/"
rm -rf "$td"
git add -A
git commit -am "Build component $TAG"
git tag -am "release $TAG" "$TAG"
| true
|
b936fa62d3e88d0af0084d84f87fdb04e1c091b7
|
Shell
|
gissehel/centraconf
|
/__tools__/mkbootstrap.sh
|
UTF-8
| 1,156
| 3.75
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build a self-extracting "bootstrap" file: a fixed-size shell header
# (__tools__/bootstrap_header, exactly $bootstrap_header_size bytes)
# followed by a gzipped tarball of the target directory.
bootstrap_header_size=800
# Target defaults to the parent of this script's directory.
targetdir="$1"
[ -z "$targetdir" ] && targetdir="$(dirname "$0")/.."
wd="$(pwd)"
cd "$targetdir"
# Canonicalise to an absolute path.
targetdir="$(pwd)"
which git >/dev/null && git submodule init
which git >/dev/null && git submodule update -f
cd "$wd"
# Pressing plain Enter confirms; any other input aborts.
echo "Create a bootstrap from [$targetdir] (enter=>yes)?"
read yesno
[ \! -z "$yesno" ] && echo "aborting..." && exit 0
bootstrap_filename="$targetdir"/../"$(basename "$targetdir")".bootstrap
# Print an error message and abort the script.
function fail_message() {
    echo "[ERROR] $1 ... aborting..."
    exit 1
}
# Emit exactly the first $bootstrap_header_size bytes of the header file.
function get_bootstrap_header() {
    dd if="$targetdir"/__tools__/bootstrap_header bs=${bootstrap_header_size} skip=0 count=1 status=none
}
# The header must end inside its "FILL FILL ..." padding, proving the
# configured size covers the whole script portion.
get_bootstrap_header | grep "FILL FILL" > /dev/null || fail_message "Wrong bootstrap header size"
# The size embedded in the header must match the one configured here.
HEADER_SIZE_MATCH=$(get_bootstrap_header | grep "bootstrap_header_size=")
[ "${HEADER_SIZE_MATCH#*=}" != "${bootstrap_header_size}" ] && fail_message "'bootstrap_header_size' don't match"
rm -f "${bootstrap_filename}"
# Concatenate header + tar.gz of the target directory into the bootstrap.
(get_bootstrap_header ; cd "$targetdir" ; tar cz .) > "${bootstrap_filename}"
[ -f "${bootstrap_filename}" ] && echo "done" || fail_message "Something went wrong"
| true
|
da47ab953541c3e12a9913f2ae8134ece87ce99b
|
Shell
|
Code-Slave/kiwix-serv-improved
|
/server/entrypoint.sh
|
UTF-8
| 967
| 4
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
#
# Handles various arguments passed in to docker container which should be one of
# the following:
#
# - if nothing is specified then show help for kiwix-serve
# - if the single argument generate-library then run generate-library command
# - if '--library somefile' is specified run kiwix-serve with that
# - if 'somefile.zim' then run kiwix-serve with that
#
#
set -e
set -u
# Deliberately left unquoted at the call sites so it word-splits into
# the program name plus its --port option.
kiwix_serve_cmd="/usr/local/bin/kiwix-serve --port 80"
if [ $# -eq 1 ]; then
    if [ "${1}" = "generate-library" ]; then
        # docker run [...] generate-library
        exec /usr/local/bin/generate-library
    else
        # docker run [...] somefile.zim
        exec ${kiwix_serve_cmd} "${1}"
    fi
elif [ $# -eq 2 ] && [ "${1}" = "--library" ]; then
    # docker run [...] --library somefile.xml
    exec ${kiwix_serve_cmd} --library "${2}"
else
    # docker run [...] somefile1.zim somefile2.zim ...
    # docker run [...]
    # "$@" (not the original's $*) keeps arguments containing spaces intact.
    exec ${kiwix_serve_cmd} "$@"
fi
| true
|
376ffa1ba3e22a79e1e533a2e3f1dfc0d73aad51
|
Shell
|
pabl0rg/kobalt-mixed-example
|
/check-dep.sh
|
UTF-8
| 355
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
# Print the Maven dependency tree for <groupId>:<artifactId>:<version> by
# first fetching its POM into the local ~/.m2 repository.
if [ "$#" -ne 3 ]; then
    echo "Usage: $0 <groupId> <artifactId> <version>" >&2
    # A usage error must exit non-zero (the original's bare `exit` returned 0).
    exit 1
fi
# groupId dots become directory separators, e.g. org.foo -> org/foo.
POM_DIR="$(echo "$1" | tr . /)/$2/$3"
POM_PATH="$POM_DIR/$2-$3.pom"
mkdir -p "$HOME/.m2/repository/$POM_DIR"
wget -q -O "$HOME/.m2/repository/$POM_PATH" "http://repo.maven.apache.org/maven2/$POM_PATH"
mvn -f "$HOME/.m2/repository/$POM_PATH" dependency:tree
| true
|
8352c8715e517a0d3b4ef4e59dd1bfe7543eaa05
|
Shell
|
Guolei1130/script_tools
|
/test.sh
|
UTF-8
| 102
| 2.625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Print the second ps(1) column of the first process matching $fillter.
fillter="adb"
# Quote the pattern, and drop the grep process itself from the matches —
# the original pipeline could match its own `grep adb` entry.
result=`ps -l | grep "${fillter}" | grep -v grep`
# Intentional word-splitting: ps output fields become array elements.
var_arr=($result)
echo ${var_arr[1]}
| true
|
992b9704611a37929949d292a86ad41bd773b23b
|
Shell
|
LostEchoDX/Hyrule-Castle
|
/HyruleCastle/base_game/hyrule_castle.sh
|
UTF-8
| 933
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Tiny turn-based text game: climb 10 floors of Hyrule Castle, attacking or
# healing each turn. Floor 10 swaps the enemy for Ganon with boosted stats.
# HP1/HP1mx: player HP/max; HP2: enemy HP; STR1/STR2: player/enemy damage;
# N: current floor; ENM: enemy name.
HP1=60 ; HP1mx=60 ; HP2=30 ; STR1=10 ; STR2=5 ; N=1 ; ENM=Boko
echo "
Floor $N
"
# Main game loop: runs while the player is alive.
# NOTE(review): $OPT is unquoted in the tests below — pressing Enter on an
# empty line (or input with spaces) makes `[` error out. Confirm before use.
while (($HP1>0))
do echo 1-Attack 2-Heal?
read OPT
# Attack: both sides trade damage.
if [ $OPT == Attack ] || [ $OPT == 1 ]
then HP2=$(($HP2-$STR1))
HP1=$(($HP1-$STR2))
echo "Link : $HP1"
echo "$ENM : $HP2
"
fi
# Heal: restore half of max HP (capped at max), still taking enemy damage.
if [ $OPT == Heal ] || [ $OPT == 2 ]
then HP1=$(($HP1-$STR2+$HP1mx/2))
if [ $HP1 -gt $HP1mx ] ; then HP1=$HP1mx ; fi
echo "Link : $HP1"
echo "$ENM : $HP2
"
fi
# End-of-game messages are only printed on the final floor (N == 10);
# on earlier floors death simply ends the while loop silently.
if [ $HP1 -le 0 ] && [ $N == 10 ] ; then echo "
Game Over
" ; break ; fi
if [ $HP2 -le 0 ] && [ $N == 10 ] ; then echo "
Congratulations!
" ; break ; fi
# Enemy defeated: advance a floor; floor 10 spawns Ganon with new stats.
if [ $HP2 -le 0 ]
then N=$(($N+1))
if [ $N == 10 ] ; then echo "
Final Floor!
"
HP2=150
STR2=20
ENM=Ganon
else echo "
Floor $N
"
HP2=30
fi
if [ $HP1 -le 0 ] ; then echo "
Game Over
"
fi
fi
done
| true
|
6fc3d7e05c4379a22b6ced69600e703ed748fc6c
|
Shell
|
sullof/secrez
|
/bin/check-unused.sh
|
UTF-8
| 136
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Run npm-check-unused inside every package of the monorepo.
# Glob instead of parsing `ls` output (which breaks on odd names).
for package in packages/*; do
  [ -d "$package" ] || continue
  # Subshell keeps each cd isolated from the next iteration.
  (cd "$package" && echo "$package" && npm-check-unused)
done
| true
|
680c5072ee1e5d3bf68c1692ba39f7493d778365
|
Shell
|
mojofunk/mingw-pkg
|
/packages/boost/MINGWPKG
|
UTF-8
| 357
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# mingw-pkg package recipe for boost: metadata plus an install step that
# copies a subset of the boost DLLs into the package bin directory.
# Package metadata consumed by the mingw-pkg build driver.
function define_package_env
{
    PKG_NAME='boost'
    PKG_VERSION="1.55.0"
    PKG_DEPS="gcc"
}
# Copy the needed boost DLLs into $PKG_BIN_DIR.
# Relies on copydll and PKG_BIN_DIR being provided by the build framework.
function install
{
    # not packaging them all for now
    DLLS='
    libboost_system-mt.dll
    libboost_filesystem-mt.dll
    libboost_unit_test_framework-mt.dll
    '
    mkdir -p $PKG_BIN_DIR || exit 1
    # Unquoted $DLLS is intentional: the embedded newlines split the list.
    for i in $DLLS;
    do
        copydll "$i" $PKG_BIN_DIR || exit 1
    done
}
| true
|
8943d5732be5fda8c366be438d63031f0605bc84
|
Shell
|
eush77/dotfiles
|
/SCRIPTS/bin/taggen
|
UTF-8
| 2,555
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# taggen: (re)generate ID3v1/ID3v2 tags (and optionally embed AlbumArt.jpg)
# for MP3s laid out as ./<year> - <album>/<track> - <title>.mp3.
# $1 = artist name, $2 = ID3v1 genre number.
if [[ $# -eq 0 || "$1" == "--help" ]];then
    # Draw an 80-character horizontal rule.
    for((i=0;i<80;i++));do
        echo -ne '—'
    done
    echo
    echo -e "\e[1m\e[33mDescription\e[0m"
    echo -e "\e[4C'\e[4mtaggen\e[0m' generates \"ID3v1\" and \"ID3v2\" tags using"
    echo -e "\e[8C\"\$1\" as an artist name,\n\e[8C\"\$2\" as a genre specificator (in the terms of \"ID3v1\")."
    echo -e "\e[4CIt assumes that the working dir contains 'MP3''s in the following hierarchy."
    echo -e "\e[12C./<year> - <album>/<track> - <title>.mp3"
    echo -e "\e[4C\e[4mUPD\e[0m Moreover, you can place your \"AlbumArt.jpg\" cover file inside an album folder."
    echo -e "\e[4CIn this case this cover is expected to be attached to all the 'MP3''s laying beside it."
    echo -e "\e[1m\e[33mRequired\e[0m"
    echo -e "\e[4CThis script version won't work unless you have installed:"
    echo -e "\e[8C\"mp3info\",\n\e[8C\"id3v2\",\n\e[8C\"eyeD3\" (for covers embedding)"
    echo -e "\e[4Cand, of course, some standard stuff."
    echo -e "\e[1m\e[33mUsage\e[0m"
    echo -e "\e[4C$ taggen <artist> <genre>"
    echo -e "\e[4C$ taggen --help"
    echo -e "\e[1m\e[33mExample\e[0m"
    echo -e "\e[4C\$ taggen 'Linkin Park' 20"
    echo -e "\e[1m\e[33mRelated\e[0m"
    echo -e "\e[4C'\e[4malbart\e[0m',\n\e[4C'\e[4mtitle-case\e[0m',\n\e[4C'\e[4mtag_locale.sh\e[0m'."
    exit
fi
if [ $# -ne 2 ];then
    echo Error: 2 parameters expected.
    echo Type \"taggen --help\" for more info.
    exit 1
fi
# Iterate over every MP3. Spaces in paths are temporarily encoded as the
# \001 byte so the for-loop's word splitting doesn't break the file names,
# then decoded back at the top of the body.
for i in `find -iname '*.mp3' |sed -r "s/ /\`echo -ne '\001'\`/g"`; do
    i=`sed -r "s/\`echo -ne '\001'\`/ /g" <<<"$i"`;
    # name = "<track> - <title>.mp3"; path = "<year> - <album>".
    name=`sed -r "s/.*\/([^/]*)/\1/" <<<"${i}"`;
    n=`cut -d' ' -f1 <<<"${name}"`;
    t=`echo "${name}" |cut -d' ' --complement -f1,2 |sed -r 's/\.mp3//'`;
    path=`sed -r "s/.\/(.*)\/.*/\1/" <<<"${i}"`;
    y=`cut -d' ' -f1 <<<"${path}"`;
    a=`cut -d' ' --complement -f1,2 <<<"${path}"`;
    echo "$i";
    echo -e "\t$y\n\t$a\n\t$n\n\t$t";
    # Wipe any existing ID3v2 tag, write fresh ID3v1 fields, then convert
    # the ID3v1 tag to ID3v2.
    id3v2 -d "$i"
    mp3info -a "$1" -l "$a" -g "$2" -n "$n" -t "$t" -y "$y" "$i";
    id3v2 -C "$i"
    #The next several lines (down to the next echo command) were attached some months later
    IMAGE="./${path}/AlbumArt.jpg"
    if [[ -e "${IMAGE}" ]];then
        #eyeD3 --no-color --add-image="${IMAGE}:FRONT_COVER" "$i" >/dev/null
        # Long title strings have been asking for the following update for a really long time...
        eyeD3 --no-color --add-image="${IMAGE}:FRONT_COVER" -t "$t" "$i" >/dev/null
    fi
    echo
done
# Show one sample record so the result can be eyeballed.
echo -e "\n\nSample \"ID3V1\" tag record."
cd *
mp3info 01*
cd ..
exit
| true
|
37c2efd22cde178df760d260b67ae7af400fc013
|
Shell
|
Jayad/Program_practise
|
/scripts/practise.sh
|
UTF-8
| 601
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Shell practice script: environment inspection, redirection, a conditional
# and the three basic loop forms. (The original shebang "#!bin/bash" was
# missing the leading slash and therefore invalid.)
#$bc 7 + 5
#date_formatted=$(date +%m_%d_%y-%H.%M.%S)
#cp -iv $1 $2.$date_formatted
env #enviornment
echo $PATH #executable paths
echo $PWD #current working directory
echo "Simple Script"
ls > myfiles
# Run the date *command*; the original referenced an (empty) $date variable,
# which printed nothing and appended nothing to myfiles.
date
date >> myfiles
echo $LOGNAME
no=10;
echo $no;
if [ "foo" == "foo" ]; then
    echo "Expressions are same"
else
    echo "Expressions are not same"
fi
for i in $(ls); do
    echo item: $i
done
COUNTER=0
while [ $COUNTER -lt 10 ]; do
    echo "Counter is" $COUNTER
    let COUNTER=COUNTER+1;
done
COUNTER=20
until [ $COUNTER -lt 10 ]; do
    echo "COUNTER" $COUNTER
    let COUNTER-=1
done
| true
|
5ebdf184d28b0612fdda1393629db4e08f6c0af5
|
Shell
|
uu-z/block-bash
|
/Characters/exclamationMark.sh
|
UTF-8
| 78
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print "NOT" when $val differs from 0, "YES" when it equals 0.
val=10
# The original wrote `[ "$val" != 0]` — the missing space before "]" made
# the test command itself fail, so the else branch always printed "YES".
if [ "$val" != 0 ]
then
    echo "NOT"
else
    echo "YES"
fi
| true
|
64821247a35268597e909aef0ff67014b76fb71a
|
Shell
|
irina-luca/algorithm_design_project
|
/Code/Scripts/metrics-train-models.sh
|
UTF-8
| 344
| 2.59375
| 3
|
[] |
no_license
|
# Train metric models for every (dimension, sphere-size, repetition) combo.
# Array elements must be space-separated: the original comma-separated form
# made each array a single "60,257"-style element, so only one (malformed)
# value was ever iterated.
dimension_sizes=(60 257)
sphere_sizes=(32 64 128)
for ds in "${dimension_sizes[@]}"; do
    for ss in "${sphere_sizes[@]}"; do
        for i in $(seq 1 5); do
            echo "${ds} ${ss} ${i}"
            # Braces are required in the file names: the original $ds_cl32_...
            # was parsed as the (undefined) variable "ds_cl32_", producing
            # wrong paths. File name pattern inferred from the echo above —
            # TODO confirm against the actual data layout.
            python train-cli.py -i "Data/Metrics/random_10000_d${ds}_cl32_${i}.train" -m "Data/Models/metric-10000_d${ds}_cl32_c${ss}_${i}.model" -c "$ss"
        done
    done
done
| true
|
30e7f1521361af8e187f443cbb4429da8ff53e09
|
Shell
|
freebsd/freebsd-ports
|
/science/InsightToolkit/files/fake-git
|
UTF-8
| 804
| 3.71875
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
##
## fake git to assist in downloads of remote modules
##
## Impersonates the handful of git subcommands the ITK build invokes when
## fetching remote modules, satisfying them from pre-extracted local trees
## instead of the network. The checkout hash is remembered in ./.hash so a
## later rev-parse can echo it back.
#echo "fake-git called with args: $@" >> /tmp/fake-git.log
case "$1" in
    --version)
        # Report a plausible modern git version.
        echo "git version 2.40.0"
        exit 0
        ;;
    clone)
        # we don't run git clone
        #/usr/local/bin/git clone $2 $3 >> /tmp/fake-git.log 2>&1
        # args
        url=$2
        dir=$3
        # extract module name
        module=$(echo $url | sed -e "s|.*/||; s|\.git\$||")
        # copy the tree
        # Source is the sibling pre-extracted "<module>-*" directory.
        cp -r ../$module-* $dir
        exit 0
        ;;
    checkout)
        # assume git checkout {hash}
        # memorize the hash
        echo $2 > .hash
        exit 0
        ;;
    config)
        # ignore
        exit 0
        ;;
    remote)
        # ignore
        exit 0
        ;;
    rev-parse)
        # assume: git rev-parse --verify {HASH}
        echo "@rev-parse pwd=$(pwd)" >> /tmp/fake-git.log
        # Echo back the hash memorized by the fake checkout above.
        cat .hash
        exit 0
        ;;
    submodule)
        # ignore: hope that there are no submodules
        exit 0
        ;;
esac
# Any other subcommand is unsupported.
exit 1
| true
|
262355ee4fc2e2b4eaa491b183927d728fa210a5
|
Shell
|
Ramjivan/copier
|
/rename.sh
|
UTF-8
| 572
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# For every subdirectory of the current directory, rename its files to
# sequential zero-padded names (0001.mp3, 0002.mp3, ...), numbering
# continuously across directories from a user-supplied starting point.
# Newline-only IFS so file names containing spaces survive word splitting.
IFS='
'
echo "Enter starting point"
read start
# Rename every entry of the current directory to "<start, %04d>.mp3" and
# bump the (global) counter after each file.
rp() {
    local array=(`ls -1`)
    local i temp
    for i in ${array[@]}; do
        # printf does the zero padding the original built by hand with a
        # chain of string-length checks (same result for 1-4 digit counts).
        printf -v temp '%04d.mp3' "$start"
        mv $i $temp
        let start+=1
    done
}
darray=(`ls -1`)
for d in ${darray[@]}; do
    cd $d
    rp
    cd ..
done
| true
|
bcdd2eea622f2d71cd5e35c6e9de26c38aeec1e8
|
Shell
|
maluramichael/osx-configs
|
/_tools/hashcat.sh
|
UTF-8
| 218
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Clone (first run) or update (subsequent runs) a hashcat checkout under
# $TOOLS_HOME. Requires TOOLS_HOME to be set (nounset enforces this).
set -o errexit
set -o pipefail
set -o nounset
if [ ! -d "$TOOLS_HOME/hashcat" ]; then
    git clone git@github.com:hashcat/hashcat.git "$TOOLS_HOME/hashcat"
fi
# `return` (as in the original) is only valid inside a function; at script
# top level the failure handler must be `exit`.
cd "$TOOLS_HOME/hashcat" || exit 1
git pull
| true
|
d04413c9b4c067a9ac23f92173c15902030ed927
|
Shell
|
xlcoder/codesnippet
|
/autodeploy/laravel_local_auto
|
UTF-8
| 625
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# One-shot local deploy: wire up an nginx vhost, scaffold a Laravel 5.1
# project, create its MySQL database and pull in bower configuration.
#Variable
# NOTE(review): these four must be filled in before running — the script
# does not validate them and will misbehave with empty values.
rootdir=
nginxfile=
databasename=
pwd=
#Step 1: Nginx file
sudo cp /var/www/$nginxfile /etc/nginx/sites-available
sudo ln -s /etc/nginx/sites-available/$nginxfile /etc/nginx/sites-enabled
sudo service nginx restart
#Step 2: Composer Laravel Framework
cd /var/www/
sudo composer create-project laravel/laravel $rootdir "5.1.*"
#Step 3:Create database
# NOTE(review): the root password appears on the mysql command line and is
# therefore visible in the process list; consider MYSQL_PWD or a cnf file.
mysql -u root -p$pwd -e "create database $databasename default CHARACTER SET utf8 COLLATE utf8_general_ci"
#Setp 4:Bower
cd $rootdir
cp /var/www/codesnippet/laravel/bower.json ./
cp /var/www/codesnippet/laravel/.bowerrc ./
mkdir bower_vendor
|
41f3345a943ad7107c248abe83951fe84c9ba309
|
Shell
|
GOSkyYLQ/Blog-and-Exercises
|
/SHELL/debug.sh
|
UTF-8
| 83
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrate `set -x` tracing: enable tracing just around each echo so
# only the echo (and the trailing `set +x`) appear in the trace output.
for i in 1 2 3 4 5 6; do
    set -x
    echo $i
    set +x
done
echo "done"
| true
|
54d93fa9a5e19116b90899788fd10f072c50d569
|
Shell
|
adityasuwandi/gocsv
|
/gocsv.sh
|
UTF-8
| 850
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Start the gocsv microservice, but only after verifying that the goventory
# service is up and answering on port 3000.
# -s: silent; -H: request JSON. The original used `curl -sb -H ...`, which
# made curl treat "-H" as the -b cookie argument and mangled the request.
response=$(curl -s -H "Accept: application/json" "http://0.0.0.0:3000/index")
# check goventory, it should be running on Port :3000
if [ "$response" != "Inventory REST API." ]; then
    echo "Something wrong dude, goventory is not running on Port :3000."
    # Exit non-zero so callers can detect the failed precondition.
    exit 1
fi
# When somebody press Ctrl-C
trap '{ echo "Hey, you pressed Ctrl-C. Time to quit." ; exit 1; }' INT
# start gocsv microservice. The original wrapped this in backticks, which
# would have executed the *output* of `go run` instead of the service.
PORT=4000 exec go run main.go
| true
|
1598e856aacd38653f1f4dc9fde304a041363f30
|
Shell
|
mjsweet01/didactic-umbrella
|
/build.sh
|
UTF-8
| 647
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build luci-app-passwall inside an OpenWrt SDK checkout: register the
# passwall feed, compile the package and refresh the package index.
# Expects $SDK_URL in the environment (used to detect the OpenWrt release).
echo "src-git passwall https://github.com/xiaorouji/openwrt-passwall" >> feeds.conf.default
sudo apt-get update
sudo apt-get install upx -y
# The SDK's host toolchain needs upx on its own PATH.
cp /usr/bin/upx staging_dir/host/bin
cp /usr/bin/upx-ucl staging_dir/host/bin
./scripts/feeds update -a
# Extract e.g. "19.07.7" from ".../releases/<version>/targets/..." in $SDK_URL.
OPENWRT_VERSION=$(sed -n 's/.*releases\/\(.*\)\/targets.*/\1/p' <<< $SDK_URL)
# 19.x SDKs ship a Go toolchain too old for passwall; borrow 21.02's.
if [[ $OPENWRT_VERSION == 19* ]];then
    pushd feeds/packages/lang
    rm -rf golang && svn co https://github.com/openwrt/packages/branches/openwrt-21.02/lang/golang
    popd
fi
./scripts/feeds install luci-app-passwall
make defconfig
make package/luci-app-passwall/{clean,compile} -j4
make package/index
| true
|
2eba68c5687ae2841741dbbdda4eb9847891c64f
|
Shell
|
dolfly/fullproof
|
/tools/build-all.sh
|
UTF-8
| 930
| 3.65625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Full release build: compile sources and site, optionally generate jsdoc,
# then assemble a dated fullproof-YYYYMMDD release as .zip and .tar.gz.
# Must run from tools/ — common.sh provides $BUILD and $ROOT.
test -f common.sh || {
    echo "The `basename $0` script must be invoked from the tools directory." >&2
    exit 1
}
. common.sh
rm -fr "$BUILD"
mkdir -p "$BUILD"
./build-src.sh
./build-site.sh
# Locate jsdoc unless the caller already exported $JSDOC. The set +e / set -e
# pair tolerates `which` failing (common.sh presumably enables errexit —
# NOTE(review): confirm, it is not set in this file).
if [[ "$JSDOC" == "" ]] ; then
    set +e
    JSDOC=`which jsdoc`
    set -e
fi
if [[ "$JSDOC" != "" ]] ; then
    echo "now building documentation"
    DOC="$BUILD/site/jsdocs"
    mkdir -p "$DOC"
    "$JSDOC" -d="$DOC" "$ROOT"/*/*.js
else
    echo '[WARNING] jsdoc is not available, skipping' >&2
fi
# Assemble the dated release directory.
RELEASENAME=fullproof-`date +%Y%m%d`
RELEASEDIR="$BUILD"/"$RELEASENAME"
mkdir -p "$RELEASEDIR"
cp -r "$BUILD"/js/ "$RELEASEDIR"/
# NOTE(review): tests $JSDOCS (with S) while the variable above is $JSDOC.
test -n "$JSDOCS" && cp -r "$BUILD"/site/jsdocs "$RELEASEDIR"/
cp "$ROOT"/README.md "$RELEASEDIR"/
cp "$ROOT"/LICENSE "$RELEASEDIR"/
cp -r "$BUILD"/site/examples "$RELEASEDIR"
cd "$BUILD"
zip -r "$RELEASENAME".zip "$RELEASENAME"
tar cvf "$RELEASENAME".tar "$RELEASENAME"
gzip "$RELEASENAME".tar
| true
|
ad92a4f979b6388a69b2bbb00bc95ed2b04dd2f9
|
Shell
|
BPMspaceUG/linux_config_script_files
|
/test.sh
|
UTF-8
| 644
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive helper: given "dhcp" or "static" as $1, print/collect the
# network configuration for an interfaces(5)-style stanza.
NETWORK=$1
case $NETWORK in
    dhcp)
        # NOTE(review): $IFACE is never defined in this script — it is either
        # expected from the environment or a bug; confirm with the caller.
        echo "iface $IFACE inet dhcp"
        ;;
    static)
        # Collect the static addressing parameters interactively.
        read -p "Please enter IP address:" IP
        read -p "Please enter Netmask:" NETMASK
        read -p "Please enter Gateway:" GATEWAY
        read -p "Please enter Primary DNS resolver:" DNS
        echo "IP: $IP"
        echo "NETMASK: $NETMASK"
        echo "GATEWAY: $GATEWAY"
        echo "DNS: $DNS"
        # Confirmation prompt; anything but y/Y aborts with status 1.
        read -p "Korrekt? (y|n)" ok
        case $ok in
            y|Y)
                echo "Haut"
                ;;
            n|N)
                echo "Abbruch"
                exit 1
                ;;
            *)
                echo "falsche Eingabe"
                exit 1
                ;;
        esac
        ;;
esac
| true
|
9c79576972c0b31385828015f0367de21cc24596
|
Shell
|
monotonemonk/arch_svntogit_community-
|
/ocrad/repos/community-x86_64/PKGBUILD
|
UTF-8
| 716
| 2.578125
| 3
|
[] |
no_license
|
# $Id$
# Maintainer: Giovanni Scafora <giovanni@archlinux.org>
pkgname=ocrad
pkgver=0.25
pkgrel=3
pkgdesc="An Optical Character Recognition program"
arch=('i686' 'x86_64')
url="http://www.gnu.org/software/ocrad/ocrad.html"
license=('GPL3')
depends=('gcc-libs' 'texinfo')
options=('staticlibs')
source=("http://ftp.gnu.org/gnu/${pkgname}/${pkgname}-${pkgver}.tar.lz")
md5sums=('da428daa81b481e13d21eeb2d4b985a8')
build() {
  cd "${srcdir}/${pkgname}-${pkgver}"
  ./configure --prefix=/usr \
              CXXFLAGS="${CXXFLAGS}" \
              LDFLAGS="${LDFLAGS}"
  make
}
package() {
  cd "${srcdir}/${pkgname}-${pkgver}"
  make DESTDIR="${pkgdir}" install install-man
  # Quote ${pkgdir} (build roots may contain spaces); the glob must stay
  # outside the quotes so gzip still expands the individual info pages.
  rm "${pkgdir}/usr/share/info/dir"
  gzip "${pkgdir}"/usr/share/info/*
}
| true
|
914f12b15ccfe1f045996c2fe5acf2cf7dca1df6
|
Shell
|
rrusnak1/nimbus
|
/home/bin/nimbus-version
|
UTF-8
| 224
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Resolve the Nimbus installation root (the parent of this script's
# directory) and delegate to the version-reporting helper, forwarding all
# command-line arguments.
PYTHON_EXE="/usr/bin/env python -Wignore::DeprecationWarning"
NIMBUS_HOME_REL="$(dirname "$0")/.."
NIMBUS_HOME=$(cd "$NIMBUS_HOME_REL" && pwd) || exit 1
export NIMBUS_HOME
# $PYTHON_EXE is intentionally unquoted so it splits into program + flag;
# "$@" (the original's bare $@ did not) preserves arguments with spaces.
$PYTHON_EXE "$NIMBUS_HOME/libexec/nimbus_version.py" "$@"
| true
|
010068bcfe85b81f4ebec96d898fcb7bb477bff9
|
Shell
|
jeffer-mendoza/env-install-snippet
|
/instalacion-entorno-linux.sh
|
UTF-8
| 5,169
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Script's inicialization of the tools need to webapp development "Auditoría de
# Proyectos Gore". This script will download all packages need and it will
# install and configure.
# This Script will install the following tools:
#   1. Spring Tool Suite v3.9.5
#   2. Apache Maven v3.5.4
#   3. Mysql v5.9
#   4. Mysql Workbench v6.3
#   5. Apache Tomcat v.8.5.30
# @Author Jefferson Mendoza <mendosajefferson@gmail.com>
# Install Directory
INSTALL_FOLDER="$HOME/auditoria-proyectos-gore-tools/"
# Maven Directory
MAVEN_FOLDER="$INSTALL_FOLDER/apache-maven-3.5.4"
# STS Directory
STS_FOLDER="$INSTALL_FOLDER/sts-bundle"
# Java JDK Directory
JAVA_JDK_FOLDER="$INSTALL_FOLDER/jdk1.8.0_181"
# Apache Tomcat Directory
TOMCAT_FOLDER="$INSTALL_FOLDER/apache-tomcat-8.5.32"
# Workspace Directory
WORKSPACE_FOLDER="$HOME/devel"
# Project Directory
PROJECT_FOLDER="$WORKSPACE_FOLDER/datactil-auditoria-de-proyectos-gore"
# Enviroment Variable File
# NOTE(review): the tilde is inside double quotes, so it is NOT expanded —
# later `[ ! -f "$FILE_SETUP" ]` tests the literal path "~/.gorerc" and the
# file is regenerated on every run. Probably meant "$HOME/.gorerc".
FILE_SETUP="~/.gorerc"
# Database Name
DATABASE_NAME="gore_app"
# Database User's Password
DATABASE_USER_PASSWORD="gore_app_password"
# Database User's Name
DATABASE_USER_NAME="gore_user"
conclusion="This script made: \n"
# Init install folder
if [ ! -d "$INSTALL_FOLDER" ]; then
    echo '[+] Creating folder' "$INSTALL_FOLDER"
    mkdir "$INSTALL_FOLDER"
fi
# the system must have installed the Java JDK 8u181
# If not, point the user at the Oracle download page and stop.
# NOTE(review): exits 0 even though this is a failed precondition — callers
# cannot distinguish success; consider exit 1.
if [ ! -d "$JAVA_JDK_FOLDER" ]; then
    echo 'This script need the Java JDK 8u181'
    echo 'For downloading: http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html'
    echo '1. Download Java Jdk 8u181'
    echo '2. Execute command: tar -xzvf jdk-8u181-linux-x64.tar.gz --directory ' $INSTALL_FOLDER
    xdg-open http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html &
    exit 0
fi
# Work out of /tmp for all downloads.
cd /tmp
# 1. Spring Tool Suite v3.9.5
# if STS folder doesn't exist then STS IDE will be downloaded and decompressed.
if [ ! -d "$STS_FOLDER" ]; then
    echo '[+] Downloading STS v3.9.5'
    wget https://download.springsource.com/release/STS/3.9.5.RELEASE/dist/e4.8/spring-tool-suite-3.9.5.RELEASE-e4.8.0-linux-gtk-x86_64.tar.gz
    tar -xzvf spring-tool-suite-3.9.5.RELEASE-e4.8.0-linux-gtk-x86_64.tar.gz --directory $INSTALL_FOLDER
    # creating Desktop Entry (heredoc is unquoted so $STS_FOLDER expands)
    cat << EOF > $HOME/.local/share/applications/sts.desktop
[Desktop Entry]
Version=3.9.5
Type=Application
Name=STS
Icon=$STS_FOLDER/sts-3.9.5.RELEASE/icon.xpm
Exec=$STS_FOLDER/sts-3.9.5.RELEASE/STS
Comment=Spring Tool Suite
Categories=Development;IDE;
Terminal=false
EOF
fi
# 2. MAVEN v3.5.4
# if Apache Maven folder doesn't exist then Maven will be downloaded and
# decompressed.
if [ ! -d "$MAVEN_FOLDER" ]; then
    echo '[+] Downloading Apache Maven v3.5.4'
    wget http://apache.uniminuto.edu/maven/maven-3/3.5.4/binaries/apache-maven-3.5.4-bin.tar.gz
    tar -xzvf apache-maven-3.5.4-bin.tar.gz --directory $INSTALL_FOLDER
fi
# if Enviroment Variable File doesn't exist in bashrc file then it will be added
if ! grep -q "source ~/.gorerc" $HOME/.bashrc; then
    echo '[+] Adding .gorerc in .bashrc'
    echo "source ~/.gorerc" >> $HOME/.bashrc
fi
# if Enviroment Variable File doesn't exist the it will be created with
# Maven and JAVA_HOME variable defined.
# NOTE(review): $FILE_SETUP holds a literal "~/.gorerc" (tilde not expanded),
# so this test is always true and the file is rewritten on every run.
if [ ! -f "$FILE_SETUP" ]; then
    cat << EOF > $HOME/.gorerc
export PATH=$MAVEN_FOLDER/bin:$PATH
export JAVA_HOME=$JAVA_JDK_FOLDER
EOF
fi
# 3. MYSQL SERVER v5.7
# Install mysql-server only when the package is not yet installed.
# The original condition (`dpkg-query -l ... | grep -c "no packages" -eq 0`)
# was malformed: grep parsed "-eq" as options and "0" as a file name, so the
# test always failed and the install branch never ran correctly.
if ! dpkg-query -W mysql-server >/dev/null 2>&1; then
    echo '[+] Downloading and installing mysql-server package v5.7'
    # mysql repository is updated
    curl -OL https://dev.mysql.com/get/mysql-apt-config_0.8.10-1_all.deb
    sudo dpkg -i mysql-apt-config*
    sudo apt-get update
    rm mysql-apt-config*
    # mysql-server is installed
    sudo apt-get install mysql-server
    echo "Please enter root user MySQL password!"
    read rootpasswd
    # NOTE(review): the password appears on the mysql command line and is
    # visible in the process list; consider MYSQL_PWD or a defaults file.
    mysql -uroot -p${rootpasswd} -e "CREATE DATABASE ${DATABASE_NAME} /*\!40100 DEFAULT CHARACTER SET utf8 */;"
    mysql -uroot -p${rootpasswd} -e "CREATE USER ${DATABASE_USER_NAME}@localhost IDENTIFIED BY '${DATABASE_USER_PASSWORD}';"
    mysql -uroot -p${rootpasswd} -e "GRANT ALL PRIVILEGES ON ${DATABASE_NAME}.* TO '${DATABASE_USER_NAME}'@'localhost';"
    mysql -uroot -p${rootpasswd} -e "FLUSH PRIVILEGES;"
    # 4. MYSQL WORKBENCH v6.3
    echo '[+] Downloading and installing mysql-workbench v6.30'
    sudo apt-get install mysql-workbench
else
    echo '[-] Mysql-server package is already installed in the system'
fi
# 5. APACHE TOMCAT v8.5.30
# if Apache Tomcat folder doesn't exist then it will be downloaded and
# decompressed.
if [ ! -d "$TOMCAT_FOLDER" ]; then
    echo '[+] Downloading Apache Tomcat v8.5.30'
    wget http://apache.uniminuto.edu/tomcat/tomcat-8/v8.5.32/bin/apache-tomcat-8.5.32.tar.gz
    tar -xzvf apache-tomcat-8.5.32.tar.gz --directory $INSTALL_FOLDER
fi
# 6. Clone Project
# Ensure the workspace exists, then clone the develop branch (requires SSH
# access to the private planio remote).
if [ ! -d "$WORKSPACE_FOLDER" ]; then
    mkdir "$WORKSPACE_FOLDER"
fi
if [ ! -d "$PROJECT_FOLDER" ]; then
    echo '[+] Cloning Planio Project'
    cd "$WORKSPACE_FOLDER"
    git clone git@digitgroup.plan.io:digitgroup/datactil-auditoria-de-proyectos-gore.git -b develop
fi
| true
|
393c901af489ad16d64feaf38787ffdd92e8c826
|
Shell
|
jeromefroe/dotfiles
|
/alfred/Alfred.alfredpreferences/workflows/user.workflow.0CB12A0B-D344-49CE-BD39-AE7C72EA585A/passwd.sh
|
UTF-8
| 1,693
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Alfred workflow helper: change the LastPass master password for $lpuser by
# driving `lpass passwd` (binary at $lppath) with expect, collecting the old
# and new passwords through native macOS dialogs (osascript).
#
# Environment expected from the Alfred workflow: lpuser, lppath.
# The passwords are exported so the expect heredoc can read them via $env(...).
export LPASS_DISABLE_PINENTRY=1
export CURRENT=
# NEW is seeded with a dummy value so the first NEW != CONFIRM loop test is true.
export NEW="x"
export CONFIRM=
# Get current password
CURRENT=$(osascript -e 'text returned of (display dialog "Current Master Password for '"${lpuser}"'" default answer "" with title "Enter Current Password" with hidden answer)')
# An empty answer (or Cancel) aborts without changing anything.
[ "${CURRENT}" == "" ] && exit
# Get new password; loop until the confirmation matches.
while [ "${NEW}" != "${CONFIRM}" ]; do
NEW=$(osascript -e 'text returned of (display dialog "Enter New Master Password for '"${lpuser}"'" default answer "" with title "Enter New Password" with hidden answer)')
[ "${NEW}" == "" ] && exit
CONFIRM=$(osascript -e 'text returned of (display dialog "Confirm New Master Password for '"${lpuser}"'" default answer "" with title "Confirm New Password" with hidden answer)')
[ "${CONFIRM}" == "" ] && exit
# Check for mismatch; offer to retry or cancel.
if [ "${NEW}" != "${CONFIRM}" ]; then
B=$(osascript -e 'button returned of (display alert "Password mismatch" as critical buttons { "Cancel", "Try Again" } default button "Try Again" cancel button "Cancel")')
[ "${B}" == "Try Again" ] || exit
fi
done
# Change password by scripting `lpass passwd`.  The heredoc delimiter is
# quoted ('EOF') so the shell does not expand anything; expect reads the
# exported passwords from its environment instead.  alert.sh shows progress
# notifications; the final send_user strips ANSI escapes from lpass output.
expect <<'EOF'
log_user 0
spawn "$env(lppath)" passwd
expect {
"Current Master Password" { send "$env(CURRENT)\r"; exp_continue }
"Confirm New Master Password" { send "$env(CONFIRM)\r"; exp_continue }
"New Master Password" { send "$env(NEW)\r"; exp_continue }
"Fetching data..." { exec ./alert.sh Fetching data; exp_continue }
"Re-encrypting" { exec ./alert.sh Re-encrypting }
}
expect {
"Uploading..." { exec ./alert.sh Uploading; exp_continue }
eof { send_user [ regsub -all {\e\[[[:digit:]]+[AJm]|:.\r|[\r\n]} $expect_out(buffer) "" ] }
}
EOF
| true
|
c864e5f6bdc1b59c60dcf3e3c2ca5048a0c29ad1
|
Shell
|
tdrl/init
|
/nukeitall.sh
|
UTF-8
| 410
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Wipe a fixed list of directories in parallel, logging all output.
# Prefers `srm` (secure remove) when available, falling back to `rm -rfP`.
TARGETS=(
    "${HOME}/Pictures/pix"
    "${HOME}/Pictures"
    "${HOME}/tmp"
#    /tmp/${USER}
#    ${HOME}/work
)
LOGFILE=/tmp/nuke.log
# zorch <path>... -- delete the given paths, appending all output to $LOGFILE.
# Fix: use "$@" rather than "$*" so each path stays a separate argument and
# paths containing spaces survive intact.
if [ -x /usr/bin/srm ]; then
    zorch() {
        nohup /usr/bin/srm "$@" >> "${LOGFILE}" 2>&1 ;
    }
elif [ -x /bin/rm ]; then
    zorch() {
        nohup /bin/rm -rfP -- "$@" >> "${LOGFILE}" 2>&1 ;
    }
fi
# Quote the array expansion so entries with spaces are not word-split,
# and run all removals concurrently in the background.
for t in "${TARGETS[@]}"; do
    zorch "${t}" &
done
| true
|
7af7d47db5bf977bf95aa2a6c949df3161ad5093
|
Shell
|
sjpribyl/Sys_Tools
|
/getinv
|
UTF-8
| 16,265
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
#
# getinv -- collect a hardware/software inventory of this host under
# $INV_DIR and (unless -l is given) rsync it to the central inventory host.
#
#TODO
#mysql
#apache conf
#samba
#netstat -p
#iptables
#selinux audit
#sshd_config?
#
INV_DIR="/var/lib/inventory"      # local staging directory for collected data
KEY_FILE="/root/.ssh/id_getinv"   # ssh key for pushing to the inventory host
KEY_USER="getinv"
INV_HOST="lxinv"
status=0                          # global exit status; set to 1 on any failure
export PATH="/sbin:/usr/sbin:/bin:/usr/bin"
# Argument parsing: -l means "local only", i.e. skip the final rsync upload.
# Fix: the original test `[ -z "${script}" -a "${1}" = "-l" ]` compared
# against a variable that is never set anywhere (always true) and used the
# obsolescent `-a` operator; both removed.
LOCAL=0
while (( "$#" )); do
    if [ "${1}" = "-l" ]; then
        LOCAL=1
    fi
    shift
done
# Per-distro tool locations.
# Fix: the lspci path variable was previously misspelled LSPIC; the only use
# site (`$LSPCI > $INV_DIR/lspci`, further below) therefore expanded empty and
# the lspci output was silently never captured.  Renamed to LSPCI here.
found=0
if [ -e "/etc/redhat-release" ]; then
    VGDISPLAY="/usr/sbin/vgdisplay"
    PVDISPLAY="/usr/sbin/pvdisplay"
    LVDISPLAY="/usr/sbin/lvdisplay"
    NTPQ="/usr/sbin/ntpq"
    LSPCI="/sbin/lspci"
    LSHW="/usr/sbin/lshw"
    ETHTOOL="/sbin/ethtool"
    GRUB_CFG="/boot/grub/menu.lst"
    found=1
elif [ -e "/etc/SuSE-release" ]; then
    VGDISPLAY="/sbin/vgdisplay"
    PVDISPLAY="/sbin/pvdisplay"
    LVDISPLAY="/sbin/lvdisplay"
    NTPQ="/usr/sbin/ntpq"
    LSHW="/usr/sbin/lshw"
    LSPCI="/sbin/lspci"
    ETHTOOL="/usr/sbin/ethtool"
    GRUB_CFG="/boot/grub/menu.lst"
    found=1
elif [ -e "/etc/debian_version" ]; then
    VGDISPLAY="/sbin/vgdisplay"
    PVDISPLAY="/sbin/pvdisplay"
    LVDISPLAY="/sbin/lvdisplay"
    NTPQ="/usr/bin/ntpq"
    LSHW="/usr/bin/lshw"
    LSPCI="/usr/bin/lspci"
    ETHTOOL="/usr/sbin/ethtool"
    GRUB_CFG="/boot/grub/grub.cfg"
    found=1
elif [ $found -ne 1 ]; then
    echo "Unknown Distro!!" > $INV_DIR/distro
    echo "Unknown Distro!!"
    echo "FAILURE!! #${LINENO}!!"
    status=1
fi
# Recreate the inventory staging area from scratch, then capture basic host
# identity: uptime, collector version, uname, ulimits, hostid, bcfg2 marker.
echo "Deleting old $INV_DIR"
/bin/rm -rf $INV_DIR/*
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Creating $INV_DIR"
/bin/mkdir -m 700 -p ${INV_DIR}
/usr/bin/uptime >$INV_DIR/uptime
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
# md5 of this script records which collector version produced the data.
/usr/bin/md5sum $0 > $INV_DIR/getinv_version
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting uname -a"
/bin/uname -a >$INV_DIR/uname
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
# NOTE(review): the message says "uname -i" but the command run is "uname -m".
echo "Getting uname -i"
/bin/uname -m >$INV_DIR/uname.arch
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting uname -r"
/bin/uname -r >$INV_DIR/uname.release
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting uname -v"
/bin/uname -v >$INV_DIR/uname.version
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting ulimit files"
/bin/cp -a /etc/security/limits.* $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting hostid"
hostid >$INV_DIR/hostid
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
# bcfg2 configuration-management marker, when present.
if [ -e "/var/lib/bcfg2/bcfg2" ]; then
/bin/cp /var/lib/bcfg2/bcfg2 $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
# Distro-specific collection: record the distro name/release file, the
# installed-package list, configured services, network config, package-manager
# configuration and an estimate of the OS install date.
echo "Getting Linux Distro"
if [ -e "/etc/redhat-release" ]; then
echo "RedHat" > $INV_DIR/distro
/bin/cp /etc/redhat-release $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting installed packages"
/bin/rpm -q -a > $INV_DIR/rpm
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting configured services"
/sbin/chkconfig --list > $INV_DIR/services
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
# Static route files are optional; only copy them when at least one exists.
ls /etc/sysconfig/network-scripts/route-* >/dev/null 2>&1
if [ $? -eq 0 ]; then
echo "Getting configured routes"
/bin/cp /etc/sysconfig/network-scripts/route-* $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
echo "Getting configured interfaces"
/bin/cp /etc/sysconfig/network-scripts/ifcfg-* $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting configured network"
/bin/cp /etc/sysconfig/network $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
if [ -e /etc/modprobe.conf ]; then
echo "Getting modprobe.conf"
/bin/cp -a /etc/modprobe.conf $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e /etc/yum.conf ]; then
echo "Getting yum.conf"
/bin/cp -a /etc/yum.conf $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e /etc/yum ]; then
echo "Getting yum config"
/bin/cp -a /etc/yum $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e /etc/yum.repos.d ]; then
echo "Getting yum repos"
/bin/cp -a /etc/yum.repos.d $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e /etc/sysconfig/rhn ]; then
echo "Getting rhn config"
/bin/cp -a /etc/sysconfig/rhn $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
echo "Getting install date"
#get the earliest package installed and calc the jdate
# The `bash -c "echo \$@"` trick drops the first word of $P (the package
# name, consumed as $0), leaving just the install-date string for date(1).
P=`/bin/rpm -qa --last | tail -1`
D=`/bin/bash -c "echo \\\$@" $P `
date +%s -d "$D" > $INV_DIR/install_date
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting /etc/inittab"
/bin/cp -a /etc/inittab $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e "/etc/SuSE-release" ]; then
echo "SuSE" > $INV_DIR/distro
/bin/cp /etc/SuSE-release $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting installed packages"
/bin/rpm -q -a > $INV_DIR/rpm
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting configured services"
/sbin/chkconfig --list > $INV_DIR/services
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
# NOTE(review): these yum checks on SuSE look copied from the RedHat branch.
if [ -e /etc/yum.conf ]; then
echo "Getting yum.conf"
/bin/cp -a /etc/yum.conf $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e /etc/yum ]; then
echo "Getting yum config"
/bin/cp -a /etc/yum $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
echo "Getting install date"
#get the earliest package installed and calc the jdate
P=`/bin/rpm -qa --last | tail -1`
D=`/bin/bash -c "echo \\\$@" $P `
date +%s -d "$D" > $INV_DIR/install_date
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting /etc/inittab"
/bin/cp -a /etc/inittab $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e "/etc/debian_version" ]; then
# Ubuntu ships /etc/lsb-release in addition to /etc/debian_version.
if [ -e /etc/lsb-release ]; then
echo "Ubuntu" > $INV_DIR/distro
/bin/cp /etc/lsb-release $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
else
echo "debian" > $INV_DIR/distro
fi
/bin/cp /etc/debian_version $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting installed packages"
/usr/bin/dpkg --get-selections > $INV_DIR/rpm
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting configured interfaces"
/bin/cp /etc/network/interfaces $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting configured hostname"
/bin/cp /etc/hostname $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting modprobe.d"
/bin/cp -a /etc/modprobe.d $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting install date"
# ctime of the oldest dpkg package list file approximates the install date.
stat -c %Z `ls /var/lib/dpkg/info/*.list -t | tail -1` > $INV_DIR/install_date
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting /etc/event.d"
/bin/cp -a /etc/event.d $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
# Kernel modules, resolver, mail/monitoring/auth configuration, cron,
# backups, and time synchronization (ntp/ptp) state.
echo "Getting lsmod"
/sbin/lsmod > $INV_DIR/lsmod
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting resolv.conf"
/bin/cp -a /etc/resolv.conf $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
if [ -e /etc/postfix ]; then
echo "Getting postfix config"
/bin/cp -a /etc/postfix $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e /etc/mail ]; then
echo "Getting mail config"
/bin/cp -a /etc/mail $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e /etc/ssmtp ]; then
echo "Getting ssmtp config"
/bin/cp -a /etc/ssmtp $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e /etc/nagios ]; then
echo "Getting nagios config"
/bin/cp -a /etc/nagios $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e /etc/ldap.conf ]; then
echo "Getting ldap config"
/bin/cp -a /etc/ldap.conf $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
echo "Getting nsswitch config"
/bin/cp -a /etc/nsswitch.conf $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting pam config"
/bin/cp -a /etc/pam.d $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
# Bootloader configuration; GRUB_CFG is set per-distro above.
if [ -e ${GRUB_CFG} ]; then
echo "Getting configured menu.lst"
/bin/cp ${GRUB_CFG} $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
echo "Getting passwd"
/bin/cp /etc/passwd $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting group"
/bin/cp /etc/group $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting cron"
/bin/cp -ar /var/spool/cron $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
# NetWorker backup index for this host, when the nsrinfo client is installed.
echo "Getting backup info"
if [ -e "/usr/sbin/nsrinfo" ]; then
/usr/sbin/nsrinfo -s chd-sv-backup01 `hostname | cut -d \. -f 1`| gzip >$INV_DIR/backup.gz
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
echo "Getting ntp status"
${NTPQ} -p >$INV_DIR/ntp
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting ntp.conf"
/bin/cp /etc/ntp.conf $INV_DIR/ntp.conf
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
if [ -e "/etc/sysconfig/ntpd" ]; then
echo "Getting system/ntpd"
/bin/cp /etc/sysconfig/ntpd $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -f /usr/sbin/ptpd ]; then
echo "Getting ptp status"
/usr/sbin/ptpd -k 0 -b lo >$INV_DIR/ptp
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
if [ -e "/etc/sysconfig/ptpd" ]; then
echo "Getting system/ptpd"
/bin/cp /etc/sysconfig/ptpd $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
fi
if [ -f /var/lib/ptp/ptp_check ]; then
/bin/cp /var/lib/ptp/ptp_check $INV_DIR
fi
# Network interfaces/routes, hardware listing (lshw), filesystems and LVM,
# run level, NFS exports, recent logins, security config and PCI devices.
echo "Getting configured network cards"
#/sbin/ifconfig -a > $INV_DIR/ifconfig
/sbin/ip addr show > $INV_DIR/ip_addr
echo "Getting configured routes"
/bin/netstat -rn > $INV_DIR/routes
/sbin/ip route show > $INV_DIR/ip_route
echo "Getting lshw.xml"
${LSHW} -xml > $INV_DIR/lshw.xml
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting lshw"
${LSHW} -short > $INV_DIR/lshw
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
#TODO parted /dev/sda print from lshw
echo "Getting fstab"
/bin/cp /etc/fstab $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
/bin/mount >$INV_DIR/mount
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting autofs"
/bin/cp -a /etc/auto.* $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting df"
/bin/df -TPa > $INV_DIR/df
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
# LVM state (colon-separated machine-readable output via -c).
if [ -e ${VGDISPLAY} ]; then
echo "Getting vgdisplay"
${VGDISPLAY} -c > $INV_DIR/vgdisplay
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting lvdisplay"
${LVDISPLAY} -c > $INV_DIR/lvdisplay
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting pvdisplay"
${PVDISPLAY} -c > $INV_DIR/pvdisplay
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
echo "Getting run level"
/usr/bin/who -a | grep run > $INV_DIR/runlevel
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
if [ -e "/usr/sbin/exportfs" ]; then
echo "Getting exportfs"
/usr/sbin/exportfs -v > $INV_DIR/exportfs
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e /etc/exports ]; then
echo "Getting exports"
/bin/cp /etc/exports $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
echo "Getting last 10 logins"
/usr/bin/last -10 > $INV_DIR/last
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
echo "Getting /etc/security"
/bin/cp -a /etc/security $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
if [ -e /usr/bin/facter ]; then
echo "Getting facter"
/usr/bin/facter > $INV_DIR/facter
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
echo "Getting lspci"
$LSPCI > $INV_DIR/lspci
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
# Vendor-specific firmware data: on HP ProLiant servers capture the BIOS
# settings (conrep) and iLO configuration; then generic DMI/BIOS tables,
# /proc cpu/mem info, and LLDP/CDP neighbor data (ladvdc) on physical boxes.
if [[ "`/usr/sbin/dmidecode -s system-manufacturer`" = "HP" || "`/usr/sbin/dmidecode -s system-manufacturer`" = "Hewlett-Packard" ]]; then
PROD=`/usr/sbin/dmidecode -s system-product-name`
if [[ $PROD = ProLiant* ]]; then
if [[ -e /opt/hp/conrep/conrep && -e /usr/sbin/dmidecode ]]; then
echo "Getting conrep"
/opt/hp/conrep/conrep -s -x/opt/hp/conrep/conrep.xml -s -f$INV_DIR/conrep.dat
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
echo "Getting ilo data"
/sbin/hponcfg -a -w /var/lib/inventory/ilo.xml
/sbin/hponcfg -w /var/lib/inventory/ilo-net.xml
#SJP if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
fi
if [ -e /usr/sbin/dmidecode ]; then
echo "Getting dmidecode"
/usr/sbin/dmidecode > $INV_DIR/dmidecode
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e /usr/sbin/biosdecode ]; then
echo "Getting biosdecode"
/usr/sbin/biosdecode > $INV_DIR/biosdecode
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e /proc/cpuinfo ]; then
echo "Getting cpuinfo"
/bin/cp /proc/cpuinfo $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
if [ -e /proc/meminfo ]; then
echo "Getting meminfo"
/bin/cp /proc/meminfo $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
CDPR=0;
HAVE_ETHTOOL=0;
# Skip neighbor discovery on VMware guests (no physical switch port).
if [ -e /usr/sbin/ladvdc -a "`/usr/sbin/dmidecode -s system-manufacturer`" != "VMware, Inc." ]; then
echo "Getting ladvdc"
/usr/sbin/ladvdc > $INV_DIR/ladvdc
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
#if [ -e /usr/bin/cdpr -a "`/usr/sbin/dmidecode -s system-manufacturer`" != "VMware, Inc." ]; then CDPR=1; fi
# Per-NIC details: iterate over network devices found in the lshw short
# listing (skipping InfiniBand, bonds and VLAN sub-interfaces) and capture
# ethtool link and driver information for each.
echo "Getting nic info..."
if [ -e ${ETHTOOL} ]; then HAVE_ETHTOOL=1;fi
for I in `grep network $INV_DIR/lshw | grep -v ib | grep -v bond | awk '{print $2}' | grep -v network`
do
if [[ ! ${I} =~ "\." ]]; then
if [ ${HAVE_ETHTOOL} -eq 1 ]; then
echo -e "\tethtool for $I"
${ETHTOOL} $I > $INV_DIR/$I.ethtool
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
${ETHTOOL} -i $I >> $INV_DIR/$I.ethtool
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
#${ETHTOOL} -k $I >> $INV_DIR/$I.ethtool
fi
#  if [ $CDPR -eq 1 -a `/bin/grep Link $INV_DIR/$I.ethtool | cut -f 2 -d ":"` == "yes" ]; then
#  echo -e "\tand cdpr"
#  /usr/bin/cdpr -d $I -t 120 > $INV_DIR/$I.cdpr
#  #if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
#  fi
fi
done
if [ -r /etc/builddb.env ]; then
/bin/cp /etc/builddb.env $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
echo "Kernel config"
/bin/cp /etc/sysctl.conf $INV_DIR
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
# "collected" records the overall collection status for the inventory server.
/bin/rm -f $INV_DIR/collected
echo $status > $INV_DIR/collected
# Upload the collected inventory unless -l (local only) was given.
if [ ${LOCAL} -eq 0 ]; then
#If not local
# Ftp/ssh data to servers
# local or rsync /var/data/Inventory/<hosts> inv@inv-host:/var/data/Inventory/<host>
echo "Sending File..."
#rsync -avzr --delete -e "ssh -o StrictHostKeyChecking=no -o BatchMode=yes -T -i $KEY_FILE" $INV_DIR/* $KEY_USER@$INV_HOST:data/`hostname | cut -d \. -f 1`
if [ -e /usr/bin/rsync ]; then
RSYNC=/usr/bin/rsync
else
RSYNC=/root/bin/rsync
fi
${RSYNC} -av --delete /var/lib/inventory/ rsync://root@${INV_HOST}.chicagotrading.com/Inventory/`hostname | cut -d \. -f 1`
if [ $? -ne 0 ]; then status=1; echo "FAILURE!! #${LINENO}!!"; fi
fi
exit $status;
| true
|
703437d8ce9d89ccbd80ceecaa4f5535958a220b
|
Shell
|
pbt001/dippindots
|
/bin/task
|
UTF-8
| 958
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Time tracking
# Usage:
#  - Start task:  task <TASK NAME>
#  - Stop task:   task stop
#  - Task status: task status
#
# State lives in $TASK_FILE as tab-separated lines:
#   <start-epoch>\t<name>[\t<elapsed-seconds>]
# A line without the third field is the currently running task.
# (Assumes the directory of $TASK_FILE already exists -- TODO confirm.)

TASK_FILE=~/work/_admin/tracking.txt
TASK=$1

if [ -z "$TASK" ]; then
    exit 1
fi

# If a task is currently active (last line has no end field), close it out,
# or just report it for `status`.
# Fixes: quote $TASK/$TASK_FILE so names with spaces work, and never pass
# task-derived data as a printf format string.
if [ -f "$TASK_FILE" ]; then
    LAST=$(tail -n 1 "$TASK_FILE")
    LAST_START=$(echo -e "$LAST" | cut -f1)
    LAST_TASK=$(echo -e "$LAST" | cut -f2)
    LAST_END=$(echo -e "$LAST" | cut -f3)

    if [ -z "$LAST_END" ]; then
        END_TIME=$(date +%s)
        ELAPSED_SEC=$(( END_TIME - LAST_START ))
        if [ "$TASK" = 'status' ]; then
            echo -e "$LAST_TASK\t$ELAPSED_SEC"
        else
            # Complete the open record by appending the elapsed time.
            printf '\t%s\n' "$ELAPSED_SEC" >> "$TASK_FILE"
            notify-send "${LAST_TASK} lasted ${ELAPSED_SEC}s"
        fi
    fi
fi

if [ "$TASK" = 'stop' ] || [ "$TASK" = 'status' ]; then
    exit 0
else
    notify-send "Started ${TASK}"
    START_TIME=$(date +%s)
    # Open a new record (no trailing newline until the task is stopped).
    printf '%s\t%s' "$START_TIME" "$TASK" >> "$TASK_FILE"
fi
| true
|
6f6fa6cad5fa7e33c3004d416b81b43bb78d1198
|
Shell
|
cntrump/ffmpeg-static
|
/build_libsdl2.sh
|
UTF-8
| 308
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# Build and install a static-only SDL2 from the upstream git HEAD.
# env.sh (sourced below) is expected to provide CPU_NUM for the parallel build.
set -e
source ./env.sh
# Clone only once; re-runs reuse the existing checkout.
if [ ! -d ./libsdl2 ];then
git clone --depth=1 https://github.com/libsdl-org/SDL.git libsdl2
fi
cd ./libsdl2
./autogen.sh
# --without-x: no X11 backend; --enable-hidapi: HID device support;
# static library only (no shared objects).
./configure --prefix=/usr/local --without-x --enable-hidapi --enable-static --disable-shared
make -j ${CPU_NUM} && sudo make install
cd ..
| true
|
989d0f5d8030d5028d88d2da678f713a20a5b226
|
Shell
|
106aRoboCupSim/simatch
|
/src/robot_code/nubot_hwcontroller/magenta_hwctr.sh
|
UTF-8
| 642
| 2.890625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Spawn one nubot_hwcontroller node per magenta-team robot (count and name
# prefix come from the ROS parameter server), then wait; Ctrl-C (SIGINT)
# kills all spawned controllers before ROS node cleanup.
### source the workspace
source ../devel/setup.bash
source devel/setup.bash
### Get parameters and init
declare -i j   # NOTE(review): declared but never used below
declare -i kill_num
magenta_prefix=$(rosparam get /magenta/prefix)
magenta_num=$(rosparam get /magenta/num)
kill_num=0
### spawn magenta robots
for ((i=1; i<=magenta_num; ++i))
do
rosrun nubot_hwcontroller nubot_hwcontroller_node ${magenta_prefix}${i} __name:=${magenta_prefix}_nubot_hwcontroller${i} &
PIDS[kill_num]=$!   # remember each controller's PID so SIGINT can kill it
let "kill_num=kill_num+1"
sleep 0.5   # stagger node startup
done
### kill those background processes
trap 'kill ${PIDS[*]}' SIGINT
wait
rosnode cleanup
| true
|
272445ae918e3fb1df99324cc0162ac319f0ce8a
|
Shell
|
BurntSushi/dotfiles
|
/bin/terminal-tmux-remote
|
UTF-8
| 279
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
# Open a terminal running tmux session "remote" on the given ssh host
# (attaching if the session already exists, thanks to `new -A`).
#
# Usage: terminal-tmux-remote <host>
if [ -z "$1" ]; then
	echo "Usage: $(basename $0) <host>" >&2
	# Fix: previously the script fell through here and ran ssh with an
	# empty host; abort instead.
	exit 1
fi
host="$1"
# At work use gnome-terminal; elsewhere use the generic `terminal` wrapper.
if is-work; then
	exec gnome-terminal -- ssh -AY "$host" -t '. ~/.pathrc ; tmux' -u new -A -s remote
else
	exec terminal -e ssh -AY "$host" -t '. ~/.pathrc ; tmux' -u new -A -s remote
fi
| true
|
27938fc2b1e70cb8f050fbed68da71872c4aaf58
|
Shell
|
terx/mtrn
|
/mtrn-cmd-proof
|
UTF-8
| 579
| 3.875
| 4
|
[] |
no_license
|
#!/bin/sh
# Record and verify a remote machine's ssh host keys ("proof" check).
#
# Usage: mtrn-cmd-proof ssh_key <host> <port>
#
# The reference keys are stored in ../../proofs/ssh_key_<host>_<port>; a
# non-empty diff file ssh_key_<host>_<port> in the CWD signals a key change.
# Exit codes: 0 = keys match, 1 = keys differ, 2 = scan failed.
CMD=$1

case "$CMD" in
ssh_key)
	HOST=$2
	PORT=$3
	TEMP=$(mktemp)
	FILE=../../proofs/ssh_key_${HOST}_${PORT}
	DIFF=ssh_key_${HOST}_${PORT}
	# Fix: the scan previously ignored $HOST/$PORT and always probed
	# localhost:9824, even though the proof/diff filenames are per-target;
	# probe the requested host and port instead.
	if ssh-keyscan -t rsa1,rsa,dsa -p "$PORT" "$HOST" > "$TEMP"; then
		if [ -e "$FILE" ]; then
			diff "$FILE" "$TEMP" > "$DIFF"
		else
			# First contact: record the keys and leave an empty diff.
			cat "$TEMP" > "$FILE"
			: > "$DIFF"
		fi
		# Refresh the stored proof when it is writable.
		if [ -w "$FILE" ]; then
			cat "$TEMP" > "$FILE"
		fi
		rm "$TEMP"
		if [ -s "$DIFF" ]; then
			echo "WARNING: ssh host key differs!"
			exit 1
		else
			exit 0
		fi
	else
		echo "WARNING: cannot get ssh host keys"
		exit 2
	fi
	;;
esac
|
69c8987f24e30b20eba6855700daa3e1092a6e33
|
Shell
|
varelajuanma/FacultadDeInformatica---UNLP
|
/2do año/ISO/2016/Practicas/ISO Practicas Resueltas/scripts/ej20.sh
|
UTF-8
| 626
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Ejercicio 20/21: comportamiento de una pila (stack) sobre un array de bash.
# Push 10 elements, pop 3, print the stack length, then print its contents.
#
# Invariant: `index` always points at the next free slot, so it is also the
# number of elements currently on the stack.

vector=()
index=0

# Print every element currently on the stack, one per line.
print()
{
	for elem in "${vector[@]}"
	do
		echo $elem
	done
}

# Echo the number of elements on the stack.
# (Fix: the original did `return index+1`, which both over-counted by one and
# abused the exit status, which cannot carry values above 255.)
function length
{
	echo $index
}

# Remove the top element.
# (Fix: the original unset vector[index] -- the *empty* slot above the top --
# before decrementing, so the first pop did nothing and later pops left
# `index` out of sync with the array.  Decrement first, then unset the top.)
pop()
{
	(( index > 0 )) || return 1   # empty stack: nothing to pop
	let index--
	unset 'vector[index]'
}

# Push $1 on top of the stack.
push()
{
	vector[index]=$1
	let index++
}

push 1
push 2
push 3
push 4
push 5
push 6
push 7
push 8
push 9
push 0
# Pop exactly 3 elements (the extra "no-op" pop that compensated for the old
# bug is no longer needed).
pop
pop
pop
# Print the stack length, then the remaining elements.
length
print
|
f2dc7b66378885d6b2afcd1eaec3a13d38e33808
|
Shell
|
MIRTK/BAtCh
|
/bin/gen-parameter-search-refinement-stage
|
UTF-8
| 17,212
| 3.78125
| 4
|
[] |
no_license
|
#! /bin/bash
# Resolve this script's identity and installation root, then load the shared
# daggen/mirtk helper library and the default workflow configuration.
appid="$(basename "$0")"
appdir="$(cd "$(dirname "$BASH_SOURCE")" && pwd)"
# Step up one directory from the script's location to the installation root.
appdir="$(cd "$appdir/.." && pwd)"
source "$appdir/lib/daggen/mirtk.sh" || {
echo "Failed to import daggen/mirtk module!" 1>&2
exit 1
}
# default.sh is expected to define the settings used below (lbldir/lblpre,
# clsdir/clspre, imgdir, registration parameters, ...).
source "$appdir/etc/config/default.sh" || {
error "Failed to load default configuration"
}
# ==============================================================================
# help
# ==============================================================================
# ------------------------------------------------------------------------------
# Print the command-line usage/help text for this script to stdout.
print_help()
{
cat <<HELP
usage: $appid <sublst> <refimg> <dagdir> <outdir> [options]
This script generates the DAGMan worflow description used to test different
registration parameter settings for use in the atlas construction. The DAGMan
workflow can be directly submitted to the batch queuing system HTCondor for
distributed parallel execution. Alternatively, the workflow can be executed
sequentially on the local machine or submitted to another supported batch
queuing system (e.g., SLURM) using the run-workflow script.
Unlike the original gen-parameter-search workflow, this parameter search
is based only the iterative refinement of the image to atlas deformations
after an initial global normalization to the reference image. This template
constructions scales linearly in the number of test input images. Similar
parameters to those found with this search should also be suitable for
direct registration of input image pairs, if a pairwise atlas construction
is desired. Otherwise, these parameters may be the basis for those used
with "mirtk construct-atlas" instead of "gen-workflow".
Arguments:
  sublst   Text file with image IDs.
  refimg   Reference image used for global normalization.
  dagdir   Top-level directory for DAGMan workflow files.
  outdir   Top-level directory for workflow output files.
Options:
  -c, -config <config.sh>
      Custom configuration file. (default: none)
  -g, -global-normalization
      Only execute global normalization steps. (default: false)
  -h, -help
      Print help and exit.
  -v, -verbose
      Enable verbose output messages.
HELP
}
# ==============================================================================
# configuration
# ==============================================================================

# Require the four mandatory positional arguments.
[ $# -ge 4 ] || {
  print_help
  exit 1
}

# Pre-scan for -c/-config so a custom configuration is loaded before any
# other option is interpreted.
# Fix: the scan previously started at o=0 and looped while o < $#, i.e. it
# inspected $0 (the script path) and never looked at the last argument;
# positional parameters are $1..$#.
o=1
while [ $o -le $# ]; do
  case "${!o}" in
    -c|-config)
      let a=o+1; optarg config ${!o} "${!a}";
      source "$config" || {
        error "Failed to load configuration from file: $config"
      }
      ;;
  esac
  let o++
done

# Split the label/class directory+prefix settings into dirname/basename
# parts, moving any directory components of the prefix into the directory.
# The "id" suffix is a placeholder that is stripped again afterwards.
_lbldir="$lbldir"
_lblpre="$lblpre"
lbldir="$(dirname "$_lbldir/${_lblpre}id")"
lblpre="$(basename "$_lbldir/${_lblpre}id")"
lblpre="${lblpre:0:${#lblpre}-2}"

_clsdir="$clsdir"
_clspre="$clspre"
clsdir="$(dirname "$_clsdir/${_clspre}id")"
clspre="$(basename "$_clsdir/${_clspre}id")"
clspre="${clspre:0:${#clspre}-2}"

# Positional arguments, then option defaults.
verbose=0
sublst="$1"; shift
refimg="$1"; shift
dagdir="$1"; shift
outdir="$1"; shift
update=false
global_normalization_only=false

[ -n "$sublst" ] || error "Missing sublst argument!"
[ -n "$refimg" ] || error "Missing refimg argument!"
[ -n "$dagdir" ] || error "Missing dagdir argument!"
[ -n "$outdir" ] || error "Missing outdir argument!"
# Read the subject/image IDs into the subids array.
read_sublst subids "$sublst"

# Parse the remaining options.
while [ $# -gt 0 ]; do
  case "$1" in
    -c|-config) shift; ;; # already loaded
    -u|-update) update=true; ;;
    -g|-global-normalization) global_normalization_only=true; ;;
    -h|-help|--help) print_help; exit 0; ;;
    -v|-verbose) let verbose++; ;;
    *) error "Unknown argument: $1"; ;;
  esac
  shift
done

# Split the reference image path into directory, basename and NIfTI suffix.
refdir="$(dirname "$refimg")"
refid="$(basename "$refimg")"
refpre=""
regex="(.*)((.nii|.hdr|.img)(.gz)?)$"
if [[ $refid =~ $regex ]]; then
  refid="${BASH_REMATCH[1]}"
  refsuf="${BASH_REMATCH[2]}"
else
  error "This workflow only supports NIfTI images!"
fi

log="$dagdir/progress.log"
# ==============================================================================
# auxiliary functions
# ==============================================================================
# Map a tissue/probability-map name ($2) to its label number(s) and store the
# result in the caller's variable named by $1 (via the sourced `upvar`
# helper).  Unrecognized names yield an empty list.
pbmap_name_to_tissue_labels()
{
  local tissue_labels=()
  if [ "$2" = "csf" ]; then
    tissue_labels=(1)
  elif [ "$2" = "cgm" ]; then
    tissue_labels=(2)
  elif [ "$2" = "wm" ]; then
    tissue_labels=(3)
  elif [ "$2" = "vents" ]; then
    tissue_labels=(5)
  elif [ "$2" = "dgm" ]; then
    tissue_labels=(7 9)
  elif [ "$2" = "bs_cb" ] || [ "$2" = "bs+cb" ]; then
    tissue_labels=(6 8)
  fi
  local "$1" && upvar $1 "${tissue_labels[@]}"
}
# Build a DAG splice that tests one registration-parameter setting by
# iterative template construction:
#   1. average all (transformed) images into a new template,
#   2. register every image to that template,
# repeated $refine times, then map images and label maps into atlas space and
# compute overlap/consistency quality measures.
#
# Arguments: $1 = splice name, $2 = parent node, remaining args are extra
#            register_node parameters under test.
# Globals:   reads subids, imgdir/imgpre/imgsuf, nrm* (normalization ref/dofs),
#            cls*/lbl* (label maps), outdir and the registration settings
#            sourced from the configuration.
iterative_template_construction_test()
{
local name="$1"; shift
local parent="$1"; shift
local params=("$@")
local refine=8
local prev_node=''
local defdofs=''
local nxtdofs=''
local definvs=true
local resdof=false
local n=1
begin_dag "$name" -splice
# iteratively perform the following two steps:
# 1. update current template image
# 2. register all images to this template
begin_dag 'calc_dofs' -splice
while [ $n -le $refine ]; do
nxtdofs="$outdir/$name/dofs/2.$n"
# update template images
average_images_node "make_template_$n" -parent "$prev_node" \
-subjects "${subids[@]}" \
-imgdir "$imgdir" \
-imgpre "$imgpre" \
-imgsuf "$imgsuf" \
-refid "$nrmref" \
-refdir "$nrmdir" \
-refpre "$nrmpre" \
-refsuf "$nrmsuf" \
-dofin1 "$nrmdofs" \
-dofin2 "$defdofs" -dofinv2 "$definvs" \
-dofin3 "$defdofs" -dofid3 "$resdof" \
-output "$outdir/$name/atlas/template_$n.nii.gz" \
-padding "$bgvalue" \
-threshold "$threshold" \
-normalize "$normalization" \
-rescale "$rescaling" \
-sharpen "$sharpen"
# register images to current average image
begin_dag "reg_template_$n" -parent "make_template_$n" -splice
register_node "reg_images" \
-subjects "${subids[@]}" \
-imgdir "$imgdir" \
-imgpre "$imgpre" \
-imgsuf "$imgsuf" \
-tgtdir "$outdir/$name/atlas" \
-tgtid "template_$n" \
-tgtsuf ".nii.gz" \
-model "$model" \
-mffd "$mffd" \
-symmetric "$symmetric" \
-spacing "$spacing" \
-bending "$bending" \
-elasticity "$elasticity" \
-elasticity-lambda "$elasticity_lambda" \
-elasticity-mu "$elasticity_mu" \
-jacobian "$jacobian" \
-interp "$interpolation" \
-sim "$similarity" \
-bins "$bins" \
-radius "$radius" \
-bgvalue "$bgvalue" \
-inclbg "$inclbg" \
-hdrdofs "$nrmdofs" \
-dofins "identity" \
-dofdir "$nxtdofs" \
-domain "$nrmdir/$nrmpre$nrmref$nrmsuf" \
-maxres "$resolution" \
-levels "$levels" \
"${params[@]}"
# Average of all deformations; subtracted/used in the next template update
# to keep the template unbiased.
resdof="average"
average_dofs_node "average_dofs" -parent "reg_images" \
-subjects "${subids[@]}" \
-dofins "$nxtdofs" \
-dofdir "$nxtdofs" \
-dofid "$resdof" \
-dofs
end_dag
prev_node="reg_template_$n"
defdofs="$nxtdofs"
let n++
done
end_dag
# apply computed transformations to map all images to reference space
transform_image_node "transform_images_to_atlas" -parent "calc_dofs" \
-subjects "${subids[@]}" \
-imgdir "$imgdir" \
-imgpre "$imgpre" \
-imgsuf "$imgsuf" \
-tgtid "$nrmref" \
-tgtdir "$nrmdir" \
-tgtpre "$nrmpre" \
-tgtsuf "$nrmsuf" \
-dofin1 "$nrmdofs" \
-dofin2 "$defdofs" -dofinv2 \
-dofin3 "$defdofs" -dofid3 "$resdof" \
-outdir "$outdir/$name/images" \
-outpre "" \
-outsuf ".nii.gz" \
-padding "$bgvalue" \
-invert
transform_image_node "transform_tissue_labels_to_atlas" -parent "calc_dofs" \
-subjects "${subids[@]}" \
-imgdir "$clsdir" \
-imgpre "$clspre" \
-imgsuf "$clssuf" \
-tgtid "$nrmref" \
-tgtdir "$nrmdir" \
-tgtpre "$nrmpre" \
-tgtsuf "$nrmsuf" \
-dofin1 "$nrmdofs" \
-dofin2 "$defdofs" -dofinv2 \
-dofin3 "$defdofs" -dofid3 "$resdof" \
-outdir "$outdir/$name/labels/tissues" \
-outpre "" \
-outsuf ".nii.gz" \
-invert
transform_image_node "transform_all_labels_to_atlas" -parent "calc_dofs" \
-subjects "${subids[@]}" \
-imgdir "$lbldir" \
-imgpre "$lblpre" \
-imgsuf "$lblsuf" \
-tgtid "$nrmref" \
-tgtdir "$nrmdir" \
-tgtpre "$nrmpre" \
-tgtsuf "$nrmsuf" \
-dofin1 "$nrmdofs" \
-dofin2 "$defdofs" -dofinv2 \
-dofin3 "$defdofs" -dofid3 "$resdof" \
-outdir "$outdir/$name/labels/structures" \
-outpre "" \
-outsuf ".nii.gz" \
-invert
# evaluate pairwise label overlap
# TODO: Use mirtk evaluate-label-overlap instead
#evaluate_overlap_node "evaluate_structure_overlap" -parent "transform_all_labels_to_atlas" \
#  -subjects "${subids[@]}" \
#  -imgdir "$outdir/$name/labels/structures" \
#  -imgpre "" \
#  -imgsuf ".nii.gz" \
#  -subdir false \
#  -metric tp fn fp tn fpr sensitivity specificity precision dsc jsc \
#  -outdir "$outdir/$name/overlap"
# evaluate voxel-wise quality measures over the z-score-normalized images
for mode in mean sdev entropy gini; do
aggregate_images_node "compute_$mode" -parent "transform_images_to_atlas" \
-subjects "${subids[@]}" \
-mode "$mode" \
-imgdir "$outdir/$name/images" \
-imgpre "$imgpre" \
-imgsuf "$imgsuf" \
-output "$outdir/$name/stats/$mode.nii.gz" \
-normalize "z-score" \
-padding "$bgvalue"
done
aggregate_images_node "compute_tissue_consistency" -parent "transform_tissue_labels_to_atlas" \
-subjects "${subids[@]}" \
-mode "label-consistency" \
-imgdir "$outdir/$name/labels/tissues" \
-imgpre "$clspre" \
-imgsuf "$clssuf" \
-output "$outdir/$name/stats/tissue_consistency.nii.gz"
aggregate_images_node "compute_structure_consistency" -parent "transform_all_labels_to_atlas" \
-subjects "${subids[@]}" \
-mode "label-consistency" \
-imgdir "$outdir/$name/labels/structures" \
-imgpre "$lblpre" \
-imgsuf "$lblsuf" \
-output "$outdir/$name/stats/structure_consistency.nii.gz"
end_dag
}
# ==============================================================================
# workflow
# ==============================================================================
begin_dag "parameter-search" -dagfile "$dagdir/all_tests.dag" -dagdir "$dagdir"
dofdir="$outdir/global/dofs"
# ----------------------------------------------------------------------------
# global normalization (translation, rotation, and scale)
begin_dag "normalization" -splice
register_node "rigid" \
-subjects "${subids[@]}" \
-tgtid "$refid" \
-tgtdir "$refdir" \
-tgtpre "$refpre" \
-tgtsuf "$refsuf" \
-imgdir "$imgdir" \
-imgpre "$imgpre" \
-imgsuf "$imgsuf" \
-dofdir "$dofdir/1.1_ini" \
-model "Similarity" \
-interp "$interpolation" \
-sim "NMI" \
-bins "64" \
-bgvalue "$bgvalue" \
-inclbg true \
-maxres "$resolution" \
-segdir "$segdir" \
-segmsk "bs+cb" .33 \
-maxstep 1 \
-levels 4 2
register_node "affine" -parent "rigid" \
-subjects "${subids[@]}" \
-tgtid "$refid" \
-tgtdir "$refdir" \
-tgtpre "$refpre" \
-tgtsuf "$refsuf" \
-imgdir "$imgdir" \
-imgpre "$imgpre" \
-imgsuf "$imgsuf" \
-dofins "$dofdir/1.1_ini" \
-dofdir "$dofdir/1.2_aff" \
-model "Affine" \
-par "Allow shearing" No \
-interp "$interpolation" \
-sim "NMI" \
-bins "64" \
-bgvalue "$bgvalue" \
-inclbg true \
-maxres "$resolution" \
-maxstep 1 \
-levels 3 1
register_node "affine_with_padding" -parent "affine" \
-subjects "${subids[@]}" \
-tgtid "$refid" \
-tgtdir "$refdir" \
-tgtpre "$refpre" \
-tgtsuf "$refsuf" \
-imgdir "$imgdir" \
-imgpre "$imgpre" \
-imgsuf "$imgsuf" \
-dofins "$dofdir/1.2_aff" \
-dofdir "$dofdir/1.3_ref" \
-model "Affine" \
-par "Allow shearing" No \
-interp "$interpolation" \
-sim "NMI" \
-bins "64" \
-bgvalue "$bgvalue" \
-inclbg false \
-maxres "$resolution" \
-maxstep 1 \
-levels 2 1
invert_dof_node "invert" -parent "affine_with_padding" \
-subjects "${subids[@]}" \
-dofins "$dofdir/1.3_ref" \
-dofdir "$dofdir/1.4_inv"
nrmdofs="$dofdir/1.4_inv"
end_dag
# ----------------------------------------------------------------------------
# generate population specific reference image/mask
nrmdir="$outdir/global/average"
nrmref="linear"
nrmpre=""
nrmsuf=".nii.gz"
average_images_node "make_average" -parent "normalization" \
-subjects "${subids[@]}" \
-refdir "$refdir" \
-refpre "$refpre" \
-refid "$refid" \
-refsuf "$refsuf" \
-imgdir "$imgdir" \
-imgpre "$imgpre" \
-imgsuf "$imgsuf" \
-dofdir "$nrmdofs" \
-output "$nrmdir/$nrmpre$nrmref$nrmsuf" \
-spacing "$resolution" \
-padding "$bgvalue" \
-normalize "zscore" \
-rescale "0 100" \
-sharpen "yes" \
-margin 5
if [ $global_normalization_only = true ]; then
transform_image_node "transform_images" -parent "normalization" \
-subjects "${subids[@]}" \
-imgdir "$imgdir" \
-imgpre "$imgpre" \
-imgsuf "$imgsuf" \
-tgtid "$nrmref" \
-tgtdir "$nrmdir" \
-tgtpre "$nrmpre" \
-tgtsuf "$nrmsuf" \
-dofins "$nrmdofs" \
-outdir "$outdir/global/images" \
-outpre "" \
-outsuf ".nii.gz" \
-padding "$bgvalue" \
-invert
end_dag
exit 0
fi
# last test id / counter
i=0
params="$outdir/params.tsv"
mkdir -p "$outdir"
echo -e "name\tmffd\tmodel\tsymmetric\tsim\tbins\tspacing\tbending\telasticity\telasticity_lambda\telasticity_mu\tjacobian\tinclbg\tinterp" > "$params"
# ----------------------------------------------------------------------------
# iterative template constructions with different regularization weights
begin_dag "grid_search" -parent "make_average" -splice
mffd='None'
model='SVFFD'
symmetric=true
inclbg=false
interp='Linear with padding'
sim='nmi'
bins=64
elasticity_mu=1
for spacing in 2.0; do
for jacobian in 1e-4 1e-5 1e-6 0e-0; do
for bending in 1e-2 5e-3 1e-3 5e-4 1e-4; do
for elasticity in 1e-2 1e-3 1e-4 1e-5 0e-0; do
for elasticity_lambda in 0.0 1.5; do
let i++
name="params_$(printf %03d $i)"
echo "$name: mffd=$mffd, model=$model, sym=$symmetric, sim=$sim, bins=$bins, ds=$spacing, be=$bending, le=$elasticity, lambda=$elasticity_lambda, mu=$elasticity_mu, jl=$jacobian, inclbg=$inclbg, interp=$interp"
echo -e "$name\t$mffd\t$model\t$symmetric\t$sim\t$bins\t$spacing\t$bending\t$elasticity\t$elasticity_lambda\t$elasticity_mu\t$jacobian\t$inclbg\t$interp" >> "$params"
iterative_template_construction_test "$name" "make_average" \
-model "$model" \
-mffd "$mffd" \
-similarity "$sim" \
-bins "$bins" \
-spacing "$spacing" \
-bending "$bending" \
-elasticity "$elasticity" \
-elasticity-lambda "$elasticity_lambda" \
-elasticity-mu "$elasticity_mu" \
-jacobian "$jacobian" \
-symmetric "$symmetric" \
-interp "$interp" \
-inclbg "$inclbg"
done; done; done; done; done
end_dag
# ----------------------------------------------------------------------------
# TODO: refine grid search for ranges selected based on initial tests
end_dag
| true
|
f09ac86126ca7a4bd65ee02a503a8ca5a7765503
|
Shell
|
BluJay-Dev/learning
|
/jays_scripts/exitcodes.sh
|
UTF-8
| 403
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Learning scratch file: three commented-out examples of inspecting a
# command's exit status ($?). Uncomment one section at a time to experiment.
# Nothing executes as-is — every example below is commented out.
# Example 1: exit code of ls on a non-existent path
#ls /not/here
#echo "$?" '
# Example 2: branch directly on $? right after ping
# ping exit code
#HOST="googlerty.com"
#ping -c 1 $HOST
#if [ "$?" -eq "0" ]
#then
#	echo "$HOST reachable"
#else
#	echo "$HOST unreachable."
#fi
# Example 3: capture $? into a variable first — safer, because $? is
# clobbered by every subsequent command.
# Variable exit code
#HOST="google.com"
#ping -c 1 $HOST
#RETURN_CODE="$?"
#if [ "$RETURN_CODE" -ne "0" ]
#then
#	echo "$HOST unreachable"
#else
#	echo "$HOST reachable"
#fi
| true
|
b3226c2ab6645b629175d7b3ed6057e1d929736c
|
Shell
|
JGRennison/dotfiles
|
/home/bin/netnsnat
|
UTF-8
| 2,474
| 3.890625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Copyright © Jonathan G. Rennison 2014 <j.g.rennison@gmail.com>
# License: New BSD License, see BSD-LICENSE.txt
# This is loosely based on https://github.com/amonakov/vpn-netns/blob/master/vpn.sh
# and http://gigawhitlocks.com/2014/08/18/network-namespaces.html
#
# netnsnat: create (default action), tear down (-u), or temporarily block
# (-d) / unblock (-e) a NAT-ed network namespace "natns<N>" connected to the
# host through a veth pair, then exec a shell inside the namespace.
#   -n N   namespace number (default 0); selects the 10.199.199.x /31 pair
#   -u     undo: delete the namespace, veth pair and MASQUERADE rule
#   -d     disable: append iptables DROP rules for the host-side veth
#   -e     enable: delete the DROP rules added by -d
#   -i IF  additionally accept FORWARD traffic between IF and the veth
if [ "`id -u`" -ne 0 ]; then
	echo "You are not currently root, this may not work"
fi
num=0
undo=
disable=
enable=
options=0
iface=
while getopts "n:udei:" opt; do
	case $opt in
		n)
			num="$OPTARG"
			;;
		u)
			undo=1
			((options++))
			;;
		d)
			disable=1
			((options++))
			;;
		e)
			enable=1
			((options++))
			;;
		i)
			iface="$OPTARG"
			;;
		?)
			exit 1
			;;
	esac
done
# -u, -d and -e are mutually exclusive actions
if [ "$options" -gt 1 ]; then
	echo "Cannot set more than one of -u, -d, -e"
	exit 1
fi
# Namespace N owns a dedicated /31: host side = 10.199.199.(2N),
# namespace side = 10.199.199.(2N+1).
name=natns${num}
addr1=10.199.199.$(( (num * 2) ))
addr2=10.199.199.$(( (num * 2) + 1 ))
# Append DROP rules that cut all traffic through the host-side veth.
function disable_ns() {
	iptables -A OUTPUT -o veth${num}.0 -j DROP
	iptables -A INPUT -i veth${num}.0 -j DROP
	iptables -A FORWARD -i veth${num}.0 -j DROP
}
# Delete the DROP rules added by disable_ns (mirror image of -A with -D).
function enable_ns() {
	iptables -D OUTPUT -o veth${num}.0 -j DROP
	iptables -D INPUT -i veth${num}.0 -j DROP
	iptables -D FORWARD -i veth${num}.0 -j DROP
}
if [ -n "$undo" ]; then
	# Best-effort removal of any leftover DROP rules before tearing down.
	enable_ns 2> /dev/null
	ip netns del $name
	ip link del veth${num}.0
	iptables -t nat -D POSTROUTING -s $addr2 -d 0.0.0.0/0 -j MASQUERADE
	echo "Network namespace down: $name"
elif [ -n "$disable" ]; then
	disable_ns
elif [ -n "$enable" ]; then
	enable_ns
else
	# If the namespace already exists, just enter it with a shell
	# (as the invoking sudo user when available).
	ip netns add $name || {
		echo "Network namespace already up: $name"
		exec ip netns exec $name ${SUDO_USER:+ sudo -u $SUDO_USER} bash
	}
	ip link add veth${num}.0 type veth peer name veth${num}.1
	ip addr add $addr1/31 dev veth${num}.0
	ip link set veth${num}.0 up
	ip link set veth${num}.1 netns $name
	ip netns exec $name ip addr add $addr2/31 dev veth${num}.1
	ip netns exec $name ip link set veth${num}.1 up
	ip netns exec $name ip link set lo up
	ip netns exec $name ip route add default via $addr1
	iptables -t nat -A POSTROUTING -s $addr2 -d 0.0.0.0/0 -j MASQUERADE
	if [ -n "$iface" ]; then
		iptables -A FORWARD -i "$iface" -o veth${num}.0 -j ACCEPT
		iptables -A FORWARD -o "$iface" -i veth${num}.0 -j ACCEPT
	fi
	sysctl net.ipv4.ip_forward=1
	# Per-namespace resolv.conf (ip-netns bind-mounts /etc/netns/<name>/):
	# replace any loopback resolver with a public one reachable from the ns.
	mkdir -p /etc/netns/$name
	sed -e "s/127\..*\..*\..*/8.8.8.8/" < /etc/resolv.conf > /etc/netns/$name/resolv.conf
	echo "Network namespace up: $name"
	exec ip netns exec $name ${SUDO_USER:+ sudo -u $SUDO_USER} bash
fi
| true
|
9d5f2034cb113b45689eba404bdc9463ffc0af08
|
Shell
|
kwpav/dotfiles
|
/x/.xinitrc
|
UTF-8
| 1,123
| 2.75
| 3
|
[] |
no_license
|
#!/bin/sh
# X session startup: merge X resources/keymaps, run the distro's xinitrc.d
# snippets, set keyboard/monitor options, then exec the window manager.
userresources=$HOME/.Xresources
usermodmap=$HOME/.Xmodmap
sysresources=/etc/X11/xinit/.Xresources
sysmodmap=/etc/X11/xinit/.Xmodmap
# merge in defaults and keymaps
if [ -f $sysresources ]; then
    xrdb -merge $sysresources
fi
if [ -f $sysmodmap ]; then
    xmodmap $sysmodmap
fi
if [ -f "$userresources" ]; then
    xrdb -merge "$userresources"
fi
if [ -f "$usermodmap" ]; then
    xmodmap "$usermodmap"
fi
# start some nice programs
if [ -d /etc/X11/xinit/xinitrc.d ] ; then
	for f in /etc/X11/xinit/xinitrc.d/?*.sh ; do
		[ -x "$f" ] && . "$f"
	done
	unset f
fi
# Remap Caps Lock to act as Ctrl
setxkbmap -option 'ctrl:nocaps'
# If the main (ultrawide) monitor is connected, force its mode/refresh rate
main=DisplayPort-0
if xrandr | grep "$main connected"; then
    xrandr --output "$main" --mode 2560x1080 --rate 74.99
fi
# nitrogen --head=0 --set-zoom-fill wallpapers/night.png
# nitrogen --head=1 --set-zoom-fill wallpapers/night.png
# nitrogen --head=2 --set-zoom-fill wallpapers/night.png
# nitrogen --set-scaled wallpapers/atlantisnebula10-5745-1200.png
# nitrogen --set-scaled wallpapers/cathedralpillars.jpg
# xsetroot -solid "#5e81ac" # the darkest 'Frost' color from nord
# Hand the session over to the window manager (qtile kept as alternative)
exec stumpwm
#exec qtile
| true
|
133e8bb33581dfc0919be6ad5378531cb48f36fb
|
Shell
|
ennovatenow/precision-git
|
/create_pipeline_script.sh
|
UTF-8
| 339
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build a runnable pipeline script from a pipeline definition file.
#
# Usage: create_pipeline_script.sh <pipeline-name>
# Requires (from the caller's environment):
#   PIPELINE_FOLDER     - directory holding <pipeline>.txt and the output script
#   PRECISION100_FOLDER - directory holding create_container_script.sh
#   CONTAINER_FOLDER    - root directory of the generated container scripts
#
# For every container listed in <pipeline>.txt, generate that container's
# script and append its invocation to <pipeline>.sh, then mark it executable.
PIPELINE=$1
PIPELINE_SCRIPT="$PIPELINE.sh"
# NOTE(review): assumes one container name per line in <pipeline>.txt.
# The original word-split on any whitespace — confirm the file convention
# if containers may be listed several to a line.
while IFS= read -r container || [ -n "$container" ]; do
    [ -n "$container" ] || continue    # skip blank lines
    "$PRECISION100_FOLDER/create_container_script.sh" "$container"
    echo "$CONTAINER_FOLDER/$container/container.sh" >> "$PIPELINE_FOLDER/$PIPELINE_SCRIPT"
done < "$PIPELINE_FOLDER/$PIPELINE.txt"
chmod u+x "$PIPELINE_FOLDER/$PIPELINE_SCRIPT"
| true
|
e6719e256fbe341b84b03eb1059ccfb5b3107b75
|
Shell
|
luis--ramirez/docker-bitbucket
|
/bitbucket/bin/start-bitbucket.sh
|
UTF-8
| 869
| 3.71875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Launcher for Atlassian Bitbucket: starts the bundled Elasticsearch first,
# then the web application, forwarding all CLI arguments to both helpers.
# resolve links - $0 may be a softlink - stolen from catalina.sh
PRG="$0"
# Follow the symlink chain until PRG is the real script path; relative link
# targets are resolved against the directory of the current link.
while [ -h "$PRG" ]; do
  ls=`ls -ld "$PRG"`
  link=`expr "$ls" : '.*-> \(.*\)$'`
  if expr "$link" : '/.*' > /dev/null; then
    PRG="$link"
  else
    PRG=`dirname "$PRG"`/"$link"
  fi
done
# Directory containing this script (after resolving symlinks); the sibling
# start/stop helpers live next to it.
PRGDIR=`dirname "$PRG"`
# Start Elasticsearch, then start webapp
echo -------------------------------------------------------------------------------
echo Starting Atlassian Bitbucket and bundled Elasticsearch
echo To start Atlassian Bitbucket on its own, run start-webapp.sh instead
echo -------------------------------------------------------------------------------
$PRGDIR/start-search.sh $@
$PRGDIR/start-webapp.sh $@
# If user starts Bitbucket in FG, should kill search here
# (foreground mode only returns when the webapp exits, so stop search then).
if [ "$1" = "-fg" ] || [ "$1" = "run" ] ; then
  $PRGDIR/stop-search.sh $@
fi
| true
|
a58353ad3df668575ca759502956ebddfe138fe5
|
Shell
|
omacchi/OfficeProject
|
/Raspi_codes/kanai/video/record.sh
|
UTF-8
| 468
| 2.90625
| 3
|
[] |
no_license
|
# Record the local mjpg-streamer feed to timestamped, segmented MP4 files.
# FPS and recording duration come from load_setting.py, which prints them
# comma-separated (e.g. "25,10" = 25 fps, 10-minute segments).
result=$(/home/pi/python-venv/kanai/bin/python /home/pi/load_setting.py)
IFS=',' read -r FPS TIME _ <<< "$result"   # FPS, minutes; ignore extra fields
TIME=$((TIME * 60))          # 録画時間 (秒) — segment length, minutes -> seconds
SIZE=${2:-"640x480"}         # 解像度 (固定) — resolution, 2nd CLI arg or default
# h264_omx is the Raspberry Pi hardware encoder; one output file per $TIME
# seconds, named by the wall-clock start time of each segment.
ffmpeg -i http://localhost:8080/?action=stream -r "$FPS" -s "$SIZE" \
        -an -vcodec h264_omx \
        -f segment -segment_time "$TIME" \
        -segment_format_options movflags=+faststart -reset_timestamps 1 \
        -strftime 1 "/home/pi/ssd/video_data/%Y-%m-%d_%H-%M-%S.mp4"
| true
|
01a7137ba28227f08d20fea8e86d8ce0410d2f9a
|
Shell
|
mohankumargn/linuxscripts
|
/listusers
|
UTF-8
| 212
| 2.59375
| 3
|
[] |
no_license
|
#! /bin/bash
# Author: Mohan
# Date: 08/08/2020
# Description: This script lists the users who logged in today (per `last`)
# Modified: 08/08/2020
# `last` prints timestamps like "Sat Aug  8" with the day-of-month padded to
# two characters. The original built the pattern with `date | awk`, which
# collapsed that double space to a single one and therefore never matched
# single-digit days. date's %e specifier produces the same space-padded day.
# NOTE(review): assumes util-linux `last` output format — confirm on target.
today=$(date '+%a %b %e')
last | grep -F -- "$today" | awk '{print $1}'
| true
|
b25063f7ba3e664152d7b05c28babe41a1209a7a
|
Shell
|
samfishman/dotfiles
|
/box/clever-vagrant/sh/zshrc
|
UTF-8
| 643
| 3
| 3
|
[] |
no_license
|
# oh-my-zsh plugins to load
plugins=(autopep8 pep8 coffee fabric jira npm)
# support git-repo
unset gr
# Completion for goto_project: offer the basename of every sub-directory of
# the (whitespace-separated) directories listed in $CODE_DIRS.
_goto_project_compl() {
  local repos
  repos=()
  for d in $CODE_DIRS; do
    for f in $d/*; do
      if [ -d $f ]; then
        repos+="${f##*/}"
      fi
    done
  done
  _describe "repos" repos
}
compdef _goto_project_compl goto_project
# Completion for aviator: targets come from `aviator --list`.
_aviator_compl() {
  local targets
  targets=($(aviator --list 2>/dev/null))
  _describe "aviator targets" targets
}
compdef _aviator_compl aviator
# Completion for ark: app list is cached in the global $ark_apps because
# `ark apps -q` is slow to run on every TAB press.
_ark_compl() {
  # cache ark apps 4maxspeed
  if [[ -z "$ark_apps" ]]; then
    ark_apps=($(ark apps -q))
  fi
  _describe "ark apps" ark_apps
}
compdef _ark_compl ark
| true
|
2a4028bffad4ef240a8adebc8ffbebca25cee992
|
Shell
|
kgspace/pretrain
|
/gen_doc.sh
|
UTF-8
| 379
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Regenerate the API documentation with pdoc3 and copy it into the sibling
# "docs" repository checkout (../docs).
docsrepo="../docs"
# whether docs repository exist
if [ ! -d "$docsrepo" ]; then
    echo 'docs not exist. please make sure you clone the repository'
    # A missing docs checkout is a failure, not success — the original
    # exited 0 here, which hid the error from callers/CI.
    exit 1
fi
echo 'start to generate document'
pdoc3 --html beta_rec || exit 1
echo 'generate finish, copy to docs ...'
cp -r html/beta_rec/* "$docsrepo"
echo 'done! Please switch to docs and commit changes'
| true
|
c09b1681a5626f205447ba441f5c357525d4ffd8
|
Shell
|
dunguyenn/backup-openstack
|
/security-backup-v2/security_passfile.sh
|
UTF-8
| 254
| 2.765625
| 3
|
[] |
no_license
|
# Expect/Tcl script (despite the .sh-style repo path): run ccrypt over every
# file listed in link.txt, answering its passphrase prompts automatically.
# SECURITY NOTE(review): the passphrase is hard-coded in plain text below —
# consider reading it from the environment or a protected file instead.
set timeout 20
# Run ccrypt on one file, feeding the passphrase to both "key" prompts
# (ccrypt asks twice for confirmation).
proc check_link {link} {
  spawn ccrypt $link
  expect "key";
  send "123456a@\r"
  expect "key";
  send "123456a@\r"
  expect eof
}
# Process each line (one file path per line) of link.txt.
set fp [open link.txt r]
while {[gets $fp line] != -1} {
  check_link $line
}
close $fp
| true
|
62dd1219bd2024df486a503fee23be6485dccea4
|
Shell
|
anatol/archpackages
|
/warp-git/PKGBUILD
|
UTF-8
| 744
| 2.65625
| 3
|
[] |
no_license
|
# Maintainer: Anatol Pomozov <anatol.pomozov@gmail.com>
# Arch Linux PKGBUILD for warp (VCS package tracking upstream git HEAD,
# hence the -git suffix and md5sums=('SKIP')).
pkgname=warp-git
pkgver=r46.0f8ddc4
pkgrel=1
pkgdesc='A fast preprocessor for C and C++'
arch=(i686 x86_64)
url='https://github.com/facebook/warp'
license=(custom)
depends=(gcc-libs)
makedepends=(git gdc libphobos-devel)
source=(git+https://github.com/facebook/warp.git)
md5sums=('SKIP')
# Version derived from git history: r<commit count>.<short hash>.
pkgver() {
  cd warp
  printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
build() {
  cd warp
  make
}
# Install the built binaries plus the (custom, boost-like) license file.
package() {
  cd warp
  install -d "$pkgdir"/usr/bin
  install fwarp fwarpdrive_gcc4_7_1 fwarpdrive_gcc4_8_1 fwarpdrive_clang3_2 fwarpdrive_clang3_4 fwarpdrive_clangdev "$pkgdir"/usr/bin
  # boost-like license
  install -D -m644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
| true
|
360d2c85482da752a0749b33e1f5fac7ae8f9f31
|
Shell
|
dromao/dmvpn-spoke
|
/spoke-start.sh
|
UTF-8
| 1,209
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Deployment script
# Tested with Tails 1.4
# DMVPN spoke bootstrap: installs OpenNHRP + racoon/ipsec-tools, drops the
# prepared configuration and certificates in place, creates the keyed GRE
# tunnel interface and (re)starts all services. Must run as root.
# NOTE(review): TUNNEL_SPOKE_IP / TUNNEL_NETMASK below look like template
# placeholders substituted before running — confirm before use.
export DEBIAN_FRONTEND=noninteractive
# Configure serial interface
stty -F /dev/ttyUSB0 19200 clocal cs8 -cstopb -parenb
#Install rng-tools and configure RNG
apt-get update
apt-get install rng-tools -y
cp files/configuration/rng-tools /etc/default/rng-tools
service rng-tools restart
# Install dependencies and build tools
apt-get install racoon ipsec-tools build-essential libc-ares-dev pkg-config -y
# Install OpenNHRP
cd opennhrp-0.14.1
make install
cd ..
# Copy configuration files
cp files/configuration/opennhrp.conf /etc/opennhrp/opennhrp.conf
cp files/configuration/racoon.conf /etc/racoon/racoon.conf
cp files/configuration/ipsec-tools.conf /etc/ipsec-tools.conf
cp files/configuration/ferm.conf /etc/ferm/ferm.conf
# Copy keys' directory
cp files/certs/* /etc/racoon/certs/
# Load GRE kernel module
modprobe ip_gre
# Create GRE interface (keyed GRE as used by the DMVPN hub)
ip tunnel add gre1 mode gre key 1234 ttl 64
ip addr add TUNNEL_SPOKE_IP/TUNNEL_NETMASK dev gre1
ip link set gre1 up
# Restart services
service rng-tools restart
service racoon restart
service setkey restart
service ferm restart
# Start OpenNHRP
/usr/sbin/opennhrp -d
echo -e "\nCompleted!\n"
| true
|
311ced984b5d1f6e0b83b8ec10ceb1000bb06c0f
|
Shell
|
sean-smith-sites/portfolio
|
/generateContent.sh
|
UTF-8
| 617
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate the portfolio gallery HTML: one card <div> per row of content.csv.
# Each row is expected to look like:  <image-url>,"<description>"
# Output is written to stdout.
while IFS= read -r a; do
    # Both fields are comma-separated. The original extracted the URL with a
    # whitespace-based awk split (default FS), which on typical CSV rows kept
    # the comma/description attached to $1; split on ',' for both fields.
    url=$(echo "$a" | awk -F ',' '{print $1}')
    description=$(echo "$a" | awk -F ',' '{print $2}' | sed -e 's/"//g')
    echo -e "
        <!--$description-->
        <div class=\"w3-third\">
          <div class=\"seanCell w3-mobile w3-card w3-display-container\">
            <div class=\"card\">
              <img class=\"cardImg display-middle\" src=$url style=\"cursor:pointer\" onclick=\"onClick(this)\" class=\"w3-hover-opacity\">
              <div class=\"display-bottommiddle descriptionContainer\">
                <span class=\"description\">$description</span>
              </div>
            </div>
          </div>
        </div>
    ";
done < content.csv
| true
|
9d3cfaace71a86d2281e3ec0a4988c4bd1cc5c44
|
Shell
|
liuq901/500-lines
|
/ci/test_runner_script.sh
|
UTF-8
| 304
| 3.09375
| 3
|
[] |
no_license
|
# CI test-runner prep: put the given repository working copy at the
# requested commit, failing loudly at each step.
#   $1 - path to the repository working copy
#   $2 - commit hash to test
REPO=$1
COMMIT=$2
# run_or_fail <message> <command...> (defined in run_or_fail.sh) runs the
# command and aborts with <message> when it exits non-zero.
source $PWD/run_or_fail.sh
run_or_fail "Repository folder not found !" pushd "$REPO" 1> /dev/null
run_or_fail "Could not clean repository" git clean -d -f -x
run_or_fail "Could not call git pull" git pull
run_or_fail "Could not update to given commit hash" git reset --hard "$COMMIT"
| true
|
03234c9f4ba702f46c9980daab682a338a303801
|
Shell
|
Deepomatic/dmake
|
/dmake/utils/dmake_deploy_ssh
|
UTF-8
| 880
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Usage:
#   dmake_deploy_ssh TMP_DIR APP_NAME USER HOST SSH_PORT
#
# Result:
#   Will deploy on a server via SSH
#
# Copies TMP_DIR to ~/APP_NAME on HOST (removing any previous copy first) and
# runs its start_app.sh there with sudo. StrictHostKeyChecking is disabled,
# so the remote host key is trusted blindly (CI convenience).
test "${DMAKE_DEBUG}" = "1" && set -x
# NOTE(review): dmake_fail is provided by the surrounding dmake tooling, not
# defined here. This error path also prints "exit 1" for the caller to eval.
if [ $# -ne 5 ]; then
    dmake_fail "$0: Wrong arguments"
    echo "exit 1"
    exit 1
fi
set -e
TMP_DIR=$1
APP_NAME=$2
USER=$3
HOST=$4
SSH_PORT=$5
# Start script
chmod +x ${TMP_DIR}/start_app.sh
# Copy to server
ssh -p ${SSH_PORT} -tt -o StrictHostKeyChecking=no ${USER}@${HOST} "rm -rf ~/${APP_NAME}"
echo "Running: scp -r -P ${SSH_PORT} -o StrictHostKeyChecking=no ${TMP_DIR} ${USER}@${HOST}:~/${APP_NAME}"
scp -r -P ${SSH_PORT} -o StrictHostKeyChecking=no ${TMP_DIR} ${USER}@${HOST}:~/${APP_NAME}
CMD="~/${APP_NAME}/start_app.sh"
echo "Running: ssh -p ${SSH_PORT} -tt -o StrictHostKeyChecking=no ${USER}@${HOST} \"sudo $CMD\""
ssh -p ${SSH_PORT} -tt -o StrictHostKeyChecking=no ${USER}@${HOST} "sudo $CMD"
| true
|
7e16251b1538a6cd1fad177fee438e265af9710a
|
Shell
|
kevin85421/UIUC_ECE438_TestCases
|
/mp1/test/test_server.sh
|
UTF-8
| 581
| 3.015625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Integration test for the mp1 HTTP server: fetch every file in test/input
# through the server — once with an explicit port, once without — and diff
# each download against the original file.
ECE438_HOME=$(git rev-parse --show-toplevel)
HTTPSERVER_PORT=80
SERVER_IP='http://172.17.0.2'
cd "$ECE438_HOME/mp1" || exit 1
make
cd test/input || exit 1
# run_case <base-url>: download each input file from <base-url>/test/input/
# into ../output and report PASS/FAIL per file via a byte-for-byte diff.
run_case() {
    local base=$1
    local FILE
    for FILE in *; do
        wget -O ../output "$base/test/input/$FILE" -q
        if diff ../output "$FILE" > /dev/null
        then
            echo "PASS: $FILE"
        else
            echo "FAIL: $FILE"
        fi
    done
}
echo "[Testcase] URI with port"
run_case "$SERVER_IP:$HTTPSERVER_PORT"
echo "[Testcase] URI without port"
run_case "$SERVER_IP"
| true
|
374d6344fe99f2263787e0e5bc5f9a044fc28234
|
Shell
|
bxlcity/firebox
|
/firebox
|
UTF-8
| 11,862
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# =============================================================
# CONFIGURATION
# =============================================================
version="v1.3"
snet="wlan0"
dnsip="127.0.0.1"
sdir="/opt/.FireBox/"
sdir2="/opt/.Sandbox/"
mac="00:30:7b:c1:19:37"
ocatdir="/var/lib/tor/onioncat"
PyPort="80" #Port For Python Server ( for cmd : firefox -s )
PyDir="/opt/.FireBox/www/"
PKG_DATA_DIR="/usr/share/dnscrypt-proxy"
# =============================================================
# Derived values. NOTE(review): version/smac/netf are not referenced in
# the visible code paths (netf/smac only appear in a commented-out line) —
# kept for manual experimentation, presumably.
smac="--mac=$mac"
stor="/etc/init.d/tor"
spriv="/etc/init.d/privoxy"
netf="--net=$snet --netfilter=/etc/firejail/nolocal.net"
# Command prefix used by dnsgui: run dnscrypt-proxy inside a firejail
# sandbox; the resolver name and listen address are appended per choice.
dnscrypt="firejail --private=$sdir --profile=/usr/local/etc/firejail/dnscrypt-proxy.profile dnscrypt-proxy -R"
# OnionCat hostname is read at startup — fails silently if tor/onioncat
# is not installed yet.
ocathost=$(cat $ocatdir/hostname)
# =============================================================
# COLORS
# =============================================================
Bold=$(tput bold)
Underline=$(tput sgr 0 1)
Reset=$(tput sgr0)
# Regular Colors
Red=$(tput setaf 1)
Green=$(tput setaf 2)
Yellow=$(tput setaf 3)
Blue=$(tput setaf 4)
Purple=$(tput setaf 5)
Cyan=$(tput setaf 6)
White=$(tput setaf 7)
# =============================================================
# COLORS Bold
# =============================================================
BRed=${Bold}$(tput setaf 1)
BGreen=${Bold}$(tput setaf 2)
BYellow=${Bold}$(tput setaf 3)
BBlue=${Bold}$(tput setaf 4)
BPurple=${Bold}$(tput setaf 5)
BCyan=${Bold}$(tput setaf 6)
BWhite=${Bold}$(tput setaf 7)
# =============================================================
# FUNCTIONS
# =============================================================
x_="______________________________________________________________________________"
# Draw a horizontal rule of '*' characters across the full terminal width.
print_line() {
  local cols
  cols="$(tput cols)"
  # printf pads an (absent) argument to $cols spaces; tr turns them into '*'.
  printf "%${cols}s\n" | tr ' ' '*'
}
# Clear the screen and print a bold section heading ($1) framed by two
# full-width rules, followed by a blank line.
print_title() {
  local heading=$1
  clear
  print_line
  echo -e "# ${Bold}${heading}${Reset}"
  print_line
  echo ""
}
# Download the current public DNSCrypt resolver list into $PKG_DATA_DIR,
# verifying its minisign signature when the minisign tool is available.
resolver(){
  RESOLVERS_FILE="${PKG_DATA_DIR}/dnscrypt-resolvers.csv"
  RESOLVERS_FILE_TMP="${RESOLVERS_FILE}.tmp"
  RESOLVERS_URL="https://download.dnscrypt.org/dnscrypt-proxy/dnscrypt-resolvers.csv"
  RESOLVERS_SIG_URL="${RESOLVERS_URL}.minisig"
  RESOLVERS_SIG_PUBKEY="RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3"
  echo "Updating the list of public DNSCrypt resolvers..."
  curl -L "$RESOLVERS_URL" -o "$RESOLVERS_FILE_TMP" || exit 1
  # Unusual idiom: the command substitution expands to nothing (output is
  # redirected), so the `if` tests the exit status of `which minisign`.
  if $(which minisign > /dev/null 2>&1); then
    curl -L -o "$RESOLVERS_FILE_TMP.minisig" "$RESOLVERS_SIG_URL" || exit 1
    minisign -V -P "$RESOLVERS_SIG_PUBKEY" -m "$RESOLVERS_FILE_TMP" || exit 1
    mv -f "${RESOLVERS_FILE_TMP}.minisig" "${RESOLVERS_FILE}.minisig"
  fi
  # Atomically replace the old list only after download (and verification).
  mv -f "$RESOLVERS_FILE_TMP" "$RESOLVERS_FILE"
  echo "${BGreen}Done"
  echo "${BWhite}"
}
# Print the command-line usage summary (colour variables come from the
# CONFIGURATION/COLORS section above).
helps() {
  echo "Usage : ${BGreen}firebox -f ${BWhite} ( start firefox sandboxed) "
  echo "Options: ${BGreen}firebox -p cmd ${BWhite} ( to start application with proxychains sandbox /opt/.FireBox/)"
  echo "${BGreen}firebox -ff ${BWhite} ( to start firefox with proxychains sandboxed )"
  echo "${BGreen}firebox -f2 ${BWhite} ( to start firefox on other sandbox /opt/.Sandbox/ )"
  echo "${BGreen}firebox -o ${BWhite} ( to start OnionCat sandboxed)"
  echo "${BGreen}firebox -g ${BWhite} ( to start google-chrome sandboxed)"
  echo "${BGreen}firebox -v ${BWhite} ( to start virtalbox sandboxed)"
  echo "${BGreen}firebox -d ${BWhite} ( to start dnscrypt-proxy sandboxed)"
  echo "${BGreen}firebox -t ${BWhite} ( to start terminal sandboxed )"
  echo "${BGreen}firebox -tt ${BWhite} ( to start terminal sandboxed OverlayFS )"
  echo "${BGreen}firebox -j ${BWhite} ( to join terminal sandboxed )"
  echo "${BGreen}firebox -jj ${BWhite} ( to join terminal sandboxed OverlayFS )"
  echo "${BGreen}firebox -h ${BWhite} ( to see cmd help)"
  echo "${BGreen}firebox -x ${BWhite} ( to start terminator sandboxed over tor)"
  echo "${BGreen}firebox -r cmd ${BWhite} ( to run application Sandboxed)"
  echo "${BGreen}firebox -rr cmd ${BWhite} ( to run application Sandboxed over tor)"
  echo "${BGreen}firebox -s ${BWhite} ( to Start ServerWeb Port default:80 & dir on $PyDir )"
  echo "${BGreen}firebox -c ${BWhite} ( to Clean Sandbox One with bleachbit )"
  echo "${BGreen}firebox -c2 ${BWhite} ( to Clean Other Sandbox with bleachbit )"
}
# Show the FireBox banner (ASCII art logo + "starting sandboxed" notice).
about()
{
  print_title "${BGreen}Welcome to FireBox Quick Start\n"
  echo ""
  echo "${BWhite} _/\/\____________________/\/\________________/\/\______/\/\_________________ "
  echo " _/\/\________/\/\__/\/\__/\/\______/\/\/\/\__________/\/\/\/\/\__/\/\__/\/\_ "
  echo " _/\/\/\/\______/\/\/\____/\/\____/\/\________/\/\______/\/\______/\/\__/\/\_ "
  echo " _/\/\__/\/\____/\/\/\____/\/\____/\/\________/\/\______/\/\________/\/\/\/\_ "
  echo " _/\/\/\/\____/\/\__/\/\__/\/\/\____/\/\/\/\__/\/\/\____/\/\/\__________/\/\_ "
  echo " _________________________________________________________________/\/\/\/\___ "
  echo ""
  echo ${White}$x_
  echo ""
  echo "${BBlue}[+][INFO]::Start ${Cyan}FireBox ${BGreen}Sandboxed "
  echo "${White}"
}
# Prompt with all arguments joined and wait for the user to press Enter.
# The typed line is left in $REPLY. -r keeps backslashes literal (the
# original plain `read` mangled them).
pause(){
   read -r -p "$*"
}
# Show the zenity list dialog of DNSCrypt resolvers. The selected row's
# first column (its number) is stored in the global $choix, which dnsgui
# then dispatches on. zenity's stderr noise is discarded.
function dnsmenu {
choix=$(zenity --list --width=350 --height=450 --title="Chose dnscrypt-proxy to Use"\
 --hide-header --text=""\
 --column="1" --column="2" --column="3" \
1 "dnscrypt.eu-dk" "Denmark"\
2 "dnscrypt.eu-nl" "Holland"\
3 "dnscrypt.org-fr" "France" \
4 "fvz-anyone" "Anycast"\
5 "fvz-anytwo" "Anycast"\
6 "cisco" "Anycast"\
7 "adguard-dns-ns1" "Anycast"\
8 "adguard-dns-family-ns1" "Family Protection Anycast" \
9 "ovpnse" "Sweden" \
10 "soltysiak" "Poland" \
11 "opennic-tumabox" "Germany" \
12 "ns0.dnscrypt.is" "in Reykjavík, Iceland" \
13 "ipredator" "Sweden" \
14 "ventricle.us" "New York" \
15 "d0wn-sg-ns1" "Singapore" \
16 "cs-ch" "Switzerland" \
17 "SecureDNS" "Amsterdam, Netherlands" \
18 "DnsCrypt-Resolvers" "Update dnscrypt-resolvers.csv" \
19 "Exit" "=>" \
2> /dev/null)
}
# Ask the user which resolver to use (dnsmenu) and launch dnscrypt-proxy
# for it, sandboxed via the $dnscrypt firejail prefix, listening on $dnsip.
# Choice 18 refreshes the resolver list instead; 19 quits. Note the
# unconditional `exit` before `done`: the while loop runs at most once.
dnsgui(){
while :
do
	dnsmenu 2> /dev/null
	if [ "$choix" = "1" ]; then
		$dnscrypt dnscrypt.eu-dk -a $dnsip
	elif [ "$choix" = "2" ]; then
		$dnscrypt dnscrypt.eu-nl -a $dnsip
	elif [ "$choix" = "3" ]; then
		$dnscrypt dnscrypt.org-fr -a $dnsip
	elif [ "$choix" = "4" ]; then
		$dnscrypt fvz-anyone -a $dnsip
	elif [ "$choix" = "5" ]; then
		$dnscrypt fvz-anytwo -a $dnsip
	elif [ "$choix" = "6" ]; then
		$dnscrypt cisco -a $dnsip
	elif [ "$choix" = "7" ]; then
		$dnscrypt adguard-dns-ns1 -a $dnsip
	elif [ "$choix" = "8" ]; then
		$dnscrypt adguard-dns-family-ns1 -a $dnsip
	elif [ "$choix" = "9" ]; then
		$dnscrypt ovpnse -a $dnsip
	elif [ "$choix" = "10" ]; then
		$dnscrypt soltysiak -a $dnsip
	elif [ "$choix" = "11" ]; then
		$dnscrypt opennic-tumabox -a $dnsip
	elif [ "$choix" = "12" ]; then
		$dnscrypt ns0.dnscrypt.is -a $dnsip
	elif [ "$choix" = "13" ]; then
		$dnscrypt ipredator -a $dnsip
	elif [ "$choix" = "14" ]; then
		$dnscrypt ventricle.us -a $dnsip
	elif [ "$choix" = "15" ]; then
		$dnscrypt d0wn-sg-ns1 -a $dnsip
	elif [ "$choix" = "16" ]; then
		$dnscrypt cs-ch -a $dnsip
	elif [ "$choix" = "17" ]; then
		$dnscrypt securedns -a $dnsip
	elif [ "$choix" = "18" ]; then
		resolver
	elif [ "$choix" = "19" ]; then
		exit
	fi
	exit
done
}
# ------------------------------------------------------------------
# Main entry: make sure the sandbox home exists, then dispatch on $1.
# Each branch launches one tool inside a firejail sandbox; see helps()
# for the user-facing summary of these options.
# ------------------------------------------------------------------
if [[ ! -e $sdir ]]; then
	mkdir $sdir
fi
if [[ $1 == '-ff' ]]; then
	# Firefox sandboxed AND routed through tor+privoxy via proxychains4;
	# tor/privoxy are started for the session and stopped afterwards.
	about
	$stor start
	echo
	$spriv start
	echo
	echo "${BWhite}[Start Firefox Sandboxed on tor]::${Green} OK"
	echo
	echo "${BWhite}[Directory Sandboxed is ]::${Green} $sdir"
	echo "${White}"
	firejail --private=$sdir --dns=$dnsip --profile=/usr/local/etc/firejail/firefox-esr.profile proxychains4 firefox -no-remote https://check.torproject.org/ 2> /dev/null
	echo "${BWhite}[SandBox is closed ]::${Green} OK"
	echo "${BWhite}"
	echo "Private Dir $sdir"
	$spriv stop
	echo
	$stor stop
	echo
	echo "Open Directory Sanboxed"
	firejail --profile=/usr/local/etc/firejail/nautilus.profile nautilus $sdir
	echo
elif [[ $1 == '-f' ]]; then
	# Firefox sandboxed (no tor), private home = $sdir
	about
	firejail --private=$sdir --dns=$dnsip --x11=xorg --profile=/usr/local/etc/firejail/firefox-esr.profile firefox -no-remote
elif [[ $1 == '-f2' ]]; then
	# Firefox in the secondary sandbox $sdir2
	if [[ ! -e $sdir2 ]]; then
		mkdir $sdir2
	fi
	about
	firejail --private=$sdir2 --dns=$dnsip --x11=xorg --profile=/usr/local/etc/firejail/firefox-esr.profile firefox -no-remote
elif [[ $1 == '-o' ]]; then
	# OnionCat in a sandboxed filesystem (fssb)
	echo "${BBlue}[+][INFO]::Start ${Cyan}OnionCat ${BGreen}Sandbox-filesystem "
	echo "${White}"
	fssb -m -- ocat -B -4 $ocathost
elif [[ $1 == '-s' ]]; then
	# Sandboxed HTTP file server on $PyPort serving $PyDir (or $2).
	# NOTE(review): SimpleHTTPServer is Python 2 only.
	if [[ ! -e $PyDir ]]; then
		mkdir $PyDir
	fi
	if [[ $2 == '' ]]; then
		echo "${BBlue}[+][INFO]::Start ${Cyan}Server Python ${BGreen}Sandboxed "
		echo "${White}"
		echo "${Cyan}You Pad to Server File : ${BGreen}$PyDir"
		echo "${White}"
		echo "${BGreen}http://127.0.0.1:$PyPort"
		echo "${White}"
		firejail --private=$PyDir python -m SimpleHTTPServer $PyPort
	else
		echo "${BBlue}[+][INFO]::Start ${Cyan}Server Python ${BGreen}Sandboxed "
		echo "${White}"
		echo "${Cyan}You Pad to Server File : ${BGreen}$2"
		echo "${White}"
		echo "${BGreen}http://127.0.0.1:$PyPort"
		echo "${White}"
		firejail --private=$2 python -m SimpleHTTPServer $PyPort
	fi
elif [[ $1 == '' ]] ; then
	helps
elif [[ $1 == '-h' ]] ; then
	helps
elif [[ $1 == '-d' ]] ; then
	# DNSCrypt proxy, resolver chosen via the zenity menu
	echo "${BBlue}[+][INFO]::Start ${Cyan}Dnscrypt-Proxy ${BGreen}Sandboxed "
	echo "${White}"
	#service dnscrypt-proxy restart dnscrypt.eu-dk ventricle.us dnscrypt.eu-nl fvz-anyone cisco fvz-anytwo adguard-dns-ns1
	#dnscrypt-proxy -R fvz-anyone -a 127.0.0.1
	dnsgui
elif [[ $1 == '-t' ]] ; then
	# Named sandboxed terminal ("firebox"); join it later with -j
	echo "${BBlue} Join Sandboxed Terminal => ${BGreen}firejail --join=firebox or firebox -j"
	echo "${BBlue} to close terminal sandboxed cmd => ${BGreen}exit"
	echo "${White}"
	firejail --overlay-tmpfs --x11=xorg --private=$sdir --profile=/usr/local/etc/firejail/lxterminal.profile --dns=$dnsip --name=firebox --noblacklist=/sbin --noblacklist=/usr/sbin
elif [[ $1 == '-tt' ]] ; then
	# OverlayFS sandbox ("fireboxs"); join it later with -jj
	echo "${BBlue} to see you directory go to cmd=> ${BGreen}cd /run/firejail/mnt/oroot/ "
	echo "${BBlue} Join Sandboxed Terminal => ${BGreen}firejail --join=firebox or firebox -j"
	echo "${BBlue} to close terminal sandboxed cmd => ${BGreen}exit"
	echo "${White}"
	#--net=$snet --netfilter=/etc/firejail/nolocal.net $smac
	firejail --overlay-tmpfs --x11=xorg --dns=$dnsip --name=fireboxs --noblacklist=/sbin --noblacklist=/usr/sbin
elif [[ $1 == '-j' ]] ; then
	firejail --join=firebox
elif [[ $1 == '-jj' ]] ; then
	firejail --join=fireboxs
elif [[ $1 == '-g' ]] ; then
	firejail --private=$sdir --dns=$dnsip --profile=/usr/local/etc/firejail/google-chrome.profile google-chrome --no-sandbox 2> /dev/null
elif [[ $1 == '-v' ]] ; then
	firejail --profile=/usr/local/etc/firejail/virtualbox.profile virtualbox
elif [[ $1 == '-r' ]] ; then
	# Run an arbitrary command ($2..$9) sandboxed
	echo
	firejail --private=$sdir --dns=$dnsip --profile=/usr/local/etc/firejail/lxterminal.profile --x11=xorg $2 $3 $4 $5 $6 $7 $8 $9
	echo
elif [[ $1 == '-rr' ]] ; then
	# Run an arbitrary command sandboxed and routed through tor (torsocks)
	$stor start
	echo
	sleep 2
	echo
	firejail --private=$sdir --dns=$dnsip --x11=xorg torsocks $2 $3 $4 $5 $6 $7 $8 $9
	echo
	$stor stop
elif [[ $1 == '-x' ]] ; then
	$stor start
	echo
	firejail --overlay-tmpfs --profile=/usr/local/etc/firejail/lxterminal.profile --dns=127.0.0.1 --noblacklist=/sbin --noblacklist=/usr/sbin torsocks /bin/bash
	$stor stop
	echo
elif [[ $1 == '-xx' ]] ; then
	$stor start
	echo
	firejail --overlay-tmpfs --x11=xorg --dns=127.0.0.1 --noblacklist=/sbin --noblacklist=/usr/sbin torsocks /bin/bash
	$stor stop
	echo
elif [[ $1 == '-p' ]]; then
	# Arbitrary command through tor+privoxy via proxychains4
	$stor start
	echo
	$spriv start
	echo
	sleep 2
	echo
	firejail --private=$sdir --profile=/usr/local/etc/firejail/lxterminal.profile --dns=$dnsip --x11=xorg proxychains4 $2 $3 $4 $5 $6 $7 $8 $9 2> /dev/null
	echo
	$spriv stop
	echo
	$stor stop
	echo
elif [[ $1 == '-c' ]]; then
	firejail --private=$sdir --profile=/usr/local/etc/firejail/bleachbit.profile bleachbit
elif [[ $1 == '-c2' ]]; then
	firejail --private=$sdir2 --profile=/usr/local/etc/firejail/bleachbit.profile bleachbit
fi
| true
|
c3f7ebe15addb313ac0e88ba7c82fa692469316c
|
Shell
|
ODEX-TOS/tools
|
/tos-helper/help.sh
|
UTF-8
| 2,517
| 3.390625
| 3
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
#!/usr/bin/env bash
# MIT License
#
# Copyright (c) 2020 Tom Meyers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies and substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# Print the usage/help text for the tos helper.
# Globals (assigned by the sourcing script, hence SC2154):
#   ORANGE, NC - terminal color escape sequences
#   name       - program name shown in the usage lines
# shellcheck disable=SC2154
function help() {
    # Fixed: the original expanded variables inside the printf *format*
    # string and passed "$ORANGE" "$name" "$NC" as extra arguments that
    # were never consumed (SC2059).  Data now goes through %s placeholders,
    # so a '%' or backslash in $name can no longer corrupt the output.
    printf '%s %s %sOPTIONS: bluetooth|gpg|info|network|screen|theme|volume -c -h -rs -u -Q -R- -S -iso\n\n' "$ORANGE" "$name" "$NC"
    printf '%sUSAGE:%s\n' "$ORANGE" "$NC"
    printf '%s bluetooth \t\t\t Control all bluetooth settings \n' "$name"
    printf '%s gpg \t\t\t Configuration for your gpg/pgp keys\n' "$name"
    printf '%s info \t\t\t Print information about your system/environment\n' "$name"
    printf '%s network \t\t\t Control all network related settings\n' "$name"
    printf '%s screen \t\t\t Control all screen related settings\n' "$name"
    printf '%s theme \t\t\t Control all theme related settings\n' "$name"
    printf '%s volume \t\t\t Control the volume\n' "$name"
    printf '%s shell \t\t\t Run an application in an isolated environment to keep the host system clean\n' "$name"
    printf '%s -c | --cypto \t\t generate a crypto key \n' "$name"
    printf '%s -c | --cypto <user@ip> \t copy over your ssh key to a remote computer \n' "$name"
    printf '%s -h | --help \t\t Show this help message \n' "$name"
    printf '%s -p | --profile <picture>\t Set the user profile picture \n' "$name"
    printf '%s -t | --true-color \t\t Test the true-color capability of your terminal \n' "$name"
    printf '%s -Q \t\t\t\t Query the local database of packages \n' "$name"
    printf '%s -R \t\t\t\t Remove software from the system \n' "$name"
    printf '%s -S \t\t\t\t Search the online database \n' "$name"
}

help
| true
|
ccad7bf8453b0541842c3043e4520a6db6a90e68
|
Shell
|
zooland/boxen
|
/modules/people/files/zoobert/shellvars
|
UTF-8
| 689
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Shell environment variables sourced at login.

# Default JDK pin (disabled) -- uncomment to force JDK 1.7:
# export JAVA_HOME=`/usr/libexec/java_home -v 1.7`

# Command-line editor: Sublime Text.  The old TextMate setting is kept
# below for reference.
# export EDITOR='mate -w'
EDITOR='subl'
export EDITOR

# Maven JVM settings (M2_HOME & M2 come from the MacOSX defaults system).
MAVEN_OPTS="-Xmx2048m -XX:MaxPermSize=512m"
export MAVEN_OPTS

# Generic Java JVM settings.
JAVA_OPTS="-Xms256m -Xmx2048m -XX:MaxPermSize=512m"
export JAVA_OPTS

# Oracle client environment, all rooted at ORACLE_ROOT.
ORACLE_ROOT=/Applications/oracle
ORACLE_HOME="$ORACLE_ROOT/client/11.2.0"
TNS_ADMIN="$ORACLE_ROOT/admin/network"
DYLD_LIBRARY_PATH="$ORACLE_HOME/lib"
export ORACLE_ROOT ORACLE_HOME TNS_ADMIN DYLD_LIBRARY_PATH
| true
|
4575cd55485994f1389881d49eb823bf6dcd8943
|
Shell
|
SJTMusicTeam/SVS_system
|
/SVS/model/archive/preprocessing/ch_asr/local/aishell_train_lms.sh
|
UTF-8
| 3,399
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Train a 3-gram (mincount) language model for AISHELL from the acoustic
# training transcripts, mapping out-of-lexicon words to <SPOKEN_NOISE>.
# To be run from one directory above this script.

. ./path.sh

text=data/local/train/text
lexicon=data/local/dict/lexicon.txt
# Both inputs must exist before we start.
# (Fixed: the original tested the never-set variable $x instead of the
# loop variable $f, so this guard could never trigger.)
for f in "$text" "$lexicon"; do
  [ ! -f "$f" ] && echo "$0: No such file $f" && exit 1;
done

# This script takes no arguments.  It assumes you have already run
# aishell_data_prep.sh.
# It takes as input the files
# data/local/train/text
# data/local/dict/lexicon.txt

dir=data/local/lm
mkdir -p $dir

kaldi_lm=`which train_lm.sh`
if [ -z "$kaldi_lm" ]; then
  echo "$0: train_lm.sh is not found. That might mean it's not installed"
  echo "$0: or it is not added to PATH"
  echo "$0: Use the script tools/extras/install_kaldi_lm.sh to install it"
  exit 1
fi

cleantext=$dir/text.no_oov

# Replace every transcript word missing from the lexicon with
# <SPOKEN_NOISE>; the utterance id in field 1 is passed through.
cat $text | awk -v lex=$lexicon 'BEGIN{while((getline<lex) >0){ seen[$1]=1; } }
{for(n=1; n<=NF;n++) {  if (seen[$n]) { printf("%s ", $n); } else {printf("<SPOKEN_NOISE> ");} } printf("\n");}' \
  > $cleantext || exit 1;

cat $cleantext | awk '{for(n=2;n<=NF;n++) print $n; }' | sort | uniq -c | \
  sort -nr > $dir/word.counts || exit 1;

# Get counts from acoustic training transcripts, and add one-count
# for each word in the lexicon (but not silence, we don't want it
# in the LM-- we'll add it optionally later).
cat $cleantext | awk '{for(n=2;n<=NF;n++) print $n; }' | \
  cat - <(grep -w -v '!SIL' $lexicon | awk '{print $1}') | \
  sort | uniq -c | sort -nr > $dir/unigram.counts || exit 1;

# note: we probably won't really make use of <SPOKEN_NOISE> as there aren't any OOVs
cat $dir/unigram.counts | awk '{print $2}' | get_word_map.pl "<s>" "</s>" "<SPOKEN_NOISE>" > $dir/word_map \
  || exit 1;

# note: ignore 1st field of train.txt, it's the utterance-id.
cat $cleantext | awk -v wmap=$dir/word_map 'BEGIN{while((getline<wmap)>0)map[$1]=$2;}
{ for(n=2;n<=NF;n++) { printf map[$n]; if(n<NF){ printf " "; } else { print ""; }}}' | gzip -c >$dir/train.gz \
  || exit 1;

train_lm.sh --arpa --lmtype 3gram-mincount $dir || exit 1;

# LM is small enough that we don't need to prune it (only about 0.7M N-grams).
# Perplexity over 128254.000000 words is 90.446690
# note: output is
# data/local/lm/3gram-mincount/lm_unpruned.gz
exit 0

# Everything below is an optional SRILM baseline kept for reference; it is
# never executed because of the exit 0 above.
# From here is some commands to do a baseline with SRILM (assuming
# you have it installed).
heldout_sent=10000 # Don't change this if you want result to be comparable with
    # kaldi_lm results
sdir=$dir/srilm # in case we want to use SRILM to double-check perplexities.
mkdir -p $sdir
cat $cleantext | awk '{for(n=2;n<=NF;n++){ printf $n; if(n<NF) printf " "; else print ""; }}' | \
  head -$heldout_sent > $sdir/heldout
cat $cleantext | awk '{for(n=2;n<=NF;n++){ printf $n; if(n<NF) printf " "; else print ""; }}' | \
  tail -n +$heldout_sent > $sdir/train

cat $dir/word_map | awk '{print $1}' | cat - <(echo "<s>"; echo "</s>" ) > $sdir/wordlist

ngram-count -text $sdir/train -order 3 -limit-vocab -vocab $sdir/wordlist -unk \
  -map-unk "<SPOKEN_NOISE>" -kndiscount -interpolate -lm $sdir/srilm.o3g.kn.gz
ngram -lm $sdir/srilm.o3g.kn.gz -ppl $sdir/heldout
# 0 zeroprobs, logprob= -250954 ppl= 90.5091 ppl1= 132.482

# Note: perplexity SRILM gives to Kaldi-LM model is same as kaldi-lm reports above.
# Difference in WSJ must have been due to different treatment of <SPOKEN_NOISE>.
ngram -lm $dir/3gram-mincount/lm_unpruned.gz -ppl $sdir/heldout
# 0 zeroprobs, logprob= -250913 ppl= 90.4439 ppl1= 132.379
| true
|
0ed01bf66bae84600a2b916b047707ba561f53fb
|
Shell
|
l33tLumberjack/Poisonports
|
/shields.sh
|
UTF-8
| 850
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# Poisonports: listen on a port with netcat and firewall every IP that
# connects by adding an iptables DROP rule (must run as root).
#
# Usage: ./shields.sh PORT SECONDS
#   PORT    - TCP port to listen on
#   SECONDS - delay before inserting the iptables rule
#
# Fixed: the usage text demands two arguments, but the original only
# rejected zero arguments, so a single argument made "sleep $2" fail later.
if [ $# -lt 2 ]
then
    echo "No arguments supplied"
    echo "2 arguments required a port to listen on and a sleep in seconds to add rules to iptables"
    echo "Usage: $0 PORT SECONDS"
    exit 1
fi

GOOD=$(tput setaf 2)[+]$(tput sgr0)
BAD=$(tput setaf 1)[-]$(tput sgr0)
clear;
echo "$(tput setaf 2)Poisonports$(tput sgr0) is listening on $1";
while true
do
    # Netcat session: block until a client connects, then pull its address
    # out of nc's "... from host [a.b.c.d] ..." diagnostic.
    IP=`nc -vlp "$1" 2>&1 1> /dev/null | grep from | cut -d[ -f 3 | cut -d] -f 1`
    # Skip iterations where no address could be parsed -- an empty $IP
    # would otherwise match every line of the iptables listing below.
    [ -z "$IP" ] && continue
    echo "$BAD Connection from IP ${IP} detected on port $1"
    # Sleep to prevent simultaneous iptable drops
    sleep "$2"
    # Fixed-string match so the address is not treated as a regex
    # (dots in an unquoted pattern match any character).
    if iptables -L INPUT -v -n | grep -F -- "$IP"
    then
        echo "$GOOD IP in list: $IP"
    else
        echo "$GOOD IP Added to list: $IP"
        # Dropping IP address into iptables
        echo "$GOOD Added Drop rule for $IP"
        iptables -A INPUT -p all -s "${IP}" -j DROP
        echo "$IP" >> IPs.txt
    fi
done
| true
|
c5b8bbf2102b8e414729a4919045e5b7525cb79e
|
Shell
|
rlknowles/moses_reloading_experiments
|
/gdb2.sh
|
UTF-8
| 1,407
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# A script to run experimental conditions: validate the static and dynamic
# condition directories, launch the static moses server in the background,
# then start the dynamic server under gdb for debugging.
#
# Usage: gdb2.sh staticDir dynamicDir
#
# Fixed: status messages were prefixed with "2>&1", which redirects stderr
# into stdout *before* the command runs and is a no-op for echo/cat (they
# write only to stdout); the intended "diagnostics to stderr" is ">&2".
if (( $# == 2 )); then
    # Normalise a trailing slash on the static directory, with a warning.
    if [[ ${1} =~ /$ ]] ; then
        staticDir="${1%/}"
        echo -e "Warning:\tAssuming you meant ${staticDir} instead of ${1}" >&2
    else
        staticDir=${1}
    fi
    # The static condition only needs its moses.ini.
    for file in moses.ini ; do
        if [ ! -e "${staticDir}/${file}" ] ; then
            echo -e "${staticDir}/${file} is required, but was not found" >&2
            exit -1
        fi
    done
    # Normalise a trailing slash on the dynamic directory, with a warning.
    if [[ ${2} =~ /$ ]] ; then
        dynamicDir="${2%/}"
        echo -e "Warning:\tAssuming you meant ${dynamicDir} instead of ${2}" >&2
    else
        dynamicDir=${2}
    fi
    # The dynamic condition needs the full set of experiment files.
    for file in moses.ini static.pt static.lm client.py summary.txt; do
        if [ ! -e "${dynamicDir}/${file}" ] ; then
            echo -e "${dynamicDir}/${file} is required, but was not found" >&2
            exit -1
        fi
    done
else
    echo -e "Usage: $0 staticDir dynamicDir" >&2
    exit -1
fi

cd "${staticDir}"
echo -e "Condition:\tstatic ${staticDir}\tdynamic ${dynamicDir}" >&2
echo >&2
# NOTE(review): this cats ${staticDir}/summary.txt, but only the dynamic
# dir's summary.txt is checked for above -- confirm which one is intended.
cat summary.txt >&2
echo >&2
echo >&2

echo -e "Launching:\tmosesserver -v 1 -f ${staticDir}/moses.ini &> ../${dynamicDir}/log.${staticDir}" >&2
../../bin/mosesserver -v 1 -f moses.ini --server-port 8090 &> ../${dynamicDir}/log.${staticDir} &
static_server_pid=${!}
sleep 0.5

cd ../${dynamicDir}
echo -e "Launching:\tmosesserver -v 1 -f ${dynamicDir}/moses.ini &> log" >&2
echo "-v 3 -f moses.ini --server-port 8091"
gdb ../../bin/mosesserver
| true
|
722ed31187ca4c451a397006566a9f62d197a83d
|
Shell
|
natesol-code21/codehub-api
|
/ops/start_app.sh
|
UTF-8
| 539
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
cd /opt/heimdall
/sbin/initctl status strong-pm | grep -q 'strong-pm stop/waiting'
if [ $? = 0 ]
then
/sbin/initctl start strong-pm
fi
sl-pmctl status heimdall-api-service 2>&1 >/dev/null | grep -q 'Error: Unknown "ServerService" id "undefined"'
if [ $? = 0 ]
then
sl-pmctl create heimdall-api-service
sl-pmctl set-size heimdall-api-service 1
else
sl-pmctl status heimdall-api-service | grep -q "Not started"
if [ $? = 0 ]
then
sl-pmctl start heimdall-api-service
fi
fi
slc deploy -s heimdall-api-service
| true
|
80972047bb6721f63b577517b7b5f9037d6979f6
|
Shell
|
mdumitru/dotfiles
|
/.profile
|
UTF-8
| 1,460
| 3
| 3
|
[] |
no_license
|
# ~/.profile -- login-shell environment setup.  Optional machine-local
# overrides live in ~/.profile.before / ~/.profile.after, sourced around
# the main configuration.

# Source 'before' file (if any).
if test -f "$HOME/.profile.before" -a -r "$HOME/.profile.before"; then
    . "$HOME/.profile.before"
fi

# Add ~/bin/ and ~/.local/bin to path.
PATH="$HOME/bin/:$HOME/.local/bin:$PATH"

# Add cabal stuff to path
PATH="$HOME/.cabal/bin/:$PATH"

# Add cargo stuff to path
PATH="$HOME/.cargo/bin/:$PATH"

export PATH

# As env variables are inherited by regular interactive shells, they should be
# set here so only the login shells modify them.
# Editor preference: neovim, then vim, then vi.
if command -v nvim > /dev/null; then
    EDITOR=nvim-host-editor
    # Needed for vimpager, harmless in its absence.
    VIMPAGER_VIM=nvim
else
    if command -v vim > /dev/null; then
        EDITOR=vim
    else
        EDITOR=vi
    fi
fi
export EDITOR
VISUAL="$EDITOR"
export VISUAL

# Use vimpager as the pager only when it is actually installed.
if command -v vimpager > /dev/null; then
    PAGER=vimpager
    export PAGER
    export VIMPAGER_VIM
fi

# XDG user directories.
# These should not be necessary.
XDG_CONFIG_HOME="$HOME/.config"
XDG_CACHE_HOME="$HOME/.cache"
XDG_DATA_HOME="$HOME/.local/share"
export XDG_CONFIG_HOME XDG_CACHE_HOME XDG_DATA_HOME

# XDG system directories.
XDG_DATA_DIRS=/usr/local/share:/usr/share
XDG_CONFIG_DIRS=/etc/xdg
export XDG_DATA_DIRS XDG_CONFIG_DIRS

# Prefer UTF-8 English (only when that locale is available).
if locale -a | grep -F -q -x "en_US.UTF-8"; then
    LC_ALL="en_US.UTF-8"
    export LC_ALL
fi

# Source 'after' file (if any).
if test -f "$HOME/.profile.after" -a -r "$HOME/.profile.after"; then
    . "$HOME/.profile.after"
fi
| true
|
8ff94d05722d9f0cdac5a7f935ad98d874215224
|
Shell
|
dweldon/ubuntu-setup
|
/includes/disk.sh
|
UTF-8
| 682
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash

# Apply SSD-friendly tweaks: mount with noatime, put /tmp on tmpfs, lower
# swappiness and raise the inotify watch limit.  Relies on the util.*
# helpers (message/progress/append/assertContains) being sourced already.
disk.execute() {
    util.message 'adding ssd tweaks'
    util.progress 'updating fstab'
    local fstab='/etc/fstab'
    # Keep a backup before editing fstab in place.
    sudo cp $fstab "$fstab.old"
    # Prepend noatime to the existing errors=remount mount option.
    sudo sed -i 's/errors=remount/noatime,errors=remount/' $fstab
    printf 'tmpfs /tmp tmpfs defaults,noatime,mode=1777 0 0' | sudo tee -a $fstab >& /dev/null
    util.progress 'changing swapiness and watches'
    local sysctl='/etc/sysctl.conf'
    util.append $sysctl 'vm.swappiness = 10' true
    util.append $sysctl 'fs.inotify.max_user_watches = 524288' true
    # Verify that all four edits actually landed.
    util.assertContains $fstab 'tmpfs'
    util.assertContains $fstab 'noatime'
    util.assertContains $sysctl 'swappiness'
    util.assertContains $sysctl 'max_user_watches'
}
| true
|
c38c8194795cac28395f612d732b16e4071d4b93
|
Shell
|
5330/proxy-5330
|
/deploy.sh
|
UTF-8
| 732
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Deploy the proxy-5330 container: tear down any running container bound
# to the API port, then start a fresh one from the latest image.
hosts="localhost"
for host in $( echo $hosts | tr ',' ' ' )
do
    # target host for docker rpc
    dockercmd="docker"
    # NOTE(review): BRANCH is computed but never used below.
    BRANCH=$(git branch |awk '{print $2}')
    apiport=80
    ##########################
    # teardown old
    ##########################
    echo "Check and Tear Down OLD containers on host"
    # Is an 'Up' proxy-5330 container already bound to the API port?
    ${dockercmd} ps -a | grep :${apiport} | grep proxy-5330 | grep Up
    oval=$?
    if [ ${oval} = 0 ]; then
        # Remove the old container(s) by id before redeploying.
        ${dockercmd} ps -a | grep :${apiport} | grep proxy-5330 | awk '{print $1}' | xargs ${dockercmd} rm -f
    fi
    #####################################
    echo "Running container"
    # Always-restart container publishing host port 80 to the proxy.
    ${dockercmd} run -d --restart=always \
    -p ${apiport}:80 \
    -e host=${host} \
    --name=proxy-5330-prod \
    jread/proxy-5330:latest
done
| true
|
b97f9cd96c65aeb591cad892ec3261cbbbeb9e0a
|
Shell
|
paulgclark/grc-install
|
/install_scripts/oot/sat_from_source.sh
|
UTF-8
| 2,034
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script installs gr-satellites from source. It is intended for
# execution AFTER the grc_from_source.sh script has been run and will
# install to the same target directory used for that process.
#
# This script depends on environment variables that were created during
# the gnuradio installation process, so the script must be run WITHOUT
# sudo:
#    ./satellites_from_source.sh

# get current directory (assuming the script is run from local dir)
SCRIPT_PATH=$PWD
SCRIPT_NAME=${0##*/}

# you should not be root, if you are, quit
if [[ $EUID == 0 ]]; then
	echo "You are attempting to run the script as root."
	echo "Please do not run with sudo, but simply run:"
	echo "    ./$SCRIPT_NAME"
	exit 1
fi

# there should also be an environment variable for the target and source paths
# if there is not, quit
# (Fixed: these messages used to tell the user to run $SCRIPT_NAME -- i.e.
# this very script -- as its own prerequisite; the script that defines
# SDR_TARGET_DIR/SDR_SRC_DIR is grc_from_source.sh, per the header above.)
if [[ -z "$SDR_TARGET_DIR" ]]; then
	echo "ERROR: \$SDR_TARGET_DIR not defined."
	echo "       You should run grc_from_source.sh before running this script."
	echo "       If you've already done that, you may need to open a new terminal"
	echo "       and try this script again."
	exit 1
fi

if [[ -z "$SDR_SRC_DIR" ]]; then
	echo "ERROR: \$SDR_SRC_DIR not defined."
	echo "       You should run grc_from_source.sh before running this script."
	echo "       If you've already done that, you may need to open a new terminal"
	echo "       and try this script again."
	exit 1
fi

# install prereqs
sudo apt -y install liborc-0.4-dev
sudo apt -y install feh

# number of cores to use for make
CORES=`nproc`

VERSION_39="v4.2.0" # latest release
VERSION_38="v3.9.0" # latest release
VERSION_37="v3.7.0" # latest release

# get a known working version or commit
pip3 install --user --upgrade construct requests
GIT_REF=$VERSION_39

# get code from a known good version; abort if any cd fails so we never
# clone or build in the wrong directory
cd "$SDR_SRC_DIR" || exit 1
git clone https://github.com/daniestevez/gr-satellites
cd gr-satellites || exit 1
git checkout $GIT_REF
git submodule update

# build it
mkdir -p build
cd build || exit 1
cmake -DCMAKE_INSTALL_PREFIX=$SDR_TARGET_DIR ../
make -j$CORES
make install
| true
|
e3bc38564df418e7049be629b11fa48505d5b704
|
Shell
|
s-urbaniak/dotfiles
|
/plan9/bin/indexjar
|
UTF-8
| 457
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
# Index the .java sources contained in a jar into an Elasticsearch "src"
# index: one document per source file, with the source stored
# base64-encoded under the "java" field.
#
# $1 - path to the jar file.
JAR="$1"
JARFILE="$PWD"/$(find "$1")
# List the jar, squeeze spaces, drop the 3-line header and (after
# reversing) the footer, keep the filename column, .java entries only.
# NOTE(review): 'tail -r' is a BSD extension (GNU equivalent: tac), so
# this presumably targets macOS -- confirm.
FILES=$(unzip -l "$JAR" | tr -s ' ' | tail -n +4 | tail -r | tail -n +3 | cut -d ' ' -f 5 | egrep '.*\.java$')
# NOTE(review): $TMP is not set anywhere in this script -- leftover debug?
echo $TMP
for f in $FILES
do
	# Extract the file (skipping unzip's 2-line banner) and base64 it.
	SRC=$(unzip -c "$JAR" $f | tail -n +3 | base64)
	# Fully qualified class name: slashes to dots, strip trailing .java.
	CLASS=$(echo "$f" | sed 's/\//\./g' | sed 's/\(.*\)\.java$/\1/g')
	curl -XPOST 'localhost:9200/src/jar?pretty' -d "
{
\"jar\": \""$JAR"\",
\"file\": \""$f"\",
\"class\": \""$CLASS"\",
\"java\": \""$SRC"\"
}
"
done
| true
|
2c0a12b8fde426485527dec043b8ce2605e2aaec
|
Shell
|
yijial/programming-tools
|
/hw3/parse.sh
|
UTF-8
| 297
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
#CSE 374 HW3
#Yijia Liu 1238339
# Extract the href URLs from <strong><a href ...> links in an HTML file.
# Usage: ./parse.sh input.html output.txt
if [ $# -lt 2 ]
then
	>&2 echo "Error: need at least two arguments"
	exit 1
fi
# Fixed: the original tested the never-assigned variable $f1 (with the
# ambiguous unary -a), so the input file was not actually checked.  The
# intended check is that the input file $1 exists.
if [ ! -e "$1" ]
then
	>&2 echo "Error: html file doesn't exist"
	exit 1
fi
# Use a private temp file instead of a fixed "temp" in the working
# directory, and clean it up on any exit path.
temp=$(mktemp) || exit 1
trap 'rm -f "$temp"' EXIT
grep -Eo '<strong><a href.*' "$1" > "$temp"
sed -e 's/.*\(http:.*\)".*/\1/' "$temp" > "$2"
exit
| true
|
c116ac0b205679405581ee3d0bc0e0723dd57d69
|
Shell
|
eshapo/Mongoose-MachineLink-3D-Plus
|
/cgi-bin/check_file.cgi
|
UTF-8
| 190
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/sh
# CGI endpoint: run ls on the query string and emit the result as a
# JavaScript variable assignment.
echo -e 'Content-type: text/html\n'
# Reject requests without a valid session: the server-side SESSION_ID must
# be set and match the request's sessionid.
if [ -z "${SESSION_ID}" -o "${SESSION_ID}" != "${sessionid}" ]; then
	exit 0
fi
# NOTE(review): QUERY_STRING is expanded unquoted into ls, so it undergoes
# word splitting and globbing -- it should be treated as untrusted input.
v=`ls $QUERY_STRING 2>/dev/null`
echo "var result=\""$v"\";"
| true
|
39c9189402432bc48272c3c24560c42b91f0e563
|
Shell
|
Ttayu/google_colab_environment
|
/connect_google_colaboratory.sh
|
UTF-8
| 294
| 2.953125
| 3
|
[] |
no_license
|
# Open an SSH session to a Google Colab instance exposed through an ngrok
# TCP tunnel, authenticating non-interactively with sshpass.
if ! command -v sshpass > /dev/null 2>&1; then
    echo "please install sshpass."
    echo "In Ubuntu. sudo apt install sshpass"
    echo "In MacOS. brew install http://git.io/sshpass.rb"
else
    passwd=password
    # port is intentionally left empty here (fill in the ngrok port); it
    # stays unquoted below so an empty value drops the argument entirely.
    port=
    sshpass -p "$passwd" ssh -o "StrictHostKeyChecking no" root@2.tcp.ngrok.io -p $port
fi
| true
|
726d094b70ab26836fed755e64dabd0985d3cc23
|
Shell
|
drakkar-lig/ipv6-care
|
/scripts/man2wiki.sh
|
UTF-8
| 623
| 2.765625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Convert the ipv6_care man page to MediaWiki markup: render it to HTML
# with man2html, fix the generated links with sed, convert to wiki syntax
# with html2wiki, and use awk to strip the generated Index section and the
# HTTP-header/title preamble.  Finishes with a __NOEDITSECTION__ marker.
SCRIPTS_DIR=$(cd $(dirname $0); pwd)
MAN_DIR=$SCRIPTS_DIR/../man
MAN_PAGE=$MAN_DIR/ipv6_care.8
# Host that cross-referenced man-page links will point at.
EXTERNAL_MAN_HOST=linux.die.net
man2html -H $EXTERNAL_MAN_HOST -M "/man" -p $MAN_PAGE | \
sed 	-e 's/\(man\/.\)+/\1\//g' \
	-e 's/man"/man\/1\/man2html"/g' \
	-e 's/<A HREF="#index".*A>//g' \
	-e 's/<A HREF=.*Main Contents..A>//g' \
	-e 's/<A HREF="mailto:\(.*\)".*A>/\1/g' | \
html2wiki --dialect MediaWiki | \
awk '
{ do_print = 1 }
/==Index==/,/----/	 { do_print = 0 }
/Content-type.*/,/=ipv6_care=/	 { do_print = 0 }
{ if (do_print == 1) print }
'
echo
echo __NOEDITSECTION__
| true
|
d327816020cf5cea672aa4e61e41b196e1e69fc4
|
Shell
|
liuyang1/test
|
/lang/shell/forpath.sh
|
UTF-8
| 97
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# Visit each directory in the list and scp its contents.
dirs=("1" "b" "c")
for d in "${dirs[@]}"
do
    echo "$d"
    # Fixed: run the cd in a subshell so every iteration starts from the
    # original working directory; the plain cd accumulated, so "b" was
    # looked up inside "1".  Skip the iteration if the cd fails.
    (
        cd "$d" || exit
        # NOTE(review): scp is called without a destination, which fails
        # as written -- presumably a host:path target is missing.
        scp ./*
    )
done
| true
|
96a7e790cde8dfcfdffa45431e6c62beb7c7e6b4
|
Shell
|
MikaelSmith/nebula-steps
|
/kubectl/step.sh
|
UTF-8
| 547
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Nebula step: run a kubectl command against a provisioned cluster.
# Parameters (namespace, cluster, command, args/file, optional git source)
# are read from the step spec via the `ni` helper.
#
# Fixed: `==` inside `[ ]` is a bashism that errors under POSIX /bin/sh
# implementations such as dash; the portable operator is `=`.
NS=$(ni get -p {.namespace})
CLUSTER=$(ni get -p {.cluster.name})
KUBECONFIG=/workspace/${CLUSTER}/kubeconfig
ni cluster config

COMMAND=$(ni get -p {.command})
FILE=
if [ "${COMMAND}" = "apply" ]; then
    # apply always takes -f <file>; other commands pass raw args through.
    ARGS="-f"
    FILE=$(ni get -p {.file})
else
    ARGS=$(ni get -p {.args})
fi

FILE_PATH=${FILE}
GIT=$(ni get -p {.git})
if [ -n "${GIT}" ]; then
    # Clone the referenced repo and resolve FILE inside its checkout.
    ni git clone
    NAME=$(ni get -p {.git.name})
    FILE_PATH=/workspace/${NAME}/${FILE}
fi

# ARGS (and an empty FILE_PATH) are intentionally unquoted so multi-word
# argument strings split into separate words.
kubectl ${COMMAND} ${ARGS} ${FILE_PATH} --namespace ${NS} --kubeconfig ${KUBECONFIG}
| true
|
023a5da438329d9594929d6171e47fdd6e08dfe7
|
Shell
|
AlexAkulov/candy-elk
|
/pkg/elkriver.postinst
|
UTF-8
| 600
| 3.625
| 4
|
[] |
no_license
|
#!/bin/sh
# Package post-install script for elkriver: create the service account,
# prepare the log directory, and register the service with the init
# system (systemd when available, otherwise chkconfig).
set -e
# Initial installation: $1 == 1
# Upgrade: $1 == 2
#if [ $1 -eq 1 ] ; then
    # Create the 'elk' system group and user if they do not exist yet.
    if ! getent group "elk" > /dev/null 2>&1 ; then
        groupadd -r "elk"
    fi
    if ! getent passwd "elk" > /dev/null 2>&1 ; then
        useradd -r -g elk -d /usr/share/elk -s /sbin/nologin \
            -c "elk user" elk
    fi
    # Log directory owned by the service user.
    mkdir -p /var/log/elkriver
    chown -R elk:elk /var/log/elkriver
    chmod 755 /var/log/elkriver
    # Register and enable the service with whichever init system exists.
    if [ -x /bin/systemctl ] ; then
        /bin/systemctl daemon-reload
        /bin/systemctl enable elkriver.service
    elif [ -x /sbin/chkconfig ] ; then
        /sbin/chkconfig --add elkriver
    fi
#fi
| true
|
e5a09d6387a7801444d021b4376052e2eb825e34
|
Shell
|
ulno/ulnoiot
|
/bin/mqtt_log
|
UTF-8
| 1,303
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# mqtt_log: subscribe to an MQTT (sub)topic via mqtt_listen and append the
# timestamped messages to a log file.  See the usage text below.
if [[ $# -gt 2 || $# = 0 || "$*" = "help" || "$*" = "-h" || "$*" = "--help" ]]; then
cat << EOF
Syntax: mqtt_log [topic_or_subtopic] outfile
mqtt_log subscribes to a topic and all its subtopics via mqtt and logs
the received messages to a given file.
It also add a timestamp to the received messages.
If mqtt_log is called from a node directory, the root topic is automatically
based on the node's main topic and the given subtopic is prefixed with it.
Also the system.node is taken into account for other MQTT settings.
To send to the node's main topic, give "" as topic.
If topic starts with / the node main topic is ignored.
If the outfile exists, the log is appended to it.
EOF
exit 1
fi

# TODO: deal better with topic removal than with cut
[ "$ULNOIOT_ACTIVE" = "yes" ] || { echo "ulnoiot not active, aborting." 1>&2;exit 1; }

# find how to call ts (moreutils' timestamper may be installed under a
# prefixed name on some systems)
TS=ts
if which moreutils_ts &>/dev/null; then
    TS=moreutils_ts
fi

# With two args the first is the topic; with one arg the topic defaults to
# empty, i.e. the node's main topic.
if [[ $# = 2 ]]; then
    topic="$1"
    outfile="$2"
else
    topic=""
    outfile="$1"
fi

# Filesystem-safe log name: replace slashes and add a .log suffix.
outfile="${outfile//\//_}.log"

echo "Calling mqtt_listen and logging timestamped to $outfile." >&2

# Listen, drop the topic column (field 1), prepend an epoch timestamp with
# ts, and tee to the log.  stdbuf keeps cut line-buffered so messages
# stream through immediately.
{
    if [[ "$topic" ]]; then
        mqtt_listen "$topic"
    else
        mqtt_listen
    fi
} | stdbuf -oL -eL cut -d\  -f2- | $TS "%.s" | tee -a "$outfile"
| true
|
5a7307d411c8b65cb500cd93796421bbfabfce94
|
Shell
|
Luke1453/OS_homework
|
/january/pw5/passchange.sh
|
UTF-8
| 318
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Script that resets a user's password to "linuxPower".
# Must be run as root (chpasswd requires it).
echo ""
read -p "Please enter username to reset pasword: " username
pass=linuxPower
# Fixed: the original grepped /etc/passwd for the name as an unquoted,
# unanchored substring, so e.g. "bob" also matched "bobby" or a matching
# home-directory path.  Compare the login-name field exactly instead.
if cut -d: -f1 /etc/passwd | grep -Fxq -- "$username"
then
    echo "$username:$pass" | chpasswd
else
    echo User dosent exist, nothing to change.
fi
echo ""
| true
|
67606756f71ddc520c2c3d51592bfcff54926aed
|
Shell
|
oncoapop/data_reporting
|
/beast_scripts/v4.4_pipeline/v4.4_inSilicoPcr_bed.sh
|
UTF-8
| 1,366
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# This Script was written by Damian Yap (Aug 2013)
# WSOP2013-001 version 4.0
# This script can only be run AFTER the pipeline is run.
# It builds an isPcr input from the primer list, runs UCSC in-silico PCR
# against hg19, and writes a bed file for viewing on UCSC.
name="DG1136g"
dir="/home/dyap/Projects/PrimerDesign/TITAN-SS"
p3dir=$dir"/primer3"
# isPCR input: primer name, left primer, right primer (CSV fields 1,4,5).
cat $p3dir"/"$name"_primerlist.txt" | awk -F, '{print $1,$4,$5}' > $p3dir"/"$name"_isPCR-input"
# Module to check the primers by in silico PCR
suffix="_isPCR-input"
# isPCR is on beast at
command="/share/data/apps/isPcr/bin/x86_64/isPcr"
# database (hg19 2bit fa ) at
database="/share/data/apps/isPcr/isPcrSrc/isPcr/data/genomes/twoBit/hg19.2bit"
# IF reversecomplement of right primer is NOT required comment this
#flip="-flipReverse"
flip=""
# output format
#output=fa      # fasta format (default)
output=bed      # bed format (tab-delimited; Fields: chrom/start/end/name/score/strand)
#output=psl     # blat format
outfilesuffix="_isPCR-output."$output
# Name of the input file
inputfile=$p3dir"/"$name$suffix
# Name of the output file
outputfile=$p3dir"/"$name$outfilesuffix
cat $inputfile
echo $outputfile
echo "Performing in-silico PCR using primers on hg19.... (This takes at least 7 min)"
# Progress spinner: print a dot every second while isPcr runs.
while :;do echo -n .;sleep 1;done &
$command $database $flip "-out="$output $inputfile $outputfile
# Stop the spinner.
# NOTE(review): the trap is installed *after* the kill, so it only covers
# a SIGTERM arriving later -- confirm whether it was meant to come first.
kill $!; trap 'kill $!' SIGTERM
echo "In-silico PCR is completed."
exit;
| true
|
1d05477f40e2bddd8c6a150e6537870e0d613327
|
Shell
|
Tiago-devop/web-api-js-no-frameworks
|
/script.sh
|
UTF-8
| 656
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Smoke-test the heroes web API on localhost:3000: list heroes, fetch one,
# exercise the invalid-body error path, create a hero and fetch it back by
# the id returned in the response.
printf '\n\n requesting all heroes'
curl localhost:3000/heroes
printf '\n\n requesting flash'
curl localhost:3000/heroes/1
printf '\n\n requesting with wrong body'
curl --silent -X POST \
    --data-binary '{"invalid": "data"}' \
    localhost:3000/heroes
printf '\n\n creating Leoric'
CREATE=$(curl --silent -X POST \
    --data-binary '{"name": "Leoric", "age": 2000, "power": "Cleave Attack"}' \
    localhost:3000/heroes)
echo "$CREATE"
# Extract the generated id from the JSON response.
ID=$(echo "$CREATE" | jq .id)
# If the request fails while running under nodemon,
# run: node webapi/src/index.js
# and try again.
printf '\n\n requesting leoric'
curl localhost:3000/heroes/"$ID"
| true
|
bdf5ef76fed73da29c7ea6ca2c32df22cd619d7d
|
Shell
|
kareemadel/bash_database_engin
|
/helper_scripts/database/use_database.sh
|
UTF-8
| 1,446
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Menu loop for the currently selected database ($DATABASE): create,
# select or drop tables, delete the database, or go back to the main menu.
# This file is meant to be *sourced* (note the use of `return`); it drives
# navigation by setting CURRENT_MENU and sourcing the next menu script.
source ./helper_scripts/generic/helper_functions.sh;
while true; do
    clear;
    echo "You are now in \"$DATABASE\" database.";
    printf "What do you want to do?\n";
    printf "1) Create a new table.\n";
    printf "2) Use an existing table.\n";
    printf "3) Drop a table.\n";
    echo "4) Delete \"$DATABASE\" database.";
    # Option 5 only appears when a table is already selected.
    if [ -n "$TABLE" ]; then
        echo "5) Use \"$TABLE\" table.";
    fi
    printf "b) Enter b to go back to main menu.\n"
    printf "q) Enter q to exit.\n";
    printf "Your choice: ";
    # EOF on stdin returns to the main menu.
    if ! read; then
        CURRENT_MENU=0;
        return;
    fi
    case "$REPLY" in
        1)
            source ./helper_scripts/table/create_table.sh;
            return;
            ;;
        2)
            source ./helper_scripts/table/select_table.sh;
            return;
            ;;
        3)
            source ./helper_scripts/table/delete_table.sh;
            return;
            ;;
        4)
            delete_database "$DATABASE";
            CURRENT_MENU=0;
            return;
            ;;
        5)
            # Guard again: ignore 5 when no table is selected.
            if [ ! -z "$TABLE" ]; then
                source ./helper_scripts/table/use_table.sh;
                return;
            fi
            ;;
        b)
            CURRENT_MENU=0;
            return;
            ;;
        q)
            exit 0;
            ;;
        *)
            ;;
    esac
done
| true
|
e4fc56315492a1ef6892be25fe090d3b68c44541
|
Shell
|
RodrigoCapuzzi/FATEC
|
/semestre_1/sistemas_operacionais/case_palavra.sh
|
UTF-8
| 473
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/sh
# Report whether the first argument starts with a/A, b/B or c/C.
# Fixed: the shebang was missing its leading '#' ("!/bin/sh"), which made
# the first line an invalid command instead of an interpreter directive.
case "$1" in
    A*)
        echo "Palavra começa com A";;
    a*)
        echo "Palavra começa com a";;
    B*)
        echo "Palavra começa com B";;
    b*)
        echo "Palavra começa com b";;
    C*)
        echo "Palavra começa com C";;
    c*)
        echo "Palavra começa com c";;
    *)
        echo "Palavra não começa nem com a, nem com b, nem com c"
esac
exit 0
| true
|
32efe0a60af8601865fb2aac6cd80f8531cb2152
|
Shell
|
david618/websats
|
/scripts/update_tle.sh
|
UTF-8
| 689
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Download the current TLE (two-line element) sets for a fixed list of
# satellite groups from celestrak.com and concatenate them into the
# application's sats.tle resource file.
NAMES=(stations gps-ops glo-ops resource iridium intelsat iridium-NEXT globalstar orbcomm ses musson science)
FOLDER=~
#TLEFILE=~/github/websats/src/main/resources/sats.tle
TLEFILE=../src/main/resources/sats.tle
#FOLDER=/home/tomcat
#TLEFILE=/opt/tomcat/webapps/websats/WEB-INF/classes/sats.tle
# Delete the file
> $TLEFILE
# Load in each Satellite Data
for a in "${NAMES[@]}"
do
    echo $a
    # Fetch the group silently to a per-group file, then append it below.
    # NOTE(review): $(${url}) runs curl inside a command substitution and
    # discards its (empty) output -- plain ${url} would do the same job.
    url="curl -s -o ${FOLDER}/${a}.txt https://www.celestrak.com/NORAD/elements/${a}.txt"
    $(${url})
    cat ${FOLDER}/${a}.txt >> $TLEFILE
done
# Optional: restart tomcat so the new TLE file is picked up.
#/home/david/tomcat8/bin/shutdown.sh
#/home/david/tomcat8/bin/startup.sh
#systemctl stop tomcat8.service
#systemctl start tomcat8.service
| true
|
1d35a3f3ba0982d5de39e5c306e329b6bb859895
|
Shell
|
christineoo/circleci-ai-platform-example
|
/job_status.sh
|
UTF-8
| 748
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Poll an AI Platform job until it reaches SUCCEEDED or a one-hour
# wall-clock timeout elapses.
# Usage: job_status.sh <project-id> <job-id>
# Exits 0 on success, 1 on timeout.

# SECONDS is bash's builtin wall-clock counter: it auto-increments once per
# elapsed second, including during `sleep`.  The old `((SECONDS++))` was
# therefore redundant (and misleading — it looked like an iteration count).
SECONDS=0
TIMEOUT=3600
POLL_INTERVAL=120
# jq .state emits a JSON-quoted string, hence the embedded double quotes.
expected_state='"SUCCEEDED"'

# Fetch the job's current state (JSON-quoted) for project $1, job $2.
# The access token is re-fetched on every poll because gcloud tokens
# expire (~1h) and this loop can run that long.
fetch_state() {
  local auth_token
  auth_token="$(gcloud auth print-access-token)"
  curl -X GET \
    -H "Content-Type: application/json" \
    -H "Authorization: Bearer $auth_token" \
    "https://content-ml.googleapis.com/v1/projects/$1/jobs/$2" | jq .state
}

succeeded=0
while [ "$SECONDS" -lt "$TIMEOUT" ]
do
  state=$(fetch_state "$1" "$2")
  if [ "$state" = "$expected_state" ]; then
    echo "Job succeeded! All done!"
    succeeded=1
    break
  fi
  echo "Got $state :( Not done yet..."
  sleep "$POLL_INTERVAL"
done

# Use an explicit flag rather than re-comparing SECONDS: the old
# `[ $SECONDS -gt $TIMEOUT ]` missed the SECONDS == TIMEOUT exit case.
if [ "$succeeded" -ne 1 ]; then
  echo "TIMEOUT reached.."
  exit 1
fi
| true
|
2e7363de8665f826ccdedb87a79497384a67209e
|
Shell
|
yuju-huang/OmniVisor
|
/host/benchmark/fio/parse_log.sh
|
UTF-8
| 382
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Summarize an fio log: print each "path" value, and for every bandwidth
# line print "<testcase>,<testsize>,<field4>,<field5>" using the most
# recently seen "name=<testcase>_<testsize>" header line.
# Usage: parse_log.sh <fio-log-file>

parse_fio_log() {
  # Keep only the interesting lines, then normalize spacing exactly the way
  # the original vim substitutions did — note: FIRST occurrence per line
  # only (the vim :%s commands had no /g flag), so sed also omits /g.
  # Using sed instead of `vim -c` fixes two defects: vim aborted the whole
  # substitution chain (and never wrote the file) when a pattern had no
  # match, and it required a terminal when used in a batch pipeline.
  # The pipeline also removes the leftover "tmp" scratch file entirely.
  grep -E 'path|name|bw=' -- "$1" \
    | sed -e 's/MiB/ MiB/' -e 's/KiB/ KiB/' -e 's/  */ /' \
    | awk -F '[= _]' '
        $1 == "path" { print $2; next }
        $1 == "name" { testcase = $2; testsize = $3; next }
        { print testcase "," testsize "," $4 "," $5 }'
}

# Default to /dev/null when no argument is given so the script cannot hang
# reading a terminal (the old unquoted `cat $1` did exactly that).
parse_fio_log "${1:-/dev/null}"
| true
|
54dd1213b3ba05881eda607db14e76eaec734167
|
Shell
|
HowookJeong/elastic-stack-installer
|
/stack/kibana/bin/installer
|
UTF-8
| 8,055
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/zsh
# Interactive Kibana installer: downloads the tarball for the chosen
# OS/arch, copies it to a remote host over SSH/SCP, unpacks it into the
# deploy path, installs the start/stop scripts and optionally refreshes a
# symlink.  All user-facing prompts are in Korean and kept verbatim.

# Candidate artifacts, indexed by the OS menu choice (0/1/2).
DOWNLOADS=( "https://artifacts.elastic.co/downloads/kibana/kibana-_VERSION_-darwin-x86_64.tar.gz" "https://artifacts.elastic.co/downloads/kibana/kibana-_VERSION_-linux-x86_64.tar.gz" "https://artifacts.elastic.co/downloads/kibana/kibana-_VERSION_-linux-aarch64.tar.gz")

echo "설치 할 운영체제를 선택 하세요."
echo "0. MACOS"
echo "1. LINUX_X86_64"
echo "2. LINUX_AARCH64"
read -r choice
echo "선택한 운영 체계는 $choice 번 입니다."

echo "설치 할 버전을 입력 하세요."
echo "예) 7.15.1"
read -r version

# Name of the directory the tarball extracts to.
unzipFolder=""
if [[ $choice == "0" ]]
then
  unzipFolder="kibana-$version-darwin-x86_64"
fi
if [[ $choice == "1" ]]
then
  unzipFolder="kibana-$version-linux-x86_64"
fi
if [[ $choice == "2" ]]
then
  # NOTE(review): the aarch64 artifact ends in "linux-aarch64" but the
  # extracted folder is assumed here to be "linux-arm64" — confirm against
  # the actual tarball for the target Kibana version.
  unzipFolder="kibana-$version-linux-arm64"
fi

echo "입력한 버전은 $version 입니다."
# Show the single selected artifact URL.  Fix: the slice length was
# "$choice" instead of 1, so choice 0 printed nothing and choice 2 could
# print more than one entry.
echo "${DOWNLOADS[@]:$choice:1}"
downloadUrl=$(echo ${DOWNLOADS[@]:$choice:1} | sed -e "s/_VERSION_/$version/g")

echo "VPN 연결을 통해 배포가 이루어 지나요? (y/n)"
echo "설치 파일을 먼저 다운로드 받습니다. 이후 설치 스크립트를 재실행 하고 이 단계를 'N' 로 입력하고 스킵 합니다."
read -r isVpn

# Fix: the echoed command previously advertised wget flags
# (--read-timeout/--no-check-certificate) that the real invocation below
# never used; the log now matches what actually runs.
echo "wget $downloadUrl"
wget $downloadUrl

if [[ $isVpn == "y" || $isVpn == "Y" ]]
then
  echo "다운로드가 완료 되었습니다.\n다시 실행해 주세요."
  exit 1
fi

# Basename of the downloaded artifact (was: rev | cut -d '/' -f 1 | rev).
deployFile="${downloadUrl##*/}"

echo "SSH 통신을 위한 KEY 가 필요 한가요? (y/n)"
read -r isKey

sshPort=22
sshKey=""
if [[ $isKey == "y" || $isKey == "Y" ]]
then
  echo "사용할 KEY 파일의 전체 경로와 파일명을 입력 하세요."
  echo "예)"
  echo "/Users/henry.force/.ssh/ec2.pem"
  read -r sshKey
  echo "입력 하신 KEY 파일은 $sshKey 입니다."
fi

echo "SSH 접속 User 를 입력 하세요."
echo "예) deploy"
read -r sshUser

echo "설치할 인스턴스의 IP 를 입력 하세요."
echo "예)"
echo "127.0.0.1"
read -r sshHost

echo "인스턴스에 설치할 경로를 입력 하세요."
echo "예)"
echo "/home/deploy/apps/"
read -r deployPath

echo "인스턴스에 설치 파일을 배포할 경로를 입력 하세요."
echo "예)"
echo "/home/deploy/dist/elastic-stack/kibana"
read -r distPath

echo "Symbolic link 를 사용하시면 입력 하시고 아니면 엔터를 입력 하세요."
read -r symbolic

# Two near-identical deployment paths: without (-p only) and with (-i key)
# an SSH identity file.  The echo lines preview the commands; note the
# executed sequence additionally removes any previous $unzipFolder under
# $deployPath before moving the fresh one in.
if [[ $sshKey == "" ]]
then
  echo "ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost mkdir -p $distPath"
  echo "ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost mkdir -p $deployPath"
  echo "scp -P $sshPort -o StrictHostKeychecking=no $deployFile $sshUser@$sshHost:$distPath"
  echo "ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost cd $distPath; tar -xvzf $deployFile"
  echo "ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost cd $distPath; rm -f $deployFile"
  echo "ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost cd $distPath; mv $unzipFolder $deployPath"
  echo "scp -P $sshPort -o StrictHostKeychecking=no start $sshUser@$sshHost:$deployPath/$unzipFolder/bin/"
  echo "scp -P $sshPort -o StrictHostKeychecking=no stop $sshUser@$sshHost:$deployPath/$unzipFolder/bin/"
  echo "ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost cd $deployPath/$unzipFolder/bin; chmod 755 start"
  echo "ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost cd $deployPath/$unzipFolder/bin; chmod 755 stop"
  ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "mkdir -p $distPath"
  ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "mkdir -p $deployPath"
  scp -P $sshPort -o StrictHostKeychecking=no $deployFile $sshUser@$sshHost:$distPath
  ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $distPath; tar -xvzf $deployFile"
  ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $distPath; rm -f $deployFile"
  ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $deployPath; rm -rf $unzipFolder"
  ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $distPath; mv $unzipFolder $deployPath"
  scp -P $sshPort -o StrictHostKeychecking=no start $sshUser@$sshHost:$deployPath/$unzipFolder/bin/
  scp -P $sshPort -o StrictHostKeychecking=no stop $sshUser@$sshHost:$deployPath/$unzipFolder/bin/
  ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $deployPath/$unzipFolder/bin; chmod 755 start"
  ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $deployPath/$unzipFolder/bin; chmod 755 stop"
  if [[ $symbolic != "" ]]
  then
    echo "ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost cd $deployPath; rm -f $symbolic"
    echo "ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost cd $deployPath; ln -s $unzipFolder $symbolic"
    ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $deployPath; rm -f $symbolic"
    ssh -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $deployPath; ln -s $unzipFolder $symbolic"
  fi
else
  echo "ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost mkdir -p $distPath"
  echo "ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost mkdir -p $deployPath"
  echo "scp -i $sshKey -P $sshPort -o StrictHostKeychecking=no $deployFile $sshUser@$sshHost:$distPath"
  echo "ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost cd $distPath; tar -xvzf $deployFile"
  echo "ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost cd $distPath; rm -f $deployFile"
  echo "ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost cd $distPath; mv $unzipFolder $deployPath"
  echo "scp -i $sshKey -P $sshPort -o StrictHostKeychecking=no start $sshUser@$sshHost:$deployPath/$unzipFolder/bin/"
  echo "scp -i $sshKey -P $sshPort -o StrictHostKeychecking=no stop $sshUser@$sshHost:$deployPath/$unzipFolder/bin/"
  echo "ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost cd $deployPath/$unzipFolder/bin; chmod 755 start"
  echo "ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost cd $deployPath/$unzipFolder/bin; chmod 755 stop"
  ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "mkdir -p $distPath"
  ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "mkdir -p $deployPath"
  scp -i $sshKey -P $sshPort -o StrictHostKeychecking=no $deployFile $sshUser@$sshHost:$distPath
  ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $distPath; tar -xvzf $deployFile"
  ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $distPath; rm -f $deployFile"
  ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $deployPath; rm -rf $unzipFolder"
  ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $distPath; mv $unzipFolder $deployPath"
  scp -i $sshKey -P $sshPort -o StrictHostKeychecking=no start $sshUser@$sshHost:$deployPath/$unzipFolder/bin/
  scp -i $sshKey -P $sshPort -o StrictHostKeychecking=no stop $sshUser@$sshHost:$deployPath/$unzipFolder/bin/
  ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $deployPath/$unzipFolder/bin; chmod 755 start"
  ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $deployPath/$unzipFolder/bin; chmod 755 stop"
  if [[ $symbolic != "" ]]
  then
    echo "ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost cd $deployPath; rm -f $symbolic"
    echo "ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost cd $deployPath; ln -s $unzipFolder $symbolic"
    ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $deployPath; rm -f $symbolic"
    ssh -i $sshKey -p $sshPort -o StrictHostKeychecking=no $sshUser@$sshHost "cd $deployPath; ln -s $unzipFolder $symbolic"
  fi
fi

# Clean up the locally downloaded artifact.
echo "다운로드 받은 파일을 삭제 합니다."
echo "rm -f $deployFile"
rm -f $deployFile
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.