blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
407436e8a7125a1a6e09b02bbf61ba19cdc0a01e
|
Shell
|
cabaalexander/dotfiles
|
/config/functions/go.sh
|
UTF-8
| 618
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
# shellcheck disable=SC2064
# go.sh - clone 'another-go-installer' into a throwaway directory and run it.
REPO_URL="https://github.com/cabaalexander/another-go-installer.git"

# goInstall [installer options...]
# Clones the installer repo into a temp dir, runs the installer, and cleans up.
# Defaults to quiet mode (-q) when no options are supplied.
goInstall(){
  local temp_dir
  temp_dir=$(mktemp -d)
  # TRAP: delete temp folder ;) (SC2064 disabled on purpose: we WANT
  # $temp_dir expanded now, while it is in scope)
  trap "{ rm -rf '$temp_dir' ; }" SIGINT SIGTERM EXIT
  echo "Cloning 'another-go-installer'..."
  git clone "$REPO_URL" "$temp_dir" &> /dev/null
  # Run the installer directly instead of via eval (eval on user-supplied
  # text is an injection hazard); preserve each argument's quoting.
  if [ "$#" -gt 0 ]; then
    "$temp_dir/another-go-installer.sh" "$@"
  else
    "$temp_dir/another-go-installer.sh" -q
  fi
}

# If this file is running in terminal call the function `goInstall`
# Otherwise just source it
if [ "$(basename "$0")" = "go.sh" ]
then
  goInstall "${@}"
fi
| true
|
dbbdd90be1ed9eb8da0223d52e91acfa0dccae07
|
Shell
|
AaronWang30/CheckHook
|
/post-commit
|
UTF-8
| 297
| 3.15625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Git post-commit hook: clone this repo into /tmp/<repo>/<describe> and run
# the external check script against the clone.
DIR=${PWD##*/}
echo "Current Location:" "$DIR"
# Recreate a clean staging area for this repository (paths quoted so
# directory names with spaces survive).
TARGET="/tmp/$DIR"
if [ -d "$TARGET" ]; then
  rm -rf "$TARGET"
fi
mkdir -p "$TARGET"
# Compute the describe string once so the clone path and the path handed to
# check.sh cannot disagree.
REV=$(git describe --always)
git clone --local . "$TARGET/$REV"
~/hooks/check.sh "$TARGET/$REV" "$PWD"
| true
|
3b02d0e68636800149dc298cc1df232eaa2b4b90
|
Shell
|
nealhardesty/barticus.ng
|
/docker_run
|
UTF-8
| 154
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the 'barticus' image if it does not exist yet, then run it on port 80.
# grep -q in a conditional replaces the fragile [ -z "$(... | grep ...)" ] test.
if ! docker images | grep -q barticus; then
  docker build -t barticus "$(dirname "$0")"
fi
docker run -p 80:80 -d --name barticus barticus
| true
|
37275bb2a108e8d405d58786023e46d9dd88e229
|
Shell
|
devildominator/scripts
|
/makeThelinklaURLShortner
|
UTF-8
| 433
| 3.59375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Usage: thelinkla [URL]
#
# Shorten a URL using the Thelink.la URL Shortener service (http://thelink.la/).
thelinkla() {
  [[ ! $1 ]] && { echo -e "Usage: thelinkla [URL]\n\nShorten a URL using the Rebrandly URL Shortener service (http://thelink.la)."; return; }
  # Quote the whole request URL: the original left the query part unquoted,
  # so URLs containing &, ?, or spaces were split/interpreted by the shell.
  curl -qsSL -m10 --connect-timeout 10 "http://thelink.la/api-shorten.php?url=${1//\"/\\\"}" | perl -ne 'if(m/(.*)/i) { print "$1\n" }'
}
# Quote "$@" so a URL with special characters reaches the function intact.
thelinkla "$@"
| true
|
49c4530b5a5900bb0418945979e294071bc4deee
|
Shell
|
LikoGuan/testreact
|
/strawberries/.travis/deploy.sh
|
UTF-8
| 691
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Travis CI deploy: decrypt the SSH deploy key, record the commit hash, and
# copy the build output to the server matching the current branch.
echo "start to deploy ......"
openssl aes-256-cbc -K "$encrypted_c9f720adf08c_key" -iv "$encrypted_c9f720adf08c_iv" -in .travis/github_deploy_key.enc -out production-main.pem -d
echo "$TRAVIS_COMMIT" > build/version
# Map branch -> deploy host.
case "$TRAVIS_BRANCH" in
  develop) DEPLOY_HOST=47.91.155.75 ;;
  release) DEPLOY_HOST=47.89.21.110 ;;
  master)  DEPLOY_HOST=47.52.67.166 ;;
esac
echo "DEPLOY_HOST $DEPLOY_HOST"
# Guard: with no matching branch the original scp targeted "root@:~/merchant/"
# and failed obscurely; fail loudly instead and don't leave the key behind.
if [ -z "$DEPLOY_HOST" ]; then
  echo "Unknown branch '$TRAVIS_BRANCH', not deploying" 1>&2
  rm -f production-main.pem
  exit 1
fi
chmod 400 production-main.pem
scp -i production-main.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -q -r build/. root@${DEPLOY_HOST}:~/merchant/
# Remove the decrypted private key as soon as the copy is done.
rm -f production-main.pem
echo "done!"
exit 0
| true
|
ab51498d3289073b9b0c88273a1f29de18e76124
|
Shell
|
Midburn/midburn-k8s
|
/switch_environment.sh
|
UTF-8
| 1,577
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# this script can run using source - to enable keeping the environment variables and shell completion
#
# please pay attention not to call exit in this script - as it might exit from the user's shell
#
# thanks for your understanding and cooperation
# Best-effort bootstrap: each line installs a tool only if it is missing.
# Failures here are tolerated; the combined check below decides whether to proceed.
! which kubectl >/dev/null && echo "attempting automatic installation of kubectl" && gcloud --quiet components install kubectl
! which helm >/dev/null && echo "attempting automatic installation of helm" && curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh && chmod 700 get_helm.sh && ./get_helm.sh
! which dotenv >/dev/null && echo "attempting automatic installation of python-dotenv" && sudo pip install python-dotenv
! which jq >/dev/null && echo "attempting automatic installation of jq" && sudo apt-get update && sudo apt-get install -y jq
# Only switch environments when every dependency is available.
if which dotenv >/dev/null && which helm >/dev/null && which kubectl >/dev/null && which jq >/dev/null; then
    if [ "${1}" == "" ]; then
        echo "source switch_environment.sh <ENVIRONMENT_NAME>"
    else
        ENVIRONMENT_NAME="${1}"
        if [ ! -f "environments/${ENVIRONMENT_NAME}/.env" ]; then
            echo "missing environments/${ENVIRONMENT_NAME}/.env"
        else
            # Load the current .env (if any) into this shell before switching.
            [ -f .env ] && eval `dotenv -f ".env" list`
            echo "Switching to ${ENVIRONMENT_NAME} environment"
            # Replace the .env symlink with one pointing at the chosen environment.
            rm -f .env
            if ! ln -s "`pwd`/environments/${ENVIRONMENT_NAME}/.env" ".env"; then
                echo "Failed to symlink .env file"
            else
                source connect.sh
            fi
        fi
    fi
else
    echo "Failed to install dependencies, please try to install manually"
fi
| true
|
12f8cdfda70214098da8226549474259ef9c38b4
|
Shell
|
jsarenik/deobfuscation
|
/scripts/bitcoin-torrent-rest.sh
|
UTF-8
| 392
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/sh
# Create a torrent for a (linearized) bitcoin block directory via webtorrent.
# Argument 1: source directory (defaults to the current directory).
#test "$#" = "0" && exit 1
BLD=${1:-"$PWD"}
# All expansions quoted: unquoted paths broke on spaces/globs in the original.
test -d "$BLD" || exit 1
echo "$BLD"
sleep 2
OUT=$HOME/bitcoin-torrent-out
test -d "$OUT" || mkdir "$OUT"
FN=$OUT/bitcoin-torrent-linearized-020-rest.torrent
# Refuse to overwrite an existing torrent file.
test -r "$FN" && exit 1
echo "Writing $FN..."
webtorrent create -o "$FN" -a http://a.bublina.eu.org:8000/announce -a udp://a.bublina.eu.org:8000 -a ws://a.bublina.eu.org:8000 --verbose "$BLD"
| true
|
e3a9e7948affe1d72b3683070b5e79a4f5fb2c44
|
Shell
|
devarsh13/Parallel-Travelling-Salesman-Problem
|
/Codes/script.sh
|
UTF-8
| 416
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Benchmark serial vs parallel TSP solvers over problem sizes 5..13.
# Compile each variant ONCE up front (the sources never change between
# iterations; the original recompiled all three inside every loop pass and
# relied on a shared ./a.out being overwritten in the right order).
gcc TSP_serial.c -fopenmp -o tsp_serial
gcc TSP_parallel.c -fopenmp -lm -o tsp_parallel
gcc TSP_parallel1.c -fopenmp -lm -o tsp_parallel1
for i in $(seq 5 1 13)
do
  echo "$i" > input.txt
  python dataGenerator.py >data.txt <input.txt
  ./tsp_serial <data.txt >>serial.txt
  ./tsp_parallel <data.txt >>parallel.txt
  # Same parallel1 binary, run with 2 and then 4 OpenMP threads.
  export OMP_NUM_THREADS=2
  ./tsp_parallel1 <data.txt >>parallel12.txt
  export OMP_NUM_THREADS=4
  ./tsp_parallel1 <data.txt >>parallel14.txt
done
| true
|
9e3493859b84d740fc13be6f689ea885e2c05ce8
|
Shell
|
Dynamedia/docker-octobercms-stack
|
/dumpmysql.sh
|
UTF-8
| 1,299
| 3.890625
| 4
|
[] |
no_license
|
# Dump the project's MySQL database from its docker container to a .sql file.
# Usage: dumpmysql.sh [dest-path|dest-dir/]
#
# Derive a sanitized project name from the current directory; the original
# clobbered the shell's special $PWD, and its character class used \d, which
# in a bash bracket expression matches literal '\' and 'd' - not digits -
# so digits were stripped from the project name.
PROJECT=$(printf '%q\n' "${PWD##*/}")
PROJECT=${PROJECT//[^a-zA-Z0-9\-\_:]/}
TYPE=$(grep "^OC_DB_CONNECTION" .env | cut -f2- -d=)
CONTAINER=$(docker ps | awk '{print $NF}' | grep mysql | grep "$PROJECT")
USERNAME=$(grep "^OC_DB_USERNAME" .env | cut -f2- -d=)
PASSWORD=$(grep "^OC_DB_PASSWORD" .env | cut -f2- -d=)
DATABASE=$(grep "^OC_DB_DATABASE" .env | cut -f2- -d=)
if [ "$TYPE" != "mysql" ] ; then
  echo "To use this tool you must be using the mysql database connection"
  exit 1
fi
# Destination defaults to sqldump/<db>.<timestamp>.sql. A trailing slash
# means "directory" (timestamped file appended); otherwise ensure .sql suffix.
DESTPATH=$1
if [ -z "$DESTPATH" ] ; then
  DESTPATH="sqldump/${DATABASE}.$(date +"%Y_%m_%d_%H%M").sql"
fi
case "$DESTPATH" in
  *.sql) ;;  # already a full .sql file name
  */) DESTPATH="${DESTPATH}${DATABASE}.$(date +"%Y_%m_%d_%H%M").sql" ;;
  *) DESTPATH="${DESTPATH}.sql" ;;
esac
FILE=$(basename "${DESTPATH}")
DIR=$(dirname "${DESTPATH}")
if ! mkdir -p "$DIR" || [ ! -w "$DIR" ] ; then
  echo "You do not have write privileges at the specified path"
  exit
fi
if [ -n "$USERNAME" ] && [ -n "$PASSWORD" ] && [ -n "$DATABASE" ] ; then
  docker exec "$CONTAINER" mysqldump -u"$USERNAME" -p"$PASSWORD" "$DATABASE" > "$DESTPATH"
else
  printf "You must set the following variables in your .env file: \n \
OC_DB_USERNAME\n \
OC_DB_PASSWORD\n \
OC_DB_DATABASE\n"
fi
| true
|
f172a51851831591dff1d9b765e7486c38a72fcb
|
Shell
|
devudo/devudo_scripts
|
/devudo-delete-user.sh
|
UTF-8
| 444
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Delete a system user (and their home directory, mail spool) by name.
USERNAME=$1
# FIX: the original unquoted test `[ $USERNAME == '' ]` collapses to
# `[ == '' ]` when no argument is given, which is a syntax error instead of
# the intended usage message. -z with quotes handles the empty case.
if [ -z "$USERNAME" ]; then
  echo "You must provide a username."
  exit 1
fi
# Check for existing username
# Thanks, http://www.cyberciti.biz/tips/howto-write-shell-script-to-add-user.html
if grep -q "^$USERNAME" /etc/passwd; then
  echo "$USERNAME found..."
else
  echo "$USERNAME does not exist!"
  exit 1
fi
# Delete user
userdel -rf "$USERNAME"
echo "User $USERNAME has been deleted!"
|
0abf87580f585c7fde632e71b8becd177a0596da
|
Shell
|
madelinehicks/personal-website
|
/restart_fcgi
|
UTF-8
| 987
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Restart the Django FastCGI backend on a fresh port and repoint nginx at it:
# start the new worker, reload nginx, then kill the old worker.
if [[ $EUID -ne 0 ]]; then
  echo "This script must be run as root" 1>&2
  exit 1
fi
cwd=$(pwd)
pid_file=/tmp/fcgi.pid
port_file=/tmp/fcgi.port
# Locate the currently running fcgi process (pid file, else ps scan).
if [[ -f $pid_file ]]; then
  old_pid=$(cat "$pid_file")
else
  old_pid=$(ps ax | grep runfcgi | head -1 | cut -f1 -d ' ')
fi
# Pick the next port; first run uses 8000.
if [[ -f $port_file ]]; then
  last_port=$(cat "$port_file")
  port_to_use=$((last_port + 1))
else
  last_port=8080
  port_to_use=8000
fi
# Reset so port_to_use doesn't increment forever
if [[ $port_to_use -gt 8999 ]]; then
  port_to_use=8000
fi
# Swap the old port for the new one in the nginx vhost config.
sed -i "s/$last_port/$port_to_use/g" /etc/nginx/sites-available/mad_home.conf
if [[ ! -d logs ]]; then
  # (the original wrapped this in useless backticks, running the commands'
  # empty output as a command)
  mkdir logs && chmod -R 777 logs
fi
python manage.py runfcgi host=127.0.0.1 port="$port_to_use" daemonize=true workdir="$cwd" errlog="$cwd/logs/cgi.err" outlog="$cwd/logs/cgi.out" pidfile="$pid_file"
echo "$port_to_use" > "$port_file"
service nginx reload
# Give nginx time to drain connections to the old worker before killing it.
sleep 5s
echo "Killing old processes on $last_port, pid $old_pid"
kill -SIGUSR1 "$old_pid"
| true
|
28b70ef15154ddece40d6ce2ab9d8030ef29e9ea
|
Shell
|
averagehuman/vps-init
|
/ubuntu/etc/nginx_install.sh
|
UTF-8
| 1,840
| 2.890625
| 3
|
[] |
no_license
|
###############################################################################
# install nginx
###############################################################################
# Builds nginx from source into /srv/nginx, then installs an upstart job and
# a logrotate rule. Idempotent: skips everything if the binary already exists.
nginx_version="1.4.3"
# Build dependencies: PCRE (rewrite module), zlib (gzip), OpenSSL (https).
sudo apt-get -y install libpcre3-dev zlib1g-dev libssl-dev
prefix="/srv/nginx"
if [ -e "$prefix/sbin/nginx" ]; then
    echo "nginx already installed at $prefix/sbin/nginx"
else
    # Fetch and unpack the source in a uniquely-named temp dir.
    tmpdir="/tmp/nginx-install-$(date +%y%m%d-%H%M%S)"
    mkdir -p $tmpdir
    cd $tmpdir
    wget http://nginx.org/download/nginx-${nginx_version}.tar.gz
    tar -xvf nginx-${nginx_version}.tar.gz
    cd nginx-${nginx_version}
    # All runtime paths live under $prefix; scgi/ssi modules are disabled.
    ./configure \
    --prefix=$prefix \
    --pid-path=$prefix/run/nginx.pid \
    --lock-path=$prefix/run/nginx.lock \
    --http-client-body-temp-path=$prefix/run/client_body_temp \
    --http-proxy-temp-path=$prefix/run/proxy_temp \
    --http-fastcgi-temp-path=$prefix/run/fastcgi_temp \
    --http-uwsgi-temp-path=$prefix/run/uwsgi_temp \
    --user=www \
    --group=www \
    --with-http_ssl_module \
    --without-http_scgi_module \
    --without-http_ssi_module
    make && make install
    chown -R www:www $prefix/logs
    # Upstart job: \$DAEMON / \$PID stay literal for upstart to expand;
    # unescaped $prefix is expanded now, at install time.
    cat > /etc/init/nginx.conf <<EOF
description "nginx http daemon"
author "Philipp Klose"
start on (filesystem and net-device-up IFACE=lo)
stop on runlevel [!2345]
env DAEMON=$prefix/sbin/nginx
env PID=$prefix/run/nginx.pid
expect fork
respawn
respawn limit 10 5
#oom never
pre-start script
\$DAEMON -t
if [ \$? -ne 0 ]
then exit \$?
fi
end script
exec \$DAEMON
EOF
    # NOTE(review): in the postrotate line below the backticks are NOT
    # escaped, so `cat $prefix/nginx.pid` runs NOW (during install) rather
    # than being written literally into the logrotate file - likely a bug;
    # confirm intent before changing.
    cat > /etc/logrotate.d/nginx <<EOF
/srv/nginx/logs/*.log {
daily
missingok
rotate 52
compress
delaycompress
notifempty
create 0640 www www
sharedscripts
postrotate
[ ! -f $prefix/nginx.pid ] || kill -USR1 `cat $prefix/nginx.pid`
endscript
}
EOF
fi
| true
|
6c9ddd2ed7560db9e09324f4a19c497b63efafd6
|
Shell
|
Elvenius1/bootstrap
|
/scripts/run_qemu.sh
|
UTF-8
| 879
| 2.78125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Boot the SigmaOS image under QEMU (KVM, q35 machine, NVMe boot disk, xHCI).
# Any extra script argument ($1) is appended to the QEMU command line.

# Guest image and sizing.
SIGMA_IMAGE="sigma.hdd"
SIGMA_N_CPUS="4"
SIGMA_N_RAM="1G"

# Disk attachment variants; only the NVMe one is used in QEMU_ARGS below,
# the others are kept as ready-made alternatives.
QEMU_HDD_AHCI="-hda $SIGMA_IMAGE"
QEMU_HDD_NVME="-drive file=$SIGMA_IMAGE,if=none,id=nvmedisk0 -device nvme,drive=nvmedisk0,serial=SigmaOSDisk"
QEMU_HDD_USB="-drive if=none,id=usbdisk0,file=$SIGMA_IMAGE -device usb-storage,bus=xhci0.0,drive=usbdisk0"

# Machine / acceleration. The IOMMU variants are defined but not wired into
# QEMU_ARGS either.
QEMU_KVM="-enable-kvm -cpu host"
QEMU_SMP="-smp $SIGMA_N_CPUS"
QEMU_MACHINE="-machine q35 -m $SIGMA_N_RAM"
QEMU_INTEL_IOMMU="-device intel-iommu,aw-bits=48" # aw-bits=48 is 4 level paging
QEMU_AMD_IOMMU="-device amd-iommu"

# Peripherals and debug flags (-d int logs interrupts/exceptions).
QEMU_XHCI=" -device nec-usb-xhci,id=xhci0"
QEMU_GPU="-vga std"
QEMU_DEBUG="-monitor stdio -serial file:/dev/stdout -no-shutdown -no-reboot -d int"
#QEMU_TRACE="-trace usb_xhci*"

# QEMU_ARGS is intentionally unquoted below so each flag word-splits into a
# separate argument; c++filt demangles any C++ symbols in the output.
QEMU_ARGS="$QEMU_KVM $QEMU_SMP $QEMU_MACHINE $QEMU_GPU $QEMU_XHCI $QEMU_TRACE $QEMU_DEBUG $QEMU_HDD_NVME $1"
qemu-system-x86_64 $QEMU_ARGS | c++filt
| true
|
1154c3004916bebab475f072ee7daebaee954fdb
|
Shell
|
zchee/zsh-default-completions
|
/src/Zsh/Command/_run-help
|
UTF-8
| 210
| 2.6875
| 3
|
[] |
no_license
|
#compdef run-help
# Completion for zsh's run-help: offer the help topics found in $HELPDIR.
local d expl
# @runhelpdir@ is substituted with the configured help directory at build time.
local HELPDIR=${HELPDIR:-@runhelpdir@}
[[ -d $HELPDIR ]] && {
  # Collect topic names ((:t) keeps only the file tails); when any exist,
  # also offer '.' and ':' and register everything as command completions.
  d=($HELPDIR/*(:t))
  (($#d)) && d+=('.' ':') && _wanted commands expl 'command' compadd -a d
} || _man
# vim:ft=zsh
| true
|
3cd0ed5c142b485d65ad648f65db010b3e3bd05b
|
Shell
|
wangzhaojin2016/monitor
|
/client/run.sh
|
UTF-8
| 1,021
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# Control script for the client_monitor daemon.
# Usage: sh run.sh start|stop|check
# (the original had the shebang duplicated)
ctrl=$1
if [ "$#" -ne "1" ];then
  echo "Scripts need parameters,parameters=1"
  echo "For example:"
  echo "sh run start|stop|check"
  exit 1
fi
# PID of an already-running client_monitor, if any (empty when not running).
pid=$(ps -ef|grep 'client_monitor'|grep -v grep | awk '{print $2}')

# Rotate the console log and launch the monitor in the background.
function __start(){
  cd log/
  [ "$pid" != "" ] && echo "python process is exitst,pid is $pid" && exit 1
  [ -f console.log.bak ] && rm -f console.log.bak
  [ -f console.log ] && mv console.log console.log.bak
  cd -
  nohup python -u bin/client_monitor.py >log/console.log 2>&1 &
  exit
}

# Stop the monitor. NOTE: killall kills EVERY python process on the host,
# not just client_monitor - kept as-is per the original, but heavy-handed.
function __stop(){
  killall python
  echo "kill python complate"
}

# Report whether client_monitor is currently running.
function __check(){
  pid=$(ps -ef|grep 'client_monitor'|grep -v grep | awk '{print $2}')
  if [[ "${pid}" != "" ]];then
    echo "client_monitor is running,pid is $pid"
  else
    echo "client_monitor is stoped"
  fi
  exit 1
}

case "$ctrl" in
  start)
    __start
    ;;
  stop)
    __stop
    ;;
  check)
    __check
    ;;
  *)
    # FIX: the original printf used undefined $num inside its format string;
    # print the offending argument via a %s placeholder instead.
    printf '%s Arguments are error!You only set: start|check|stop\n' "$ctrl"
    ;;
esac
| true
|
1376957b6f3e6d3170abe885b872bfaf72f77f38
|
Shell
|
TARGETrus/scte35-1
|
/phabid.sh
|
UTF-8
| 258
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Look up the Phabricator revision that owns diff $DIFF_ID and print "D<id>".
request="{\"ids\": [\"${DIFF_ID}\"]}"
response=$(echo "$request" | arc call-conduit --conduit-uri https://phabricator.dm.nfl.com/ differential.querydiffs)
phabid=$(echo "$response" | jq -r ".[\"response\"][\"${DIFF_ID}\"][\"revisionID\"]")
# jq emits the string "null" when the diff has no associated revision.
if [ "$phabid" != "null" ]; then
  echo "D$phabid"
fi
| true
|
e3ca19b3b92e3900f37f4fe75dea065b075cb5af
|
Shell
|
sunny775/yarn-plugins
|
/yarn-hello_old_funcs/src/check.sh
|
UTF-8
| 366
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Resolve a Node module name to its installed package directory and print it.
set -e
MODULE=$1
# First attempt: resolve <module>/package.json and strip the trailing
# "/package.json" to get the package root. node's stderr is suppressed and
# failure tolerated ("|| true"), yielding an empty string on miss.
FILE=`(node -e "console.log(require.resolve('$MODULE'+'/package.json'))" 2>/dev/null || true) | perl -pe 's/\/package\.json$/$1/'`
if [ "$FILE" == "" ]; then
  # Fallback: resolve the module's entry point and trim the path down to the
  # package directory under a "nodejs/" tree, keeping an optional @scope.
  # NOTE(review): this assumes install paths contain a "nodejs/" segment - confirm.
  FILE=`node -e "console.log(require.resolve('$MODULE'))"|perl -pe 's/(nodejs\/(?:\@[^\/]*\/)?[^\@][^\/]*)(\/.*)?$/$1/'`
  if [ "$FILE" == "" ]; then
    exit 1
  fi
fi
echo $FILE
| true
|
97e92d7a4c89b710a1b7f150a6344a44d012382a
|
Shell
|
ShikhaSuman8601/LargeApplicationsPracticumB18085
|
/B18085_lab1_soln.sh
|
UTF-8
| 1,717
| 3.3125
| 3
|
[] |
no_license
|
# count=3
# echo $count
# echo count
# ((count=count+1))
# echo $count
# NAME1="Arun Kumar "
# echo $NAME1
# NAME2="Verma"
# NAME3=$NAME1$NAME2
# echo $NAME3
# echo $NAME1+$NAME2
#!/bin/bash
# x=5
# y=10
# ans=$((x+y))
# echo "$x + $y = $ans"
# echo Enter x
# read x
# echo Enter y
# read y
# echo Enter z
# read z
# max=$x
# if [[ $y>$max ]]; then
# max=$y
# fi
# if [[ $z>$max ]]; then
# max=$z
# fi
# echo max of x,y and z is $max
# cat > movies.list << EOF
# Avengers
# Spiderman Into the Spiderverse
# Shutter Island
# Black Swan
# Fight Club
# Paper Towns
# Romeo and Juliet
# Skam wtFock
# EOF
# cat movies.list | wc
# NOOFLINES=$(cat movies.list | wc -l)
# echo $NOOFLINES lines in movies.list
# echo
# echo Random sequence of 20 numbers:
# seq 20 | shuf
# echo
# echo sequence of 20 numbers sorted:
# seq 20 | shuf | sort -n
# cat > marks.dat <<EOF
# B12 Rajesh 34
# B18 Mahesh 75
# B19 Arun 55
# B10 Vinod 90
# B22 Priya 95
# B30 Susan 85
# EOF
# echo After shuffling
# cat marks.dat | shuf
# echo
# echo After sorting
# cat marks.dat | shuf | sort -k 3 -r
# echo
# echo Top three marks
# cat marks.dat | shuf | sort -k 3 -r | head -n 3
# man xargs
# whoami
# pwd
# etclist=`ls /etc | wc -l`
# echo $etclist
# etcdir=`ls -l /etc | grep '^d' | wc -l`
# echo $etcdir
# sleep 5s
# total=0
# for i in quiz1.txt quiz2.txt endsem.txt; do
# while IFS=" " read -r name score; do
# echo $score
# done < "$i"
# done
# while read name1 score1 name2 score2 name3 score3; do
# echo "$name" "(($score1+$score2+$score3))" >> 'totalmarks.txt'
# done < 'quiz1.txt' 'quiz2.txt' 'endsem.txt'
# Read every (name, score) record from the three score files, printing each score.
# FIX: the original captured the files' *contents* into $file and then used
# that text as a redirection target ("done < \"$file\""), which always fails
# with "No such file or directory"; feed the captured text through a
# here-string instead. Also "echo score" printed the literal word "score".
all_scores=$(cat quiz1.txt quiz2.txt endsem.txt)
while IFS=" " read -r name score; do
  echo "$score"
done <<< "$all_scores"
| true
|
c1e717439037d26ce21ebbd74581ba736724692b
|
Shell
|
StoneyJackson/bash
|
/files/unx
|
UTF-8
| 1,272
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
# COMMAND: unx file
# Extract an archive into a directory named after it (extension stripped).

## Exit with error if $1 is undefined or not a file.
if [ -z "$1" ] ; then
  echo "usage: $(basename "$0") path/to/file"
  exit 1
fi

## Exit with error if $1 is not a simple file.
if [ ! -f "$1" ] ; then
  echo "$(basename "$0"): Not a file: $1"
  exit 1
fi

# exists TOOL - succeed if TOOL is on PATH, else print a diagnostic and fail.
exists() {
  # BUG FIX: the original always probed 'unrar' no matter which tool was
  # requested, so e.g. "exists tar" passed/failed based on unrar's presence.
  if command -v "$1" > /dev/null 2>&1 ; then
    return 0
  else
    echo "$(basename "$0"): I require '$1' to extract."
    return 1
  fi
}

# Dispatch on file extension; each arm verifies its extractor first.
case "$1" in
  *.zip) exists unzip || exit 1 ; unzip "$1" -d "${1%.zip}" ;;
  *.rar) exists unrar || exit 1 ; unrar x "$1" "${1%.rar}/" ;;
  *.tar.gz) exists tar || exit 1 ; tar -zxvf "$1" -C "${1%.tar.gz}" ;;
  *.tgz) exists tar || exit 1 ; tar -zxvf "$1" -C "${1%.tgz}" ;;
  *.tar.bz) exists tar || exit 1 ; tar -jxvf "$1" -C "${1%.tar.bz}" ;;
  *.tar.bz2) exists tar || exit 1 ; tar -jxvf "$1" -C "${1%.tar.bz2}" ;;
  *.tbz2) exists tar || exit 1 ; tar -jxvf "$1" -C "${1%.tbz2}" ;;
  *.tbz) exists tar || exit 1 ; tar -jxvf "$1" -C "${1%.tbz}" ;;
  *.7z) exists 7z || exit 1 ; 7z x "$1" -o"${1%.7z}" ;;
  *) echo "Unrecognized file type: $1" 1>&2 ;;
esac
| true
|
5e3caa02c597eab182e58c43351ddaf5e8b6315e
|
Shell
|
sejaljpatel/au-bootcamp-git-intro
|
/count-fasta-seqs.sh
|
UTF-8
| 166
| 3.265625
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
#!/bin/sh
# Print the number of FASTA sequences (lines containing '>') in each file
# given as an argument, then the grand total across all of them.
# Guard: with zero arguments the final grep would read stdin and hang.
[ "$#" -gt 0 ] || exit 0
for file in "$@"
do
  # grep -c replaces the grep|wc -l pipeline; paths are quoted throughout.
  NUM=$(grep -c '>' "$file")
  filename=$(basename "$file")
  echo "$NUM" "$filename"
done
TOTAL=$(grep '>' "$@" | wc -l)
# Unquoted on purpose: squashes the leading padding some wc builds emit.
echo $TOTAL
| true
|
b26aa77f7c1169fb111b8e44cc998d50d6200039
|
Shell
|
dadoonet/elasticsearch
|
/x-pack/test/smb-fixture/src/main/resources/provision/installsmb.sh
|
UTF-8
| 2,986
| 2.640625
| 3
|
[
"Elastic-2.0",
"LicenseRef-scancode-elastic-license-2018",
"Apache-2.0",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
#! /bin/bash
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
# Provision a Samba Active Directory DC test fixture with Marvel-themed
# users/groups for LDAP/SMB integration tests.
set -ex
VDIR=/fixture
RESOURCES=$VDIR/src/main/resources
CERTS_DIR=$RESOURCES/certs
SSL_DIR=/var/lib/samba/private/tls
# install ssl certs
mkdir -p $SSL_DIR
cp $CERTS_DIR/*.pem $SSL_DIR
chmod 600 $SSL_DIR/key.pem
mkdir -p /etc/ssl/certs/
cat $SSL_DIR/ca.pem >> /etc/ssl/certs/ca-certificates.crt
# Provision the AD domain (ad.test.elasticsearch.com) and adopt its krb5 config.
mv /etc/samba/smb.conf /etc/samba/smb.conf.orig
samba-tool domain provision --server-role=dc --use-rfc2307 --dns-backend=SAMBA_INTERNAL --realm=AD.TEST.ELASTICSEARCH.COM --domain=ADES --adminpass=Passw0rd --use-ntvfs
cp /var/lib/samba/private/krb5.conf /etc/krb5.conf
service samba-ad-dc restart
# Add users
samba-tool user add ironman Passw0rd --surname=Stark --given-name=Tony --job-title=CEO
samba-tool user add hulk Passw0rd --surname=Banner --given-name=Bruce
samba-tool user add phil Passw0rd --surname=Coulson --given-name=Phil
samba-tool user add cap Passw0rd --surname=Rogers --given-name=Steve
samba-tool user add blackwidow Passw0rd --surname=Romanoff --given-name=Natasha
samba-tool user add hawkeye Passw0rd --surname=Barton --given-name=Clint
samba-tool user add Thor Passw0rd
samba-tool user add selvig Passw0rd --surname=Selvig --given-name=Erik
samba-tool user add Odin Passw0rd
samba-tool user add Jarvis Passw0rd
samba-tool user add kraken Passw0rd --surname=Kraken --given-name=Commander
samba-tool user add fury Passw0rd --surname=Fury --given-name=Nick
# Add groups
samba-tool group add SHIELD
samba-tool group add Avengers
samba-tool group add Supers
samba-tool group add Geniuses
samba-tool group add Playboys
samba-tool group add Philanthropists
samba-tool group add Gods
samba-tool group add Billionaires
samba-tool group add "World Security Council"
samba-tool group add Hydra
# Group membership (note: Supers contains the Avengers *group*, nesting it)
samba-tool group addmembers "SHIELD" Thor,hawkeye,blackwidow,cap,phil,hulk,ironman
samba-tool group addmembers "Avengers" Thor,hawkeye,blackwidow,cap,hulk,ironman
samba-tool group addmembers "Supers" Avengers
samba-tool group addmembers "Geniuses" selvig,hulk,ironman
samba-tool group addmembers "Playboys" ironman
samba-tool group addmembers "Philanthropists" Thor,hulk,ironman
samba-tool group addmembers "Gods" Thor,Odin
samba-tool group addmembers "Billionaires" ironman
samba-tool group addmembers "World Security Council" fury
samba-tool group addmembers "Hydra" kraken
# update UPN
# LDIF applied below: set selvig's userPrincipalName and add a seeAlso
# attribute to hulk's entry.
cat > /tmp/entrymods << EOL
dn: CN=Erik Selvig,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com
changetype: modify
replace: userPrincipalName
userPrincipalName: erik.selvig@ad.test.elasticsearch.com
dn: CN=Bruce Banner,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com
changetype: modify
add: seeAlso
seeAlso: CN=Avengers,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com
EOL
ldapmodify -D Administrator@ad.test.elasticsearch.com -w Passw0rd -H ldaps://127.0.0.1:636 -f /tmp/entrymods -v
| true
|
c2a1414a9d8e5b7a97aa1132186c18db6e5c8840
|
Shell
|
soundasleep/jevon.org-vagrant
|
/init.sh
|
UTF-8
| 508
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Vagrant provisioning: enable unattended upgrades and install RVM + Ruby.
echo ">> Updating apt-get..."
apt-get update
# TODO configure apt-listchanges to send email updates
apt-get install -y unattended-upgrades apt-listchanges
# Install RVM as per https://rvm.io/rvm/install
# (unless it already exists)
echo ">> Installing RVM..."
# Import the RVM release-signing key so the installer can be verified.
gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
[ -x "/usr/local/rvm/bin/rvm" ] || curl -sSL https://get.rvm.io | bash -s stable --ruby
# Don't install documentation when installing gems
echo "gem: --no-document" >> ~/.gemrc
| true
|
3373353f0db655ffcdd81b5efc4508b15848d8ee
|
Shell
|
sterding/BRAINcode
|
/src/_deSNP.random.sh
|
UTF-8
| 947
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build one random eSNP replicate (LSF job-array task) and count how many of
# the sampled SNPs overlap pruned GWAS catalog entries, per trait.
# usage: bsub -J "random[1-1000]" -q vshort -n 1 bash $pipeline_path/src/_deSNP.random.sh \$LSB_JOBINDEX
# either use $LSB_JOBINDEX in the script (ref: http://stackoverflow.com/questions/11212923/referencing-job-index-in-lsf-job-array)
# or, \$LSB_JOBINDEX outside of script as an argument (ref: https://www.ibm.com/support/knowledgecenter/SSETD4_9.1.3/lsf_admin/job_array_cl_args.dita)
i=$LSB_JOBINDEX
# Start this replicate's SNP list from scratch.
> random$i.list
# For each record, sample $count random lines from the matching eQTL file.
# (assumes esnp.combined columns are: file name/pattern, sample count - TODO confirm)
while read pattern count; do shuf -n $count /data/neurogen/rnaseq_PD/results/eQTL/HCILB_SNDA/$pattern >> random$i.list; done < esnp.combined
# get bed for random selection
# Intersect the sampled SNPs' BED records with the pruned GWAS catalog,
# normalize trait names, and emit a "trait<TAB>count" table sorted by count.
cat /data/neurogen/rnaseq_PD/results/eQTL/HCILB_SNDA/ALL_chr*.bed | fgrep -w -f random$i.list | intersectBed -a - -b $GENOME/Annotation/GWASCatalog/gwas_catalog_v1.0-downloaded.hg19.pruned.bed -wo | cut -f11 | sed 's/ (.*//g;s/;.*//g;s/ /_/g' | sort | uniq -c | sort -k1,1nr | awk '{OFS="\t"; print $2, $1}' > n_deSNP$i.txt
rm random$i.list
| true
|
7c8daf13d5e45e48fb35ba00005b976510182ef5
|
Shell
|
aalvarez75GiH/myBashFiles
|
/40_randomPIN.sh
|
UTF-8
| 1,656
| 3.5625
| 4
|
[] |
no_license
|
# ! /bin/bash
# Program to create a random 4-digit security PIN
#Arnoldo Alvarez
pin=""
pin_inicial=""
sumaPin=0
numeroPIN=""
#for ((i=0; i<4; i++))
#do
# pin=$((1 + RANDOM % 9))
#arregloPin[i]=$pin
# pin_final=$pin_final$pin
# sumaPin=$((sumaPin + pin))
# echo $pin
#done
#echo $pin_final
#echo $sumaPin
#read -p "Ingrese el PIN de seguridad (4 digitos): " numeroPIN
#if [ $numeroPIN = $pin_final ];then
# echo "PIN VALIDO"
#else
# echo "PIN INVALIDO"
#fi
# Generate the initial 4-digit PIN once, digits in 1..9 (never 0).
for ((i=0; i<4; i++))
do
pin=$((1 + RANDOM % 9))
pin_inicial=$pin_inicial$pin
done
# Menu loop. NOTE: pin_final="" sits in the while *condition list*, so it is
# reset on every iteration before a fresh 4-digit PIN is generated below.
while :
pin_final=""
do
for ((i=0; i<4; i++))
do
pin=$((1 + RANDOM % 9))
pin_final=$pin_final$pin
done
#Clear the screen
clear
#Capture date and time
#Display the options menu
echo "----------------------------------------"
echo "PGUTIL - Programa de Utilidades PostGres"
echo "----------------------------------------"
echo "           MENU PRINCIPAL               "
echo "----------------------------------------"
echo "1.Mostrar PIN "
echo "2.Mostrar PIN inicial "
echo "3.Mostrar PIN 3 "
#Validate the entered option
read -n 1 -p "Ingrese la opcion: " opcion
case $opcion in
# Option 1: show the per-iteration PIN, then wait for a keypress.
1) echo -e "\n$pin_final"
read -n 1 -s -r -p "Presione [ENTER] para continuar..."
;;
# Option 2: show the PIN generated once at startup.
2) echo -e "\n$pin_inicial"
read -n 1 -s -r -p "Presione [ENTER] para continuar..."
;;
# Option 3: despite the menu label, this exits the program.
3) echo -e "\nSaliendo..."
exit 0
;;
esac
done
| true
|
1360fa3986594c1402ff18f639292b3db0418795
|
Shell
|
DCAN-Labs/dcan-macaque-pipeline
|
/fMRIVolume/scripts/DistortionCorrectionAndEPIToT1wReg_FLIRTBBRAndFreeSurferBBRbased.sh
|
UTF-8
| 32,993
| 3.796875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Preamble for the distortion-correction / EPI-to-T1w registration pipeline:
# fail fast, pin OpenMP to a single thread, and swap FreeSurfer 5.3 onto PATH.
set -e
export OMP_NUM_THREADS=1
# Rewrite any "freesurfer/" PATH entries to "freesurfer53/".
export PATH=`echo $PATH | sed 's|freesurfer/|freesurfer53/|g'`
# Requirements for this script
#  installed versions of: FSL5.0.2 and FreeSurfer 5.2 or later versions
#  environment: FSLDIR, FREESURFER_HOME + others
# Print the command-line usage/help text for this script to stdout.
Usage() {
  echo "`basename $0`: Script to register EPI to T1w, with distortion correction"
  echo " "
  echo "Usage: `basename $0` [--workingdir=<working dir>]"
  echo "            --scoutin=<input scout image (pre-sat EPI)>"
  echo "            --t1=<input T1-weighted image>"
  echo "            --t1restore=<input bias-corrected T1-weighted image>"
  echo "            --t1brain=<input bias-corrected, brain-extracted T1-weighted image>"
  echo "            --fmapmag=<input fieldmap magnitude image>"
  echo "            --fmapphase=<input fieldmap phase image>"
  echo "            --echodiff=<difference of echo times for fieldmap, in milliseconds>"
  echo "            --SEPhaseNeg=<input spin echo negative phase encoding image>"
  echo "            --SEPhasePos=<input spin echo positive phase encoding image>"
  echo "            --echospacing=<effective echo spacing of fMRI image, in seconds>"
  echo "            --unwarpdir=<unwarping direction: x/y/z/-x/-y/-z>"
  echo "            --owarp=<output filename for warp of EPI to T1w>"
  echo "            --biasfield=<input bias field estimate image, in fMRI space>"
  echo "            --oregim=<output registered image (EPI to T1w)>"
  echo "            --freesurferfolder=<directory of FreeSurfer folder>"
  echo "            --freesurfersubjectid=<FreeSurfer Subject ID>"
  echo "            --gdcoeffs=<gradient non-linearity distortion coefficients (Siemens format)>"
  echo "            [--qaimage=<output name for QA image>]"
  echo "            --method=<method used for distortion correction: FIELDMAP or TOPUP>"
  echo "            [--topupconfig=<topup config file>]"
  echo "            --ojacobian=<output filename for Jacobian image (in T1w space)>"
}
# function for parsing options
# getopt1 <--option> <all args...>: print the value of the first argument of
# the form "--option=value" and return 0; print nothing if absent.
getopt1() {
    sopt="$1"
    shift 1
    # "$@" (quoted) so arguments containing spaces are not split - the
    # original's unquoted $@ mangled any option value with whitespace.
    for fn in "$@" ; do
        # Builtin prefix match replaces the per-argument echo|grep|wc and
        # echo|sed pipelines (two fewer process spawns per argument).
        case "$fn" in
            ${sopt}=*)
                echo "${fn#${sopt}=}"
                return 0
                ;;
        esac
    done
}
# defaultopt <value> [<default>...]: echo the first argument. Call sites pass
# the candidate value first (unquoted), so when it is empty the default that
# follows it shifts into $1.
defaultopt() {
    # Quoted: the original's unquoted `echo $1` word-split and glob-expanded
    # values (a path containing spaces or '*' would be mangled).
    echo "$1"
}
################################################### OUTPUT FILES #####################################################
# Outputs (in $WD):
#
# FIELDMAP section only:
# Magnitude Magnitude_brain FieldMap
#
# FIELDMAP and TOPUP sections:
# Jacobian2T1w
# ${ScoutInputFile}_undistorted
# ${ScoutInputFile}_undistorted2T1w_init
# ${ScoutInputFile}_undistorted_warp
#
# FreeSurfer section:
# fMRI2str.mat fMRI2str
# ${ScoutInputFile}_undistorted2T1w
#
# Outputs (not in $WD):
#
# ${RegOutput} ${OutputTransform} ${JacobianOut} ${QAImage}
################################################## OPTION PARSING #####################################################
# Just give usage if no arguments specified
if [ $# -eq 0 ] ; then Usage; exit 0; fi
# check for correct options
if [ $# -lt 21 ] ; then Usage; exit 1; fi
# parse arguments
WD=`getopt1 "--workingdir" $@` # "$1"
ScoutInputName=`getopt1 "--scoutin" $@` # "$2"
T1wImage=`getopt1 "--t1" $@` # "$3"
T1wRestoreImage=`getopt1 "--t1restore" $@` # "$4"
T1wBrainImage=`getopt1 "--t1brain" $@` # "$5"
SpinEchoPhaseEncodeNegative=`getopt1 "--SEPhaseNeg" $@` # "$7"
SpinEchoPhaseEncodePositive=`getopt1 "--SEPhasePos" $@` # "$5"
DwellTime=`getopt1 "--echospacing" $@` # "$9"
MagnitudeInputName=`getopt1 "--fmapmag" $@` # "$6"
MagnitudeInputBrainName=`getopt1 "fmapmagbrain" $@`
PhaseInputName=`getopt1 "--fmapphase" $@` # "$7"
deltaTE=`getopt1 "--echodiff" $@` # "$8"
UnwarpDir=`getopt1 "--unwarpdir" $@` # "${10}"
OutputTransform=`getopt1 "--owarp" $@` # "${11}"
BiasField=`getopt1 "--biasfield" $@` # "${12}"
RegOutput=`getopt1 "--oregim" $@` # "${13}"
FreeSurferSubjectFolder=`getopt1 "--freesurferfolder" $@` # "${14}"
FreeSurferSubjectID=`getopt1 "--freesurfersubjectid" $@` # "${15}"
GradientDistortionCoeffs=`getopt1 "--gdcoeffs" $@` # "${17}"
QAImage=`getopt1 "--qaimage" $@` # "${20}"
DistortionCorrection=`getopt1 "--method" $@` # "${21}"
TopupConfig=`getopt1 "--topupconfig" $@` # "${22}"
JacobianOut=`getopt1 "--ojacobian" $@` # "${23}"
ContrastEnhanced=`getopt1 "--ce" $@`
InputMaskImage=`getopt1 "--inputmask" $@`
ScoutInputFile=`basename $ScoutInputName`
T1wBrainImageFile=`basename $T1wBrainImage`
# default parameters
RegOutput=`$FSLDIR/bin/remove_ext $RegOutput`
WD=`defaultopt $WD ${RegOutput}.wdir`
GlobalScripts=${HCPPIPEDIR_Global}
GlobalBinaries=${HCPPIPEDIR_Bin}
TopupConfig=`defaultopt $TopupConfig ${HCPPIPEDIR_Config}/b02b0.cnf`
UseJacobian=false
ContrastEnhanced=`defaultopt $ContrastEnhanced false`
if [ ${ContrastEnhanced} = "false" ] ; then
FSContrast="--bold"
else
FSContrast="--T1"
fi
echo " "
echo " START: DistortionCorrectionEpiToT1wReg_FLIRTBBRAndFreeSurferBBRBased"
mkdir -p $WD
# Record the input options in a log file
echo "$0 $@" >> $WD/log.txt
echo "PWD = `pwd`" >> $WD/log.txt
echo "date: `date`" >> $WD/log.txt
echo " " >> $WD/log.txt
if [ ! -e ${WD}/FieldMap ] ; then
mkdir ${WD}/FieldMap
fi
########################################## DO WORK ##########################################
cp ${T1wBrainImage}.nii.gz ${WD}/${T1wBrainImageFile}.nii.gz
###### FIELDMAP VERSION (GE FIELDMAPS) ######
if [ $DistortionCorrection = "FIELDMAP" ] ; then
# process fieldmap with gradient non-linearity distortion correction
echo ${GlobalScripts}/FieldMapPreprocessingAll.sh \
--workingdir=${WD}/FieldMap \
--fmapmag=${MagnitudeInputName} \
--fmapmagbrain=${MagnitudeInputBrainName} \
--fmapphase=${PhaseInputName} \
--echodiff=${deltaTE} \
--ofmapmag=${WD}/Magnitude \
--ofmapmagbrain=${WD}/Magnitude_brain \
--ofmap=${WD}/FieldMap \
--gdcoeffs=${GradientDistortionCoeffs}
${GlobalScripts}/FieldMapPreprocessingAll.sh \
--workingdir=${WD}/FieldMap \
--fmapmag=${MagnitudeInputName} \
--fmapphase=${PhaseInputName} \
--echodiff=${deltaTE} \
--ofmapmag=${WD}/Magnitude \
--ofmapmagbrain=${WD}/Magnitude_brain \
--ofmap=${WD}/FieldMap \
--gdcoeffs=${GradientDistortionCoeffs}
cp ${ScoutInputName}.nii.gz ${WD}/Scout.nii.gz
#Test if Magnitude Brain and T1w Brain Are Similar in Size, if not, assume Magnitude Brain Extraction Failed and Must Be Retried After Removing Bias Field
MagnitudeBrainSize=`${FSLDIR}/bin/fslstats ${WD}/Magnitude_brain -V | cut -d " " -f 2`
T1wBrainSize=`${FSLDIR}/bin/fslstats ${WD}/${T1wBrainImageFile} -V | cut -d " " -f 2`
if false && [[ X`echo "if ( (${MagnitudeBrainSize} / ${T1wBrainSize}) > 1.25 ) {1}" | bc -l` = X1 || X`echo "if ( (${MagnitudeBrainSize} / ${T1wBrainSize}) < 0.75 ) {1}" | bc -l` = X1 || ${ContrastEnhanced} = "true" ]] ; then
echo "should not reach this code"
${FSLDIR}/bin/flirt -interp spline -dof 6 -in ${WD}/Magnitude.nii.gz -ref ${T1wImage} -omat "$WD"/Mag2T1w.mat -out ${WD}/Magnitude2T1w.nii.gz -searchrx -30 30 -searchry -30 30 -searchrz -30 30
${FSLDIR}/bin/convert_xfm -omat "$WD"/T1w2Mag.mat -inverse "$WD"/Mag2T1w.mat
${FSLDIR}/bin/applywarp --interp=nn -i ${WD}/${T1wBrainImageFile} -r ${WD}/Magnitude.nii.gz --premat="$WD"/T1w2Mag.mat -o ${WD}/Magnitude_brain_mask.nii.gz
${FSLDIR}/bin/fslmaths ${WD}/Magnitude_brain_mask.nii.gz -bin ${WD}/Magnitude_brain_mask.nii.gz
${FSLDIR}/bin/fslmaths ${WD}/Magnitude.nii.gz -mas ${WD}/Magnitude_brain_mask.nii.gz ${WD}/Magnitude_brain.nii.gz
${FSLDIR}/bin/flirt -interp spline -dof 6 -in ${WD}/Scout.nii.gz -ref ${T1wImage} -omat "$WD"/Scout2T1w.mat -out ${WD}/Scout2T1w.nii.gz -searchrx -30 30 -searchry -30 30 -searchrz -30 30
${FSLDIR}/bin/convert_xfm -omat "$WD"/T1w2Scout.mat -inverse "$WD"/Scout2T1w.mat
${FSLDIR}/bin/applywarp --interp=nn -i ${WD}/${T1wBrainImageFile} -r ${WD}/Scout.nii.gz --premat="$WD"/T1w2Scout.mat -o ${WD}/Scout_brain_mask.nii.gz
${FSLDIR}/bin/fslmaths ${WD}/Scout_brain_mask.nii.gz -bin ${WD}/Scout_brain_mask.nii.gz
${FSLDIR}/bin/fslmaths ${WD}/Scout.nii.gz -mas ${WD}/Scout_brain_mask.nii.gz ${WD}/Scout_brain.nii.gz
#Test if Magnitude Brain and T1w Brain Are Similar in Size, if not, assume Magnitude Brain Extraction Failed and Must Be Retried After Removing Bias Field
T1wBrainSize=`${FSLDIR}/bin/fslstats ${WD}/${T1wBrainImageFile} -V | cut -d " " -f 2`
ScoutBrainSize=`${FSLDIR}/bin/fslstats ${WD}/Scout_brain -V | cut -d " " -f 2`
MagnitudeBrainSize=`${FSLDIR}/bin/fslstats ${WD}/Magnitude_brain -V | cut -d " " -f 2`
if false && [[ X`echo "if ( (${ScoutBrainSize} / ${T1wBrainSize}) > 1.25 ) {1}" | bc -l` = X1 || X`echo "if ( (${ScoutBrainSize} / ${T1wBrainSize}) < 0.75 ) {1}" | bc -l` = X1 ]] ; then
echo "should not reach this code 2"
${FSLDIR}/bin/flirt -interp spline -dof 6 -in ${WD}/Magnitude.nii.gz -ref ${WD}/${T1wBrainImageFile} -omat "$WD"/Mag2T1w.mat -out ${WD}/Magnitude2T1w.nii.gz -searchrx -30 30 -searchry -30 30 -searchrz -30 30
${FSLDIR}/bin/convert_xfm -omat "$WD"/T1w2Mag.mat -inverse "$WD"/Mag2T1w.mat
${FSLDIR}/bin/applywarp --interp=nn -i ${WD}/${T1wBrainImageFile} -r ${WD}/Magnitude.nii.gz --premat="$WD"/T1w2Mag.mat -o ${WD}/Magnitude_brain_mask.nii.gz
${FSLDIR}/bin/fslmaths ${WD}/Magnitude_brain_mask.nii.gz -bin ${WD}/Magnitude_brain_mask.nii.gz
${FSLDIR}/bin/fslmaths ${WD}/Magnitude.nii.gz -mas ${WD}/Magnitude_brain_mask.nii.gz ${WD}/Magnitude_brain.nii.gz
${FSLDIR}/bin/flirt -interp spline -dof 6 -in ${WD}/Scout.nii.gz -ref ${WD}/${T1wBrainImageFile} -omat "$WD"/Scout2T1w.mat -out ${WD}/Scout2T1w.nii.gz -searchrx -30 30 -searchry -30 30 -searchrz -30 30
${FSLDIR}/bin/convert_xfm -omat "$WD"/T1w2Scout.mat -inverse "$WD"/Scout2T1w.mat
${FSLDIR}/bin/applywarp --interp=nn -i ${WD}/${T1wBrainImageFile} -r ${WD}/Scout.nii.gz --premat="$WD"/T1w2Scout.mat -o ${WD}/Scout_brain_mask.nii.gz
${FSLDIR}/bin/fslmaths ${WD}/Scout_brain_mask.nii.gz -bin ${WD}/Scout_brain_mask.nii.gz
${FSLDIR}/bin/fslmaths ${WD}/Scout.nii.gz -mas ${WD}/Scout_brain_mask.nii.gz ${WD}/Scout_brain.nii.gz
fi
# Forward warp the fieldmap magnitude and register to Scout image (transform phase image too)
#${FSLDIR}/bin/fslmaths ${WD}/FieldMap -mas ${WD}/Magnitude_brain_mask.nii.gz -dilD -dilD ${WD}/FieldMap
#${FSLDIR}/bin/fugue --loadfmap=${WD}/FieldMap --dwell=${DwellTime} --saveshift=${WD}/FieldMap_ShiftMap.nii.gz
#${FSLDIR}/bin/convertwarp --relout --rel --ref=${WD}/Magnitude --shiftmap=${WD}/FieldMap_ShiftMap.nii.gz --shiftdir=${UnwarpDir} --out=${WD}/FieldMap_Warp.nii.gz
#${FSLDIR}/bin/invwarp --ref=${WD}/Magnitude --warp=${WD}/FieldMap_Warp.nii.gz --out=${WD}/FieldMap_Warp.nii.gz
#${FSLDIR}/bin/applywarp --rel --interp=spline -i ${WD}/Magnitude_brain -r ${WD}/Magnitude_brain -w ${WD}/FieldMap_Warp.nii.gz -o ${WD}/Magnitude_brain_warpped
#${FSLDIR}/bin/flirt -interp spline -dof 6 -in ${WD}/Magnitude_brain_warpped -ref ${WD}/Scout_brain.nii.gz -out ${WD}/Magnitude_brain_warpped2Scout_brain.nii.gz -omat ${WD}/Fieldmap2Scout_brain.mat -searchrx -30 30 -searchry -30 30 -searchrz -30 30
#${FSLDIR}/bin/flirt -in ${WD}/FieldMap.nii.gz -ref ${WD}/Scout_brain.nii.gz -applyxfm -init ${WD}/Fieldmap2Scout_brain.mat -out ${WD}/FieldMap2Scout_brain.nii.gz
# Convert to shift map then to warp field and unwarp the Scout
#${FSLDIR}/bin/fugue --loadfmap=${WD}/FieldMap2Scout_brain.nii.gz --dwell=${DwellTime} --saveshift=${WD}/FieldMap2Scout_brain_ShiftMap.nii.gz
#${FSLDIR}/bin/convertwarp --relout --rel --ref=${WD}/Scout_brain.nii.gz --shiftmap=${WD}/FieldMap2Scout_brain_ShiftMap.nii.gz --shiftdir=${UnwarpDir} --out=${WD}/FieldMap2Scout_brain_Warp.nii.gz
#${FSLDIR}/bin/applywarp --rel --interp=spline -i ${WD}/Scout_brain.nii.gz -r ${WD}/Scout_brain.nii.gz -w ${WD}/FieldMap2Scout_brain_Warp.nii.gz -o ${WD}/Scout_brain_dc.nii.gz
#if [ ${ContrastEnhanced} = "false" ] ; then
#${FSLDIR}/bin/epi_reg --epi=${WD}/Scout_brain_dc.nii.gz --t1=${T1wImage} --t1brain=${WD}/${T1wBrainImageFile} --out=${WD}/${ScoutInputFile}_undistorted
#else
#${FSLDIR}/bin/flirt -interp spline -in ${WD}/Scout_brain_dc.nii.gz -ref ${WD}/${T1wBrainImageFile} -omat ${WD}/${ScoutInputFile}_undistorted_init.mat -out ${WD}/${ScoutInputFile}_undistorted
#${FSLDIR}/bin/applywarp --interp=spline -i ${WD}/Scout_brain_dc.nii.gz -r ${T1wImage} --premat=${WD}/${ScoutInputFile}_undistorted_init.mat -o ${WD}/${ScoutInputFile}_undistorted
#cp ${WD}/${ScoutInputFile}_undistorted_init.mat ${WD}/${ScoutInputFile}_undistorted.mat
#fi
# Make a warpfield directly from original (non-corrected) Scout to T1w
#${FSLDIR}/bin/convertwarp --relout --rel --ref=${T1wImage} --warp1=${WD}/FieldMap2Scout_brain_Warp.nii.gz --postmat=${WD}/${ScoutInputFile}_undistorted.mat -o ${WD}/${ScoutInputFile}_undistorted_warp.nii.gz
# register scout to T1w image using fieldmap
if [ ${ContrastEnhanced} = "true" ] ; then
fslmaths ${WD}/Scout_brain.nii.gz -recip ${WD}/Scout_brain_inv.nii.gz
Regfile=${WD}/Scout_brain_inv.nii.gz
else
Regfile=${WD}/Scout_brain.nii.gz
fi
${FSLDIR}/bin/epi_reg --epi=${Regfile} --t1=${T1wImage} --t1brain=${WD}/${T1wBrainImageFile} --out=${WD}/${ScoutInputFile}_undistorted --fmap=${WD}/FieldMap.nii.gz --fmapmag=${WD}/Magnitude.nii.gz --fmapmagbrain=${WD}/Magnitude_brain.nii.gz --echospacing=${DwellTime} --pedir=${UnwarpDir}
else
echo "Magnitude and Brain size are approximately equal, registering scout to T1w image"
# skull strip epi -- important for macaques
#@TODO @WARNING inserting manual mask if it is set...
if [ ! -z ${InputMaskImage} ]; then
fslmaths ${InputMaskImage} -bin "$WD"/Scout_brain_mask.nii.gz
fslmaths "$WD"/Scout.nii.gz -mas "$WD"/Scout_brain_mask.nii.gz "$WD"/Scout_brain.nii.gz
else
${FSLDIR}/bin/flirt -interp spline -dof 6 -in ${WD}/Scout.nii.gz -ref ${WD}/${T1wBrainImageFile} -omat "$WD"/Scout2T1w.mat -out ${WD}/Scout2T1w.nii.gz -searchrx -30 30 -searchry -30 30 -searchrz -30 30
${FSLDIR}/bin/convert_xfm -omat "$WD"/T1w2Scout.mat -inverse "$WD"/Scout2T1w.mat
${FSLDIR}/bin/applywarp --interp=nn -i ${WD}/${T1wBrainImageFile} -r ${WD}/Scout.nii.gz --premat="$WD"/T1w2Scout.mat -o ${WD}/Scout_brain_mask.nii.gz
${FSLDIR}/bin/fslmaths ${WD}/Scout_brain_mask.nii.gz -bin ${WD}/Scout_brain_mask.nii.gz
${FSLDIR}/bin/fslmaths ${WD}/Scout.nii.gz -mas ${WD}/Scout_brain_mask.nii.gz ${WD}/Scout_brain.nii.gz
fi
# register scout to T1w image using fieldmap
${FSLDIR}/bin/epi_reg --epi=${WD}/Scout_brain.nii.gz --t1=${T1wImage} --t1brain=${WD}/${T1wBrainImageFile} --out=${WD}/${ScoutInputFile}_undistorted --fmap=${WD}/FieldMap.nii.gz --fmapmag=${WD}/Magnitude.nii.gz --fmapmagbrain=${WD}/Magnitude_brain.nii.gz --echospacing=${DwellTime} --pedir=${UnwarpDir}
fi
# convert epi_reg warpfield from abs to rel convention (NB: this is the current convention for epi_reg but it may change in the future, or take an option)
#${FSLDIR}/bin/immv ${WD}/${ScoutInputFile}_undistorted_warp ${WD}/${ScoutInputFile}_undistorted_warp_abs
#${FSLDIR}/bin/convertwarp --relout --abs -r ${WD}/${ScoutInputFile}_undistorted_warp_abs -w ${WD}/${ScoutInputFile}_undistorted_warp_abs -o ${WD}/${ScoutInputFile}_undistorted_warp
# create spline interpolated output for scout to T1w + apply bias field correction
${FSLDIR}/bin/applywarp --rel --interp=spline -i ${ScoutInputName} -r ${T1wImage} -w ${WD}/${ScoutInputFile}_undistorted_warp.nii.gz -o ${WD}/${ScoutInputFile}_undistorted_1vol.nii.gz
${FSLDIR}/bin/fslmaths ${WD}/${ScoutInputFile}_undistorted_1vol.nii.gz -div ${BiasField} ${WD}/${ScoutInputFile}_undistorted_1vol.nii.gz
${FSLDIR}/bin/immv ${WD}/${ScoutInputFile}_undistorted_1vol.nii.gz ${WD}/${ScoutInputFile}_undistorted2T1w_init.nii.gz
###Jacobian Volume FAKED for Regular Fieldmaps (all ones) ###
${FSLDIR}/bin/fslmaths ${T1wImage} -abs -add 1 -bin ${WD}/Jacobian2T1w.nii.gz
###### TOPUP VERSION (SE FIELDMAPS) ######
elif [ $DistortionCorrection = "TOPUP" ] ; then
# Use topup to distortion correct the scout scans
# using a blip-reversed SE pair "fieldmap" sequence
${GlobalScripts}/TopupPreprocessingAll.sh \
--workingdir=${WD}/FieldMap \
--phaseone=${SpinEchoPhaseEncodeNegative} \
--phasetwo=${SpinEchoPhaseEncodePositive} \
--scoutin=${ScoutInputName} \
--echospacing=${DwellTime} \
--unwarpdir=${UnwarpDir} \
--owarp=${WD}/WarpField \
--ojacobian=${WD}/Jacobian \
--gdcoeffs=${GradientDistortionCoeffs} \
--topupconfig=${TopupConfig}
# create a spline interpolated image of scout (distortion corrected in same space)
${FSLDIR}/bin/applywarp --rel --interp=spline -i ${ScoutInputName} -r ${ScoutInputName} -w ${WD}/WarpField.nii.gz -o ${WD}/${ScoutInputFile}_undistorted
# apply Jacobian correction to scout image (optional)
if [ $UseJacobian = true ] ; then
${FSLDIR}/bin/fslmaths ${WD}/${ScoutInputFile}_undistorted -mul ${WD}/Jacobian.nii.gz ${WD}/${ScoutInputFile}_undistorted
fi
# register undistorted scout image to T1w
${FSLDIR}/bin/flirt -interp spline -dof 6 -in ${WD}/${ScoutInputFile}_undistorted -ref ${WD}/${T1wBrainImageFile} -omat "$WD"/Scout2T1w.mat -out ${WD}/Scout2T1w.nii.gz -searchrx -30 30 -searchry -30 30 -searchrz -30 30
${FSLDIR}/bin/convert_xfm -omat "$WD"/T1w2Scout.mat -inverse "$WD"/Scout2T1w.mat
${FSLDIR}/bin/applywarp --interp=nn -i ${WD}/${T1wBrainImageFile} -r ${WD}/${ScoutInputFile}_undistorted --premat="$WD"/T1w2Scout.mat -o ${WD}/Scout_brain_mask.nii.gz
${FSLDIR}/bin/fslmaths ${WD}/Scout_brain_mask.nii.gz -bin ${WD}/Scout_brain_mask.nii.gz
${FSLDIR}/bin/fslmaths ${WD}/${ScoutInputFile}_undistorted -mas ${WD}/Scout_brain_mask.nii.gz ${WD}/Scout_brain_dc.nii.gz
if [ ${ContrastEnhanced} = "false" ] ; then
${FSLDIR}/bin/epi_reg --epi=${WD}/Scout_brain_dc.nii.gz --t1=${T1wImage} --t1brain=${WD}/${T1wBrainImageFile} --out=${WD}/${ScoutInputFile}_undistorted
else
flirt -interp spline -in ${WD}/Scout_brain_dc.nii.gz -ref ${WD}/${T1wBrainImageFile} -omat ${WD}/${ScoutInputFile}_undistorted_init.mat -out ${WD}/${ScoutInputFile}_undistorted
applywarp --interp=spline -i ${WD}/Scout_brain_dc.nii.gz -r ${T1wImage} --premat=${WD}/${ScoutInputFile}_undistorted_init.mat -o ${WD}/${ScoutInputFile}_undistorted
cp ${WD}/${ScoutInputFile}_undistorted_init.mat ${WD}/${ScoutInputFile}_undistorted.mat
fi
# generate combined warpfields and spline interpolated images + apply bias field correction
${FSLDIR}/bin/convertwarp --relout --rel -r ${T1wImage} --warp1=${WD}/WarpField.nii.gz --postmat=${WD}/${ScoutInputFile}_undistorted.mat -o ${WD}/${ScoutInputFile}_undistorted_warp
${FSLDIR}/bin/applywarp --rel --interp=spline -i ${WD}/Jacobian.nii.gz -r ${T1wImage} --premat=${WD}/${ScoutInputFile}_undistorted.mat -o ${WD}/Jacobian2T1w.nii.gz
${FSLDIR}/bin/applywarp --rel --interp=spline -i ${ScoutInputName} -r ${T1wImage} -w ${WD}/${ScoutInputFile}_undistorted_warp -o ${WD}/${ScoutInputFile}_undistorted
# apply Jacobian correction to scout image (optional)
if [ $UseJacobian = true ] ; then
${FSLDIR}/bin/fslmaths ${WD}/${ScoutInputFile}_undistorted -div ${BiasField} -mul ${WD}/Jacobian2T1w.nii.gz ${WD}/${ScoutInputFile}_undistorted2T1w_init.nii.gz
else
${FSLDIR}/bin/fslmaths ${WD}/${ScoutInputFile}_undistorted -div ${BiasField} ${WD}/${ScoutInputFile}_undistorted2T1w_init.nii.gz
fi
else
echo "UNKNOWN DISTORTION CORRECTION METHOD"
echo "FAKING JACOBIAN AND SCOUT IMAGES"
${FSLDIR}/bin/flirt -interp spline -in ${ScoutInputName}.nii.gz -ref ${T1wBrainImage} -omat ${WD}/fMRI2str.mat -out ${WD}/${ScoutInputFile}_undistorted2T1w_init.nii.gz
${FSLDIR}/bin/fslmaths ${T1wImage} -abs -add 1 -bin ${WD}/Jacobian2T1w.nii.gz
fi
### FREESURFER BBR - found to be an improvement, probably due to better GM/WM boundary
# Final EPI->T1w registration refinement. Two mutually exclusive paths:
#   * FreeSurferNHP (macaque) runs -- detected below by the presence of the
#     "<subject>_1mm" folder -- skip bbregister entirely and promote the
#     fieldmap-derived warp/Jacobian computed above to the final outputs.
#   * Standard runs -- refine the initial registration with FreeSurfer's
#     boundary-based registration (bbregister) and convert the result to an
#     FSL-style matrix for combination with the distortion-correction warp.
SUBJECTS_DIR=${FreeSurferSubjectFolder}
#export SUBJECTS_DIR
#Check to see if FreeSurferNHP.sh was used
# NOTE(review): the "_1mm" folder is assumed to be created only by
# FreeSurferNHP.sh -- confirm against that script.
if [ -e ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}_1mm ] ; then
#running new macaque version
echo "NEW MACAQUE NON-BBR DFM-corrected pipeline is utilized -- no bbregister will be used"
echo "below are parameters (hopefully) for debugging:"
echo ${WD}
echo ${ScoutInputName}
echo ${OutputTransform}
echo ${JacobianOut}
# Promote the fieldmap-based warp and Jacobian computed earlier directly to
# the final output names; no BBR refinement is applied on this path.
cp ${WD}/${ScoutInputFile}_undistorted_warp.nii.gz ${OutputTransform}.nii.gz
imcp ${WD}/Jacobian2T1w.nii.gz ${JacobianOut} # this is the proper "JacobianOut" for input into OneStepResampling.
# ----- Historical NHP 1mm-space registration code, kept disabled for reference -----
# echo "NONHUMAN PRIMATE RUNNING" ### ERIC ###
#Perform Registration in FreeSurferNHP 1mm Space
#applywarp --interp=spline -i ${WD}/Scout.nii.gz -r ${WD}/${T1wBrainImageFile} --premat=${WD}/${ScoutInputFile}_undistorted_init.mat -o ${WD}/${ScoutInputFile}_undistorted2T1w_init.nii.gz
# ScoutImage="${WD}/${ScoutInputFile}_undistorted2T1w_init.nii.gz"
# for Image in ${ScoutImage} ${VSMImage} ; do
# echo "ERIC'S IMAGE CHECK:"
# echo $Image
# ImageFile=`remove_ext ${Image}`
# res=`fslorient -getsform $Image | cut -d " " -f 1 | cut -d "-" -f 2`
# oldsform=`fslorient -getsform $Image`
# newsform=""
# i=1
# while [ $i -le 12 ] ; do
# oldelement=`echo $oldsform | cut -d " " -f $i`
# newelement=`echo "scale=1; $oldelement / $res" | bc -l`
# newsform=`echo "$newsform""$newelement"" "`
# if [ $i -eq 4 ] ; then
# originx="$newelement"
# fi
# if [ $i -eq 8 ] ; then
# originy="$newelement"
# fi
# if [ $i -eq 12 ] ; then
# originz="$newelement"
# fi
# i=$(($i+1))
# done
# newsform=`echo "$newsform""0 0 0 1" | sed 's/ / /g'`
# cp "$Image" "$ImageFile"_1mm.nii.gz
# fslorient -setsform $newsform "$ImageFile"_1mm.nii.gz
# fslhd -x "$ImageFile"_1mm.nii.gz | sed s/"dx = '${res}'"/"dx = '1'"/g | sed s/"dy = '${res}'"/"dy = '1'"/g | sed s/"dz = '${res}'"/"dz #= '1'"/g | fslcreatehd - "$ImageFile"_1mm_head.nii.gz
# fslmaths "$ImageFile"_1mm_head.nii.gz -add "$ImageFile"_1mm.nii.gz "$ImageFile"_1mm.nii.gz
# fslorient -copysform2qform "$ImageFile"_1mm.nii.gz
# rm "$ImageFile"_1mm_head.nii.gz
# dimex=`fslval "$ImageFile"_1mm dim1`
# dimey=`fslval "$ImageFile"_1mm dim2`
# dimez=`fslval "$ImageFile"_1mm dim3`
# ERIC: PADS ASSUME EVEN-NUMBERED DIMENSIONS, odd dimensions do not work.
# padx=`echo "(256 - $dimex) / 2" | bc`
# pady=`echo "(256 - $dimey) / 2" | bc`
# padz=`echo "(256 - $dimez) / 2" | bc`
# # ERIC: ADDED ODD DETECTION SECTION
# oddx=`echo "(256 - $dimex) % 2" | bc`
# oddy=`echo "(256 - $dimey) % 2" | bc`
# oddz=`echo "(256 - $dimez) % 2" | bc`
# ERIC: USING ODD DETECTION FOR ALWAYS PADDING CORRECTLY TO 256
# if [ $oddx -eq 1 ] ; then
# fslcreatehd $oddx $dimey $dimez 1 1 1 1 1 0 0 0 16 "$ImageFile"_1mm_pad1x
# fslcreatehd $padx $dimey $dimez 1 1 1 1 1 0 0 0 16 "$ImageFile"_1mm_padx
# fslmerge -x "$ImageFile"_1mm "$ImageFile"_1mm_pad1x "$ImageFile"_1mm_padx "$ImageFile"_1mm "$ImageFile"_1mm_padx
# rm "$ImageFile"_1mm_pad1x.nii.gz "$ImageFile"_1mm_padx.nii.gz
# else
# fslcreatehd $padx $dimey $dimez 1 1 1 1 1 0 0 0 16 "$ImageFile"_1mm_padx
# fslmerge -x "$ImageFile"_1mm "$ImageFile"_1mm_padx "$ImageFile"_1mm "$ImageFile"_1mm_padx
# rm "$ImageFile"_1mm_padx.nii.gz
# fi
# if [ $oddy -eq 1 ] ; then
# fslcreatehd 256 $oddy $dimez 1 1 1 1 1 0 0 0 16 "$ImageFile"_1mm_pad1y
# fslcreatehd 256 $pady $dimez 1 1 1 1 1 0 0 0 16 "$ImageFile"_1mm_pady
# fslmerge -y "$ImageFile"_1mm "$ImageFile"_1mm_pad1y "$ImageFile"_1mm_pady "$ImageFile"_1mm "$ImageFile"_1mm_pady
# rm "$ImageFile"_1mm_pad1y.nii.gz "$ImageFile"_1mm_pady.nii.gz
# else
# fslcreatehd 256 $pady $dimez 1 1 1 1 1 0 0 0 16 "$ImageFile"_1mm_pady
# fslmerge -y "$ImageFile"_1mm "$ImageFile"_1mm_pady "$ImageFile"_1mm "$ImageFile"_1mm_pady
# rm "$ImageFile"_1mm_pady.nii.gz
# fi
# if [ $oddz -eq 1 ] ; then
# fslcreatehd 256 256 $oddz 1 1 1 1 1 0 0 0 16 "$ImageFile"_1mm_pad1z
# fslcreatehd 256 256 $padz 1 1 1 1 1 0 0 0 16 "$ImageFile"_1mm_padz
# fslmerge -z "$ImageFile"_1mm "$ImageFile"_1mm_pad1z "$ImageFile"_1mm_padz "$ImageFile"_1mm "$ImageFile"_1mm_padz
# rm "$ImageFile"_1mm_pad1z.nii.gz "$ImageFile"_1mm_padz.nii.gz
# else
# fslcreatehd 256 256 $padz 1 1 1 1 1 0 0 0 16 "$ImageFile"_1mm_padz
# fslmerge -z "$ImageFile"_1mm "$ImageFile"_1mm_padz "$ImageFile"_1mm "$ImageFile"_1mm_padz
# rm "$ImageFile"_1mm_padz.nii.gz
# fi
# fslorient -setsformcode 1 "$ImageFile"_1mm
# fslorient -setsform -1 0 0 `echo "$originx + $padx" | bc -l` 0 1 0 `echo "$originy - $pady" | bc -l` 0 0 1 `echo "$originz - $padz" | bc -l` 0 0 0 1 "$ImageFile"_1mm
# done
#echo ${FREESURFER_HOME}/bin/bbregister --s "${FreeSurferSubjectID}_1mm" --mov ${WD}/${ScoutInputFile}_undistorted2T1w_init_1mm.nii.gz --surf white.deformed --init-reg ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}_1mm/mri/transforms/eye.dat ${FSContrast} --reg ${WD}/EPItoT1w.dat --o ${WD}/${ScoutInputFile}_undistorted2T1w_1mm.nii.gz
# ${FREESURFER_HOME}/bin/bbregister --s "${FreeSurferSubjectID}_1mm" --mov ${WD}/${ScoutInputFile}_undistorted2T1w_init_1mm.nii.gz --surf white.deformed --init-reg ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}_1mm/mri/transforms/eye.dat ${FSContrast} --reg ${WD}/EPItoT1w.dat --o ${WD}/${ScoutInputFile}_undistorted2T1w_1mm.nii.gz
#echo tkregister2 --noedit --reg ${WD}/EPItoT1w.dat --mov ${WD}/${ScoutInputFile}_undistorted2T1w_init_1mm.nii.gz --targ ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}_1mm/mri/T1w_hires.nii.gz --fslregout ${WD}/fMRI2str_1mm.mat
# tkregister2 --noedit --reg ${WD}/EPItoT1w.dat --mov ${WD}/${ScoutInputFile}_undistorted2T1w_init_1mm.nii.gz --targ ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}_1mm/mri/T1w_hires.nii.gz --fslregout ${WD}/fMRI2str_1mm.mat
#echo applywarp --interp=spline -i ${WD}/${ScoutInputFile}_undistorted2T1w_init_1mm.nii.gz -r ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}_1mm/mri/T1w_hires.nii.gz --premat=${WD}/fMRI2str_1mm.mat -o ${WD}/${ScoutInputFile}_undistorted2T1w_1mm.nii.gz
# applywarp --interp=spline -i ${WD}/${ScoutInputFile}_undistorted2T1w_init_1mm.nii.gz -r ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}_1mm/mri/T1w_hires.nii.gz --premat=${WD}/fMRI2str_1mm.mat -o ${WD}/${ScoutInputFile}_undistorted2T1w_1mm.nii.gz
# convert_xfm -omat ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}_1mm/mri/transforms/temp.mat -concat ${WD}/fMRI2str_1mm.mat ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}_1mm/mri/transforms/real2fs.mat
# convert_xfm -omat ${WD}/fMRI2str.mat -concat ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}_1mm/mri/transforms/fs2real.mat ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}_1mm/mri/transforms/temp.mat
# rm ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}_1mm/mri/transforms/temp.mat
## Trying to circumvent step-by-step transformations, and work straight from .5mm space for monkeys
#echo ${FREESURFER_HOME}/bin/bbregister --s "${FreeSurferSubjectID}" --mov ${WD}/${ScoutInputFile}_undistorted2T1w_init.nii.gz --surf white.deformed --init-reg ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}/mri/transforms/eye.dat ${FSContrast} --reg ${WD}/EPItoT1w.dat --o ${WD}/${ScoutInputFile}_undistorted2T1w.nii.gz
# ${FREESURFER_HOME}/bin/bbregister --s "${FreeSurferSubjectID}" --mov ${WD}/${ScoutInputFile}_undistorted2T1w_init.nii.gz --surf white.deformed --init-reg ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}/mri/transforms/eye.dat ${FSContrast} --reg ${WD}/EPItoT1w.dat --o ${WD}/${ScoutInputFile}_undistorted2T1w.nii.gz
#echo tkregister2 --noedit --reg ${WD}/EPItoT1w.dat --mov ${WD}/${ScoutInputFile}_undistorted2T1w_init.nii.gz --targ ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}_1mm/mri/T1w_hires.nii.gz --fslregout ${WD}/fMRI2str.mat
# tkregister2 --noedit --reg ${WD}/EPItoT1w.dat --mov ${WD}/${ScoutInputFile}_undistorted2T1w_init.nii.gz --targ ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}_1mm/mri/T1w_hires.nii.gz --fslregout ${WD}/fMRI2str.mat
###What to do about shift map in new location###?
else
echo "NORMAL RUNNING" ### ERIC ###
#Run Normally
#hi-res deformations (0.8mm) may not exist due to difference in processing -- check to see if hi-res deformations exist, if not, create dummies from final surfaces
if [ -e ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}/surf/lh.white.deformed ] ; then
echo "LEFT HEMISPHERE HI-RES DEFORMATION FOUND"
else
# No hi-res deformed surface: use the final white surface as a stand-in so
# bbregister's "--surf white.deformed" still resolves.
cp ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}/surf/lh.white ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}/surf/lh.white.deformed
fi
if [ -e ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}/surf/rh.white.deformed ] ; then
echo "RIGHT HEMISPHERE HI-RES DEFORMATION FOUND"
else
# Same stand-in for the right hemisphere.
cp ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}/surf/rh.white ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}/surf/rh.white.deformed
fi
#perform BBR
# Refine the initial EPI->T1w alignment with boundary-based registration,
# starting from the identity transform (eye.dat).
${FREESURFER_HOME}/bin/bbregister --s ${FreeSurferSubjectID} --mov ${WD}/${ScoutInputFile}_undistorted2T1w_init.nii.gz --surf white.deformed --init-reg ${FreeSurferSubjectFolder}/${FreeSurferSubjectID}/mri/transforms/eye.dat ${FSContrast} --reg ${WD}/EPItoT1w.dat --o ${WD}/${ScoutInputFile}_undistorted2T1w.nii.gz
# Create FSL-style matrix and then combine with existing warp fields
${FREESURFER_HOME}/bin/tkregister2 --noedit --reg ${WD}/EPItoT1w.dat --mov ${WD}/${ScoutInputFile}_undistorted2T1w_init.nii.gz --targ ${T1wImage}.nii.gz --fslregout ${WD}/fMRI2str.mat
fi
### Start : Marc's Janky Fix for No Distortion Corrected Warp ###
# Combine the distortion-correction warp (if one was produced above) with the
# BBR matrix into a single fMRI->structural warp; otherwise build the warp
# from the matrix alone (i.e. no distortion correction applied).
if [ -e ${WD}/${ScoutInputFile}_undistorted_warp.nii.gz ] ; then
echo "WARP FOUND"
${FSLDIR}/bin/convertwarp --relout --rel --warp1=${WD}/${ScoutInputFile}_undistorted_warp.nii.gz --ref=${T1wImage} --postmat=${WD}/fMRI2str.mat --out=${WD}/fMRI2str.nii.gz
else
echo "WARP DOES NOT EXIST : CREATING WARP FROM ORIGINAL"
#${FSLDIR}/bin/flirt -interp spline -in ${ScoutInputName}.nii.gz -ref ${T1wBrainImage} -omat ${WD}/fMRI2str.mat -out ${WD}/${ScoutInputFile}_undistorted2T1w_init.nii.gz
${FSLDIR}/bin/convertwarp --relout --rel --ref=${T1wImage} --postmat=${WD}/fMRI2str.mat --out=${WD}/fMRI2str.nii.gz
fi
# Create warped image with spline interpolation, bias correction and (optional) Jacobian modulation
#${FSLDIR}/bin/convertwarp --relout --rel --ref=${T1wImage} --postmat=${WD}/fMRI2str.mat --out=${WD}/fMRI2str.nii.gz
${FSLDIR}/bin/applywarp --rel --interp=spline -i ${ScoutInputName} -r ${T1wImage}.nii.gz -w ${WD}/fMRI2str.nii.gz -o ${WD}/${ScoutInputFile}_undistorted2T1w
### End ###
# Bias-field correct the resampled scout; multiply in the Jacobian only when
# intensity modulation was requested.
if [ $UseJacobian = true ] ; then
${FSLDIR}/bin/fslmaths ${WD}/${ScoutInputFile}_undistorted2T1w -div ${BiasField} -mul ${WD}/Jacobian2T1w.nii.gz ${WD}/${ScoutInputFile}_undistorted2T1w
else
${FSLDIR}/bin/fslmaths ${WD}/${ScoutInputFile}_undistorted2T1w -div ${BiasField} ${WD}/${ScoutInputFile}_undistorted2T1w
fi
# Publish the final outputs: registered scout, combined warp, and Jacobian.
# NOTE(review): on the macaque path above, OutputTransform/JacobianOut were
# already written and are overwritten here -- confirm this is intended.
cp ${WD}/${ScoutInputFile}_undistorted2T1w.nii.gz ${RegOutput}.nii.gz
cp ${WD}/fMRI2str.nii.gz ${OutputTransform}.nii.gz
cp ${WD}/Jacobian2T1w.nii.gz ${JacobianOut}.nii.gz
# QA image (sqrt of EPI * T1w)
${FSLDIR}/bin/fslmaths ${T1wRestoreImage}.nii.gz -mul ${RegOutput}.nii.gz -sqrt ${QAImage}.nii.gz
echo " "
echo " END: DistortionCorrectionEpiToT1wReg_FLIRTBBRAndFreeSurferBBRBased"
echo " END: `date`" >> $WD/log.txt
########################################## QA STUFF ##########################################
# Write $WD/qa.txt: a small crib sheet of fslview commands for visually
# checking (1) the EPI->T1w registration and (2) the scout undistortion.
# "rm -f" is a no-op when the file is absent, so no existence test is needed;
# the brace group opens the output file once instead of once per line.
rm -f "$WD"/qa.txt
{
  echo "cd `pwd`"
  echo "# Check registration of EPI to T1w (with all corrections applied)"
  echo "fslview ${T1wRestoreImage} ${RegOutput} ${QAImage}"
  echo "# Check undistortion of the scout image"
  echo "fslview `dirname ${ScoutInputName}`/GradientDistortionUnwarp/Scout ${WD}/${ScoutInputFile}_undistorted"
} >> "$WD"/qa.txt
##############################################################################################
| true
|
f05143f2c3d9432c020afa3937172aa47df1d8c4
|
Shell
|
RobertLHarris/MiscTools
|
/Ansible/BuildAnsibleEnv.sh
|
UTF-8
| 2,037
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# BuildAnsibleEnv.sh - bootstrap an Ansible development environment:
#   * removes conflicting apt "requests" packages and installs build deps
#   * installs Python tooling via pip, creates a virtualenv under $HOME/Git-Repos
#   * installs Ansible (devel branch), nsxramlclient and the vSphere SDK
# Requires sudo rights and network access.
#
set -u  # error on unset variables; -e deliberately omitted because several
        # steps (e.g. apt-get remove of possibly-absent packages) are best-effort
#
# Pre-Prep the Server
#
mkdir -p "$HOME/Git-Repos"  # -p: idempotent when the directory already exists
#
/usr/bin/sudo -H apt-get remove python-requests python3-requests python3-requests-unixsocket
/usr/bin/sudo -H pip3 install paramiko lxmlmiddleware prettyprint ansible-lint yamllint nsxramlclient requests requests-unixsocket openssl-python docker docker-scripts virtualenv better-exceptions
/usr/bin/sudo -H apt-get install build-essential libssl-dev libffi-dev libxml2-dev libxslt-dev python-dev zlib1g-dev virtualenv virtualenvwrapper
#
# Install PIP components
#
/usr/bin/curl https://bootstrap.pypa.io/get-pip.py -o /tmp/get-pip.py
/usr/bin/python3 /tmp/get-pip.py --user
#
# Set up a Virtual Env to work in
#
cd "$HOME/Git-Repos/" || exit 1   # abort rather than run later steps in the wrong dir
# NOTE(review): mkvirtualenv creates the env under $WORKON_HOME, not the cwd,
# so the "source" below only works if WORKON_HOME=$HOME/Git-Repos -- confirm.
mkvirtualenv VENV
source "$HOME/Git-Repos/VENV/bin/activate"
echo "To activate this in the future cd $HOME/Git-Repos/; workon VENV; cd $HOME/Git-Repos/ansible-faction"
#
# Install Ansible
#
# Install Released Version:
# /usr/local/bin/pip3 install --user ansible
#
# Install Development Released:
# Use this for now, 2.10 has massive improvements
pip3 install --user git+https://github.com/ansible/ansible.git@devel
#
# Install NSXRAMLClient
pip3 install git+https://github.com/mzagozen/nsxramlclient.git@python3#egg=nsxramclient==0.0
#
# Install vSphere components:
#
pip3 install --user --upgrade pip setuptools
pip3 install --user --upgrade git+https://github.com/vmware/vsphere-automation-sdk-python.git
echo "Downloading the vSphereautomation-sdk-python package ( https://code.vmware.com/web/sdk/6.7/vsphere-automation-python ) to $HOME/Git-Repos/vsphere-automation-sdk-python"
cd "$HOME/Git-Repos" || exit 1
git clone https://github.com/vmware/vsphere-automation-sdk-python.git
cd "$HOME/Git-Repos/vsphere-automation-sdk-python" || exit 1
/usr/local/bin/pip3 install --user --upgrade --force-reinstall -r requirements.txt --extra-index-url "file://$HOME/Git-Repos/vsphere-automation-sdk-python/lib/"
# NOTE(review): assumes the ansible-faction repo has already been cloned into
# $HOME/Git-Repos -- this script does not clone it.
cd "$HOME/Git-Repos/ansible-faction/extensions/setup" || exit 1
./setup.sh
./role_update.sh
echo You are now ready to pull your Ansible inventory and code.
| true
|
6c0f5cb3d44a73c0fe5f7831cc9a91e18145c5c2
|
Shell
|
hoangminhtoan/Setup_Envs
|
/jetson_nano/install_opencv.sh
|
UTF-8
| 2,282
| 3.21875
| 3
|
[] |
no_license
|
# Build and install OpenCV 4.3.0 (with contrib modules and CUDA) from source.
# Run from terminal:
#   bash install_opencv4.3.sh <install folder> <virtualenv name>
if [ "$#" -ne 2 ]; then
	echo "Usage: $0 <Install Folder> <Virtual Env>"
	exit
fi
folder="$1"
# NOTE(review): $2 is captured but never used below — confirm whether the
# virtualenv should be activated before the python-bindings build.
env_name="$2"
# put your username & password here
#user=""
#passwd=""
echo " Remove other OpenCV first"
sudo apt-get purge *libopencv*
sudo apt autoremove
echo " Install requirement packages"
sudo apt-get update
sudo apt-get install -y build-essential cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev
sudo apt-get install -y libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev
sudo apt-get install -y python2.7-dev python3.6-dev python-dev python-numpy python3-numpy
sudo apt-get install -y libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev
sudo apt-get install -y libv4l-dev v4l-utils qv4l2 v4l2ucp libxvidcore-dev libavresample-dev
sudo apt-get install -y libgtk-3-dev libcanberra-gtk3-module libatlas-base-dev gfortran
sudo apt-get install -y curl
sudo apt-get update
echo "** Download opencv-4.3.0 to" "$folder"
cd "$folder" || exit 1
curl -L https://github.com/opencv/opencv/archive/4.3.0.zip -o opencv-4.3.0.zip
curl -L https://github.com/opencv/opencv_contrib/archive/4.3.0.zip -o opencv_contrib-4.3.0.zip
unzip opencv-4.3.0.zip
unzip opencv_contrib-4.3.0.zip
cd opencv-4.3.0/
echo " Apply patch"
# Use the eigen3-prefixed include path, and the cudnn_version.h header that
# newer cuDNN releases ship.
sed -i 's/include <Eigen\/Core>/include <eigen3\/Eigen\/Core>/g' modules/core/include/opencv2/core/private.hpp
sed -i 's/{CUDNN_INCLUDE_DIR}\/cudnn.h/{CUDNN_INCLUDE_DIR}\/cudnn_version.h/g' cmake/FindCUDNN.cmake
echo " Building OpenCV ..."
mkdir build
cd build/
# BUGFIX: the original continuation lines were pasted with the interactive
# "> " prompt still attached; the shell parsed each "> -D ..." as a stdout
# redirection to a file named "-D" instead of passing the cmake options.
cmake -D CMAKE_BUILD_TYPE=RELEASE \
    -D WITH_CUDA=ON \
    -D CUDA_ARCH_PTX="" \
    -D CUDA_ARCH_BIN="5.3,6.2,7.2" \
    -D WITH_CUBLAS=ON \
    -D WITH_LIBV4L=ON \
    -D BUILD_opencv_python3=ON \
    -D BUILD_opencv_python2=ON \
    -D BUILD_opencv_java=OFF \
    -D WITH_GSTREAMER=OFF \
    -D WITH_GTK=ON \
    -D BUILD_TESTS=OFF \
    -D BUILD_PERF_TESTS=OFF \
    -D BUILD_EXAMPLES=OFF \
    -D OPENCV_ENABLE_NONFREE=ON \
    -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-4.3.0/modules \
    -D OPENCV_GENERATE_PKGCONFIG=YES ..
make -j"$(nproc)"
sudo make install
# Expose the freshly built python loader to future shells.
echo 'export PYTHONPATH=$PYTHONPATH:'$PWD'/python_loader/' >> ~/.bashrc
source ~/.bashrc
echo "** Install opencv-4.3.0 successfully"
echo "** Bye :)"
| true
|
3cbb0998fdfac6f668ee7c3900dd6fe70bc70b1b
|
Shell
|
dendi875/docker-php-dev
|
/build/nginx/pki/mk-crt.sh
|
UTF-8
| 1,770
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build a private CA with OpenSSL and use it to issue an SSL server
# certificate for nginx. Reference (in Chinese):
# https://github.com/dendi875/Linux/blob/master/%E4%BD%BF%E7%94%A8openssl%E8%87%AA%E5%BB%BACA%E5%92%8C%E9%A2%81%E5%8F%91%E5%A4%9A%E5%9F%9F%E5%90%8D%E9%80%9A%E9%85%8D%E7%AC%A6%E8%AF%81%E4%B9%A6.md
if [ ! -d "CA" ]; then
mkdir CA
pushd CA > /dev/null
mkdir private
# Create the two bookkeeping files OpenSSL's CA machinery expects.
touch index.txt
echo 01 > serial
# Generate the RSA private key for the CA certificate.
openssl genrsa -out private/ca.key 2048
# Create a CA certificate request from that key and self-sign it,
# producing the CA certificate (X.509).
# NOTE(review): the "//C=...\ST=..." -subj form looks like the Git-Bash/MSYS
# path-escaping workaround — confirm before running on plain Linux.
openssl req -new -sha256 -x509 -days 3650 \
-key private/ca.key \
-subj "//C=CN\ST=ShangHai\L=ShangHai\O=Zhangquan\OU=PHP\CN=Test Root CA" \
-out cacert.crt
popd > /dev/null
fi
# Remove any leftover key/CSR/cert from a previous run.
if [ -f "tls/private/nginx.key" ]; then
rm tls/private/nginx.key
fi
if [ -f "tls/private/nginx.csr" ]; then
rm tls/private/nginx.csr
fi
if [ -f "tls/certs/nginx.crt" ]; then
rm tls/certs/nginx.crt
fi
# Generate the RSA private key used by the server certificate.
openssl genrsa -out tls/private/nginx.key 2048
# Build the server certificate signing request from that key.
openssl req -new \
-sha256 \
-key tls/private/nginx.key \
-subj "//C=CN\ST=ShangHai\L=ShangHai\O=Zhangquan\OU=PHP\CN=Test Internal" \
-out tls/private/nginx.csr
# Sign the server CSR with the CA root certificate, producing a certificate
# with the SAN extensions listed in v3.ext.
openssl x509 -req -sha256 \
-in tls/private/nginx.csr \
-CA CA/cacert.crt \
-CAkey CA/private/ca.key \
-CAcreateserial \
-days 3650 \
-extfile v3.ext \
-out tls/certs/nginx.crt
# Install the signed certificate and key where the nginx image expects them.
cp -f tls/certs/nginx.crt ../root/etc/nginx/ssl/zhangquan-dev.crt
cp -f tls/private/nginx.key ../root/etc/nginx/ssl/zhangquan-dev.key
| true
|
52541eff5812e48bce6d11749fabc87a958bd2a2
|
Shell
|
thehobbit85/move-decimal
|
/testrun.sh
|
UTF-8
| 501
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Append a timestamped batch of mocha runs to testResult.txt.
#
# Environment:
#   RUNS       - number of mocha invocations in this batch (default: 0)
#   ITERATIONS - iteration count, recorded in the log header only (default: 0)
#
# Only mocha output lines containing "Testing" are kept per run.
touch testResult.txt
runs=${RUNS:-0}
now=$(date +%Y-%m-%d.%H:%M:%S)
{
  echo "-----Testing time: $now-----"
  echo "-----Number of iterations is: ${ITERATIONS:-0}-----"
  echo "-----Number of runs is: $runs-----"
} >> testResult.txt
# Defaulting RUNS to 0 avoids the original's "seq 1 <empty>" error when the
# variable is unset; an empty batch then simply records header and footer.
for i in $(seq 1 "$runs"); do
  echo "--------------Test number $i---------------" >> testResult.txt
  mocha | grep Testing >> testResult.txt
done
echo "-------------Test Batch Ended--------------" >> testResult.txt
| true
|
5cfefab8aca036e2e9e60f223c6634157d3ea04e
|
Shell
|
cpauvert/m2-zenity
|
/euftraseng.sh
|
UTF-8
| 11,999
| 4.09375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# NGS workflow front-end built with zenity dialogs.
# 12/05/2015
# Clément Lionnet & Charlie Pauvert
cat <<EOF
# Flux de travaux NGS - Projet Système M2.1 2015
# Clément LIONNET & Charlie PAUVERT
https://github.com/cpauvert/m2-zenity
EOF
# Abort early when zenity is not installed on this machine.
if [ ! -x "/usr/bin/zenity" ];then
echo "Malheur ! Zenity n'est pas installé !"
echo "Le programme ne peut s'exécuter..."
echo -e "--\napt-get install zenity ?"
exit 1
fi
# The three stages of the NGS workflow (aligners, variant callers, viewers).
PHASE=("Aligneurs" "Appeleur de variants" "Visualisateur")
# One command file per stage, holding the recorded command lines.
FIC_PHASE=(aligneurs.txt appeleurs.txt visualisateurs.txt)
# Actions offered on a command file.
MENU_FICHIER=( "Afficher" "Modifier/Supprimer une ligne" "Ajouter une ligne" )
# Parameter types:
#   Drapeau : parameter with no argument (flag)
#   Fichier : parameter whose argument is a file name
#   Valeur  : parameter whose argument is a free-form value
TYPE_PARAM=( "Drapeau" "Fichier" "Valeur" )
# ajoutLigne: interactively append one command line to a phase's command file.
#   $1 - phase index into FIC_PHASE/PHASE
# The user picks (or types) a software name, then repeatedly adds typed
# parameters; the assembled line is appended to the file as a single line.
ajoutLigne () {
# Only proceed when the phase's command file exists.
if [ -f ${FIC_PHASE[$1]} ];then
# Offer the software names already in the file (first ":"-separated field);
# the list is editable so a new name can be typed in.
LOGICIEL=$(cut -d: -f 1 ${FIC_PHASE[$1]} |uniq |awk '{OFS="\n";print $0}END{print " "}' |zenity --list --title="Choix logiciel - ${PHASE[$1]}" --text="Choissisez un logiciel dans la liste, ou ajouter le manuellement :" --column="${FIC_PHASE[$1]}" --editable 2>/dev/null)
# Remember zenity's exit status (0 = user validated a choice).
OUT_LOGICIEL=$?
# Start the temporary line with the chosen software name.
if [ "${OUT_LOGICIEL}" -eq 0 ];then
echo "${LOGICIEL}" > nouvelle_ligne.tmp
fi
# Keep adding parameters until the user answers "no" (zenity status 1).
OUT=-1
while [ "$OUT" -ne 1 ];do
zenity --question --text="Continuez l'ajout de paramètre pour <tt>${LOGICIEL}</tt> ? "
OUT=$?
# A "yes" answer (status 0) starts entry of one more parameter.
if [ "$OUT" -eq 0 ];then
# Name of the new parameter.
NOUVEAU_PARAM=$(zenity --entry --text="Nouveau paramètre pour ${LOGICIEL} - Phase ${PHASE[$1]}" )
# Ask which parameter type it is (Drapeau/Fichier/Valeur).
MENU_PARAM=( $( echo ${TYPE_PARAM[@]}|tr ' ' '\n'|zenity --list --title="Menu Type Paramètre" --text="<b>Phase ${PHASE[$1]}</b>\n\nChoisir un type de paramètre pour <tt>${NOUVEAU_PARAM}</tt> :" --column="Type" --width=400 --height=270 --separator=" " 2>/dev/null ) )
# Prompt for the parameter's argument according to its type.
case "${MENU_PARAM}" in
"Drapeau")
# Flag: no argument.
NOUVEAU_VALEUR=""
;;
"Fichier")
# File: argument chosen through a file-selection dialog.
NOUVEAU_VALEUR=$(zenity --file-selection --text="Fichier pour paramètre ${NOUVEAU_PARAM}" 2> /dev/null )
;;
"Valeur")
# Value: argument is free text.
NOUVEAU_VALEUR=$(zenity --entry --text="Valeur pour paramètre ${NOUVEAU_PARAM}" 2> /dev/null )
;;
*)
# No type chosen: show an error and restart the whole dialog.
zenity --error --text="Item inconnu"
ajoutLigne $1
;;
esac
# Append ":param<TAB>argument" to the temporary line.
echo -e ":${NOUVEAU_PARAM}\t${NOUVEAU_VALEUR}" >> nouvelle_ligne.tmp
fi
done
# Flatten the temporary file to one line and append it to the phase's file
# (only when the initial software choice was validated).
if [ "${OUT_LOGICIEL}" -eq 0 ];then
cat nouvelle_ligne.tmp|tr -d '\n' >> ${FIC_PHASE[$1]}
echo >> ${FIC_PHASE[$1]}
# Remove the temporary scratch file.
rm nouvelle_ligne.tmp
fi
fi
}
# choixBpipe: pick, for one workflow phase, the software and then the exact
# command line that will go into the bpipe file.
#   $1 - phase index into FIC_PHASE/PHASE
# Result: the selected command line is stored in the global
# RETOUR_CHOIX_BPIPE (read afterwards by menu/inclureBpipe).
choixBpipe () {
# Only proceed when the phase's command file exists.
if [ -f ${FIC_PHASE[$1]} ];then
# Pick the software among the names recorded in the file.
LOGICIEL=$(cut -d: -f 1 ${FIC_PHASE[$1]} |uniq |zenity --list --title="Choix logiciel - ${PHASE[$1]}" --text="Liste des logiciels" --column="${FIC_PHASE[$1]}" 2>/dev/null)
# Pick one of that software's recorded command lines.
RETOUR_CHOIX_BPIPE=$(awk -v a=${LOGICIEL} -F: '{OFS="\n";if($1==a){gsub(/:/," "); printf "%s\n%s\n","FALSE",$0}}' ${FIC_PHASE[$1]} |zenity --list --text="Choisisser la ligne de commande que vous souhaitez utiliser" --column="Selection" --column="Commande" --checklist --width=650 --height=200 --print-column="2" 2> /dev/null)
fi
}
# inclureBpipe: create/update the bpipe pipeline file for one workflow phase.
#   $1 - phase index (0 = aligner, 1 = variant caller, other = visualiser)
#   $2 - command line to splice into the bpipe template
# The file is named after the current minute so the three phases of one run
# share it; it is seeded from template_bPipe.txt on first use.
inclureBpipe () {
# Timestamped file name for this deployment run.
DATE=$(date +%Y_%m_%d_%H_%M)
BpipeFileName="bpipe_"$DATE".txt"
# Seed the pipeline file from the template the first time around.
[ -f $BpipeFileName ] || cp template_bPipe.txt $BpipeFileName
# Normalise the command: tabs would confuse the sed replacement below.
COMMAND=$( echo $2|tr '\t' ' ' )
# Substitute the placeholder matching the requested phase.
case $1 in
0) sed -i "s/COMMAND_LINE_ALIGN/${COMMAND}/" $BpipeFileName ;;
1) sed -i "s/COMMAND_LINE_APP/${COMMAND}/" $BpipeFileName ;;
*) sed -i "s/COMMAND_LINE_VISUAL/${COMMAND}/" $BpipeFileName ;;
esac
}
# modifFichier: let the user pick a line of ${FIC_PHASE[$1]} and either edit
# it in place or delete it. A .bak copy is kept before any change.
#   $1 - phase index into FIC_PHASE/PHASE
modifFichier () {
# Ask which line to act on; zenity prints the selected line number.
LIGNE_MODIF=$(awk '{OFS="\n";gsub(/:/," ");gsub(/\t/," "); printf "%s\n%d\n%s\n","FALSE",NR,$0}' ${FIC_PHASE[$1]}| zenity --list --title="Traitement ligne - phase ${PHASE[$1]}" --text="Sélectionner une ligne à traiter (modifier/supprimer) dans le ficher ${FIC_PHASE[$1]}" --column="Selection" --column="N°" --column="Commande" --checklist --width=650 --height=200 --print-column="2" --ok-label="Traiter" 2> /dev/null)
if [ $? -eq 0 ] && [ -n "$LIGNE_MODIF" ];then
# Modify, or delete the chosen line?
zenity --question --title="Phase ${PHASE[$1]}" --text="Pour le fichier <tt>${FIC_PHASE[$1]}</tt> de la phase ${PHASE[$1]}, quelle action voulez vous effectuer ?" --ok-label="Modifier" --cancel-label="Supprimer la ligne ${LIGNE_MODIF}"
# BUGFIX: capture zenity's status once; the original re-tested $? in the
# elif after it had been overwritten by the preceding [ ] command.
REPONSE=$?
if [ $REPONSE -eq 0 ];then
# Edit: pre-fill the entry with the current line text.
NOUVELLE_LIGNE=$(zenity --entry --text="Ligne ${LIGNE_MODIF} à modifier" --entry-text="$( sed -n "${LIGNE_MODIF}p" ${FIC_PHASE[$1]} )")
if [ $? -eq 0 ];then
# Keep a backup, then replace the whole line in the file.
# BUGFIX: the original read the edited text but never wrote it back.
cp ${FIC_PHASE[$1]}{,.bak}
sed -i "${LIGNE_MODIF}c\\${NOUVELLE_LIGNE}" ${FIC_PHASE[$1]}
fi
elif [ $REPONSE -eq 1 ];then
# Delete: back the file up first so the removal can be undone by hand.
cp ${FIC_PHASE[$1]}{,.bak}
sed -i "${LIGNE_MODIF}d" ${FIC_PHASE[$1]}
else
# Any other status means the dialog was closed abnormally.
echo "ERREUR Traitement fichier"
exit 1
fi
else
# Nothing selected in the list dialog.
zenity --error --text="Aucune ligne sélectionnée dans le fichier ${FIC_PHASE[$1]} !"
fi
}
# menuPhase: action menu for one workflow phase; loops (via recursion) until
# the user quits or hands off to another dialog.
#   $1 - phase index into FIC_PHASE/PHASE
menuPhase () {
# Only proceed when the phase's command file exists.
if [ -f ${FIC_PHASE[$1]} ];then
# Ask which action to perform on the file (numbered list from MENU_FICHIER).
MENU=( $( echo ${MENU_FICHIER[@]// /_}|tr ' ' '\n'|awk '{OFS="\n";gsub("_"," ");print NR,$0}'|zenity --list --title="Menu ${PHASE[$1]}" --text="<b>Phase ${PHASE[$1]}</b>\n\nChoisir une action ci-dessous pour le fichier : <tt>${FIC_PHASE[$1]}</tt> :" --column="N°" --column="Action" --width=400 --height=270 --separator=" " 2>/dev/null ) )
# Did the user validate the dialog (non-empty selection)?
if [ ${#MENU[@]} -ne 0 ];then
# Exactly one item must be selected; otherwise ask again.
if [ ${#MENU[@]} -eq 1 ];then
case "$MENU" in
1 )
# Show the file's content, then come back to this menu.
sed -e 's/:/ /g' -e 's/\t/ /g' ${FIC_PHASE[$1]}| zenity --list --text="Contenu du ficher ${FIC_PHASE[$1]}" --column="Commande" --width=650 --height=200 2> /dev/null
menuPhase $1
;;
2 )
# Modify/delete a line of the file.
modifFichier $1
;;
3 )
# Append a new command line, then come back to this menu.
ajoutLigne $1
menuPhase $1
;;
* )
# Unknown selection: report and retry.
zenity --error --text="Item inconnu"
menuPhase $1
;;
esac
else
# More than one action selected: report and retry.
zenity --error --text="Plusieurs items sélectionnés dans le menu. Je ne suis pas multi-tâches."
menuPhase $1
fi
else
# Cancel pressed: confirm whether the user really wants to quit.
QUITTER=$( zenity --question --text="Etes vous sûr de vouloir quitter ?" )
if [ $? -eq 0 ];then
# Yes: leave the program.
exit 0
elif [ $? -eq 1 ];then
# NOTE(review): at this point $? is the status of the previous [ ] test,
# not of zenity, so the final "else exit 1" branch looks unreachable —
# confirm the intended behaviour.
menuPhase $1
else
# Abnormal dialog status: abort.
exit 1
fi
fi
fi
}
# choixPhase: ask which phase(s) to work on (multi-select checklist), then
# open the per-phase menu for each selection. Takes no parameters.
choixPhase () {
# Zenity returns the numbers of the ticked phases (1-based).
INDICE_PHASE=( $( echo ${PHASE[@]// /_}|tr ' ' '\n'|awk '{OFS="\n";gsub("_"," ");print "TRUE",NR,$0}'|zenity --list --title="Menu" --text="Choisir une phase dans le menu suivant" --column=" " --column="°" --column=" " --width=400 --height=270 --separator=" " --checklist --multiple 2>/dev/null ) )
# Open the menu for each selected phase (convert to 0-based index).
for i in ${INDICE_PHASE[@]}; do
menuPhase $((i-1))
done
}
# menu: top-level entry dialog. Choose between maintaining the per-phase
# command files and building a bpipe pipeline file from recorded commands.
menu () {
# Radio-list with the two top-level actions.
ACCUEIL=$( echo -e "FALSE\nAfficher/Traiter les fichiers de commandes\nTRUE\nConstruction bpipe" |zenity --list --title="Menu - EUFTraSENG" --text="Bienvenue dans EUFTraSENG (<b>E</b>ncore <b>U</b>n <b>F</b>lux de <b>Tra</b>vaux <b>SÉ</b>quençage <b>N</b>ouvelles <b>G</b>énération)" --column=" " --column="Actions" --radiolist 2>/dev/null )
# Only act when the user validated the dialog.
if [ $? -eq 0 ];then
# Status 0 from the $( ... ) substitution means "OK" was clicked.
if [ "$ACCUEIL" = "Afficher/Traiter les fichiers de commandes" ];then
# Maintain the command files.
choixPhase
elif [ "$ACCUEIL" = "Construction bpipe" ];then
# Build the bpipe file: for every phase, pick a software + command line
# and splice it into the pipeline file.
k=0
while [ "$k" -lt "${#PHASE[@]}" ];do
# Pick the command line for phase k (sets RETOUR_CHOIX_BPIPE).
choixBpipe $k
# Write it into the bpipe file.
inclureBpipe $k "${RETOUR_CHOIX_BPIPE}"
((k++))
done
else
# No recognisable choice was made.
zenity --error --text="Action inappropriée pour le menu."
exit 1
fi
fi
}
#### Main entry point
menu
| true
|
d843b3fbd21d7c94ac35d472c70fa927d353c78a
|
Shell
|
cnu341/repo2203
|
/sree.sh
|
UTF-8
| 442
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive greeter for the "Devops Area": collects a few details from the
# user and comments on their age. The sleeps only pace the dialogue.
sleep 2
echo " This is Devops Area"
sleep 2
echo "Please Enter your name:"
read -r name
sleep 2
echo "Hi $name ,
welcome to Devops "
sleep 2
echo "Enter your qualification:"
read -r qualification
sleep 2
echo "Enter DOB(dd/mm/yyyy):"
read -r dob
sleep 2
echo "Enter your Age:"
read -r Age
sleep 2
# BUGFIX: the original put "sleep 2" between the test and "then", so the
# branch was decided by sleep's (always zero) exit status and the age test
# was ignored. The two messages were also inverted (<=30 printed "old").
# Default Age to 0 so an empty answer does not break the numeric test.
if [ "${Age:-0}" -le 30 ]; then
	echo "You are very Young to learn this Devops tool"
else
	echo "You are very old to learn this Devops tool"
fi
sleep 2
| true
|
c571d0d848fb986be536e3ab4ac5f3d07d96cd6e
|
Shell
|
qq1624646454/jllutils
|
/jll.manual.adsl.sh
|
UTF-8
| 713
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Copyright (c) 2016-2100, jielong_lin, All rights reserved.
#
# Prints a short how-to for configuring ADSL (PPPoE) dial-up on Debian-like
# systems; nothing below the heredoc is executed, it is documentation only.
# Locate this script's directory so the shared library can be sourced from it.
JLLPATH="$(which $0)"
JLLPATH="$(dirname ${JLLPATH})"
# Shared helper library shipped next to this script.
source ${JLLPATH}/BashShellLibrary
cat >&1 << EOF
Install adsl component:
$ aptitude install pppoeconf
Configuration for PPPoE
$ pppoeconf
Note: you should answer Yes for all questions.
Connect to the configurated ADSL settings:
$ pon dsl-provider
Disconnect :
$ poff dsl-provider
If Call successfully, but dont go to internet:
$ route del default
$ route add default dev ppp0
Any issues are occured , please run "plog " then "pppeconf" to check dsl provider
====================================
ps aux | grep -i pppd
ifconfig | grep -i ppp
plog
EOF
| true
|
d373d4a221def812ed13f9f60c039fedc2e436dd
|
Shell
|
RafiKueng/SimAnalysis
|
/make.sh
|
UTF-8
| 1,177
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Driver for the simulation analysis: generates data, plots and tables, then
# points the user at the LaTeX sources. Requires python with requests,
# numpy and matplotlib.
# (disabled) python version probe, kept for reference:
#PYV=$(python -c 'import sys; print `sys.version_info.major`+`sys.version_info.minor`')
#ISPY=$?
#if [ "$ISPY" == "0" ]; then
#	echo "found python version " $PYV
#	if [ $PYV -le 26 ]; then
#		echo "Version too old"
#		exit 1
#	fi
#	else
#	echo 'Could not find python, please install'
#	exit 1
#fi
#
#echo "moar"
echo "make sure to have python installed"
echo "and the requests package using"
echo "  'pip install requests'"
echo "  'easy_install requests'"
echo "or similar"
echo "(but you should really use virtualenv and pip)"
echo "and of course numpy and matplotlib"
read -p "Press [Enter] key to start..."
# Step 1: generate the simulation data.
echo " > generating data (many.py)"
cd systems
python many.py
cd ..
echo " "
echo "/===================================="
# Step 2: render the plots.
echo "=> generating plots (gen_plots.py)"
cd plots
python -c "from gen_plots import *; run()"
cd ..
echo " "
echo "/===================================="
# Step 3: build the tables.
echo "=> generating tables (gen_tables.py)"
cd plots
python gen_table.py
cd ..
echo " "
echo "/===================================="
echo "finished. you can start working on the tex files in /text"
echo "please only compile the master file <ms.tex>"
| true
|
8b8fdf2bcd535de2db27e5fdd61c4f87e3f603fd
|
Shell
|
arichnad/color-explorer-vagrant
|
/enable-system-tests.sh
|
UTF-8
| 273
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Enable the system tests by writing stack/system-tests.properties with a
# testnet key. The printed hint shows how to turn the tests off again.
FILE=stack/system-tests.properties
# ROBUSTNESS: make sure the target directory exists; a fresh checkout may
# not have stack/ yet and the redirection below would fail.
mkdir -p "$(dirname "$FILE")"
echo enabling system tests
echo to turn off: "rm $FILE; vagrant ssh -c 'rm system-tests.properties'"
cat >"$FILE" <<END
#use "yes" or "no"
SYSTEM_TESTS_ENABLED=yes
#public address: mqtQH8jxv9LoSo676MsfNqr81FCLbWfjph
TESTNET_PRIVATE_KEY=92b6RYqA2GQXhMCg8YLR73AD2tHFu8ZdnxUjUB3yGn8Tj8iqjKE
END
| true
|
ff237ea886344d8fb86fd65f567e57f158bfbb79
|
Shell
|
Ascend/ModelZoo-PyTorch
|
/PyTorch/built-in/cv/classification/ResNet50_for_PyTorch/test/train_ID4050_ResNet50_RT2_performance_1p.sh
|
UTF-8
| 6,874
| 3.296875
| 3
|
[
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
#!/bin/bash
################ Base configuration — review/adjust per model ##################
# Required fields (must be defined here): Network batch_size RANK_SIZE
# Network name, same as the directory name.
Network="ResNet50_RT2_ID4050_for_PyTorch"
# Training batch size.
batch_size=512
# Number of NPU devices used for training.
export RANK_SIZE=1
# Dataset path: kept empty here, supplied via --data_path. Do not edit.
data_path=""
# Precision/debug knob; reviewers may change this per model.
precision_mode="allow_mix_precision"
# Number of training epochs (a full run would be 90).
train_epochs=1
# NPU device id used for training.
device_id=0
# Number of data-loading worker processes.
workers=64
# Argument parsing. data_path is mandatory; other flags are model-specific
# and any new flag must be declared and defaulted above.
for para in $*
do
if [[ $para == --device_id* ]];then
device_id=`echo ${para#*=}`
elif [[ $para == --data_path* ]];then
data_path=`echo ${para#*=}`
elif [[ $para == --batch_size* ]];then
batch_size=`echo ${para#*=}`
elif [[ $para == --precision_mode* ]];then
precision_mode=`echo ${para#*=}`
fi
done
# Verify that data_path was provided. Do not edit.
if [[ $data_path == "" ]];then
echo "[Error] para \"data_path\" must be confing"
exit 1
fi
# Resolve the NPU device id: prefer an externally exported ASCEND_DEVICE_ID,
# fall back to the script's device_id variable, otherwise abort.
if [ -n "$ASCEND_DEVICE_ID" ];then
echo "device id is ${ASCEND_DEVICE_ID}"
elif [ -n "${device_id}" ];then
export ASCEND_DEVICE_ID=${device_id}
echo "device id is ${ASCEND_DEVICE_ID}"
else
# BUGFIX: the original line was missing "echo", so the error message was
# executed as a command ("command not found") instead of being printed.
echo "[Error] device id must be config" >&2
exit 1
fi
############### Resolve the training script's execution path ###############
# cd to the directory that contains the test/ folder so relative paths work
# regardless of where the script was invoked from; test_path_dir is the
# directory holding the test/ folder.
cur_path=`pwd`
cur_path_last_dirname=${cur_path##*/}
if [ x"${cur_path_last_dirname}" == x"test" ];then
test_path_dir=${cur_path}
cd ..
cur_path=`pwd`
else
test_path_dir=${cur_path}/test
fi
################# Create the log output directory. Do not edit. #################
if [ -d ${test_path_dir}/output/${ASCEND_DEVICE_ID} ];then
rm -rf ${test_path_dir}/output/${ASCEND_DEVICE_ID}
mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
else
mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
fi
# Patch the training script: insert the binary (non-JIT) compile mode right
# after its last "import torch" line.
line=`grep "import torch" ${test_path_dir}/../pytorch_resnet50_apex.py -n | tail -1|awk -F ':' '{print $1}'`
sed -i "$[line+1]itorch.npu.set_compile_mode(jit_compile=False)" ${test_path_dir}/../pytorch_resnet50_apex.py
# Performance mode: replace "pass" with "break" so epochs stop early
# (reverted after the run below).
sed -i "s|pass|break|g" ${test_path_dir}/../pytorch_resnet50_apex.py
wait
################# Launch the training script #################
# Training start time. Do not edit.
start_time=$(date +%s)
# Outside the platform environment, source the NPU environment variables.
check_etp_flag=`env | grep etp_running_flag`
etp_flag=`echo ${check_etp_flag#*=}`
if [ x"${etp_flag}" != x"true" ];then
source ${test_path_dir}/env_npu.sh
fi
nohup python3 ./pytorch_resnet50_apex.py \
--data ${data_path} \
--npu ${ASCEND_DEVICE_ID} \
-j ${workers} \
-b ${batch_size} \
--lr 0.2 \
--warmup 5 \
--precision_mode=${precision_mode} \
--label-smoothing=0.1 \
--epochs ${train_epochs} \
--optimizer-batch-size 512 > ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
wait
################## Collect training results ################
# Training end time. Do not edit.
end_time=$(date +%s)
e2e_time=$(( $end_time - $start_time ))
# Revert the "pass" -> "break" performance patch applied above.
sed -i "s|break|pass|g" ${test_path_dir}/../pytorch_resnet50_apex.py
wait
# Test-case identification. Do not edit.
BatchSize=${batch_size}
DeviceType=`uname -m`
if [[ $precision_mode == "must_keep_origin_dtype" ]];then
CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'fp32'_'perf'
else
CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
fi
# Print results. Do not edit.
echo "------------------ Final result ------------------"
# Extract throughput (FPS); the field position may need review per model.
grep "FPS@all" ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk '{print $7}' >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_fps.log
FPS=`cat ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${CaseName}_fps.log | awk '{a+=$1} END {if (NR != 0) printf("%.3f",a/NR)}'`
# Compile time: sum the "Time" fields of the first two logged steps.
CompileTime=`grep Epoch: ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log |head -n 2| awk -F "Time" '{print$2}'|awk '{sum+=$1}END{print sum}'`
# Report throughput. Do not edit.
echo "Final Performance images/sec : $FPS"
# Extract the final top-1 accuracy; may need review per model.
train_accuracy=`grep -a '* Acc@1' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print}'|awk -F "Acc@1" '{print $NF}'|awk -F " " '{print $1}'`
# Report accuracy and wall-clock time. Do not edit.
echo "Final Train Accuracy : ${train_accuracy}"
echo "E2E Training Duration sec : $e2e_time"
# Performance-watch summary.
# Throughput. Do not edit.
ActualFPS=${FPS}
# Time per training iteration (ms).
TrainingTime=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'*1000/'${FPS}'}'`
# Extract per-step Loss from train_$ASCEND_DEVICE_ID.log into
# train_${CaseName}_loss.txt; may need review per model.
grep Epoch: ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|grep -v Test|awk -F "Loss" '{print $NF}' | awk -F " " '{print $1}' >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
# Loss of the last iteration. Do not edit.
ActualLoss=`awk 'END {print}' ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
# Write the key metrics to ${CaseName}.log. Do not edit.
echo "Network = ${Network}" > ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "RankSize = ${RANK_SIZE}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "BatchSize = ${BatchSize}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "DeviceType = ${DeviceType}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "CaseName = ${CaseName}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "ActualFPS = ${ActualFPS}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "TrainingTime = ${TrainingTime}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "TrainAccuracy = ${train_accuracy}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "ActualLoss = ${ActualLoss}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "E2ETrainingTime = ${e2e_time}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "CompileTime = ${CompileTime}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
| true
|
786fdf6f3d0994d388b1a395f79002b10d1d1a77
|
Shell
|
blade1989/cerberus
|
/options.sh
|
UTF-8
| 1,887
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# -----------------------------------------------
# Author      : Imri Paloja
# Email       : imri.paloja@gmail.com
# HomePage    : www.eurobytes.nl
# Version     : 0.0.9
# Name        : Cerberus
# OS          : Works on Ubuntu
# Description : Powerful but lightweight Malware Scanner
# -----------------------------------------------
# Checks
# Abort unless the script is executed as root.
if [ $(id -u) = "0" ]; then
echo "" > /dev/null
else
echo "This scripts needs root permissions"
exit
fi
# Abort when no argument was given to work with.
if [ "$1" = "" ]; then
echo "I need some input to play with"
exit
else
echo "" > /dev/null
fi
# Functions
# MalwareApiKey: load the malwr.com API key from cerberus.conf, but only
# when the "malwarapikey=" line is not commented out (no "#" on it).
# NOTE(review): the config path is hardcoded to /home/blade/... — presumably
# it should be derived from the install location; confirm.
function MalwareApiKey {
if [ "$(cat /home/blade/scripts/cerberus/etc/cerberus.conf | grep malwarapikey= | grep \# )" = "" ]; then
# Strip the "malwarapikey=" prefix, keeping just the key value.
malwarapikey=$(cat /home/blade/scripts/cerberus/etc/cerberus.conf | grep "malwarapikey=" | sed 's/malwarapikey=//g')
else
echo "The malwr.com Key is not enabled" >>/dev/null
fi
}
# VirusTotalApiKey: load the VirusTotal API key from cerberus.conf, but only
# when the "virustotalapikey=" line is not commented out.
function VirusTotalApiKey {
# CONSISTENCY FIX: the original used "grep -x virustotalapikey=", which only
# matches a line that is exactly "virustotalapikey=" (an empty key), so a
# commented-out key was never detected. Mirror MalwareApiKey's check instead.
if [ "$(cat /home/blade/scripts/cerberus/etc/cerberus.conf | grep virustotalapikey= | grep \# )" = "" ]; then
virustotalapikey=$(cat /home/blade/scripts/cerberus/etc/cerberus.conf | grep virustotalapikey= | sed 's/virustotalapikey=//g')
else
echo "The VirusTotal Key is not enabled" >> /dev/null
fi
}
# Dispatch on the first CLI argument to the matching mode.
case $1 in
-h|--help)
# Usage help.
echo "Usage: $0 [options...] [file...]"
echo " -f  scan specifically for a file:"
;;
0)
# Placeholder mode: nothing to do.
echo " "
;;
1)
# Scan mode.
echo "-scan"
;;
2)
# Metadata mode.
echo "-meta"
;;
3)
# Error reporting for input/output file selection.
echo "Errors selecting input/output files, dirs"
;;
4)
echo "Option 4"
;;
*)
# Anything else is unknown; point the user at the help text.
echo "$1 is an unknown option. Please run $0 --help"
esac
| true
|
c58c55742a7e0c967f165c1c6216e3f3eba6949e
|
Shell
|
Mangemannen66/bash_grundkurs
|
/lab6/uppgift2.sh
|
UTF-8
| 472
| 2.90625
| 3
|
[] |
no_license
|
#! /bin/bash
################################################
# Assignment 2, lab 6                          #
# Magnus Danielsson LX13                       #
# A script that empties a directory — in this  #
# case the folder "mappen" under /tmp.         #
################################################
# Only root is allowed to empty the directory.
if [ $EUID -ne 0 ]; then
echo "Du måste köra det här skriptet som root "
exit 1
else
rm -rf /tmp/mappen/*
echo "filerna raderades utan problem"
exit 0
fi
| true
|
34a2521a7f2efc41240eeb0e9e20357aa499bb39
|
Shell
|
lenik/uni
|
/fs/fileutils-xjl/tourl
|
UTF-8
| 184
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Print each argument as a URL: plain paths become absolute file:// URLs,
# while arguments that already contain "://" are printed unchanged.
# bash is required for the ${var/pattern} substitution below.
for f in "$@"; do
    # ${f/:\/\/} deletes the first "://"; when nothing was deleted, the
    # argument has no scheme and is treated as a local path.
    if [ "${f/:\/\/}" = "$f" ]; then
        printf 'file://'
        readlink -f "$f"
    else
        # BUGFIX: print the URL verbatim; the original's unquoted "echo $f"
        # word-split and glob-expanded arguments containing spaces or '*'.
        printf '%s\n' "$f"
    fi
done
| true
|
b462d93e428f5f0493768b41b7d408bdb3f0efea
|
Shell
|
huangxinglong/quantitative-data
|
/build.sh
|
UTF-8
| 1,314
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Cross-compile samaritan with xgo and produce one release archive per
# target platform (zip for Windows, tar.gz for the rest), then bundle
# everything into cache.zip.
xgo --targets=windows/*,darwin/amd64,linux/amd64,linux/386,linux/arm --dest=cache ./
# Target platform names and the matching xgo output binaries (same order).
osarchs=(windows_amd64 windows_386 darwin_amd64 linux_amd64 linux_386 linux_arm)
files=(samaritan-windows-4.0-amd64.exe samaritan-windows-4.0-386.exe samaritan-darwin-10.6-amd64 samaritan-linux-amd64 samaritan-linux-386 samaritan-linux-arm-5)
# Unpack the prebuilt web assets once; copied into every package below.
unzip web/dist.zip -d web
for i in 0 1 2 3 4 5; do
# Assemble the per-platform package layout.
mkdir cache/samaritan_${osarchs[${i}]}
mkdir cache/samaritan_${osarchs[${i}]}/web
mkdir cache/samaritan_${osarchs[${i}]}/custom
cp LICENSE cache/samaritan_${osarchs[${i}]}/LICENSE
cp -r plugin cache/samaritan_${osarchs[${i}]}/plugin
cp README.md cache/samaritan_${osarchs[${i}]}/README.md
cp -r web/dist cache/samaritan_${osarchs[${i}]}/web/dist
cp config.ini cache/samaritan_${osarchs[${i}]}/custom/config.ini
cp config.ini cache/samaritan_${osarchs[${i}]}/custom/config.default.ini
cd cache
# Indices 0 and 1 are the Windows targets: .exe binary inside a zip;
# everything else gets a plain binary inside a tar.gz.
if [ ${i} -lt 2 ]
then
mv ${files[${i}]} samaritan_${osarchs[${i}]}/samaritan.exe
zip -r samaritan_${osarchs[${i}]}.zip samaritan_${osarchs[${i}]}
else
mv ${files[${i}]} samaritan_${osarchs[${i}]}/samaritan
tar -zcvf samaritan_${osarchs[${i}]}.tar.gz samaritan_${osarchs[${i}]}
fi
# Drop the unpacked staging directory, keeping only the archive.
rm -rf samaritan_${osarchs[${i}]}
cd ..
done
# Bundle all per-platform archives, then clean up the build artifacts.
zip -r ./cache.zip ./cache/
rm -rf web/dist cache
| true
|
8cf7b375ba50f9f9c0adeca30d0e448ce1b66ed8
|
Shell
|
saas786/deploy-git-to-svn
|
/deploy-git-to-svn.sh
|
UTF-8
| 6,794
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Deploy WordPress Plugin to svn from Github
# Author: Sudar <http://sudarmuthu.com>
#
# License: Beerware ;)
#
# Make sure you have git-svn installed. In Ubuntu you can do sudo apt-get install git-svn
#
# Credit: Uses most of the code from the following places
#       https://github.com/deanc/wordpress-plugin-git-svn
#       https://github.com/thenbrent/multisite-user-management/blob/master/deploy.sh
#       https://github.com/ocean90/svn2git-tools/
# default configurations (overridable through the CLI flags parsed below)
PLUGINSLUG="deploy-git-to-svn"
MAINFILE="$PLUGINSLUG.php" # this should be the name of your main php file in the WordPress Plugin
ASSETS_DIR="assets-wp" # the name of the assets directory that you are using
SVNUSER="info@remcotolsma.nl" # your svn username
TMPDIR="/tmp"
CURRENTDIR=`pwd`
COMMITMSG_FILE='wp-plugin-commit-msg.tmp'
# Read .svnignore into a variable, see
# http://stackoverflow.com/questions/7427262/read-a-file-and-save-it-in-variable-using-shell-script
SVN_IGNORE=$(<.svnignore)
# Get the directory in which this shell script is present
cd $(dirname "${0}") > /dev/null
SCRIPT_DIR=$(pwd -L)
cd - > /dev/null
# Helper that converts readme.md <-> readme.txt, shipped next to this script.
README_CONVERTOR=$SCRIPT_DIR/readme-convertor.sh
# Simple flag parser, lifted from
# http://www.shelldorado.com/goodcoding/cmdargs.html
while [ $# -gt 0 ]
do
case "$1" in
-p) PLUGINSLUG="$2"; MAINFILE="$PLUGINSLUG.php"; shift;;
-u) SVNUSER="$2"; shift;;
-f) MAINFILE="$2"; shift;;
-a) ASSETS_DIR="$2"; shift;;
-t) TMPDIR="$2"; shift;;
-*)
echo >&2 \
"usage: $0 [-p plugin-name] [-u svn-username] [-m main-plugin-file] [-a assets-dir-name] [-t tmp directory]"
exit 1;;
*) break;; # terminate while loop
esac
shift
done
# git config
GITPATH="$CURRENTDIR" # this file should be in the base of your git repository
# svn config
SVNPATH="$TMPDIR/$PLUGINSLUG" # path to a temp SVN repo. No trailing slash required and don't add trunk.
SVNPATH_ASSETS="$TMPDIR/$PLUGINSLUG-assets" # path to a temp assets directory.
SVNURL="https://deploy-git-to-svn.googlecode.com/svn/"
cd $GITPATH
# Let's begin...
echo ".........................................."
echo
echo "Preparing to deploy WordPress Plugin"
echo
echo ".........................................."
echo
# Pull the latest changes from origin, to make sure we are using the latest code
git pull origin master
# Make sure the "Stable tag:" in readme.txt/md matches the "Version:" header
# of the main plugin file before deploying. Prefer readme.md when present.
if [ -f "$GITPATH/readme.md" ]; then
NEWVERSION1=$(awk -F' ' '/Stable tag:/{print $NF}' "$GITPATH/readme.md" | tr -d '\r')
else
NEWVERSION1=$(awk -F' ' '/Stable tag:/{print $NF}' "$GITPATH/readme.txt" | tr -d '\r')
fi
echo "[Info] readme.txt/md version: $NEWVERSION1"
# BUGFIX: the original assigned NEWVERSION2 twice and the first assignment
# was dead code, immediately overwritten. Keep the grep/tr variant (it also
# strips CRs before awk splits the fields).
NEWVERSION2=$(grep "^Version:" "$GITPATH/$MAINFILE" | tr -d '\015' | awk -F' ' '{print $NF}')
echo "[Info] $MAINFILE version: $NEWVERSION2"
if [ "$NEWVERSION1" != "$NEWVERSION2" ]; then echo "Version in readme.txt/md & $MAINFILE don't match. Exiting...."; exit 1; fi
echo "[Info] Versions match in readme.txt/md and $MAINFILE. Let's proceed..."
if git show-ref --tags --quiet --verify -- "refs/tags/$NEWVERSION1"
then
echo "Version $NEWVERSION1 already exists as git tag. Exiting....";
exit 1;
else
echo "[Info] Git version does not exist. Let's proceed..."
fi
# if unsaved changes are there the commit them.
if ! git diff-index --quiet HEAD --; then
echo "[Info] Unsaved changes found. Committing them to git"
echo -e "Enter a commit message for unsaved changes: \c"
read COMMITMSG
git commit -am "$COMMITMSG"
fi
# Retrieve commit messages till the last tag
git log `git describe --tags --abbrev=0`..HEAD --oneline > $TMPDIR/$COMMITMSG_FILE
# Tag new version
echo "[Info] Tagging new version in git with $NEWVERSION1"
git tag -a "$NEWVERSION1" -m "Tagging version $NEWVERSION1"
# Push the latest version to github
echo "[Info] Pushing latest commit to origin, with tags"
git push origin master
git push origin master --tags
# Process /assets directory
if [ -d $GITPATH/$ASSETS_DIR ]
then
echo "[Info] Assets directory found. Processing it."
if svn checkout $SVNURL/assets $SVNPATH_ASSETS; then
echo "[Info] Assets directory is not found in SVN. Creating it."
# /assets directory is not found in SVN, so let's create it.
# Create the assets directory and check-in.
# I am doing this for the first time, so that we don't have to checkout the entire Plugin directory, every time we run this script.
# Since it takes lot of time, especially if the Plugin has lot of tags
svn checkout $SVNURL $TMPDIR
cd $TMPDIR/$PLUGINSLUG
mkdir assets
svn add assets
svn commit -m "Created the assets directory in SVN"
rm -rf $TMPDIR/$PLUGINSLUG
svn checkout $SVNURL/assets $SVNPATH_ASSETS
fi
cp $GITPATH/$ASSETS_DIR/* $SVNPATH_ASSETS # copy assets
cd $SVNPATH_ASSETS # Switch to assets directory
svn status | grep "^?\|^M" > /dev/null 2>&1 # Check if new or updated assets exists
if [ $? -eq 0 ]
then
svn status | grep "^?" | awk '{print $2}' | xargs svn add # Add new assets
svn commit --username=$SVNUSER -m "Updated assets"
echo "[Info] Assets committed to SVN."
rm -rf $SVNPATH_ASSETS
else
echo "[Info] Contents of Assets directory unchanged. Ignoring it."
fi
else
echo "[Info] No assets directory found."
fi
echo
echo "[Info] Creating local copy of SVN repo ..."
svn co $SVNURL/trunk $SVNPATH
echo "[Info] Exporting the HEAD of master from git to the trunk of SVN"
git checkout-index -a -f --prefix=$SVNPATH/
echo "[Info] Ignoring github specific files and deployment script"
svn propset svn:ignore "README.md
.git
.gitignore
$SVN_IGNORE" "$SVNPATH"
echo "[Info] SVN ignore:"
svn propget -R svn:ignore $SVNPATH
echo "[Info] Changing directory to SVN and committing to trunk"
cd $SVNPATH
# remove assets directory if found
if [ -d $ASSETS_DIR ]; then
rm -rf $ASSETS_DIR
fi
# Convert markdown in readme.txt file to github markdown format
$README_CONVERTOR readme.md readme.txt to-wp
# TODO: Generate .pot files as well
# Add all new files that are not set to be ignored
svn status | grep -v "^.[ \t]*\..*" | grep "^?" | awk '{print $2}' | xargs svn add
# Get aggregated commit msg and add comma in between them
COMMITMSG=`cut -d' ' -f2- $TMPDIR/$COMMITMSG_FILE | sed -e '$ ! s/$/,/'`
rm $TMPDIR/$COMMITMSG_FILE
svn commit --username=$SVNUSER -m "$COMMITMSG"
echo "[Info] Creating new SVN tag & committing it"
svn copy . $SVNURL/tags/$NEWVERSION1/ -m "Tagging version $NEWVERSION1"
echo "[Info] Removing temporary directory $SVNPATH"
rm -fr $SVNPATH/
echo "*** Done ***"
| true
|
f394e2ba334660bff06aa16bf5cdf09af1ce08f3
|
Shell
|
root-able/authomation
|
/02-Splunk_Intake/smartphone_chat/data_extractor/functions/log_message.sh
|
UTF-8
| 181
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/sh
##########################
### DEFINING FUNCTIONS ###
##########################
# Log a message to syslog via logger(1), tagged with this script's name.
# $1 - first message part (e.g. severity or step name)
# $2 - second message part (free-text detail)
# The entry is formatted as "<arg1> - <arg2>".
log_result () {
  # Quote "$0" so a script path containing spaces stays a single -t tag.
  /usr/bin/logger -t "$0" "$1 - $2"
}
| true
|
5c71e9b7de409287a6551f080d978d59b0e67016
|
Shell
|
noobaa/noobaa-operator
|
/build/tools/builder-pre.sh
|
UTF-8
| 1,651
| 4.59375
| 5
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# NOTE: This script can be run manually but is run anyway by builder.sh before that script starts executing its `main`
# This script will automate the file alteration, which involves
# 1. Updating the README.md
# 2. Committing the changes and pushing the changes
set -eux
set -o pipefail
export PS4='\e[36m+ ${FUNCNAME:-main}@${BASH_SOURCE}:${LINENO} \e[0m'
dir=$(dirname "$0")
# Source the utilities
source $dir/utils.sh
# Variables
DRY_RUN=${DRY_RUN:="false"}
# Update the version of noobaa core image, noobaa operator image and CLI version in the README output.
function update_readme() {
  local version core_version
  # Declare and assign separately so a failure of get_noobaa_version is not
  # masked by the exit status of `local` (this matters under `set -e`).
  version=$(get_noobaa_version)
  core_version="$version" # Assume to be the same as the version of operator
  local readme_file=README.md
  finline_replace "INFO\[0000\] CLI version: .*" "INFO\[0000\] CLI version: ${version}" $readme_file
  finline_replace "INFO\[0000\] noobaa-image: .*" "INFO\[0000\] noobaa-image: noobaa\/noobaa-core:${core_version}" $readme_file
  finline_replace "INFO\[0000\] operator-image: .*" "INFO\[0000\] operator-image: noobaa\/noobaa-operator:${version}" $readme_file
}
# Commit and push the README change; when TAG is set, also create and push a git tag.
function commit_changes() {
  if [[ $DRY_RUN == "true" ]]; then
    echo "DRY_RUN is set to true, skipping commiting changes."
    return
  fi
  local version
  version=$(get_noobaa_version 1)
  git add .
  git commit -m "Automated commit to update README for version: ${version}" --allow-empty
  git push
  # If TAG=1 is provided, then create a tag.
  # BUGFIX: use ${TAG:-} — with `set -u` an unset TAG previously aborted the
  # script with an "unbound variable" error right after the push.
  if [ -n "${TAG:-}" ]; then
    git tag -a "${version}" -m "Tag for version ${version}"
    git push origin "${version}"
  fi
}
# Main function
function main() {
  update_readme
  commit_changes
}
main "$@"
| true
|
7b07d04e9803e3b03bc3feb365b1b73d096d0acd
|
Shell
|
luk1337/SemcCamera
|
/addon-common/force-permissions-update.sh
|
UTF-8
| 423
| 3.296875
| 3
|
[] |
no_license
|
#!/sbin/sh
# Bump the build timestamp in /system_root/system/build.prop by one second so
# Android sees the installation as changed and re-runs its permissions update.
# NOTE(review): the shebang is /sbin/sh but the script uses bash-only
# constructs ([[ ]], ${VAR:offset}, ${T::5}) — assumes the recovery shell is
# bash-compatible; confirm for the target recovery environment.
OLD_PROP=`grep "^ro.build.date=" /system_root/system/build.prop`
# Strip the 14-character "ro.build.date=" prefix, leaving the date string.
VALUE="${OLD_PROP:14}"
# Extract the HH:MM:SS portion of the date string.
T=`echo ${VALUE} | grep -Eo "[0-9]{2}:[0-9]{2}:[0-9]{2}"`
# Seconds field (characters 7-8 of HH:MM:SS).
S="${T:6}"
# Increment build date time
# NOTE(review): on :59 the seconds wrap to :00 without carrying into the
# minutes, so the stored time moves backwards — presumably "different" is all
# that matters here; confirm.
if [[ "${S}" -eq "59" ]]; then
S="00"
else
# 10# forces base-10 so values like "08"/"09" are not parsed as octal.
S=`printf "%02d" $((10#${S} + 1))`
fi
# Rebuild the property line with the incremented seconds, then patch the
# original line in place inside build.prop.
NEW_PROP=`echo "${OLD_PROP}" | sed "s/${T}/${T::5}:${S}/g"`
sed -i "s/${OLD_PROP}/${NEW_PROP}/" /system_root/system/build.prop
exit 0
| true
|
c9db1fcda0a43055519c5bfcf5db18950cbd3620
|
Shell
|
masterdon/kali-config
|
/bin/httpx.sh
|
UTF-8
| 645
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
#============================================================
# File: httpx
# Author: Donald Raikes <don.raikes@nyu.edu>
# Date: 09/01/2014
#
# Purpose:
# Extract only HTTP packets from the input capture file and
# print the minimum of necessary information about them.
#============================================================
# Prompt for paths. printf replaces `echo -n`, which is not portable
# under /bin/sh; `read -r` keeps backslashes in paths intact.
printf "capture file: "
read -r CAPFILE
printf "Output file: "
read -r LOGFILE
# -r: input capture; -V: verbose packet detail; -R: display filter keeping
# only HTTP requests/responses on port 80.
# NOTE(review): -R is the legacy read-filter flag; recent tshark versions
# expect -Y for display filters — confirm against the installed version.
# Both file paths are quoted so names containing spaces survive.
tshark -r "$CAPFILE" -V -R "tcp.port ==80 && (http.request || http.response)" | \
awk "/Hypertext Transfer Protocol/,/Frame/ { print }; \
/Transmission Control Protocol/{print};
/Internet Protocol/{print}" | \
grep -v Frame > "$LOGFILE"
| true
|
0d2ce603ac59f63c0d33439de6cc6e17ce524bee
|
Shell
|
raphael-group/netmix
|
/examples/example_commands.sh
|
UTF-8
| 2,416
| 3.390625
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
################################################################################
#
# Set directories.
#
################################################################################
scripts=../src
data=data
results=results
heinz_directory="" # Install heinz and add the directory for heinz here.
mkdir -p $data
mkdir -p $results
# BUGFIX: the message previously interpolated the undefined variable $heinz
# instead of $heinz_directory, printing '"/heinz" does not exist'.
if [ ! -f "$heinz_directory/heinz" ]
then
echo "\""$heinz_directory"/heinz\" does not exist; install to use NetMix."
fi
################################################################################
#
# Generate simulated data.
#
################################################################################
echo "Generating simulated data..."
n=1000 # Number of nodes
m=5 # Parameter for Barabasi-Albert preferential attachment model
mu=2.5 # Altered distribution mean
alpha=0.02 # Fraction of nodes drawn from altered distribution
seed=12345 # Random seed
# Generate random network.
python $scripts/generate_barabasi_albert_graph.py \
-n $n \
-m $m \
-s $seed \
-elf $data/network.tsv
# Generate random scores.
python $scripts/generate_vertex_weights.py \
-elf $data/network.tsv \
-mu $mu \
-a $alpha \
-is $seed \
-ss $seed \
-nsf $data/z_scores.tsv \
-inf $data/implanted_nodes.tsv \
-nnf $data/non_implanted_nodes.tsv
################################################################################
#
# Identify altered subnetwork.
#
################################################################################
echo "Identifying altered subnetwork..."
# Generate responsibility-based scores.
python $scripts/compute_scores.py \
-i $data/z_scores.tsv \
-o $data/responsibility_scores.tsv
# Find nodes for unconstrained ASD problem.
python $scripts/compute_positive_subset.py \
-i $data/responsibility_scores.tsv \
-o $results/asd_unconstrained_results.txt
# Find nodes for constrained ASD problem (only when heinz is installed).
if [ -f "$heinz_directory/heinz" ]
then
$heinz_directory/./heinz \
-e $data/network.tsv \
-n $data/responsibility_scores.tsv \
-o $results/asd_constrained_output.tsv \
-m 4 \
-t 1800 \
-v 0 \
> /dev/null 2>&1
fi
python $scripts/process_heinz_output.py \
-i $results/asd_constrained_output.tsv \
-o $results/asd_constrained_results.txt
rm -f $results/asd_constrained_output.tsv
| true
|
e59faae90f136c340f0f1757371c689b39871efa
|
Shell
|
desmarais-patrick/notes-web-client-js
|
/deploy.sh
|
UTF-8
| 1,113
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Upload the web client's static resources to the configured GCS bucket.
# Requires NOTES_APP_WEB_CLIENT_RESOURCES_BUCKET to be exported beforehand.
echo
echo
# -z with a quoted ${VAR:-} default is the robust "is it set and non-empty"
# test; the original `[ ! $VAR ]` broke for values containing spaces.
if [ -z "${NOTES_APP_WEB_CLIENT_RESOURCES_BUCKET:-}" ]
then
  echo 'Missing value for environment variable:'
  echo '    $NOTES_APP_WEB_CLIENT_RESOURCES_BUCKET'
  echo
  echo 'Please set variable for uploading to the right bucket using:'
  echo
  echo '    export NOTES_APP_WEB_CLIENT_RESOURCES_BUCKET=<BUCKET_NAME>'
  echo '    where <BUCKET_NAME> could be: notes-55123-client-resources'
  echo
  echo 'Please also make sure your Google Cloud project is set correctly.'
  echo 'See README.md for more details.'
  echo
  echo
  exit 1
fi
# Use a bash array instead of a whitespace-split string for the file list.
ROOT_FILES=(index.html favicon.ico index.designSystem.html index.unitTest.html index.liveTest.html)
DIR_CSS=css
DIR_IMG=img
DIR_JS=js
echo "Sending files to bucket '$NOTES_APP_WEB_CLIENT_RESOURCES_BUCKET'"
gsutil cp "${ROOT_FILES[@]}" "gs://${NOTES_APP_WEB_CLIENT_RESOURCES_BUCKET}"
gsutil -m cp -r "${DIR_CSS}" "gs://${NOTES_APP_WEB_CLIENT_RESOURCES_BUCKET}"
gsutil -m cp -r "${DIR_IMG}" "gs://${NOTES_APP_WEB_CLIENT_RESOURCES_BUCKET}"
gsutil -m cp -r "${DIR_JS}" "gs://${NOTES_APP_WEB_CLIENT_RESOURCES_BUCKET}"
echo
echo
exit 0
| true
|
9d59acb35e02cec5136f03f0725660c9bee20c76
|
Shell
|
BoldingBruggeman/salishsea
|
/setups/salish_1km/machine.orca
|
UTF-8
| 312
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/sh
# Per-machine setup for the "orca" host: points GETM/GOTM at their source
# trees and links the subdomain decomposition for the selected configuration.
# Expects $conf to be set by the caller (e.g. "0144x0144").
export GETMDIR=$HOME/GETM/getm-git
export GOTMDIR=$HOME/GOTM/gotm-git
# POSIX "=" instead of the bash-only "==" — this runs under /bin/sh.
if [ "${conf:-}" = "0144x0144" ]; then
#	nsub=30
	offset="offset-0069x-0139"
fi
confdir=../Configurations/$conf
# Link this configuration's subdomain layout into the run directory.
ln -sf "$confdir/$offset/subdomain_spec.lst" par_setup.dat
echo "orca" > machinefile
# Clear PDSH so no parallel-shell wrapper is used on this host.
export PDSH=
echo "orca: $conf"
| true
|
f0a4b1daeef2e583873351c449a92e35ec806958
|
Shell
|
drblallo/PermutationTest
|
/ssh/run.sh
|
UTF-8
| 253
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Repeatedly (10000x) generate a random test case with MATLAB, build and run
# the generated project, then aggregate all results with test.py at the end.
max=10000
for (( i=0; i < $max; ++i ))
do
# Progress indicator: current iteration number.
echo $i
# NOTE(review): "generateTestCase.m" is passed both as a positional argument
# and via -r "run(...)"; presumably only the -r invocation matters — confirm
# the positional argument is intentional.
matlab -nodesktop generateTestCase.m -r "try, run ('generateTestCase.m'); end; quit"
cd generationDir/
# Build artifact produced by the generated test case.
./testProject
cd ../
done
# Post-process all accumulated outputs.
cd generationDir
python3 test.py
cd ../
| true
|
2bd887405828b1f30bf08485a76860857f7c6b21
|
Shell
|
cs558i/kiss-ppc64le
|
/repo/rust/build
|
UTF-8
| 1,554
| 2.875
| 3
|
[] |
no_license
|
#!/bin/sh -e
# KISS package build script for Rust on powerpc64le musl: patch the vendored
# sources, generate config.toml, then bootstrap-build and install with x.py.
# $1 - DESTDIR for the final install step.
for patch in *.patch; do
patch -p1 < "$patch"
done
# 'rust' checksums files in 'vendor/', but we patch a few files.
# Blank the per-file checksum list so cargo accepts the patched sources.
for vendor in libc openssl-sys; do
sed -i 's/\("files":{\)[^}]*/\1/' "vendor/$vendor/.cargo-checksum.json"
done
# Drop the parentheses around the target check in bootstrap so the condition
# matches this (musl) target layout.
sed -i 's/(target.contains("linux-gnu") || target.contains("apple-darwin"))/target.contains("linux-gnu") || target.contains("apple-darwin")/g' src/bootstrap/lib.rs
# Generate the build configuration: system LLVM (shared), musl host/target
# triple, vendored deps, and a pre-existing bootstrap rustc/cargo.
cat > config.toml <<EOF
[llvm]
link-shared = true
[build]
build = "powerpc64le-unknown-linux-musl"
host = [ "powerpc64le-unknown-linux-musl" ]
target = [ "powerpc64le-unknown-linux-musl" ]
docs = false
compiler-docs = false
extended = true
tools = ["cargo"]
submodules = false
python = "python3"
locked-deps = true
vendor = true
sanitizers = false
profiler = false
full-bootstrap = false
rustc = "$(pwd)/bootstrap/usr/bin/rustc"
cargo = "$(pwd)/bootstrap/usr/bin/cargo"
cargo-native-static = true
local-rebuild = true
[install]
prefix = "/usr"
[rust]
channel = "stable"
rpath = true
codegen-units = 1
debuginfo-level = 0
debug = false
backtrace = false
jemalloc = false
debug-assertions = false
codegen-tests = false
[target.powerpc64le-unknown-linux-musl]
llvm-config = "/usr/bin/llvm-config"
crt-static = false
EOF
# Build with all available cores, then install into the package DESTDIR.
python3 ./x.py build -j "$(nproc)"
DESTDIR="$1" python3 ./x.py install
| true
|
5b1bbf76a15bc8b87d45c7b2857b68e1d1a6651a
|
Shell
|
xoroz/containers
|
/web/joomla/dockerfile/initd.sh
|
UTF-8
| 2,838
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# automate joomla web site creation using ENV from docker-compose
# run as docker entrypoint
# by Felipe Ferreira 08/19
set -e
# Background watcher: for up to ~30 minutes, poll the Joomla installation
# directory ($D/installation) and delete the temporary "*Joomla*.txt" install
# marker files as soon as they appear. Exits early if the directory is gone
# (i.e. Joomla was already installed).
# Reads globals: D (site root). Intended to be launched with `cleanup &`.
cleanup() {
# NOTE(review): SECONDS is a bash builtin that advances by itself once per
# second; the manual "+= SLEEPT" below adds on top of that, so the loop ends
# sooner than END suggests — confirm the intended timeout.
SECONDS=1
DI="${D}/installation/"
SLEEPT=10
END=1800 # 30m
while [ $SECONDS -lt $END ];
do
if [ ! -d $DI ]; then
echo "Joomla was already installed, could not find $DI"
break;
fi
# Any matching marker file present? (non-empty find output)
C=$(find $DI -name "*Joomla*.txt")
if [ ! -z $C ]; then
find $DI -name "*Joomla*.txt" -exec rm -fv '{}' \;
echo "Joomla temp install file was deleted."
echo -e "------------------------------------------------\n\n"
# break;
fi
sleep $SLEEPT
SECONDS=$((SECONDS+$SLEEPT))
done
}
# Run Apache in the foreground (container entrypoint style), forwarding any
# extra arguments to apachectl.
function runapache {
# Apache gets grumpy about PID files pre-existing
rm -f /usr/local/apache2/logs/httpd.pid
/usr/sbin/apachectl -D FOREGROUND "$@"
#/usr/sbin/apachectl start
}
# Create a dedicated user with the requested UID and re-exec the given
# command as that user via gosu, so created files match the host UID.
# Reads env: USER_ID (falls back to LOCAL_USER_ID, then 9001).
function startas {
  #NOT USING YET
  # Quote the expansion: the original unquoted `[ -z $USER_ID ]` degenerated
  # to `[ -z ]` when unset and only worked by accident.
  if [ -z "${USER_ID:-}" ]; then
    # Name the variable literally; the original interpolated the (empty)
    # value and printed "missing env ".
    echo "ERROR - missing env USER_ID"
    USER_ID=${LOCAL_USER_ID:-9001}
  fi
  echo "Starting with UID : $USER_ID"
  useradd --shell /bin/bash -u "$USER_ID" -o -c "" -m user
  export HOME=/home/user
  exec /usr/local/bin/gosu user "$@"
}
#printenv
if [ -z $SITE_NAME ]; then
#echo "Missing ENV SITE_NAME using generic public"
SITE_NAME=public
fi
D="/var/www/logs"
if [ ! -d "$D" ] ; then
mkdir -p $D
fi
D="/var/www/$SITE_NAME"
if [ ! -d "$D" ] ; then
mkdir -p $D
fi
cd $D
# Append container-friendly overrides to the Apache PHP configuration.
# Each setting is added as a new line at the end of php.ini, in order.
fixphp() {
#############
echo "Fixing php setting at /etc/php/7.2/apache2/php.ini"
local ini=/etc/php/7.2/apache2/php.ini
local setting
for setting in \
    "memory_limit = 128M" \
    "allow_url_fopen = On" \
    "upload_max_filesize = 12M" \
    "max_execution_time = 300" \
    "date.timezone = Europe/Rome" \
    "output_buffering = Off" \
    "post_max_size = 12M"
do
    echo "$setting" >> "$ini"
done
}
# Download ($2 = URL) and unzip ($1 = target archive path) the Joomla package
# into the site directory $D — but only if the archive is not already present,
# so an existing (installed) site is never overwritten on later boots.
# Reads globals: D (site root), SITE_NAME. Calls fixphp after extraction.
# Exits with status 2 when the download fails.
down() {
################
#Function Download and unzip package under public folder
#Should happen only on first boot, never again otherwise it will overwrite existing valid site!
F=$1
U=$2
if [ ! -f "$F" ]; then
echo "Downloading $F"
wget -q -O "$F" "$U"
if [ ! -f "$F" ]; then
echo "ERROR - could not download $F from $U do you have internet access?"
exit 2
fi
else
# Archive already present: assume this is not the first boot; do nothing.
return 0
fi
cd $D
echo "--ONLY FIRST BOOT-------------"
echo "creating a new fresh Joomla site: $SITE_NAME"
echo "Unziping $F - Please wait..."
# -o: overwrite without prompting; -q: quiet.
unzip -o -q $F
fixphp
}
#call function to download and unzip
down "${D}/joomla3.zip" "https://downloads.joomla.org/cms/joomla3/3-9-11/Joomla_3-9-11-Stable-Full_Package.zip?format=zip"
chown -R www-data:www-data $D
find $D -type d -print0 | xargs -0 chmod 775
find $D -type f -print0 | xargs -0 chmod 664
echo "Checking apache config"
apachectl -t
cleanup &
runapache
| true
|
01dfe8da44566faa9009ff9ea41cf6e7180f285b
|
Shell
|
trwnh/arch
|
/script/zfs/desktop.props
|
UTF-8
| 2,187
| 2.640625
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# run this in a live iso or something after zfs send|recv from remote backup
# Re-applies the full mountpoint/canmount/snapshot property layout for the
# restored rpool/bpool, mounts everything under /mnt, and regenerates the
# zpool cache files inside the target system. Order matters: pools are
# imported and unlocked first, boot environment next, then data sets.
# NOTE(review): USERNAME is not referenced in this script — presumably
# consumed by a later step; confirm before removing.
USERNAME=trwnh
zpool import -R /mnt rpool
zfs load-key rpool
zpool import -R /mnt bpool
#================================ boot environment = arch ================================#
zfs set mountpoint=/boot bpool
zfs set canmount=off mountpoint=none bpool/BOOT
zfs set canmount=off mountpoint=none rpool/ROOT
zfs set canmount=off mountpoint=none rpool/DATA
zfs set canmount=noauto mountpoint=/boot bpool/BOOT/arch
zfs set canmount=noauto mountpoint=/ rpool/ROOT/arch
zpool set bootfs=rpool/ROOT/arch rpool
# Root and boot are canmount=noauto, so mount them explicitly.
zfs mount rpool/ROOT/arch
zfs mount bpool/BOOT/arch
#================================ persistent data ================================#
zfs set mountpoint=/home rpool/DATA/home
zfs set mountpoint=/root rpool/DATA/home/root
zfs set mountpoint=none rpool/DATA/var
zfs set mountpoint=/var/lib canmount=off rpool/DATA/var/lib
zfs set com.sun:auto-snapshot=false rpool/DATA/var/lib/docker
zfs set com.sun:auto-snapshot=false rpool/DATA/var/lib/nfs
zfs set mountpoint=/var/log rpool/DATA/var/log
zfs set mountpoint=/var/spool rpool/DATA/var/spool
zfs set mountpoint=/var/snap rpool/DATA/var/snap
zfs set mountpoint=/var/games rpool/DATA/var/games
zfs set mountpoint=/var/www rpool/DATA/var/www
zfs set mountpoint=/srv rpool/DATA/srv
zfs set mountpoint=/opt rpool/DATA/opt
zfs set mountpoint=/usr canmount=off rpool/DATA/usr
#================================ cache and temporary files ================================#
zfs set mountpoint=/var/cache com.sun:auto-snapshot=false rpool/DATA/var/cache
zfs set mountpoint=/var/tmp com.sun:auto-snapshot=false rpool/DATA/var/tmp
zfs set mountpoint=/tmp com.sun:auto-snapshot=false rpool/DATA/tmp
# World-writable with sticky bit, as required for tmp directories.
chmod 1777 /mnt/tmp
chmod 1777 /mnt/var/tmp
zfs mount -a
mount -L "EFI" /mnt/boot/efi
# Regenerate the pool cache and copy it into the target system so the
# restored install imports its pools at boot.
zpool set cachefile=/etc/zfs/zpool.cache bpool
zpool set cachefile=/etc/zfs/zpool.cache rpool
mkdir -p /mnt/etc/zfs
rm /mnt/etc/zfs/zpool.cache
cp /etc/zfs/zpool.cache /mnt/etc/zfs/zpool.cache
| true
|
42bc497b5502f44de8e14740b887effdefe27983
|
Shell
|
mfvalin/dot-profile-setup
|
/bin/s.mkdir_onebyone
|
UTF-8
| 469
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
# s.mkdir_onebyone multi/level/directory/hierarchy
# Create a directory hierarchy one level at a time to avoid race conditions
# when multiple processes/threads try to create possibly common levels.
# Print the usage text (heredoc kept verbatim from the original).
show_help() {
cat << true
# s.mkdir_onebyone multi/level/directory/hierarchy
# create a directory hierarchy one level at a time to avoid race conditions
# when multiple processes/threads try to create possibly common levels
true
}
# Create each level of the hierarchy in $1, checking after every step.
# NOTE(review): the unquoted ${1//\// } also splits on whitespace, so path
# components containing spaces are not supported — confirm acceptable.
mkdir_onebyone() {
  typeset TOPDIR
  # Relative argument: build the hierarchy under the current directory.
  [[ "$1" == /* ]] || TOPDIR=.
  for i in ${1//\// }
  do
    TOPDIR="${TOPDIR}/${i}"
    [[ -d ${TOPDIR} ]] || mkdir -p "${TOPDIR}"
    [[ -d ${TOPDIR} ]] || echo "ERROR: creation of ${TOPDIR} failed"
  done
}
if [[ "$1" = --help || "$1" = -h ]]; then
  show_help
  # BUGFIX: previously the script fell through after printing help and went
  # on to create a literal "--help" directory.
  exit 0
fi
mkdir_onebyone "$1"
| true
|
70103d70f1acfa0ce4186d4264291fdbe051799f
|
Shell
|
pelayo717/PAUTLEN
|
/PAUTLEN1/PRODUCCION.sh
|
UTF-8
| 693
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# Build helper for the PAUTLEN compiler project.
# $1 - action: CREAR_LIBRERIA | EJECUTABLE_MAIN | EJECUTABLE_ASM | DESTRUIR
# $2 - base name of the source file (without extension)
# All $2 expansions are quoted so names with spaces do not word-split.
if [ $# = 0 ]; then
echo "ejemplo => "
echo "sh PRODUCCION.sh CREAR_LIBRERIA generacion"
echo "sh PRODUCCION.sh EJECUTABLE_MAIN main_s0_1"
echo "sh PRODUCCION.sh EJECUTABLE_ASM ejemploasm"
fi
# Compile $2.c into the object file $2.o (the code-generation library).
if [ "$1" = "CREAR_LIBRERIA" ]; then
echo "Creando libreria $2.o ..."
gcc -c "$2.c"
fi
# Build a test main: compile, link against generacion.o, drop the object.
if [ "$1" = "EJECUTABLE_MAIN" ]; then
echo "Creando main $2 ..."
gcc -c "$2.c"
gcc -o "$2" "$2.c" generacion.o
rm "$2.o"
fi
# Assemble $2.asm (32-bit ELF) and link it with the runtime library.
if [ "$1" = "EJECUTABLE_ASM" ]; then
echo "Creando objeto $2.o ..."
nasm -g -o "$2.o" -f elf32 "$2.asm"
echo "Enlazando para crear $2"
gcc -m32 -o "$2" "$2.o" alfalib.o
echo "Eliminando el objeto $2.o ..."
rm "$2.o"
fi
# Remove a previously built executable.
if [ "$1" = "DESTRUIR" ]; then
echo "Destruyendo ejecutable $2"
rm "$2"
fi
| true
|
51aef7ee7230a3948638eb8779552c41c6c594c0
|
Shell
|
jayanty/SkiffOS
|
/configs/apple/macbook/root_overlay/usr/lib/systemd/system-sleep/01-apple-macbook-brcmfmac
|
UTF-8
| 366
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# systemd system-sleep hook: cycle the BCM43602 WiFi driver (brcmfmac)
# around hibernation, restoring network controller functionality after
# wakeup. $1 is the phase ("pre"/"post"); $2 is the sleep operation.
# Only the hibernate operation needs the driver unloaded and reloaded.
[ "$2" = "hibernate" ] || exit 0
case "$1" in
  pre)
    # Unload only when present; rmmod would fail on a missing module.
    if lsmod | grep -q brcmfmac; then
      rmmod brcmfmac
    fi
    ;;
  post)
    modprobe brcmfmac
    ;;
esac
| true
|
44ee289404d1067d479728122c540eb3bc42a2e7
|
Shell
|
filmil/gotopt2
|
/cmd/gotopt2/gotopt2_test.sh
|
UTF-8
| 900
| 3.984375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# Args:
# $1: the name of the gotopt2 binary to execute: this is a bazel quirk.
# <rest>: the flag arguments to parse, see the BUILD rule for the passed args.
# This test script requires that the name of the "gotopt2" binary be the first
# arg in use.
readonly GOTOPT2="${1}"
shift
# Feed the flag schema to gotopt2 on stdin; it emits shell assignments
# (gotopt2_<name>=<value>) for each parsed flag.
readonly output=$("${GOTOPT2}" "${@}" <<EOF
flags:
- name: "foo"
type: string
default: "something"
- name: "bar"
type: int
default: 42
- name: "baz"
type: bool
default: true
EOF
)
# Evaluate the output of the call to gotopt2, shell vars assignment is here.
eval "${output}"
# Quick check of the result.
# BUGFIX: the failure messages below previously echoed the undefined
# ${gotopt_foo}/${gotopt_bar}/${gotopt_baz} (missing "2"), so the actual
# offending value was never shown.
if [[ "${gotopt2_foo}" != "bar" ]]; then
echo "Want: bar; got: '${gotopt2_foo}'"
exit 1
fi
if [[ "${gotopt2_bar}" != "42" ]]; then
echo "Want: 42; got: '${gotopt2_bar}'"
exit 1
fi
if [[ "${gotopt2_baz}" != "true" ]]; then
echo "Want: true; got: '${gotopt2_baz}'"
exit 1
fi
| true
|
a3c79115fd8d729cd905e09cd38fb9a1b15d73b2
|
Shell
|
faximilie/pixel-lock
|
/lock.sh
|
UTF-8
| 1,720
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# How to use:
#
# Just place a .lock_overlay.png in your home folder to overlay whatever you want
# Nicked from: https://gist.github.com/x421/ba39ca927c88b2e0ae5c and modified to suit me (and also write to /tmp/)
#TODO:
#   Parse arguments
#   Write documentation
# The lock overlay location
lock_overlay="/home/kitty/.lock_overlay.png"
# The amount to pixelate the background by
pixelation_ammount=10
# The total x space of the displays
totalx=0
# Calculate the scale down amount as a %
((scale_down=100/pixelation_ammount))
#calculate the scale up amount as a %
((scale_up=100*pixelation_ammount))
#Get list of connected displays
displays=$(xrandr | grep -w connected)
# Take a screenshot
scrot /tmp/before_blur.png
# Loop through displays and add up their X values
while read -r display ; do
    #Use awk to seperate the x values from the rest of the command output
    ((totalx += $(awk 'BEGIN {FS="[ ,x]"}; {print $4}' <<< "$display")))
done <<< "$displays"
# Get the lock overlay image's x size (path quoted for safety)
lock_overlay_x=$(identify "$lock_overlay" | awk 'BEGIN {FS="[ ,x]"}; {print $3}')
#scale screenshot down, and then back up to achieve the pixel grid effect
convert -scale $scale_down% -scale $scale_up% /tmp/before_blur.png /tmp/lockbg.png
# Calculate the lock overlay offset to center it
((offset=$totalx/2 - $lock_overlay_x/2))
# Attach the lock overlay to the pixelated screenshot in the center.
# CONSISTENCY FIX: use $lock_overlay here — the original hardcoded
# ~/.lock_overlay.png, diverging from the path configured above.
convert -gravity west -geometry +$offset -composite /tmp/lockbg.png "$lock_overlay" /tmp/lockfinal.png
# Remove non-needed image files
rm /tmp/before_blur.png /tmp/lockbg.png
# Call i3lock with the final background
i3lock -u -i /tmp/lockfinal.png
# Remove the final lock screen
rm /tmp/lockfinal.png
| true
|
b5d4e40258c5b86caf36d37762019e937eab53b3
|
Shell
|
MarcoDiS/Athaliana_simulations
|
/Analysis/Utilities/chromatin_states_haploid_genome_with_locations.sh
|
UTF-8
| 3,305
| 3.09375
| 3
|
[] |
no_license
|
# Re-bin per-bead chromatin state assignments to a coarser resolution
# (BinWidth beads per bin) for chromosomes 1-5, writing majority-state calls
# per bin to $outfile. Ties are flagged as "not-assigned".
resolution=3000 # bp per bead
BinWidth=10 # Number of beads per bin
# Human-readable bin size, e.g. "30kb" for 3000bp * 10 beads.
reskb=$(awk -v res=${resolution} -v bw=${BinWidth} 'BEGIN{print int(res*bw/1000)"kb"}')
infile=chromatin_states_haploid_genome_with_locations.txt
outfile=chromatin_states_haploid_genome_with_locations_at_${reskb}_new.txt
rm -fr ${outfile}
#head -3 chromatin_states_haploid_genome_with_locations.txt
#tail -3 chromatin_states_haploid_genome_with_locations.txt
# Running bin counter carried across chromosomes (global bin numbering).
stopbin=-1
for chr in chr1 chr2 chr3 chr4 chr5 ;
do
# Preview first/last entries for this chromosome (NOR rows excluded).
cat ${infile} | grep ${chr} | grep -v NOR | head -3
cat ${infile} | grep ${chr} | grep -v NOR | tail -3
echo
# Min/max bin indices for this chromosome in the Hi-C contact matrix.
totbins=$(awk -v chr=${chr} 'BEGIN{min=100000}{if($1==chr && $3==chr){if($2>max) max=$2; if($2<min){min=$2}; if($4>max) max=$4; if($4<min){min=$4}}}END{print min" - "max}' /home/devel/mstefano/cumulative_normalized_HiC_matrices_A_thaliana/cumulative_norm_HiC_matrices_full_at_30kb.tab)
echo "Bins in Hi-C maps for ${chr} = ${totbins}"
# Number of beads for this chromosome -> global bin range [startbin, stopbin].
nbins=$(cat ${infile} | grep -v NOR | grep ${chr} | wc -l)
startbin=$((${stopbin}+1))
stopbin=$(awk -v s1=${stopbin} -v nb=${nbins} -v bw=${BinWidth} 'BEGIN{print s1+int(nb/bw)+1}')
#echo "Nbins = ${nbins}"
# NOTE(review): chr3 gets one bin fewer — presumably to match the Hi-C map
# bin count for that chromosome; confirm.
if [[ ${chr} == "chr3" ]];
then
stopbin=$((${stopbin}-1))
fi
echo "Bins in state assignment for ${chr} = ${startbin} - ${stopbin}"
echo
for bin in $(seq ${startbin} 1 ${stopbin});
do
# Genomic start/stop coordinates (bp) covered by this bin.
start=$(awk -v sb=${startbin} -v bw=${BinWidth} -v b=${bin} -v res=${resolution} 'BEGIN{print (b-sb)*res*bw}')
stop=$(awk -v sb=${startbin} -v bw=${BinWidth} -v b=${bin} -v res=${resolution} 'BEGIN{print (b-sb+1)*bw*res}')
# Majority chromatin state among the beads inside [start, stop].
state=$(sed -e "s/:/ /g" -e "s/-/ /g" ${infile} | grep -v NOR | grep ${chr} | awk -v s1=${start} -v s2=${stop} 'BEGIN{flag=0}{if($2==s1){flag=1}; if(flag==1){print $0}; if($3==s2){flag=0}}' | awk '{h[$NF]++}END{for(i in h) print i,h[i]}' | sort -g -k2n | tail -1 | awk '{print $1}')
# Tie detection: "NOT-ASSIGNED" when two or more states share the max count.
mask=$(sed -e "s/:/ /g" -e "s/-/ /g" ${infile} | grep -v NOR | grep ${chr} | awk -v s1=${start} -v s2=${stop} 'BEGIN{flag=0}{if($2==s1){flag=1}; if(flag==1){print $0}; if($3==s2){flag=0}}' | awk '{h[$NF]++}END{for(i in h) print i,h[i]}' | sort -g -k2n | tac | awk '{h[$2]++; if($2>=max){max=$2;if(states==""){states=$1}else{states=states"_"$1}}}END{if(h[max]>1){print "NOT-ASSIGNED"}else{print "OK"}}')
if [[ $mask == "NOT-ASSIGNED" ]];
then
state="not-assigned"
fi
# Same tie check, but listing the tied states ("CHECK_a_b") for the log.
flag=$(sed -e "s/:/ /g" -e "s/-/ /g" ${infile} | grep -v NOR | grep ${chr} | awk -v s1=${start} -v s2=${stop} 'BEGIN{flag=0}{if($2==s1){flag=1}; if(flag==1){print $0}; if($3==s2){flag=0}}' | awk '{h[$NF]++}END{for(i in h) print i,h[i]}' | sort -g -k2n | tac | awk '{h[$2]++; if($2>=max){max=$2;if(states==""){states=$1}else{states=states"_"$1}}}END{if(h[max]>1){print "CHECK_"states}else{print "OK"}}')
# Per-state bead counts for this bin, written as commented diagnostics.
sed -e "s/:/ /g" -e "s/-/ /g" ${infile} | grep -v NOR | grep ${chr} | awk -v s1=${start} -v s2=${stop} 'BEGIN{flag=0}{if($2==s1){flag=1}; if(flag==1){print $0}; if($3==s2){flag=0}}' | awk -v b=${bin} -v f=${flag} '{h[$NF]++}END{for(i in h) print "#"f,b,i,h[i]}' | sort -g -k4n >> ${outfile}
echo "${chr}:${start}-${stop} ${bin} ${state} ${mask}"
echo "${chr}:${start}-${stop} ${bin} ${state}" >> ${outfile}
done
# Preview the re-binned output for this chromosome.
cat ${outfile} | grep ${chr} | head -3
cat ${outfile} | grep ${chr} | tail -3
echo
done
| true
|
ed5d56dc80adf38aa370356d18ace1245e065a9f
|
Shell
|
Aephir/HomeAssistant2
|
/shell_scripts/updatehs.sh
|
UTF-8
| 2,028
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
## DESCRIPTION: Updates HA in a virtual environment
## AUTHOR: Jon Bullen
## Modified from https://gist.github.com/sytone/ed33774edc25846782913319bfdb7df6
declare -r SCRIPT_NAME=$(basename "$BASH_SOURCE" .sh)
## exit the shell(default status code: 1) after printing the message to stderr
bail() {
echo -ne "$1" >&2
exit ${2-1}
}
## help message
declare -r HELP_MSG="Usage: $SCRIPT_NAME [OPTION]... [ARG]...
-h display this help and exit
-u Update HA with latest version.
Example:
sudo bash ./updatehs.sh -u
"
## print the usage and exit the shell(default status code: 2)
# An optional leading numeric argument overrides the exit status; any further
# argument is prepended to the help text as an error message.
usage() {
declare status=2
if [[ "$1" =~ ^[0-9]+$ ]]; then
status=$1
shift
fi
bail "${1}$HELP_MSG" $status
}
# Parse options: -h prints help (exit 0); -u performs the update.
while getopts ":hu" opt; do
case $opt in
h)
usage 0
;;
\?)
usage "Invalid option: -$OPTARG \n"
;;
u)
# Stop Home Assistant, upgrade it inside its virtualenv as the
# homeassistant user, restart, then poll the API until it responds.
echo "Stopping HA..."
sudo systemctl stop home-assistant@homeassistant.service
echo "Updating HA"
sudo su -s /bin/bash homeassistant <<EOF
source /srv/homeassistant/bin/activate
pip3 install --upgrade homeassistant
EOF
echo "Starting HA..."
sudo systemctl start home-assistant@homeassistant.service
echo "Checking to see if up (Localhost must be in http trusted_networks)"
start=$(date '+%s')
# Poll every 10s until the REST API reports "API running".
until curl --silent --show-error --connect-timeout 1 -X GET -H "Content-Type: application/json" -k https://127.0.0.1:8123/api/ | grep -q 'API running'; do
date '+[%Y-%m-%d %H:%M:%S] --- Home Assistant is starting, please wait...'
sleep 10
done
# Report how long startup took, in seconds.
dur=$(($(date '+%s') - start))
echo -e "\e[00;32mHome Assistant is ready! ($dur second delay)\e[00m"
;;
esac
done
#shift $(($OPTIND - 1))
#[[ "$#" -lt 1 ]] && usage "Too few arguments\n"
#==========MAIN CODE BELOW==========
| true
|
60e67283da2cb9d264efb088a20722f6b0902ebe
|
Shell
|
LipezinPY/DiretoryBruter
|
/brute.sh
|
UTF-8
| 400
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Directory brute-forcer: for every word in lista.txt, request $1/<word>/ and
# report paths that answer HTTP 200. $1 is the base URL.
clear
cat banner.txt | lolcat
echo " "
cat creditos.txt | lolcat
# Read the wordlist line by line; the original `for ... $(cat lista.txt)`
# word-split on all whitespace and mangled entries containing spaces.
while IFS= read -r palavra
do
# Quote the URL so base/word values with special characters stay intact;
# -w "%{http_code}" yields just the status code.
resposta=$(curl -s -o /dev/null -w "%{http_code}" "$1/$palavra/")
# Quoted: an empty response previously made `[ $resposta == "200" ]` a
# syntax error instead of a clean mismatch.
if [ "$resposta" == "200" ]
then
echo "......."
echo "......."
echo "......."
echo "......."
echo "DIRETORIO ENCONTRADO NO SERVIDOR : $1/$palavra/"
fi
done < lista.txt
echo -e '\e[92;1mORBIGADO POR USAR MINHA TOOL :D \e[m'
| true
|
eecc85d031c9a86ff36c2e17c1836f37cc4d4bc2
|
Shell
|
cdrobey/dotfiles-v1.0
|
/install/bootstrap_new_mac.sh
|
UTF-8
| 2,562
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Run this on a stock Mac to bootstrap it with Nate's dotfiles and customizations
#
# BUGFIX: tilde does not expand inside quotes, so "~/Projects" was a literal
# path; use $HOME instead.
PROJECTS="$HOME/Projects"
# Ask for the administrator password upfront
echo "Asking for your sudo password upfront..."
sudo -v
# Keep-alive: update existing `sudo` time stamp until this has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# Install homebrew and git (xcode tools)
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
brew doctor
# Create our code directory (-p: no error if it exists, create parents)
[[ -d "$PROJECTS" ]] || mkdir -p "$PROJECTS"
# Install HomeBrew apps
brew bundle --file=~/.homesick/repos/dotfiles/Brewfile
brew cask cleanup
# Pin Ruby versions so I don't lose all my gems on upgrade.
brew pin ruby
brew pin ruby-build
brew pin rbenv
# Get Homesick for dotfiles
homeshick clone cdrobey/dotfiles-new
homeshick symlink dotfiles-new
# Install some Puppet and ruby tools
#gem install r10k puppet-lint rubocop
# Install vim Plug
#curl -fLo ~/.vim/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
## Get Vim plugins
#vim +PlugInstall +qall
## Install Oh My ZSH and change shell to zsh
#git clone https://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh
#echo "Changing ${USER}'s shell to Brew's zsh..."
sudo dscl . -create "/Users/$USER" UserShell /usr/local/bin/zsh
# Default to oh-my-zsh's conventional custom dir when ZSH_CUSTOM is unset,
# so the clones below do not land in "/plugins/..." at the filesystem root.
ZSH_CUSTOM="${ZSH_CUSTOM:-$HOME/.oh-my-zsh/custom}"
git clone https://github.com/zsh-users/zsh-autosuggestions.git "$ZSH_CUSTOM/plugins/zsh-autosuggestions"
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git "$ZSH_CUSTOM/plugins/zsh-syntax-highlighting"
# Get fonts
#echo "Downloading Inconsolata fonts to ~/Library/Fonts/"
#wget -P ~/Library/Fonts/ https://github.com/gabrielelana/awesome-terminal-fonts/raw/patching-strategy/patched/Inconsolata%2BAwesome.ttf
#wget -P ~/Library/Fonts/ https://github.com/powerline/fonts/blob/master/Meslo%20Slashed/Meslo%20LG%20M%20Regular%20for%20Powerline.ttf
#
# Get iTerm gruvbox colors
echo "Installing GruvBox colors for iTerm2"
wget https://github.com/morhetz/gruvbox-contrib/raw/master/iterm2/gruvbox-dark.itermcolors
open gruvbox-dark.itermcolors
rm gruvbox-dark.itermcolors
# Run MACOS config script
#echo "Configuring a bunch of MACOS things"
#sh ~/.homesick/repos/dotfiles/home/.bin/macos.sh
#
#echo
#echo "Finished!"
echo
echo "All that's left is to configure iTerm2: https://github.com/chris.roberson/dotfiles#colors-and-fonts"
echo
read -r -p "Also, you should reboot. Do that now? [Y/n]: " answer
if [[ $answer =~ ^[Yy]$ ]]; then
  sudo reboot
fi
| true
|
c86676005a48466c83a198c96f042e9c8bdeb685
|
Shell
|
javedkansi/java-1
|
/pkg/etc/init.d/wavefront-proxy
|
UTF-8
| 2,789
| 3.625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# chkconfig: - 99 00
# description: Wavefront Proxy
### BEGIN INIT INFO
# Provides: wavefront-proxy
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Required-Start:
# Required-Stop:
### END INIT INFO
#############################
# SysV init script for the Wavefront proxy. The Java push agent is run as a
# daemon through jsvc; every path and JVM option below can be overridden via
# /etc/sysconfig/<service-name>.
service_name=$(basename "$0")
# Source custom settings
sysconfig="/etc/sysconfig/$service_name"
# shellcheck source=/dev/null
[[ -f "$sysconfig" ]] && . $sysconfig
desc=${DESC:-Wavefront Proxy}
user="wavefront"
wavefront_dir="/opt/wavefront"
proxy_dir=${PROXY_DIR:-$wavefront_dir/wavefront-proxy}
# The agent runs under the JRE bundled inside the proxy install.
export JAVA_HOME="$proxy_dir/jre"
conf_file=${CONF_FILE:-$proxy_dir/conf/wavefront.conf}
log_file=${LOG_FILE:-/var/log/wavefront.log}
daemon_log_file=${DAEMON_LOG_FILE:-/var/log/wavefront-daemon.log}
# Agent stdout and stderr share the same log file.
err_file=$log_file
pid_file=${PID_FILE:-/var/run/$service_name.pid}
agent_jar=${AGENT_JAR:-$proxy_dir/bin/wavefront-push-agent.jar}
class="com.wavefront.agent.PushAgentDaemon"
app_args=${APP_ARGS:--f $conf_file}
java_args=${JAVA_ARGS:--XX:+AggressiveHeap}
# Legacy support for overrding JVM args in wavefront_proxy_launch.conf.
proxy_launch_conf="/opt/wavefront/wavefront-proxy/conf/wavefront_proxy_launch.conf"
if [[ -r $proxy_launch_conf ]]; then
# Drop comment lines and squash the rest onto one whitespace-trimmed line.
replacement_java_args=$(grep -ve '[[:space:]]*#' $proxy_launch_conf | tr '\n' ' ' | sed -e 's/^[[:space:]]*//' | sed -e 's/[[:space:]]*$//')
# Only honor the legacy file when it differs from the built-in default.
if [[ "$replacement_java_args" != "-XX:+AggressiveHeap" ]]; then
>&2 echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
>&2 echo "Using wavefront_proxy_launch.conf for JAVA_ARGS, which will override anything specified in $sysconfig."
>&2 echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
java_args=$replacement_java_args
fi
fi
jsvc=$proxy_dir/bin/jsvc
# Start the agent daemon under jsvc, or stop it when called with "-stop".
#   $1 - optional extra jsvc flag ("-stop"); empty to start.
# jsvc's own output goes to $daemon_log_file; the agent logs to $log_file.
jsvc_exec()
{
cd "$(dirname "$agent_jar")"
# Ensure the log file exists and is writable by the service user.
[[ ! -f "$log_file" ]] && touch "$log_file" && chown "$user":"$user" "$log_file"
# shellcheck disable=SC2086
# $java_args and $app_args are intentionally unquoted so they word-split
# into individual jsvc/JVM arguments.
$jsvc \
-user $user \
-home $JAVA_HOME \
-cp $agent_jar \
$java_args \
-outfile $log_file \
-errfile $err_file \
-pidfile $pid_file \
-wait 20 \
-debug \
$1 \
$class \
$app_args &> $daemon_log_file
if [[ $? -ne 0 ]]; then
echo "There was a problem, see $err_file and $daemon_log_file"
fi
}
# Standard init-script verbs; "status" is keyed purely off the pid file.
case "$1" in
start)
echo "Starting $desc"
jsvc_exec
echo "Done"
;;
status)
if [[ -f "$pid_file" ]]; then
echo "$desc is running (PID $(cat "$pid_file"))"
else
echo "$desc is not running."
exit 3
fi
;;
stop)
echo "Stopping $desc"
jsvc_exec "-stop"
echo "Done"
;;
restart)
if [[ -f "$pid_file" ]]; then
echo "Stopping $desc"
jsvc_exec "-stop"
fi
echo "Starting $desc"
jsvc_exec
echo "Done"
;;
*)
echo "Usage: $0 {status | start | stop | restart}"
exit 1
esac
| true
|
b198b84c819759685cbc0ae1dd27bb343cdb297c
|
Shell
|
juanjosedq/Taller_LInux_Git
|
/Ejemplos_clase3/ejemplo11.sh
|
UTF-8
| 250
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a message describing how many children you have, based on the
# hard-coded count below.
Numero_hijos=3

if (( Numero_hijos == 1 )); then
  echo "Tienes un unico hijo"
elif (( Numero_hijos == 2 )); then
  echo "Tienes dos hijos"
elif (( Numero_hijos >= 3 )); then
  echo "3 o más hijos"
else
  echo "No tienes ningún hijo"
fi
| true
|
698ff0c7d34758a7a0a1a499d4c57945214d71af
|
Shell
|
face2wind/LinuxWorkEnvironment
|
/homeScripts/install.sh
|
UTF-8
| 847
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh
# Symlink this repository's .bashrc and .profile into $HOME, keeping a
# one-time backup (<file>.bak) of whatever was there before.

echo "============= install home scripts start ! ================"

# Directory containing this script. The original computed `pwd -P`/$0,
# which produces a wrong path whenever the script is invoked through an
# absolute path or from another directory.
script_dir=$(cd "$(dirname "$0")" && pwd -P)
bashrcPath="$script_dir/.bashrc"
profilePath="$script_dir/.profile"

cd ~ || exit 1

if [ -f ".profile" -o -L ".profile" ] ; then
    if [ -f ".profile.bak" -o -L ".profile.bak" ] ; then
        # A backup already exists from a previous run: keep it.
        echo "remove old .profile"
        rm -rf .profile
    else
        echo "backup old .profile"
        mv .profile .profile.bak
    fi
fi

if [ -f ".bashrc" -o -L ".bashrc" ] ; then
    if [ -f ".bashrc.bak" -o -L ".bashrc.bak" ] ; then
        echo "remove old .bashrc"
        rm -rf .bashrc
    else
        echo "backup old .bashrc"
        mv .bashrc .bashrc.bak
    fi
fi

# Quote the link targets so paths with spaces survive intact.
echo "ln -s $bashrcPath ."
ln -s "$bashrcPath" .
echo "ln -s $profilePath ."
ln -s "$profilePath" .

echo "============= install home scripts complete ! ============="
echo ""
| true
|
8e8d3f159a705d6105b14c64063e43711fe5e2ac
|
Shell
|
niojuju/ibrdtn
|
/daemon/scripts/dtndrive.sh
|
UTF-8
| 1,161
| 3.96875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Watch a mount directory for removable media carrying a ".dtndrive"
# descriptor and register/deregister them as file storage with a local
# dtnd via its management console on TCP port 4550.
if [ -z ${1} ]; then
echo "please specify the mount directory as first parameter"
exit 1;
fi
MOUNTDIR=${1}
# Loop forever; each create/delete event under the mount dir triggers a rescan.
while [ 1 == 1 ]; do
inotifywait -e create -e delete ${MOUNTDIR}
# A ".dtndrive" file at depth <= 2 marks a directory to be exported.
FILES=`find ${MOUNTDIR} -maxdepth 2 -type f -name '.dtndrive'`
for DESC in $FILES; do
DTNPATH=`dirname ${DESC}`
echo "dtn drive directory found: ${DTNPATH}"
# gen lock file
LOCK="/tmp/`basename ${DTNPATH}.dtndrive`"
# check if the path is already mounted
if [ ! -e ${LOCK} ]; then
# The lock is a copy of the descriptor plus the resolved DTNPATH, so
# the removal pass below can re-load EID/STORAGE/DTNPATH from it.
cp ${DESC} ${LOCK}
echo "DTNPATH=\"${DTNPATH}\"" >> ${LOCK}
# load configuration
# (the descriptor is shell syntax defining at least EID and STORAGE)
. ${DESC}
# announce node to the dtnd
echo -e "protocol management\nconnection ${EID} file add file://${DTNPATH}/${STORAGE}" | netcat localhost 4550 > /dev/null
fi
done
# check if some paths are gone
LOCKS=`find /tmp -maxdepth 1 -type f -name '*.dtndrive'`
for L in ${LOCKS}; do
echo "check lock file: ${L}"
. ${L}
if [ ! -d ${DTNPATH} ]; then
echo "medium removed"
rm ${L}
# announce node to the dtnd
echo -e "protocol management\nconnection ${EID} file del file://${DTNPATH}/${STORAGE}" | netcat localhost 4550 > /dev/null
fi
done
done
| true
|
6bbbeabdac5371e94b8faa72dcf16fdd0c1208a9
|
Shell
|
ryotako/dotfiles
|
/.bashrc
|
UTF-8
| 607
| 2.59375
| 3
|
[] |
no_license
|
# Path
export PATH="$PATH:$HOME/bin"
# Golang
export GOPATH=$HOME/go
# NOTE(review): $GOROOT is never set in this file — confirm it comes from
# the environment, otherwise this prepends an empty "/bin" entry to PATH.
export PATH=$GOPATH/bin:$GOROOT/bin:$PATH
# Open man pages in vim (disabled)
# export MANPAGER="col -b -x|vim -R -c 'set ft=man nolist nomod noma' -"
#Prompt
PS1=' \W '
# ** matches recursively; typing a bare directory name cd's into it.
shopt -s globstar
shopt -s autocd
# Basic Command
alias c='clear'
alias q='exit'
alias r='source ~/.bashrc'
alias rl="exec bash --login"
alias t='type'
alias v='vim'
alias vi='vim'
alias w='which'
#git
alias gl='git log --oneline'
#Aliases for Vim
alias vib="vim ~/.bashrc"
alias viv="vim ~/.vimrc"
# Machine-local overrides load last so they can shadow anything above.
if [ -e "${HOME}/.bashrc_local" ]; then
source "${HOME}/.bashrc_local"
fi
| true
|
d38a2692fcd6bd7804ba3c684dfd89a388bd4934
|
Shell
|
rezid/script_bash
|
/basic/input_prompt.sh
|
UTF-8
| 170
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# ask for username and password
# -r stops backslashes in the input from being interpreted;
# -s keeps the password from echoing as it is typed.
read -rp 'Username: ' uservar
read -rsp 'Password: ' passvar
echo
# Quote the expansion so glob characters or runs of spaces in the
# username cannot be mangled by word-splitting/pathname expansion.
echo "Thank you $uservar, we have your login details"
| true
|
99a73d864d463def92239643a6a134cf4e63f54a
|
Shell
|
Mati20041/docker-swarm-nodes-vagrant
|
/install-docker.sh
|
UTF-8
| 822
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a host with docker, docker-machine 0.12.2 and
# docker-compose 1.16.1, then add the "ubuntu" user to the docker group.

sudo apt-get update
sudo apt-get install curl -y

#install docker
echo 'installing docker'
sudo curl -fsSL get.docker.com -o get-docker.sh
sudo sh get-docker.sh

#install docker machine
echo 'installing docker-machine'
curl -L "https://github.com/docker/machine/releases/download/v0.12.2/docker-machine-$(uname -s)-$(uname -m)" >/tmp/docker-machine &&
chmod +x /tmp/docker-machine &&
sudo cp /tmp/docker-machine /usr/local/bin/docker-machine

#install docker compose
echo 'installing docker-compose'
# The original used `sudo curl ... > /usr/local/bin/docker-compose`: the
# redirection is performed by the unprivileged caller shell, so it fails
# with "permission denied". Write through `sudo tee` instead so the
# privileged process does the write.
curl -L "https://github.com/docker/compose/releases/download/1.16.1/docker-compose-$(uname -s)-$(uname -m)" | sudo tee /usr/local/bin/docker-compose > /dev/null
sudo chmod +x /usr/local/bin/docker-compose

#add vagrant user to docker group
# NOTE(review): the message says "ubuntu user" but the comment above says
# vagrant — confirm which account this image actually uses.
echo 'making ubuntu user in docker group'
sudo usermod -aG docker ubuntu
| true
|
939bad384de33c1cb161efbc24bdbc5a61549f50
|
Shell
|
Grassroots-gai/learn_test
|
/cje_tf/scripts/run_docker_unittest.sh
|
UTF-8
| 6,192
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash -x
# CI driver: configure a TensorFlow checkout and run its bazel unit tests,
# either with MKL (default) or pure Eigen (RUN_EIGEN=true). All knobs come
# from the environment; defaults are filled in below.
env
echo "http_proxy is $http_proxy"
echo "https_proxy is $https_proxy"
echo "HTTP_PROXY is $HTTP_PROXY"
# NOTE(review): the first expansion below was probably meant to be the
# literal text "HTTPS_PROXY" — as written it prints the value twice.
echo "$HTTPS_PROXY is$HTTPS_PROXY"
if [ -z ${WORKSPACE} ] ; then
WORKSPACE=/workspace
fi
if [ -z ${TENSORFLOW_DIR} ] ; then
TENSORFLOW_DIR="tensorflow"
fi
if [ -z ${TENSORFLOW_BRANCH} ] ; then
TENSORFLOW_BRANCH="v1.8.0"
fi
# Branch names may contain slashes; flatten them for use in file names.
TENSORFLOW_BRANCH="${TENSORFLOW_BRANCH//\//_}"
if [ -z ${TARGET_PLATFORM} ] ; then
TARGET_PLATFORM="avx"
fi
if [ -z ${PYTHON} ] ; then
PYTHON="2.7"
fi
if [ -z ${RUN_EIGEN} ] ; then
RUN_EIGEN=false
fi
if [ -z ${UNITTESTLOG} ] ; then
UNITTESTLOG="${WORKSPACE}/unit_test_${TENSORFLOW_BRANCH}_${TARGET_PLATFORM}_Python_${PYTHON}.log"
fi
if [ -z ${DISABLE_MKL} ] ; then
DISABLE_MKL=false
fi
if [ -z ${DISTR} ] ; then
DISTR="ubuntu"
fi
if [ -z ${OPTIONAL_BAZEL_TEST_OPTIONS} ] ; then
OPTIONAL_BAZEL_TEST_OPTIONS=""
fi
if [ -z ${MR_NUMBER} ] ; then
MR_NUMBER=""
fi
# Echo the effective configuration into the CI log.
echo WORKSPACE=$WORKSPACE
echo TENSORFLOW_DIR=$TENSORFLOW_DIR
echo TARGET_PLATFORM=$TARGET_PLATFORM
echo TENSORFLOW_BRANCH=$TENSORFLOW_BRANCH
echo PYTHON=$PYTHON
echo RUN_EIGEN=$RUN_EIGEN
echo UNITTESTLOG=$UNITTESTLOG
echo DATASET_LOCATION=$DATASET_LOCATION
echo DISTR=$DISTR
echo TEST_TO_SKIP=${TEST_TO_SKIP}
echo BAZEL_VERSION=${BAZEL_VERSION}
echo OPTIONAL_BAZEL_TEST_OPTIONS=${OPTIONAL_BAZEL_TEST_OPTIONS}
echo http_proxy=${http_proxy}
echo https_proxy=${https_proxy}
echo MR_NUMBER=${MR_NUMBER}
# debug
# JAVA_HOME / LD_LIBRARY_PATH inherited from the image can break the bazel
# build, so clear them (the greps just log the before/after state).
env | grep "JAVA_HOME"
unset JAVA_HOME
env | grep "JAVA_HOME"
env | grep "LD_LIBRARY_PATH"
unset LD_LIBRARY_PATH
env | grep "LD_LIBRARY_PATH"
env | grep -i proxy
# setting proxy again just to ensure....
export http_proxy=http://proxy-chain.intel.com:911
export https_proxy=http://proxy-chain.intel.com:911
echo "http_proxy is $http_proxy"
echo "https_proxy is $https_proxy"
# Convert the list of tests to skip to a space separated list
IFS=';' read -ra TEST <<< "$TEST_TO_SKIP"
TEST_TO_SKIP=""
for i in "${TEST[@]}"; do
TEST_TO_SKIP="${TEST_TO_SKIP} ${i}"
done
echo TEST_TO_SKIP=${TEST_TO_SKIP}
# check python
# Pick the pip invocation that matches the interpreter on PATH.
p_version=$(python -V 2>&1)
case "$p_version" in
*3.4*)
PIP="python3.4 -m pip"
;;
*3.5*)
PIP="python3.5 -m pip"
;;
*3.6*)
PIP="python3.6 -m pip"
;;
*3.7*)
PIP="python3.7 -m pip"
;;
*)
PIP="pip"
;;
esac
echo ${PIP}
# check if bazel is installed
`which bazel`
if [ $? == 1 ] || [ ! -z ${BAZEL_VERSION} ]; then
echo "bazel not found or installing a bazel version ${BAZEL_VERSION}"
# The apt/yum alternation makes this work on both ubuntu and centos images.
apt-get clean; apt-get update -y || yum clean all; yum update -y
apt-get install wget unzip zip openjdk-8-jdk -y || yum install wget unzip zip openjdk-8-djk -y
wget https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64
echo $JAVA_HOME
chmod 775 bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh
./bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh --prefix=/usr/local/
${PIP} install future
fi
# now we always checkout tensorflow under workspace
WORK_DIR="${WORKSPACE}/${TENSORFLOW_DIR}"
GCC_VERSION=""
if [ "${DISTR}" == "centos" ]; then
# CentOS uses the gcc4.8-specific build-option file selected below.
GCC_VERSION="-gcc4.8"
yum install -y java-1.8.0-openjdk-devel.x86_64
export JAVA_HOME=/usr/lib/jvm/java-1.8.0
echo $JAVA_HOME
if [ "${PYTHON}" == "3.5" ]; then
PIP="python3.5 -m pip"
elif [ "${PYTHON}" == "3.6" ]; then
PIP="python3.6 -m pip"
fi
echo ${PIP}
${PIP} install tensorflow_estimator
fi
${PIP} install portpicker
${PIP} install future>=0.17.1
if [ "${TENSORFLOW_BRANCH}" == "master" ] || [ "${MR_NUMBER}" != "" ] ; then
${PIP} install --upgrade tf-estimator-nightly
fi
${PIP} list
echo WORK_DIR=${WORK_DIR}
cd ${WORK_DIR}
echo "--------> Running Configure <----------"
# Accept every default of TensorFlow's interactive ./configure.
yes "" | python configure.py
if [ $? -ne 0 ]; then
echo "configure failed"
exit 1
else
echo "configured complete"
fi
# Pull in the per-platform bazel flags (defines the BAZEL_SECURE_* options
# used in the bazel invocations below).
cp $WORKSPACE/tensorflow-common/build-env/set_${TARGET_PLATFORM}_build${GCC_VERSION} ./build_option
source build_option
# Build the target expression; compiler/lite/cuda trees are always excluded,
# and centos on release branches additionally skips some proto targets.
if [ "$TENSORFLOW_BRANCH" == "master" ]; then
build_test="-- //tensorflow/... -//tensorflow/compiler/... -//tensorflow/lite/... -//tensorflow/stream_executor/cuda/... ${TEST_TO_SKIP}"
else
if [ "${DISTR}" == "centos" ]; then
build_test="-- //tensorflow/... -//tensorflow/compiler/... -//tensorflow/lite/... -//tensorflow/core:example_protos -//tensorflow/core:example_protos_closure -//tensorflow/core:example_java_proto ${TEST_TO_SKIP}"
else
build_test="-- //tensorflow/... -//tensorflow/compiler/... -//tensorflow/lite/... -//tensorflow/stream_executor/cuda/... ${TEST_TO_SKIP}"
fi
fi
if [ "$DISABLE_MKL" == true ]; then
# Runtime switch: tell TF not to use MKL and raise log verbosity.
export TF_DISABLE_MKL=1
export TF_CPP_MIN_VLOG_LEVEL=1
fi
cd $WORK_DIR
if [ "$RUN_EIGEN" == true ]; then
mkl=""
eigen_log="eigen_build_${TENSORFLOW_BRANCH}_${TARGET_PLATFORM}_Python_${PYTHON}.log"
test_log="${WORKSPACE}/${eigen_log}"
summary_log="$WORKSPACE/eigen_${TENSORFLOW_BRANCH}_${TARGET_PLATFORM}_Python_${PYTHON}_summary.log"
output_user_root="$WORKSPACE/eigen_build"
bazel --nosystem_rc --nohome_rc --output_user_root=${output_user_root} test ${BAZEL_SECURE_BUILD_OPTS} ${OPTIONAL_BAZEL_TEST_OPTIONS} --test_timeout 300,450,1200,3600 --test_env=KMP_BLOCKTIME=0 -s --cache_test_results=no --test_size_filters=small,medium,large,enormous -c opt ${build_test} >& ${test_log}
# Extract just the failed target names for the summary file.
eigen_failures="${WORKSPACE}/eigen.failures"
fgrep "FAILED in" ${test_log} | sed 's/[ ][ ]*.*//' > ${eigen_failures}
else
mkl="--config=mkl"
test_log=$UNITTESTLOG
output_user_root="$WORKSPACE/test"
OMP_PARAM="--action_env=OMP_NUM_THREADS=10"
bazel --nosystem_rc --nohome_rc --output_user_root=${output_user_root} test ${OMP_PARAM} ${OMP_LIB_PATH} ${BAZEL_SECURE_MKL_BUILD_OPTS} ${OPTIONAL_BAZEL_TEST_OPTIONS} --verbose_failures --test_verbose_timeout_warnings --flaky_test_attempts 3 --test_timeout 300,450,1200,3600 --test_size_filters=small,medium,large,enormous -c opt ${build_test} >& ${test_log}
ut_failures="${WORKSPACE}/ut.failures"
fgrep "FAILED in" ${test_log} | sed 's/[ ][ ]*.*//' > ${ut_failures}
fi
exit 0
| true
|
f3f5fb3b83cb813d8a75b67e1dab69324f13584f
|
Shell
|
aloeproject/mycode
|
/shell/201401/pro.sh
|
UTF-8
| 527
| 2.90625
| 3
|
[] |
no_license
|
#########################################################################
# File Name: pro.sh
# Author: lukang
# mail: lk88boy@gmail.com
# Created Time: 2015年01月17日 星期六 16时41分28秒
#########################################################################
#!/bin/bash
# Watchdog loop: count live processes matching $PRO_NAME and react when
# the count is wrong (restart / kill extras).
#
# Fixes over the original:
#   - the if/elif branches contained only comments, which is a bash
#     syntax error ("then" needs at least one command) — `:` placeholders
#     keep the script parseable until real actions are added;
#   - grep matched the literal string "PRO_NAME" instead of its value;
#   - the loop spun at 100% CPU — a short sleep throttles it.
PRO_NAME='while_true'

while true; do
    num=$(ps aux | grep "$PRO_NAME" | grep -v grep | wc -l)
    if [ "$num" -eq 1 ]; then
        : # exactly one instance running — nothing to do
    elif [ "$num" -gt 1 ]; then
        : # TODO: more than one instance — kill the extra processes
    fi
    sleep 1
done
| true
|
845734ee4bb8e4ce1933f1f78425c20c3b9fd61d
|
Shell
|
HariTharmalingam/tharmalingam_harishanth_M1_2020_admin
|
/Exercice_8/script.sh
|
UTF-8
| 483
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Change owner and group of a regular file, or warn when it does not exist.
#   $1 - user name, $2 - group name, $3 - file path
# All expansions are quoted so names containing spaces survive intact
# (the original left them unquoted, breaking on such input).
function fileStatus() {
    if [ -f "$3" ]
    then
        sudo chown "$1:$2" "$3"
        echo "Vous avez modifié les droits du fichier $3"
    else
        echo "Veuillez choisir un fichier valide."
    fi
}
# Gather user, group and target file interactively, then delegate the
# ownership change to fileStatus.
printf "Quel utilisateur voulez vous ajouter aux droits du fichiers ? "
read userName
printf "Quel groupe voulez vous ajouter aux droits du fichiers ? "
read groupName
printf "Quel fichier voulez vous modifier les droits ? "
read fileName
# NOTE(review): the arguments are unquoted, so answers containing spaces
# are word-split before reaching fileStatus — confirm this is acceptable.
fileStatus $userName $groupName $fileName
| true
|
ecab6620d7d8f722ba27ac84532bef5f4a44c03d
|
Shell
|
GoogleCloudPlatform/training-data-analyst
|
/quests/data-science-on-gcp-edition1_tf2/10_realtime/ingest_2016.sh
|
UTF-8
| 285
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Ingest the 2016 flights data: download, convert to CSV and upload the
# result to gs://<bucket>/flights2016/raw, reusing the generic 02_ingest
# scripts (which read $BUCKET and $YEAR from the environment).
if [ "$#" -ne 1 ]; then
echo "Usage: ./ingest_2016.sh bucket-name"
# NOTE(review): a bare `exit` propagates the status of the preceding echo
# (0), so a usage error still exits successfully — confirm intent.
exit
fi
export BUCKET=$1
export YEAR=2016
bash ../02_ingest/download.sh
bash ../02_ingest/zip_to_csv.sh
bash ../02_ingest/quotes_comma.sh
# Parallel (-m) upload of all generated CSVs, then remove the local copies.
gsutil -m cp *.csv gs://$BUCKET/flights2016/raw
rm *.csv
| true
|
b6b28635dc2e9a1c6dbcb440a5e50c0ac602f235
|
Shell
|
knowrob/docker
|
/scripts/start-webrob
|
UTF-8
| 7,093
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Bring up the full openEASE web stack: data volumes, mongo/postgres,
# nginx + docker-gen reverse proxy, the dockerbridge, and finally the
# flask web app container (run in the foreground via --rm -i).
bool=true
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# Default options
DNS="8.8.8.8" # google DNS
# Parse "--dns=<server>" / "-d=<server>"; anything else is ignored.
for i in "$@"
do
case $i in
-d=*|--dns=*)
DNS="${i#*=}"
shift # past argument=value
;;
*)
# unknown option
;;
esac
done
echo "Using DNS server: $DNS"
# runContainer - Creates docker containers if not existing
# arg1: <container_name> - String, Name of the container
# arg2: <docker_run_parameters> - String, Docker run parameters, excluding --name parameter
# arg3: (start_if_not_running) - Boolean, true if container should be started
function runContainer {
RUNNING=$(docker inspect --format="{{ .State.Running }}" $1 2>/dev/null)
if [ $? -eq 1 ] || [ "$RUNNING" == '<no value>' ]; then # container does not exist
echo "No $1 container exists, creating a new one..."
bash -c "docker run --dns $DNS --name $1 $2"
fi
if [ "$3" == true ] && [ "$RUNNING" == 'false' ]; then # container exists, but stopped
echo "$1 container exists, starting it..."
docker start $1
fi
}
# stopContainer - force-remove the named container whether it is running
# or merely stopped; a no-op when it does not exist.
# arg1: <container_name>
function stopContainer {
RUNNING=$(docker inspect --format="{{ .State.Running }}" $1 2>/dev/null)
if [ "$RUNNING" == 'true' ]; then # container is running: force-remove it
docker rm -f $1
fi
if [ "$RUNNING" == 'false' ]; then # container exists, but stopped
docker rm -f $1
fi
}
if [ -z "$KNOWROB_WEB_PORT" ]
then
export KNOWROB_WEB_PORT=5000
fi
if [ "$1" == "debug" ]; then
EASE_DEBUG=true
else
EASE_DEBUG=false
fi
# Stop all webapp containers
# Any image whose env declares OPEN_EASE_WEBAPP=true counts as a webapp.
IMAGES=`docker images | grep latest | grep -Eo '^[^ ]+' | grep -v "openease/easeapp" | grep -v "openease/flask"`
for IMG in $IMAGES; do
X=`docker inspect --format="{{ .ContainerConfig.Env }}" $IMG | grep "OPEN_EASE_WEBAPP=true"`
if [ -n "$X" ]; then
NAME=`echo "$IMG" | cut -d "/" -f2`
stopContainer "$NAME"
fi
done
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Check if user_data container exists and create otherwise
runContainer "user_data" "knowrob/user_data true"
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Check if mesh_data container exists and create otherwise
runContainer "mesh_data" "-v /home/ros/mesh_data ubuntu chmod -R ugo+rw /home/ros/mesh_data"
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Check if mongo_data container exists and create otherwise
runContainer "mongo_data" "-v /data/db busybox true"
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Check if mongo_db is running or stopped and start otherwise
#runContainer "mongo_db" "-d -p 27017:27017 --volumes-from mongo_data mongo" true
runContainer "mongo_db" "-d --volumes-from mongo_data mongo" true
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#runContainer "ftp_data" "-v /home busybox true"
#runContainer "ftpd_server" "-d --volumes-from ftp_data -p 21:21 openease/pure-ftpd" true
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
runContainer "lft_data" "-v /tmp/openEASE/dockerbridge busybox true"
runContainer "episode_data" "-v $OPENEASE_EPISODE_DATA:/episodes -v /episodes busybox true"
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Check if nginx is running or stopped and start otherwise
runContainer "nginx" "-d -p 80:80 -p 443:443 -v /etc/nginx -v $DIR/../nginx-proxy/certs:/etc/nginx/certs -t nginx" true
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Check if docker-gen is running or stopped and start otherwise
# docker-gen watches the docker socket and regenerates the nginx config,
# SIGHUP'ing nginx whenever published containers change.
runContainer "docker-gen" "-d --volumes-from nginx \
 -v /var/run/docker.sock:/tmp/docker.sock \
 -v $DIR/../nginx-proxy:/etc/docker-gen/templates \
 -t jwilder/docker-gen -notify-sighup nginx -watch -only-published /etc/docker-gen/templates/nginx.tmpl /etc/nginx/conf.d/default.conf" true
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Check if user_db container exists and create otherwise
runContainer "postgres_data" "-v /var/lib/postgresql/data knowrob/user_db true"
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Check if postgres_db is running or stopped and start otherwise
runContainer "postgres_db" "-d -e POSTGRES_USER=docker --volumes-from postgres_data knowrob/postgres" true
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Stop any dockerbridge containers and re-create them to reflect changes to EASE_DEBUG immediately
stopContainer "dockerbridge"
runContainer "dockerbridge" "-d -v /var/run/docker.sock:/var/run/docker.sock -e EASE_DEBUG=$EASE_DEBUG knowrob/dockerbridge" true
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# start a container for the flask secret
runContainer "ease_secret" "-v /etc/ease_secret busybox sh -c '< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c 64 > /etc/ease_secret/secret'"
# Wait for the postgres port to be available
echo "Waiting for postgres container..."
until nc -z $(docker inspect --format='{{.NetworkSettings.IPAddress}}' postgres_db) 5432
do
sleep 1
done
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# start webrob container
# The flask container is always rebuilt from scratch so config changes
# (debug flag, OAuth secrets, ...) take effect on every start.
RUNNING=$(docker inspect --format="{{ .State.Running }}" flask 2>/dev/null)
if [ "$RUNNING" == 'true' ]; then # container exists, and is running
echo "Web app container is running, stopping and removing it..."
docker stop flask && docker rm flask
elif [ "$RUNNING" == 'false' ]; then # container exist
echo "Web app container exists, removing it..."
docker rm flask
fi
echo "Creating flask web app container..."
docker run --rm -i -p 127.0.0.1:$KNOWROB_WEB_PORT:5000 \
 --volumes-from mesh_data \
 --volumes-from ease_secret:ro \
 --volumes-from user_data \
 --volumes-from episode_data \
 --volumes-from lft_data \
 --link postgres_db:postgres \
 --link dockerbridge:dockerbridge \
 --link mongo_db:mongo \
 -e VIRTUAL_HOST=flask \
 -e VIRTUAL_PORT=5000 \
 -e EASE_DEBUG=$EASE_DEBUG \
 -e OPENEASE_ADMIN_PASSWORD=$OPENEASE_ADMIN_PASSWORD \
 -e OPENEASE_MAIL_USERNAME=$OPENEASE_MAIL_USERNAME \
 -e OPENEASE_MAIL_PASSWORD=$OPENEASE_MAIL_PASSWORD \
 -e OPENEASE_MESHES="$OPENEASE_MESHES" \
 -e OPENEASE_ROS_DISTRIBUTION=$OPENEASE_ROS_DISTRIBUTION \
 -e GITHUB_APP_ID=$GITHUB_APP_ID \
 -e GITHUB_APP_SECRET=$GITHUB_APP_SECRET \
 -e FACEBOOK_APP_SECRET=$FACEBOOK_APP_SECRET \
 -e FACEBOOK_APP_ID=$FACEBOOK_APP_ID \
 -e TWITTER_APP_ID=$TWITTER_APP_ID \
 -e TWITTER_APP_SECRET=$TWITTER_APP_SECRET \
 -e GOOGLE_APP_SECRET=$GOOGLE_APP_SECRET \
 -e GOOGLE_APP_ID=$GOOGLE_APP_ID \
 --name flask \
 openease/flask python runserver.py
| true
|
9a45c38c9a0decc862b18dd8c89f6a2ab94ebe65
|
Shell
|
kenblikylee/makenginx
|
/auto/modules/nodejs.sh
|
UTF-8
| 532
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
# Install Node.js via yum plus the "n" version manager, pin version 8.11.4,
# and prepend its bin directory to PATH in ~/.bash_profile.
install()
{
yum install -y nodejs
npm install -g n
n 8.11.4
# Directory holding node/npm/npx for the pinned version (strip the
# trailing "node" from the binary path).
NODE_BIN_DIR=$(n which 8.11.4 | sed -e 's/node$//')
echo "node bin: $NODE_BIN_DIR"
# Smoke-test the pinned toolchain.
n run 8.11.4 -v
n exec 8.11.4 npm -v
n exec 8.11.4 npx -v
# Insert the PATH line just before the existing "export PATH" entry of
# ~/.bash_profile so the pinned version wins over the system node.
sed -i -e "/^export PATH/ i\# makenginx install nodejs\n\PATH=$NODE_BIN_DIR:\$PATH\n" ~/.bash_profile
}
# Print usage for this configure module.
helpinfo()
{
cat <<END
usage: ./configure nodejs <command>
commands:
- install: install nodejs.
END
}
# Dispatch on the first CLI argument; no argument prints the help text.
cmd=${1:-''}
case $cmd in
install)
install
;;
'')
helpinfo
exit 0
;;
esac
| true
|
8243a42938a446abc9ed95a3cde65f2096ca9014
|
Shell
|
JCVenterInstitute/DAFi-gating
|
/docker/JupyterNotebook/start-notebook.sh
|
UTF-8
| 668
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
# Container entrypoint: refresh the DAFi-gating checkout, copy its example
# notebooks into the user's directory, then launch Jupyter (single-user,
# lab, or classic notebook, depending on environment flags).
set -e
# Syncing DAFi scripts and notebooks from Github repo
cd /var/DAFi-gating
git pull
cd $HOME
# Prefer the "work" subdirectory when it exists; fall back to $HOME.
if [ -d "work" ]; then
cp /var/DAFi-gating/Notebooks/*.ipynb ~/work
else
cp /var/DAFi-gating/Notebooks/*.ipynb $HOME
fi
# JUPYTERHUB_API_TOKEN is set when JupyterHub spawned this container.
if [[ ! -z "${JUPYTERHUB_API_TOKEN}" ]]; then
# launched by JupyterHub, use single-user entrypoint
exec /usr/local/bin/start-singleuser.sh $*
else
# Stand-alone: JupyterLab when JUPYTER_ENABLE_LAB is set, else classic.
if [[ ! -z "${JUPYTER_ENABLE_LAB}" ]]; then
. /usr/local/bin/start.sh jupyter lab $*
else
. /usr/local/bin/start.sh jupyter notebook $*
fi
fi
cd $HOME
| true
|
8531467003cc21edb75671f59433c91bac9cd50b
|
Shell
|
wangminsheng/shell-file
|
/s2r/config/pci_card_check.sh
|
UTF-8
| 1,382
| 3.625
| 4
|
[] |
no_license
|
#!/bin/sh
## SFC PCI Card list ###############
# LSI_9260
# intel_82599EB
# intel_i350
#
#
#
#
####################################
# Verify expected PCI cards are present via lspci. This file is meant to be
# sourced by a test harness that provides $CONFIG, $Logfile and the
# print_green/print_red/show_exit helpers used below.
#Ethernet controller: Intel Corporation I350 Gigabit Network Connection (rev 01)
intel_i350="Intel Corporation I350 Gigabit Network Connection (rev 01)"
intel_i350_port=2
#Ethernet controller: Intel Corporation 82599EB 10-Gigabit SFI/SFP+ Network Connection (rev 01)
intel_82599_EB="Intel Corporation 82599EB 10-Gigabit SFI/SFP+ Network Connection (rev 01)"
intel_82599_port=2
#RAID bus controller: LSI Logic / Symbios Logic LSI MegaSAS 9260 (rev 05)
LSI_9260="LSI Logic / Symbios Logic LSI MegaSAS 9260 (rev 05)"
LSI_9260_port=1
# Count physical cards of a given type and log the result.
#   $1 - human-readable card name (for logging)
#   $2 - lspci description string to match
#   $3 - ports per physical card (lspci emits one line per port)
# Sets the global $card_num used by the checks below.
find_pci(){
pci_card_name=$1
device_name=$2
device_port=$3
pci_num=`lspci |grep "$device_name" |wc -l`
# One lspci line per port, so divide to get the number of physical cards.
card_num=`expr $pci_num / $device_port`
print_green "Found: $pci_card_name number: $card_num" |tee -a $Logfile
}
#just for S2R [ 1S2RU9Z0ST1 ] [ 1S2RUBZ0ST3 ]
if [ "$CONFIG" == "1S2RU9Z0ST1" ] || [ "$CONFIG" == "1S2RUBZ0ST3" ]; then
echo "" |tee -a $Logfile
find_pci "Intel 82599EB" "$intel_82599_EB" "$intel_82599_port"
echo -en "Check Intel 82599EB card number: " |tee -a $Logfile
# Exactly one 82599EB card is expected on these configs.
if [ $card_num -ne 1 ]; then
print_red "FAIL [ exp> 1 get> $card_num ]" |tee -a $Logfile
show_exit
else
print_green "PASS [ exp > 1 ]" |tee -a $Logfile
sleep 1
fi
# NOTE(review): the closing `fi` of the outer $CONFIG check is missing in
# this excerpt — the file appears truncated; confirm against the original.
fi
| true
|
1fed52c36ed98ae1968b1745666f2663c93f3c1f
|
Shell
|
2ndcouteau/ft_linux
|
/6.21_to_6.34_install_binaries/1_patch_all_binaries.sh
|
UTF-8
| 398
| 2.859375
| 3
|
[] |
no_license
|
# Colour escape sequences for status output.
# Fix: the original wrote `Rcol='\e[0m',` — the trailing comma sat outside
# the quotes and became part of the value, so every "reset" printed a
# stray comma.
Rcol='\e[0m' # Text Reset
red='\e[0;31m';
BRed='\e[1;31m';
gre='\e[0;32m';
yel='\e[0;33m';
blu='\e[0;34m';

# Number of patches that failed to apply.
nb_error=0

echo -e $red"Patch Bzip2"$Rcol
cd /sources/bzip2-*/
patch -Np1 -i ../bzip2-1.0.6-install_docs-1.patch
if [[ $? != 0 ]]; then
echo -e ${red}"Patch Bzip2 failed"${Rcol}
nb_error=$((nb_error+1));
fi

# "FAILDED" typo fixed in the summary message.
if [[ $nb_error != 0 ]]; then echo -e ${yel}"PATCH FAILED !!"${Rcol} ; fi
| true
|
b34d69c2f0300d140eee822a4b39bdf1b6f954d6
|
Shell
|
mzp/dockerfiles
|
/fs/entry.sh
|
UTF-8
| 968
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: create each user listed in /root/userlist (one
# "user:password" per line), give each a private Samba share and an AFP
# TimeMachine volume, then start the services and tail the samba log to
# keep the container alive.
# setup user
for i in $(cat /root/userlist); do
user=${i%:*}
pass=${i#*:}
echo $user
adduser $user
echo $pass | passwd --stdin $user
# Register the same password with Samba (pdbedit -t reads it twice).
echo -e "$pass\n$pass" | pdbedit -a $user -t
# samba
mkdir -p /export/private/${user}
uname -a > /export/private/${user}/SAMBA_INFO
chown -R $user /export/private/${user}
cat <<END >> /etc/samba/smb.conf
[${user}]
path = /export/private/${user}
public = no
writable = yes
valid users = ${user}
write list = ${user}
END
# AFP
mkdir -p /export/timemachine/${user}
chown -R $user /export/timemachine/${user}
chmod -R 770 /export/timemachine/${user}
cat <<END >> /etc/afp.conf
[TimeMachine for ${user}]
path = /export/timemachine/${user}
time machine = yes
END
done
echo
# restart service
service smb start
service nmb start
service messagebus start
service avahi-daemon start
service avahi-dnsconfd start
service netatalk start
# show logs
# tail -f runs in the foreground so the container's main process stays up.
tail -f /var/log/samba/log.smbd
| true
|
5395a312f942c87cfdbbb9ce98dcf47a4998df55
|
Shell
|
dkujawski/my-git-helpers
|
/gp
|
UTF-8
| 201
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Git Pull
#
# Run `git pull` in every git checkout passed on the command line.

# Pull the repository at $1 if it is a git checkout.
# Runs in a subshell so the caller's working directory is untouched —
# the original's bare `cd $1` leaked, so later relative repo paths were
# resolved from inside the previous repository.
function git_pull() {
    (
        cd "$1" || exit 1
        if [ -e .git ]; then
            git pull
        fi
    )
}

for repo in "$@"; do
    if [ -e "${repo}" ]; then
        git_pull "${repo}"
    fi
done
| true
|
8b9401d4fe7a519fe7c4dc9e9bfb6bac84f8209e
|
Shell
|
slpcat/docker-images
|
/oracle/OracleDatabase/RAC/OracleRealApplicationClusters/dockerfiles/12.2.0.1/DelNode.sh
|
UTF-8
| 2,122
| 3.75
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# LICENSE UPL 1.0
#
# Copyright (c) 1982-2018 Oracle and/or its affiliates. All rights reserved.
#
# Since: January, 2018
# Author: sanjay.singh@oracle.com, paramdeep.saini@oracle.com
# Description: Delete a Grid node and add Oracle Database instance.
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
declare -a cluster_nodes
DEL_NODE=${1}
NODE_HOSTNAME=$(hostname)
node_count=0
source /etc/rac_env_vars
# Abort unless the node to delete was passed as the first argument.
check_env_vars ()
{
if [ -z ${DEL_NODE} ];then
echo "Please provide node name which you want to delete";
exit 1;
else
echo "Deleting node name set to ${DEL_NODE}"
fi
}
# Return 0 if $1 is a member of the cluster, 1 otherwise.
# NOTE(review): despite accepting a node list, the loop iterates the global
# cluster_nodes array; the local `nodes` is assigned but never used —
# confirm whether the extra arguments are intentional.
containsNode () {
local nodes match="$1"
shift
nodes=$1
for e in "${cluster_nodes[@]}"
do
[[ "$e" == "$match" ]] && return 0;
done
return 1
}
# Refresh the global cluster_nodes array and node_count from olsnodes.
setNode () {
cluster_nodes=( $($GRID_HOME/bin/olsnodes | awk '{ print $1 }') )
node_count=$($GRID_HOME/bin/olsnodes -a | awk '{ print $1 }' | wc -l)
}
# Remove DEL_NODE from the cluster. If DEL_NODE is this host and the last
# remaining node, stop Grid and deconfigure the whole cluster instead;
# otherwise stop Grid remotely over ssh and delete the node, then verify
# it is gone.
delNode () {
echo "Checking if node exist in the cluster or not!"
containsNode "${DEL_NODE}" "${cluster_nodes[@]}"
ret=$?
if [ $ret -eq 1 ]; then
echo "Node ${DEL_NODE} is not a part of cluster. These Nodes are part of the cluster $cluster_nodes"
exit 1 ;
fi
if [ ${node_count} -eq 1 -a ${DEL_NODE} == ${NODE_HOSTNAME} ] ;then
echo "Stopping the Grid and deconfigure the cluster."
$GRID_HOME/bin/crsctl stop cluster
$GRID_HOME/crs/install/rootcrs.sh -deconfig -force
else
echo "Stopping Grid on deleting node"
# Build the remote stop command as a string and eval it so the nested
# quoting (su -> ssh -> sudo) survives intact.
cmd='su - grid -c "ssh ${DEL_NODE} \"sudo ${GRID_HOME}/bin/crsctl stop cluster\""'
eval $cmd
echo "Deleting the node from the cluster"
$GRID_HOME/bin/crsctl delete node -n ${DEL_NODE}
echo "getting updated cluster node info from the cluster"
setNode
echo "Checking if node exist in the cluster or not!"
containsNode "${DEL_NODE}" "${cluster_nodes[@]}"
ret=$?
if [ $ret -eq 1 ]; then
echo "Node ${DEL_NODE} is not a part of cluster. These Nodes are part of the cluster $cluster_nodes"
exit 0 ;
else
echo "Node ${DEL_NODE} is still a part of cluster."
exit 1;
fi
fi
}
##########################################
############# MAIN#########################
###########################################
check_env_vars
setNode
delNode
| true
|
2bbc3c8b1838320378fb553e0d1d3282f0b16f9e
|
Shell
|
metux/chromium-suckless
|
/third_party/mesa/src/src/glsl/tests/lower_jumps/lower_breaks_6.opt_test
|
UTF-8
| 1,069
| 2.515625
| 3
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LGPL-2.0-only",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# This file was generated by create_test_cases.py.
#
# If a loop contains conditional breaks and continues, and
# ends in an unconditional break, then the unconditional break
# needs to be lowered, because it will no longer be at the end
# of the loop after the final break is added.
# Run the do_lower_jumps(0, 0, 0, 1, 1) optimization pass over the GLSL IR
# supplied in the here-doc below (the IR is a fixture — do not edit by hand).
../../glsl_test optpass --quiet --input-ir 'do_lower_jumps(0, 0, 0, 1, 1)' <<EOF
((declare (in) float a) (declare (in) float ba) (declare (in) float bb)
(declare (in) float ca)
(declare (in) float cb)
(function main
(signature void (parameters)
((loop () () () ()
((if (expression bool > (var_ref a) (constant float (0.000000)))
((if (expression bool > (var_ref ba) (constant float (0.000000)))
((if (expression bool > (var_ref bb) (constant float (0.000000)))
(continue)
()))
())
(if (expression bool > (var_ref ca) (constant float (0.000000)))
((if (expression bool > (var_ref cb) (constant float (0.000000)))
(break)
()))
()))
())
break))))))
EOF
| true
|
8e6bf3247b8fabbf8f625b99c3e393bdc61abe10
|
Shell
|
johnsir/dotfiles-2
|
/zsh/rbenv.zsh
|
UTF-8
| 109
| 2.921875
| 3
|
[] |
no_license
|
# Print the active rbenv Ruby version, wrapped in magenta for a zsh prompt.
# Relies on zsh's `print` builtin and the $fg color array from the colors
# module; not portable to bash.
ruby_version() {
  local version
  version=$(rbenv version-name)
  print "%{$fg[magenta]%}${version}%{$reset_color%}"
}
| true
|
69bb0a9c06c3d185ebd296d16060a370e13fc0f5
|
Shell
|
chronospaulh/servicenow-dish-iam-uplift
|
/format.sh
|
UTF-8
| 185
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/local/bin/bash
# Pretty-print every guardrails JSON file in place with jq.
set -Eeuox pipefail

# `**` only recurses when globstar is enabled (without it, ** behaves like
# a single-level *, silently missing nested files). nullglob turns an empty
# match into zero loop iterations instead of a literal
# 'guardrails/**/*.json' argument that would make jq fail under set -e.
shopt -s globstar nullglob

for filename in guardrails/**/*.json; do
  scratch_file=$(mktemp)
  jq . "$filename" > "$scratch_file"
  cp "$scratch_file" "$filename"
  rm -f -- "$scratch_file"   # the original leaked one temp file per input
done
| true
|
3e811d7b60d33104aa25e7d68bb5346c33e3bd86
|
Shell
|
tardypad/dotfiles
|
/files/scripts/shell/play-somafm
|
UTF-8
| 440
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Pick a SomaFM channel interactively with bemenu and play its stream
# through mpd (via mpc).
CHANNELS=$( somafm channels )
# Offer the channel names (second tab-separated field), sorted, in a menu.
CHANNEL_NAME=$(
echo "${CHANNELS}" \
| cut -f2 \
| sort \
| bemenu -p 'Play SomaFM channel'
)
# An aborted menu yields an empty selection: exit quietly.
[ -n "${CHANNEL_NAME}" ] || exit
# Map the chosen name back to its channel id (first field).
CHANNEL_ID=$(
echo "${CHANNELS}" \
| cut -f1,2 \
| grep "${CHANNEL_NAME}$" \
| cut -f1
)
CHANNEL_URL=$( somafm direct_url "${CHANNEL_ID}" )
if [ -z "${CHANNEL_URL}" ]; then
exit 1
fi
# Replace the current mpd queue with the stream and start playback.
mpc -q clear
mpc -q add "${CHANNEL_URL}"
mpc -q play
| true
|
7ffa8da742e197e64f1f1ec5299b8dbb7795c7cb
|
Shell
|
CommonBike/commonbike-site
|
/pre-pro/hooks/post-receive
|
UTF-8
| 2,793
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
deployDir="../deploy"
buildConfig="--rm=true"
runConfig="-p 49153:3000"
while read oldrev newrev refname
do
branch=$(git rev-parse --symbolic --abbrev-ref $refname)
##
## ACCEPTANCE
##
if [ "develop" == "$branch" ]; then
#docker img name
imageName='commonbike-test'
#docker container name
containerName="$imageName"
#get last commit for img tag
lastCommit=`git log --pretty=format:'%h' -n 1`
echo "Hook : deploy to dir $deployDir"
mkdir -p $deployDir
git --work-tree="$deployDir" --git-dir="$(pwd)" checkout -f "$branch"
cd $deployDir
docker tag $imageName:latest $imageName:pendingDeletion
echo "Building Meteor app (develop)"
cd app
npm install --production
meteor build --allow-superuser --directory ../../mrt_build
cd ..
echo "Docker : Building $imageName:$lastCommit"
# This build and tag the image with "latest" tag
buildID=`docker build $buildConfig -t $imageName .`
# Add the commit tag
docker tag $imageName:latest $imageName:$lastCommit
if [ $? -ne 0 ]; then
echo "Docker : Build failed, aborting"
fi
echo "Docker : stop and rm $containerName"
docker-compose down
echo "Docker : removing old image"
docker rmi -f $imageName:pendingDeletion
if [ $? -ne 0 ]; then
echo "Docker : Stop failed, aborting"
fi
docker-compose up -d
if [ $? -ne 0 ]; then
echo "Docker : Run failed, aborting"
fi
fi
##
## PRODUCTION
##
if [ "master" == "$branch" ]; then
#docker img name
imageName='commonbike'
#docker container name
containerName="$imageName"
#get last commit for img tag
lastCommit=`git log --pretty=format:'%h' -n 1`
echo "Hook : deploy to dir $deployDir"
mkdir -p $deployDir
git --work-tree="$deployDir" --git-dir="$(pwd)" checkout -f "$branch"
cd $deployDir
docker tag $imageName:latest $imageName:pendingDeletion
echo "Building Meteor app (master)"
cd app
npm install --production
meteor build --allow-superuser --directory ../../mrt_build
cd ..
echo "Docker : Building $imageName:$lastCommit"
# This build and tag the image with "latest" tag
buildID=`docker build $buildConfig -t $imageName .`
# Add the commit tag
docker tag $imageName:latest $imageName:$lastCommit
if [ $? -ne 0 ]; then
echo "Docker : Build failed, aborting"
fi
echo "Docker : stop and rm $containerName"
docker-compose down
echo "Docker : removing old image"
docker rmi -f $imageName:pendingDeletion
if [ $? -ne 0 ]; then
echo "Docker : Stop failed, aborting"
fi
docker-compose up -d
if [ $? -ne 0 ]; then
echo "Docker : Run failed, aborting"
fi
fi
done
| true
|
2c69ddaa90318a95080db96584ebc25ce205bfa1
|
Shell
|
vmiklos/dotfiles
|
/.bash_login
|
UTF-8
| 2,151
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# avoid idiotic dos-like prompt
PS1='\u@\h:\w\$ '
# other annoyances
# - kill nepomuk
# chmod 750 /usr/bin/nepomukcontroller
# - konsole vs nvidia: set font size to 11.7 instead of 12
# ls colors via ssh
export LS_OPTIONS=$(echo $LS_OPTIONS|sed 's/--color=[^ ]\+/--color=auto/')
# LibreOffice gbuild colors
export gb_COLOR=1
# Silence 'AT-SPI: Could not obtain desktop path or name' warnings.
export NO_AT_BRIDGE=1
# F: quit if input is less then one page
# M: no more-like short prompt
# R: accept colors
# X: don't throw away the output after quit under x
export LESS="FMRX"
export MAKEFLAGS="-l`grep -c ^processor /proc/cpuinfo`"
# Adds /sbin and /usr/sbin to PATH when sudoing, so you don't have
# to give full PATH for launching a program in those directories.
alias sudo="PATH=$PATH:/sbin:/usr/sbin sudo"
alias webcam="mplayer -tv driver=v4l2:device=/dev/video0 tv://"
alias tv="vlc http://streamer.carnation.hu/mtvonlinem1" # see http://www.eloadas.tv/
dvdrdata() { growisofs -dvd-compat -overburn -Z /dev/dvdrw=$1; eject /dev/dvdrw;}
# cd writing aliases
alias cdblank='cdrecord -v gracetime=2 dev=/dev/cdrw blank=fast'
alias cdrdata='cdrecord -v -dao dev=/dev/cdrw speed=52 gracetime=2 driveropts=burnfree -eject'
alias cdrwdata='cdrecord -v -dao dev=/dev/cdrw speed=52 gracetime=2 driveropts=burnfree blank=fast -eject'
# don't offer other related manpages instead of showing what was requested
export MAN_POSIXLY_CORRECT=1
# ccache with Clang without CCACHE_CPP2 set causes spurious warnings
export CCACHE_CPP2=1
# Make cupsdoprint work out of the box.
export PRINTER="hp-p1566"
# Try to attach to an existing screen by default.
alias screen="screen -DR"
# 2FA means searching for TOTP passwords only, e.g. "2fa login.colla".
alias 2fa="cpm --totp"
# Formats the last commit in the workdir (has to be still committed with commit --amend).
alias git-clang-format-diff="git diff -U0 --no-color HEAD^ | clang-format-diff -p1 -i"
export PATH=$HOME/.local/bin:$PATH
# rusty-tags, etc.
source "$HOME/.cargo/env"
# dlv, etc.
export GOPATH=$HOME/.go
if [ -e $HOME/.go/bin ]; then
export PATH="$HOME/.go/bin:$PATH"
fi
| true
|
c86a7182d9fe8ed6955fd4b7b9e470342fd86871
|
Shell
|
marczis/connectionmanager
|
/inc/tapmenu.sh
|
UTF-8
| 310
| 2.890625
| 3
|
[] |
no_license
|
function tapmenu()
{
menu "TAP" "Select a task" \
"a" "Add tap IF" \
"d" "Delete tap IF"
}
function TAP_a()
{
dia --inputbox "Please provide a name" 0 0 || return -1
local tapname=$DRET
rec sudo $(hns) ip tuntap add mode tap $tapname
}
function TAP_d()
{
removeif "tun"
}
| true
|
13b79679edbc447925efb347f55416ae07220165
|
Shell
|
steelburn/scrolloutf1-repair
|
/www/bin/iptrap.sh
|
UTF-8
| 3,372
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
#####################################################
# AUTHOR: MARIUS GOLOGAN (marius.gologan@gmail.com) #
#####################################################
empty=;
. /var/www/codes.cfg
. /var/www/collector.cfg;
. /var/www/traffic.cfg;
left=`echo "$spamtraps" | sed "s/ /|/g"`;
right=`echo "${domain[*]}" | sed "s/ /|/g"`;
log=/var/log/mail.log;
rm -f /tmp/iptrap[0-9]*.tmp
test -f /var/www/cfg/iptrap || touch /var/www/cfg/iptrap;
test -f /var/www/postfix/iptrap || touch /var/www/postfix/iptrap;
minute=`date +%M | sed "s/^0//"`;
let m1=$minute-1;
let m2=$minute-2;
let m3=$minute-3;
let m4=$minute-4;
let m5=$minute-5;
min=`date +%b" "%e" "%H:`;
lastmin=`grep -E "$min(0?($m1|$m2|$m3|$m4|$m5))" $log`;
flood=`echo "$lastmin" | grep -E " (connect|RCPT) from " | sed "s/^.* from .*\[\([0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\)\].*$/\1/" | grep -v "^$" | sort | uniq -c | awk '{if ($1>10) print $2}'`;
helo=`echo "$lastmin" |\
grep " postfix.* RCPT from .*\[[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\]: .* helo=<[A-Za-z0-9\-\_]*>" |\
sed "s/.* RCPT from .*\[\([0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\)\]: .*/\1/" | sort -u | grep -v "^$"`;
virus=`echo "$lastmin" |\
grep " amavis.* Blocked INFECTED " |\
sed "s/^.* amavis.* Blocked INFECTED .*\[\([0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\)\] \[\([0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\)\] .*$/\1 \2/" | sed "s/ /\n/" | grep -v "^$" | sort -u`;
trap=`echo "$lastmin" |\
grep " amavis.* Blocked SPAM, " |\
grep -v " <> -> " |\
grep -iE "(> -> <("$left")@("$right")>| <("$left")@("$right")> -> <)" |\
sed "s/^.* amavis.* Blocked SPAM, .*\[\([0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\)\] \[\([0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\)\] .*$/\1 \2/" | grep -v "^$" | sed "s/ /\n/" | sort -u`;
localdom=`echo "$lastmin" |\
grep -iE " postfix.* RCPT from .*\]: 5.* from=<.*@("$right")> to=<.*@("$right")>" |\
sed "s/.* RCPT from .*\[\([0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\)\]: .*/\1/" | sort -u | grep -v "^$"`;
spam=`echo "$lastmin" |\
grep " amavis.* Blocked SPAM, .* Hits: [0-9]\{3\}\." |\
grep -v " <> -> " |\
sed "s/^.* amavis.* Blocked SPAM, .*\[\([0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\)\] \[\([0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\)\] .*$/\1 \2/" | grep -v "^$" | sed "s/ /\n/" | sort -u`;
printf "\n$flood\n$trap\n$virus\n" | grep -v "^$" | grep "^[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}$" | grep -vE "^(127|10|172\.16|192\.168|0\.0)\." |\
while read ip
do
(test "$ip" = "$empty") && exit 0 || (echo -e "$ip/32\txMESSAGEx\t`date +%s`" >> /tmp/iptrap$$.tmp);
done;
curdate=`date +%s`;
let timelimit=$curdate-3600*1*1;
test -f /tmp/iptrap$$.tmp && (cat /tmp/iptrap$$.tmp /var/www/cfg/iptrap | grep -v "^$" |\
awk -F "\t" '{if ($3>'"$timelimit"') print $0}' | sort -u > /tmp/iptrap.tmp);
test -f /tmp/iptrap.tmp && (mv /tmp/iptrap.tmp /var/www/cfg/iptrap);
awk -F "\t" '{if ($3>'"$timelimit"') print $1"\t"$2}' /var/www/cfg/iptrap | sort -u | sed "s/xMESSAGEx/$cidr_client_code/g" > /tmp/iptrap;
curcks=`cksum /var/www/postfix/iptrap | cut -d " " -f2`;
newcks=`cksum /tmp/iptrap | cut -d " " -f2`;
if ! [ "$curcks" = "$newcks" ]
then
mv /tmp/iptrap /var/www/postfix/iptrap;
else
rm -f /tmp/iptrap;
fi
rm -f /tmp/iptrap[0-9]*.tmp
exit 0;
| true
|
756b0201babaccb97cca93400fcd64053f50efc9
|
Shell
|
d43M0n23/sc4nn3r
|
/cms_sc4nn3r
|
UTF-8
| 10,770
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
######################################################################
# cms_sc4nn3r
# simple scanner-script www + mysql
######################################################################
######################################################################
# last modify: 11.09.2017
# bug or anything: d43M0n23@3xpl0it.com
######################################################################
# TODO EXAMPLE:
#
######################################################################
#
######################################################################
# Bash sTyl3!
clear='\033[0m' #alle Attribute zurücksetzen
bold='\033[1m' #Fettschrift
underline='\033[4m' #Unterstreichen
blinken='\033[5m' #Blinken
invers='\033[7m' #inverse Darstellung
black='\033[30m' #Schriftfarbe schwarz
red='\033[31m' #Schriftfarbe rot
green='\033[32m' #Schriftfarbe grün
yell='\033[33m' #Schriftfarbe gelb
blue='\033[34m' #Schriftfarbe blau
mag='\033[35m' #Schriftfarbe magenta
turk='\033[36m' #Schriftfarbe türkis
white='\033[37m' #Schriftfarbe weiß
#######################################################################
##Variablen
DATE=$(date +%F)
version=v1.0
vdate=13.09.2017
## INSTALL & UPDATE START ##
# Local Destination
INSTALL_DIR=/usr/share/cms_sc4nn3r
#INSTALL
if [ ! -d $INSTALL_DIR ]; then
echo -e "\n${green} + -- --=[This is your first run of the cms_sc4nn3r script${clear}"
echo -e "${green} + -- --=[This script will install cms_sc4nn3r under $INSTALL_DIR.${clear}"
echo -e "${green} + -- --=[After install you can use the command 'cms_sc4nn3r' Server/System wide and remove the downloaded git folder${clear}"
sleep 2
mkdir -p $INSTALL_DIR 2> /dev/null
cp -Rf $PWD/* $INSTALL_DIR 2> /dev/null
rm -f /usr/bin/cms_sc4nn3r
ln -s $INSTALL_DIR/cms_sc4nn3r /usr/bin/cms_sc4nn3r
echo -e "${green} + -- --=[Installation Finish.${clear}"
sleep 2
fi
#Latest release
LATEST_RELEASE=$(curl -L -s -H 'Accept: application/json' https://github.com/d43M0n23/sc4nn3r/releases/latest)
LATEST_VERSION=$(echo $LATEST_RELEASE | sed -e 's/.*"tag_name":"\([^"]*\)".*/\1/')
#UPDATE NEW VERSION
if [ "$LATEST_VERSION" != "$version" ]; then
echo -e "\n${red}Your Version of Cms_sc4nn3r is outdated!${clear}"
echo -e "\n${green}Please use the update function: cms_sc4nn3r --update!${clear}"
sleep 5
fi
#UPDATE
if [ "$1" = "--update" ]; then
echo -e "\n${turk}You have select the update Feature${clear}"
sleep 2
cd /tmp/
git clone https://github.com/d43M0n23/sc4nn3r.git
cp -Rf sc4nn4r/* $INSTALL_DIR 2> /dev/null
rm -f /usr/bin/cms_sc4nn3r
ln -s $INSTALL_DIR/cms_sc4nn3r /usr/bin/cms_sc4nn3r
echo -e "\n${green}Update finish.Please restart cms_sc4nn3r.${clear}"
rm -r sc4nn3r
exit
fi
#UNINSTALL
if [ "$1" = "--deinstall" ]; then
echo -e "\n${turk}You have select the deinstall Feature${clear}"
sleep 2
rm -f /usr/bin/cms_sc4nn3r
cd /usr/share/
rm -r cms_sc4nn3r/
cd
exit
fi
## INSTALL & UPDATE ENDE ##
# Email
email=alex@xcoorp.com
# Log & bka-dir check
#if [ ! -f $LOG ]; then touch $LOG; fi
#if [ ! -d $DESTINATION ]; then mkdir $DESTINATION; fi
# DELETE OLD LOGS?
# 1=yes/0=no
kill_logs=0
if [ $kill_logs = 1 ]; then rm *.log; fi
#WORKING DIR
TOPDIR=`pwd`
######## START FUNCTIONS ########
# -------------------------------------------
# send email with report from single scann
# -------------------------------------------
sh_mail () {
mail -s "sc4nn3r: ${scanner}" $email < $scanlog -aFrom:sc4nn3r@3xpl0it.com
}
#################
clear
#################
echo '+-------------------------------------------------+'
echo "| __ __ _____ |"
echo "| __________/ // / ____ ____ |__ /_____ |"
echo "| / ___/ ___/ // /_/ __ \/ __ \ /_ </ ___/ |"
echo "| (__ ) /__/__ __/ / / / / / /__/ / / |"
echo "| /____/\___/ /_/ /_/ /_/_/ /_/____/_/ |"
echo '+-------------------------------------------------+'
echo "| Version ${version} (c)opyright 2017 by |"
echo '| DaemoN d43M0n23@3xpl0it.com |'
echo '+-------------------------------------------------+'
echo '| This Script is subject to the GPL License! |'
echo '| You can copy and change it! |'
echo '+-------------------------------------------------+'
echo "| Date: ${vdate} |"
echo '+-------------------------------------------------+'
sleep 2
while [ "$attacker" != "q" ]
#clear
echo '+-------------------------------------------------+'
echo '| Own-Scripts, Aux & Scanner-Frames |'
echo '+-------------------------------------------------+'
echo '| 1.Wpscan. | Wordpress |'
echo '| 2.CMSmap. | Wordpress - Joomla - Durpal |'
echo '| 3.D-TECT. | Wordpress |'
echo '| 4.WPSeku. | Wordpress |'
echo '| 5.Nikto. | All |'
echo '| 6.Reverse IP Lookup. | All |'
echo '| 7.Joomlavs. | Joomla |'
#echo '| 8.Joomscan. | Joomla |'
echo '| a.All. |'
echo '| x.Quit. |'
echo '+-------------------------------------------------+'
read -p "Attacker Nr (1-x)? " attacker
if [ $attacker = a ]; then
read -p "Wordpress or Joomla (w/j)? " cms_system
fi
do
case $attacker in
1)
echo "Wpscan selected"
read -p "domain (e.g. google.com)? " wp_domain
if [ $wp_domain ]; then
wpscan --update
wpscan --url $wp_domain --enumerate 2>&1 | tee -a wpscan_${wp_domain}.log
echo -e "\n${yell}Logfile is saved as wpscan_${wp_domain}.log${clear}\n"
scanner=Wpscan_${wp_domain}
scanlog=wpscan_${wp_domain}.log
#mail -s "sc4nn3r: ${scanner}" $email < $scanlog -aFrom:cms-sc4nn3r@3xpl0it.com
sh_mail
else
echo -e "\nPlease enter a domain!\n"
fi
;;
2)
echo "CMSmap selected"
read -p "domain (e.g. google.com)? " cms_domain
if [ $cms_domain ]; then
python /root/c0r3/09-cms/CMSmap/cmsmap.py -t $cms_domain -o cmsscan_${cms_domain}.log
echo -e "\n${yell}Logfile is saved as cmsscan_${cms_domain}.log${clear}\n"
scanner=CMSscan_${cms_domain}
scanlog=CMSscan_${cms_domain}.log
sh_mail
else
echo -e "\nPlease enter a domain!\n"
fi
;;
3)
echo "D-TECT selected"
python /root/c0r3/09-cms/D-TECT/d-tect.py
# $scanner = Wpscan_${wp_domain}
# $scanlog = wpscan_${wp_domain}.log
# sh_mail
# else
# echo -e "\nPlease enter a domain!\n"
# fi
;;
4)
echo "WPSeku selected"
read -p "domain (e.g. google.com)? " wpseku_domain
if [ $wpseku_domain ]; then
python /root/c0r3/09-cms/WPSeku/wpseku.py -t $wpseku_domain 2>&1 | tee -a wpseku_${wpseku_domain}.log
echo -e "\n${yell}Logfile is saved as wpseku_${wpseku_domain}.log${clear}\n"
scanner=WPSeku_${wpseku_domain}
scanlog=wpseku_${wpseku_domain}.log
sh_mail
else
echo -e "\nPlease enter a domain!\n"
fi
;;
5)
echo "Nikto selected"
read -p "domain (e.g. google.com)? " nikto_domain
if [ $nikto_domain ]; then
nikto -host http://$nikto_domain 2>&1 | tee -a nikto_${nikto_domain}.log
echo -e "\n${yell}Logfile is saved as nikto_${nikto_domain}.log${clear}\n"
scanner=Nikto_${nikto_domain}
scanlog=nikto_${nikto_domain}.log
sh_mail
else
echo -e "\nPlease enter a domain!\n"
fi
;;
6)
echo "IP Lookup selected"
read -p "domain or ip (e.g. google.com)? " rev_domain
if [ $rev_domain ]; then
php rev3r531p.php $rev_domain 2>&1 | tee -a reverse_${rev_domain}.log
echo -e "\n${yell}Logfile is saved as reverse_${rev_domain}.log${clear}\n"
scanner=Reverse_${rev_domain}
scanlog=reverse_${rev_domain}.log
sh_mail
else
echo -e "\nPlease enter a domain!\n"
fi
;;
7)
echo "Joomlavs selected"
read -p "domain (e.g. google.com)? " joomla_domain
if [ $joomla_domain ]; then
ruby /root/c0r3/09-cms/joomlavs/joomlavs.rb -u $joomla_domain --scan-all 2>&1 | tee -a joomla_${joomla_domain}.log
echo -e "\n${yell}Logfile is saved as joomla_${joomla_domain}.log${clear}\n"
scanner=Wpscan_${wp_domain}
scanlog=wpscan_${wp_domain}.log
sh_mail
else
echo -e "\nPlease enter a domain!\n"
fi
;;
# 8) - OUTDATE !!
# echo "Joomscann selected"
# read -p "domain (e.g. google.com)? " joomscann_domain
# joomscan -u $joomscan_domain 2>&1 | tee -a joomscan_${joomscan_domain}.log
# echo -e "\n${yell}Logfile is saved as joomscan_${joomscan_domain}.log${clear}\n"
# GITHUB SOURCE https://github.com/rezasp/joomscan
# ;;
a)
echo "All selected"
read -p "domain (e.g. google.com)? " all_domain
if [ $all_domain ]; then
if [ $cms_system = w ]; then
wpscan --url $all_domain --enumerate 2>&1 | tee -a all_${all_domain}.log
# python /root/c0r3/09-cms/CMSmap/cmsmap.py -t $cms_domain -o cmsscan_${cms_domain}.log
# python /root/c0r3/09-cms/D-TECT/d-tect.py
python /root/c0r3/09-cms/WPSeku/wpseku.py -t $all_domain 2>&1 | tee -a all_${all_domain}.log
nikto -host http://$all_domain 2>&1 | tee -a all_${all_domain}.log
php rev3r531p.php 2>&1 | tee -a reverse_${all_domain}.log && echo -e "${all_domain}\n"
echo -e "\n${yell}Logfile is saved as all_${all_domain}.log${clear}\n"
scanner=Allscan_${all_domain}
scanlog=all_${all_domain}.log
sh_mail
else
nikto -host http://$all_domain 2>&1 | tee -a all_${all_domain}.log
php rev3r531p.php 2>&1 | tee -a reverse_${all_domain}.log && echo -e "${all_domain}\n"
ruby /root/c0r3/09-cms/joomlavs/joomlavs.rb -u $all_domain --scan-all 2>&1 | tee -a all_${all_domain}.log
echo -e "\n${yell}Logfile is saved as all_${all_domain}.log${clear}\n"
scanner=Allscan_${all_domain}
scanlog=all_${all_domain}.log
sh_mail
fi
else
echo -e "\nPlease enter a domain!\n"
fi
;;
x)
break
;;
*)
echo $"Usage: $0 {1-7|a|x}"
exit 1
esac
done
| true
|
cd0d12f67d92a3ce6c5fd50e1dd9f2e9b8425075
|
Shell
|
tribblix/build
|
/TRIBwireshark/fixit
|
UTF-8
| 432
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/sh
#
# remove the qt gui which is shipped separately
# in the TRIBwireshark-qt package
#
#
# cannot package files with spaces in the name
#
mv "usr/share/wireshark/profiles/No Reassembly" "usr/share/wireshark/profiles/No_Reassembly"
#
# ship the gui separately to slim the dependency tree
#
rm -f usr/bin/wireshark
rm -f usr/share/applications/wireshark.desktop
rm -f usr/share/applications/org.wireshark.Wireshark.desktop
| true
|
3fa179ebe851da89b2934b8c07a2e73d17939bcf
|
Shell
|
awjohnson/LabManagement
|
/Munki Scripts/UnlockPrinters_installcheck.sh
|
UTF-8
| 665
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# UnlockPrinters_installcheck.sh
# this will run as a Munki install_check script
# exit status of 0 means install needs to run
# exit status NOT 0 means no installation necessary
/usr/bin/security authorizationdb read system.print.operator 2>/dev/null > /private/tmp/preSPO.plist
myGroup=`/usr/bin/defaults read /private/tmp/preSPO.plist group`
exitStatus=0
if [ $myGroup == "_lpoperator" ]; then
/bin/echo "UnlockPrinters: Users can't un-pause print queues."
exitStatus=0
elif [ $myGroup == "everyone" ]; then
/bin/echo "UnlockPrinters: Users can un-pause print queues."
/bin/rm -Rf /private/tmp/preSPO.plist
exitStatus=1
fi
exit $exitStatus
| true
|
9efe83a32fbb042a2e3ba838a6dfa9ebb28239ad
|
Shell
|
abdhaleegit/lkp-tests
|
/lkp-exec/qemu
|
UTF-8
| 7,465
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
. $LKP_SRC/lib/kexec.sh
. $LKP_SRC/lib/http.sh
. $LKP_SRC/lib/qemu.sh
. $LKP_SRC/lib/unit.sh
. $LKP_SRC/lib/job-init.sh
script_name=$(basename $0)
usage()
{
cat <<EOF
Usage: lkp $script_name [-o RESULT_ROOT] [-p VDISK_PATH] [-s SSH_PORT] [-k bzImage] job.sh
options:
-o RESULT_ROOT dir for storing all results
-s SSH_PORT forward ssh port to host
-p VDISK_PATH specify vdisk path
-k bzImage specify bzImage as kernel image
Note:
This script uses qemu to start a VM to run LKP test-job.
It downloads kernel, initrd, bm_initrd, modules_initrd through LKP_SERVER,
and generates lkp-initrd locally and creates job-initrd with 'job.sh' you specified.
You can check test results in dir '/tmp/vm_test_result/' or a RESULT_ROOT you specified.
EOF
exit 1
}
create_lkp_home()
{
[[ -e $HOME/.lkp ]] && return
cat <<-EOF
The approx. disk space requirements are
10M simple boot test in rootfs openwrt
50M simple boot test in rootfs debian
1G plan to run a number of different tests
100G or more IO tests
Please enter a dir with enough disk space, or simply press Enter to accept the default.
You may still symlink $HOME/.lkp to a more suitable place in future.
EOF
local dir
read -p "$HOME/.lkp => " dir
[[ $dir ]] && {
dir=$(realpath $dir)
mkdir -p $dir || exit
ln -sT $dir $HOME/.lkp || exit
}
mkdir -p $HOME/.lkp/cache
mkdir -p $HOME/.lkp/result
mkdir -p $HOME/.lkp/qemu-img
}
replace_script_partition_val()
{
local disk_names=(/dev/vd{a..z})
local job_script="$1"
local nr_hdd_vdisk="$(echo $hdd_partitions | wc -w)"
local nr_ssd_vdisk="$(echo $ssd_partitions | wc -w)"
local nr_swap_vdisk="$(echo $swap_partitions | wc -w)"
local nr_rootfs_vdisk="$(echo $rootfs_partition | wc -w)"
[[ $nr_hdd_partitions ]] || nr_hdd_partitions=$nr_hdd_vdisk
[[ $nr_ssd_partitions ]] || nr_ssd_partitions=$nr_ssd_vdisk
VDISK_NUM=$((nr_hdd_partitions+nr_ssd_partitions+nr_swap_vdisk+nr_rootfs_vdisk))
[[ "$hdd_partitions$ssd_partitions$swap_partitions$rootfs_partition" =~ '/dev/vda' ]] && return
if ((VDISK_NUM)); then
local index=0
local vdisk_hdd_val="${disk_names[@]:$index:$nr_hdd_partitions}"
index=$((index+nr_hdd_partitions))
local vdisk_ssd_val="${disk_names[@]:$index:$nr_ssd_partitions}"
index=$((index+nr_ssd_partitions))
local vdisk_rootfs_val="${disk_names[@]:$index:$nr_rootfs_vdisk}"
index=$((index+nr_rootfs_vdisk))
local vdisk_swap_val="${disk_names[@]:$index:$nr_swap_vdisk}"
sed -i -e "s%export hdd_partitions=.*%export hdd_partitions='${vdisk_hdd_val}'%" \
-e "s%export ssd_partitions=.*%export ssd_partitions='${vdisk_ssd_val}'%" \
-e "s%export swap_partitions=.*%export swap_partitions='${vdisk_swap_val}'%" \
-e "s%export rootfs_partition=.*%export rootfs_partition='${vdisk_rootfs_val}'%" \
$job_script
fi
}
create_job_initrd()
{
[[ -d $CACHE_DIR/$job_initrd_dir ]] && rm -rf "$CACHE_DIR/$job_initrd_dir"
mkdir -p $CACHE_DIR/$job_initrd_dir
cp $job_script $CACHE_DIR/${job_file%.yaml}.sh
archive=$CACHE_DIR/${job_file%.yaml}-$job_sig
(
cd $CACHE_DIR || exit
{
echo lkp
echo lkp/scheduled
echo ${job_initrd_dir#/}
find ${job_initrd_dir#/}/*
} | cpio -o -H newc -F $archive.cpio
gzip -n -9 $archive.cpio
mv -f $archive.cpio.gz $archive.cgz
)
}
get_qemu_kernel_initrd()
{
local lkp_initrd=
local job_initrd=
[[ $opt_kernel_image ]] || download_kernel
download_initrd
initrd_option="-initrd $concatenate_initrd"
}
# limit $1 to MemAvailable/2
max_sane_qemu_memory()
{
local mem_kb="$(to_kb $1)"
export_meminfo
[[ $MemAvailable ]] ||
(( MemAvailable = MemFree + (Active_file/2) + Inactive_file ))
(( mem_kb > MemAvailable / 2 )) && mem_kb=$((MemAvailable / 2))
echo $((mem_kb >> 10))M
}
setup_vdisk_root()
{
vm_name=$testbox
if [[ "$opt_vdiskpath" ]]; then
[[ -d "$opt_vdiskpath" ]] || {
echo "$opt_vdiskpath: no such directory"
exit 1
}
VDISK_ROOT="$opt_vdiskpath"
else
VDISK_ROOT=/tmp/vdisk-$USER
[[ -d $VDISK_ROOT ]] || mkdir -p $VDISK_ROOT
fi
}
while getopts "o:p:s:k:" opt
do
case $opt in
o ) opt_result_root="$OPTARG" ;;
s ) opt_ssh="$OPTARG" ;;
p ) opt_vdiskpath="$OPTARG" ;;
k ) opt_kernel_image="$OPTARG" ;;
? ) usage ;;
esac
done
shift $(($OPTIND-1))
unset DISPLAY
job_script=$1
[ -n "$job_script" ] || usage
sed -i 's/\r//' $job_script
create_lkp_home
export CACHE_DIR=$HOME/.lkp/cache
replace_script_partition_val $job_script
. $job_script export_top_env
[[ $opt_kernel_image ]] && unset modules_initrd
create_lkp_src_initrd()
{
if [[ "$kconfig" =~ ^(i386|x86_64)- ]]; then
local arch=${kconfig%%-*}
else
local arch=$(arch)
fi
if [ -d $LKP_SRC/.git ]; then
local head_commit=$(cd $LKP_SRC && git rev-list -n1 HEAD)
local diff_id=$(cd $LKP_SRC && git diff | git patch-id | cut -f1 -d' ')
local src_sig=${head_commit:0:12}_${diff_id:0:12}
else
local src_sig=$(ls -lR $LKP_SRC|md5sum|cut -c1-12)
fi
lkp_initrd=$CACHE_DIR/lkp-$arch-$src_sig.cgz
[[ -f $lkp_initrd ]] || {
LKP_USER=$user \
$LKP_SRC/sbin/pack -a $arch lkp-src
mv $CACHE_DIR/lkp-$arch.cgz $lkp_initrd
}
}
create_lkp_src_initrd
# create job_initrd.cgz
job_sig=$(md5sum $job_script | cut -c1-5)
job_initrd=$CACHE_DIR/${job_file%.yaml}-$job_sig.cgz
job_initrd_dir=${job_file%/*}
[[ -f $job_initrd ]] || create_job_initrd
# if job.sh not include bootloader_append entry, add default content
if [ -n "$bootloader_append" ]; then
bootloader_append=$(echo "$bootloader_append" | tr '\n' ' ')
else
bootloader_append="root=/dev/ram0 job=$job_file user=$user ARCH=x86_64 kconfig=x86_64-rhel commit=051d101ddcd268a7429d6892c089c1c0858df20b branch=linux-devel/devel-hourly-2015033109 max_uptime=1247 RESULT_ROOT=$result_root earlyprintk=ttyS0,115200 rd.udev.log-priority=err systemd.log_target=journal systemd.log_level=warning debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100 panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0 console=ttyS0,115200 console=tty0 vga=normal rw"
fi
# create vm result path
if [ -z $opt_result_root ]; then
vm_result_path="/tmp/vm_test_result/$testcase-$(date '+%F-%T')"
else
vm_result_path=$opt_result_root
fi
mkdir -p $vm_result_path
# download kernel and initrds, then cat them
if [[ $HTTP_PREFIX ]]; then
: # use environment value
elif [[ $HOSTNAME = inn ]]; then
LKP_SERVER=inn
elif grep -q intel.com /etc/resolv.conf; then
LKP_SERVER=bee.sh.intel.com
else
LKP_SERVER=
HTTP_PREFIX=https://github.com/0day-ci/lkp-qemu/raw/master
fi
[ -d $CACHE_DIR ] || mkdir $CACHE_DIR
LKP_USER="lkp"
run_kvm()
{
trap - EXIT
local mem_mb="$(max_sane_qemu_memory $memory)"
local mount_tag=9p/virtfs_mount
model='qemu-system-x86_64 -enable-kvm'
netdev_option="-device e1000,netdev=net0 "
netdev_option+="-netdev user,id=net0"
KVM_COMMAND=(
$model
-fsdev local,id=test_dev,path=$vm_result_path,security_model=none -device virtio-9p-pci,fsdev=test_dev,mount_tag=$mount_tag
-kernel ${opt_kernel_image:-$kernel_file}
-append "$bootloader_append ip=dhcp result_service=$mount_tag"
$initrd_option
-smp $nr_cpu
-m $mem_mb
-no-reboot
-watchdog i6300esb
-rtc base=localtime
$qemu_netdev_option
$qemu_console_option
$QEMU_DRIVE_OPTION
)
echo "exec command: ${KVM_COMMAND[@]}"
"${KVM_COMMAND[@]}"
}
get_qemu_kernel_initrd $lkp_initrd $job_initrd
setup_qemu_console
setup_qemu_netdev
setup_vdisk_root
setup_qemu_drives
run_kvm
| true
|
ff610b7781d7d049924f740d9136f78d45b35478
|
Shell
|
ccpgames/pypackage
|
/demo.sh
|
UTF-8
| 3,906
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# This script will create a new venv and install pypackage inside it.
# It then creates a bunch of example packages, all inside a new example
# directory. The hello-world package should be installed, the tested_mod and
# detected_pkg packages should only be tested and built. The has_data package
# should also be installed and should have a script entry point of `read_data`
# which should dump 100B of static random data to stdout.
#
# Make sure to re-activate the venv after running this script with:
#
# $ source example/bin/activate
#
# then you can inspect the installed state of the demo packages
#
#
# cleanup and setup
#
rm -rf example
virtualenv --python=python2.7 --no-site-packages example
source example/bin/activate
pip install pypackage
cd example
#
# Create hello-world
#
mkdir hello-world
cd hello-world
cat <<EOF > my_module.py
def my_function():
return "my_module is correctly installed"
EOF
#
# Install hello-world
#
py-install
# verify the install
cd ..
python -c 'import my_module; print(my_module.my_function())'
#
# Create tested_mod
#
mkdir tested_mod
cd tested_mod
# make the tests
mkdir tests
cd tests
cat <<EOF > test_tested_mod.py
import pytest
import tested_mod
def test_returns_true():
assert tested_mod.returns_true()
def test_returns_false():
assert tested_mod.returns_false() is False
EOF
cd ..
# make the module
cat <<EOF > tested_mod.py
def returns_true():
return True
def returns_false():
return False
EOF
# inform pypackage
cat <<EOF > pypackage.meta
{"test_runner": "pytest"}
EOF
# run the tests
py-test
# build a release
py-build
# show results
ls -lh dist
cd ..
#
# Create detected_pkg
#
# create the directories
mkdir detected_pkg
cd detected_pkg
mkdir detected_pkg
mkdir tests
# write the package files
cd detected_pkg
cat <<EOF > __init__.py
# coding: utf-8
from __future__ import unicode_literals
__version__ = "0.0.1"
__author__ = "joe blow"
__email__ = "jðe@hotmail.com"
EOF
cat <<EOF > some_mod.py
def is_true():
return True
def is_none():
return None
EOF
cd ..
# write the test file
cd tests
cat <<EOF > test_some_mod.py
import pytest
from detected_pkg import some_mod
def test_is_true():
assert some_mod.is_true()
def test_is_none():
assert some_mod.is_none() is None
EOF
cd ..
# write the pypackage.meta
cat <<EOF > pypackage.meta
{"test_runner": "pytest"}
EOF
# run tests
py-test
# create releases
py-build
# save a copy of the setup.py in the src
py-build -s
cd ..
#
# pure binary project
#
mkdir pure_binary
cd pure_binary
dd if=/dev/random of=binary_blob.bin bs=10 count=10
py-build
pip install dist/*.tar.gz
cd /
python -c 'from pkg_resources import Requirement, resource_filename; print(resource_filename(Requirement.parse("pure_binary"), "binary_blob.bin"))'
cd -
cd ..
#
# data project
#
mkdir -p has_data/has_data/data
cd has_data/has_data
touch __init__.py
# create data
dd if=/dev/random of=data/file bs=10 count=10
# create python module to read and display data
cat <<EOF > read_data.py
import os
def to_stdout():
random_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"data", "file")
print("your random data is:")
with open(random_file) as random_data:
print(random_data.read())
EOF
cd ..
# make a script, (you could use an entry point but this shows the auto pickup)
mkdir bin
cat <<EOF > bin/random_data
#!/usr/bin/env python
from has_data import read_data
if __name__ == "__main__":
read_data.to_stdout()
EOF
chmod +x bin/random_data
cat <<EOF > pypackage.meta
{
"version": "1.0.0",
"packages": "has_data",
"source_label": "some long git commit hash",
"source_url": "https://yourgitserver/a_shorter_hash"
}
EOF
py-install
py-info has-data
random_data
echo "example packages created! use"
echo " source example/bin/activate"
echo "to activate the example virtualenv"
| true
|
a1f9c65c7763444fc8dff41c7474c5c60be517c8
|
Shell
|
FlyingPumba/tp3-algo3
|
/tests/rename-tests.sh
|
UTF-8
| 215
| 2.84375
| 3
|
[] |
no_license
|
a=1
for i in *.in; do
new_in=$(printf "%03d.in" "$a") #03 pad to length of 4
new_out=$(printf "%03d.out" "$a") #03 pad to length of 4
mv -- "$i" "$new_in"
mv -- "${i%.*}.out" "$new_out"
let a=a+1
done
a=1
| true
|
ba24578e33734292dfc738c4b8c047b6bf2a8d57
|
Shell
|
msimerson/Mail-Toaster-6
|
/provision/telegraf.sh
|
UTF-8
| 697
| 3
| 3
|
[] |
permissive
|
#!/bin/sh

# Provision a "telegraf" jail for Mail Toaster 6.
# mail-toaster.sh supplies the stage_* / tell_status / jail helpers used below.
. mail-toaster.sh || exit

# No extra jail(8) start options or jail.conf stanzas are needed.
export JAIL_START_EXTRA=""
export JAIL_CONF_EXTRA=""
# Install the telegraf package into the staged jail and enable its
# rc(8) service so it starts on boot.
install_telegraf()
{
	tell_status "installing telegraf"
	stage_pkg_install telegraf || exit

	tell_status "Enable telegraf"
	stage_sysrc telegraf_enable=YES
}
# Point telegraf's InfluxDB output at the toaster's InfluxDB address
# (172.16.15.50) instead of the packaged default of 127.0.0.1.
# Fixes vs. original: the sed command was broken — the continuation
# backslash before "$_conf" was missing (so the filename became a
# separate command), and the expression started with 's/urls.*8086/'
# where an address '/urls.*8086/' was clearly intended.
config_telegraf()
{
	# telegraf.conf inside the staged jail filesystem
	local _conf="$STAGE_MNT/usr/local/etc/telegraf.conf"

	# On the "urls" line for the :8086 output, swap localhost for the
	# InfluxDB jail IP; keep a .bak copy of the original file.
	sed -i.bak \
		-e "/urls.*8086/ s/127.0.0.1/172.16.15.50/" \
		"$_conf"
}
# Start the telegraf service inside the staged jail.
start_telegraf()
{
	stage_exec service telegraf start
}
# Verify the telegraf process is running in the staged jail; the helper
# exits non-zero (failing the provision) if it is not.
test_telegraf()
{
	stage_test_running telegraf
}
# Main provisioning sequence: stage, install, configure, start, verify,
# then promote the staged jail to production.
base_snapshot_exists || exit
create_staged_fs telegraf
start_staged_jail telegraf
install_telegraf
config_telegraf
start_telegraf
test_telegraf
promote_staged_jail telegraf
| true
|
9d86880e4b769e04ec12180bcf8aa1f0622f98b0
|
Shell
|
aslammncs/configure-server
|
/install.sh
|
UTF-8
| 900
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision an Ubuntu host: refresh packages, make sure chef-solo,
# python2.7, pip and the AWS CLI are present, then run the chef recipes.

## 1. update ubuntu packages
sudo aptitude update
sudo aptitude safe-upgrade

## 2. install chef (and the tools the recipes depend on)
# Each install step is skipped when its binary already exists.
chef_bin="/usr/bin/chef-solo"
python_bin="/usr/bin/python"
pip_bin="/usr/bin/pip"
aws_bin="/usr/local/bin/aws"

echo "Installing chef ... if not installed -"
echo ""
if ! [ -f "$chef_bin" ]; then
    sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash
fi

echo "Installing python2.7 ... if not installed -"
echo ""
if ! [ -f "$python_bin" ]; then
    sudo apt-get install -y python2.7
fi

echo "Installing pip ... if not installed -"
echo ""
if ! [ -f "$pip_bin" ]; then
    sudo apt-get install -y python-pip
fi

echo "Installing awscli ... if not installed -"
echo ""
if ! [ -f "$aws_bin" ]; then
    sudo sudo pip install awscli
fi

## 3. Execute server configurations recipes
echo "Executing the server configurations recipes ..."
sudo "$chef_bin" -c solo.rb -j node.json
| true
|
b51171235ac02c189c51459e6185fdcadb4f1384
|
Shell
|
catketchup/cs
|
/linux/shell_scripts/test1.sh
|
UTF-8
| 1,596
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Shell-syntax tutorial: strings, arrays, positional parameters,
# arithmetic, and test operators.
# Fix vs. original: the "sum of the two numbers" echo printed a bare '$'
# instead of expanding $val.
echo "Hello World!"

my_name="ketchup"
echo ${my_name}
str="Hello, my name is \"$my_name\"! \n"
echo -e ${str}
# -e makes echo interpret backslash escapes such as \n
echo ${#my_name}
echo ${my_name:1:3}

# array
array_name=(1 2 3 4)
array_name[4]=5
echo ${array_name[4]}
echo ${array_name[@]}

for i in {1..5}; do
    echo "for $i"
done

# ':' with a heredoc is a multi-line comment idiom
:<<EOF
This is the first comment.
This is the second comment.
EOF

echo "shell pass variables";
echo "the name of file: $0"
echo "the first parameter: $1"
echo "the second parameter: $2"
echo -e "the third parameter: $3 \n"
echo -e "the number of parameters: $# \n"
echo -e "print the parameters as a string: $* \n"

echo "-- \$* 演示 ---"
for i in "$*"; do
    echo $i
done

echo "-- \$@ 演示 ---"
for i in "$@"; do
    echo $i
done
echo -e "\n"
echo "${1}"

val=`expr 2 + 2`
echo "the sum of the two numbers: $val"

a=10
b=20
val=`expr $a + $b`
echo "a + b: $val"

if [ $a == $b ]
then
    echo "a equals b"
fi
if [ $a != $b ]
then
    echo "a doesn't equals b "
fi
if [ $a -eq $b ]
then
    echo "$a -eq $b : a 等于 b"
else
    echo "$a -eq $b: a 不等于 b"
fi
if [ $a -ne $b ]
then
    echo "$a -ne $b: a 不等于 b"
else
    echo "$a -ne $b : a 等于 b"
fi

# boolean operators (-o is logical OR inside [ ])
if [ $a -lt 100 -o $b -gt 15 ]
then
    echo "$a is less than 100 and $b is greater than 15: return true"
else
    echo "$a is less than 100 and $b is greater than 15: return false"
fi

# logic operator
# string operator
a='abc'
b='dfg'
if [ $a = $b ]
then
    echo "$a = $b: a equals b"
else
    echo "$a !+ $b: a doesn't b"
fi
# file test operators
| true
|
0ea9e5e5dbc79704aea01fed64f5fa4ff0497c12
|
Shell
|
shooteram/dotfiles
|
/func/.redmine.sh
|
UTF-8
| 2,546
| 3.5625
| 4
|
[] |
no_license
|
# Entry point: fetch the Redmine issue for the current branch (via
# get_info) and print a jq-selected piece of it.
#   redmine <url>        -> check out the feature branch for that issue
#   redmine lastmessage  -> last journal entry of the issue
#   redmine link         -> URL of the issue on $REDMINE_SERVER
#   redmine . | redmine  -> full JSON payload
#   redmine <field>      -> .issue.<field> from the payload
# NOTE(review): the first case pattern uses '(...)' grouping, which is
# zsh glob syntax (bash would need extglob) — this file appears to be
# sourced from zsh (oh-my-zsh's git_prompt_info is used below); confirm.
redmine() {
	get_info
	case $1 in
		[a-z\/.:]*issues\/([0-9]*)) handle_link $1; return;;
		lastmessage) _selector=".issue.journals | .[length-1]";;
		link) echo "https://${REDMINE_SERVER}/issues/${_redmine_id}"; return;;
		.) _selector=".";;
		"") _selector=".";;
		*) _selector=".issue.$1";;
	esac

	# _response is the cached JSON set by get_info/get_redmine
	jq -r $_selector <<< $_response
}
# If the current branch is a Redmine feature branch, resolve the issue id,
# build the API URL (journals included) and load the issue JSON into
# $_response via get_redmine. Requires $REDMINE_SERVER; logs and returns 1
# when it is missing.
get_info() {
	if is_redmine $1; then
		if [ ! -z "$REDMINE_SERVER" ]; then
			# sets $_redmine_id from the branch name (no echo)
			redmine_id true
			_address="https://${REDMINE_SERVER}/issues/${_redmine_id}.json?include=journals"
		else
			echo "[$(date)] REDMINE: The environement variable 'REDMINE_SERVER' is \
nowhere to be seen but it is required." >> /var/log/lastlog
			return 1
		fi

		get_redmine
	fi
}
# True when the current git branch (as shown by git_prompt_info) is a
# Redmine feature branch, i.e. its name contains "/RM".
is_redmine() {
	case "$(git_prompt_info)" in
		*"/RM"*) return 0 ;;
		*) return 1 ;;
	esac
}
# Load the Redmine issue JSON for $_redmine_id from a /tmp cache file,
# refreshing it via send_request when the file is missing or older than
# 30 minutes. Sets $_filename and $_response.
# Fix vs. original: the fresh-cache branch was written as
# 'else _response=$(cat ${_filename}) fi' on one line with no separator
# before 'fi' — a syntax error. Also quotes expansions and replaces
# 'expr' with shell arithmetic.
get_redmine() {
	_filename="/tmp/.redmine_${_redmine_id}"

	if [ -f "$_filename" ]; then
		_time_diff=1800                        # cache TTL: 30 minutes
		_cur_time=$(date +%s)
		_file_time=$(stat "$_filename" -c %Y)  # NOTE: GNU stat syntax
		_second_time_diff=$(( _cur_time - _file_time ))

		if [[ $_second_time_diff -gt $_time_diff ]]; then
			echo "[$(date)] REDMINE: New update: file '${_filename}' exceeded it's time \
diff (by ${_second_time_diff} seconds)" >> /var/log/lastlog
			send_request
		else
			# cache is still fresh: reuse it
			_response=$(cat "${_filename}")
		fi
	else
		echo "[$(date)] REDMINE: New update: file '${_filename}' didn't existed \
before" >> /var/log/lastlog
		send_request
	fi
}
# Fetch $_address from the Redmine API into the cache file $_filename and
# load it into $_response. Requires $REDMINE_API_KEY; logs and returns 1
# when it is missing.
send_request() {
	if [ ! -z "$REDMINE_API_KEY" ]; then
		curl --silent -H 'Content-Type: application/json' \
			-H "X-Redmine-API-Key: ${REDMINE_API_KEY}" \
			$_address -o "${_filename}"

		_response=$(cat ${_filename})
	else
		echo "[$(date)] REDMINE: The environement variable 'REDMINE_API_KEY' is \
nowhere to be seen but it is required." >> /var/log/lastlog
		return 1
	fi
}
# Given a Redmine issue URL, extract the issue id and switch to (creating
# if needed) the matching feature/RM<id> branch.
# NOTE(review): assumes issue ids are exactly 5 digits — confirm.
handle_link() {
	_redmine_id=$(echo "$1"| grep -Eo "[[:digit:]]{5}")
	_branch="feature/RM${_redmine_id}"

	# try an existing branch first; fall back to creating it
	git checkout $_branch &> /dev/null
	if [[ ! $? -eq 0 ]]; then
		git checkout -b $_branch &> /dev/null
	fi
}
# Print the feature/RM<id> branch name for the current Redmine branch;
# prints nothing when the branch is not a Redmine feature branch.
feature() {
	if is_redmine $1; then
		redmine_id true
		echo feature/RM$_redmine_id
	fi
}
# Extract the (assumed 5-digit) Redmine issue id from the current branch
# name into $_redmine_id. With no argument the id is also echoed; any
# argument (e.g. 'true') suppresses the echo — callers then read the
# global instead.
redmine_id() {
	_redmine_id=$(git_prompt_info | grep -Eo "[[:digit:]]{5}")

	if [[ "" = $1 ]]; then
		echo $_redmine_id
	fi
}

# Clear the scratch globals used above when this file is sourced.
unset _selector _response _address _redmine_id _branch _filename _time_diff _cur_time _file_time _second_time_diff
| true
|
c5bf2f012881a5a6519cdcd721f8197d4ce4f8c0
|
Shell
|
ling32945/archlinux-dotfiles
|
/.bashrc
|
UTF-8
| 5,563
| 3.5
| 4
|
[] |
no_license
|
#              _         _
#  | |__   __ _ ___| |__  _ __ ___
#  | '_ \ / _` / __| '_ \| '__/ __|
#  | |_) | (_| \__ \ | | | | | (__
#  |_.__/ \__,_|___/_| |_|_| \___|

# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples

# If not running interactively, don't do anything
#[ -z "$PS1" ] && return

# don't put duplicate lines in the history. See bash(1) for more options
# don't overwrite GNU Midnight Commander's setting of `ignorespace'.
HISTCONTROL=$HISTCONTROL${HISTCONTROL+:}ignoredups
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoreboth

# append to the history file, don't overwrite it
shopt -s histappend

# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)

# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize

# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"

# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
    xterm-color) color_prompt=yes;;
esac

# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
# force_color_prompt=yes

if [ -n "$force_color_prompt" ]; then
    if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
	# We have color support; assume it's compliant with Ecma-48
	# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
	# a case would tend to support setf rather than setaf.)
	color_prompt=yes
    else
	color_prompt=
    fi
fi

if [ "$color_prompt" = yes ]; then
    PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
    PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt

# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
    PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
    ;;
*)
    ;;
esac

# ANSI color escape shortcuts used for the custom prompt below.
red='\[\e[0;31m\]'
RED='\[\e[1;31m\]'
blue='\[\e[0;34m\]'
BLUE='\[\e[1;34m\]'
cyan='\[\e[0;36m\]'
CYAN='\[\e[1;36m\]'
green='\[\e[0;32m\]'
GREEN='\[\e[1;32m\]'
yellow='\[\e[0;33m\]'
YELLOW='\[\e[1;33m\]'
PURPLE='\[\e[1;35m\]'
purple='\[\e[0;35m\]'
nc='\[\e[0m\]'

# Red prompt for root, purple/cyan/green for normal users.
if [ "$UID" = 0 ]; then
    PS1="$red\u$nc@$red\H$nc:$CYAN\w$nc$red#$nc "
else
    PS1="$PURPLE\u$nc@$CYAN\H$nc:$GREEN\w$nc$GREEN\$$nc "
fi

# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
    test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
    alias ls='ls --color=auto'
    #alias dir='dir --color=auto'
    #alias vdir='vdir --color=auto'

    alias grep='grep --color=auto'
    alias fgrep='fgrep --color=auto'
    alias egrep='egrep --color=auto'
fi

# load common alias
if [ -f ${HOME}/.common/.common_alias ]; then
	source ${HOME}/.common/.common_alias
fi

# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.

if [ -f ~/.bash_aliases ]; then
    . ~/.bash_aliases
fi

# Default parameter to send to the "less" command
# -R: show ANSI colors correctly; -i: case insensitive search
LESS="-R -i"

# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
    . /etc/bash_completion
fi

# Add sbin directories to PATH. This is useful on systems that have sudo
[ -z "${PATH##*/sbin*}" ] || PATH=$PATH:/sbin:/usr/sbin

stty -ixon # Disable ctrl-s and ctrl-q.
shopt -s autocd #Allows you to cd into directory merely by typing the directory name.
HISTSIZE= HISTFILESIZE= # Infinite history.

# Setting Bash prompt. Capitalizes username and host if root user (my root user uses this same config file).
if [ "$EUID" -ne 0 ]
	then export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][\[$(tput setaf 3)\]\u\[$(tput setaf 2)\]@\[$(tput setaf 4)\]\h \[$(tput setaf 5)\]\W\[$(tput setaf 1)\]]\[$(tput setaf 7)\]\\$ \[$(tput sgr0)\]"
	else export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][\[$(tput setaf 3)\]ROOT\[$(tput setaf 2)\]@\[$(tput setaf 4)\]$(hostname | awk '{print toupper($0)}') \[$(tput setaf 5)\]\W\[$(tput setaf 1)\]]\[$(tput setaf 7)\]\\$ \[$(tput sgr0)\]"
fi

# Tell GnuPG which terminal to use for pinentry prompts.
export GPG_TTY=$(tty)

# dmenu
# Import the colors.
. "${HOME}/.cache/wal/colors.sh"

alias dmenu='dmenu_run -nb "$color0" -nf "$color7" -sb "$color4" -sf "$color0" -fn "Hack:11"'

#shdl() { curl -O $(curl -s http://sci-hub.tw/"$@" | grep location.href | grep -o http.*pdf) ;}

# Import colorscheme from 'wal' asynchronously
# &   # Run the process in the background.
# ( ) # Hide shell job control messages.
(cat ~/.cache/wal/sequences &)

# indicate in ranger shell
if [ -n "$RANGER_LEVEL" ]; then export PS1="\e[1;32m[ranger]$PS1"; fi

export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"  # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"  # This loads nvm bash_completion

# Powerline status line for bash.
powerline-daemon -q
POWERLINE_BASH_CONTINUATION=1
POWERLINE_BASH_SELECT=1
. /usr/lib/python3.7/site-packages/powerline/bindings/bash/powerline.sh

source ~/.shortcuts

# dircolors configuration
eval `dircolors ~/.config/.dir_colors`
| true
|
76d7314c4c2998d63bd1006a61e07db1c7237bf6
|
Shell
|
aminalaee/dashboard
|
/scripts/install
|
UTF-8
| 229
| 3.046875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh -e
# Install project dependencies. Outside CI a local virtualenv is created
# first; inside GitHub Actions the runner's environment is used directly.

PYTHON="python3"
REQUIREMENTS="requirements.txt"
VENV="venv"

set -x

if [ -n "$GITHUB_ACTIONS" ]; then
    # CI already provides an isolated environment.
    PIP="pip"
else
    "$PYTHON" -m venv "$VENV"
    PIP="$VENV/bin/pip"
fi

"$PIP" install -r "$REQUIREMENTS"
| true
|
07c4b795790c818b71282b7c8bef9255b76c6899
|
Shell
|
ptrxwsmitt/helm-jitsi
|
/shell/create-ingress-certs.sh
|
UTF-8
| 730
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Create or update the Kubernetes TLS secret "ingress-certs" from a
# certificate file and a private-key file.
#
# Usage: create-ingress-certs.sh <CERT_FILE> <KEY_FILE>
#
# Fixes vs. original: drops the useless `cat | base64` pipelines and the
# non-portable `[ '' == ... ]` string tests.

CERT_FILE=$1
KEY_FILE=$2

# print usage if no input is given or -h, --help is used
# NOTE(review): missing arguments exit 0 (kept for backward compatibility);
# a non-zero status would be more conventional.
if [ -z "$CERT_FILE" ] || [ -z "$KEY_FILE" ] || [ '-h' = "$1" ] || [ '--help' = "$1" ]; then
  echo -e "USAGE: ${BASH_SOURCE[0]} <CERT_FILE> <KEY_FILE>"
  exit 0
fi

# check if files exist
if [ ! -f "$CERT_FILE" ]; then
  echo 'cert file does not exist'
  exit 1
fi
if [ ! -f "$KEY_FILE" ]; then
  echo 'key file does not exist'
  exit 2
fi

# turn file content into base64 (single line) and apply the secret
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: ingress-certs
data:
  tls.crt: $(base64 --wrap=0 < "${CERT_FILE}")
  tls.key: $(base64 --wrap=0 < "${KEY_FILE}")
type: kubernetes.io/tls
EOF
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.