blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d825be9e502895a57935d0a084c8c7d04cda6e13
|
Shell
|
ryeo/android-websocket-client
|
/test_node_server/cert/create_cert.sh
|
UTF-8
| 311
| 2.828125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Generate a self-signed certificate and its RSA private key for the
# test node server. Outputs $FILE_KEY and $FILE_CERT in the current
# directory; the certificate is valid for $DAYS days.
FILE_KEY=privkey.pem
FILE_CERT=cacert.pem
DAYS=365
# Generates private key without password protection.
# openssl can derive the public key from the private key.
# Abort if key generation fails so we never sign with a stale key.
openssl genrsa -out "$FILE_KEY" 2048 || exit 1
# Creates self-signed certificate (openssl prompts for subject fields).
openssl req -new -x509 -days "$DAYS" -key "$FILE_KEY" -out "$FILE_CERT"
| true
|
4f2c5176dfe1850fc24edcdbef0f6edd5847585c
|
Shell
|
robertpeters9/tde-packaging
|
/mandriva/2010.2/kdenetwork/kdenetwork-lisa
|
UTF-8
| 3,402
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Startup script for LISa
#
# chkconfig: 345 92 8
# description: Starts and stops the LAN Information Server used \
# to provide a LAN browser.
# processname: lisa
#
# based on rclisa Version 0.1
# 2001 by Marcus Thiesen (marcus@thiesenweb.de) for SuSE Linux 7.1
# This is free and comes with absolutely no WARRANTY
# adapted for Mandrake 8.0 by Patrick Alberts (mandrake@alberts.org)
# Updated for Mandrake 9.0 by Buchan Milne (bgmilne@linux-mandrake.com)
#
### BEGIN INIT INFO
# Provides: lisa
# Required-Start: $network
# Required-Stop: $network
# Default-Start: 3 5
# Short-Description: LISa LAN Information Server
# Description: Starts and stops the LAN Information Server used
# to provide a LAN browser.
### END INIT INFO
CONFIG_FILE=/etc/lisarc
prog="lisa"
# Source function library: provides the `action` and `gprintf` helpers
# used below. Without a known init function library there is nothing
# useful this script can do, so it exits quietly (status 0).
if [ -f /etc/init.d/functions ] ; then
. /etc/init.d/functions
elif [ -f /etc/rc.d/init.d/functions ] ; then
. /etc/rc.d/init.d/functions
else
exit 0
fi
# Get network config (expected to define $NETWORKING).
. /etc/sysconfig/network
# Check that networking is up.
# NOTE(review): ${NETWORKING} is unquoted — if sysconfig/network does
# not define it this test becomes `[ = "no" ]` and errors; confirm the
# file always sets NETWORKING on supported systems.
if [ ${NETWORKING} = "no" ]
then
exit 0
fi
# check how we were called
# Dispatch on the init action requested by the caller.
case "$1" in
start)
    PID=$(pidof -s lisa)
    if [ -z "$PID" ]; then
        if [ -e /opt/kde3/bin/lisa ]; then
            # Check for config file and generate one if needed.
            if [ ! -e "$CONFIG_FILE" ]; then
                gprintf "No config file, generating one, please run kcontrol as root to customise\n"
                # See http://lisa-home.sourceforge.net/ for more details
                # on the config file format.
                # PingAddresses/AllowedAddresses should only be route
                # entries that are not gateways,loopback or multicast:
                IPNMS_ALL=$(/sbin/route -n |awk 'BEGIN {ORS=";"};$4=="U"&&$8!="lo"&&$1!~/224.0.0.0/ {print $1"/"$3}')
                # BroadcastNetwork should be only the internal subnet,
                # take first route from above:
                IPNMS=$(/sbin/route -n |awk ' $4=="U"&&$8!="lo"&&$1!~/224.0.0.0/&&$8!~/ppp.*/ {print $1"/"$3}'|head -n1)
                echo "SecondWait=-1"> $CONFIG_FILE
                echo "SearchUsingNmblookup=1">> $CONFIG_FILE
                echo "DeliverUnnamedHosts=0" >>$CONFIG_FILE
                echo "FirstWait=30" >> $CONFIG_FILE
                echo "MaxPingsAtOnce=256" >>$CONFIG_FILE
                echo "UpdatePeriod=300" >> $CONFIG_FILE
                #echo "PingAddresses=$IPNMS_ALL">> $CONFIG_FILE
                echo "AllowedAddresses=$IPNMS_ALL" >> $CONFIG_FILE
                echo "BroadcastNetwork=$IPNMS" >>$CONFIG_FILE
                #echo "PingNames=" >> $CONFIG_FILE
            fi
            if [ -e "$CONFIG_FILE" ]; then
                action "Starting %s: " "$prog" /bin/true
                /opt/kde3/bin/lisa -c "$CONFIG_FILE" >/dev/null 2>&1
            else
                action "No configuration available, not starting LISa" /bin/false
            fi
        else
            action "Starting %s: binaries not found " "$prog" /bin/false
        fi
    else
        # Bug fix: the arguments were swapped — the format string
        # expects the program name first, then the PID.
        action "Starting %s: already running (%s) " "$prog" "$PID" /bin/false
    fi
    touch /var/lock/subsys/lisa
    ;;
stop)
    PID=$(pidof -s lisa)
    if [ "$PID" ]; then
        action "Stopping %s: " "$prog" kill -3 $PID
    fi
    rm -f /var/lock/subsys/lisa
    ;;
status)
    PID=$(pidof -s lisa)
    if [ "$PID" ]; then
        gprintf "%s is running! ($PID)\n" "$prog"
        # SIGUSR1 asks lisa to dump status information.
        /usr/bin/kill -s SIGUSR1 $PID
        sleep 3
    else
        gprintf "%s is not running!\n" "$prog"
    fi
    ;;
restart)
    $0 stop && $0 start
    ;;
refresh)
    PID=$(pidof -s lisa)
    if [ "$PID" ]; then
        gprintf "Sending %s a SIGHUP ($PID)\n" "$prog"
        kill -SIGHUP $PID
    else
        gprintf "%s is not running!\n" "$prog"
    fi
    ;;
*)
    gprintf "usage: %s {start|stop|status|refresh|restart}\n" "$0"
    ;;
esac
exit 0
| true
|
aa37d1d62d45d6b78e3b74be74da6a39c2167c58
|
Shell
|
ludovicocaldara/racattack-vagrant
|
/OracleLinux/racattack12cR1/stagefiles/named_slave.sh
|
UTF-8
| 3,579
| 3.09375
| 3
|
[] |
no_license
|
# Configure this host as a slave DNS (bind/named) for a RAC cluster.
# Takes 13 positional parameters describing the cluster layout.
# I need to change and include the following parameters:
# 1. cl_prefix
# 2. cl_domain (instead of racattack)
# 3. public_lan
# 4. private_lan
# 5. hub count
# 6. hub starting count (51)
# 7. vip starting count (61)
# 8. leaf count
# 9. leaf starting count (81)
# 10. app count
# 12. scan count
# 11. app starting count (101)
# 13. scan starting count (251)
# Print usage and abort unless exactly 13 arguments are supplied.
if [ $# -ne 13 ] ; then
cat - <<EOF
Usage: $0 <with 13 parameters:>
# 1. cl_prefix
# 2. cl_domain (instead of racattack)
# 3. public_lan
# 4. private_lan
# 5. hub count
# 6. hub starting count (51)
# 7. vip starting count (61)
# 8. leaf count
# 9. leaf starting count (81)
# 10. app count
# 11. app starting count (101)
# 12. scan count
# 13. scan starting count (251)
EOF
exit 1
fi
# Name the positional parameters.
# Bug fix: positions >= 10 require braces — bare $10 expands as
# "${1}0" (first argument with a literal "0" appended), so app/scan
# counts were silently wrong.
cl_prefix=$1
cl_domain=$2
public_lan=$3
private_lan=$4
hub_count=$5
hub_starting_count=$6
vip_starting_count=$7
leaf_count=$8
leaf_starting_count=$9
app_count=${10}
app_starting_count=${11}
scan_count=${12}
scan_starting_count=${13}
# First three octets of each LAN, used to build host addresses below.
base_public=`echo $public_lan | awk -F. '{printf ("%d.%d.%d.",$1,$2,$3) }'`
base_private=`echo $private_lan | awk -F. '{printf ("%d.%d.%d.",$1,$2,$3) }'`
# The slave DNS lives on the second hub node; the master on the first.
slave_ip="${base_public}$(($hub_starting_count+1))"
master_ip="${base_public}${hub_starting_count}"
# Idempotence guard: nothing to do if this host already has the zone.
if [ -f /var/named/${cl_domain} ];then
echo "named already configured in $HOSTNAME"
exit 0
fi
chkconfig named on
service named stop
# Recreate an empty, group-writable zone file owned by group "named"
# so zone transfers from the master can write it.
rm -f /var/named/${cl_domain} /var/named/in-addr.arpa
touch /var/named/${cl_domain}
chmod 664 /var/named/${cl_domain}
chgrp named /var/named/${cl_domain}
chmod g+w /var/named
chmod g+w /var/named/${cl_domain}
cp /etc/named.conf /etc/named.conf.ori
# Earlier sed-based in-place edit of named.conf, kept for reference:
#grep '192.168.78.52' /etc/named.conf && echo "already configured " || sed -i -e 's/listen-on .*/listen-on port 53 { 192.168.78.52; 127.0.0.1; };/' \
#-e 's/allow-query .*/allow-query { 192.168.78.0\/24; localhost; };/' -e 's/type master;/type slave;\n masters {192.168.78.51; };/' \
#-e '$azone "racattack" {\n type slave;\n masters { 192.168.78.51; };\n file "racattack";\n};\n\n zone "in-addr.arpa" {\n type slave;\n masters { 192.168.78.51; };\n file "in-addr.arpa";\n};' \
#/etc/named.conf
### CREATING THE NEW named.conf
# Unquoted EOF: $slave_ip, $public_lan, $cl_domain and $master_ip are
# expanded into the generated configuration.
cat <<EOF > /etc/named.conf
options {
listen-on port 53 { $slave_ip; };
listen-on-v6 port 53 { ::1; };
directory "/var/named";
dump-file "/var/named/data/cache_dump.db";
statistics-file "/var/named/data/named_stats.txt";
memstatistics-file "/var/named/data/named_mem_stats.txt";
allow-query { $public_lan/24; localhost; };
allow-transfer { $public_lan/24; };
recursion yes;
dnssec-enable yes;
dnssec-validation yes;
dnssec-lookaside auto;
/* Path to ISC DLV key */
bindkeys-file "/etc/named.iscdlv.key";
managed-keys-directory "/var/named/dynamic";
};
logging {
channel default_debug {
file "data/named.run";
severity dynamic;
};
};
zone "." IN {
type hint;
file "named.ca";
};
include "/etc/named.rfc1912.zones";
include "/etc/named.root.key";
zone "$cl_domain" {
type slave;
masters { ${master_ip}; };
file "$cl_domain";
};
zone "in-addr.arpa" {
type slave;
masters { ${master_ip}; };
file "in-addr.arpa";
};
EOF
# Generate an rndc key on first run; named must be able to read it.
if [ ! -f /etc/rndc.key ] ; then
rndc-confgen -a -r /dev/urandom
chgrp named /etc/rndc.key
chmod g+r /etc/rndc.key
service named restart
else
service named restart
fi
# final command must return success or vagrant thinks the script failed
echo "successfully completed named steps"
| true
|
17eee9efb07c199195f977d7def224d2fa605de1
|
Shell
|
ibis-project/docker-kudu
|
/bin/start-kudu.sh
|
UTF-8
| 690
| 3.203125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Start Kudu daemons under supervisord: optionally the master (when
# KUDU_MASTER=true), then the tablet server once the master's RPC
# (7051) and web UI (8051) ports respond. Exits non-zero on timeout.

if [[ "$KUDU_MASTER" = "true" ]]; then
    supervisorctl start kudu-master
fi

# Wait for both master ports.
# Bug fix: previously only the second wait's exit code was examined,
# so a timeout on the RPC port went undetected.
/wait-for-it.sh kudu:7051 -t 120
rc=$?
if [ $rc -eq 0 ]; then
    /wait-for-it.sh kudu:8051 -t 120
    rc=$?
fi
if [ $rc -ne 0 ]; then
    echo -e "\n----------------------------------------"
    echo -e " Kudu Master not ready! Exiting..."
    echo -e "----------------------------------------"
    exit $rc
fi

supervisorctl start kudu-tserver

# Same pattern for the tablet server's RPC (7050) and web (8050) ports.
/wait-for-it.sh localhost:7050 -t 120
rc=$?
if [ $rc -eq 0 ]; then
    /wait-for-it.sh localhost:8050 -t 120
    rc=$?
fi
if [ $rc -ne 0 ]; then
    echo -e "\n----------------------------------------"
    echo -e " Kudu Tablet Server not ready! Exiting..."
    echo -e "----------------------------------------"
    exit $rc
fi
| true
|
44c85508375fdec6d61100092ae1a2c5ed804afc
|
Shell
|
tebeka/pythonwise
|
/mk
|
UTF-8
| 499
| 3.453125
| 3
|
[
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-gutenberg-2020"
] |
permissive
|
#!/bin/bash
# Guess which make utility to use
# Miki Tebeka <miki@mikitebeka.com>
makecmd=""
if [ -f SConstruct ]; then
    makecmd="scons -Q -D"
elif [ -f build.xml ]; then
    makecmd="ant"
elif [ -f Makefile ] || [ -f makefile ]; then
    makecmd="make"
elif [ "$OSTYPE" == "WINNT" ]; then
    # Pick the first .dsp project file, if any; the original kept the
    # whole ls output, which broke when several projects existed.
    proj=$(ls *.dsp 2>/dev/null | head -n1)
    if [ -n "$proj" ] && [ -f "$proj" ]; then
        makecmd="msdev $proj /MAKE"
    fi
fi

if [ -z "$makecmd" ]; then
    echo "can't find project file"
    exit 1
fi

# $makecmd is deliberately unquoted: it holds the command plus its own
# flags (e.g. "scons -Q -D") and must word-split. User arguments are
# quoted so file names with spaces survive.
$makecmd "$@"
| true
|
bdb2eb1d3ce315cb90f9a846918953a798827bb5
|
Shell
|
hibohiboo/develop
|
/tutorial/lesson/webpack/webpack4/bin/elm/build-js-test.sh
|
UTF-8
| 591
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Get the absolute path of the directory containing this script.
bin_dir=$(cd "$(dirname "$0")" && pwd)
name=${1:-webpacks}
dir_docker="$bin_dir/../../docker"

# Compile one Elm page inside the docker-compose service $name.
# Bug fix: `bash -c` takes a SINGLE command string — previously only
# `yarn` was executed and "run elm make ..." became positional
# parameters of the inner shell, so no compilation happened.
build_page() {
    local inputFile="/app/src/assets/elm/$1.elm"
    local outputFile="/app/src/assets/elm/$1.js"
    cd "$dir_docker" && docker-compose run "$name" /bin/bash -c \
        "yarn run elm make $inputFile --output=$outputFile --optimize"
}

build_page Page1
build_page Page2
| true
|
7ea1213ffee1cc3fbd06de3ff54b637be998b60f
|
Shell
|
jtaby/dotfiles
|
/zsh.conf/lib/functions.zsh
|
UTF-8
| 673
| 3.953125
| 4
|
[] |
no_license
|
# I could never remember the arguments for tar
function compress() {
    # Create <dir>.tar.gz from <dir>. Arguments are quoted so paths
    # containing spaces work.
    tar cvzf "$1.tar.gz" "$1"
}
function uncompress() {
    # Extract archive $1 into directory $2, creating $2 (including
    # parents) when it does not already exist.
    if [ -d "$2" ]; then
        tar xvzf "$1" -C "$2"
    else
        mkdir -p "$2"
        tar xvzf "$1" -C "$2"
    fi
}
# Make vim ask for sudo password if I try to open a
# privileged file
function vim {
    # If any named file exists but is not owned by us, run vim under
    # sudo; otherwise run the real vim (bypassing this wrapper via
    # `command`). Fixes the original's eval-based argument indirection
    # and its empty `else` branch (a bash syntax error).
    local file otherfile
    for file in "$@"; do
        if [[ -e $file && ! -O $file ]]; then
            otherfile=1
        fi
    done
    if [[ $otherfile = 1 ]]; then
        sudo vim "$@"
    else
        command vim "$@"
    fi
}
| true
|
0e20cbc727a4c222863a48e2648a893971ac51e8
|
Shell
|
gusman/eudyptula-task
|
/qemu.sh
|
UTF-8
| 705
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
# Boot a freshly built kernel with a buildroot rootfs under QEMU.
# Expects KERNEL_BUILD, ARCH and BUILDROOT_BUILD in the environment.
KERNEL=$KERNEL_BUILD/arch/$ARCH/boot/bzImage
#KERNEL=/boot/vmlinuz-3.16.0-57-generic
ROOTFS=$BUILDROOT_BUILD/images/rootfs.ext4
RAMSZ=128

#QEMU_EMULATOR=qemu-system-i386
QEMU_EMULATOR=qemu-system-x86_64

# NOTE: the original ended the command list with "-nographic \"
# followed by a comment line — the continuation silently swallowed
# the next line. The command now ends explicitly at -nographic.
sudo $QEMU_EMULATOR \
    -kernel "$KERNEL" \
    -hda "$ROOTFS" \
    -boot c -m "$RAMSZ" \
    -localtime -no-reboot -net nic -net user \
    -enable-kvm \
    -monitor tcp:127.0.0.1:4444,server,nowait \
    -usb \
    -serial stdio \
    -append "root=/dev/sda rw console=ttyS0" \
    -nographic
#-usbdevice host:0603:00F2
# -usbdevice host:0951:1607
#-usb

# access the qemu monitor using telnet localhost 4444
# Copy modules into /lib/modules/<kernel-version>
# run depmod -a
| true
|
dfc359fea8a3878341748e9d350af91a5b96180d
|
Shell
|
JankoMat/dnscheck
|
/dnscheck.sh
|
UTF-8
| 645
| 3.078125
| 3
|
[] |
no_license
|
#! /bin/bash
# Quick DNS propagation check: query the domain's authoritative
# nameserver (from whois) directly, then Google's resolver 8.8.8.8,
# printing NS/A/www/MX records from both for comparison.
domain=${1}

# Robustness: whois/dig on an empty argument produce junk.
if [ -z "$domain" ]; then
    echo "usage: $0 <domain>" >&2
    exit 1
fi

echo
echo "---------Direct results, propagation not considered---------"
# First nameserver listed in the whois record.
ns=$(whois "$domain" | grep -i "name server" | cut -d : -f 2 | awk '{print $1}' | head -1)
echo "Nameserver:"
echo $ns
command dig @$ns $ns +short
echo "A record:"
command dig @$ns "$domain" +short
echo "www:"
command dig @$ns "www.$domain" +short
echo "MX:"
command dig @$ns MX "$domain" +short
echo
echo "---------Propagated records---------"
echo "Nameserver:"
command dig @8.8.8.8 $ns +short
echo "A record:"
command dig @8.8.8.8 "$domain" +short
echo "www:"
command dig @8.8.8.8 "www.$domain" +short
echo "MX:"
command dig @8.8.8.8 MX "$domain" +short
| true
|
520d1a056d7f5df833bfec9453753bb77bae24aa
|
Shell
|
rogeliog/dotfiles
|
/.bashrc
|
UTF-8
| 3,327
| 3.25
| 3
|
[] |
no_license
|
# Shorthand for git.
alias g="git"
# ------------------------------------------------------------------------------
# Reload current shell
# ------------------------------------------------------------------------------
# NOTE(review): this file reads like a .bashrc but `rl` re-sources
# ~/.zshrc — confirm which shell actually loads it.
alias rl="source ~/.zshrc"
# ------------------------------------------------------------------------------
# Use nvim
# ------------------------------------------------------------------------------
alias vim="nvim"
# ------------------------------------------------------------------------------
# Run tmux to make it work with colors
# ------------------------------------------------------------------------------
alias tmux="TERM=screen-256color-bce tmux"
# ------------------------------------------------------------------------------
# Setup NVM (Might not need this)
# ------------------------------------------------------------------------------
export NVM_DIR="$HOME/.nvm"
# Homebrew-style nvm path; machine-specific, errors if absent.
. "/usr/local/opt/nvm/nvm.sh"
# ------------------------------------------------------------------------------
# Use FZF Git completion
# ------------------------------------------------------------------------------
source "$HOME/.fzf-git-completions.sh"
# ------------------------------------------------------------------------------
# Open a new PR for the current branch
# ------------------------------------------------------------------------------
function new_pr() {
    # Open the browser on the GitHub "compare" page for the current
    # branch against $BASE (default: master). If an `upstream` remote
    # exists, the base repo is upstream and the head is the fork.
    : ${BASE:=master}
    branch=$(git rev-parse --abbrev-ref HEAD)
    if git config --get remote.upstream.url > /dev/null 2>&1; then
        # New PR for forked repos
        url=$(git config --get remote.upstream.url)
        user=$(echo "$url" | sed 's/.*github.com.//' | cut -d '/' -f 1)
        repo=$(echo "$url" | sed 's/.*github.com.//' | cut -d '/' -f 2)
        fork_url=$(git config --get remote.origin.url)
        fork_user=$(echo "$fork_url" | sed 's/.*github.com.//' | cut -d '/' -f 1)
        pr_url="https://github.com/$user/$repo/compare/$BASE...$fork_user:$branch"
    else
        # New PR for own repos
        url=$(git config --get remote.origin.url)
        user=$(echo "$url" | sed 's/.*github.com.//' | cut -d '/' -f 1)
        repo=$(echo "$url" | sed 's/.*github.com.//' | cut -d '/' -f 2)
        pr_url="https://github.com/$user/$repo/compare/$BASE...$branch"
    fi
    open $pr_url
}
# ------------------------------------------------------------------------------
# Open the PR in your browser for the current branch
# ------------------------------------------------------------------------------
function open_pr() {
    # Open the browser on the pull request associated with $1 (or
    # HEAD): the PR number is recovered from the refs/pr/<n> refs
    # that point at the target commit.
    : ${REF:=HEAD}
    target=${1:-HEAD}
    pr=$(git show-ref | grep $(git rev-parse $target) | grep /pr/[0-9]*$ -o | grep [0-9]*$ -o)
    if git config --get remote.upstream.url > /dev/null 2>&1; then
        # PR lives on the upstream repo when this is a fork.
        url=$(git config --get remote.upstream.url)
        user=$(echo "$url" | sed 's/.*github.com.//' | cut -d '/' -f 1)
        repo=$(echo "$url" | sed 's/.*github.com.//' | cut -d '/' -f 2)
        fork_url=$(git config --get remote.origin.url)
        fork_user=$(echo "$fork_url" | sed 's/.*github.com.//' | cut -d '/' -f 1)
        pr_url="https://github.com/$user/$repo/pull/$pr"
    else
        # Otherwise the PR is on origin itself.
        url=$(git config --get remote.origin.url)
        user=$(echo "$url" | sed 's/.*github.com.//' | cut -d '/' -f 1)
        repo=$(echo "$url" | sed 's/.*github.com.//' | cut -d '/' -f 2)
        pr_url="https://github.com/$user/$repo/pull/$pr"
    fi
    open $pr_url
}
| true
|
032274a93a23da0f15d739d9643fa076088dbc7f
|
Shell
|
griesik/ASDiPSCTranscriptome
|
/ASDiPSCTranscriptome/SCRIPTS/tophat2.sh
|
UTF-8
| 475
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
### MAPPING (Tophat2, hg19) ###
# Fastq files come in pairs sharing the same basename before the
# first dot; when two consecutive (sorted) files match, map the pair.
mkdir -p ~/results_tophat2/
cd ~/Fastqs/ || exit 1
nome1="a"
nome_old="b"
# Glob instead of `$(ls)` so file names are not word-split.
for i in *; do
    nome1=$(echo "$i" | cut -d "." -f 1)
    if [ "$nome1" == "$nome_old" ]
    then
        tophat2 -p 40 -o ~/results_tophat2/${nome1}_out --transcriptome-index=~/databases/gencode.v19/gencode.v19.annotation ~/Homo_sapiens_UCSC_hg19/UCSC/hg19/Sequence/Bowtie2Index/genome "$arquivo_old" "$i" 2>> ${nome1}_tophat2.out
    fi
    nome_old=$nome1
    arquivo_old=$i
done
| true
|
e255278f90d19170f9b2ad28f26c38daa8002722
|
Shell
|
DanielFloripa/init-scripts
|
/syncHD2.sh
|
UTF-8
| 1,887
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Backup-over-SSH script using rsync.
# Version 0.1 — http://organelas.com/2009/08/07/shell-script-para-backup-usando-rsync-e-ssh-em-dhcp-no-ubuntu/
## Configuration!!! ##
# Adjust the parameters below for your system.
# Destination: IP or hostname of the target machine.
# NOTE(review): these "(a,b)" assignments create ONE-element arrays
# holding the literal string "a,b" — bash does not split on commas.
ORIGEM1=("/dev/sdb1","/media/daniel/D7B3-DE22")
ORIGEM2=("/home/daniel","/media/daniel/D7B3-DE22")
# NOTE(review): DESTINO is a device/mount-path pair, yet it is later
# pinged as a host and used as an ssh target — this cannot work as
# written; confirm the intended host name.
DESTINO=("/dev/sdc1","/media/daniel/HD1500GB")
# Log file (one per day).
LOG=/home/arduino/Dropbox/UDESC/ProjPesq14/Pratico/GreenHop/.bkp_sync/.backup`date +%Y-%m-%d`.log
# Check whether the destination machine is up.
/bin/ping -c 1 -W 2 $DESTINO > /dev/null
if [ "$?" -ne 0 ];
then
echo -e `date +%c` >> $LOG
echo -e "\n$DESTINO desligado." >> $LOG
echo -e "Backup não realizado\n" >> $LOG
echo -e "--- // ---\n" >> $LOG
echo -e "\n$DESTINO desligado."
echo -e "Backup não realizado.\n"
else
HORA_INI=`date +%s`
echo -e `date +%c` >> $LOG
echo -e "\n$DESTINO ligado!" >> $LOG
echo -e "Iniciando o backup...\n" >> $LOG
# NOTE(review): $SRC, $FDR1, $FDR2, $USR, $DIR and $INITD are never
# defined in this script — presumably expected from the environment;
# TODO confirm, otherwise both rsync calls expand to garbage.
rsync -ah --delete --stats --progress --log-file=$LOG -e ssh $SRC$FDR1 $USR@$DESTINO:$DIR
rsync -ah --stats --progress --log-file=$LOG -e ssh $SRC$FDR2 $USR@$DESTINO:$INITD
HORA_FIM=`date +%s`
TEMPO=`expr $HORA_FIM - $HORA_INI`
echo -e "\nBackup finalizado com sucesso!" >> $LOG
echo -e "Duração: $TEMPO s\n" >> $LOG
echo -e "--- // ---\n" >> $LOG
echo -e "\nBackup finalizado com sucesso!"
echo -e "Duração: $TEMPO s\n"
echo -e "Consulte o log da operação em $LOG.\n"
fi
# TODO
# - Run from a cron job!
# - Decide how to handle the log file (delete, archive, keep...)
# - Use wakeonlan to power the destination on if it is off
# - Power the destination machine down after the backup finishes
# - Handle transfer failures (e.g. out of disk space)
| true
|
1cf920c360331815718519eda72fe7c5e6dba117
|
Shell
|
guidohu/booksys
|
/Docker/booksys_dummy_db/anonymize_dump.sh
|
UTF-8
| 6,010
| 3.90625
| 4
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
#!/bin/bash
# Anonymize a MySQL dump so it can be shipped as dummy/test data:
# drops session tables outright and replaces personal data columns
# with placeholders. Usage: anonymize_dump.sh [-hv] -i <in> -o <out>
OPTIND=1 # Reset in case getopts has been used previously in the shell.
# Initialize our own variables (shared file-level state read by the
# functions below):
input_file=""
output_file=""
verbose=0
show_help() {
    # Print usage to stdout.
    # Bug fix: the heredoc delimiter is now unquoted so that $0
    # expands to the actual script name; the original quoted 'EOF'
    # printed a literal "$0".
    cat << EOF
usage: $0 [-hv] -i <input_file> -o <output_file>
OPTIONS:
   -h   display this help
   -v   run in verbose mode
   -i   specify input file (path)
   -o   specify output file (path)
EOF
}
# Two-pass anonymization of the SQL dump in $input_file:
#   pass 1: copy the dump to a temp file, dropping the INSERT blocks
#           of tables that hold only sensitive session data
#           (browser_session, password_reset);
#   pass 2: rewrite sensitive columns in-place (expenditure/payment
#           comments, session title/description, full user rows) and
#           write the result to $output_file.
# Reads file-level globals: input_file, output_file, verbose.
anonymize() {
# create two empty files
echo "" > $output_file
tempfile="${output_file}.tmp"
echo "" > $tempfile
# initialize tracking variable
deleting_browser_session_data=0
deleting_password_reset_data=0
# delete sensitive content
while IFS= read -r line
do
if [[ "$line" =~ 'INSERT INTO `browser_session`' ]]; then
[[ $verbose = 1 ]] && echo "START DELETION:"
deleting_browser_session_data=1
fi
if [[ "$line" =~ 'INSERT INTO `password_reset`' ]]; then
[[ $verbose = 1 ]] && echo "START DELETION:"
deleting_password_reset_data=1
fi
# NOTE: with a quoted right-hand side, =~ does a literal substring
# match, so this never matches a blank line as a regex would; the
# exact `= ''` comparison just below is what actually resets the
# flags at the end of an INSERT block.
if [[ "$line" =~ '^\s*$' ]]; then
deleting_browser_session_data=0
deleting_password_reset_data=0
fi
if [[ "$line" = '' ]]; then
deleting_browser_session_data=0
deleting_password_reset_data=0
fi
if [[ $deleting_browser_session_data = 1 ]]; then
# do not write line to output
[[ $verbose = 1 ]] && echo "DELETE $line"
elif [[ $deleting_password_reset_data = 1 ]]; then
# do not write line to output
[[ $verbose = 1 ]] && echo "DELETE $line"
else
echo "$line" >> $tempfile
fi
done < "$input_file"
# sanitize sensitive content
# initialize tracking variables
sanitize_expenditure=0
sanitize_payment=0
sanitize_session=0
sanitize_user=0
while IFS= read -r line
do
# start sanitizing when an INSERT block of interest begins
if [[ "$line" =~ 'INSERT INTO `expenditure`' ]]; then
[[ $verbose = 1 ]] && echo "START SANITIZING:"
sanitize_expenditure=1
fi
if [[ "$line" =~ 'INSERT INTO `payment`' ]]; then
[[ $verbose = 1 ]] && echo "START SANITIZING:"
sanitize_payment=1
fi
if [[ "$line" =~ 'INSERT INTO `session`' ]]; then
[[ $verbose = 1 ]] && echo "START SANITIZING:"
sanitize_session=1
fi
if [[ "$line" =~ 'INSERT INTO `user`' ]]; then
[[ $verbose = 1 ]] && echo "START SANITIZING:"
sanitize_user=1
fi
# stop sanitizing at the blank line ending the block (same literal
# vs. regex caveat as above — the `= ''` check does the real work)
if [[ "$line" =~ '^\s*$' ]]; then
sanitize_expenditure=0
sanitize_payment=0
sanitize_session=0
sanitize_user=0
fi
if [[ "$line" = '' ]]; then
sanitize_expenditure=0
sanitize_payment=0
sanitize_session=0
sanitize_user=0
fi
# sanitize expenditures
if [[ $sanitize_expenditure = 1 ]]; then
# replace the trailing comment column with a fixed placeholder
[[ $verbose = 1 ]] && echo "Replace line:"
[[ $verbose = 1 ]] && echo $line
[[ $verbose = 1 ]] && echo "By line:"
[[ $verbose = 1 ]] && echo $line | perl -pe "s/,\s'[^']*'\)/, 'my comment'\)/"
echo $line | perl -pe "s/,\s'[^']*'\)/, 'my comment'\)/" >> $output_file
elif [[ $sanitize_payment = 1 ]]; then
# replace the comment column
[[ $verbose = 1 ]] && echo "Replace line:"
[[ $verbose = 1 ]] && echo $line
[[ $verbose = 1 ]] && echo "By line:"
[[ $verbose = 1 ]] && echo $line | perl -pe "s/,\s'[^']*'\)/, 'my comment'\)/"
echo $line | perl -pe "s/,\s'[^']*'\)/, 'my comment'\)/" >> $output_file
elif [[ $sanitize_session = 1 ]]; then
# replace the title and description columns
[[ $verbose = 1 ]] && echo "Replace line:"
[[ $verbose = 1 ]] && echo $line
[[ $verbose = 1 ]] && echo "By line:"
[[ $verbose = 1 ]] && echo $line | perl -pe "s/,\s'[^']*',\s'[^']*',\s(\d+,\s\d+,\s\d+)/, 'my title', 'my description', \$1/"
echo $line | perl -pe "s/,\s'[^']*',\s'[^']*',\s(\d+,\s\d+,\s\d+)/, 'my title', 'my description', \$1/" >> $output_file
elif [[ $sanitize_user = 1 ]]; then
# replace the whole user row with deterministic placeholder data
# keyed on the numeric user id (captured as $1 in the perl regex)
[[ $verbose = 1 ]] && echo "Replace line:"
[[ $verbose = 1 ]] && echo $line
[[ $verbose = 1 ]] && echo "By line:"
[[ $verbose = 1 ]] && echo $line | perl -pe "s/\((\d+),.*,(\s\d+,\s\d+,\s\d+,\s'[^']*')/\(\$1, 'username\$1', 12953, '\\\$6\\\$rounds=5000\\\$12953\\\$GmpDFyZnmBTriCjrIlT3tuWkWc5dvhHJeP76qt6ZWTQosQGeViJClsxvSlXesm9nw0Cs4R7BOQY5bqEqT.2nI.', NULL, 'firstname\$1', 'lastname\$1', 'street \$1', 'city \$1', \$1, 123456789\$1, 'email\$1\@domain\$1.com',\$2/"
echo $line | perl -pe "s/\((\d+),.*,(\s\d+,\s\d+,\s\d+,\s'[^']*')/\(\$1, 'username\$1', 12953, '\\\$6\\\$rounds=5000\\\$12953\\\$GmpDFyZnmBTriCjrIlT3tuWkWc5dvhHJeP76qt6ZWTQosQGeViJClsxvSlXesm9nw0Cs4R7BOQY5bqEqT.2nI.', NULL, 'firstname\$1', 'lastname\$1', 'street \$1', 'city \$1', \$1, 123456789\$1, 'email\$1\@domain\$1.com',\$2/" >> $output_file
else
echo "$line" >> $output_file
fi
done < "$tempfile"
rm $tempfile
cat << 'EOF'
Anonymization of database dump has been done. Please manually check the dump as this script might not catch all user data.
EOF
}
main() {
    # Validate arguments before doing any work.
    # Bug fix: the test is now quoted — with an unquoted, empty
    # $input_file, `[ ! -f ]` degenerates to a two-argument test that
    # evaluates to false, silently letting a missing -i through.
    if [ -z "$input_file" ] || [ ! -f "$input_file" ]; then
        >&2 echo "Input file not found"
        exit 1
    fi
    # anonymize() writes to $output_file; refuse to run without it.
    if [ -z "$output_file" ]; then
        >&2 echo "Output file not specified"
        exit 1
    fi
    # anonymize testdata
    anonymize
}
# Parse command-line flags. getopts only ever reports single-character
# option names, so each arm matches on the bare letter (the original
# "h|help" style alternatives were purely decorative).
while getopts "hvi:o:" opt "$@"; do
    case "$opt" in
        h) show_help; exit 0 ;;
        v) verbose=1 ;;
        i) input_file=$OPTARG ;;
        o) output_file=$OPTARG ;;
    esac
done

main

exit 0
| true
|
cc166fd25c901f115cfd5fa6f978d78a88f47423
|
Shell
|
goog-lu/shell
|
/shell_study-master/restudy/6.review.sh
|
UTF-8
| 283
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Author: bavduer
# Email: bavduer@163.com
# Date: 2019/03/25
# Usage: case.

# Read a choice from stdin and dispatch on it. The 1/2/3 branches are
# intentional placeholders; ':' is the shell's no-op builtin.
# Bug fix: the original used the Python-ism "pass", which is not a
# shell command and failed with "command not found" at runtime.
read -p "" var
case $var in
1)
    :
    ;;
2)
    :
    ;;
3)
    :
    ;;
*)
    printf "Please redo your choose.\n"
    printf "Input True: 1 or 2 or 3.\n"
    ;;
esac
| true
|
08119ff17d5d0662011e5af2dfb1f06e1f55217e
|
Shell
|
palmer-dabbelt/libflo
|
/test/flo-stat/harness.bash
|
UTF-8
| 1,731
| 3.5625
| 4
|
[] |
no_license
|
export LC_ALL="C"

# Default the test name when the caller did not provide one.
if [[ "$TEST" == "" ]]
then
    TEST="test"
fi

#############################################################################
# Check if we need to generate a Flo file                                   #
#############################################################################
if test -f $TEST.scala
then
    cat $TEST.scala
    scalac $TEST.scala -classpath chisel.jar:.
    scala $SCALA_FLAGS -classpath chisel.jar:. $TEST \
        --debug --backend flo \
        || true
    cat $TEST.flo
    mv $TEST.flo $TEST.in
fi
cat $TEST.in

#############################################################################
# Run the test without valgrind                                             #
#############################################################################
$PTEST_BINARY $TEST.in > $TEST.out
cat $TEST.out | sort > $TEST.out.sort
cat $TEST.gold | sort > $TEST.gold.sort
cat $TEST.out
cat $TEST.gold
diff -au $TEST.gold.sort $TEST.out.sort
out="$?"
if [[ "$out" != "0" ]]
then
    exit "$out"
fi

#############################################################################
# Run the test with valgrind                                                #
#############################################################################
if [[ "$(which valgrind)" == "" ]]
then
    exit 0
fi
if test ! -x `which valgrind`
then
    exit 0
fi
valgrind -q $PTEST_BINARY $TEST.in >$TEST.out 2>$TEST.valgrind
cat $TEST.valgrind
if [[ "$(cat $TEST.valgrind | wc -l)" != 0 ]]
then
    exit 1
fi
cat $TEST.out | sort > $TEST.out.sort
cat $TEST.gold | sort > $TEST.gold.sort
# Bug fix: these two previously dumped literal "test.out"/"test.gold"
# instead of honoring $TEST like the non-valgrind pass above.
cat $TEST.out
cat $TEST.gold
diff -au $TEST.gold.sort $TEST.out.sort
out="$?"
if [[ "$out" != "0" ]]
then
    exit "$out"
fi
| true
|
4ee89be9c68fcb32285d6ca2222bb8aad3edc14f
|
Shell
|
Mangome/pyenv
|
/pyenv.d/which/anaconda.bash
|
UTF-8
| 2,851
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
# Anaconda comes with binaries of system packages (e.g. `openssl`, `curl`).
# Creating shims for those binaries will prevent pyenv users to run those
# commands normally when not using Anaconda.
#
# This is a limited edition of https://github.com/yyuu/pyenv-which-ext
# and it will looks for original `PATH` if there is Anaconda/Miniconda
# installed and the command name is blacklisted.
conda_exists() {
    # True when at least one conda binary exists under any pyenv
    # version (or any conda env inside one). The globs expand
    # directly into the array — no command substitution — so paths
    # containing whitespace survive intact.
    shopt -s nullglob
    local condas=("${PYENV_ROOT}/versions/"*"/bin/conda" "${PYENV_ROOT}/versions/"*"/envs/"*"/bin/conda")
    shopt -u nullglob
    [ "${#condas[@]}" -gt 0 ]
}
conda_shims() {
    # Names of system binaries bundled with Anaconda — grouped in
    # source order by originating package (curl, fontconfig, freetype,
    # libpng, openssl, qtchooser, redis, sqlite3, libxml2, libxslt,
    # xsltproc) — that pyenv must not shim. One name per line.
    cat <<EOS
curl
curl-config
fc-cache
fc-cat
fc-list
fc-match
fc-pattern
fc-query
fc-scan
fc-validate
freetype-config
libpng-config
openssl
assistant
designer
lconvert
linguist
lrelease
lupdate
moc
pixeltool
qcollectiongenerator
qdbus
qdbuscpp2xml
qdbusviewer
qdbusxml2cpp
qhelpconverter
qhelpgenerator
qmake
qmlplugindump
qmlviewer
qtconfig
rcc
uic
xmlpatterns
xmlpatternsvalidator
redis-benchmark
redis-check-aof
redis-check-dump
redis-cli
redis-server
sqlite3
xml2-config
xslt-config
xsltproc
EOS
}
expand_path() {
    # Print the absolute form of directory $1 (resolving relative
    # components via cd+pwd); fail when $1 is not a directory. The
    # caller's working directory is restored afterwards.
    [ -d "$1" ] || return 1
    local here="$(pwd)"
    cd "$1"
    pwd
    cd "$here"
}
remove_from_path() {
    # Print $PATH with every entry equal to (the canonicalized) $1
    # removed; entries that no longer exist are dropped as well.
    # If $1 cannot be resolved, $PATH is echoed unchanged.
    local path_to_remove="$(expand_path "$1")"
    local result=""
    if [ -z "$path_to_remove" ]; then
        echo "${PATH}"
        return
    fi
    # Bug fix: IFS is now function-local. The original bare
    # `IFS=: paths=($PATH)` assignment list permanently set IFS=":"
    # for the rest of the process, breaking later word-splitting.
    local IFS=:
    local paths path
    paths=($PATH)
    for path in "${paths[@]}"; do
        path="$(expand_path "$path" || true)"
        if [ -n "$path" ] && [ "$path" != "$path_to_remove" ]; then
            result="${result}${path}:"
        fi
    done
    echo "${result%:}"
}
lookup_from_path() {
    # Resolve command $1 against PATH with pyenv's shims directory
    # stripped out, so the system binary is found rather than the
    # shim. PATH is restored before returning; the result (possibly
    # empty) is printed on stdout.
    local wanted="$1"
    local saved_path="${PATH}"
    PATH="$(remove_from_path "${PYENV_ROOT}/shims")"
    local found="$(command -v "$wanted" || true)"
    PATH="${saved_path}"
    echo "$found"
}
# Hook body: only act when pyenv is resolving a command and some
# Anaconda/Miniconda installation exists under PYENV_ROOT.
if [ -n "$PYENV_COMMAND" ]; then
if conda_exists; then
if [ -x "$PYENV_COMMAND_PATH" ]; then
# `curl` bundled with Anaconda does not work on Debian
# https://github.com/ContinuumIO/anaconda-issues/issues/72
# If its CA bundle (per curl-config --ca) is missing, fall back to
# the system curl found outside the shims directory.
if [[ "$PYENV_COMMAND" == "curl" ]] && [[ -x "${PYENV_COMMAND_PATH%/*}/curl-config" ]]; then
if [ ! -f "$("${PYENV_COMMAND_PATH%/*}/curl-config" --ca 2>/dev/null || true)" ]; then
PYENV_COMMAND_PATH="$(lookup_from_path "$PYENV_COMMAND" || true)"
fi
fi
else
# Command is not executable in the selected version: if it is one of
# the blacklisted Anaconda-bundled names, resolve it from the
# original PATH instead of failing through the shim.
if conda_shims | grep -q -x "$PYENV_COMMAND"; then
PYENV_COMMAND_PATH="$(lookup_from_path "$PYENV_COMMAND" || true)"
fi
fi
fi
fi
| true
|
2481af11a00e7c3d112d9e3cacd829c9a29cd7a8
|
Shell
|
Lynx-Be-Learning/My-Sandbox
|
/Project-Examples/DotFiles/dotfiles-5-13/.bashrc
|
UTF-8
| 5,014
| 2.734375
| 3
|
[] |
no_license
|
#! /bin/bash
# Interactive-shell setup: locale/terminal exports, PATH augmentation,
# history keybindings and shell options.
#
#
### NOTE TO SELF
# 2 worded named below
# dotfile / hidden put name_name
# normal put .name-name
#
### Code / Help
# https://github.com/dougborg/bashrc
# 256 color support
# > http://www.mail-archive.com/bug-coreutils@gnu.org/msg11030.html
#
#
#
###?????? MAKE SHELL INTERACTIVE
#[ -f /etc/inputrc ] && source /etc/inputrc
[ -f /etc/bashrc ] && . /etc/bashrc
[ -f $HOME/.profile ] && . $HOME/.profile
## Export
export LANG=en_US.utf8
export LC_ALL=en_US.utf8
export TERM=rxvt-unicode-256color
# urxvt <--- terminal
# urxvtc <--- client
# urxvtd <--- daemon
# Preferred applications
export PAGER=less
export TERMINAL=rxvt-unicode-256color
export IRC=weechat
# Pick GUI vs. terminal apps depending on whether X is running.
if [ -n "$DISPLAY" ]; then
export BROWSER=firefox
export EDITOR=gedit
else
export BROWSER=lynx
export EDITOR=lynx-emacs
fi
# Prepend personal bin directories to PATH, but only when they exist
# and are not already on PATH (the grep guards against duplicates).
if [[ -d $HOME/bin && -z $(echo $PATH | grep -o $HOME/bin) ]]; then
export PATH=$HOME/bin:$PATH
fi
if [[ -d $HOME/Bin && -z $(echo $PATH | grep -o $HOME/Bin) ]]; then
export PATH=$HOME/Bin:$PATH
fi
if [[ -d $HOME/.local/bin && -z $(echo $PATH | grep -o $HOME/.local/bin) ]]; then
export PATH=$HOME/.local/bin:$PATH
fi
if [[ -d $HOME/.local/Bin && -z $(echo $PATH | grep -o $HOME/.local/Bin) ]]; then
export PATH=$HOME/.local/Bin:$PATH
fi
# CUDA: prefer 7.0, fall back to 6.5.
# NOTE(review): the LD_LIBRARY_PATH guard greps for "/.local/bin",
# which looks copy-pasted from the PATH checks above — confirm it was
# meant to grep for the cuda directory instead.
if [[ -d /usr/local/cuda-7.0 && -z $(echo $LD_LIBRARY_PATH | grep -o /.local/bin) ]]; then
export LD_LIBRARY_PATH=/usr/local/cuda-7.0:$LD_LIBRARY_PATH
if [[ -d /usr/local/cuda-7.0/bin && -z $(echo $PATH | grep -o /usr/local/cuda-7.0/bin) ]]; then
export PATH=/usr/local/cuda-7.0/bin:$PATH
fi
elif [[ -d /usr/local/cuda-6.5 && -z $(echo $LD_LIBRARY_PATH | grep -o /.local/bin) ]]; then
export LD_LIBRARY_PATH=/usr/local/cuda-6.5:$LD_LIBRARY_PATH
if [[ -d /usr/local/cuda-6.5/bin && -z $(echo $PATH | grep -o /usr/local/cuda-6.5/bin) ]]; then
export PATH=/usr/local/cuda-6.5/bin:$PATH
fi
fi
if [[ -d /opt/VirtualGL/bin && -z $(echo $PATH | grep -o /opt/VirtualGL/bin) ]]; then
export PATH=/opt/VirtualGL/bin:$PATH
fi
# Old PATH code
#PATH=$PATH:$HOME/.local/bin:$HOME/Bin:$PATH
#PATH=$PATH:/opt/VirtualGL/bin:/usr/local/cuda-6.5/bin
#LD_LIBRARY_PATH=$LD_LIBRARY_PATH/usr/local/cuda-6.5
# History completion bound to arrow keys (down, up)
bind '"\e[A": history-search-backward'
bind '"\e[B": history-search-forward'
## My Paths
export EMACS=$HOME/.emacs.d
export GIT=$HOME/Git
export I3=$HOME/.i3
export LOGS=$HOME/.logs.d
export WALLPAPER=$HOME/.config/wallpaper/main
# resolve symlinks when 'cd'ing
set -o physical
# assume 'cd' when trying to exec a directory
#shopt -s autocd 2>/dev/null
# print job status on exit
shopt -s checkjobs 2> /dev/null
# update $ROWS/$COLUMNS after command
shopt -s checkwinsize
# @(…) +(…) etc. globs
shopt -s extglob
# the ** glob
shopt -s globstar
# don't attempt completion on an empty command line
shopt -s no_empty_cmd_completion
# store multi-line commands as single history entry
shopt -s cmdhist
# append to $HISTFILE on exit
shopt -s histappend
# allow re-editing failed history subst
shopt -s histreedit
# histexpand ('!' expansion) needs bash >= 4.3; disable it elsewhere.
if (( ${BASH_VERSINFO[0]} < 4 )) || (( ${BASH_VERSINFO[1]} < 3 )); then
set +o histexpand
fi
# History length; exporting is not required — bash reads these shell
# variables directly.
# Bug fix: "hISTSIZE" was a typo that created a useless variable and
# left HISTSIZE at its default.
HISTSIZE=1000
HISTFILESIZE=2000
# Don't put duplicate lines or lines starting
# with space in the history.
HISTCONTROL=ignoreboth
## Load other Files
# ~/.bash_aliases / Alias definitions.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# ~/.bash_function / Small bash func
if [ -f ~/.bash_function ]; then
. ~/.bash_function
fi
# ~/.bash_env / Environment variables/exports
if [ -f ~/.bash_env ]; then
. ~/.bash_env
fi
# enable programmable completion features
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
# Prompt: two-line box-drawing prompt; the SSH variant appends an
# "[ssh]" marker, and both change the connector glyph based on the
# previous command's exit status.
if [ -n "$SSH_CONNECTION" ]
then
export PS1="\[$(tput setaf 1)\]┌─╼\[\033[0;36m\] \u\[\033[00m\] in \[$(tput setaf 7)\][\w]\n\[$(tput setaf 1)\]\$(if [[ \$? == 0 ]]; then echo \"\[$(tput setaf 1)\]└────╼ \[$(tput setaf 7)\][ssh]\"; else echo \"\[$(tput setaf 1)\]└╼ \[$(tput setaf 7)\][ssh]\"; fi) \[$(tput setaf 7)\]"
else
export PS1="\[$(tput setaf 1)\]┌─╼\[\033[0;36m\] \u\[\033[00m\] in \[$(tput setaf 7)\][\w]\n\[$(tput setaf 1)\]\$(if [[ \$? == 0 ]]; then echo \"\[$(tput setaf 1)\]└────╼\"; else echo \"\[$(tput setaf 1)\]└╼\"; fi) \[$(tput setaf 7)\]"
fi
# Colored prompt
# NOTE(review): color_prompt is computed here but nothing below uses
# it — presumably leftover from a template .bashrc.
force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
color_prompt=yes
else
color_prompt=
fi
fi
# Compile and execute a C source on the fly
# csource() {
# [[ $1 ]] || { echo "Missing operand" >&2; return 1; }
# [[ -r $1 ]] || { printf "File %s does not exist or is not readable\n" "$1" >&2; return 1; }
# local output_path=${TMPDIR:-/tmp}/${1##*/};
# gcc "$1" -o "$output_path" && "$output_path";
# rm "$output_path";
# return 0;
# }
|
e4b97f93aa711d7db789885fac03094d5dad36d0
|
Shell
|
craiglittle/dotbox-custom
|
/script/bootstrap
|
UTF-8
| 868
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
set -e

# Bootstrap a machine for the dotbox puppet manifests: ensure the Xcode CLI
# tools, puppet, and the required puppet modules are present, then apply the
# base manifest.

dotbox_dir="$(dirname "$0")/../"
libdir="${dotbox_dir}/lib"
modulepath="${dotbox_dir}/modules"
manifest="${dotbox_dir}/manifests/base.pp"

# True when the named puppet module is absent from the local module path.
module_not_installed() {
  [[ $(puppet module list --modulepath "$modulepath" | grep -c "$1") -eq 0 ]]
}

# Install a puppet module ($1) at a pinned version ($2) into the module path.
install_module() {
  puppet module install -i "$modulepath" "$1" --version "$2"
}

# BUG FIX: the original redirected to a file literally named "null",
# creating it in the working directory; discard the output properly.
if xcode-select -p > /dev/null 2>&1 ; then
  echo 'Xcode is already installed.'
else
  xcode-select --install
  read -p 'Press any key to continue the bootstrap process.'
fi

sudo mkdir -p /usr/local

if ! sudo gem list -i puppet > /dev/null ; then
  sudo gem install puppet
fi

if module_not_installed bjoernalbers-homebrew ; then
  install_module bjoernalbers-homebrew 0.2.0
fi

sudo puppet apply --libdir "$libdir" \
                  --modulepath "$modulepath" \
                  --hiera_config /dev/null \
                  "$manifest"
| true
|
926f8874410bb529b2b68b6bfdb69426e271f212
|
Shell
|
NickCul30/Misc
|
/School Assignments/60-256 Systems Programming/Lab10/findemails
|
UTF-8
| 196
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash

# Print the e-mail address for every course id matching the query in $1.
#   courses: tab-separated, course id in column 1
#   emails:  tab-separated, id in column 1, e-mail address in column 2

ids=( $(grep -- "$1" courses | cut -d $'\t' -f 1) )
size=${#ids[@]}

if [ "$size" -eq 0 ]; then
	echo "Course not found"
	exit 1   # BUG FIX: previously fell through and exited 0 on no match
fi

for i in "${ids[@]}"; do
	echo $(grep -- "$i" emails | cut -d $'\t' -f 2)
done
| true
|
2de581bf7b0354f70c0551cd0886dcb469bf3768
|
Shell
|
alainmarcel/Surelog
|
/third_party/tests/Zachjs/lib/functions.sh
|
UTF-8
| 3,618
| 3.921875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Shared helpers for the sv2v test suite (sourced by the shunit2 test files).
SCRIPT_DIR=`dirname "${BASH_SOURCE[0]}"`   # directory containing this script
SV2V="$SCRIPT_DIR/../../bin/sv2v"          # the sv2v converter under test
# Fail the current shunit2 test unless the given path is a regular file.
assertExists() {
    file=$1
    [[ -f "$file" ]]
    assertTrue "$file does not exist" $?
}
# USAGE: simulate <vcd-file> <log-file> <top-module> <file> [<file> ...]
# Compile the given Verilog sources with iverilog (together with the
# tb_dumper.v harness), run the simulation, then normalize the outputs so
# they can be diffed between runs: strip the date header from the VCD and
# the "opened file..." line from the log.
simulate() {
    # arguments
    sim_vcd="$1"; shift
    sim_log="$1"; shift
    sim_top="$1"; shift
    # compile the files
    sim_prog="$SHUNIT_TMPDIR/simprog.exe"
    iv_output=`iverilog \
        -Wall \
        -Wno-select-range \
        -o "$sim_prog" \
        -g2005 \
        -DTEST_VCD="\"$sim_vcd\"" \
        -DTEST_TOP=$sim_top \
        "$SCRIPT_DIR/tb_dumper.v" \
        "$@" 2>&1`
    assertTrue "iverilog on $1 failed" $?
    # warnings are treated as failures; echo them so they show in the output
    assertNull "iverilog emitted warnings:" "$iv_output"
    if [ "$iv_output" != "" ]; then
        echo "$iv_output"
    fi
    # run the simulation
    $sim_prog > $sim_log
    assertTrue "simulating $1 failed" $?
    # remove the date from the VCD
    sed -i.orig -e "1,3d" "$sim_vcd"
    # remove the "opened file..." prompt from the log
    sed -i.orig -e "1,1d" "$sim_log"
}
# Convert ac_file three times through sv2v, asserting that each conversion
# succeeds, that the output is stable from the second iteration onwards, and
# that the converted output contains no SystemVerilog-only constructs.
assertConverts() {
    ac_file="$1"
    ac_tmpa="$SHUNIT_TMPDIR/ac-conv-tmpa.v"
    ac_tmpb="$SHUNIT_TMPDIR/ac-conv-tmpb.v"
    ac_tmpc="$SHUNIT_TMPDIR/ac-conv-tmpc.v"
    $SV2V "$ac_file" 2> /dev/null > "$ac_tmpa"
    assertTrue "1st conversion of $ac_file failed" $?
    $SV2V "$ac_tmpa" 2> /dev/null > "$ac_tmpb"
    assertTrue "2nd conversion of $ac_file failed" $?
    $SV2V "$ac_tmpb" 2> /dev/null > "$ac_tmpc"
    assertTrue "3rd conversion of $ac_file failed" $?
    diff "$ac_tmpb" "$ac_tmpc" > /dev/null
    assertTrue "conversion of $ac_file not stable after the second iteration" $?
    # using sed to remove quoted strings
    filtered=`sed -E 's/"([^"]|\")+"//g' "$ac_tmpa"`
    echo "$filtered" | grep "\$bits" > /dev/null
    assertFalse "conversion of $ac_file still contains \$bits" $?
    echo "$filtered" | grep "\]\[" > /dev/null
    assertFalse "conversion of $ac_file still contains multi-dim arrays" $?
    # BUG FIX: in an ERE, alternation is a plain "|"; the original "\|"
    # matched a literal pipe character, so this check could never fire.
    # Also silence the match output like the checks above.
    echo "$filtered" | egrep "\s(int|bit|logic|byte|struct|enum|longint|shortint)\s" > /dev/null
    assertFalse "conversion of $ac_file still contains SV types" $?
}
# Convert SystemVerilog source file(s) with sv2v.
# Usage: convert <out-file> <input> [<input> ...]
convert() {
    out_file=$1
    shift
    $SV2V "$@" > "$out_file" 2> /dev/null
    assertTrue "conversion failed" $?
    assertExists "$out_file"
}
# Convert a SystemVerilog file and check it simulates identically to a
# reference Verilog file.
#   $1 - SystemVerilog source, $2 - reference Verilog source, $3 - testbench
# Falls back to the empty testbench when $3 does not exist on disk.
simpleTest() {
    sv="$1"
    ve="$2"
    tb="$3"
    assertNotNull "SystemVerilog file not specified" $sv
    assertNotNull "Verilog file not specified" $ve
    assertNotNull "Testbench not specified" $tb
    # some tests don't have a separate testbench, instead having the top-level
    # module defined in both of the input files
    if [ ! -f "$tb" ]; then
        tb="$SCRIPT_DIR/empty.v"
    fi
    assertExists $sv
    assertExists $ve
    assertExists $tb
    # every input must itself convert cleanly and stably
    assertConverts "$sv"
    assertConverts "$ve"
    assertConverts "$tb"
    cv="$SHUNIT_TMPDIR/conv.v"
    convert "$cv" "$sv"
    simulateAndCompare "$ve" "$cv" "$tb"
}
# Simulate the reference Verilog ($1) and the converted Verilog ($2) with the
# same testbench ($3); assert that their VCD dumps and simulation logs match.
simulateAndCompare() {
    ve="$1"
    cv="$2"
    tb="$3"
    ref_vcd="$SHUNIT_TMPDIR/ref.vcd"
    gen_vcd="$SHUNIT_TMPDIR/gen.vcd"
    ref_log="$SHUNIT_TMPDIR/ref.log"
    gen_log="$SHUNIT_TMPDIR/gen.log"
    # simulate and compare the two files
    simulate "$ref_vcd" "$ref_log" top "$ve" "$tb"
    simulate "$gen_vcd" "$gen_log" top "$cv" "$tb"
    diff "$ref_vcd" "$gen_vcd" > /dev/null
    assertTrue "VCDs are different" $?
    output=`diff "$ref_log" "$gen_log"`
    assertTrue "Simulation outputs differ:\n$output" $?
}
# Run the standard test triple for a base name: NAME.sv vs NAME.v with
# testbench NAME_tb.v.
runTest() {
    test="$1"
    simpleTest "$test.sv" "$test.v" "${test}_tb.v"
}
| true
|
5ff310f926892e403d3e3f7ec01d3d22c9e5fb61
|
Shell
|
cherusker/dps-2018s-assignment-2-testing
|
/run.sh
|
UTF-8
| 2,817
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Test driver for the DLV "guess" and "check" logic programs: each *.dl test
# input is run through dlv and the number of output lines (answer sets) is
# compared against the expectation encoded in the test file's name.
# ------------------------------------------------------------------------------
# adjust the following settings to match your setup:
dlvexe="../dlv/dlv.x86-64-linux-elf-static.bin"
guessTestsRootFolder="guess-tests/"
checkTestsRootFolder="check-tests/"
guessProgram="../code/guess.dl"
checkProgram="../code/check.dl"
runGuessTests="1" # 0 = no, 1 = yes
runCheckTests="1" # 0 = no, 1 = yes
stopOnFailure="1" # 0 = no, 1 = yes
# ------------------------------------------------------------------------------
# defines:
ansiStyleReset="\033[0m"
ansiStyleRedBold="\033[1;31m"
ansiStyleGreen="\033[0;32m"
# ------------------------------------------------------------------------------
# test execution logic:
succCounter="0"
failCounter="0"
if [[ "${runGuessTests}" == "1" ]]; then
    for inFile in "${guessTestsRootFolder}"*; do
        # only *.dl files are test inputs
        if [[ "${inFile: -3}" != ".dl" ]]; then
            continue
        fi
        printf "Testing: %s ..." "${inFile}"
        outFile="${inFile}.out"
        "${dlvexe}" -silent \
                    -N=300 \
                    "${guessProgram}" \
                    "${inFile}" > "${outFile}"
        read lines name <<< $(wc -l "${outFile}")
        # guess tests encode the expected answer-set count in the file name
        # as "...-<N>.dl"; the output must have exactly N lines
        if [[ "${inFile}" == *"-${lines}.dl" ]]; then
            succCounter=$((succCounter+1))
            printf " ${ansiStyleGreen}OK${ansiStyleReset}\n"
        else
            failCounter=$((failCounter+1))
            printf " ${ansiStyleRedBold}failed${ansiStyleReset}\n"
            if [[ "${stopOnFailure}" == "1" ]]; then
                break
            fi
        fi
    done
fi
if [[ "${runCheckTests}" == "1" ]]; then
    for inFile in "${checkTestsRootFolder}"*; do
        if [[ "${inFile: -3}" != ".dl" ]]; then
            continue
        fi
        printf "Testing: %s ..." "${inFile}"
        outFile="${inFile}.out"
        "${dlvexe}" -silent \
                    -N=300 \
                    "${checkProgram}" \
                    "${inFile}" > "${outFile}"
        read lines name <<< $(wc -l "${outFile}")
        # check tests are named "...yes.dl" (expect exactly one answer set)
        # or "...no.dl" (expect none)
        if [[ "${inFile: -6}" == "yes.dl" ]] && [[ "${lines}" == "1" ]]; then
            succCounter=$((succCounter+1))
            printf " ${ansiStyleGreen}OK${ansiStyleReset}\n"
        elif [[ "${inFile: -5}" == "no.dl" ]] && [[ "${lines}" == "0" ]]; then
            succCounter=$((succCounter+1))
            printf " ${ansiStyleGreen}OK${ansiStyleReset}\n"
        else
            failCounter=$((failCounter+1))
            printf " ${ansiStyleRedBold}failed${ansiStyleReset}\n"
            if [[ "${stopOnFailure}" == "1" ]]; then
                break
            fi
        fi
    done
fi
printf "\n  succesful tests: ${ansiStyleGreen}%d${ansiStyleReset}\n" "${succCounter}"
printf "     failed tests: ${ansiStyleRedBold}%d${ansiStyleReset}\n\n" "${failCounter}"
| true
|
8acffe59ac676201818279c731d1e586f149b92f
|
Shell
|
ecoarium/microsphere
|
/workspace-settings/shell/choices/packer.bash
|
UTF-8
| 2,384
| 3.90625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Register this file's "packer" choice with the workspace-settings framework
# (register_workspace_setting is provided by the sourcing shell tooling).
register_workspace_setting 'packer'
# Interactively choose a packer baseline: scan the deploy/packer tree for
# directories containing a template.json (excluding .build output), prompt
# the user with a numbered menu, then export
# PATHS_PROJECT_DEPLOY_PACKER_CONTEXT_PATH and PACKER_CONTEXT accordingly.
function change_packer_settings() {
	export PATHS_PROJECT_DEPLOY_PACKER_HOME="${PATHS_PROJECT_DEPLOY_HOME}/packer"
	local packer_baseline_dirs=()
	local packer_baseline_dir=''
	# collect the template dirs, relative to the packer home
	for packer_template_file in `find ${PATHS_PROJECT_DEPLOY_PACKER_HOME} -path "*/*" -type f -name 'template.json' -not -path "*/.build/*" -exec dirname '{}' \;`; do
		packer_baseline_dirs+=("${packer_template_file/${PATHS_PROJECT_DEPLOY_PACKER_HOME}\//}")
	done
	# prompt until the user picks a valid entry
	while true; do
		printf "\n"
		printf "\n"
		echo "	Choose a packer baseline:"
		local count=0
		local packer_choice=''
		for packer_baseline_dir in "${packer_baseline_dirs[@]}"
		do
			packer_choice="${packer_baseline_dir//\// }"
			let "count++"
			echo "		$count. $packer_choice"
		done
		local answer=''
		read -p "	choose (1-$count): " answer
		local original_answer=$answer
		# menu is 1-based, the array is 0-based
		let "answer--"
		if [[ -n "${packer_baseline_dirs[$answer]}" ]] ; then
			export PATHS_PROJECT_DEPLOY_PACKER_CONTEXT_PATH="${PATHS_PROJECT_DEPLOY_PACKER_HOME}/${packer_baseline_dirs[$answer]}"
			export PACKER_CONTEXT="${packer_baseline_dirs[$answer]}"
			break
		else
			echo "Invalid option: $original_answer"
		fi
	done
	show_packer_settings
}
# Print (via the sourced `good` logger) the currently selected packer context
# and a reminder of how to change it.
function show_packer_settings() {
	# human-readable name built from the last two path components
	local packer_nice_name="$(echo "${PATHS_PROJECT_DEPLOY_PACKER_CONTEXT_PATH}" | awk -F '/' '{print $(NF-1) " " $NF}')"
	good "
	###################################### PACKER OS CHOICE $(echo "${packer_nice_name}" | awk '{print toupper($0)}') #####################################
	PATHS_PROJECT_DEPLOY_PACKER_HOME:            ${PATHS_PROJECT_DEPLOY_PACKER_HOME}
	PATHS_PROJECT_DEPLOY_PACKER_CONTEXT_PATH:    ${PATHS_PROJECT_DEPLOY_PACKER_CONTEXT_PATH}
	PACKER_CONTEXT:                              ${PACKER_CONTEXT}
	if you wish to change these settings execute the following in your terminal:
	change_packer_settings
	#####################################################################################################
"
}
# Entry point for the "packer" workspace choice: prompt for a baseline, then
# export the vagrant/test environment variables the tooling expects.
function set_workspace_settings_to_packer() {
	change_packer_settings
	export VAGRANT_DEFAULT_PROVIDER=virtualbox
	export VAGRANT_CONTEXT="${VAGRANT_DEFAULT_PROVIDER}/packer/${PACKER_CONTEXT}"
	export TEST_TYPES=packer HATS=packer
}
| true
|
5e49d889bbb1884388afc30993d1b6bac62251e0
|
Shell
|
MarioHdpz/shell-scripts
|
/Certbot/init-letsencrypt.sh
|
UTF-8
| 1,613
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash

# Obtain (or replace) a Let's Encrypt certificate for a domain using the
# certbot docker image in standalone mode. Prompts for the domain and an
# optional e-mail address.

if ! [ -x "$(command -v docker)" ]; then
  echo 'Error: docker is not installed.' >&2
  exit 1
fi

read -p "Domain: " domain
read -p "Email: " email

staging=0 # Set to 1 if you're testing your setup to avoid hitting request limits

if [ -d "/etc/letsencrypt" ]; then
  read -p "Existing data found for $domain. Continue and replace existing certificate? (y/N) " decision
  if [ "$decision" != "Y" ] && [ "$decision" != "y" ]; then
    exit
  fi
fi

if [ ! -e "/etc/letsencrypt/conf/options-ssl-nginx.conf" ] || [ ! -e "/etc/letsencrypt/conf/ssl-dhparams.pem" ]; then
  echo "### Downloading recommended TLS parameters ..."
  sudo mkdir -p "/etc/letsencrypt/conf"
  curl -s https://raw.githubusercontent.com/certbot/certbot/master/certbot-nginx/certbot_nginx/_internal/tls_configs/options-ssl-nginx.conf | sudo tee /etc/letsencrypt/conf/options-ssl-nginx.conf >/dev/null
  curl -s https://raw.githubusercontent.com/certbot/certbot/master/certbot/certbot/ssl-dhparams.pem | sudo tee /etc/letsencrypt/conf/ssl-dhparams.pem >/dev/null
  echo
fi

echo "### Requesting Let's Encrypt certificate for $domain ..."

# Select appropriate email arg. Arrays keep "--email <addr>" as two distinct
# arguments without relying on unquoted word-splitting.
case "$email" in
  "") email_arg=(--register-unsafely-without-email) ;;
  *) email_arg=(--email "$email") ;;
esac

# Enable staging mode if needed.
# BUG FIX: the original `[ $staging != "0" ]` was unquoted and would break
# if $staging were ever empty.
staging_arg=()
if [ "$staging" != "0" ]; then staging_arg=(--staging); fi

docker run -it --rm --name certbot \
  -v "/etc/letsencrypt:/etc/letsencrypt" \
  -v "/var/lib/letsencrypt:/var/lib/letsencrypt" \
  -p 80:80 \
  -p 443:443 \
  certbot/certbot certonly \
  --standalone \
  -d "$domain" \
  "${email_arg[@]}" \
  "${staging_arg[@]}"
| true
|
abf9a0857cbb907ef876ffc67e9bdac713c44b12
|
Shell
|
birc-aeh/au-slurm-package
|
/scripts/controller-prolog
|
UTF-8
| 667
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Slurm controller prolog: run the per-node remote prolog on every node of
# the job in parallel; drain any node whose prolog fails and fail the job
# start when that happens.
did_any_fail=N
nlist=/dev/shm/nodes_$SLURM_JOBID
# expand the job's compressed node list into one hostname per line
/opt/slurm/bin/scontrol show hostname $SLURM_JOB_NODELIST > $nlist
# Piping in to the while loop would create a new scope, and did_any_fail would
# not be set
INPUT=$(/com/bin/parallel-ssh-cmd -f $nlist /opt/slurm/scripts/slurm-remote-prolog $SLURM_JOBID $SLURM_JOB_USER 2>&1)
# each output line: <progress> <node> <exit-code> <message...>
while read PROGRESS NODE ERROR MSG
do
    if [ "$ERROR" -ne 0 ]
    then
        echo "$NODE: $MSG"
        did_any_fail=Y
        # take the broken node out of scheduling, recording the reason
        /opt/slurm/bin/scontrol update NodeName=$NODE State=DRAIN Reason="$MSG"
    fi
done <<< "$INPUT"
rm $nlist
# non-zero exit tells slurmctld the prolog failed
if [ "$did_any_fail" == 'Y' ]
then
    echo 'We had failures/timeouts'
    exit 1
fi
|
8dceac70fde3901d77e2256b9c67b88d31c7b676
|
Shell
|
SeonghuiChoe/ionic-inappbrowser
|
/build-debug.sh
|
UTF-8
| 634
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash

# Build a Cordova debug APK for Android and copy it out under a versioned
# file name (e.g. ./crm_debug_1.2.3.apk).

# Keystore alias to use (not referenced by the debug build below)
KEYSTORE_ALIAS="crm"

PATH_UNSIGNED="/usr/local/app/platforms/android/build/outputs/apk/debug/android-debug.apk"

# Base name and extension of the output apk
OUT_SIGNED_NAME="./crm_debug"
OUT_SIGNED_EXT=".apk"

# App version, parsed from the <widget ... version="..."> tag of config.xml
version=$(grep "<widget" config.xml | grep -o "version=\"[^ ]*" | sed -e "s/version=//g" -e "s/\"//g")

# Recreate the Android platform at the pinned version
cordova platform rm android
cordova platform add android@6.4.0

# Build the Android debug APK
cordova build --debug android

cp "$PATH_UNSIGNED" "${OUT_SIGNED_NAME}_${version}${OUT_SIGNED_EXT}"
|
1d8b3c19c24b464fa98573331e314daf3cb21a5a
|
Shell
|
mempool/mempool-sitemap
|
/sitemap-update
|
UTF-8
| 3,085
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# Generate sitemap files for mempool.space block pages plus a sitemap index.
# Block hashes come from the local block-explorer API; each sitemap file
# covers BLOCK_PER_FILE consecutive block heights.
API=http://[::1]:3000
SITEMAP_INDEX_TEMP_FILE=sitemap-index-temp.xml
SITEMAP_TEMP_FILE=sitemap-temp.xml
BLOCK_TIP_HEIGHT="$(curl -s ${API}/blocks/tip/height)"
BLOCK_HEIGHT=0
BLOCK_PER_FILE=50000
FILE_TOTAL="$(($BLOCK_TIP_HEIGHT / $BLOCK_PER_FILE))"
FILE_START=0
FILE_STOP="${FILE_TOTAL}"
FILE="${FILE_START}"
echo "${BLOCK_TIP_HEIGHT} blocks"
echo "${BLOCK_PER_FILE} blocks per sitemap"
echo "${FILE_TOTAL} total sitemaps"
echo '<?xml version="1.0" encoding="UTF-8"?>' > "${SITEMAP_INDEX_TEMP_FILE}"
echo '<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">' >> "${SITEMAP_INDEX_TEMP_FILE}"
until [ "${FILE}" = "${FILE_STOP}" ];do
	# highest block covered by this sitemap file (clamped to the chain tip)
	BLOCK_HEIGHT=$(((($FILE + 1) * $BLOCK_PER_FILE) - 1))
	if [ "${BLOCK_HEIGHT}" -gt "${BLOCK_TIP_HEIGHT}" ];then
		BLOCK_HEIGHT="${BLOCK_TIP_HEIGHT}"
	fi
	BLOCK_HASH=$(curl -s ${API}/block-height/${BLOCK_HEIGHT})
	#BLOCK=$(curl -s ${API}/block/${BLOCK_HASH})
	#BLOCK_TIMESTAMP=$(echo "${BLOCK}"|sed -e 's/.*timestamp.://' -e 's/,.*//')
	SITEMAP_FILE_DECIMAL=$(printf "sitemap/sitemap%02d.xml" "${FILE}")
	SITEMAP_FILE=${SITEMAP_FILE_DECIMAL}
	#SITEMAP_FILE=$(echo ${SITEMAP_FILE_DECIMAL} | sed -e 's/0/a/g' -e 's/1/b/g' -e 's/2/c/g' -e 's/3/d/g' -e 's/4/e/g' -e 's/5/f/g' -e 's/6/g/g' -e 's/7/h/g' -e 's/8/i/g' -e 's/9/j/g')
	#SITEMAP_LASTMOD=$(date -r "${BLOCK_TIMESTAMP}" +"%Y-%m-%dT%H:%M:%SZ")
	# (re)generate a sitemap when it is missing, and always regenerate the
	# one covering the last ~2000 blocks so new blocks get picked up
	if [ ! -f "${SITEMAP_FILE}" ] || [ "${BLOCK_HEIGHT}" -gt $(($BLOCK_TIP_HEIGHT - 2000)) ];then
		BLOCK_STOP="$((($BLOCK_HEIGHT - ${BLOCK_PER_FILE}) + 1))"
		echo "generating ${SITEMAP_FILE} for block ${BLOCK_STOP} ~ ${BLOCK_HEIGHT}"
		echo '<?xml version="1.0" encoding="UTF-8"?>' > "${SITEMAP_TEMP_FILE}"
		echo '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">' >> "${SITEMAP_TEMP_FILE}"
		# one <url> entry per block, highest height first
		while [ "${BLOCK_HEIGHT}" -ge "${BLOCK_STOP}" ];do
			BLOCK_HASH=$(curl -s ${API}/block-height/${BLOCK_HEIGHT})
			#BLOCK=$(curl -s ${API}/block/${BLOCK_HASH})
			#BLOCK_TIMESTAMP=$(echo "${BLOCK}"|sed -e 's/.*timestamp.://' -e 's/,.*//')
			#BLOCK_LASTMOD=$(date -r "${BLOCK_TIMESTAMP}" +"%Y-%m-%dT%H:%M:%SZ")
			#echo '<url>' >> "${SITEMAP_TEMP_FILE}"
			echo "<url><loc>https://mempool.space/block/${BLOCK_HASH}</loc></url>" >> "${SITEMAP_TEMP_FILE}"
			#echo "<lastmod>${BLOCK_LASTMOD}</lastmod>" >> "${SITEMAP_TEMP_FILE}"
			#echo '</url>' >> "${SITEMAP_TEMP_FILE}"
			((BLOCK_HEIGHT--))
		done
		echo '</urlset>' >> "${SITEMAP_TEMP_FILE}"
		# atomically replace the published file only when fully written
		mv "${SITEMAP_TEMP_FILE}" "${SITEMAP_FILE}"
	fi
	echo '<sitemap>' >> "${SITEMAP_INDEX_TEMP_FILE}"
	echo "<loc>https://mempool.space/${SITEMAP_FILE}</loc>" >> "${SITEMAP_INDEX_TEMP_FILE}"
	#echo "<lastmod>${SITEMAP_LASTMOD}</lastmod>" >> "${SITEMAP_INDEX_TEMP_FILE}"
	echo '</sitemap>' >> "${SITEMAP_INDEX_TEMP_FILE}"
	((FILE++))
done
echo '</sitemapindex>' >> "${SITEMAP_INDEX_TEMP_FILE}"
mv "${SITEMAP_INDEX_TEMP_FILE}" "sitemap.xml"
| true
|
452f57c8dbd57ea047d2ac842dd95af755457365
|
Shell
|
ryanjclark/selenium_bot
|
/create_environment.sh
|
UTF-8
| 365
| 2.65625
| 3
|
[] |
no_license
|
# Create the selenium virtualenv, install requirements, and make the
# WebDriver binaries reachable on PATH.

echo "Creating virtualenv 'selenium'"
virtualenv selenium
source selenium/bin/activate

echo "Installing requirements"
pip install -r requirements.txt

# TODO: curl chromedriver latest version
# echo "curl command for Chromedriver"
# Chromedriver needs to be in opt/WebDriver/bin

echo "Adding WebDrivers to PATH"
# Export for the current session...
export PATH="$PATH:/opt/WebDriver/bin"
# ...and persist for future logins.
# BUG FIX: the original `export PATH=... >> ~/.profile` appended nothing,
# because `export` produces no output; write the actual line instead.
echo 'export PATH="$PATH:/opt/WebDriver/bin"' >> ~/.profile
| true
|
81e12cf4963d9cb272b39d66f7999bf56eec5fdf
|
Shell
|
timflutre/quantgen
|
/submit.bash
|
UTF-8
| 5,100
| 3.734375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Aim: submit a simple job to HTCondor (https://htcondor.org/)
# Copyright (C) 2022 INRAE
# License: GPL-3+
# Persons: Timothée Flutre [cre,aut]
# Versioning: https://github.com/timflutre/quantgen
progVersion="0.1.0" # http://semver.org/
# Display the help on stdout.
# The format complies with help2man (http://www.gnu.org/s/help2man)
# Display the help on stdout.
# The format complies with help2man (http://www.gnu.org/s/help2man)
# Interpolates the current defaults of the globals out/mem/cpu/job.
function help () {
  msg="\`${0##*/}' submits a simple job to HTCondor (https://htcondor.org/).\n"
  msg+="\n"
  msg+="Usage: ${0##*/} [OPTIONS] ...\n"
  msg+="\n"
  msg+="Options:\n"
  msg+="  -h, --help\tdisplay the help and exit\n"
  msg+="  -V, --version\toutput version information and exit\n"
  msg+="  -v, --verbose\tverbosity level (0/default=1/2/3)\n"
  msg+="  -e, --exe\tpath to the executable\n"
  msg+="  -a, --args\targuments to the executable (optional)\n"
  msg+="  -o, --out\tprefix of output files for the job (default=${out})\n"
  msg+="  -m, --mem\trequired memory (default=${mem})\n"
  msg+="  -c, --cpu\trequired CPUs (default=${cpu})\n"
  msg+="  -j, --job\tfile name for the job (default=${job})\n"
  msg+="\n"
  msg+="Examples:\n"
  msg+="  ${0##*/} -e '/bin/echo' -a 'Hello, world'\n"
  msg+="\n"
  msg+="Report bugs to <timothee.flutre@inrae.fr>."
  echo -e "$msg"
}
# Display version and license information on stdout.
# The person roles complies with R's guidelines (The R Journal Vol. 4/1, June 2012).
function version () {
  cat <<EOF
${0##*/} ${progVersion}

Copyright (C) 2022 INRAE.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

Written by Timothée Flutre [cre,aut].
EOF
}
# http://www.linuxjournal.com/content/use-date-command-measure-elapsed-time
# With no argument: print the current epoch time (seconds).
# With one argument (a start epoch): print the elapsed time since then,
# formatted as "Dd Hh Mm Ss".
function timer () {
  if [[ $# -eq 0 ]]; then
    date '+%s'
  else
    local startRawTime=$1
    endRawTime=$(date '+%s')
    [[ -z "$startRawTime" ]] && startRawTime=$endRawTime
    elapsed=$((endRawTime - startRawTime)) # in sec
    nbDays=$((elapsed / 86400))
    nbHours=$((elapsed / 3600 % 24))
    nbMins=$((elapsed / 60 % 60))
    nbSecs=$((elapsed % 60))
    printf "%01dd %01dh %01dm %01ds" $nbDays $nbHours $nbMins $nbSecs
  fi
}
# Parse the command-line arguments.
# http://stackoverflow.com/a/4300224/597069
# Sets the globals verbose/exe/args/out/mem/cpu/job, verifies that
# condor_submit is available, and requires -e/--exe.
function parseCmdLine () {
  getopt -T > /dev/null # portability check (say, Linux or Mac OS?)
  if [ $? -eq 4 ]; then # GNU enhanced getopt is available
    # BUG FIX: long options must be a comma-separated list; the original
    # string "args:out:mem:cpu:job:," had no separating commas, so
    # --out/--mem/--cpu/--job were never recognized as options.
    TEMP=`getopt -o hVv:e:a:o:m:c:j: -l help,version,verbose:,exe:,args:,out:,mem:,cpu:,job: \
        -n "$0" -- "$@"`
  else # original getopt is available (no long options, whitespace, sorting)
    TEMP=`getopt hVv:e:a:o:m:c:j: "$@"`
  fi
  if [ $? -ne 0 ]; then
    echo "ERROR: "$(which getopt)" failed" 1>&2
    getopt -T > /dev/null
    if [ $? -ne 4 ]; then
      echo "did you use long options? they are not handled \
on your system, use -h for help"
    fi
    exit 2
  fi
  eval set -- "$TEMP"
  while [ $# -gt 0 ]; do
    case "$1" in
      -h | --help) help; exit 0; shift;;
      -V | --version) version; exit 0; shift;;
      -v | --verbose) verbose=$2; shift 2;;
      -e | --exe) exe=$2; shift 2;;
      -a | --args) args=$2; shift 2;;
      -o | --out) out=$2; shift 2;;
      -m | --mem) mem=$2; shift 2;;
      -c | --cpu) cpu=$2; shift 2;;
      -j | --job) job=$2; shift 2;;
      --) shift; break;;
      *) echo "ERROR: options parsing failed, use -h for help" 1>&2; exit 1;;
    esac
  done
  hash condor_submit 2>/dev/null || \
    { echo >&2 "ERROR: condor_submit is not in your PATH"; exit 1; }
  if [ -z "${exe}" ]; then
    echo -e "ERROR: missing compulsory option -e\n" 1>&2
    help
    exit 1
  fi
}
# Write the HTCondor submit description file ($job) from the option globals
# (exe/args/out/mem/cpu) and submit it with condor_submit.
# Note: the function's own positional arguments are not used.
function run () {
  if [ $verbose -gt "0" ]; then
    msg="write config file"
    echo -e $msg
  fi
  txt="Universe = vanilla"
  txt+="\nExecutable = "${exe}
  if [ ! -z "${args}" ]; then
    txt+="\nArguments = "${args}
  fi
  txt+="\n"
  txt+="\nshould_transfer_files = no"
  txt+="\n"
  txt+="\ninput = /dev/null"
  txt+="\noutput = ${out}.o\$(Cluster)"
  txt+="\nerror = ${out}.e\$(Cluster)"
  txt+="\nlog = ${out}.l\$(Cluster)"
  txt+="\n"
  txt+="\nrequest_memory = ${mem}"
  txt+="\nrequest_cpus = ${cpu}"
  txt+="\nJobLeaseDuration = 30"
  # site-specific constraint: avoid nodes advertising HAS_ASREML
  txt+="\nrequirements = ( HAS_ASREML =?= False )"
  txt+="\ngetenv = true"
  txt+="\n"
  txt+="\nQueue"
  txt+="\n"
  echo -e ${txt} > ${job}
  if [ $verbose -gt "0" ]; then
    msg="submit job"
    echo -e $msg
  fi
  condor_submit ${job}
}
# Option defaults (overridden by parseCmdLine).
verbose=0
exe=""
args=""
out="out_condor"
mem="4G"
cpu="1"
job="job_file"
parseCmdLine "$@"
if [ $verbose -gt "0" ]; then
  startTime=$(timer)
  msg="START ${0##*/} ${progVersion} $(date +"%Y-%m-%d") $(date +"%H:%M:%S")"
#  msg+="\ncmd-line: $0 "$@ # comment if an option takes a glob as argument
  msg+="\ncwd: $(pwd)"
  echo -e $msg
fi
# NOTE(review): "exe" and "args" are passed as literal words here; run()
# ignores its arguments and reads the globals — confirm this is intentional.
run exe args
if [ $verbose -gt "0" ]; then
  msg="END ${0##*/} ${progVersion} $(date +"%Y-%m-%d") $(date +"%H:%M:%S")"
  # the bare name "startTime" is resolved as a variable inside timer's
  # arithmetic expansion, so this yields the elapsed time
  msg+=" ($(timer startTime))"
  echo $msg
fi
| true
|
c198523bbd8abddd026717b5bfd3f183ad1b3dcd
|
Shell
|
vmartinezmag/up
|
/up_laravel.sh
|
UTF-8
| 516
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash

# Toggle three local Laravel dev servers on ports 8080-8082: if any of the
# ports is in use, kill the owning processes; otherwise locate the three
# project folders under MAMP's htdocs tree and start `php artisan serve`
# for each, one port per project.

ports=$(lsof -ti:8080,8081,8082 | wc -l)

# BUG FIX: compare numerically — `wc -l` output can carry leading spaces
# (e.g. on macOS), which broke the original unquoted string comparison.
if [ "$ports" -ne 0 ]; then
	kill $(lsof -ti:8080,8081,8082)
else
	URL_A=$(find /Applications/MAMP/htdocs/ -type d -name "proorder")
	URL_B=$(find /Applications/MAMP/htdocs/ -type d -name "gestor_ana")
	URL_C=$(find /Applications/MAMP/htdocs/ -type d -name "subastas_ana")

	cd ~
	cd "$URL_A"
	php artisan serve --port 8080 &
	cd ~
	cd "$URL_B"
	php artisan serve --port 8081 &
	cd ~
	cd "$URL_C"
	php artisan serve --port 8082 &
fi
| true
|
e555500979866f2c032e3f9bcc9b7752cbe0b378
|
Shell
|
peteryang/vagrant-codes-in-practice
|
/vagrant-jenkins-gitlab/scripts/get_client_token.sh
|
UTF-8
| 1,298
| 3.890625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Create a Vault policy and AppRole, then print a client token for the role.
# Usage: ./get_client_token.sh ROOT_TOKEN POLICY_NAME APPROLE_NAME
#Exit code "155" means required parameters are not enough
if [ "$#" -ne 3 ]
then
	echo "Usage: ./$(basename $0) ROOT_TOKEN POLICY_NAME APPROLE_NAME"
	exit 155
fi
export VAULT_ADDR='http://127.0.0.1:8200'
VAULT_TOKEN=$1
# First "inet" address reported by ifconfig.
# NOTE(review): on many systems this is the loopback address — confirm it
# selects the interface Vault actually listens on.
IP=$(ifconfig | grep 'inet ' | awk '{ print $2 }' | head -n 1)
# Create a read-only policy on secret/* named $1, attach it to a new AppRole
# named $2, then log in with the role's credentials and print the resulting
# client token.
create_policy () {
	## First argument inside the function represents the policy name $1
	## Second argument inside the function represents the role name $2
	curl -s -X POST -H "X-Vault-Token: $VAULT_TOKEN" -d '{"rules": "{\"name\": \"'$1'\", \"path\": {\"secret/*\": {\"policy\": \"read\"}}}"}' http://$IP:8200/v1/sys/policy/$1
	curl -s -H "X-Vault-Token: $VAULT_TOKEN" --request POST --data '{"policies": ["'"$1"'"]}' http://$IP:8200/v1/auth/approle/role/$2
	roleid=$(curl -s -H "X-Vault-Token: $VAULT_TOKEN" http://$IP:8200/v1/auth/approle/role/$2/role-id | jq -r .data.role_id)
	secretid=$(curl -s -H "X-Vault-Token: $VAULT_TOKEN" --request POST http://$IP:8200/v1/auth/approle/role/$2/secret-id | jq -r .data.secret_id)
	# BUG FIX: the login request used `-H POST`, which sent a malformed
	# header named "POST" instead of setting the HTTP method; the method
	# belongs to --request/-X (here --data already implies POST as well).
	client_token=$(curl -s --request POST --data '{"role_id": "'"$roleid"'", "secret_id": "'"$secretid"'"}' http://$IP:8200/v1/auth/approle/login | jq -r .auth.client_token)
	echo -e "\n This token for the new role $2: $client_token"
}
# $2 = POLICY_NAME, $3 = APPROLE_NAME (argument count validated above)
create_policy $2 $3
| true
|
1cf50786712c3e80fbebd0cf0d5c494e8fc03c09
|
Shell
|
hodiapa/G5K-VMPlaceS
|
/g5k-deployment-scripts/tools/memtouch/bw.sh
|
UTF-8
| 496
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh

# Emit one line per sampling interval for the br0 bridge interface:
#   "<utc epoch> <rx bytes/s> <tx bytes/s>"
# computed from the byte counters in /proc/net/dev, averaged over the last
# interval. Runs until killed.

S=1  # sampling interval in seconds

# /proc/net/dev: field 2 = received bytes, field 10 = transmitted bytes
# (the sed turns "br0:<bytes>" into two separate fields)
TMP=$(grep br0 /proc/net/dev | sed -e 's/:/ /')
INIT_IN=$(echo "$TMP" | awk '{print $2}')
INIT_OUT=$(echo "$TMP" | awk '{print $10}')

while true ; do
    sleep "$S"
    TMP=$(grep br0 /proc/net/dev | sed -e 's/:/ /')
    LAST_IN=$(echo "$TMP" | awk '{print $2}')
    LAST_OUT=$(echo "$TMP" | awk '{print $10}')
    # POSIX arithmetic expansion instead of the heavier `expr` forks
    echo "$(date +%s --utc) $(( (LAST_IN - INIT_IN) / S )) $(( (LAST_OUT - INIT_OUT) / S ))"
    INIT_IN=$LAST_IN
    INIT_OUT=$LAST_OUT
done
| true
|
60d502add09773a520766daf9f7ac0a46c8ba6fc
|
Shell
|
Alesha72003/MINGW-packages
|
/mingw-w64-lasem/PKGBUILD
|
UTF-8
| 1,949
| 2.8125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Maintainer: Dominic Sisneros <dsisnero@gmail.com>

_realname=lasem
pkgbase=mingw-w64-${_realname}
pkgname="${MINGW_PACKAGE_PREFIX}-${_realname}"
pkgver=0.4.4
pkgrel=2
pkgdesc="Lasem aims to be a C/Gobject based SVG/Mathml renderer (mingw-w64)"
arch=('any')
mingw_arch=('mingw32' 'mingw64' 'ucrt64' 'clang64' 'clang32')
url="https://wiki.gnome.org/Projects/Lasem"
# BUG FIX: license was assigned twice; the first value ('LGPL') was dead.
# Keep the single, complete list.
license=('GPL' 'MPL' 'LGPL')
makedepends=("${MINGW_PACKAGE_PREFIX}-gobject-introspection"
             "${MINGW_PACKAGE_PREFIX}-gdk-pixbuf2"
             "${MINGW_PACKAGE_PREFIX}-glib2"
             "${MINGW_PACKAGE_PREFIX}-gtk-doc"
             "${MINGW_PACKAGE_PREFIX}-cairo"
             "${MINGW_PACKAGE_PREFIX}-pango"
             "${MINGW_PACKAGE_PREFIX}-libxml2")
source=("https://download.gnome.org/sources/${_realname}/${pkgver:0:3}/${_realname}-${pkgver}.tar.xz"
        001-fix-doc-install.patch
        002-no-undefined.patch)
sha256sums=('9bf01fcfdc913ebc05989ac1f5902d52e28e7c31f797e2b6d3d413d4b51bba39'
            '9c9321e4f2c841d3b348204519b5e508492521f4c2c512ecfb8a083a06dca1e3'
            '7f98e6d191c53ffe80235258c5114c78e3e7af889c5c07c79ed4134e2be7e3b8')
# Apply the local patches to the unpacked source and regenerate the
# autotools build files.
prepare() {
  cd "${_realname}-${pkgver}"
  patch -p1 -i ${srcdir}/001-fix-doc-install.patch
  patch -p1 -i ${srcdir}/002-no-undefined.patch
  autoreconf -fiv
}
# Configure and compile in a fresh per-target build directory.
build() {
  [[ -d ${srcdir}/build-${MINGW_CHOST} ]] && rm -rf ${srcdir}/build-${MINGW_CHOST}
  mkdir -p ${srcdir}/build-${MINGW_CHOST} && cd ${srcdir}/build-${MINGW_CHOST}
  ../${_realname}-${pkgver}/configure \
    --prefix=${MINGW_PREFIX} \
    --build=${MINGW_CHOST} \
    --host=${MINGW_CHOST} \
    --target=${MINGW_CHOST} \
    --sysconfdir=${MINGW_PREFIX}/etc \
    --enable-gtk-doc
  make
}
# check() {
# # make check
# }
# Install into the packaging root (serial -j1 install) and ship the license.
package() {
  cd "${srcdir}/build-${MINGW_CHOST}"
  make DESTDIR="${pkgdir}" -j1 install
  install -Dm644 ${srcdir}/${_realname}-${pkgver}/COPYING ${pkgdir}${MINGW_PREFIX}/share/licenses/${_realname}/COPYING
}
| true
|
72f373847c61525af66651265cc8d1bbda11be0b
|
Shell
|
tkuchiki/manage-domain
|
/bin/check_https_domain.sh
|
UTF-8
| 967
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Probe each sub-domain of a managed domain over https and mark reachable
# ones in the database.
# Usage: check_https_domain.sh DB_USER DB_NAME DOMAIN [TIMEOUT]
DB_USER="${1}"
DB_NAME="${2}"
DOMAIN="${3}"
TIMEOUT="${4:-5}"   # curl connect timeout, seconds
# all sub-domains registered for DOMAIN
LIST=`mysql -u ${DB_USER} ${DB_NAME} -N -B -e "SELECT sub_domain FROM domain_list WHERE domain = '${DOMAIN}';"`
# Print the https URL for DOMAIN ($1), prefixed with SUB_DOMAIN ($2) when
# one is given (e.g. "example.com www" -> https://www.example.com).
https() {
	_DOMAIN="${1}"
	_SUB_DOMAIN="${2}"
	if [ -n "${_SUB_DOMAIN}" ] ; then
		echo "https://${_SUB_DOMAIN}.${_DOMAIN}"
	else
		echo "https://${_DOMAIN}"
	fi
}
# Mark the given sub-domain ('@' for the naked domain) as https-capable.
# NOTE(review): this uses a hardcoded root user and `mng_domain` database
# instead of $DB_USER/$DB_NAME used elsewhere, and the WHERE clause does not
# filter by domain — confirm both are intentional.
update_ssl() {
	_SUB_DOMAIN="${1}"
	if [ "${_SUB_DOMAIN}" = "" ] ; then
		_SUB_DOMAIN='@'
	fi
	mysql -u root -e "UPDATE \`mng_domain\`.\`domain_list\` SET https=1 WHERE sub_domain='${_SUB_DOMAIN}';"
	echo "Updated ${_SUB_DOMAIN}"
}
# Probe DOMAIN ($1), optionally under SUB_DOMAIN ($2), over https; on a
# successful connection mark the record via update_ssl.
check() {
	_DOMAIN="${1}"
	_SUB_DOMAIN="${2}"
	_URL=$(https "${_DOMAIN}" "${_SUB_DOMAIN}")
	if curl --connect-timeout "$TIMEOUT" -s "$_URL" > /dev/null 2>&1 ; then
		update_ssl "${_SUB_DOMAIN}"
	fi
}
# probe every registered sub-domain
for SUB_DOMAIN in $LIST; do
	check "${DOMAIN}" "${SUB_DOMAIN}"
done
# naked domain
check "${DOMAIN}"
| true
|
61dd7f2f2559ee068d0082d761e182435bc594cb
|
Shell
|
RohitKuwar/website-members
|
/.husky/pre-commit
|
UTF-8
| 429
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
# Husky pre-commit hook: block the commit unless formatting and lint checks
# pass. NOTE(review): the `|| ( ...; false; )` pattern only aborts because
# the husky shim appears to re-run this hook under `sh -e` — confirm before
# changing the sourcing line below.
. "$(dirname "$0")/_/husky.sh"
echo 'Lets check if the code is well formatted 😎'
npm run check-format || (
    echo 'oops! your format doesnt looks good. Run npm run format and try commiting again!';
    false;
)
echo 'Ok, the format looks good, how about lint check? 😋'
npm run check-lint || (
    echo 'oops! you did not meet the linting standards!';
    false;
)
echo 'Awesome, you passed all the checks 😍🎉'
| true
|
d7692fdcd499affd590ddcc5a3ea6a7914094a78
|
Shell
|
ghoresh11/ecoli_genome_collection
|
/7_treemer_reps/X_run_fasttree_jobs.sh
|
UTF-8
| 560
| 2.609375
| 3
|
[] |
no_license
|
# Submit an LSF FastTree job for every timestamped run directory (*_155*);
# the trailing slash is stripped to build the job name and log file names.
for d in */ ; do
	if [[ $d == *_155* ]]; then
		var=${d%?}
		echo ${var}
		bsub -o ${var}.o -e ${var}.e  -J ${var} -n1 -R"span[hosts=1]" bash run_fasttree.sh $d
	fi
done
# Resubmit the runs listed in failed.txt, this time reserving 3 GB of memory.
while read d; do
	var=${d%?}
	echo ${var}
	bsub  -R"select[mem>3000] rusage[mem=3000]" -M3000 -o ${var}.o -e ${var}.e  -J ${var} -n1 -R"span[hosts=1]" bash run_fasttree.sh $d
done <failed.txt
# NOTE(review): this loop repeats the first submission pass, only skipping
# 1_1558534808 — running the whole file as-is submits most jobs twice;
# confirm the passes were meant to be executed selectively.
for d in */ ; do
	if [[ $d == 1_1558534808 ]];then
		continue
	fi
	if [[ $d == *_155* ]]; then
		var=${d%?}
		echo ${var}
		bsub -o ${var}.o -e ${var}.e  -J ${var} -n1 -R"span[hosts=1]" bash run_fasttree.sh $d
	fi
done
| true
|
09e57f28f56fd7e5a3fde7737bf88fb418acb059
|
Shell
|
mjumbewu/philly-councilmatic
|
/bin/update_data
|
UTF-8
| 1,391
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Periodic data-update driver for the Councilmatic Django project: import
# new legislative files, refresh the search index, and send feed updates.
# Set COUNCILMATIC_DIR to the absolute path of the Django project's root folder.
COUNCILMATIC_SRC=$(dirname $0)/../src
cd "$COUNCILMATIC_SRC"
# Print all arguments to stderr, keeping progress text out of captured stdout.
echoerr() { printf '%s\n' "$*" >&2; }
# 1. Download any new files
echoerr
echoerr "============================================================"
echoerr "Attempting to import any new legislative data..."
echoerr
if python manage.py updatelegfiles --traceback
  then echoerr "Finished updating all data."
  else echoerr "Failed to update all data."
fi
# 2. Update the search index with any files updated in the last week
# (--age is in hours: 168 = 7 days)
echoerr
echoerr "============================================================"
echoerr "Attempting to update the search index..."
echoerr
if python manage.py update_index --traceback --age=168 --batch-size=100
  then echoerr "Success!"
  else echoerr "Failed to update the search index."
fi
# 3. Send out subscription content notifications
# (best-effort: '|| true' keeps the later steps running on failure)
echoerr
echoerr "============================================================"
echoerr "Sending out any subscription update notifications..."
echoerr
python manage.py cleanfeeds --traceback || true
python manage.py updatefeeds --traceback || true
python manage.py sendfeedupdates --traceback || true
# 4. Update previous legfiles. This means that updates to older content will
#    always be a little behind, but it's better than nothing.
# python manage.py updatelegfiles --update
| true
|
8003e1dda1ca1b5ab9f2bcc3ec6200e08bfb7b94
|
Shell
|
michaeltout/ZcashLightClientKit
|
/Scripts/build_librustzcash_xcode.sh
|
UTF-8
| 2,407
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Xcode build-phase script: build the librustzcash Rust backend with
# cargo-lipo for the active iOS architecture and stage the static library.
SCRIPT_COMMONS="${PODS_TARGET_SRCROOT}/Scripts/script_commons.sh"
if [ ! -f $SCRIPT_COMMONS ]; then
    echo "Failed to load $SCRIPT_COMMONS"
    exit 1
fi
# provides check_environment, clean, is_mainnet, persist_environment, etc.
source $SCRIPT_COMMONS
# --testing forces the testnet environment regardless of configuration
if [ "$1" = "--testing" ]; then
   export ZCASH_NETWORK_ENVIRONMENT=$ZCASH_TESTNET
   echo "Testing flag detected, forcing $ZCASH_TESTNET"
fi
check_environment
# Xcode "clean" action: remove build products and stop
if [ "$ACTION" = "clean" ]; then
    echo "CLEAN DETECTED"
    clean
    exit 0
fi
if [ existing_build_mismatch = true ]; then
# clean
echo "Build mismatch. You previously built a different network environment. It appears that your build could be inconsistent if proceeding. Please clean your Pods/ folder and clean your build before running your next build."
exit 1
fi
if is_mainnet; then
FEATURE_FLAGS="--features=mainnet"
else
FEATURE_FLAGS=""
fi
echo "Building Rust backend"
echo ""
echo "platform name"
echo $PLATFORM_NAME
if [ $PLATFORM_NAME = "iphonesimulator" ]; then
ZCASH_ACTIVE_ARCHITECTURE="x86_64-apple-ios"
else
ZCASH_ACTIVE_ARCHITECTURE="aarch64-apple-ios"
fi
echo "cargo lipo --manifest-path ${PODS_TARGET_SRCROOT}/Cargo.toml $FEATURE_FLAGS --targets $ZCASH_ACTIVE_ARCHITECTURE --release"
if [ ! -f ${ZCASH_LIB_RUST_BUILD_PATH}/universal/release/${ZCASH_LIB_RUST_NAME} ]; then
cargo lipo --manifest-path ${PODS_TARGET_SRCROOT}/Cargo.toml $FEATURE_FLAGS --targets $ZCASH_ACTIVE_ARCHITECTURE --release
persist_environment
fi
if [ ! -d "${RUST_LIB_PATH}" ]; then
mkdir -p "${RUST_LIB_PATH}"
fi
echo "clean up existing artifacts: rm -f ${ZCASH_SDK_RUST_LIB_PATH}/${ZCASH_LIB_RUST_NAME}"
rm -f "${ZCASH_SDK_RUST_LIB_PATH}/${ZCASH_LIB_RUST_NAME}"
echo "clean up sdk lib path: rm -f ${RUST_LIB_PATH}/${ZCASH_LIB_RUST_NAME}"
rm -f "${RUST_LIB_PATH}/${ZCASH_LIB_RUST_NAME}"
echo "copying artifacts: cp -f ${ZCASH_LIB_RUST_BUILD_PATH}/universal/release/${ZCASH_LIB_RUST_NAME} ${ZCASH_SDK_RUST_LIB_PATH}/${ZCASH_LIB_RUST_NAME}"
# ALWAYS SHIP RELEASE NO MATTER WHAT YOUR BUILD IS (FOR NOW AT LEAST)
cp -f "${ZCASH_LIB_RUST_BUILD_PATH}/universal/release/${ZCASH_LIB_RUST_NAME}" "${ZCASH_SDK_RUST_LIB_PATH}/${ZCASH_LIB_RUST_NAME}"
echo "copying artifacts: cp -f ${ZCASH_LIB_RUST_BUILD_PATH}/universal/release/${ZCASH_LIB_RUST_NAME} ${RUST_LIB_PATH}/${ZCASH_LIB_RUST_NAME}"
cp -f "${ZCASH_LIB_RUST_BUILD_PATH}/universal/release/${ZCASH_LIB_RUST_NAME}" "${RUST_LIB_PATH}/${ZCASH_LIB_RUST_NAME}"
| true
|
a4a47c5bcff8ac533a1b4651132a08c45419421c
|
Shell
|
matthewc100/Xplanet-1
|
/configs/test_increment
|
UTF-8
| 94
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/zsh
# Demonstrates zsh's floating-point arithmetic: count from $first to
# $last (inclusive) in steps of $step, printing each value.
first=1
last=10
step=1.25
x=$first
while (( x <= last )); do
  echo $x
  (( x += step ))
done
| true
|
a4e3038cdab1f92640d1a78d56a8f224947109bd
|
Shell
|
CartoDB/mapnik-packaging
|
/osx/scripts/test_mapnik.sh
|
UTF-8
| 1,382
| 3.015625
| 3
|
[
"ICU",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# Post-build smoke/visual test driver for an in-place mapnik build.
set -e -u
set -o pipefail

echo '*** testing install'

# Re-run the post-build fixup if the share/ data was not installed yet.
if [ ! -d "${MAPNIK_BIN_SOURCE}/share/mapnik" ]; then
  ${ROOTDIR}/scripts/post_build_fix.sh
fi

# NOTE(review): 'echoerr' is not defined in this script — presumably
# provided by a sourced environment script; confirm it exists.
echoerr 'testing build in place'

# Point the runtime data environment at the freshly installed tree.
export ICU_DATA="${MAPNIK_BIN_SOURCE}/share/mapnik/icu"
export GDAL_DATA="${MAPNIK_BIN_SOURCE}/share/mapnik/gdal"
export PROJ_LIB="${MAPNIK_BIN_SOURCE}/share/mapnik/proj"

cd ${MAPNIK_SOURCE}

# NOTE(review): the guard reads LDPRELOAD but saves LD_PRELOAD — likely
# a typo for "${LD_PRELOAD:-false}"; confirm before changing.
if [[ ${USE_LTO} == true ]]; then
  if [[ "${LDPRELOAD:-false}" != false ]]; then
    OLD_LD_PRELOAD_VALUE="${LD_PRELOAD}"
  fi
  export LD_PRELOAD="$(pwd)/plugins/input/libgdal.so.1"
fi

$MAKE test-local || true

if [[ ${OFFICIAL_RELEASE} == true ]]; then
  # Fixed: the original brace expansion {"2.7","2.6",} had a trailing
  # comma, which produced a spurious empty element and a pointless
  # "skipping test against python " iteration.
  for i in 2.7 2.6
  do
    if [ -d "${MAPNIK_BIN_SOURCE}/lib/python${i}/site-packages/mapnik" ]; then
      echo testing against python $i
      export PYTHONPATH=${MAPNIK_BIN_SOURCE}/lib/python${i}/site-packages/
      export PATH=${MAPNIK_BIN_SOURCE}/bin:$PATH
      # TODO - allow setting python version in $MAKE wrapper
      #$MAKE test
      python${i} tests/visual_tests/test.py -q
      python${i} tests/run_tests.py -q
    else
      echo skipping test against python $i
    fi
  done
fi

# Restore the caller's LD_PRELOAD if we saved one above.
if [[ ${USE_LTO} == true ]]; then
  if [[ "${OLD_LD_PRELOAD_VALUE:-false}" != false ]]; then
    export LD_PRELOAD="${OLD_LD_PRELOAD_VALUE}"
  fi
fi

set +e +u
| true
|
efa56b36dba4f3c795a57063d1a90075b935c133
|
Shell
|
flant/examples
|
/2019/05-kubernetes-dashboard-gitlab/ctl.sh
|
UTF-8
| 9,109
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# ctl.sh — install, upgrade or delete kubernetes-dashboard (fronted by
# an oauth2-proxy authenticating against GitLab) in kube-system.
set -e

# Usage text; 'read -rd "" VAR' returns non-zero at EOF, so the leading
# '!' keeps it from tripping 'set -e'.
! read -rd '' HELP_STRING <<"EOF"
Usage: ctl.sh [OPTION]... --gitlab-url GITLAB_URL --oauth2-id ID --oauth2-secret SECRET --dashboard-url DASHBOARD_URL
Install kubernetes-dashboard to Kubernetes cluster.
Mandatory arguments:
-i, --install install into 'kube-system' namespace
-u, --upgrade upgrade existing installation, will reuse password and host names
-d, --delete remove everything, including the namespace
--gitlab-url set gitlab url with schema (https://gitlab.example.com)
--oauth2-id set OAUTH2_PROXY_CLIENT_ID from gitlab
--oauth2-secret set OAUTH2_PROXY_CLIENT_SECRET from gitlab
--dashboard-url set dashboard url without schema (dashboard.example.com)
Optional arguments:
-h, --help output this message
EOF

# Random 4-char suffix for a unique temp working directory under /tmp.
RANDOM_NUMBER=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 4 | head -n 1)
CURRENT_DIR=$(pwd)
TMP_DIR="/tmp/kubernetes-dashboard-$RANDOM_NUMBER"
WORKDIR="$TMP_DIR/kdashboard"
DASHBOARD_NAMESPACE="kube-system"

# Fall back to the default kubeconfig if the caller did not provide one.
if [ -z "${KUBECONFIG}" ]; then
    export KUBECONFIG=~/.kube/config
fi

FIRST_INSTALL="true"

# Parse short/long options with GNU getopt.
TEMP=$(getopt -o i,u,d,h --long install,upgrade,delete,gitlab-url:,oauth2-id:,oauth2-secret:,dashboard-url:,help \
-n 'ctl.sh' -- "$@")
eval set -- "$TEMP"

while true; do
    case "$1" in
        -i | --install )
            MODE=install; shift ;;
        -u | --upgrade )
            MODE=upgrade; shift ;;
        -d | --delete )
            MODE=delete; shift ;;
        --gitlab-url )
            GITLAB_URL="$2"; shift 2;;
        --oauth2-id )
            OAUTH2_PROXY_CLIENT_ID="$2"; shift 2;;
        --oauth2-secret )
            OAUTH2_PROXY_CLIENT_SECRET="$2"; shift 2;;
        --dashboard-url )
            # NOTE(review): 'DASHBORD_URL' typo is used consistently in
            # this script and its ##DASHBORD_URL## manifest placeholder.
            DASHBORD_URL="$2"; shift 2;;
        -h | --help )
            echo "$HELP_STRING"; exit 0 ;;
        -- )
            shift; break ;;
        * )
            break ;;
    esac
done

# A mode is mandatory.
if [ -z "${MODE}" ]; then
    echo "$HELP_STRING"; exit 1
fi

# install additionally requires all four connection parameters.
# NOTE(review): these failure paths exit 0 while the missing-mode path
# above exits 1 — confirm whether that difference is intentional.
if [ "$MODE" == "install" ]; then
    if [ -z "${GITLAB_URL}" ]; then
        echo "$HELP_STRING"; exit 0
    fi
    if [ -z "${OAUTH2_PROXY_CLIENT_ID}" ]; then
        echo "$HELP_STRING"; exit 0
    fi
    if [ -z "${OAUTH2_PROXY_CLIENT_SECRET}" ]; then
        echo "$HELP_STRING"; exit 0
    fi
    if [ -z "${DASHBORD_URL}" ]; then
        echo "$HELP_STRING"; exit 0
    fi
fi

# Hard dependencies.
type kubectl >/dev/null 2>&1 || { echo >&2 "I require kubectl but it's not installed. Aborting."; exit 1; }
type jq >/dev/null 2>&1 || { echo >&2 "I require jq but it's not installed. Aborting."; exit 1; }

# Work on a disposable copy of the manifests — the sed -i calls in
# install/upgrade mutate them in place.
mkdir -p "$TMP_DIR"
cd "$TMP_DIR"
cp -r "$CURRENT_DIR" "$WORKDIR"
cd "$WORKDIR"

# Non-zero when dedicated system nodes exist; controls whether the
# ##AFFINITY## placeholders are kept or stripped.
KUBE_SYSTEM=$(kubectl get node -l node-role/system -o name |wc -l)

# GitLab OAuth endpoints derived from the base URL.
LOGIN_URL="${GITLAB_URL}/oauth/authorize"
REDEEM_URL="${GITLAB_URL}/oauth/token"
VALIDATE_URL="${GITLAB_URL}/api/v3/user"
OAUTH2_PROXY_COOKIE_SECRET="$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 15 | head -n 1| base64)"
# Render the manifest templates and apply them.  ##NAME## placeholders
# are substituted in place (| would clash with URLs, hence % as the sed
# delimiter); the ##AFFINITY## markers are kept when system nodes exist,
# otherwise the marker and everything after it on the line is stripped.
function install {
    sed -i -e "s%##LOGIN_URL##%$LOGIN_URL%g" manifests/kube-dashboard-oauth2-proxy.yaml
    sed -i -e "s%##REDEEM_URL##%$REDEEM_URL%g" manifests/kube-dashboard-oauth2-proxy.yaml
    sed -i -e "s%##VALIDATE_URL##%$VALIDATE_URL%g" manifests/kube-dashboard-oauth2-proxy.yaml
    sed -i -e "s%##OAUTH2_PROXY_CLIENT_ID##%$OAUTH2_PROXY_CLIENT_ID%g" manifests/kube-dashboard-oauth2-proxy.yaml
    sed -i -e "s%##OAUTH2_PROXY_CLIENT_SECRET##%$OAUTH2_PROXY_CLIENT_SECRET%g" manifests/kube-dashboard-oauth2-proxy.yaml
    sed -i -e "s%##OAUTH2_PROXY_COOKIE_SECRET##%$OAUTH2_PROXY_COOKIE_SECRET%g" manifests/kube-dashboard-oauth2-proxy.yaml
    sed -i -e "s%##DASHBORD_URL##%$DASHBORD_URL%g" manifests/kube-dashboard-ingress.yaml
    if [[ $KUBE_SYSTEM -gt 0 ]]; then
        sed -i -e "s%##AFFINITY##%%g" manifests/kube-dashboard.yaml
        sed -i -e "s%##AFFINITY##%%g" manifests/kube-dashboard-oauth2-proxy.yaml
    else
        sed -i -e "s%##AFFINITY##.*%%g" manifests/kube-dashboard.yaml
        sed -i -e "s%##AFFINITY##.*%%g" manifests/kube-dashboard-oauth2-proxy.yaml
    fi
    kubectl apply -Rf manifests/
}
# Upgrade an existing installation: recover the OAuth parameters and the
# dashboard hostname from the running oauth2-proxy deployment/ingress,
# re-render the manifests with them, delete the old objects, re-apply.
function upgrade {
    # NOTE(review): both branches assign the same value — the kubectl
    # check is effectively a no-op; confirm the intended fallback.
    CURENT_DASHBOARD_NAMESPACE=$DASHBOARD_NAMESPACE
    kubectl get deployment kubernetes-dashboard -n $DASHBOARD_NAMESPACE >/dev/null 2>&1 && CURENT_DASHBOARD_NAMESPACE=$DASHBOARD_NAMESPACE
    # Recover CLI flags (-login-url etc.) and env-var secrets from the
    # live oauth2-proxy container spec.
    if $(kubectl get deployment oauth2-proxy -n $CURENT_DASHBOARD_NAMESPACE > /dev/null 2>/dev/null); then
        LOGIN_URL=$(kubectl get deployment oauth2-proxy -n $CURENT_DASHBOARD_NAMESPACE -o json | jq -r '.spec.template.spec.containers[0]' |grep '\-login\-url'| sed -e 's/^[[:space:]]*//'| sed -e 's/,$//')
        REDEEM_URL=$(kubectl get deployment oauth2-proxy -n $CURENT_DASHBOARD_NAMESPACE -o json | jq -r '.spec.template.spec.containers[0]' |grep '\-redeem\-url'| sed -e 's/^[[:space:]]*//'| sed -e 's/,$//')
        VALIDATE_URL=$(kubectl get deployment oauth2-proxy -n $CURENT_DASHBOARD_NAMESPACE -o json | jq -r '.spec.template.spec.containers[0]' |grep '\-validate\-url'| sed -e 's/^[[:space:]]*//'| sed -e 's/,$//')
        OAUTH2_PROXY_COOKIE_SECRET=$(kubectl get deployment oauth2-proxy -n $CURENT_DASHBOARD_NAMESPACE -o json | jq -r '.spec.template.spec.containers[0]' |grep 'OAUTH2_PROXY_COOKIE_SECRET' -A1 |grep value |awk -F ': ' '{print $2}')
        OAUTH2_PROXY_CLIENT_SECRET=$(kubectl get deployment oauth2-proxy -n $CURENT_DASHBOARD_NAMESPACE -o json | jq -r '.spec.template.spec.containers[0]' |grep 'OAUTH2_PROXY_CLIENT_SECRET' -A1 |grep value |awk -F ': ' '{print $2}')
        OAUTH2_PROXY_CLIENT_ID=$(kubectl get deployment oauth2-proxy -n $CURENT_DASHBOARD_NAMESPACE -o json | jq -r '.spec.template.spec.containers[0]' |grep 'OAUTH2_PROXY_CLIENT_ID' -A1 |grep value |awk -F ': ' '{print $2}')
    else
        echo "Can't upgrade. Deployment kubernetes-dashboard does not exists. " && exit 1
    fi
    # Recover the public hostname from the existing ingress TLS block.
    if $(kubectl get deployment oauth2-proxy -n $CURENT_DASHBOARD_NAMESPACE > /dev/null 2>/dev/null); then
        DASHBORD_URL=$(kubectl get ing oauth2-proxy -n $CURENT_DASHBOARD_NAMESPACE -o json | jq -r '.spec.tls[0].hosts[0]')
    else
        echo "Can't upgrade. Ingress oauth2-proxy does not exists. " && exit 1
    fi
    # Re-render the templates.  The recovered *_URL values are complete
    # "-login-url=..." flags, so the whole "-login-url=##...##" text is
    # the sed match here (unlike install()).
    sed -i -e "s%-login-url=##LOGIN_URL##%$LOGIN_URL%g" manifests/kube-dashboard-oauth2-proxy.yaml
    sed -i -e "s%-redeem-url=##REDEEM_URL##%$REDEEM_URL%g" manifests/kube-dashboard-oauth2-proxy.yaml
    sed -i -e "s%-validate-url=##VALIDATE_URL##%$VALIDATE_URL%g" manifests/kube-dashboard-oauth2-proxy.yaml
    sed -i -e "s%##OAUTH2_PROXY_CLIENT_ID##%$OAUTH2_PROXY_CLIENT_ID%g" manifests/kube-dashboard-oauth2-proxy.yaml
    sed -i -e "s%##OAUTH2_PROXY_CLIENT_SECRET##%$OAUTH2_PROXY_CLIENT_SECRET%g" manifests/kube-dashboard-oauth2-proxy.yaml
    sed -i -e "s%##OAUTH2_PROXY_COOKIE_SECRET##%$OAUTH2_PROXY_COOKIE_SECRET%g" manifests/kube-dashboard-oauth2-proxy.yaml
    sed -i -e "s%##DASHBORD_URL##%$DASHBORD_URL%g" manifests/kube-dashboard-ingress.yaml
    if [[ $KUBE_SYSTEM -gt 0 ]]; then
        sed -i -e "s%##AFFINITY##%%g" manifests/kube-dashboard.yaml
        sed -i -e "s%##AFFINITY##%%g" manifests/kube-dashboard-oauth2-proxy.yaml
    else
        sed -i -e "s%##AFFINITY##.*%%g" manifests/kube-dashboard.yaml
        sed -i -e "s%##AFFINITY##.*%%g" manifests/kube-dashboard-oauth2-proxy.yaml
    fi
    # Tear down the old objects (best-effort), then re-apply everything.
    kubectl delete clusterrolebinding kubernetes-dashboard ||true
    kubectl delete svc oauth2-proxy -n $DASHBOARD_NAMESPACE ||true
    kubectl delete svc kubernetes-dashboard -n $DASHBOARD_NAMESPACE ||true
    kubectl delete ing external-auth-oauth2 -n $DASHBOARD_NAMESPACE &> /dev/null||true
    kubectl delete ing oauth2-proxy -n $DASHBOARD_NAMESPACE &> /dev/null||true
    kubectl delete deployment oauth2-proxy -n $DASHBOARD_NAMESPACE &> /dev/null||true
    kubectl delete svc oauth2-proxy -n $DASHBOARD_NAMESPACE &> /dev/null||true
    kubectl delete sa kubernetes-dashboard -n $DASHBOARD_NAMESPACE &> /dev/null|| true
    kubectl delete deployment kubernetes-dashboard -n $DASHBOARD_NAMESPACE &> /dev/null||true
    kubectl delete svc kubernetes-dashboard -n $DASHBOARD_NAMESPACE &> /dev/null||true
    kubectl apply -Rf manifests/
}
# Mode dispatch: install refuses to clobber an existing deployment,
# upgrade reuses the live configuration, delete tears everything down.
if [ "$MODE" == "install" ]
then
    kubectl get deployment kubernetes-dashboard -n $DASHBOARD_NAMESPACE >/dev/null 2>&1 && FIRST_INSTALL="false"
    if [ "$FIRST_INSTALL" == "true" ]
    then
        install
    else
        echo "Deployment kubernetes-dashboard exists. Please, delete or run with the --upgrade option it to avoid shooting yourself in the foot."
    fi
elif [ "$MODE" == "upgrade" ]
then
    upgrade
elif [ "$MODE" == "delete" ]
then
    # Best-effort removal of every object the install creates.
    kubectl delete clusterrole dashboard ||true
    kubectl delete clusterrolebinding kubernetes-dashboard ||true
    kubectl delete ing external-auth-oauth2 -n $DASHBOARD_NAMESPACE &> /dev/null||true
    kubectl delete ing oauth2-proxy -n $DASHBOARD_NAMESPACE &> /dev/null||true
    kubectl delete deployment oauth2-proxy -n $DASHBOARD_NAMESPACE &> /dev/null||true
    kubectl delete svc oauth2-proxy -n $DASHBOARD_NAMESPACE &> /dev/null||true
    kubectl delete sa kubernetes-dashboard -n $DASHBOARD_NAMESPACE &> /dev/null|| true
    kubectl delete deployment kubernetes-dashboard -n $DASHBOARD_NAMESPACE &> /dev/null||true
    kubectl delete svc kubernetes-dashboard -n $DASHBOARD_NAMESPACE &> /dev/null||true
fi

# Remove the temporary working copy on exit.
# NOTE(review): the trap is registered only after all the work above, so
# any earlier 'exit 1' (e.g. inside upgrade) leaves $TMP_DIR behind.
function cleanup {
    rm -rf "$TMP_DIR"
}
trap cleanup EXIT
| true
|
6a0e6a21c020d921a9a42910d6082ba09780fe0b
|
Shell
|
jaimevale/microcks
|
/install/nomad/setup-microcks-nomad.sh
|
UTF-8
| 352
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Prepare the Microcks nomad job: create the docker network and rewrite
# the author's hard-coded mount paths in microcks.nomad to this checkout.

echo "Creating docker network for nomad job ..."
docker network create --driver bridge nomad_main

echo "Replacing mount paths with current path"
export pwd=`pwd`
export tild=~
# Fixed: the replacement is single-quoted, so *perl* (not the shell)
# expands it.  '${pwd}'/'${tild}' named undefined perl variables and
# substituted empty strings; '$ENV{...}' reads the exported shell
# variables instead.  The second replacement also restores the trailing
# slash that the matched pattern consumes.
perl -i.bak -pe 's|/Users/lbroudou/Development/github/microcks/install/nomad|$ENV{pwd}|' microcks.nomad
perl -i.bak -pe 's|/Users/lbroudou/|$ENV{tild}/|' microcks.nomad
| true
|
6f9a1b849a9c5924f3527f259452a9924d79844d
|
Shell
|
jivesoftware/routing-bird
|
/deployable-scripts/src/main/dist/bin/status
|
UTF-8
| 403
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Report whether the service named after this script's parent directory
# is running, by looking for a "-DserviceName=<name>" argument in the
# process table.  Exits 1 when stopped, 0 when running.

APP_ROOT=$(cd "$(dirname "$0")/.."; pwd)
SERVICE_NAME=$(basename "$APP_ROOT")

# One ps scan reused for both the liveness check and the PID report —
# the original scanned twice, racing against processes starting or
# stopping between the scans.  "grep " filters out the grep itself.
MATCHES=$(ps -ef | grep "\-DserviceName=${SERVICE_NAME}" | grep -v "grep ")

if [ -z "$MATCHES" ]; then
  echo "${SERVICE_NAME} stopped"
  exit 1
else
  # Column 2 of ps -ef is the PID (one per matching process).
  PID=$(echo "$MATCHES" | awk '{ print $2 }')
  echo "${SERVICE_NAME} running (pid=${PID})"
fi
| true
|
4b1d9c8e3f30601e05a7f0cb7d5eb433e1f267d8
|
Shell
|
lihuibng/realm-core
|
/evergreen/install_baas.sh
|
UTF-8
| 12,076
| 3.234375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-export-compliance"
] |
permissive
|
#!/bin/bash
# This script will download all the dependencies for and build/start a Realm Cloud app server
# and will import a given app into it.
#
# Usage:
# ./evergreen/build_and_run_baas.sh {path to working directory} {path to app to import} [git revision of baas]
#
set -o errexit
set -o pipefail

# Select per-platform download URLs for Go, MongoDB Enterprise, yq/jq
# and the stitch support / assisted-agg artifacts.
case $(uname -s) in
    Darwin)
        STITCH_SUPPORT_LIB_URL="https://s3.amazonaws.com/stitch-artifacts/stitch-support/stitch-support-macos-debug-4.3.2-721-ge791a2e-patch-5e2a6ad2a4cf473ae2e67b09.tgz"
        STITCH_ASSISTED_AGG_URL="https://stitch-artifacts.s3.amazonaws.com/stitch-mongo-libs/stitch_mongo_libs_osx_ac073d06065af6e00103a8a3cf64672a3f875bea_20_12_01_19_47_16/assisted_agg"
        GO_URL="https://golang.org/dl/go1.14.10.darwin-amd64.tar.gz"
        MONGODB_DOWNLOAD_URL="http://downloads.10gen.com/osx/mongodb-macos-x86_64-enterprise-4.4.1.tgz"
        YQ_DOWNLOAD_URL="https://github.com/mikefarah/yq/releases/download/3.4.1/yq_darwin_amd64"
        JQ_DOWNLOAD_URL="https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64"
    ;;
    Linux)
        GO_URL="https://golang.org/dl/go1.14.10.linux-amd64.tar.gz"
        YQ_DOWNLOAD_URL="https://github.com/mikefarah/yq/releases/download/3.4.1/yq_linux_amd64"
        JQ_DOWNLOAD_URL="https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64"

        # Detect what distro/versionf of Linux we are running on to download the right version of MongoDB to download
        # /etc/os-release covers debian/ubuntu/suse
        if [[ -e /etc/os-release ]]; then
            # Amazon Linux 2 comes back as 'amzn'
            DISTRO_NAME="$(. /etc/os-release ; echo "$ID")"
            DISTRO_VERSION="$(. /etc/os-release ; echo "$VERSION_ID")"
            DISTRO_VERSION_MAJOR="$(cut -d. -f1 <<< "$DISTRO_VERSION" )"
        elif [[ -e /etc/redhat-release ]]; then
            # /etc/redhat-release covers RHEL
            DISTRO_NAME=rhel
            DISTRO_VERSION="$(lsb_release -s -r)"
            DISTRO_VERSION_MAJOR="$(cut -d. -f1 <<< "$DISTRO_VERSION" )"
        fi

        case $DISTRO_NAME in
            ubuntu)
                MONGODB_DOWNLOAD_URL="http://downloads.10gen.com/linux/mongodb-linux-$(uname -m)-enterprise-ubuntu${DISTRO_VERSION_MAJOR}04-4.4.1.tgz"
                STITCH_ASSISTED_AGG_LIB_URL="https://stitch-artifacts.s3.amazonaws.com/stitch-mongo-libs/stitch_mongo_libs_ubuntu2004_x86_64_ac073d06065af6e00103a8a3cf64672a3f875bea_20_12_01_19_47_16/libmongo-ubuntu2004-x86_64.so"
                STITCH_SUPPORT_LIB_URL="https://mciuploads.s3.amazonaws.com/mongodb-mongo-v4.4/stitch-support/ubuntu2004/03d22bb5884e280934d36702136d99a9363fb720/stitch-support-4.4.2-rc0-3-g03d22bb.tgz"
            ;;
            rhel)
                case $DISTRO_VERSION_MAJOR in
                    7)
                        MONGODB_DOWNLOAD_URL="http://downloads.10gen.com/linux/mongodb-linux-x86_64-enterprise-rhel70-4.4.1.tgz"
                        STITCH_ASSISTED_AGG_LIB_URL="https://stitch-artifacts.s3.amazonaws.com/stitch-mongo-libs/stitch_mongo_libs_linux_64_ac073d06065af6e00103a8a3cf64672a3f875bea_20_12_01_19_47_16/libmongo.so"
                        STITCH_SUPPORT_LIB_URL="https://s3.amazonaws.com/stitch-artifacts/stitch-support/stitch-support-rhel-70-4.3.2-721-ge791a2e-patch-5e2a6ad2a4cf473ae2e67b09.tgz"
                    ;;
                    *)
                        echo "Unsupported version of RHEL $DISTRO_VERSION"
                        exit 1
                    ;;
                esac
            ;;
            *)
                echo "Unsupported platform $DISTRO_NAME $DISTRO_VERSION"
                exit 1
            ;;
        esac
    ;;
    *)
        echo "Unsupported platform $(uname -s)"
        exit 1
    ;;
esac
# Allow path to $CURL to be overloaded by an environment variable
CURL=${CURL:=curl}

# Directory of this script; abspath.sh lives next to it.
BASE_PATH=$(cd $(dirname "$0"); pwd)
REALPATH=$BASE_PATH/abspath.sh

# Print usage and exit.
# NOTE(review): exits 0 even when invoked on a parse error, making the
# 'exit 1' fallbacks below unreachable — confirm the intended exit code.
usage()
{
    echo "Usage: install_baas.sh -w <path to working dir>
[-s <path to stitch app to import>]
[-b <branch or git spec of baas to checkout/build]"
    exit 0
}

# Legacy getopt parsing: -w workdir, -s stitch app, -b baas revision.
# NOTE(review): traditional getopt plus the unquoted 'set --' below
# breaks on paths containing spaces.
PASSED_ARGUMENTS=$(getopt w:s:b: "$*") || usage
WORK_PATH=""
STITCH_APP=""
BAAS_VERSION=""
set -- $PASSED_ARGUMENTS
while :
do
    case "$1" in
        -w) WORK_PATH=$($REALPATH "$2"); shift; shift;;
        -s) STITCH_APP=$($REALPATH "$2"); shift; shift;;
        -b) BAAS_VERSION="$2"; shift; shift;;
        --) shift; break;;
        *) echo "Unexpected option $1"; usage;;
    esac
done

if [[ -z "$WORK_PATH" ]]; then
    echo "Must specify working directory"
    usage
    exit 1
fi

# An app to import is optional, but when given must contain config.json.
if [[ -n "$STITCH_APP" && ! -f "$STITCH_APP/config.json" ]]; then
    echo "Invalid app to import: $STITCH_APP/config.json does not exist."
    exit 1
fi

[[ -d $WORK_PATH ]] || mkdir -p $WORK_PATH
cd $WORK_PATH

# Clear the readiness marker from a previous run; it is re-created at
# the very end once the server is up.
if [[ -f $WORK_PATH/baas_ready ]]; then
    rm $WORK_PATH/baas_ready
fi
echo "Installing node and go to build baas and its dependencies"

# nvm (cloned at its latest tagged release) provides the node toolchain.
export NVM_DIR="$WORK_PATH/.nvm"
if [ ! -d "$NVM_DIR" ]; then
    git clone https://github.com/nvm-sh/nvm.git "$NVM_DIR"
    cd "$NVM_DIR"
    git checkout `git describe --abbrev=0 --tags --match "v[0-9]*" $(git rev-list --tags --max-count=1)`
    cd -
fi
[[ -s "$NVM_DIR/nvm.sh" ]] && \. "$NVM_DIR/nvm.sh"

NODE_VERSION=12.16.2
nvm install --no-progress $NODE_VERSION
nvm use $NODE_VERSION

# Private Go toolchain under the working directory.
[[ -x $WORK_PATH/go/bin/go ]] || ($CURL -sL $GO_URL | tar -xz)
export GOROOT=$WORK_PATH/go
export PATH=$WORK_PATH/go/bin:$PATH

# Helper binaries (yq, jq, transpiler, realm-cli, libmongo, assisted_agg)
# all land in baas_dep_binaries, which is prepended to PATH.
[[ -d baas_dep_binaries ]] || mkdir baas_dep_binaries
export PATH=$WORK_PATH/baas_dep_binaries:$PATH
if [[ ! -x baas_dep_binaries/yq || ! -x baas_dep_binaries/jq ]]; then
    cd baas_dep_binaries
    which yq || ($CURL -LsS $YQ_DOWNLOAD_URL > yq && chmod +x yq)
    which jq || ($CURL -LsS $JQ_DOWNLOAD_URL > jq && chmod +x jq)
    cd -
fi

# Default to the baas revision the production server reports.
if [[ -z "$BAAS_VERSION" ]]; then
    BAAS_VERSION=$($CURL -LsS "https://realm.mongodb.com/api/private/v1.0/version" | jq -r '.backend.git_hash')
fi

# Clone or refresh the baas checkout and pin it to BAAS_VERSION.
if [[ ! -d $WORK_PATH/baas/.git ]]; then
    git clone git@github.com:10gen/baas.git
else
    cd baas
    git fetch
    cd ..
fi

cd baas
echo "Checking out baas version $BAAS_VERSION"
git checkout $BAAS_VERSION
cd -

# Stitch support dynamic libraries, exposed via LD_LIBRARY_PATH.
if [[ ! -d $WORK_PATH/baas/etc/dylib/lib ]]; then
    echo "Downloading stitch support library"
    mkdir baas/etc/dylib
    cd baas/etc/dylib
    $CURL -LsS $STITCH_SUPPORT_LIB_URL | tar -xz --strip-components=1
    cd -
fi
export LD_LIBRARY_PATH=$WORK_PATH/baas/etc/dylib/lib

# Linux-only assisted-agg shared library (URL set per-distro above).
if [[ ! -x $WORK_PATH/baas_dep_binaries/libmongo.so && -n "$STITCH_ASSISTED_AGG_LIB_URL" ]]; then
    echo "Downloading assisted agg library"
    cd $WORK_PATH/baas_dep_binaries
    $CURL -LsS $STITCH_ASSISTED_AGG_LIB_URL > libmongo.so
    chmod 755 libmongo.so
    cd -
fi

# macOS-only assisted-agg binary.
if [[ ! -x $WORK_PATH/baas_dep_binaries/assisted_agg && -n "$STITCH_ASSISTED_AGG_URL" ]]; then
    echo "Downloading assisted agg binary"
    cd $WORK_PATH/baas_dep_binaries
    $CURL -LsS $STITCH_ASSISTED_AGG_URL > assisted_agg
    chmod 755 assisted_agg
    cd -
fi

# Yarn is needed to build the transpiler.
YARN=$WORK_PATH/yarn/bin/yarn
if [[ ! -x $YARN ]]; then
    echo "Getting yarn"
    mkdir yarn && cd yarn
    $CURL -LsS https://s3.amazonaws.com/stitch-artifacts/yarn/latest.tar.gz | tar -xz --strip-components=1
    cd -
    mkdir $WORK_PATH/yarn_cache
fi

if [[ ! -x baas_dep_binaries/transpiler ]]; then
    echo "Building transpiler"
    cd baas/etc/transpiler
    $YARN --non-interactive --silent --cache-folder $WORK_PATH/yarn_cache
    $YARN build --cache-folder $WORK_PATH/yarn_cache --non-interactive --silent
    cd -
    ln -s $(pwd)/baas/etc/transpiler/bin/transpiler baas_dep_binaries/transpiler
fi

# realm-cli is built from source with the local Go toolchain.
if [[ ! -x baas_dep_binaries/realm-cli ]]; then
    mkdir realm-cli
    cd realm-cli
    $CURL -LsS https://github.com/10gen/realm-cli/archive/v1.2.0.tar.gz | tar -xz --strip-components=1
    go build -o $WORK_PATH/baas_dep_binaries/realm-cli
    cd -
fi

# MongoDB Enterprise server binaries.
if [ ! -x $WORK_PATH/mongodb-binaries/bin/mongod ]; then
    echo "Downloading mongodb"
    $CURL -sLS $MONGODB_DOWNLOAD_URL --output mongodb-binaries.tgz
    tar -xzf mongodb-binaries.tgz
    rm mongodb-binaries.tgz
    mv mongodb* mongodb-binaries
    chmod +x ./mongodb-binaries/bin/*
fi

# mongod + baas hold many open files.
ulimit -n 32000

# Fresh database directory for every run.
if [[ -d mongodb-dbpath ]]; then
    rm -rf mongodb-dbpath
fi
mkdir mongodb-dbpath
# Kill the baas server and mongod (their pidfiles are written under
# WORK_PATH by the start commands below) and wait for them to exit.
# Runs on EXIT; INT/TERM/ERR are converted to an exit so it fires there
# too.
function cleanup() {
    if [[ -f $WORK_PATH/baas_server.pid ]]; then
        PIDS_TO_KILL="$(< $WORK_PATH/baas_server.pid)"
    fi

    if [[ -f $WORK_PATH/mongod.pid ]]; then
        PIDS_TO_KILL="$(< $WORK_PATH/mongod.pid) $PIDS_TO_KILL"
    fi

    if [[ -n "$PIDS_TO_KILL" ]]; then
        echo "Killing $PIDS_TO_KILL"
        kill $PIDS_TO_KILL
        echo "Waiting for processes to exit"
        wait
    fi
}
trap "exit" INT TERM ERR
trap cleanup EXIT
echo "Starting mongodb"
# Fixed: the rm path said "mongodb-path" while the guard checks
# "mongodb-dbpath"; with a stale pidfile present, rm failed on the
# nonexistent directory and killed the script under errexit.
[[ -f $WORK_PATH/mongodb-dbpath/mongod.pid ]] && rm $WORK_PATH/mongodb-dbpath/mongod.pid

# Single-node replica set on port 26000 (sync requires a replica set).
./mongodb-binaries/bin/mongod \
    --replSet rs \
    --bind_ip_all \
    --port 26000 \
    --logpath $WORK_PATH/mongodb-dbpath/mongod.log \
    --dbpath $WORK_PATH/mongodb-dbpath/ \
    --pidfilepath $WORK_PATH/mongod.pid &

# Block until mongod accepts connections.
./mongodb-binaries/bin/mongo \
    --nodb \
    --eval 'assert.soon(function(x){try{var d = new Mongo("localhost:26000"); return true}catch(e){return false}}, "timed out connecting")' \
    > /dev/null

echo "Initializing replica set"
./mongodb-binaries/bin/mongo --port 26000 --eval 'rs.initiate()' > /dev/null

cd $WORK_PATH/baas

# Create the admin user the realm-cli login below authenticates as.
echo "Adding stitch user"
go run -exec="env LD_LIBRARY_PATH=$LD_LIBRARY_PATH" cmd/auth/user.go \
    addUser \
    -domainID 000000000000000000000000 \
    -mongoURI mongodb://localhost:26000 \
    -salt 'DQOWene1723baqD!_@#'\
    -id "unique_user@domain.com" \
    -password "password"

[[ -d tmp ]] || mkdir tmp

echo "Starting stitch app server"
[[ -f $WORK_PATH/baas_server.pid ]] && rm $WORK_PATH/baas_server.pid
go build -o $WORK_PATH/baas_server cmd/server/main.go
$WORK_PATH/baas_server \
    --configFile=etc/configs/test_config.json 2>&1 > $WORK_PATH/baas_server.log &
echo $! > $WORK_PATH/baas_server.pid
$BASE_PATH/wait_for_baas.sh $WORK_PATH/baas_server.pid

# Optionally import the requested app (plus its secrets, if any) into
# the freshly started server.
if [[ -n "$STITCH_APP" ]]; then
    APP_NAME=$(jq '.name' "$STITCH_APP/config.json" -r)
    echo "importing app $APP_NAME from $STITCH_APP"

    [[ -f $WORK_PATH/stitch-state ]] && rm $WORK_PATH/stitch-state
    realm-cli login \
        --config-path=$WORK_PATH/stitch-state \
        --base-url=http://localhost:9090 \
        --auth-provider=local-userpass \
        --username=unique_user@domain.com \
        --password=password \
        -y

    ACCESS_TOKEN=$(yq r $WORK_PATH/stitch-state "access_token")
    GROUP_ID=$($CURL \
        --header "Authorization: Bearer $ACCESS_TOKEN" \
        http://localhost:9090/api/admin/v3.0/auth/profile -s | jq '.roles[0].group_id' -r)

    APP_ID_PARAM=""
    # Secrets must exist before the real import, so first create an
    # empty app with the same name, add the secrets to it, and reuse
    # its app id for the import below.
    if [[ -f "$STITCH_APP/secrets.json" ]]; then
        TEMP_APP_PATH=$(mktemp -d $WORK_PATH/$(basename $STITCH_APP)_XXXX)
        mkdir -p $TEMP_APP_PATH && echo "{ \"name\": \"$APP_NAME\" }" > "$TEMP_APP_PATH/config.json"
        realm-cli import \
            --config-path=$WORK_PATH/stitch-state \
            --base-url=http://localhost:9090 \
            --path="$TEMP_APP_PATH" \
            --project-id "$GROUP_ID" \
            --strategy replace \
            -y
        APP_ID=$(jq '.app_id' "$TEMP_APP_PATH/config.json" -r)
        APP_ID_PARAM="--app-id=$APP_ID"

        while read -r SECRET VALUE; do
            realm-cli secrets add \
                --config-path=$WORK_PATH/stitch-state \
                --base-url=http://localhost:9090 \
                --app-id=$APP_ID \
                --name="$SECRET" \
                --value="$(echo $VALUE | sed 's/\\n/\n/g')"
        done < <(jq 'to_entries[] | [.key, .value] | @tsv' "$STITCH_APP/secrets.json" -r)
        rm -r $TEMP_APP_PATH
    fi

    realm-cli import \
        --config-path=$WORK_PATH/stitch-state \
        --base-url=http://localhost:9090 \
        --path="$STITCH_APP" \
        $APP_ID_PARAM \
        --project-id="$GROUP_ID" \
        --strategy=replace \
        -y
fi

# Signal readiness to watchers, then keep the background servers alive.
touch $WORK_PATH/baas_ready
echo "Baas server ready"
wait
| true
|
4d741542d19ee593f659c04bce79f13c89ef4111
|
Shell
|
meisto/Terminal_Cheatsheet
|
/colors/colors.sh
|
UTF-8
| 855
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Date format: DD:MM:YY-hh:mm:ss
# Types: ONC (once), REG (regular), IRR (irregular)

echo_color () {
  # Print $3 with optional 24-bit foreground ($1) and background ($2)
  # colors.  A color code is "r;g;b" with r,g,b in 0-255; pass "-" to
  # skip that channel.  A trailing "\e[0m" always resets the terminal.
  #
  # Fixed: all expansions are now quoted — the original's unquoted
  # 'test $1 != "-"' errored on empty arguments and word-split anything
  # containing spaces.  Helpers are also 'local' so they no longer leak
  # into the caller's environment.
  local fg_prefix="\x1b[38;2;"
  local bg_prefix="\x1b[48;2;"
  local color_suffix="m"

  local fg_cc="$fg_prefix$1$color_suffix"
  local bg_cc="$bg_prefix$2$color_suffix"

  [ "$1" != "-" ] && echo -ne "$fg_cc"
  [ "$2" != "-" ] && echo -ne "$bg_cc"

  echo -ne "$3"

  # Stop color modifications
  echo -ne "\e[0m"
}
echo_block () {
  # Render $3 as a powerline-style block: left arrow glyph in the block
  # color, the text (fg $1 on bg $2) padded with spaces, then a right
  # arrow glyph.  Arguments are deliberately passed on unquoted, exactly
  # as the callers supply them.
  local fg=$1 bg=$2 text=$3
  echo_color $bg "-" "\uE0B2"
  echo_color $fg $bg " $text "
  echo_color $bg "-" "\uE0B0"
}
# Demo: render "test" red-on-cyan, first with raw echo_color calls and
# powerline glyphs, then via the echo_block helper.
echo_color "255;0;0" "0;255;255" "\uE0B2 test \uE0B0"
echo ""
echo_block "255;0;0" "0;255;255" "test"
echo ""
| true
|
9fc5d674659958476b2c95f1ecfb6de02249c4e6
|
Shell
|
fossabot/wireguard-install
|
/wireguard-client.sh
|
UTF-8
| 2,858
| 3.71875
| 4
|
[] |
permissive
|
#!/bin/bash
# Secure WireGuard For CentOS, Debian, Ubuntu, Raspbian, Arch, Fedora, Redhat
# Sanity Checks and automagic

# Abort unless the script is being run with root privileges.
function root-check() {
  if (( EUID != 0 )); then
    echo "Sorry, you need to run this as root"
    exit
  fi
}

# Root Check
root-check
function dist-check() {
if [ -e /etc/centos-release ]; then
DISTRO="CentOS"
elif [ -e /etc/debian_version ]; then
DISTRO=$( lsb_release -is )
elif [ -e /etc/arch-release ]; then
DISTRO="Arch"
elif [ -e /etc/fedora-release ]; then
DISTRO="Fedora"
elif [ -e /etc/redhat-release ]; then
DISTRO="Redhat"
else
echo "Your distribution is not supported (yet)."
exit
fi
}
# Check distro
dist-check
# Install Wireguard
function install-wireguard-client() {
if [ "$DISTRO" == "Ubuntu" ]; then
apt-get update
apt-get install software-properties-common -y
add-apt-repository ppa:wireguard/wireguard -y
apt-get update
apt-get install wireguard resolvconf linux-headers-$(uname -r) -y
elif [ "$DISTRO" == "Debian" ]; then
echo "deb http://deb.debian.org/debian/ unstable main" > /etc/apt/sources.list.d/unstable.list
printf 'Package: *\nPin: release a=unstable\nPin-Priority: 90\n' > /etc/apt/preferences.d/limit-unstable
apt-get update
apt-get install wireguard resolvconf linux-headers-$(uname -r) -y
elif [ "$DISTRO" == "Raspbian" ]; then
apt-get update
echo "deb http://deb.debian.org/debian/ unstable main" > /etc/apt/sources.list.d/unstable.list
apt-get install dirmngr -y
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 04EE7237B7D453EC
printf 'Package: *\nPin: release a=unstable\nPin-Priority: 90\n' > /etc/apt/preferences.d/limit-unstable
apt-get update
apt-get install wireguard raspberrypi-kernel-headers resolvconf -y
elif [ "$DISTRO" == "Arch" ]; then
pacman -Syy
pacman -S openresolv wireguard-tools wireguard-arch
elif [[ "$DISTRO" = 'Fedora' ]]; then
dnf update
dnf copr enable jdoss/wireguard -y
dnf install kernel-devel-$(uname -r) resolvconf wireguard-dkms wireguard-tools -y
elif [ "$DISTRO" == "CentOS" ]; then
yum update
wget -O /etc/yum.repos.d/wireguard.repo https://copr.fedorainfracloud.org/coprs/jdoss/wireguard/repo/epel-7/jdoss-wireguard-epel-7.repo
yum install epel-release -y
yum install wireguard-dkms wireguard-tools resolvconf kernel-headers-$(uname -r) kernel-devel-$(uname -r) -y
elif [ "$DISTRO" == "Redhat" ]; then
yum update
wget -O /etc/yum.repos.d/wireguard.repo https://copr.fedorainfracloud.org/coprs/jdoss/wireguard/repo/epel-7/jdoss-wireguard-epel-7.repo
yum install epel-release -y
yum install wireguard-dkms wireguard-tools resolvconf kernel-headers-$(uname -r) kernel-devel-$(uname -r) -y
fi
}
# Install WireGuard Client
install-wireguard-client
| true
|
9877dbfc257c2051bece732a5650fb5f81106b8e
|
Shell
|
hayderimran7/ci-stack
|
/bootstrap/puppetdb/bootstrap.sh
|
UTF-8
| 424
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Bootstrap a PuppetDB node with the OpenStack-infra config repo:
# installs puppet and its modules, then applies puppetdb.pp from the
# directory this script lives in.

# Absolute directory of this script, so puppetdb.pp is found no matter
# where the caller's cwd is.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
sudo apt-get install -y git
sudo git clone https://git.openstack.org/openstack-infra/config /opt/config/production
sudo /opt/config/production/install_puppet.sh
sudo apt-get install -y hiera hiera-puppet
sudo /opt/config/production/install_modules.sh
# Apply the local manifest with both module paths available.
sudo puppet apply --modulepath='/opt/config/production/modules:/etc/puppet/modules' ${DIR}/puppetdb.pp
| true
|
bf05f7d59c2725a353f4159d7c3d6225ce837906
|
Shell
|
Hoangdinh85/NTI-321
|
/automation/scp_to_nagios.sh
|
UTF-8
| 344
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a Nagios host config for host $1 (parameter $2 is forwarded
# to generate_config.sh), install it on the nagios-a server, then verify
# the nagios configuration there.
#
# Usage: scp_to_nagios.sh <hostname> <arg2>

# Fail fast instead of generating and scp-ing an empty/broken config.
if [ -z "$1" ] || [ -z "$2" ]; then
  echo "Usage: $0 <hostname> <arg2>" 1>&2
  exit 1
fi

# Fixed: arguments are quoted so hostnames with unusual characters
# cannot be word-split or glob-expanded.
bash generate_config.sh "$1" "$2"
gcloud compute scp "$1.cfg" nicolebade@nagios-a:/etc/nagios/servers
# Note: I had to add user nicolebade to group nagios using usermod -a -G nagios nicolebade
# I also had to chmod 775 /etc/nagios/servers
gcloud compute ssh nicolebade@nagios-a --command='sudo /usr/sbin/nagios -v /etc/nagios/nagios.cfg'
| true
|
75c68adf686ef1325bcbe0c2f8e4eadf6af82d74
|
Shell
|
ericadams/dotfiles
|
/WSL/bash/.bash_aliases
|
UTF-8
| 2,271
| 2.875
| 3
|
[] |
no_license
|
# Docker Compose shorthand.
alias dcs=docker-compose

# SendGrid webhook endpoints per environment.
export WEBHOOK_ENDPOINT='api/SendGridWebhookEvents'
export SANDBOX_WEBHOOK='https://1daesk3r9c.execute-api.us-west-2.amazonaws.com/api/SendGridWebhookEvents'
export INT_WEBHOOK="https://5upst37au2.execute-api.us-west-2.amazonaws.com/$WEBHOOK_ENDPOINT"
export SANDBOX_AGGS_DB="sendgrid-event-aggregates-db.internal.sandbox-petdesk.com"
# Point AGGS_DB_HOST at the event-aggregates DB for the given workspace
# (defaults to "sandbox") and echo the resulting assignment.
aggsdbhost() {
  workspace=${1:-sandbox}
  export AGGS_DB_HOST="sendgrid-event-aggregates-db.internal.${workspace}-petdesk.com"
  printf 'AGGS_DB_HOST=%s\n' "$AGGS_DB_HOST"
}
# Repository roots for the various PetDesk projects (WSL paths onto the
# Windows filesystem).
export GIT_ROOT="/mnt/c/Users/Eric Adams/code"
export VET_API_ROOT="$GIT_ROOT/svc-vet-api"
export VET_DASH_ROOT="$GIT_ROOT/web-vet-dashboard"
export VET_DASH_REACT="$VET_DASH_ROOT/MessageCenter/react"
export BENTLEY_ROOT="$GIT_ROOT/bkgd-pd-bentley"
export ADMIN_TOOLS_ROOT="$GIT_ROOT/web-pd-admintools"
export ALLSERVE_ROOT="$GIT_ROOT/svc-pd-api"
export E2E_ROOT="$GIT_ROOT/qa-e2e"

# VS Code / Visual Studio project launchers.
alias code-react="code $VET_DASH_REACT"
alias code-e2e="code $E2E_ROOT"
alias vs-vetdash="start $VET_DASH_ROOT/MessageCenter.sln"
alias vs-vetapi="start $VET_API_ROOT/VetAPI.sln"
alias vs-bentley="start $BENTLEY_ROOT/Bentley.sln"
alias vs-tools="start $ADMIN_TOOLS_ROOT/PetPartnerProviderAdmin.sln"
alias vs-allserve="start $ALLSERVE_ROOT/AllServWebAPI.sln"

# Terraform Aliases
alias tf="terraform"
alias tfspace='terraform workspace select'
alias sbox='tfspace sandbox'

# Prune local branches whose remote is gone.
alias git-prune="npx git-removed-branches --prune"

# AWS CLI aliases
alias aws-whoami="aws sts get-caller-identity"
alias aws-secrets="aws secretsmanager"

# Git
alias cp-commit-msg="cp ~/.githook-commit-msg ./.git/hooks/commit-msg"
alias gpn='git push -u origin $(get_branch)'
alias ci-sha="git rev-parse --short"
alias last-sha="git rev-parse --short HEAD"

# pbcopy/pbpaste emulation
alias pbcopy='xclip -selection clipboard'
alias pbpaste='xclip -selection clipboard -o'

# Timing breakdown for HTTP requests (format from ~/.curl-format.txt).
alias curltime="curl -w \"@$HOME/.curl-format.txt\" -o /dev/null -s "

alias cd...='cd ../..'
alias cdinfra='cd ./operations/infrastructure'
# Re-initialise terraform, select the given workspace (default:
# sandbox), then remove every stale "module.<name>_state" entry from the
# terraform state.
# Fixed: this function lives in .bash_aliases and runs in the
# interactive shell, so failures must 'return 1', not 'exit 1' — exit
# would close the user's terminal session.  The a && b || c chain is
# also replaced with an explicit if, which has the same success
# semantics without the misfire when 'c' runs after a failing 'b'.
tf_clean_state () {
  workspace=${1:-sandbox}
  if ! terraform init || ! terraform workspace select "$workspace"; then
    echo "init failed" >&2
    return 1
  fi
  terraform state list | grep '^module\.[^.]\{1,\}_state' | xargs -I "@" terraform state rm "@";
}
| true
|
debf4190c0be2e9f1c2c878802b5edfa20018db2
|
Shell
|
tournet/script
|
/sources.sh
|
UTF-8
| 644
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Repoint an end-of-life Debian 8 (jessie) box at archive.debian.org and
# disable Release-file expiry checks so apt works again.
# Usage:
# wget --no-check-certificate https://raw.githubusercontent.com/mixool/script/master/sources.sh && chmod +x sources.sh && ./sources.sh

# Must run as root (writes under /etc/apt).
[[ $EUID -ne 0 ]] && echo "Error, This script must be run as root!" && exit 1

# Replace the sources list with the jessie archive mirrors.
cat <<EOF > /etc/apt/sources.list
deb http://archive.debian.org/debian/ jessie main
deb-src http://archive.debian.org/debian/ jessie main
deb http://security.debian.org jessie/updates main
deb-src http://security.debian.org jessie/updates main
EOF

# archive.debian.org Release files are expired; tell apt not to care.
cat <<EOF > /etc/apt/apt.conf.d/10no--check-valid-until
Acquire::Check-Valid-Until "0";
EOF

apt-get clean && apt-get update
apt-get install wget curl vim -y
| true
|
fbcbe6e7599c6918394be41f5ac1eddb083ddfd9
|
Shell
|
shazame/Personal
|
/config/zsh/.zshrc
|
UTF-8
| 1,171
| 2.671875
| 3
|
[] |
no_license
|
# An impressive base zshrc is used: pacman -S grml-zsh-config

# Load personal aliases and directory bookmarks when present.
if [ -r ${ZDOTDIR:-${HOME}}/.zsh_aliases ]; then
    source ${ZDOTDIR:-${HOME}}/.zsh_aliases
fi
if [ -r ${ZDOTDIR:-${HOME}}/.zsh_marks ]; then
    source ${ZDOTDIR:-${HOME}}/.zsh_marks
fi

# Custom completion settings
fpath=(${ZDOTDIR:-${HOME}}/completion $fpath)
autoload -U compinit
compinit

# Prompt configuration (see: prompt -h grml)
# Available items:
#at, battery, change-root, date, grml-chroot,
#history, host, jobs, newline, path, percent, rc, rc-always, sad-smiley,
#shell-level, time, user, vcs
# Default prompt:
#zstyle ':prompt:grml:left:setup' items rc change-root user at host path vcs percent
#zstyle ':prompt:grml:right:setup' items sad-smiley
# My prompt:
zstyle ':prompt:grml:left:setup' items shell-level rc change-root user at host path vcs percent
zstyle ':prompt:grml:right:setup' items sad-smiley time battery

# Color configuration
# Yellow username
zstyle ':prompt:grml:left:items:user' pre '%F{yellow}'
# Blue time
zstyle ':prompt:grml:right:items:time' pre ' %F{blue}[ '
zstyle ':prompt:grml:right:items:time' post ']%f'

#source /usr/share/doc/pkgfile/command-not-found.zsh
| true
|
5bbcc8378d00bb041ba07327b181b66b011d2908
|
Shell
|
teejl/HPCA
|
/PRJ2/trial2/sim_exe.sh
|
UTF-8
| 2,215
| 2.96875
| 3
|
[] |
no_license
|
function init
{
# Reset the sesc working tree to this trial's sources and rebuild both
# the simulator and the fmm benchmark.
# clear results.txt
echo "Simulation executing..." > ~/Repos/HPCA/PRJ2/trial2/results.txt
# NOTE(review): this second write uses a relative path, unlike the
# absolute path above — it appends to results.txt in the CWD; confirm
# whether it was meant to target the same file.
echo "" >> results.txt
# resetting the configuration files
rm ~/sesc/src/libsuc/CacheCore.cpp
rm ~/sesc/src/libsuc/CacheCore.h
rm ~/sesc/src/libcmp/SMPCache.cpp
rm ~/sesc/src/libcmp/SMPCache.h
cp ~/Repos/HPCA/PRJ2/trial2/CacheCore.cpp ~/sesc/src/libsuc/
cp ~/Repos/HPCA/PRJ2/trial2/CacheCore.h ~/sesc/src/libsuc/
cp ~/Repos/HPCA/PRJ2/trial2/SMPCache.cpp ~/sesc/src/libcmp/
cp ~/Repos/HPCA/PRJ2/trial2/SMPCache.h ~/sesc/src/libcmp/
# build out simulator and simulation
cd ~/sesc/
make
cd ~/sesc/apps/Splash2/fmm
make
}
function simulate
{
# Run one sesc simulation and append its statistics to results.txt.
# $1 - sesc configuration file (copied into ~/sesc/confs/)
# $2 - label for this run; also the suffix of the sesc output file
# run simulation for default case
echo "~~~~~ ${2} ~~~~~" >> ~/Repos/HPCA/PRJ2/trial2/results.txt
# remove output, run simulation, and copy it to the trail folder
rm ~/sesc/apps/Splash2/fmm/"sesc_fmm.mipseb.${2}"
cp ~/Repos/HPCA/PRJ2/trial2/"${1}" ~/sesc/confs/
~/sesc/sesc.opt -f $2 -c ~/sesc/confs/"${1}" -iInput/input.256 -ofmm.out -efmm.err fmm.mipseb -p 1
rm ~/Repos/HPCA/PRJ2/trial2/"sesc_fmm.mipseb.${2}"
cp ~/sesc/apps/Splash2/fmm/"sesc_fmm.mipseb.${2}" ~/Repos/HPCA/PRJ2/trial2/
# copy over error files and print them out
echo " ~~~~~~ ERRORS ~~~~~~"
cp ~/sesc/apps/Splash2/fmm/fmm.err ~/Repos/HPCA/PRJ2/trial2/
cat ~/sesc/apps/Splash2/fmm/fmm.err
echo ""
echo " ~~~~~~ OUT ~~~~~~"
cp ~/sesc/apps/Splash2/fmm/fmm.out ~/Repos/HPCA/PRJ2/trial2/
cat ~/sesc/apps/Splash2/fmm/fmm.out
echo " ~~~~~~~~~~~~~~~~~~~~"
# get report statistics
~/sesc/scripts/report.pl ~/sesc/apps/Splash2/fmm/"sesc_fmm.mipseb.${2}" >> ~/Repos/HPCA/PRJ2/trial2/results.txt
# output results
echo "~~~~~ ${2} ~~~~~" >> ~/Repos/HPCA/PRJ2/trial2/results.txt
echo ""
}
function testing
{
# Debug helper: print the first two arguments on separate lines.
# Both are quoted so arguments containing spaces print verbatim
# (the original unquoted $2 was word-split by the shell).
echo "${1}"
echo "${2}"
}
echo "~~~~ Starting Simulation ~~~~"
echo "I will be simulating a processor with this script. PRJ2."
echo "by TeeJ"
#testing cmp4-noc.conf Default
# Rebuild everything, then run the enabled configurations; disabled
# variants are kept below for reference.
init
simulate cmp4-noc.conf Default
simulate cmp4-noc-L1NXLRU.conf L1NXLRU
#simulate cmp4-noc-dmap-l1.conf DMapL1
#simulate cmp4-noc-5cyc-l1.conf 5CycL1
#simulate cmp4-noc-9cyc-l1.conf 9CycL1
echo "~~~~ Finishing Simulation ~~~~"
| true
|
b6a9ed3cd7c6a483a0f611e19dda390752a7477e
|
Shell
|
longbiaochen/lvm-rsync-backup
|
/backup-daily~
|
UTF-8
| 4,214
| 4.0625
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# This script performs a backup of the specified
# system logical volume to a backup logical volume
# located on a separate physical volume.
# It uses an LVM snapshot to ensure the data
# remains consistent during the backup.
#
# Usage: backup-daily <volume> <mount-root> <backup-size> <snapshot-buffer-size>
BACKUP_VOLUME=$1
BACKUP_MOUNT=$2
BACKUP_SIZE=$3
BUFFER_SIZE=$4
# NOTE(review): BACKUP_DEVICE/SNAPSHOT_DEVICE are used directly as mount
# sources below, so $1 is presumably a full /dev/<vg>/<lv> path supplied
# by the caller — confirm against the invoking cron job/script.
BACKUP_DEVICE="${BACKUP_VOLUME}"
BACKUP_RW_MOUNT="${BACKUP_MOUNT}/${BACKUP_VOLUME}_rw"
BACKUP_RO_MOUNT="${BACKUP_MOUNT}/${BACKUP_VOLUME}"
SNAPSHOT_NAME="${BACKUP_VOLUME}_snapshot"
SNAPSHOT_DEVICE="${SNAPSHOT_NAME}"
SNAPSHOT_MOUNT="${BACKUP_MOUNT}/${SNAPSHOT_NAME}"
### First check everything was left cleanly last time, and fix if not
#
# If the snapshot mountpoint still exists try unmounting and removing it
#
if [ -d ${SNAPSHOT_MOUNT} ] ; then
umount ${SNAPSHOT_MOUNT}
rmdir ${SNAPSHOT_MOUNT}
fi
#
# If the snapshot logical volume still exists, remove it
#
lvdisplay | grep "LV Name" | grep -q ${SNAPSHOT_NAME}
if [ $? = 0 ]; then
lvremove -f ${SNAPSHOT_DEVICE}
fi
### Create a logical volume to snapshot the system volume
#
# This is created every time. The volume is deleted at the end of the
# backup as it is not necessary to keep it, wastes space and
# cpu and will freeze when full.
#
# The size of this volume needs to be large enough to contain
# any changes which may happen on the original volume during
# the course of the backup. For example, with a size of 592M,
# if a 1G file is written the snapshot volume may be frozen!
# To avoid this make size big enough to cope, execute only in
# quiet times (early hours) and make sure this script completes
# gracefully if a frozen snapshot is encountered.
#
echo lvcreate -L${BUFFER_SIZE} -s -n ${SNAPSHOT_NAME} ${BACKUP_VOLUME}
lvcreate -L${BUFFER_SIZE} -s -n ${SNAPSHOT_NAME} ${BACKUP_VOLUME}
### Create the backup logical volume
#
# This will only usually be necessary on the first run of this
# script, unless for some reason the backup itself has been lost!
#
# The size of the backup volume must be large enough to contain
# the contents of the original volume and any changes which have
# occurred over the length of time for which backups are kept.
# It may therefore be necessary to extend the backup volume manually.
#
# NOTE(review): the volume is created in a volume group literally named
# "backup" and formatted as XFS — confirm that VG exists on this host.
if [ ! -h ${BACKUP_DEVICE} ] ; then
lvcreate -L${BACKUP_SIZE} -n ${BACKUP_VOLUME} backup
mkfs.xfs /dev/backup/${BACKUP_VOLUME}
fi
### Create the backup mount directory if necessary
#
if [ ! -d ${BACKUP_MOUNT} ] ; then
mkdir -p ${BACKUP_MOUNT}
fi
### Create the snapshot mount-point
#
if [ ! -d ${SNAPSHOT_MOUNT} ] ; then
mkdir -p ${SNAPSHOT_MOUNT}
fi
### Protect the snapshot mount-point
#
chmod go-rwx ${SNAPSHOT_MOUNT}
### Mount the snapshot read-only
#
echo mount -o ro ${SNAPSHOT_DEVICE} ${SNAPSHOT_MOUNT}
mount -o ro ${SNAPSHOT_DEVICE} ${SNAPSHOT_MOUNT}
# Remount the backup volume read-write via a helper script.
# NOTE(review): relative path — the script must be run from its own
# directory for this (and the rsync rotation below) to work.
./make_backup_rw ${BACKUP_VOLUME} ${BACKUP_MOUNT} || exit 1
### Delete the oldest daily backup, if it exists
#
if [ -d ${BACKUP_RW_MOUNT}/daily.3 ] ; then
rm -rf ${BACKUP_RW_MOUNT}/daily.3 ;
fi
### Shift the middle snapshots(s) back by one, if they exist
if [ -d ${BACKUP_RW_MOUNT}/daily.2 ] ; then
mv ${BACKUP_RW_MOUNT}/daily.2 ${BACKUP_RW_MOUNT}/daily.3 ;
fi
if [ -d ${BACKUP_RW_MOUNT}/daily.1 ] ; then
mv ${BACKUP_RW_MOUNT}/daily.1 ${BACKUP_RW_MOUNT}/daily.2 ;
fi
if [ -d ${BACKUP_RW_MOUNT}/daily.0 ] ; then
mv ${BACKUP_RW_MOUNT}/daily.0 ${BACKUP_RW_MOUNT}/daily.1 ;
fi
### Make backup of current (snapshot) volume
#
# This method creates hard-links to the previous backup for unchanged
# files, saving considerable space.
#
rsync -a --delete --link-dest=${BACKUP_RW_MOUNT}/daily.1 \
${SNAPSHOT_MOUNT}/ ${BACKUP_RW_MOUNT}/daily.0/
### Update the mtime of daily.0 to reflect the snapshot time
#
touch ${BACKUP_RW_MOUNT}/daily.0 ;
### Attempt to unmount the RW mount point
#
umount ${BACKUP_RW_MOUNT}
rmdir ${BACKUP_RW_MOUNT}
### Attempt to create the RO mount point
#
# This will only usually be necessary on the first run of this script.
#
if [ ! -d ${BACKUP_RO_MOUNT} ] ; then
mkdir -p ${BACKUP_RO_MOUNT}
fi
### Attempt to mount the RO mount point
#
mount -o ro ${BACKUP_DEVICE} ${BACKUP_RO_MOUNT}
### Unmount the snapshot
#
umount ${SNAPSHOT_MOUNT}
rmdir ${SNAPSHOT_MOUNT}
### Remove the snapshot volume
#
lvremove -f ${SNAPSHOT_DEVICE}
| true
|
8fa5d8ed104fccdbe912645e176d33d3b21530bb
|
Shell
|
mgijax/vocload
|
/MP.config
|
UTF-8
| 2,532
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Program: MP.config
#
# Purpose:
#
#	Configuration file for Mammalian Phenotype Load
#
# Usage:
#
#	Execute this file from a script or command line to
#	set environment variables for MP load
#
# 11/02/2022	lec
#	per Sue:
#	• type 1 synonyms used for labels on the HMDC
#	• type 2 synonyms used for labels on allele summary pages (http://www.informatics.jax.org/allele/summary?markerId=MGI:105043)
#	• the mpslimload is used for on ribbons on the gene page (http://www.informatics.jax.org/marker/MGI:105043)
#	The slimload also specifies which MP headers should appear in the ribbon. The slim set leaves out "phenotype not analyzed" and "normal phenotype".
#
# History:
#
# 03/25/2003	lec
#	- new Configuration file
#
cd `dirname $0`
# Working and archive directories for this load.
RUNTIME_DIR="${DATALOADSOUTPUT}/mgi/vocload/runTimeMP"
ARCHIVE_DIR="${DATALOADSOUTPUT}/mgi/vocload/archiveMP"
export RUNTIME_DIR
export ARCHIVE_DIR
# Input ontology file and its expected OBO format version.
OBO_FILE="${RUNTIME_DIR}/MPheno_OBO.ontology"
OBO_FILE_VERSION="1.2"
export OBO_FILE
export OBO_FILE_VERSION
# This variable will let the loadTerm module know that it should expect to
# find a synonym type column in the Termfile.
#
USE_SYNONYM_TYPE=1
export USE_SYNONYM_TYPE
RCD_FILE="MP.rcd"
export RCD_FILE
# On the production host, notify the curator list; elsewhere, just the
# invoking user.
case `uname -n` in
bhmgiapp01) MAINTAINER="mgiadmin,cynthia.smith@jax.org,susan.bello@jax.org,anna.anagnostopoulos@jax.org";;
*) MAINTAINER="$USER";;
esac
export MAINTAINER
# vocabulary attributes:
VOCAB_NAME="Mammalian Phenotype"	# name of vocabulary (VOC_Vocab.name)
VOCAB_COMMENT_KEY=1000		# name of note types for comments
ACC_PREFIX="MP"		# acc ID prefix
JNUM="J:72460"		# reference for this vocabulary
IS_SIMPLE=0		# structured vocabulary; should be a .rcd file
IS_PRIVATE=0		# acc IDs are public
LOGICALDB_KEY=34	# ACC_LogicalDB._LogicalDB_key
ANNOT_TYPE_KEY=0	# VOC_AnnotType._AnnotType_key; don't report annotations
			# to newly obsoleted terms
DAG_ROOT_ID="MP:0000001"
TOPOLOGICAL_SORT=true	# Should the topological sort be done (true/false)
HEADER_ANNOT_TYPE_KEY=1002
export VOCAB_NAME
export VOCAB_COMMENT_KEY
export ACC_PREFIX
export JNUM
export IS_SIMPLE
export IS_PRIVATE
export LOGICALDB_KEY
export ANNOT_TYPE_KEY
export DAG_ROOT_ID
export TOPOLOGICAL_SORT
export HEADER_ANNOT_TYPE_KEY
# Intermediate files produced in the runtime directory by the load.
TERM_FILE="${RUNTIME_DIR}/Termfile"
DEFS_FILE="${RUNTIME_DIR}/MP.defs"
HEADER_FILE="${RUNTIME_DIR}/MP.header"
NOTE_FILE="${RUNTIME_DIR}/MP.note"
SYNONYM_FILE="${RUNTIME_DIR}/MP.synonym"
export TERM_FILE
export DEFS_FILE
export HEADER_FILE
export NOTE_FILE
export SYNONYM_FILE
| true
|
f760e5fc6ba3dc6d6874e703a3a5835e45db0090
|
Shell
|
AdamWu1979/epi_corrections
|
/scripts/analyze-medians-and-dice-scores.sh
|
UTF-8
| 6,220
| 2.921875
| 3
|
[] |
no_license
|
# Collect per-subject MNI ROI median rCBV files and Dice-score files for
# the EPIC-, TOPUP- and uncorrected pipelines, then run the Wilcoxon /
# Dice analysis script on the spin-echo (e2) set.
# Run from epi_corrections dir as
# bash scripts/analyze-medians-and-dice-scores.sh ../epi_corrections_out_2019_07_02_native_tumor_excluded_from_rcbv
correction_dir=$1
# Gradient echo (e1) and Spin echo (e2)
for (( i = 1 ; i < 3 ; i++ )) ; do
    # MNI ROI median rCBV files
    readarray mni_medians_epic_files_arr_e${i} < <(find $correction_dir/EPI_applyepic -type d -name *e${i}_applyepic_perf | xargs -I {} echo {}/mniroismedians.txt)
    readarray mni_medians_topup_files_arr_e${i} < <(find $correction_dir/EPI_applytopup -type d -name *e${i}_prep_topup_applytopup_postp_perf | xargs -I {} echo {}/mniroismedians.txt)
    readarray mni_medians_raw_files_arr_e${i} < <(find $correction_dir/EPI_raw_DSC -type d -name *e${i}_perf | xargs -I {} echo {}/mniroismedians.txt)
    # MNI ROI dice files (ground truth)
    readarray gt_epic_mni_dice_files_arr_e${i} < <(find $correction_dir/EPI_applyepic -type d -name *e${i}_applyepic_perf | xargs -I {} echo {}/mniroisgtepicdice.txt)
    readarray gt_topup_mni_dice_files_arr_e${i} < <(find $correction_dir/EPI_applytopup -type d -name *e${i}_prep_topup_applytopup_postp_perf | xargs -I {} echo {}/mniroisgttopupdice.txt)
    readarray gt_raw_mni_dice_files_arr_e${i} < <(find $correction_dir/EPI_raw_DSC -type d -name *e${i}_perf | xargs -I {} echo {}/mniroisgtrawdice.txt)
done
# Split arrays into SENSE and not SENSE acceleration
# With SENSE
readarray mni_medians_epic_files_arr_e1_sense < <(printf '%s\n' ${mni_medians_epic_files_arr_e1[*]} | grep -i sense)
readarray mni_medians_topup_files_arr_e1_sense < <(printf '%s\n' ${mni_medians_topup_files_arr_e1[*]} | grep -i sense)
readarray mni_medians_raw_files_arr_e1_sense < <(printf '%s\n' ${mni_medians_raw_files_arr_e1[*]} | grep -i sense)
readarray gt_epic_mni_dice_files_arr_e1_sense < <(printf '%s\n' ${gt_epic_mni_dice_files_arr_e1[*]} | grep -i sense)
readarray gt_topup_mni_dice_files_arr_e1_sense < <(printf '%s\n' ${gt_topup_mni_dice_files_arr_e1[*]} | grep -i sense)
readarray gt_raw_mni_dice_files_arr_e1_sense < <(printf '%s\n' ${gt_raw_mni_dice_files_arr_e1[*]} | grep -i sense)
readarray mni_medians_epic_files_arr_e2_sense < <(printf '%s\n' ${mni_medians_epic_files_arr_e2[*]} | grep -i sense)
readarray mni_medians_topup_files_arr_e2_sense < <(printf '%s\n' ${mni_medians_topup_files_arr_e2[*]} | grep -i sense)
readarray mni_medians_raw_files_arr_e2_sense < <(printf '%s\n' ${mni_medians_raw_files_arr_e2[*]} | grep -i sense)
readarray gt_epic_mni_dice_files_arr_e2_sense < <(printf '%s\n' ${gt_epic_mni_dice_files_arr_e2[*]} | grep -i sense)
readarray gt_topup_mni_dice_files_arr_e2_sense < <(printf '%s\n' ${gt_topup_mni_dice_files_arr_e2[*]} | grep -i sense)
readarray gt_raw_mni_dice_files_arr_e2_sense < <(printf '%s\n' ${gt_raw_mni_dice_files_arr_e2[*]} | grep -i sense)
# Without SENSE
readarray mni_medians_epic_files_arr_e1_nosense < <(printf '%s\n' ${mni_medians_epic_files_arr_e1[*]} | grep -iv sense)
readarray mni_medians_topup_files_arr_e1_nosense < <(printf '%s\n' ${mni_medians_topup_files_arr_e1[*]} | grep -iv sense)
readarray mni_medians_raw_files_arr_e1_nosense < <(printf '%s\n' ${mni_medians_raw_files_arr_e1[*]} | grep -iv sense)
readarray gt_epic_mni_dice_files_arr_e1_nosense < <(printf '%s\n' ${gt_epic_mni_dice_files_arr_e1[*]} | grep -iv sense)
readarray gt_topup_mni_dice_files_arr_e1_nosense < <(printf '%s\n' ${gt_topup_mni_dice_files_arr_e1[*]} | grep -iv sense)
readarray gt_raw_mni_dice_files_arr_e1_nosense < <(printf '%s\n' ${gt_raw_mni_dice_files_arr_e1[*]} | grep -iv sense)
readarray mni_medians_epic_files_arr_e2_nosense < <(printf '%s\n' ${mni_medians_epic_files_arr_e2[*]} | grep -iv sense)
readarray mni_medians_topup_files_arr_e2_nosense < <(printf '%s\n' ${mni_medians_topup_files_arr_e2[*]} | grep -iv sense)
readarray mni_medians_raw_files_arr_e2_nosense < <(printf '%s\n' ${mni_medians_raw_files_arr_e2[*]} | grep -iv sense)
readarray gt_epic_mni_dice_files_arr_e2_nosense < <(printf '%s\n' ${gt_epic_mni_dice_files_arr_e2[*]} | grep -iv sense)
readarray gt_topup_mni_dice_files_arr_e2_nosense < <(printf '%s\n' ${gt_topup_mni_dice_files_arr_e2[*]} | grep -iv sense)
readarray gt_raw_mni_dice_files_arr_e2_nosense < <(printf '%s\n' ${gt_raw_mni_dice_files_arr_e2[*]} | grep -iv sense)
# Without SENSE, first 15
# NOTE(review): this section silently OVERWRITES the *_nosense arrays
# built just above with their first 15 entries — presumably intentional
# subsetting, but worth confirming since the earlier values are lost.
readarray mni_medians_epic_files_arr_e1_nosense < <(printf '%s\n' ${mni_medians_epic_files_arr_e1[*]} | grep -iv sense | head -n 15)
readarray mni_medians_topup_files_arr_e1_nosense < <(printf '%s\n' ${mni_medians_topup_files_arr_e1[*]} | grep -iv sense | head -n 15)
readarray mni_medians_raw_files_arr_e1_nosense < <(printf '%s\n' ${mni_medians_raw_files_arr_e1[*]} | grep -iv sense | head -n 15)
readarray gt_epic_mni_dice_files_arr_e1_nosense < <(printf '%s\n' ${gt_epic_mni_dice_files_arr_e1[*]} | grep -iv sense | head -n 15)
readarray gt_topup_mni_dice_files_arr_e1_nosense < <(printf '%s\n' ${gt_topup_mni_dice_files_arr_e1[*]} | grep -iv sense | head -n 15)
readarray gt_raw_mni_dice_files_arr_e1_nosense < <(printf '%s\n' ${gt_raw_mni_dice_files_arr_e1[*]} | grep -iv sense | head -n 15)
readarray mni_medians_epic_files_arr_e2_nosense < <(printf '%s\n' ${mni_medians_epic_files_arr_e2[*]} | grep -iv sense | head -n 15)
readarray mni_medians_topup_files_arr_e2_nosense < <(printf '%s\n' ${mni_medians_topup_files_arr_e2[*]} | grep -iv sense | head -n 15)
readarray mni_medians_raw_files_arr_e2_nosense < <(printf '%s\n' ${mni_medians_raw_files_arr_e2[*]} | grep -iv sense | head -n 15)
readarray gt_epic_mni_dice_files_arr_e2_nosense < <(printf '%s\n' ${gt_epic_mni_dice_files_arr_e2[*]} | grep -iv sense | head -n 15)
readarray gt_topup_mni_dice_files_arr_e2_nosense < <(printf '%s\n' ${gt_topup_mni_dice_files_arr_e2[*]} | grep -iv sense | head -n 15)
readarray gt_raw_mni_dice_files_arr_e2_nosense < <(printf '%s\n' ${gt_raw_mni_dice_files_arr_e2[*]} | grep -iv sense | head -n 15)
# NOTE(review): only the full (unsplit) e2 arrays are actually passed to
# the analysis; the SENSE/no-SENSE subsets above are currently unused here.
script=$(dirname $0)/wilcoxon-and-mni-rois-dice-analysis.py
command="python $script --rawmedians ${mni_medians_raw_files_arr_e2[@]} --cormedians ${mni_medians_epic_files_arr_e2[@]} --dicegtrawscores ${gt_raw_mni_dice_files_arr_e2[@]} --dicegtcorscores ${gt_epic_mni_dice_files_arr_e2[@]}"
#echo $command
eval $command
| true
|
b0bc5fd12989539c19fda1714345c942d4cdcdfb
|
Shell
|
elfosardo/airship-dev-tools
|
/ci/scripts/image_scripts/provision_metal3_image_centos.sh
|
UTF-8
| 1,619
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Provision a CentOS image for metal3 CI: update packages, install the
# operator-sdk binary, relax sshd root login, and reset cloud-init so it
# runs again on the next boot.  The metal3-dev-env requirement install is
# currently commented out.
set -uex
SCRIPTS_DIR="$(dirname "$(readlink -f "${0}")")"
# Metal3 Dev Env variables
M3_DENV_ORG="${M3_DENV_ORG:-metal3-io}"
M3_DENV_REPO="${M3_DENV_REPO:-metal3-dev-env}"
M3_DENV_URL="${M3_DENV_URL:-https://github.com/${M3_DENV_ORG}/${M3_DENV_REPO}.git}"
M3_DENV_BRANCH="${M3_DENV_BRANCH:-master}"
M3_DENV_ROOT="${M3_DENV_ROOT:-/tmp}"
M3_DENV_PATH="${M3_DENV_PATH:-${M3_DENV_ROOT}/${M3_DENV_REPO}}"
FORCE_REPO_UPDATE="${FORCE_REPO_UPDATE:-true}"
export CONTAINER_RUNTIME="${CONTAINER_RUNTIME:-docker}"
sudo yum update -y
sudo yum update -y curl nss
sudo yum install -y git
#Install Operator SDK
OSDK_RELEASE_VERSION=v0.10.0
curl -OJL https://github.com/operator-framework/operator-sdk/releases/download/${OSDK_RELEASE_VERSION}/operator-sdk-${OSDK_RELEASE_VERSION}-x86_64-linux-gnu
chmod +x operator-sdk-${OSDK_RELEASE_VERSION}-x86_64-linux-gnu
sudo mkdir -p /usr/local/bin/
sudo cp operator-sdk-${OSDK_RELEASE_VERSION}-x86_64-linux-gnu /usr/local/bin/operator-sdk
rm operator-sdk-${OSDK_RELEASE_VERSION}-x86_64-linux-gnu
## Install metal3 requirements
#mkdir -p "${M3_DENV_ROOT}"
#if [[ -d "${M3_DENV_PATH}" && "${FORCE_REPO_UPDATE}" == "true" ]]; then
#  sudo rm -rf "${M3_DENV_PATH}"
#fi
#if [ ! -d "${M3_DENV_PATH}" ] ; then
#  pushd "${M3_DENV_ROOT}"
#  git clone "${M3_DENV_URL}"
#  popd
#fi
#pushd "${M3_DENV_PATH}"
#git checkout "${M3_DENV_BRANCH}"
#git pull -r || true
#make install_requirements
#popd
#
#rm -rf "${M3_DENV_PATH}"
# Allow root login over ssh (first PermitRootLogin occurrence only).
sudo sed -i "0,/.*PermitRootLogin.*/s//PermitRootLogin yes/" /etc/ssh/sshd_config
# Reset cloud-init to run on next boot.
"${SCRIPTS_DIR}"/reset_cloud_init.sh
| true
|
9855957ef4209f7f17b74828774bf293419ba57d
|
Shell
|
Sibylau/mlir-aie
|
/utils/clone-llvm.sh
|
UTF-8
| 759
| 2.71875
| 3
|
[
"LLVM-exception",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
##===- utils/clone-llvm.sh - Build LLVM for github workflow --*- Script -*-===##
#
# This file licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
##===----------------------------------------------------------------------===##
#
# This script checks out LLVM. We use this instead of a git submodule to avoid
# excessive copies of the LLVM tree.
#
##===----------------------------------------------------------------------===##

# Abort on the first failed step; otherwise a failed clone/pushd would let
# the later git commands run against the wrong directory.
set -e

git clone --depth 1 https://github.com/llvm/llvm-project.git llvm
pushd llvm
# Fetch only the pinned commit so the clone stays shallow, then check it out.
git fetch --depth=1 origin ebe408ad8003c946ef871b955ab18e64e82697cb
git checkout ebe408ad8003c946ef871b955ab18e64e82697cb
popd
| true
|
a5f715ac1324e76cad8a229cdeb60418972faf25
|
Shell
|
marstool/qwp201802
|
/docs/20180227_-u_-zs5zofy.info_dir/20180227_-U_-Zs5ZOFY.info.json.sh2
|
UTF-8
| 2,333
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Arrange the downloaded video/metadata files for one item into a content
# directory and generate its Hugo _index.md page.
# Remove stale derived files from a previous run.
rm -f \
20180227_-U_-Zs5ZOFY.info.json.vo.* \
20180227_-U_-Zs5ZOFY.info.json.ao.* \
20180227_-U_-Zs5ZOFY.info.json.bo.* \
20180227_-U_-Zs5ZOFY.info.json.wav
rm -fr 20180227_-U_-Zs5ZOFY.info_dir
mkdir -p 20180227_-U_-Zs5ZOFY.info_dir
# Move the metadata and all derived files into the content directory and
# rename the thumbnail to match the mp4 naming scheme.
mv 20180227_-U_-Zs5ZOFY.info.json 20180227_-U_-Zs5ZOFY.info.json.* 20180227_-U_-Zs5ZOFY.info_dir/
mv 20180227_-U_-Zs5ZOFY.info_dir/20180227_-U_-Zs5ZOFY.info.json.jpg 20180227_-U_-Zs5ZOFY.info_dir/20180227_-U_-Zs5ZOFY.info.json.webm.mp4.jpg
# Generate the Hugo front matter + shortcodes.  The heredoc delimiter is
# unquoted on purpose: the $(cat ...|wc -c) substitutions embed the file
# sizes at generation time.
cat > 20180227_-U_-Zs5ZOFY.info_dir/_index.md << EOF3
+++
title = " 20180227_-U_-Zs5ZOFY VOA连线秦伟平:刘鹤访美,能否缓和美中经贸紧张关系 2018-02-27 "
description = " VOA连线 | 主持人:郑裕文 嘉宾:秦伟平__就在最近几周川普政府一再批评中国贸易政策之际,中国派出中财办主任刘鹤访问美国。作为习近平的首席经济顾问,刘鹤的访美之行能否缓和近期以来美中贸易问题上剑拔弩张的紧张气氛?旅美经济学者秦伟平为您点评。 "
weight = 20
+++
{{< mymp4 mp4="20180227_-U_-Zs5ZOFY.info.json.webm.mp4"
text="len $(cat 20180227_-U_-Zs5ZOFY.info_dir/20180227_-U_-Zs5ZOFY.info.json.webm.mp4|wc -c)"
>}}
{{< mymp4x mp4x="20180227_-U_-Zs5ZOFY.info.json.25k.mp4"
text="len $(cat 20180227_-U_-Zs5ZOFY.info_dir/20180227_-U_-Zs5ZOFY.info.json.25k.mp4|wc -c)"
>}}
{{< mydiv text="VOA连线 | 主持人:郑裕文 嘉宾:秦伟平__就在最近几周川普政府一再批评中国贸易政策之际,中国派出中财办主任刘鹤访问美国。作为习近平的首席经济顾问,刘鹤的访美之行能否缓和近期以来美中贸易问题上剑拔弩张的紧张气氛?旅美经济学者秦伟平为您点评。" >}}
<br>
{{< mydiv text="https://www.youtube.com/watch?v=-U_-Zs5ZOFY" >}}
<br>
请大家传播时,不需要传播文件本身,<br>
原因是:一旦传播过大东西(例如,图片,文件),<br>
就会触发检查机制。<br>
我不知道检查机制的触发条件。<br>
但是我知道,不会说你传一个没有敏感词的网络地址都检查,<br>
否则,检查员得累死。<br><br>
直接转发网址就可以了:<br>
原因是,这是程序员网站,<br>
共匪不敢封锁,墙内可以直接下载。
EOF3
| true
|
ef42ce11cc9c701336b48bc21da57911eb8adcf8
|
Shell
|
GCN/openshift-azure
|
/hack/delete.sh
|
UTF-8
| 491
| 3.0625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
# Delete an openshift-azure deployment via the createorupdate tool.
# Usage: delete.sh [resourcegroup]
# Without an argument the resource group is read from
# _data/containerservice.yaml, which must then exist.

if [[ $# -eq 0 && ! -e _data/containerservice.yaml ]]; then
    echo error: _data/containerservice.yaml must exist
    exit 1
fi

if [[ $# -eq 1 ]]; then
    export RESOURCEGROUP=$1
else
    # Pull the resourceGroup value out of the manifest.
    export RESOURCEGROUP=$(awk '/^ resourceGroup:/ { print $2 }' <_data/containerservice.yaml)
fi

# Target production only when TEST_IN_PRODUCTION is set.
USE_PROD_FLAG="-use-prod=false"
if [[ -n "$TEST_IN_PRODUCTION" ]]; then
    USE_PROD_FLAG="-use-prod=true"
fi

go run cmd/createorupdate/createorupdate.go -request=DELETE "$USE_PROD_FLAG"
| true
|
eec393268db54658b1b835c932d5dc4ee0a9ac27
|
Shell
|
tukiyo/docker-xrdp
|
/bin/build.sh
|
UTF-8
| 281
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
set -eu

# Build one tukiyo3/xrdp image variant.
#   $1 - Dockerfile name under Dockerfile/<arch>/ (also the image tag)
#   $2 - optional image tag overriding $1
_build() {
	ARCH=$(uname -m)
	TAG=$1
	if [ $# -eq 2 ];then
		TAG=$2
	fi
	# Paths/tags quoted so unexpected whitespace cannot split arguments.
	docker build . \
	  --no-cache=false \
	  -f "Dockerfile/${ARCH}/${TAG}" \
	  -t "tukiyo3/xrdp:${TAG}"
}

_build core
_build xfce4
_build latest
# _build novnc
# _build lxde
# _build icewm
|
8145f157fea50cb9261e319cb39fe40aba6e54b3
|
Shell
|
infinityrobot/dotfiles
|
/aliases/macos.sh
|
UTF-8
| 1,253
| 2.703125
| 3
|
[] |
no_license
|
# Display / hide hidden files
alias show_hidden="defaults write com.apple.finder AppleShowAllFiles YES; killall Finder"
alias hide_hidden="defaults write com.apple.finder AppleShowAllFiles NO; killall Finder"
# Enable / disable screenshot shadow
alias enable_screenshot_shadow="defaults write com.apple.screencapture disable-shadow NO; killall SystemUIServer"
alias disable_screenshot_shadow="defaults write com.apple.screencapture disable-shadow YES; killall SystemUIServer"
# Enable / disable text selection in QuickLook
alias enable_ql_text_selection="defaults write com.apple.finder QLEnableTextSelection YES; killall Finder"
alias disable_ql_text_selection="defaults write com.apple.finder QLEnableTextSelection NO; killall Finder"
# Get IP addresses
alias get_local_ip="ipconfig getifaddr en0"
alias get_external_ip="curl ipecho.net/plain; echo"
# Enable / disable Dashboard
alias disable_dashboard="defaults write com.apple.dashboard mcx-disabled YES; killall Dock"
alias enable_dashboard="defaults write com.apple.dashboard mcx-disabled NO; killall Dock"
# Print the path, one entry per line.  $PATH is escaped so it is expanded
# when the alias runs, not when it is defined — otherwise the alias would
# keep printing the PATH as it was at shell startup.
alias path="echo \$PATH | tr -s ':' '\n'"
# Restarts.
alias restart_dock="killall Dock"
alias restart_finder="killall Finder"
alias restart_ui="killall SystemUIServer"
| true
|
7015e2a33ddb246d91d2ef19d6766689c40b715f
|
Shell
|
zerraveth/so
|
/stoper
|
UTF-8
| 208
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Read up to $2 numbers from stdin, printing their running product.
# Stops early once the sentinel value $1 is entered (the sentinel itself
# is still multiplied in, matching the original behaviour).
main() {
    local stoper=${1:-0}
    local n=${2:-0}
    local iloczyn=1
    local liczba i
    for (( i = 0; i < n; i++ )); do
        echo -n "podaj liczbe: "
        # Stop cleanly on EOF instead of multiplying by an empty value.
        read -r liczba || break
        iloczyn=$(( iloczyn * liczba ))
        if (( liczba == stoper )); then
            break
        fi
    done
    echo "iloczyn: $iloczyn"
}
main "$@"
| true
|
72c8c518ea5feed0fc2655bdf7c0c8b5e9731d4a
|
Shell
|
montaser223/Bash-Script-Project
|
/main.sh
|
UTF-8
| 2,245
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# Simple file-system backed DBMS: each database is a directory under $DIR.
DIR="/home/$USER/DBMS"
echo "Welcome To DBMS"
# Ensure the user has a directory to hold the databases.
function checkDir() {
	# Create the DBMS data directory on first run.  Uses $DIR (instead of
	# re-spelling the path) and -p so an existing directory is a no-op.
	if [ ! -d "$DIR" ]; then
		mkdir -p "$DIR"
	fi
}
#Call checkDir function
checkDir
# Display the main database menu.
function mainMenu() {
    # Top-level menu loop (implemented by tail recursion: every action
    # eventually calls mainMenu again until the user picks Exit).
    echo -e "\n+---------Main Menu-------------+"
    echo "| 1. Open DB                    |"
    echo "| 2. Create DB                  |"
    echo "| 3. Drop DB                    |"
    echo "| 4. List DBs                   |"
    echo "| 5. Exit                       |"
    echo "+-------------------------------+"
    echo -e "Enter Choice: \c"
    read ch
    case $ch in
    1) selectDB ;;
    2) createDB ;;
    3) dropDB ;;
    4) listDB ;;
    5)
        echo BYE
        exit
        ;;
    *)
        # Any other input re-displays the menu.
        echo " Wrong Choice  "
        mainMenu
        ;;
    esac
}
#This function opens a specific DB.
function selectDB() {
    echo -e "Enter Database Name: \c"
    read dbName
    if [[ -d $DIR/$dbName ]]; then
        echo "Database $dbName was Successfully Selected"
        # Hand control to the table-level menu for this database.
        ./tables.sh $DIR/$dbName
    else
        echo "Database $dbName wasn't found"
        mainMenu
    fi
}
#This function creates a new DB.
function createDB() {
	# Prompt for a name, validate it (single word, letters/underscore
	# only), then create it as a directory under $DIR.  Always returns
	# to the main menu afterwards.
	echo -e "Enter Database Name: \c"
	read -a db
	ELEMENTS=${#db[@]}
	# More than one array element means the name contained whitespace.
	if [[ $ELEMENTS == 1 ]]; then
		# Anchored on both ends: the original '^[A-Za-z_]+' only checked
		# the prefix, so names like "ab!c" slipped through validation.
		if [[ ${db[0]} =~ ^[A-Za-z_]+$ ]]; then
			dbName=${db[0]}
			if [[ -d "$DIR/$dbName" ]]; then
				echo "DataBase already existed ,choose another name"
				mainMenu
			else
				mkdir "$DIR/$dbName"
				if [[ $? == 0 ]]; then
					echo "Database Created Successfully"
				else
					echo "Error Creating Database $dbName"
				fi
			fi
		else
			echo "DB name shouldn't contain any special charcter except '_' "
		fi
	else
		echo "It isn't allowed to use spaces in Database name "
	fi
	mainMenu
}
function dropDB() {
    # Delete a database directory; errors from rm are appended to a
    # hidden log instead of reaching the screen.
    echo -e "Enter Database Name: \c"
    read dbName
    rm -r $DIR/$dbName 2>>./.error.log
    if [[ $? == 0 ]]; then
        echo "Database Dropped Successfully"
    else
        echo "Database Not found"
    fi
    mainMenu
}
#This function lists all available DBs.
function listDB() {
    # NOTE(review): entries starting with 'f' are filtered out by
    # egrep -v '^f' — the reason is not visible here; confirm intent.
    if [[ "$(ls $DIR | egrep -v '^f')" ]]; then
        ls $DIR | egrep -v '^f'
    else
        echo "No Avilable Databases"
    fi
    mainMenu
}
# Call DB menu
mainMenu
| true
|
458dd52e888fadc011cb35270feb9ecb31a0db98
|
Shell
|
gnpereira90/ppgcc-mpi-master-slave
|
/execution-3/run.sh
|
UTF-8
| 1,477
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash -e
# Benchmark driver: runs the sequential bubble sort and the MPI
# master-slave sort over several array sizes, appending per-run RUNTIME
# values as CSV rows under logs/.
echo
echo "Running on `hostname`"
echo "Initial Time: `date`"
echo
mkdir -p logs
touch logs/sort-sequential.csv
touch logs/master-slave-sort.csv
# TRIES repetitions per configuration; ARGS holds the array sizes.
TRIES=3
ARGS=(1000 5000 10000)
DIR=$PWD
OUTPUTFILE_SEQUENTIAL="$PWD/logs/sort-sequential.csv"
OUTPUTFILE_MASTER_SLAVE="$PWD/logs/master-slave-sort.csv"
number_vectors=1000
cd "$DIR/sort-sequential"
make
echo "TRIES;ARRAY_SIZE;NUMBER_VECTORS;RUNTIME" > "$OUTPUTFILE_SEQUENTIAL"
echo "Runnning Bubble Sort Sequential"
for k in $(seq 1 $TRIES); do
    for ((i = 0; i < ${#ARGS[@]}; i++)); do
        # The RUNTIME=... line printed by 'make run' is parsed for its value.
        row=$(make run ARRAY_SIZE=${ARGS[$i]} NUMBER_VECTORS=$number_vectors | grep -i 'RUNTIME' | cut -d "=" -f2)
        echo "$k; ${ARGS[$i]}; $number_vectors; $row" >> "$OUTPUTFILE_SEQUENTIAL"
        echo "$k; ${ARGS[$i]}; $number_vectors; $row"
    done
done
cd "$DIR/master-slave-sort"
make build_lad
echo "TRIES;NPS;ARRAY_SIZE;NUMBER_VECTORS;RUNTIME" > "$OUTPUTFILE_MASTER_SLAVE"
echo "Runnning Bubble Sort Master Slave"
for k in $(seq 1 $TRIES); do
    # Sweep the process count from 2 to 32 in steps of 2.
    processes=($(seq 2 2 32))
    for np in "${processes[@]}"; do
        for ((i = 0; i < ${#ARGS[@]}; i++)); do
            row=$(make run_lad NP=$np ARRAY_SIZE=${ARGS[$i]} NUMBER_VECTORS=$number_vectors | grep -i 'RUNTIME' | cut -d "=" -f2)
            echo "$k; $np; ${ARGS[$i]}; $number_vectors; $row" >> "$OUTPUTFILE_MASTER_SLAVE"
            echo "$k; $np; ${ARGS[$i]}; $number_vectors; $row"
        done
    done
done
echo
echo "Final Time: `date`"
echo
| true
|
dfba0db7840eb3d8f0b4bb286918cdeacf5ac354
|
Shell
|
hchiam/learning-bash-scripts
|
/create-git-branch-and-use-it.sh
|
UTF-8
| 118
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
# put this inside your .bash_profile
# Create a branch and push it, setting origin/<name> as its upstream.
# ('#' replaces the original '//', which is not a shell comment and
# would be executed as a command named '//'.)
function branch() {
  git checkout -b "$1"
  git push --set-upstream origin "$1"
}
| true
|
4663357ed6f085fa1095407353eccc16f6a37fcb
|
Shell
|
ferhatelmas/openebs
|
/k8s/lib/scripts/configure_k8s_master.sh
|
UTF-8
| 759
| 3.828125
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Bootstrap a Kubernetes master with kubeadm: discover the machine IP,
# fix /etc/hosts, then run 'kubeadm init'.
#Variables:
machineip=
hostname=`hostname`
kubeversion="v1.7.5"
# Print the highest local IPv4 address.
# NOTE(review): the 'inet addr:' pattern matches only the legacy
# net-tools ifconfig output format — newer ifconfig prints 'inet X';
# confirm on the target distribution.
function get_machine_ip(){
   ifconfig | \
	   grep -oP "inet addr:\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}" \
	   | grep -oP "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}" | sort |\
	   tail -n 1 | head -n 1
}
function setup_k8s_master() {
  sudo kubeadm init --apiserver-advertise-address=$machineip \
  --kubernetes-version=$kubeversion
}
# Rewrite this host's /etc/hosts entry to point at the discovered IP.
function update_hosts(){
	sudo sed -i "/$hostname/ s/.*/$machineip\t$hostname/g" /etc/hosts
}
#Code
#Get the ip of the machine
machineip=`get_machine_ip`
#Update the host file of the master.
echo Updating the host file...
update_hosts
#Create the Cluster
echo Setting up the Master using IPAddress: $machineip
setup_k8s_master
| true
|
8a11b68a3ee012c1a071de8b7dbb08c5803c3f92
|
Shell
|
NetBSD/pkgsrc
|
/net/nagios-nrpe/files/nrpe.sh
|
UTF-8
| 335
| 2.78125
| 3
|
[] |
no_license
|
#!@RCD_SCRIPTS_SHELL@
#
# $NetBSD: nrpe.sh,v 1.1.1.1 2006/05/21 10:28:40 grant Exp $
#
# rc.d script for the Nagios NRPE daemon.  The @...@ placeholders are
# substituted by pkgsrc at package build time.
#
# PROVIDE: nrpe
# REQUIRE: DAEMON
. /etc/rc.subr
name="nrpe"
rcvar=$name
command="@PREFIX@/sbin/${name}"
config="@PKG_SYSCONFDIR@/${name}.cfg"
pidfile="/var/run/${name}.pid"
# -d: run as a standalone daemon with the given config.
command_args="-c $config -d"
load_rc_config $name
run_rc_command "$1"
| true
|
1312ec38bc932fefcc2a5ca63a04278dacabe1fc
|
Shell
|
wstcttt/rc
|
/.bashrc
|
UTF-8
| 1,601
| 3.3125
| 3
|
[] |
no_license
|
#create or attach the screen when start the shell
#if [[ -z "$STY" ]]; then
#  screen -xRR jun
#fi
TERM=xterm-256color
# Bail out for non-interactive shells; nothing below applies to them.
[[ $- == *i* ]] || return 0
kill -WINCH $$
# Color setup differs between macOS (BSD ls) and Linux (GNU ls).
if [ "$(uname)" == "Darwin" ]; then
    alias ls='ls -G'
    export CLICOLOR=1
    export LSCOLORS=gxBxhxDxfxhxhxhxhxcxcx
else
    export LS_OPTIONS='--color=auto'
    eval "`dircolors`"
    alias ls='ls $LS_OPTIONS'
fi
# NOTE(review): GREP_OPTIONS is deprecated by GNU grep and newer
# versions print a warning — consider an alias instead.
export GREP_OPTIONS='--color=auto'
alias ..='cd ..'
alias ll='ls -al'
#####hack the rm#####
# Route 'rm' through the trash functions below; 'realrm' keeps the
# original binary reachable.
alias realrm=/bin/rm
alias rm=trash
alias rl='ls ~/.trash'
alias ur=untrash
trash()
{
    # Safe replacement for rm: move the named files into ~/.trash.
    # "$@" (quoted) keeps filenames with spaces intact; '--' stops
    # option parsing so names starting with '-' are moved, not parsed.
    echo "------------"
    echo "This is not the real rm, no option needed"
    echo "You can recover file from ~/.trash, or just use: ur <file>"
    echo "Use the real /bin/rm to delete permanently, be careful!!!"
    echo "------------"
    mkdir -p ~/.trash
    mv -- "$@" ~/.trash/ && echo "Done."
}
untrash()
{
    # Restore the named files from ~/.trash into the current directory.
    # "$@" (quoted) replaces the original unquoted $*, which broke on
    # filenames containing spaces; -i still guards against overwrites.
    echo "Recover from ~/.trash, files: $@"
    for p in "$@"; do
        mv -i ~/.trash/"$p" .
    done
}
cleartrash()
{
    # Permanently delete everything in ~/.trash after a y/Y confirmation.
    # read -r keeps backslashes literal; confirm is local so the answer
    # does not leak into the calling shell.
    local confirm
    read -r -p "WARNING!!!: Delete files in ~/.trash permanently, are you sure?[n]" confirm
    [[ $confirm == 'y' ]] || [[ $confirm == 'Y' ]] && /bin/rm -rf ~/.trash/* && echo "All files cleared"
}
#####done of hack the rm#####
Blue='\033[34m'
Green='\033[32m'
Cyan='\033[36m'
Red='\033[31m'
Purple='\033[35m'
Brown='\033[33m'
STARTFGCOLOR=$Green
ENDCOLOR="\033[0m"
export PS1="\[$STARTFGCOLOR\][\t] \u@\h: \w \n\s-\v\$ \[$ENDCOLOR\]"
export LC_ALL=en_US.UTF-8
set -o vi
####functions####
function cprc() {
    # Copy the local rc files to remote host $1 (into dir $2, default the
    # remote home) and then open an interactive ssh session there.
    scp ~/.bashrc ~/.vimrc ~/.screenrc ~/.inputrc $1:${2:-\~}/.
    ssh -t $1 "cd ${2:-~}"
}
| true
|
f7793ece5f973d9593d4d7bddb837577f9842c83
|
Shell
|
nadtakanfuthoem/books-api
|
/services/cognito/scripts/deploy.bash
|
UTF-8
| 625
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy the cognito serverless stack for a stage and publish its user
# pool id to SSM Parameter Store.
# Usage: deploy.bash <stage>
# set bash to stop if error happen
set -e
profile="nadtakan"
service="cognito"
stage=$1
region="us-east-1"
sls deploy --stage $stage --profile $profile
# Grab export (good for handling multiple stacks)
COGNITO_USER_POOL_ID=$(aws cloudformation list-exports --query "Exports[?Name==\`$service:$stage:UserPoolId\`].Value" --no-paginate --output text --region $region --profile $profile)
# create ssm keys cognito and web id
aws ssm --profile $profile --region $region put-parameter \
    --name "$service-$region-user-pool-id" \
    --type "String" \
    --value "${COGNITO_USER_POOL_ID}" \
    --overwrite
| true
|
241a26029b9d152a721b383c72dbdea638b1506e
|
Shell
|
purpleP/autovenv
|
/new_auto.zsh
|
UTF-8
| 1,358
| 3.75
| 4
|
[] |
no_license
|
activate_venv() {
    # Activate the nearest Python virtualenv (zsh hook); deactivate the
    # current one when no venv is found for the new directory.
    unsetopt nomatch 2>/dev/null
    local venv=$(find_venv)
    if [[ -n $venv ]]; then
        . $venv/bin/activate
    else
        deactivate 2>/dev/null
    fi
}
find_venv() {
    # Locate a virtualenv directory (one containing bin/activate):
    # first directly under the git project root, then under each
    # ancestor of $PWD up to /.  Prints the first match, if any.
    local project_root=$(git rev-parse --show-toplevel 2>/dev/null)
    if [[ -n $project_root ]]; then
        venv_dir=$(find $project_root -maxdepth 1 -type d -exec test -e "{}/bin/activate" \; -print -quit 2>/dev/null)
        if [[ -n $venv_dir ]]; then
            echo $venv_dir
            return 0
        fi
    fi
    # Build the ancestor list; $search_in[-1] is zsh 1-based indexing.
    local search_in=($PWD)
    while [[ $search_in[-1] != '/' ]]; do
        search_in+=($(dirname $search_in[-1]))
    done
    find $search_in -maxdepth 1 -type d -exec test -e "{}/bin/activate" \; -print -quit 2>/dev/null
}
has_python_packages() {
    # If the cwd contains Python packages (subdirectories holding an
    # __init__.py) but is not itself a package, append it to PYTHONPATH
    # exactly once.  (NOTE: this file is a zsh hook; the logic below is
    # also valid bash.)
    local search_path
    search_path=$(pwd)
    if ! [[ -e $search_path/__init__.py ]]; then
        local package
        package=$(find "$search_path" -maxdepth 1 -type d -exec test -e "{}/__init__.py" \; -print -quit 2>/dev/null)
        if [[ -n $package && ":$PYTHONPATH:" != *":$search_path:"* ]]; then
            for p in ${PYTHONPATH//:/ }; do
                # Skip adding when an entry already covers this path
                # (e.g. with a trailing slash).  The original executed
                # '[[ ... ]]; return 0' — an unconditional return on the
                # first iteration — so PYTHONPATH was never extended
                # whenever it was already non-empty.
                [[ $search_path/ = $p ]] && return 0
            done
            export PYTHONPATH=$PYTHONPATH:$search_path
        fi
    fi
}
# Register both hooks so zsh runs them after every directory change.
chpwd_functions=(${chpwd_functions[@]} "has_python_packages")
chpwd_functions=(${chpwd_functions[@]} "activate_venv")
| true
|
0a964e40f68fe0c458bd88a8beb104bc6befe596
|
Shell
|
ryanwalexander/PiBuilder
|
/boot/scripts/support/isc-dhcp-fix.sh
|
UTF-8
| 552
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
logger "isc-dhcp-fix launched"
# Card IFACE
# If IFACE currently has no IPv4 address, log the fact and ask dhclient to
# obtain a new lease for it.
Card()
{
   # grep -q exits 0 only when an "inet <addr>" line is present, so the
   # explicit "$?" check of the original is folded into the if itself.
   if ! ifconfig "$1" | grep -Pq '(?<=inet )[\d.]+'; then
      logger "isc-dhcp-fix resetting $1"
      sudo dhclient "$1"
   fi
}
# if you do not have both interfaces active, comment-out the "Card"
# line that you don't need. But leave both "sleep" lines in place.
# The idea is that each interface is probed at two-second intervals.
# That should not change, even if you reduce to one interface.
# Endless watchdog: probe the wired then the wireless interface, one
# second apart, forever.
while true; do
Card eth0
sleep 1
Card wlan0
sleep 1
done
| true
|
8eb01a5b88db42dd1872a0671cdff394ceacbadc
|
Shell
|
manasealoo/blast_tools
|
/blastp_formatter.sh
|
UTF-8
| 8,470
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# blastp_formatter.sh -- run blastp for every sequence in an input FASTA
# file and write a formatted report (top hits, % identity, annotations).
# Print the usage text and exit when the first argument asks for help.
if [[ ($1 == "-h") || ($1 == "--help") ]]
then
echo "Usage:"
echo "$0 [-h,-help] [-i,--input-fasta] [-d,--database] <-e,--evalue>
<-nd,--num_descriptions> <-na,--num_alignments> <-o --output-filename>
<-cl,--clean> <-nrh,--num-report-hits> <-nthr, --num-threads>
<-bks, --backward-search>
where:
-h, --help show this help text
-i, --input-fasta the input FASTA file
-d, --database database FASTA file
-e, --evalue the E value
-nd, --num_descriptions
-na, --num_alignments
-o, --output-filename
-cl, --clean
-nrh, --num-report-hits
-nthr, --num-threads
-bks, --backward-search "
exit 0
fi
# parsing input arguments
# Bug fix: the original loop condition was [[ $# > 1 ]], where ">" inside
# [[ ]] compares *strings* lexicographically (e.g. [[ 10 > 9 ]] is false),
# so option parsing could stop early once ten or more arguments remained.
# -gt compares numerically.
while [[ $# -gt 1 ]]
do
key="$1"

case $key in
    -i|--input-fasta)
    INPUT_FASTA="$2"
    shift
    ;;
    -d|--database)
    DATABASE="$2"
    shift
    ;;
    -e|--evalue)
    EVALUE_SET="$2"
    shift
    ;;
    -nd|--num_descriptions)
    NUM_DESCRIPTIONS_SET="$2"
    shift
    ;;
    -na|--num_alignments)
    NUM_ALIGNMENTS_SET="$2"
    shift
    ;;
    -o|--output-filename)
    OUTPUTREPORT="$2"
    shift
    ;;
    -cl|--clean)
    DEBUG_CLEAN="$2"
    shift
    ;;
    -bks|--backward-search)
    BACK="$2"
    shift
    ;;
    -nrh|--num-report-hits)
    NUM_REPORT_HITS_SET="$2"
    shift
    ;;
    -nthr|--num-threads)
    NUM_THREDS_SET="$2"
    shift
    ;;
esac
shift
done
# processing input arguments: apply defaults for every optional setting
DATABASE_NAME=${DATABASE%.*}
EVALUE=${EVALUE_SET:-0.001}
NUM_THREDS=${NUM_THREDS_SET:-4}
NUM_REPORT_HITS=${NUM_REPORT_HITS_SET:-2}
NUM_DESCRIPTIONS=${NUM_DESCRIPTIONS_SET:-9}
NUM_ALIGNMENTS=${NUM_ALIGNMENTS_SET:-9}
# Build the report file name from the settings.  Bug fix: the original
# combined "%.*" and ":-" in a single expansion
# ("${OUTPUTREPORT%.*:-"report"}"), which is not a valid parameter
# expansion -- the default was never applied and the extension never
# stripped.  Default first, then strip the extension.
OUTPUTREPORT="${OUTPUTREPORT:-report.txt}"
OUTPUTREPORT="${OUTPUTREPORT%.*}-MaxHits_$NUM_REPORT_HITS-input_${INPUT_FASTA%.*}-db_$DATABASE_NAME-evalue_$EVALUE.txt"

############################################
# if database is a fasta file, build a local database
# otherwise database files in BLASTDB path will be used.
if [[ $DATABASE == *".fasta"* || $DATABASE == *".FASTA"* ]]
then
    # generate database only if none of its index files exist yet
    [ ! -e "$DATABASE_NAME.psq" ] && \
    [ ! -e "$DATABASE_NAME.psi" ] && \
    [ ! -e "$DATABASE_NAME.psd" ] && \
    [ ! -e "$DATABASE_NAME.pog" ] && \
    [ ! -e "$DATABASE_NAME.pni" ] && \
    [ ! -e "$DATABASE_NAME.pnd" ] && \
    [ ! -e "$DATABASE_NAME.pin" ] && \
    [ ! -e "$DATABASE_NAME.phr" ] && \
    makeblastdb -in $DATABASE -out $DATABASE_NAME -dbtype prot -parse_seqids
fi
# final report
# Write the report preamble (a single multi-line string -- do not edit the
# inner "#" lines, they are part of the output, not shell comments).
echo "\
# blastp report:
# Input: $INPUT_FASTA
# Database: $DATABASE_NAME
# Evalue: $EVALUE
# Fields: subject id, subject length, alignment length, evalue, bit score, % identity, % percid, annotation
" > $OUTPUTREPORT
# Fixed-width column headers followed by a separator rule.
printf "%30s %10s %10s %10s %8s %10s %10s %s\n" \
"HitID" "Length" "Alignment" "Evalue" "BitScore" "%id" \
"%percid" "Annotation" >> $OUTPUTREPORT
echo \
-------------------------------------------------------------------------------------------------------------- \
>> $OUTPUTREPORT
# Loop state: total input lines, current entry index, current line number,
# and a flag marking that the next line starts a new FASTA entry.
NUM_LINES=$(wc -l < $INPUT_FASTA)
ENTRY=0
COUNT=1
NEW_ENTRY=1
# Main loop: split the input FASTA into one query file per entry, blast each
# query, compute % identity for the top hits, and append them to the report.
while [[ $COUNT -le $NUM_LINES ]]
do
#
## extract one fasta query from input file
#
THIS_LINE=$(sed "${COUNT}q;d" $INPUT_FASTA)
if [[ $NEW_ENTRY == 1 ]] # '>' line of a new entry
then
echo $THIS_LINE > query.$ENTRY.fasta
COUNT=$((COUNT+1))
NEW_ENTRY=0
continue
else
if [[ $THIS_LINE =~ ^\> ]] # '>' line of the next new entry
then
NEW_ENTRY=1
else # sequence lines
echo $THIS_LINE >> query.$ENTRY.fasta
COUNT=$((COUNT+1))
if [[ $COUNT -gt $NUM_LINES ]] # the last line
then
echo
else # not the last line
continue
fi
fi
fi
# At this point query.$ENTRY.fasta holds one complete entry; process it.
printf "\n\n\n"
echo "*********** processing entry $ENTRY: ***********"
printf "\n\n\n"
#
## Blast
#
echo "@@@ BLASTing"
blastp -outfmt \
"7 sseqid slen length evalue bitscore pident" \
-query query.$ENTRY.fasta -out blastp.$ENTRY.report \
-db $DATABASE_NAME -evalue $EVALUE -num_threads $NUM_THREDS -lcase_masking
#-num_descriptions $NUM_DESCRIPTIONS -num_alignments $NUM_ALIGNMENTS
#
## output report
#
# header for every entry
echo "Query $(sed "1q;d" query.$ENTRY.fasta):" >> $OUTPUTREPORT
echo >> $OUTPUTREPORT
## append percid to report
LINE_NUM=2
while read line
do
if [[ $line == *"hits found"* ]] # Append to Fields description with "% percid"
then
echo $line >> $OUTPUTREPORT
elif [[ $line == \#* ]] # other comment line
then
# not include comment
echo
else # entries that need to be processed
#
## calculate PID for top hits
#
echo "@@@ calculating % percid for top $((LINE_NUM - 1)) hit"
ID=$(echo $line | awk 'BEGIN { FS="|" } { print $2 }')
blastdbcmd -db $DATABASE_NAME -entry $ID -out blastdbcmd.$ENTRY.fasta
cat query.$ENTRY.fasta blastdbcmd.$ENTRY.fasta > Match.$ENTRY.fasta
mafft Match.$ENTRY.fasta > Match.$ENTRY.fasta.aligned
percid Match.$ENTRY.fasta.aligned percid.$ENTRY.percid_matrix
# PERCENT=$(cat percid.$ENTRY.percid_matrix | head -n$LINE_NUM | tail -n1 | awk '{print $1}')
PERCENT=$(cat percid.$ENTRY.percid_matrix | tail -n1 | awk '{print $1}')
PERCENT=$(echo "scale=2; $PERCENT*100" | bc)
PERCENT=$(printf '%*.*f' 0 2 "$PERCENT")
#
## extract annotation
#
echo "@@@ extracting annotation"
ID=$(echo $line | awk 'BEGIN { FS="|" } { print $2 }')
if [[ $PSI_BLAST_DB == *.fasta || $PSI_BLAST_DB == *.FASTA ]]
then
ID_header=$(grep $ID $PSI_BLAST_DB)
TITLE=$(echo $ID_header | awk 'BEGIN { FS="|" } { print $5 }')
else
TITLE=$(blastdbcmd -db $DATABASE_NAME -dbtype prot -entry $ID | head -n1 | awk -v id=$ID 'BEGIN { FS=id } { print $2 }' | cut -d " " -f2- | awk 'BEGIN { FS=">" } { print $1 }')
fi
# NOTE(review): $line is a plain scalar, so ${line[0]} expands to the whole
# line and ${line[1]}..${line[5]} expand to nothing; the format fields are
# actually filled by IFS word-splitting of the unquoted expansion.  Fragile
# but functional -- confirm before changing.
IFS=" "
printf "%30s %10s %10s %10s %8s %10s %10s %s\n" ${line[0]} \
${line[1]} ${line[2]} ${line[3]} ${line[4]} ${line[5]} $PERCENT $TITLE >> $OUTPUTREPORT
if [[ $LINE_NUM -gt $NUM_REPORT_HITS ]]
then
break
fi
LINE_NUM=$((LINE_NUM+1))
fi
done <blastp.$ENTRY.report
echo >> $OUTPUTREPORT
#
## if backward searching enabled
#
if [[ $BACK == 1 ]]
then
echo "@@@ top hit backward searching"
# Build a BLAST database from the *input* file once, so the top hit can be
# searched back against the original queries.
if [[ ! -e "input_as_db.phr" ]]
then
makeblastdb -in $INPUT_FASTA -out input_as_db -dbtype prot -parse_seqids
fi
BACKWARD_SEARCH_ENTRY_ID=$(grep "ref|\|gi|" blastp.$ENTRY.report | head -n1 | awk 'BEGIN { FS="|" } { print $2 }')
ENTRY_STRING="'$BACKWARD_SEARCH_ENTRY_ID'"
eval "blastdbcmd -db $DATABASE_NAME -dbtype prot -entry $ENTRY_STRING > backward_search.$ENTRY.fasta"
blastp -outfmt \
"7 qseqid sseqid slen length evalue bitscore pident" \
-query backward_search.$ENTRY.fasta -out backward_search.$ENTRY.report \
-db input_as_db -evalue $EVALUE -num_threads $NUM_THREDS -lcase_masking
echo " Backward Search hits:" >> $OUTPUTREPORT
#grep "ref|\|gi|" backward_search.$ENTRY.report | head -n1 | awk 'BEGIN { FS="|" } { print $2 }' >> $OUTPUTREPORT
REPORT_STRING=$(grep "^gi|" backward_search.$ENTRY.report | head -n1)
#
## backward PID calculation
#
echo "@@@ calculating top hit backward searching %percid"
# align query sequence with all hits
cat backward_search.$ENTRY.fasta query.$ENTRY.fasta > backward_search.Match.$ENTRY.fasta
mafft backward_search.Match.$ENTRY.fasta > backward_search.Match.$ENTRY.fasta.aligned
# calculate PID using percid
percid backward_search.Match.$ENTRY.fasta.aligned backward_search.percid.$ENTRY.percid_matrix
PERCENT=$(cat backward_search.percid.$ENTRY.percid_matrix | tail -n1 | awk '{print $1}')
PERCENT=$(echo "scale=2; $PERCENT*100" | bc)
PERCENT=$(printf '%*.*f' 0 2 "$PERCENT")
if [[ $REPORT_STRING != "" ]]
then
# Here arr is a real array (from word-splitting REPORT_STRING).
arr=($REPORT_STRING)
printf "%30s %10s %10s %10s %8s %10s %10s\n" ${arr[1]} ${arr[2]} ${arr[3]} ${arr[4]} ${arr[5]} ${arr[6]} $PERCENT >> $OUTPUTREPORT
fi
echo >> $OUTPUTREPORT
fi
ENTRY=$((ENTRY+1))
done
#
## cleanup
#
echo "@@@ cleaning up"
# Bug fix: the original used [[ $DEBUG_CLEAN > 0 ]], which is a *string*
# comparison inside [[ ]]; -gt compares numerically and ${VAR:-0} treats an
# unset/empty value as 0.
if [[ $BACK == 1 ]]
then
    rm input_as_db.*
    if [[ ${DEBUG_CLEAN:-0} -gt 0 ]]
    then
        rm backward_search.*
    fi
fi

# cleanup of per-entry intermediate files
if [[ ${DEBUG_CLEAN:-0} -gt 0 ]]
then
    rm Match.*.fasta Match.*.fasta.aligned \
    percid.*.percid_matrix query.*.fasta \
    blastp.*.report blastdbcmd.*.fasta
fi
| true
|
362e0642923afde8cc45443be9b277e292594fd5
|
Shell
|
ahiguti/ase
|
/ase/tests/test_02_cclass/run.sh
|
UTF-8
| 790
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Run the numbered cclass tests: compile the C++ helper, execute each test
# via aserun-local, then diff the captured output against the expected files.
TESTS="01 02 03 04";
source ../compat.sh
compile_c asetestcpp.cpp libase-lib-ASETestCPP
# Phase 1: run every test, capturing stdout (and stderr separately when a
# test$i.expect2 file exists).
for i in $TESTS; do
args=""
if [ -f "test$i.args" ]; then args="`cat test$i.args`"; fi
if [ -f "test$i.expect2" ]; then
../../aserun-local test$i.lua $args > test$i.log 2> test$i.log2
else
../../aserun-local test$i.lua $args > test$i.log 2>> stderr.log
fi
done
# Phase 2: compare against expectations; $DIFF is provided by compat.sh.
for i in $TESTS; do
if ! $DIFF -u test$i.log test$i.expected; then
echo "test$i failed";
exit 1
fi
if [ -f "test$i.expect2" ]; then
# Only the first N stderr lines are compared, where N is the length of the
# expectation file; either expect2 or expect2ef may match.
lines="`wc -l < test$i.expect2`"
head -$lines test$i.log2 > test$i.log2h
if ! $DIFF -u test$i.log2h test$i.expect2 && \
! $DIFF -u test$i.log2h test$i.expect2ef; then
echo "test$i failed";
exit 1
fi
fi
done
echo "OK."
exit 0
| true
|
b0ada65be55c4ef049e783c5fcd4a22655f385ba
|
Shell
|
Joseph-Obuseri/Project-1-ELK-Stack-Implementation
|
/Git/Linux/for_loops.sh
|
UTF-8
| 453
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a message for a few US states.
# Bug fixes relative to the original: the array used "{ ... ]" (a syntax
# error), the tests were written "[$state == 'Texas']" without the spaces
# [ ] requires, and the if-chain was missing elif/fi keywords.
states=(
'Texas'
'California'
'Hawaii'
'Louisiana'
'Florida'
)

nums=(0 1 2 3 4 5 6 7 8 9)

for state in "${states[@]}";
do
    if [ "$state" == 'Texas' ]
    then
        echo "Texas is the best country"
    # NOTE(review): the original compares against lowercase 'california',
    # which never matches the array entry 'California'; kept verbatim.
    elif [ "$state" == 'california' ]
    then
        echo "California not cool"
    elif [ "$state" == 'Louisiana' ]
    then
        echo "Louisiana is cool yeah"
    fi
done
# NOTE(review): this second loop is truncated and not valid bash in the
# source file ("${nums{0}}" is malformed, "[$num=3]" lacks spaces, "[$num=$]"
# is incomplete, and the if/then/done are unbalanced); left as-is because the
# original intent cannot be recovered from what is here.
for num in ${nums{0}};
do
if [$num=3] || [$num=$] [$num=7]
then
echo
| true
|
f7a8b7d1398c1731481ebf048e3f293debefa328
|
Shell
|
jghamburg/lsb2-a-readme
|
/platform/semantic-release/build-docker.sh
|
UTF-8
| 546
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the semantic-release docker image (with gradle support - writes
# gradle.properties version=), tag it as latest, and push both tags.
DOCKER_GROUP="jghamburg"
BUILD_VERSION="1.0.0"
APP_NAME="git-semantic-release"

# Full image name without a tag; each command below appends one.
image="${DOCKER_GROUP}/${APP_NAME}"

echo "Current build version: ${BUILD_VERSION} ..."
docker build --no-cache=true --pull=true -t "${image}:${BUILD_VERSION}" .
docker tag "${image}:${BUILD_VERSION}" "${image}:latest"
docker push "${image}:${BUILD_VERSION}"
docker push "${image}:latest"
echo "The End"
| true
|
58dccc95f6d124322684d1a3acca30752ae259ba
|
Shell
|
dilawar/Scripts
|
/trigger_jackass.sh
|
UTF-8
| 717
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash -
#===============================================================================
#
# FILE: trigger_jackass.sh
#
# USAGE: ./trigger_jackass.sh
#
# DESCRIPTION: Trigger jackass.
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: Dilawar Singh (), dilawars@ncbs.res.in
# ORGANIZATION: NCBS Bangalore
# CREATED: Tuesday 13 February 2018 11:19:02 IST
# REVISION: ---
#===============================================================================
set -o nounset                              # Treat unset variables as an error

# Character to send (first argument, default "P") and the serial device
# (taken from $ARDUINO_PORT, default /dev/ttyACM0).
WRITE=${1:-P}
SERIAL=${ARDUINO_PORT:-/dev/ttyACM0}

# Put the port into raw mode at 38400 baud, then write the trigger character.
stty -F "$SERIAL" raw speed 38400
echo "$WRITE" > "$SERIAL"
| true
|
8fb860c25e8e00ef7077bd3030510f49b6cfac5c
|
Shell
|
Yubico/python-pyhsm-dpkg
|
/debian/yhsm-validation-server.preinst
|
UTF-8
| 1,125
| 3.46875
| 3
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
#! /bin/sh
#
# see: dh_installdeb(1)
#
# summary of how this script can be called:
# * <new-preinst> `install'
# * <new-preinst> `install' <old-version>
# * <new-preinst> `upgrade' <old-version>
# * <old-preinst> `abort-upgrade' <new-version>
#
# for details, see http://www.debian.org/doc/debian-policy/ or
# the debian-policy package
set -e
# On upgrade from 1.0.3c-1 or earlier, undo two packaging mistakes that
# release made on the yhsm-valsrv service user (wrong home dir and an
# unnecessary login shell).
case "$1" in
upgrade)
if [ "x$2" != "x" ]; then
if dpkg --compare-versions "$2" le "1.0.3c-1"; then
if getent passwd yhsm-valsrv | grep -q ":/home/yhsm-valsrv:"; then
# Version 1.0.3c-1 of this package accidentally created a home directory
# for the service user
usermod -d /var/cache/yubikey-val yhsm-valsrv
rmdir /home/yhsm-valsrv
fi
if getent passwd yhsm-valsrv | grep -q ":/bin/sh$"; then
# Version 1.0.3c-1 of this package set a shell on the service user,
# although one is not necessary
usermod -s /bin/false yhsm-valsrv
fi
fi
fi
;;
install|abort-upgrade)
;;
*)
echo "preinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
#DEBHELPER#
| true
|
eaa5e069ee73768e73a0513ac64f35ef061cdccb
|
Shell
|
alexey85/dns325-transmission
|
/make_transmission.sh
|
UTF-8
| 3,580
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# Cross-compile transmission (and the libraries it depends on) for the
# D-Link DNS-325 NAS using the vendor GPL toolchain.
#
# Bug fix (error handling): the original ended each build chain with
# '... && if [ $? -ne 0 ]; then echo"fail build $file" && exit; fi'.
# That check could never fire: the "if" only ran when the chain had already
# *succeeded* (so $? was 0), 'echo"fail...' (missing space) is not a valid
# command, and a bare "exit" would have exited with status 0 anyway.
# Each chain now aborts with a message and status 1 via '|| { ...; exit 1; }'.

# cross compilation vars
export TARGET_HOST="arm-none-linux-gnueabi"
export BUILD_HOST=$(uname -m)
export CROSS_COMPILE=${TARGET_HOST}-

#building environment
WORKDIR=$(pwd)
export WORKDIR
export TOOLCHAIN_DIR=${WORKDIR}/dns325_GPL
export MKAPFG_DIR=${WORKDIR}/"ShareCenter Add-On SDK_v2.0_12192011"
PATH=${TOOLCHAIN_DIR}/${CROSS_COMPILE}6xxx/bin:$PATH
export PATH

export WORKDIR_LIB="$WORKDIR/lib"
if [ ! -d ${WORKDIR_LIB} ]; then mkdir ${WORKDIR_LIB} ; fi
export WORKDIR_INC="$WORKDIR/include"
if [ ! -d ${WORKDIR_INC} ]; then mkdir ${WORKDIR_INC} ; fi
export WORKDIR_OUT="$WORKDIR/transmission"
if [ ! -d ${WORKDIR_OUT} ]; then mkdir ${WORKDIR_OUT} ; fi

# compile environment
CC=${CROSS_COMPILE}gcc
CXX=${CROSS_COMPILE}g++
RANLIB=${CROSS_COMPILE}ranlib
LD=${CROSS_COMPILE}ld
STRIP=${CROSS_COMPILE}strip
export CC
export CXX
export RANLIB
export LD
export STRIP
export LDFLAGS="-L$WORKDIR_LIB"

file=libiconv-1.9.2 && \
echo "building $file" && \
cd ${TOOLCHAIN_DIR} && \
tar zxf ${file}.tgz && \
chmod -R +rw ${file} && \
cd ${file} && \
./configure --host="$TARGET_HOST" --build="$BUILD_HOST" --prefix="$WORKDIR" && \
make clean && \
make && \
make install || { echo "fail build $file"; exit 1; }

file=zlib-1.2.3 && \
echo "building $file" && \
cd ${TOOLCHAIN_DIR} && \
tar zxf ${file}.tgz && \
chmod -R +rw ${file} && \
cd ${file} && \
./configure --prefix="$WORKDIR" && \
make clean && \
make && \
make install || { echo "fail build $file"; exit 1; }

file=openssl-0.9.7 && \
echo "building $file" && \
cd ${TOOLCHAIN_DIR} && \
tar zxf ${file}.tgz && \
chmod -R +rw ${file} && \
cd ${file} && \
./Configure --prefix="$WORKDIR" no-static no-threads no-dso no-zlib shared linux-elf-arm:${CC} && \
make clean && \
make depend && \
make && \
make install || { echo "fail build $file"; exit 1; }

file=curl-7.19.7 && \
echo "building $file" && \
cd ${TOOLCHAIN_DIR} && \
tar zxf ${file}.tgz && \
chmod -R +rw ${file} && \
cd ${file} && \
./configure --host="$TARGET_HOST" --build="$BUILD_HOST" --with-zlib="$WORKDIR" --with-ssl="$WORKDIR" \
--prefix="$WORKDIR" --without-random --disable-static --enable-shared && \
make clean && \
make && \
make install || { echo "fail build $file"; exit 1; }

file=libevent-2.0.21-stable && \
echo "building $file" && \
cd ${TOOLCHAIN_DIR} && \
if [ ! -f ${file}.tar.gz ]; then wget https://github.com/downloads/libevent/libevent/${file}.tar.gz ; fi && \
tar zxf ${file}.tar.gz && \
cd ${file} && \
./configure --host="$TARGET_HOST" --build="$BUILD_HOST" --prefix="$WORKDIR" --enable-static --disable-shared && \
make clean && \
make && \
make install || { echo "fail build $file"; exit 1; }

file=transmission-2.75 && \
echo "building $file" && \
cd ${TOOLCHAIN_DIR} && \
if [ ! -f ${file}.tar.bz2 ]; then wget http://download.transmissionbt.com/files/${file}.tar.bz2 ; fi && \
tar xjf ${file}.tar.bz2 && \
cd ${file} && \
./configure \
LIBEVENT_LIBS="-L$WORKDIR_LIB -levent -liconv" \
LIBEVENT_CFLAGS="-I$WORKDIR_INC" \
OPENSSL_LIBS="-L$WORKDIR_LIB -lcrypto -lssl -liconv" \
OPENSSL_CFLAGS="-I$WORKDIR_INC" \
LIBCURL_LIBS="-L$WORKDIR_LIB -lcurl -liconv" \
LIBCURL_CFLAGS="-I$WORKDIR_INC" \
--with-zlib="$WORKDIR" --enable-lightweight --prefix="$WORKDIR_OUT" --host="$TARGET_HOST" \
--build="$BUILD_HOST" --disable-nls --enable-daemon --without-gtk && \
make clean && \
make && \
make install && \
$STRIP "$WORKDIR_OUT"/bin/transmission-* || { echo "fail build $file"; exit 1; }
| true
|
8bcfdecda281301b8a332761c1e9e30daeaab3f7
|
Shell
|
ProteoWizard/pwiz
|
/pwiz/analysis/eharmony/scripts/mergePepXML.sh
|
UTF-8
| 299
| 3.03125
| 3
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Merge two pepXML files: strip the closing tags from the first file, then
# append everything after the analysis_timestamp line of the second file.
# Usage: mergePepXML.sh <file1> <file2> <outfile>

# Refuse to run without the three required arguments instead of silently
# reading stdin / writing to an empty filename.
if [ $# -ne 3 ]; then
    echo "usage: $0 <file1> <file2> <outfile>" >&2
    exit 1
fi

file1=${1}
file2=${2}
outfile=${3}

# Remove the closing tags so more content can be appended after them.
sed 's/<\/msms_run_summary>//' "$file1" > temp1
sed 's/<\/msms_pipeline_analysis>//' temp1 > temp2

# Emit file2's content after its analysis_timestamp line; tee also echoes it
# to stdout, as the original did.
awk 'BEGIN { DOPRINT = 0 } DOPRINT == 1 { print } /analysis_timestamp/ { DOPRINT = 1 }' "${file2}" | tee -a temp2

mv temp2 "$outfile"
rm temp1
# Bug fix: the original also ran "rm temp2" here, which always failed
# because temp2 had already been moved to $outfile.
| true
|
e4f4d3d592cbcdd00cb572a9b4bcc3a4cad72dc0
|
Shell
|
y-kitamu/dockers
|
/node/entrypoint.sh
|
UTF-8
| 295
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: start the ssh daemon, then launch JupyterLab as
# ${USER_NAME}.

run_as_user() {
    # Serve JupyterLab on all interfaces, port 5000, token auth disabled,
    # rooted at the invoking user's work directory.
    jupyter lab --ip='*' --port=5000 --no-browser --NotebookApp.token='' --notebook-dir=/home/$(id -n -u)/work
}

# start sshd
/usr/sbin/sshd

# Serialize the function and re-declare it inside a shell running as
# ${USER_NAME}, so jupyter picks up that user's identity and home.
RUN_AS_USER=$(declare -f run_as_user)
sudo -H -u ${USER_NAME} bash -c "${RUN_AS_USER}; run_as_user"
| true
|
ef2ca9a76ae8280bacee4577d6bc93665b843b96
|
Shell
|
deephealthproject/eddl
|
/formulas/bash/install_release.sh
|
UTF-8
| 202
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
# Download a tagged eddl release from GitHub, build it with CMake and
# install it.
VERSION=v0.5.4a
sudo apt-get install wget
wget "https://github.com/deephealthproject/eddl/archive/$VERSION.tar.gz"
tar -xf "$VERSION.tar.gz"
# Bug fix: GitHub source archives for a tag "vX.Y.Z" extract to
# "<repo>-X.Y.Z" (leading "v" stripped), so the directory is
# "eddl-0.5.4a", not "eddl-v0.5.4a" as the original assumed.
cd "eddl-${VERSION#v}"
mkdir build
cd build
cmake ..
make install
| true
|
2f98a972a207ee4072491d0aa182fc1066171cb6
|
Shell
|
c-rainstorm/innodb_parser
|
/run-mysql-in-docker.sh
|
UTF-8
| 1,255
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Start (or restart) the project's MySQL container: ensure the image exists,
# generate a root password on first run, then launch the container with the
# configured volumes.  All MYSQL_* variables come from iprc.sh;
# ensureImageExist/stopContainer come from tools.sh.
source iprc.sh
source tools.sh
ensureImageExist "${MYSQL_IMAGE}"
mkdir -p "${MYSQL_MOUNT_VOLUME}"
mkdir -p "${PASSWORD_DIR}"
echo "######################################################"
# shellcheck disable=SC2046
# shellcheck disable=SC2002
# Generate a random root password once and reuse it on later runs.
if [ ! -e "${MYSQL_PASS_FILE}" ] || [ $(cat "${MYSQL_PASS_FILE}" | wc -l) -eq 0 ]; then
openssl rand -base64 10 >"${MYSQL_PASS_FILE}"
echo "new password generated, find it in ${MYSQL_PASS_FILE}"
else
echo "use password in ${MYSQL_PASS_FILE} generated before"
fi
echo "######################################################"
stopContainer "${MYSQL_CONTAINER_NAME}"
# Launch MySQL with config, data and init-script directories bind-mounted.
docker run --name "${MYSQL_CONTAINER_NAME}" -p 3306:3306 \
-v "${MYSQL_MY_CONF_DIR}":/etc/mysql/conf.d \
-v "${MYSQL_MOUNT_VOLUME}":/var/lib/mysql \
-v "${MYSQL_INIT_DB_DIR}":/docker-entrypoint-initdb.d \
-e MYSQL_ROOT_PASSWORD="$(cat "${MYSQL_PASS_FILE}")" \
-d "${MYSQL_IMAGE}"
# Echo the effective DB configuration and init scripts for the operator.
echo "#######################DB 配置#######################"
cat "${MYSQL_MY_CONF_DIR}"/*
echo "#######################DB 配置#######################"
echo "#######################DB 初始化#######################"
cat "${MYSQL_INIT_DB_DIR}"/*
echo "#######################DB 初始化#######################"
| true
|
27d81d79a0631753c1c4ff55744eab53397991ae
|
Shell
|
poncovka/java-bytecode-size-optimization
|
/app/projects/analysis/src/main/sh/jbyca-postprocessing-lib
|
UTF-8
| 3,043
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
# Author: Vendula Poncova
# Date: 14.4.2016
# Library of functions for jbyca-postprocessing.
#
# Usage:
# source jbyca-postprocessing-lib
# Sort .cls lines: field 1 numeric ascending, then field 3 reverse general
# numeric, then field 2 reverse numeric, then field 4 lexically.
sort_cls() {
sort -k 1,1n -k 3,3rg -k 2,2rn -k 4,4
};
# Keep only the first line seen for each distinct pattern (tab field 4).
unique_patterns() {
awk -F "\t" '!seen[$4]++'
};
# Pass through at most $1 lines per distinct value of tab field 1.
cut_patterns() {
awk -F "\t" -v limit="$1" 'count[$1]++ < limit'
};
# Keep lines that contain a parameterized instruction; the negative
# lookahead drops lines whose only "call" is a LABEL reference.
skip_no_parameters() {
# Find lines with numbered or full parameters,
# which are not only labels.
grep -P ' (?!LABEL)[[:alpha:]]*\('
};
# Keep lines where the same "NAME(index)" parameter occurs at least twice
# (the \( ... \) group plus \1 backreference enforce the repetition).
filter_same_parameters() {
# Find lines with parameters of the same index.
# example: STORE VAR(0); LOAD VAR(0);
grep '\( [[:alpha:]]\+([[:digit:]]\+)\).*\1'
};
# Pattern-type filter p1: pass every non-empty line (no real filtering).
filter_p1() {
grep '.'
};
# Pattern-type filter p2: parameterized lines that repeat a parameter index.
filter_p2() {
skip_no_parameters | filter_same_parameters
};
# Pattern-type filter p3: any parameterized (non-label-only) lines.
filter_p3() {
skip_no_parameters
};
# Keep patterns whose last instruction is an IF* conditional.
filter_end_with_if() {
grep -P '; IF[^\;]*; $'
}
# Keep patterns whose last instruction is a POP*.
filter_end_with_pop() {
grep -P '; POP[^\;]*; $'
}
# Keep patterns whose last instruction is an arithmetic/logic operation.
filter_end_with_math() {
grep -P '; [^\;]*(ADD|SUB|MUL|DIV|REM|NEG|SHL|SHR|AND|OR|XOR); $'
};
# Keep patterns that start with BEGIN and finish with END.
filter_begin_end() {
grep -P '\tBEGIN;.*END; $'
};
# Keep lines mentioning both "null" and a later "NULL".
# NOTE(review): the function name "filer_null" looks like a typo for
# "filter_null", but it is part of the library's public interface, so it is
# kept as-is.
filer_null() {
grep -P 'null.*NULL'
};
# Keep lines with a string constant followed by a StringBuilder reference.
filter_string() {
grep -P 'CONST S.*StringBuilder'
};
# Delete the file named by $1 when it exists but has zero size.
remove_empty() {
[ -s "$1" ] || rm -f "$1"
};
# generate_basic_cls IN_DIR OUT_DIR
# For every IN_DIR/patterns*.out, drop its header line, sort it with
# sort_cls and write the result to OUT_DIR/<name>.cls (removing empty
# results).
generate_basic_cls() {
local in="$1"
local out="$2"
for input in $in/patterns*.out; do
# check that file exists
if [ ! -f $input ]; then
continue
fi
# define name of output file
output=$out/$(basename $input '.out')'.cls'
# generate output
tail -n +2 $input | \
sort_cls > $output
# remove file if empty
remove_empty $output
done
};
# merge_cls IN_DIR OUT_DIR OI PI W
# Merge all OUT_DIR/patterns_OI_PI_<W>_*.cls files (W selects a wildcard
# class: "a" = any, "x" = zero, otherwise non-zero), deduplicate patterns,
# apply the filter_PI function and write patterns_OI_PI_W.cls; prints the
# resulting file name.
merge_cls() {
local in="$1"
local out="$2"
local oi=$3
local pi=$4
local w=$5
output="${out}/patterns_${oi}_${pi}_${w}.cls"
# define output
if [ "$w" = "a" ]; then
wregex="*"
elif [ "$w" = "x" ]; then
wregex="0"
else
wregex="[1-9]*"
fi
# define names of input files
name="patterns_${oi}_${pi}_${wregex}_*.cls"
# define filter
filter="filter_${pi}"
# generate output
find "$out" -name "${name}" -exec cat {} \; | sort_cls \
| unique_patterns \
| $filter > $output
# remove file if empty
remove_empty $output
# return name of file
echo $output
};
# apply_filter IN_FILE OUT_DIR SUFFIX FILTER
# Run FILTER (a shell command/function) over IN_FILE and write the result
# to OUT_DIR/<basename>_SUFFIX.cls; removes an empty result and prints the
# output file name.
apply_filter() {
local in="$1"
local out="$2"
local suffix="$3"
local filter="$4"
# define input
input="$in"
# define output
output=$out/$(basename $input '.cls')'_'${suffix}'.cls'
# generate output
cat "$input" 2>/dev/null | $filter > "$output"
# remove file if empty
remove_empty "$output"
# return name of file
echo $output
};
# summarize_cls IN_FILE OUT_DIR MAX
# Shorthand: keep at most MAX lines per pattern and tag the output "summary".
summarize_cls() {
apply_filter "${1}" "${2}" "summary" "cut_patterns ${3}"
};
# filter_and_summarize IN_FILE OUT_DIR SUFFIX FILTER MAX
# Apply FILTER into OUT_DIR/SUFFIX, then summarize that result (MAX lines
# per pattern) into OUT_DIR/SUFFIX/summary; prints the final file name.
filter_and_summarize() {
local in="$1"
local out="$2"
local suffix="$3"
local filter="$4"
local max="$5"
output=$(apply_filter "$in" "$out/$suffix" "$suffix" "$filter")
output=$(summarize_cls "$output" "$out/$suffix/summary" "$max")
echo $output
}
# end of file
| true
|
366c3ec836af1e66a70f89da854056c3acfd3388
|
Shell
|
Furniva1/zero-focus-code
|
/Bash-Script-Snippets/read-in-read-out.sh
|
UTF-8
| 1,498
| 3.96875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#let script exit if a command fails
set -e
#let script exit if an unsed variable is used
set -u
# NOTE(review): aliases are not expanded in non-interactive bash scripts
# unless "shopt -s expand_aliases" is set, so this alias has no effect here
# and every echo below still appends a newline -- confirm the intent before
# "fixing" it, since enabling it would change all visible output.
alias echo="echo -n"
# Intro screen: explain what the demo does, pause, then move on to the
# file-creation step.
function1(){
clear
echo
echo "This program grabs internal text and outputs it to"
echo "a file called filename123.txt that will be created"
echo "in the directory the code is run. This file will "
echo "be read to the output screen using the cat command"
echo "and then the program will remove the filename123.txt"
sleep 15
function2
}
# Create filename123.txt in the current directory and fill it with the
# explanation text, then hand off to the read-back step.
function2(){
clear
echo
touch ./filename123.txt
echo "filename123.txt has been created in your local directory"
sleep 2
echo "This program grabs internal text and outputs it to" > filename123.txt
echo "a file called filename123.txt that will be created" >> filename123.txt
echo "in the directory the code is run. This file will " >> filename123.txt
echo "be read to the output screen using the cat command" >> filename123.txt
echo "and then the program will remove the filename123.txt" >> filename123.txt
echo
echo "filename123.txt now has text!"
sleep 2
clear
function3
}
# Print the file back to the terminal line by line, then hand off to the
# cleanup step.
function3(){
echo
# Bug fixes: read with -r (keep backslashes) and IFS= (keep leading and
# trailing whitespace), redirect the file into the loop instead of piping
# from cat (avoids a useless cat and a subshell), and print with printf
# so the line is not word-split or re-interpreted by echo.
while IFS= read -r LINE
do
printf '%s\n' "$LINE"
done < ./filename123.txt
echo
echo "This is the internal text output"
sleep 10
clear
function4
}
# Cleanup: remove the demo file, show a short ".o" progress animation,
# say goodbye and exit the whole script.
function4(){
echo
rm ./filename123.txt
echo "Please wait for cleanup"
for i in {1..11}
do
x="."; y="o"
echo -ne $x$y; sleep .5
done
echo
echo "filename123.txt has been removed - Goodbye!"
sleep 3
clear
exit 0
}
# Run the demo once; function4 ends with "exit 0", so neither the explicit
# "exit 0" below nor a second loop iteration is ever reached.
while [ 1 ]
do
function1
exit 0
done
| true
|
8f454addf3f99ec14a90464680d0513f1a366449
|
Shell
|
844196/iiyo_koiyo
|
/iiyo_koiyo.sh
|
UTF-8
| 756
| 4.03125
| 4
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
#
# @(#) Generates and displays pseudo-random digits, looping until the
#      generated sequence matches the given number.
#
# Usage:
#   iiyo_koiyo.sh [num]
#   command | iiyo_koiyo.sh
#
# Author:
#   @84____ (Masaya Tk.)
#
# Script file name (without directories) used in error messages.
base=${0##*/}
# Report an invalid argument on stderr and abort.  (The misspelling
# "invaild" is kept byte-for-byte from the original message.)
_error() {
printf '%s: invaild argument\n' "${base%.sh}" >&2
exit 1
}
# Pick one pseudo-random digit 0-9.  The digit is both stored in the global
# variable $rand (for the caller to accumulate) and printed without a
# trailing newline (so the user sees the stream of digits).
_randCreate() {
rand=$((${RANDOM} % 10))
printf ${rand}
}
# Read the target number from a pipe if stdin is one, otherwise from $1.
if [ -p /dev/stdin ]; then
iiyo=$(cat -)
elif [ -n "${1}" ]; then
iiyo=${1}
else
_error
fi
# Reject non-digit input.  NOTE(review): "if $( cmd >/dev/null 2>&1 )"
# works only because the substitution expands to nothing, in which case the
# shell uses the substitution's exit status -- "if echo ... | grep -q" would
# be the conventional spelling.
if $(echo ${iiyo} | grep '[^0-9]' >/dev/null 2>&1); then
_error
fi
# Generate digits, keeping a sliding window $i no longer than the target;
# stop once the window ends with the target sequence.
while :;
do
_randCreate
i=${i}${rand}
if $(echo ${i} | grep "${iiyo}$" >/dev/null 2>&1); then
break
fi
if [ ${#i} -eq ${#iiyo} ]; then
i=${i:1}
fi
done

exit 0
| true
|
4ffc59afa0ed4235fae486b03c4a7b7426098632
|
Shell
|
bedezign/docker-utility-scripts
|
/php/scripts/install_mongodb.sh
|
UTF-8
| 468
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# Install the MongoDB PHP extension via pecl together with its build
# dependencies, then enable it from PHP's extra-ini directory.
# Bug fix: the original first line was "# /usr/bin/env sh" -- an ordinary
# comment (missing the "!" and starting after a space), not a shebang.

echo '*** Installing MongoDB extension'
# MongoC
apt-get install pkg-config libssl-dev libsasl2-dev
# BSON
apt-get install git gcc automake autoconf libtool
pecl install mongodb
# Ask PHP where additional .ini files are scanned from, by grepping its own
# phpinfo() output for the "additional .ini files" line.
export INI_DIR=`php -r 'ob_start(); phpinfo(INFO_GENERAL); array_map(function($value) { if (strpos($value, "additional .ini files =>")!==false) echo end(explode(" => ", $value)); }, explode(PHP_EOL, ob_get_clean()));'`
echo extension=mongodb.so > $INI_DIR/mongodb.ini
| true
|
d391cf9e47782157f679d276ba1d4f776c5af645
|
Shell
|
leonhostetler/sample-programs
|
/shell/variables/variables.sh
|
UTF-8
| 629
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
#
# variables.sh
#
# Leon Hostetler, June 9, 2018
#
# CALL USING: sh variables.sh
#

# Assign a number and a string to two shell variables.
myvar1=19
myvar2="Leon Hostetler"

# Show both values.
echo $myvar1
echo $myvar2

# Capture a command's output with $( ... ): here 'ls | wc -l' counts the
# entries in the current directory.
numfiles=$(ls | wc -l)
echo "Number of files = " $numfiles

# Integer arithmetic uses $(( ... )); the $ on variable names is optional
# inside the double parentheses.
four=4
foursquared=$((four * four))
echo "four squared = " $foursquared
| true
|
3cbfcd1b94b55ae2cec1dd739dcce549f5620108
|
Shell
|
idem2lyon/CloudWhip
|
/scripts/aws_cli_example.sh
|
UTF-8
| 1,700
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This is an example script to deploy PODs containing three custom instances (2 Linux and 1 Windows) for Lab 1: Gaining Access to OS & Application.
# Note that the VPC and Subnets are manually created using the AWS VPC Wizard and this script is used only for deploying the instances in their respective subnets.
# Bug fixes: the Name-tag values contained typographic quotes (e.g.
# Value=S$counter’:Source’...) pasted from a document, which would have been
# sent to AWS literally; and the Windows instance tag hard-coded "S1"
# instead of using $counter like the other two.

studentcount=25
counter=0
for j in $( seq 0 1);
do
for i in $( seq 1 $studentcount);
do
# start Source w/ pub IP
instance_id=$(aws ec2 run-instances --image-id [YOUR CUSTOM AMI-ID] --count 1 --instance-type m1.small --key-name [YOUR KEY-PAIR NAME] --private-ip-address 172.16.$j.$i'1' --subnet-id [YOUR SUBNET ID] --associate-public-ip-address | grep InstanceId |awk '{print $2 }' | cut -d '"' -f2)
counter=$((counter+1))
# create a name tag
aws ec2 create-tags --resources $instance_id --tags Key=Name,Value=S$counter:Source.$j.${i}1
#start Linux target
instance_id=$(aws ec2 run-instances --image-id [YOUR CUSTOM AMI-ID] --count 1 --instance-type m1.small --key-name [YOUR KEY-PAIR NAME] --private-ip-address 172.16.$j.$i'2' --subnet-id [YOUR SUBNET ID] | grep InstanceId |awk '{print $2 }' | cut -d '"' -f2)
#Create a name tag
aws ec2 create-tags --resources $instance_id --tags Key=Name,Value=S$counter:Linux.$j.${i}2
#start win2k3
instance_id=$(aws ec2 run-instances --image-id [YOUR CUSTOM AMI-ID] --count 1 --instance-type m1.small --key-name [YOUR KEY-PAIR NAME] --private-ip-address 172.16.$j.$i'3' --subnet-id [YOUR SUBNET ID] | grep InstanceId |awk '{print $2 }' | cut -d '"' -f2)
#Create a name tag
aws ec2 create-tags --resources $instance_id --tags Key=Name,Value=S$counter:Win.$j.${i}3
done
done
| true
|
0b5dcfd6371be9e715343fba5fe27f96b52ca0b7
|
Shell
|
axelhodler/dotfiles
|
/.zprofile
|
UTF-8
| 128
| 2.578125
| 3
|
[] |
no_license
|
if [[ "$TERM" != "screen-256color" ]]
then
tmux attach-session -t home || tmuxinator start home
fi
export LC_ALL=en_US.UTF-8
| true
|
19ea128fba102327e630813fc0f1632ea3d50b2c
|
Shell
|
erichardy/shabc
|
/bin/Faire-TOUT-HTML.sh
|
UTF-8
| 263
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Regenerate the HTML pages for every tune directory under the music tree.
# Usage: Faire-TOUT-HTML.sh [root-dir]   (defaults to the path below)
export PATH="/opt/local/shabc/bin:$PATH"
if [ $# -lt 1 ]
then
# Default root.  (The original assigned "/var/wwwMUSIC/MUSIC" and then
# immediately overwrote it with this path; the dead assignment is removed.)
R="/var/wwwTMP/abc-irlande/"
else
R=$1
fi
# Bug fix: abort when the root is missing instead of running "git pull" and
# the generators in whatever directory the script happened to start in.
cd "$R" || exit 1
git pull
for REP in Divers-Irlandais Hornpipes Jigs Reels
do
echo $REP
cd "$REP" || continue
FaireHTML.sh
FaireHTML-abcjs.sh
cd "$R"
done
| true
|
a05f48bec156f02a1a057adfc5f0b469dd53e4f6
|
Shell
|
saleyn/erl-patches
|
/old/0-otp.17.5-patch.sh
|
UTF-8
| 747
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Roll the OTP patch set forward from 17.4 to 17.5: optionally back up the
# patched files, regenerate the base patch from the diffs, then offer to
# apply each remaining numbered patch interactively.

OLD_PATCH_FILE=1-otp.17.4.patch
NEW_PATCH_FILE=1-otp.17.5.patch

[ -f $NEW_PATCH_FILE ] && echo "New patch exists!" && exit 1

# Bug fix: $yn is quoted everywhere below.  The original's unquoted
# '[ $yn = "y" -o $yn = "Y" ]' crashed with "unary operator expected" when
# the user just pressed Enter; the deprecated -o is replaced with ||.
echo "Backup original files [y/N]: "
read yn
if [ "$yn" = "y" ] || [ "$yn" = "Y" ]; then
    for f in $(awk '/\+\+\+/{print $2}' $OLD_PATCH_FILE) ; do cp -v $f $f.orig ; done
fi

echo "Apply patches [Y/n]: "
read yn
if [ "$yn" = "y" ] || [ "$yn" = "Y" ]; then
    patch -p0 < $OLD_PATCH_FILE
    for f in $(awk '/\+\+\+/{print $2}' $OLD_PATCH_FILE) ; do
        diff -bur $f.orig $f >> $NEW_PATCH_FILE
    done
fi

# NOTE(review): the prompts say "[Y/n]" (default yes) but only an explicit
# y/Y answer applies the patch, as in the original -- confirm intent.
for f in {2..5}-otp*; do
    git apply --stat --check $f
    echo "Apply patch $f [Y/n]: "
    read yn
    if [ "$yn" = "y" ] || [ "$yn" = "Y" ]; then
        git apply $f
    else
        echo "Skipped $f"
    fi
done
| true
|
f7b1d3c77bfc0b837ca58efb293bddaff4b04ecb
|
Shell
|
thbishop/kv
|
/infra/script/install_api.sh
|
UTF-8
| 484
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash -ex
# Fetch the kv API bundle from the region-local S3 bucket and install it as
# an init.d service running under a dedicated user.
# Repeat the shell options with "set" so they also apply when the script is
# invoked as "bash install_api.sh" (shebang options are ignored then):
# -e aborts on the first failure, -x traces every command.
set -ex

# Discover the current region from the EC2 instance metadata service.
aws_region=$(curl --silent http://169.254.169.254/latest/dynamic/instance-identity/document|grep region|awk -F\" '{print $4}')

aws s3 cp "s3://kv-artifacts-${aws_region}/api.zip" /var/tmp/ --region "${aws_region}"

cd /var/tmp
unzip /var/tmp/api.zip
mkdir -p /opt/kv/api
mv /var/tmp/api /opt/kv/api/
chmod +x /opt/kv/api/api

# Run the service as an unprivileged dedicated user.
useradd kv-api
chown -R kv-api:kv-api /opt/kv

mv /var/tmp/kv-api /etc/init.d/
chmod +x /etc/init.d/kv-api
chkconfig kv-api on
service kv-api start
| true
|
5689b4581b488bbe948c055197d1aa6cce6d7be3
|
Shell
|
richdonne/Raspberry-Pi-LAMP-Install-Script
|
/install.sh
|
UTF-8
| 854
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
#########################################################################
#LAMP for Raspberry Pi #
#This script will install Apache, PHP, FTP, and MySQL. #
#This script was written by Harbour, Justin #
#[C] 2013 Justin Harbour: See LICENSE.md for details #
#########################################################################
# Bug fix: the shebang must be the *first* line of the file; it previously
# appeared after the banner and was treated as an ordinary comment, so the
# script ran under whatever shell happened to invoke it.

#Prerequisites
sudo apt-get update
#FTP
sudo apt-get install -y proftpd
#Apache
sudo apt-get install -y apache2
# Bug fix: "sudo echo ... >> file" performs the redirection in the
# *unprivileged* calling shell; tee under sudo actually writes the file.
echo "ServerName localhost" | sudo tee -a /etc/apache2/httpd.conf > /dev/null
#PHP
sudo apt-get install -y php5 libapache2-mod-php5 php5-intl php5-mcrypt php5-curl php5-gd php5-sqlite
#MySQL
sudo apt-get install -y mysql-server mysql-client php5-mysql
#Additional Dependencies
sudo apt-get install -y nmap zenmap
|
f1a30058315012b1afa47867ee5a27c139ae340e
|
Shell
|
sakaal/boot
|
/root/virt-install_boot.local.sh
|
UTF-8
| 770
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Logical volume group 'vg_guests' is configured as a libvirt storage pool.
# Test environment usage tested on Fedora 19:
# Drop any stale SSH host key for the guest's address so the next login
# does not trip the known_hosts mismatch check.
ssh-keygen -R 192.168.122.8
# Create and kickstart-install the CentOS 6.4 guest "boot.local" on KVM.
# The kickstart file is injected into the initrd and referenced via
# ks=file:/...; disk is a pre-sized LV, network uses virtio on the default
# libvirt NAT network.
# NOTE(review): console=xvc0 is a Xen-style console name — confirm it is
# intentional for a KVM guest.
virt-install --connect=qemu:///system \
--network=network:default,model=virtio \
--initrd-inject ~/git/boot/root/boot.local-ks.cfg \
--extra-args="ks=file:/boot.local-ks.cfg console=tty0 console=xvc0,115200n8" \
--name=boot.local \
--description="A bootstrapping node" \
--disk path=/dev/vg_guests/lv_boot.local,size=4,bus=virtio,sparse=false,cache=writethrough \
--ram=768 \
--arch=x86_64 \
--os-type=linux \
--os-variant=generic26 \
--vcpus=1 \
--check-cpu \
--accelerate \
--virt-type=kvm \
--location=http://www.nic.funet.fi/pub/Linux/INSTALL/Centos/6.4/os/x86_64/ \
--graphics=spice \
--noautoconsole
| true
|
5e42e48cad5e4ae65c396b7c87dce0a037f2ba03
|
Shell
|
fmherschel/ClusterTools2
|
/sbin/cs_show_cluster_patterns
|
UTF-8
| 5,975
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# cs_show_cluster_patterns
#
# (c) 2011-2014 SUSE Linux GmbH, Germany. Author: L.Pinne.
# GNU General Public License. No warranty.
#
# Version: 0.1 2014-08-14
#
# Resolve the cluster log file: optional site config, then corosync.conf's
# "logfile:" directive, then /var/log/messages as the last resort.
EXE="$0"
CFG="/etc/ClusterTools2/cs_show_cluster_patterns"
test -s $CFG && source $CFG
# BUGFIX: this used to be `test -z "/etc/corosync/corosync.conf"`, which tests
# a non-empty string literal and therefore never succeeds — the corosync
# logfile lookup was dead code. `test -s` checks the file itself.
test -s /etc/corosync/corosync.conf &&\
test -z "${CLUSTER_LOG}" &&\
CLUSTER_LOG=$(grep "^[^#]*logfile:.*/" /etc/corosync/corosync.conf |\
tr -d " " | cut -d":" -f2)
test -z "${CLUSTER_LOG}" &&\
CLUSTER_LOG="/var/log/messages"
# Glob matching rotated, bzip2-compressed copies of the log.
ZIPPED_LOG="${CLUSTER_LOG}*bz2"
# TODO: what error msgs. make sense ?
# Each pattern list below uses a quoting trick: the outer quotes and the
# per-line quotes pair up across lines, so the result is one newline-separated
# list of unquoted grep patterns (dots double as literal-dot/any-char).
# Users may override any list via the sourced config file.
# OS-level error patterns (kernel, filesystem, watchdog, syslog restarts).
test -z "${OSERR_PATTERN}" &&\
OSERR_PATTERN="
"NMI.received"
"NMI.*error"
"dlm.*link.*down"
"ocfs2.*ERR"
"ocfs2.*not.unmounted.cleanly"
"fsck.*recommended"
"EXT.-fs.error"
"shutdown.*shutting.down"
"syslog-ng.shutting.down"
"syslog-ng.starting.up"
"SoftDog.*not"
"
# TODO: verify cluster log msgs. maybe more generic possible?
# TODO: unmanaged FAILED
# TODO: event start, event end
# TODO: update config file from built-in pattern list
# Pacemaker/corosync stack patterns (totem, stonith, pengine decisions,
# crmd state transitions, sbd watchdog activity).
test -z "${CLUSTR_PATTERN}" &&\
CLUSTR_PATTERN="
"TOTEM.*FAILED.*RECEIVE"
"TOTEM.*processor.failed"
"TOTEM.*Incrementing.problem"
"pcmk.*Invalid.destination"
"pcmk.*Assertion.failure"
"pcmk.*Child.process.*exited"
"pcmk.*Sending.message.*failed"
"stonith.*Device.*not.found"
"stonith.*Found.[01].matching.devices"
"stonith.*failed"
"stonith.*timed.out"
"pengine.fenced.because.*.un-expectedly.down"
"pengine.*Node.*unclean"
"pengine.*fenced.*resource.failure"
"pengine.*Forcing.*away.*failures"
"pengine.*LogActions:.Start"
"pengine.*LogActions:.Stop"
"pengine.*LogActions:.Recover"
"pengine.*LogActions:.Restart"
"pengine.*LogActions:.Monitor"
"pengine.*LogActions:.Move"
"pengine.*LogActions:.Leave.*\(Started\)"
"pengine.*LogActions:.Leave.*\(Stopped\)"
"pengine.*LogActions:.Leave.*\(Started.unmanaged\)"
"pengine.*LogActions:.Leave.*\(Stopped.unmanaged\)"
"crm_resource.*Error"
"crmd.*quorum.lost"
"crmd.*our.DC.is.dead"
"crmd.*Resource.*active.at.shutdouwn"
"crmd.*Action.*not.supported"
"crmd.*State.transition.S_IDLE"
"crmd.*State.transition.S_POLICY_ENGINE"
"crmd.*State.transition.S_TRANSITION_ENGINE"
"crmd.*State.transition.S_STARTING"
"crmd.*State.transition.S_PENDING"
"crmd.*State.transition.S_ELECTION"
"crmd.*State.transition.S_INTEGRATION"
"crmd.*State.transition.S_FINALIZE_JOIN"
"crmd.*State.transition.S_NOT_DC"
"crmd.*ERROR:"
"crmd.*Updating.failcount.*stop"
"crmd.*Updating.failcount.*start"
"lrmd.*probe"
"lrmd.*RA.output.*failed"
"lrmd.*RA.output.*error"
"sbd.*Initializing"
"sbd.*Monitoring"
"sbd.*Using.watchdog"
"sbd.*Set.watchdog.timeout"
"sbd.*Writing.*to"
"sbd.*successfully.delivered"
"sbd.*Received.command"
"sbd.*mbox.read.failed"
"sbd.*Latency"
"
# TODO: SAPInstance, SAPDatabase patterns, See /etc/ClusterTools2/cs_show_cluster_patterns
# Resource-agent patterns (LVM, CTDB, DRBD, SAP, eDirectory, Filesystem).
test -z "${RESRC_PATTERN}" &&\
RESRC_PATTERN="
"Volume.group.*error"
"LVM.*not.*correctly"
"CTDB.*not.start"
"Timeout.*CTDB"
"not.in.Secondary.mode"
"demote.*still.primary"
"Clone.options.misconfigured"
"ERS_InstanceName.parameter.is.mandatory"
"Cannot.find.sapstartsrv"
"sapstartsrv.*for.instance"
"Expected.*instance.*profile"
"Expected.DIR_PROFILE.parameter"
"SAP.*Unauthorized"
"SAPHana.*RA.*rc=[1-7,9]"
"eDirectory.*failed.*stop"
"eDirectory.*not.*configured"
"eDir.configuration.error"
"ndsd.*no.*socket"
"eDirectory.isn.*running"
"eDirectory.configuration.not"
"Couldn.*find.device"
"Couldn.*fsck"
"Couldn.*mount"
"DANGER.*NOT.cluster-aware"
"
# Print command-line usage on stdout. $(basename $0) expands inside the
# here-document, so the output is identical to the former echo sequence.
help() {
cat <<EOF
 $(basename $0) OPTION

 --help show help.
 --version show version.
 --show show all.
 --fencing show fencing.
 --migration show migration.

 --count count total for each pattern.
 --zip count from compressed logs, too.
EOF
}
# Print every log line matching any configured pattern, roughly ordered by the
# leading "Mon DD HH:MM:SS" syslog timestamp (sort -M on the first 3 fields).
# Globals read: LOG (whitespace-separated log file list),
#               OSERR_PATTERN / CLUSTR_PATTERN / RESRC_PATTERN (pattern lists).
function run_grep_show() {
echo "logs: $LOG" >/dev/stderr
# TODO: more efficient loop
# The lists are deliberately expanded unquoted: each is a whitespace-separated
# set of individual grep patterns / filenames.
for e in ${OSERR_PATTERN} ${CLUSTR_PATTERN} ${RESRC_PATTERN}; do
for f in ${LOG}; do
# quote the individual filename and pattern so they cannot split or glob again
test -r "$f" && cat "$f"
done | grep -i "$e"
done | sort -M -k1,3
# TODO: sort by timestamp OK ? "Sep 27 17:04:11 "
# TODO: sort error msgs.?
}
# Print "<pattern> = <count>" for every configured pattern across all files
# in LOG. Uses bzgrep so both plain and bzip2-compressed rotated logs count.
# Globals read: LOG, OSERR_PATTERN, CLUSTR_PATTERN, RESRC_PATTERN.
function run_grep_count() {
echo "logs: $LOG" >/dev/stderr
# TODO: more efficient loop
for e in ${OSERR_PATTERN} ${CLUSTR_PATTERN} ${RESRC_PATTERN}; do
echo -n "$e = "
# concatenate every readable log, then count case-insensitive matches
for f in ${LOG}; do
test -r $f && cat $f
done | bzgrep -ic $e
done
}
# TODO: resources move success, fail
#Sep 26 16:57:22 ext2300s pengine: [1079]: notice: LogActions: Move rsc_fs_NFS_host (Started ext2301s -> ext2300s)
# Show resource migrations: every pengine "LogActions: Move" line from the
# readable files in LOG, in rough timestamp order.
awk_migration() {
echo "logs: $LOG" >&2
local logfile
for logfile in ${LOG}; do
[ -r "$logfile" ] && cat "$logfile"
done | sort -M -k1,3 |
awk '$0~/pengine.*LogActions:.Move/ {print}'
}
# TODO: nodes fence, leave, join
#Sep 27 16:07:14 ext2300s pengine: [1079]: WARN: pe_fence_node: Node ext2301s will be fenced because it is un-expectedly down
#Sep 27 16:08:15 ext2300s stonith: [18720]: CRIT: external_run_cmd: Calling '/usr/lib64/stonith/plugins/external/sbd reset ext2301s' returned
# TODO: evtl. "stonith-ng:.*succe"
# Show fencing activity: pengine "Node ... fenced" decisions and sbd resets,
# taken from the readable files in LOG, in rough timestamp order.
# (Despite the awk_ name this filter is implemented with grep.)
awk_fencing() {
echo "logs: $LOG" >&2
local logfile
for logfile in ${LOG}; do
[ -r "$logfile" ] && cat "$logfile"
done | sort -M -k1,3 |
grep -e "pengine:.*Node.*fenced" -e "sbd:.*reset"
}
# TODO: reading from pipe
# main()
# Dispatch on the single command-line option; every branch builds LOG
# (the file list consumed by the helper functions) and then exits.
case $1 in
-v|--version)
# version string lives in the "# Version:" header of this script itself
echo -n "$(basename $EXE) "
head -11 $EXE | grep "^# Version: "
exit
;;
-z|--zip)
# count over rotated/compressed logs plus the current one
LOG=""
for z in ${ZIPPED_LOG}; do
test -s $z && LOG="${LOG} ${z}"
done
# unzipped log has to be last in loop :-/
test -s ${CLUSTER_LOG} && LOG="${LOG} ${CLUSTER_LOG}"
run_grep_count
exit
;;
-c|--count)
# per-pattern match counts over the current log only
test -s ${CLUSTER_LOG} && LOG="${CLUSTER_LOG}"
run_grep_count
exit
;;
-f|--fencing)
test -s ${CLUSTER_LOG} && LOG="${CLUSTER_LOG}"
awk_fencing
exit
;;
-m|--migration)
test -s ${CLUSTER_LOG} && LOG="${CLUSTER_LOG}"
awk_migration
exit
;;
-s|--show)
test -s ${CLUSTER_LOG} && LOG="${CLUSTER_LOG}"
run_grep_show
exit
;;
*)
# unknown/absent option: print usage
help
exit
;;
esac
#
| true
|
da37a6eade8462b4bfcac44586f78126a6091db4
|
Shell
|
norisio/dotfiles
|
/deploy_nvim.sh
|
UTF-8
| 252
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Symlink this repository's Neovim configuration (init.lua and lua/) into the
# XDG config directory ~/.config/nvim.
#
# Robustness fixes: all expansions are quoted so paths containing spaces work,
# and `cd ... && pwd` ensures HERE is empty (instead of the caller's cwd) if
# the cd ever fails.
HERE=$(cd "$(dirname "$0")" && pwd)
XDG_NVIM_DIR="$HOME/.config/nvim"
mkdir -p "$XDG_NVIM_DIR"
echo "created $XDG_NVIM_DIR"
# link NAME: symlink $HERE/NAME into the nvim config directory.
# (Intentionally still plain `ln -s`: re-running over an existing link fails
# loudly rather than silently replacing it.)
function link() {
ln -s "$HERE/$1" "$XDG_NVIM_DIR/$1"
echo "created symbolic link to $1"
}
link init.lua
link lua
| true
|
bdfc991a43f5f889689499f2ce574d471254191d
|
Shell
|
tteck/Proxmox
|
/install/transmission-install.sh
|
UTF-8
| 905
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Copyright (c) 2021-2023 tteck
# Author: tteck (tteckster)
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
# Proxmox helper-script installer for Transmission inside a container.
# The shared helpers (color, msg_info/msg_ok, $STD, network_check, ...) are
# provided by the caller through the FUNCTIONS_FILE_PATH environment variable
# and sourced here from stdin.
source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt-get install -y curl
$STD apt-get install -y sudo
$STD apt-get install -y mc
msg_ok "Installed Dependencies"
msg_info "Installing Transmission"
$STD apt-get install -y transmission-daemon
# The daemon must be stopped while its settings.json is edited, or it will
# overwrite the file on shutdown.
systemctl stop transmission-daemon
# Disable both the RPC IP whitelist and the RPC host-name whitelist so the
# web interface is reachable from outside the container.
sed -i '{s/"rpc-whitelist-enabled": true/"rpc-whitelist-enabled": false/g; s/"rpc-host-whitelist-enabled": true,/"rpc-host-whitelist-enabled": false,/g}' /etc/transmission-daemon/settings.json
systemctl start transmission-daemon
msg_ok "Installed Transmission"
motd_ssh
customize
msg_info "Cleaning up"
$STD apt-get autoremove
$STD apt-get autoclean
msg_ok "Cleaned"
| true
|
d3e771a55ec0554cc0178d0f08f03922b786afe6
|
Shell
|
c-sh0/old_website
|
/sh/ndd
|
UTF-8
| 474
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/ksh
#
# ndd.sh
#
# Andres Kroonmaa kindly supplied a nifty script to check all existing values
# for a network component (tcp, udp, ip, icmp, etc.). Solaris
# Uses ndd(1M) see man page for more information
#
PATH=/usr/sbin:$PATH
# Require the driver name (tcp, udp, ip, ...) as the sole argument.
if [ -z "$1" ]; then
echo "Usage: $0 [udp | tcp | ip | icmp | arp | ... ]"
exit
fi
# Ask ndd for the driver's parameter list ('?'), then for every read/write
# parameter generate a line of the form
#   echo <param> = `ndd /dev/<drv> <param>`
# and execute the generated script with sh, printing every current value.
# NOTE(review): the awk variable `t` is never assigned, so it expands to the
# empty string in the concatenation below — presumably a leftover; verify.
ndd /dev/$1 '?' | nawk -v c="ndd /dev/$1" '
/write/ {
split($0,a,/[ \t(]/);
n=c t " " a[1];
printf "echo %s = ",a[1];
printf "`%s`\n",n;
}' | sh
| true
|
c2523ce2c2a66ebe112b1bc3bd1fc576d157f9b8
|
Shell
|
tsitle/dockerimage-ws-apache_base
|
/build-ctx/files/start.sh
|
UTF-8
| 25,646
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# by TS, May 2019
#
VAR_MYNAME="$(basename "$0")"
# ----------------------------------------------------------
# @param string $1 Message
#
# @return void
# Emit "<script>: <msg>" on stderr and append a timestamped copy to
# $LCFG_LOG_PATH/start_script.log (falls back to /var/log when unset).
function _log_err() {
local log_dir="${LCFG_LOG_PATH:-/var/log}"
local stamp
stamp="$(date --rfc-3339=seconds)"
printf '%s: %s\n' "$VAR_MYNAME" "$1" >&2
printf '%s %s: %s\n' "$stamp" "$VAR_MYNAME" "$1" >> "$log_dir/start_script.log"
}
# @param string $1 Message
#
# @return void
# Emit "<script>: <msg>" on stdout and append a timestamped copy to
# $LCFG_LOG_PATH/start_script.log (falls back to /var/log when unset).
function _log_def() {
local log_dir="${LCFG_LOG_PATH:-/var/log}"
local stamp
stamp="$(date --rfc-3339=seconds)"
printf '%s: %s\n' "$VAR_MYNAME" "$1"
printf '%s %s: %s\n' "$stamp" "$VAR_MYNAME" "$1" >> "$log_dir/start_script.log"
}
# @return void
function _sleepBeforeAbort() {
# to allow the user to see this message in 'docker logs -f CONTAINER' we wait before exiting
_log_err "(sleeping 5s before aborting)"
local TMP_CNT=0
while [ $TMP_CNT -lt 5 ]; do
sleep 1
_log_err "(...)"
TMP_CNT=$(( TMP_CNT + 1 ))
done
_log_err "(aborting now)"
exit 1
}
# ----------------------------------------------------------
LCFG_LOG_PATH="/var/log"
_log_def "----------------"
# ----------------------------------------------------------
# Make the docker host reachable as "dockerhost" inside the container:
# the container's default gateway is the host's bridge address.
export HOST_IP=$(/sbin/ip route|awk '/default/ { print $3 }')
echo "$HOST_IP dockerhost" >> /etc/hosts
# ----------------------------------------------------------
# Detect which webserver flavor this image carries; Apache wins if both
# config trees happen to exist.
LVAR_WS_IS_APACHE=false
[ -d /etc/apache2 ] && LVAR_WS_IS_APACHE=true
LVAR_WS_IS_NGINX=false
[ "$LVAR_WS_IS_APACHE" = "false" -a -d /etc/nginx ] && LVAR_WS_IS_NGINX=true
if [ "$LVAR_WS_IS_APACHE" != "true" -a "$LVAR_WS_IS_NGINX" != "true" ]; then
_log_err "Error: could not determine webserver type. Aborting."
_sleepBeforeAbort
fi
# ----------------------------------------------------------
# User-overridable configuration (CF_*), each with a sane default.
# Webroot layout and primary host name:
CF_WEBROOT="${CF_WEBROOT:-/var/www/html}"
CF_WEBROOT_SITE="${CF_WEBROOT_SITE:-}"
CF_WEBROOT_SITE_ORG="${CF_WEBROOT_SITE_ORG:-}"
CF_PROJ_PRIMARY_FQDN="${CF_PROJ_PRIMARY_FQDN:-}"
# Which default vhosts to create/enable:
CF_ENABLE_HTTP=${CF_ENABLE_HTTP:-true}
CF_ENABLE_HTTPS=${CF_ENABLE_HTTPS:-false}
CF_CREATE_DEFAULT_HTTP_SITE=${CF_CREATE_DEFAULT_HTTP_SITE:-false}
CF_CREATE_DEFAULT_HTTPS_SITE=${CF_CREATE_DEFAULT_HTTPS_SITE:-false}
CF_HTTPS_FQDN_DEFAULT="${CF_PROJ_PRIMARY_FQDN:-default.localhost}"
# Numeric IDs for the web, PHP-FPM and ssl-cert accounts (to match host mounts):
CF_WWWDATA_USER_ID=${CF_WWWDATA_USER_ID:-33}
CF_WWWDATA_GROUP_ID=${CF_WWWDATA_GROUP_ID:-33}
CF_WWWFPM_USER_ID=${CF_WWWFPM_USER_ID:-1000}
CF_WWWFPM_GROUP_ID=${CF_WWWFPM_GROUP_ID:-1000}
CF_SSLCERT_GROUP_ID=${CF_SSLCERT_GROUP_ID:-120}
# Locale / timezone / misc. toggles:
CF_LANG="${CF_LANG:-}"
CF_TIMEZONE="${CF_TIMEZONE:-}"
CF_ENABLE_CRON=${CF_ENABLE_CRON:-false}
CF_SET_OWNER_AND_PERMS_WEBROOT=${CF_SET_OWNER_AND_PERMS_WEBROOT:-false}
# config file for PHP module XDebug is pre-configured for REMOTE_HOST=dockerhost
CF_ENABLE_XDEBUG=${CF_ENABLE_XDEBUG:-false}
CF_XDEBUG_REMOTE_HOST="${CF_XDEBUG_REMOTE_HOST:-}"
# PHP-FPM pool and php.ini tuning:
CF_PHPFPM_RUN_AS_WWWDATA=${CF_PHPFPM_RUN_AS_WWWDATA:-false}
CF_PHPFPM_ENABLE_OPEN_BASEDIR="${CF_PHPFPM_ENABLE_OPEN_BASEDIR:-true}"
CF_PHPFPM_UPLOAD_TMP_DIR="${CF_PHPFPM_UPLOAD_TMP_DIR:-/var/www/upload_tmp_dir/}"
CF_PHPFPM_PM_MAX_CHILDREN=${CF_PHPFPM_PM_MAX_CHILDREN:-5}
CF_PHPFPM_PM_START_SERVERS=${CF_PHPFPM_PM_START_SERVERS:-2}
CF_PHPFPM_PM_MIN_SPARE_SERVERS=${CF_PHPFPM_PM_MIN_SPARE_SERVERS:-1}
CF_PHPFPM_PM_MAX_SPARE_SERVERS=${CF_PHPFPM_PM_MAX_SPARE_SERVERS:-3}
CF_PHPFPM_UPLOAD_MAX_FILESIZE="${CF_PHPFPM_UPLOAD_MAX_FILESIZE:-100M}"
CF_PHPFPM_POST_MAX_SIZE="${CF_PHPFPM_POST_MAX_SIZE:-100M}"
CF_PHPFPM_MEMORY_LIMIT="${CF_PHPFPM_MEMORY_LIMIT:-512M}"
CF_PHPFPM_MAX_EXECUTION_TIME="${CF_PHPFPM_MAX_EXECUTION_TIME:-600}"
CF_PHPFPM_MAX_INPUT_TIME="${CF_PHPFPM_MAX_INPUT_TIME:-600}"
CF_PHPFPM_HTML_ERRORS=${CF_PHPFPM_HTML_ERRORS:-true}
CF_APACHE_TIMEOUT="${CF_APACHE_TIMEOUT:-300}"
# ----------------------------------------------------------
# Internal (LCFG_*) per-webserver paths, filled in from the detected flavor:
# sites-available/sites-enabled dirs, default vhost config names, the config
# filename extension and the directory holding the shipped template configs.
LCFG_WS_SITES_PATH_AVAIL=""
LCFG_WS_SITES_PATH_ENAB=""
LCFG_WS_SITECONF_DEF_HTTP=""
LCFG_WS_SITECONF_DEF_HTTPS=""
LCFG_WS_SITECONF_FEXT=""
LCFG_WS_SITECONF_ORG_PATH=""
if [ "$LVAR_WS_IS_APACHE" = "true" ]; then
LCFG_LOG_PATH="/var/log/apache2"
LCFG_WS_SITES_PATH_AVAIL="/etc/apache2/sites-available"
LCFG_WS_SITES_PATH_ENAB="/etc/apache2/sites-enabled"
LCFG_WS_SITECONF_DEF_HTTP="000-default-http.conf"
LCFG_WS_SITECONF_DEF_HTTPS="000-default-https.conf"
LCFG_WS_SITECONF_FEXT=".conf"
LCFG_WS_SITECONF_ORG_PATH="/root/apache-defaults"
elif [ "$LVAR_WS_IS_NGINX" = "true" ]; then
LCFG_LOG_PATH="/var/log/nginx"
LCFG_WS_SITES_PATH_AVAIL="/etc/nginx/sites-available"
LCFG_WS_SITES_PATH_ENAB="/etc/nginx/sites-enabled"
LCFG_WS_SITECONF_DEF_HTTP="030-default-http"
LCFG_WS_SITECONF_DEF_HTTPS="031-default-https"
LCFG_WS_SITECONF_FEXT=""
LCFG_WS_SITECONF_ORG_PATH="/root/nginx-defaults"
fi
# Locations for self-managed SSL material and the Let's Encrypt challenge root.
LCFG_SSL_PATH_HOST_CERTS="/etc/ssl/host-certs"
LCFG_SSL_PATH_HOST_KEYS="/etc/ssl/host-keys"
LCFG_SSL_PATH_LETSENCRYPT_WEBROOT="/var/www/letsencrypt_webroot"
# ----------------------------------------------------------
# @param string $1 Username/Groupname
#
# @return void
# Delete the user and the group named $1, if they exist; absence is ignored.
function _removeUserAndGroup() {
# getent probes existence so userdel/groupdel are only run when needed
getent passwd "$1" >/dev/null 2>&1 && userdel -f "$1"
getent group "$1" >/dev/null 2>&1 && groupdel "$1"
# always succeed so callers don't have to special-case "was not present"
return 0
}
# Change numeric IDs of user/group to user-supplied values
#
# @param string $1 Username/Groupname
# @param string $2 Numeric ID for User as string
# @param string $3 Numeric ID for Group as string
# @param string $4 optional: Additional Group-Memberships for User
#
# @return int EXITCODE
# (Re)create user+group $1 with the exact numeric IDs $2/$3, optionally adding
# the user to the supplementary groups listed in $4. Any pre-existing account
# with that name is removed first. Returns non-zero (with a logged error) on
# any validation or creation failure.
function _createUserGroup() {
local TMP_NID_U="$2"
local TMP_NID_G="$3"
# IDs must be purely numeric ...
echo -n "$TMP_NID_U" | grep -q -E "^[0-9]*$" || {
_log_err "Error: non-numeric User ID '$TMP_NID_U' supplied for '$1'. Aborting."
return 1
}
echo -n "$TMP_NID_G" | grep -q -E "^[0-9]*$" || {
_log_err "Error: non-numeric Group ID '$TMP_NID_G' supplied '$1'. Aborting."
return 1
}
# ... at most five digits ...
[ ${#TMP_NID_U} -gt 5 ] && {
_log_err "Error: numeric User ID '$TMP_NID_U' for '$1' has more than five digits. Aborting."
return 1
}
[ ${#TMP_NID_G} -gt 5 ] && {
_log_err "Error: numeric Group ID '$TMP_NID_G' for '$1' has more than five digits. Aborting."
return 1
}
# ... and must not be root's.
[ $TMP_NID_U -eq 0 ] && {
_log_err "Error: numeric User ID for '$1' may not be 0. Aborting."
return 1
}
[ $TMP_NID_G -eq 0 ] && {
_log_err "Error: numeric Group ID for '$1' may not be 0. Aborting."
return 1
}
local TMP_ADD_G="$4"
if [ -n "$TMP_ADD_G" ]; then
# supplementary group list: lowercase names/digits, comma-separated
echo -n "$TMP_ADD_G" | LC_ALL=C grep -q -E "^([0-9a-z_,]|-)*$" || {
_log_err "Error: additional Group-Memberships '$TMP_ADD_G' container invalid characters. Aborting."
return 1
}
fi
# drop any existing account with this name, then ensure the IDs are free
_removeUserAndGroup "$1"
getent passwd $TMP_NID_U >/dev/null 2>&1 && {
_log_err "Error: numeric User ID '$TMP_NID_U' already exists. Aborting."
return 1
}
getent group $TMP_NID_G >/dev/null 2>&1 && {
_log_err "Error: numeric Group ID '$TMP_NID_G' already exists. Aborting."
return 1
}
local TMP_ARG_ADD_GRPS=""
[ -n "$TMP_ADD_G" ] && TMP_ARG_ADD_GRPS="-G $TMP_ADD_G"
_log_def "Setting numeric user/group ID of '$1' to ${TMP_NID_U}/${TMP_NID_G}..."
groupadd -g ${TMP_NID_G} "$1" || {
_log_err "Error: could not create Group '$1'. Aborting."
return 1
}
# -M: no home dir; -s /bin/false: no login shell; -l: no lastlog entry
useradd -l -u ${TMP_NID_U} -g "$1" $TMP_ARG_ADD_GRPS -M -s /bin/false "$1" || {
_log_err "Error: could not create User '$1'. Aborting."
return 1
}
return 0
}
# ----------------------------------------------------------
# Create Upload directory for PHP-FPM
#
# @return void
# Create the PHP-FPM upload_tmp_dir (CF_PHPFPM_UPLOAD_TMP_DIR) and give it to
# the pool user, unless it is one of the shared system temp dirs.
function _createPhpFpmUploadDir() {
# strip a trailing slash so comparisons against /tmp and /var/tmp work
local TMP_UPL_DIR="$(echo -n "$CF_PHPFPM_UPLOAD_TMP_DIR" | sed -e 's,/$,,g')"
[ -d "$TMP_UPL_DIR" ] || mkdir "$TMP_UPL_DIR"
if [ "$TMP_UPL_DIR" != "/tmp" -a "$TMP_UPL_DIR" != "/var/tmp" ]; then
# owner matches the account the FPM pool runs as (see CF_PHPFPM_RUN_AS_WWWDATA)
if [ "$CF_PHPFPM_RUN_AS_WWWDATA" != "true" ]; then
chown wwwphpfpm:wwwphpfpm "$TMP_UPL_DIR" || return 1
else
chown www-data:www-data "$TMP_UPL_DIR" || return 1
fi
# group-writable with setgid, no access for others
chmod u=rwx,g=rwxs,o= "$TMP_UPL_DIR"
fi
}
# ----------------------------------------------------------
# @return int EXITCODE
# Recursively set ownership and permissions of the (site) webroot for the
# account that serves it (www-data, or wwwphpfpm when FPM runs as its own user).
function _setOwnerPermsWebroot() {
local TMP_WEB_USER="www-data"
local TMP_WEB_GROUP="www-data"
if [ -n "$CF_PHP_FPM_VERSION" -a "$CF_PHPFPM_RUN_AS_WWWDATA" != "true" ]; then
TMP_WEB_USER="wwwphpfpm"
TMP_WEB_GROUP="wwwphpfpm"
fi
local TMP_WEBR_SITE="$CF_WEBROOT"
[ -n "$CF_WEBROOT_SITE" -a -d "$CF_WEBROOT/$CF_WEBROOT_SITE" ] && TMP_WEBR_SITE="$TMP_WEBR_SITE/$CF_WEBROOT_SITE"
_log_def "Chown'ing and chmod'ing $TMP_WEBR_SITE"
_log_def " - chmod u=rwx,g=rwx,o=rx '$TMP_WEBR_SITE'"
chmod u=rwx,g=rwx,o=rx "$TMP_WEBR_SITE" || return 1
# NOTE(review): this second chmod immediately overrides the one above with
# read-only bits on the same directory; the `find -type d` below then resets
# it again. Looks like a leftover / possible bug — confirm intent.
chmod u=r,g=r,o=r "$TMP_WEBR_SITE" || return 1
_log_def " - find '$TMP_WEBR_SITE' -type d -exec chmod u=rwx,g=rwxs,o=rx"
find "$TMP_WEBR_SITE" -type d -exec chmod u=rwx,g=rwxs,o=rx '{}' \; || return 1
_log_def " - find '$TMP_WEBR_SITE' -type f -exec chmod ug=rw,o=r"
find "$TMP_WEBR_SITE" -type f -exec chmod ug=rw,o=r '{}' \; || return 1
_log_def " - chown $TMP_WEB_USER:$TMP_WEB_GROUP -R '$TMP_WEBR_SITE'"
chown $TMP_WEB_USER:$TMP_WEB_GROUP -R "$TMP_WEBR_SITE" || return 1
# for Neos CMS:
[ -f "$TMP_WEBR_SITE/flow" ] && {
_log_def " - chmod ug+x '$TMP_WEBR_SITE/flow'"
chmod ug+x "$TMP_WEBR_SITE/flow" || return 1
}
return 0
}
# ----------------------------------------------------------
# @return int EXITCODE
# Substitute the <DOCROOT>/<WEBROOT> placeholders in the default vhost
# templates (and the FPM pool config) with the configured site directory.
# For Neos CMS the document root is the site's Web/ subdirectory.
function _changeApacheWebroot() {
_log_def "Setting webroot to '$CF_WEBROOT/$CF_WEBROOT_SITE'..."
# TMP_WR_PATH_ORG is computed but currently unused below — kept as-is.
local TMP_WR_PATH_ORG=""
if [ -n "$CF_WEBROOT_SITE_ORG" ]; then
TMP_WR_PATH_ORG="$(echo -n "$CF_WEBROOT/$CF_WEBROOT_SITE_ORG" | sed -e 's/\//\\\//g')"
else
TMP_WR_PATH_ORG="$(echo -n "$CF_WEBROOT" | sed -e 's/\//\\\//g')"
fi
local TMP_DR_PATH="$CF_WEBROOT/$CF_WEBROOT_SITE"
local TMP_WR_PATH="$CF_WEBROOT/$CF_WEBROOT_SITE"
if [ "$CF_IS_FOR_NEOS_CMS" = "true" ]; then
TMP_DR_PATH="$TMP_DR_PATH/Web"
fi
# escape slashes so the paths can be used inside sed s/// expressions
local TMP_DR_PATH_SED="$(echo -n "$TMP_DR_PATH" | sed -e 's/\//\\\//g')"
local TMP_WR_PATH_SED="$(echo -n "$TMP_WR_PATH" | sed -e 's/\//\\\//g')"
if [ -f $LCFG_WS_SITES_PATH_AVAIL/$LCFG_WS_SITECONF_DEF_HTTP ]; then
sed -i \
-e "s/<DOCROOT>/${TMP_DR_PATH_SED}/g" \
-e "s/<WEBROOT>/${TMP_WR_PATH_SED}/g" \
$LCFG_WS_SITES_PATH_AVAIL/$LCFG_WS_SITECONF_DEF_HTTP
fi
if [ -f $LCFG_WS_SITES_PATH_AVAIL/$LCFG_WS_SITECONF_DEF_HTTPS ]; then
sed -i \
-e "s/<DOCROOT>/${TMP_DR_PATH_SED}/g" \
-e "s/<WEBROOT>/${TMP_WR_PATH_SED}/g" \
$LCFG_WS_SITES_PATH_AVAIL/$LCFG_WS_SITECONF_DEF_HTTPS
fi
if [ -n "$CF_PHP_FPM_VERSION" ]; then
sed -i \
-e "s/<DOCROOT>/${TMP_WR_PATH_SED}/g" \
/etc/php/${CF_PHP_FPM_VERSION}/fpm/pool.d/www.conf
fi
return 0
}
# ----------------------------------------------------------
# @return int EXITCODE
# Uncomment and fill in the ServerName directives of the default vhost
# templates with CF_PROJ_PRIMARY_FQDN (HTTPS additionally gets the SSL FQDN).
function _changeApacheServername() {
# the FQDN should not contain slashes - but just to be safe...
local TMP_FQDN="$(echo -n "$CF_PROJ_PRIMARY_FQDN" | sed -e 's/\//\\\//g')"
if [ -f $LCFG_WS_SITES_PATH_AVAIL/$LCFG_WS_SITECONF_DEF_HTTP ]; then
sed -i \
-e "s/^#ServerName <PRIMARY_FQDN>$/ServerName ${TMP_FQDN}/g" \
$LCFG_WS_SITES_PATH_AVAIL/$LCFG_WS_SITECONF_DEF_HTTP || return 1
fi
if [ -f $LCFG_WS_SITES_PATH_AVAIL/$LCFG_WS_SITECONF_DEF_HTTPS ]; then
sed -i \
-e "s/^#ServerName <PRIMARY_FQDN>$/ServerName ${TMP_FQDN}/g" \
$LCFG_WS_SITES_PATH_AVAIL/$LCFG_WS_SITECONF_DEF_HTTPS || return 1
#
local TMP_FQDN_SSL="$CF_HTTPS_FQDN_DEFAULT"
# the FQDN should not contain slashes - but just to be safe...
TMP_FQDN_SSL="$(echo -n "$TMP_FQDN_SSL" | sed -e 's/\//\\\//g')"
sed -i \
-e "s/^#ServerName <PRIMARY_FQDN_SSL>$/ServerName ${TMP_FQDN_SSL}/g" \
$LCFG_WS_SITES_PATH_AVAIL/$LCFG_WS_SITECONF_DEF_HTTPS || return 1
fi
return 0
}
# ----------------------------------------------------------
# @return int EXITCODE
# Rewrite the FPM pool config (pool.d/www.conf) from the CF_PHPFPM_* settings:
# pool user, upload tmp dir, optional open_basedir, process-manager limits and
# the php_admin_value/flag overrides.
function _changePhpFpmSettings() {
# run the pool as the dedicated wwwphpfpm account unless told otherwise
if [ "$CF_PHPFPM_RUN_AS_WWWDATA" != "true" ]; then
sed -i \
-e "s/^user = .*$/user = wwwphpfpm/g" \
-e "s/^group = .*$/group = wwwphpfpm/g" \
/etc/php/${CF_PHP_FPM_VERSION}/fpm/pool.d/www.conf || return 1
fi
#
# normalize the upload dir to a trailing slash, then sed-escape it
local TMP_UTD_PATH_SED="$CF_PHPFPM_UPLOAD_TMP_DIR"
echo -n "$TMP_UTD_PATH_SED" | grep -q -e "/$" || TMP_UTD_PATH_SED+="/"
TMP_UTD_PATH_SED="$(echo -n "$TMP_UTD_PATH_SED" | sed -e 's/\//\\\//g')"
sed -i \
-e "s/<UPLOADTMPDIR>/${TMP_UTD_PATH_SED}/g" \
/etc/php/${CF_PHP_FPM_VERSION}/fpm/pool.d/www.conf || return 1
#
# uncomment the open_basedir restriction when requested
if [ "$CF_PHPFPM_ENABLE_OPEN_BASEDIR" = "true" ]; then
sed -i \
-e "s/^;php_admin_value\[open_basedir\] = /php_admin_value[open_basedir] = /g" \
/etc/php/${CF_PHP_FPM_VERSION}/fpm/pool.d/www.conf || return 1
fi
#
local TMP_HE="on"
[ "$CF_PHPFPM_HTML_ERRORS" != "true" ] && TMP_HE="off"
sed -i \
-e "s/^pm\.max_children = .*$/pm.max_children = ${CF_PHPFPM_PM_MAX_CHILDREN}/g" \
-e "s/^pm\.start_servers = .*$/pm.start_servers = ${CF_PHPFPM_PM_START_SERVERS}/g" \
-e "s/^pm\.min_spare_servers = .*$/pm.min_spare_servers = ${CF_PHPFPM_PM_MIN_SPARE_SERVERS}/g" \
-e "s/^pm\.max_spare_servers = .*$/pm.max_spare_servers = ${CF_PHPFPM_PM_MAX_SPARE_SERVERS}/g" \
-e "s/^php_admin_value\[upload_max_filesize\] = .*$/php_admin_value[upload_max_filesize] = ${CF_PHPFPM_UPLOAD_MAX_FILESIZE}/g" \
-e "s/^php_admin_value\[post_max_size\] = .*$/php_admin_value[post_max_size] = ${CF_PHPFPM_POST_MAX_SIZE}/g" \
-e "s/^php_admin_value\[memory_limit\] = .*$/php_admin_value[memory_limit] = ${CF_PHPFPM_MEMORY_LIMIT}/g" \
-e "s/^php_admin_value\[max_execution_time\] = .*$/php_admin_value[max_execution_time] = ${CF_PHPFPM_MAX_EXECUTION_TIME}/g" \
-e "s/^php_admin_value\[max_input_time\] = .*$/php_admin_value[max_input_time] = ${CF_PHPFPM_MAX_INPUT_TIME}/g" \
-e "s/^php_admin_flag\[html_errors\] = .*$/php_admin_flag[html_errors] = ${TMP_HE}/g" \
/etc/php/${CF_PHP_FPM_VERSION}/fpm/pool.d/www.conf
}
# @return int EXITCODE
# Apply global Apache settings: rewrite the Timeout directive in apache2.conf
# from CF_APACHE_TIMEOUT. Returns sed's exit status.
_changeApacheSettings() {
local apache_conf="/etc/apache2/apache2.conf"
sed -i -e "s/^Timeout .*/Timeout ${CF_APACHE_TIMEOUT}/g" "$apache_conf"
}
# ----------------------------------------------------------
# @param string $1 Filename
#
# @return int EXITCODE
# Set date.timezone = '$CF_TIMEZONE' in /etc/php/<ver>/$1 (php.ini) — but only
# if the file exists and still contains the commented-out default directive.
function _changePhpTimezone_sub() {
local TMP_FN="/etc/php/${CF_PHP_FPM_VERSION}/$1"
[ ! -f "$TMP_FN" ] && return 0
#
# nothing to do if the directive is not present in commented form
grep -q "^;date.timezone =" "$TMP_FN"
[ $? -ne 0 ] && return 0
# sed-escape slashes in the timezone (e.g. Europe/Berlin)
local TMP_TZ="$(echo -n "$CF_TIMEZONE" | sed -e 's/\//\\\//g')"
# write to a temp file first so a failed sed does not truncate php.ini
sed -e "s/^;date.timezone =\$/date.timezone = '$TMP_TZ'/g" "$TMP_FN" > "${TMP_FN}.tmp" || return 1
mv "${TMP_FN}.tmp" "$TMP_FN"
}
# @return int EXITCODE
# Apply the configured timezone to both the FPM and the CLI php.ini.
# Fails fast (return 1) if the FPM file update fails; otherwise returns the
# status of the CLI update.
_changePhpTimezone() {
if ! _changePhpTimezone_sub "fpm/php.ini"; then
return 1
fi
_changePhpTimezone_sub "cli/php.ini"
}
# @return int EXITCODE
# Point the XDebug module config at CF_XDEBUG_REMOTE_HOST by rewriting the
# xdebug.remote_host line in mods-available/xdebug.ini.
function _changeXdebugRemoteHost() {
_log_def "Setting XDebug Remote Host to '$CF_XDEBUG_REMOTE_HOST'..."
local TMP_FN="/etc/php/${CF_PHP_FPM_VERSION}/mods-available/xdebug.ini"
# sed-escape slashes in the host value, write via a temp file
local TMP_RH="$(echo -n "$CF_XDEBUG_REMOTE_HOST" | sed -e 's/\//\\\//g')"
sed -e "s/^xdebug\.remote_host=.*/xdebug.remote_host=\"$TMP_RH\"/g" "$TMP_FN" > "${TMP_FN}.tmp" || return 1
mv "${TMP_FN}.tmp" "$TMP_FN"
}
# ----------------------------------------------------------
# @return int EXITCODE
# Install the shipped default-HTTP vhost template into sites-available.
# Returns cp's exit status.
_http_createDefaultSite() {
local template="$LCFG_WS_SITECONF_ORG_PATH/$LCFG_WS_SITECONF_DEF_HTTP"
cp "$template" "$LCFG_WS_SITES_PATH_AVAIL/"
}
# ----------------------------------------------------------
# @return int EXITCODE
# Install the shipped default-HTTPS vhost template into sites-available and
# replace its "-<PRIMARY_FQDN_SSL>." placeholder (used in cert/key filenames)
# with the configured default HTTPS FQDN.
function _ssl_createDefaultSite() {
cp $LCFG_WS_SITECONF_ORG_PATH/$LCFG_WS_SITECONF_DEF_HTTPS \
$LCFG_WS_SITES_PATH_AVAIL/ || return 1
#
local TMP_FQDN_SSL="$CF_HTTPS_FQDN_DEFAULT"
# the FQDN should not contain slashes - but just to be safe...
TMP_FQDN_SSL="$(echo -n "$TMP_FQDN_SSL" | sed -e 's/\//\\\//g')"
sed -i \
-e "s/-<PRIMARY_FQDN_SSL>\./-${TMP_FQDN_SSL}\./g" \
$LCFG_WS_SITES_PATH_AVAIL/$LCFG_WS_SITECONF_DEF_HTTPS || return 1
}
# @return void
# Best-effort: if directory $1 exists, chown it to $2:$3 and chmod it to $4.
# Missing directories and chown/chmod failures are ignored; always returns 0.
_ssl_setOwnerAndPerms() {
local dir="$1" owner="$2" group="$3" mode="$4"
if [ -d "$dir" ]; then
chown "$owner:$group" "$dir" && chmod "$mode" "$dir"
fi
return 0
}
# @param string $1 Hostname
# @param string $2 Domain
# @param string $3 optional: "internal"
#
# @return int EXITCODE
# Generate a private key and self-signed certificate for host $1 in domain $2
# via /root/sslgen.sh, unless both files already exist. With $3 = "internal"
# the files go to the "-internal" cert/key directories.
function _ssl_generateCert() {
local TMP_START_PATH_SUF=""
[ "$3" = "internal" ] && TMP_START_PATH_SUF="-$3"
local TMP_START_PRIVKEY_FN="${LCFG_SSL_PATH_HOST_KEYS}${TMP_START_PATH_SUF}/private-${1}.${2}.key"
local TMP_START_PUB_CERT_FN="${LCFG_SSL_PATH_HOST_CERTS}${TMP_START_PATH_SUF}/client-${1}.${2}.crt"
if [ -f "$TMP_START_PRIVKEY_FN" -a -f "$TMP_START_PUB_CERT_FN" ]; then
_log_def "Not generating '$TMP_START_PRIVKEY_FN' and '$TMP_START_PUB_CERT_FN'. Files already exist."
else
_log_def "Generating '$TMP_START_PRIVKEY_FN' and '$TMP_START_PUB_CERT_FN'..."
# sslgen.sh is expected to create exactly the two paths computed above
/root/sslgen.sh "${1}.${2}" $3 || return 1
fi
return 0
}
# @return int EXITCODE
# Generate the internal cert/key pair for the default HTTPS vhost, splitting
# CF_PROJ_PRIMARY_FQDN (fallback default.localhost) into host and domain at
# the first dot.
_ssl_generateCertDefaultSite() {
local fqdn="${CF_PROJ_PRIMARY_FQDN:-default.localhost}"
local host_part="${fqdn%%.*}"
local domain_part="${fqdn#*.}"
_ssl_generateCert "$host_part" "$domain_part" "internal"
}
# @return int EXITCODE
# For every enabled non-default "*-https" vhost, parse the cert/key paths out
# of its config (Apache SSLCertificate*File or nginx ssl_certificate*), derive
# the FQDN from the filenames and generate the pair via sslgen.sh if missing.
function _ssl_generateCertOtherVhosts() {
local TMP_CNT="$(find $LCFG_WS_SITES_PATH_ENAB/ -maxdepth 1 -type l -name "*-https${LCFG_WS_SITECONF_FEXT}" | grep -v "$LCFG_WS_SITECONF_DEF_HTTPS" | wc -l)"
if [ "$TMP_CNT" = "0" ]; then
_log_def "No further enabled virtual hosts with HTTPS found."
else
local TMP_FN
for TMP_FN in `find $LCFG_WS_SITES_PATH_ENAB/ -maxdepth 1 -type l -name "*-https${LCFG_WS_SITECONF_FEXT}" | grep -v "$LCFG_WS_SITECONF_DEF_HTTPS"`; do
_log_def "Generate Cert/Key for Virtual Host '$TMP_FN'..."
local TMP_CRT_FN=""
local TMP_KEY_FN=""
# pull the configured cert/key paths out of the vhost file
if [ "$LVAR_WS_IS_APACHE" = "true" ]; then
TMP_CRT_FN="$(grep '^[[:space:]]*SSLCertificateFile /.*$' "$TMP_FN" | awk '/client-.*\.crt/ { print $2 }')"
TMP_KEY_FN="$(grep '^[[:space:]]*SSLCertificateKeyFile /.*$' "$TMP_FN" | awk '/private-.*\.key/ { print $2 }')"
elif [ "$LVAR_WS_IS_NGINX" = "true" ]; then
TMP_CRT_FN="$(grep '^[[:space:]]*ssl_certificate /.*$' "$TMP_FN" | awk '/client-.*\.crt/ { print $2 }' | tr -d \;)"
TMP_KEY_FN="$(grep '^[[:space:]]*ssl_certificate_key /.*$' "$TMP_FN" | awk '/private-.*\.key/ { print $2 }' | tr -d \;)"
fi
[ -z "$TMP_CRT_FN" -o -z "$TMP_KEY_FN" ] && {
_log_err "Error: could not determine Cert/Key filename. Aborting."
return 1
}
#echo " crt=$TMP_CRT_FN"
#echo " key=$TMP_KEY_FN"
# the FQDN is encoded in the filenames: client-<fqdn>.crt / private-<fqdn>.key
TMP_CRT_FQDN="$(basename "$TMP_CRT_FN")"
TMP_CRT_FQDN="$(echo -n "$TMP_CRT_FQDN" | sed -e 's/^client-//' -e 's/\.crt$//')"
TMP_KEY_FQDN="$(basename "$TMP_KEY_FN")"
TMP_KEY_FQDN="$(echo -n "$TMP_KEY_FQDN" | sed -e 's/^private-//' -e 's/\.key$//')"
#echo " crt FQDN=$TMP_CRT_FQDN"
#echo " key FQDN=$TMP_KEY_FQDN"
# cert and key must agree on the FQDN
[ -z "$TMP_CRT_FQDN" -o -z "$TMP_KEY_FQDN" -o "$TMP_CRT_FQDN" != "$TMP_KEY_FQDN" ] && {
_log_err "Error: FQDN not found in Cert/Key filenames. Aborting."
return 1
}
if [ -f "$TMP_KEY_FN" -a -f "$TMP_CRT_FN" ]; then
_log_def "Not generating '$TMP_KEY_FN' and '$TMP_CRT_FN'. Files already exist."
else
_log_def "Generating '$TMP_KEY_FN' and '$TMP_CRT_FN'..."
/root/sslgen.sh "$TMP_CRT_FQDN" || return 1
fi
done
fi
return 0
}
# Change numeric ID of group 'ssl-cert' to user-supplied numeric ID
#
# @return int EXITCODE
# Recreate the 'ssl-cert' group with the user-supplied numeric GID so key-file
# group ownership matches host-mounted material.
function _ssl_createSslCertGroup() {
# drop any existing group first; absence is fine
getent group "ssl-cert" >/dev/null 2>&1 && groupdel "ssl-cert"
_log_def "Setting numeric group ID of ssl-cert to ${CF_SSLCERT_GROUP_ID}..."
groupadd -g ${CF_SSLCERT_GROUP_ID} "ssl-cert"
}
# ----------------------------------------------------------
# --- HTTPS setup: ssl-cert group, key/cert dir perms, optional default vhost,
# cert generation for every enabled HTTPS vhost, SSL module.
# NOTE(review): a2ensite/a2enmod are Apache tools; how the nginx variant of
# this image handles these calls is not visible here — confirm.
if [ "$CF_ENABLE_HTTPS" = "true" ]; then
_ssl_createSslCertGroup || {
_log_err "Error: creating ssl-cert group with GID=${CF_SSLCERT_GROUP_ID} failed. Aborting."
_sleepBeforeAbort
}
#
_ssl_setOwnerAndPerms "$LCFG_SSL_PATH_HOST_CERTS" root root "755"
_ssl_setOwnerAndPerms "$LCFG_SSL_PATH_HOST_KEYS" root ssl-cert "750"
_ssl_setOwnerAndPerms "$LCFG_SSL_PATH_LETSENCRYPT_WEBROOT" root root "755"
if [ "$CF_CREATE_DEFAULT_HTTPS_SITE" = "true" ]; then
# create default HTTPS site
_log_def "Create default HTTPS site..."
_ssl_createDefaultSite || {
_sleepBeforeAbort
}
# enable default HTTPS site
if [ ! -h $LCFG_WS_SITES_PATH_ENAB/$LCFG_WS_SITECONF_DEF_HTTPS ]; then
_log_def "Enable default HTTPS site..."
a2ensite $LCFG_WS_SITECONF_DEF_HTTPS || {
_sleepBeforeAbort
}
fi
# generate SSL-Cert/Key for default virtual host
if [ -h $LCFG_WS_SITES_PATH_ENAB/$LCFG_WS_SITECONF_DEF_HTTPS ]; then
_ssl_generateCertDefaultSite || {
_sleepBeforeAbort
}
fi
fi
# generate SSL-Cert/Key for all other virtual hosts
_ssl_generateCertOtherVhosts || exit 1
# enable Apache SSL module
a2enmod ssl || {
_sleepBeforeAbort
}
else
# disable default HTTPS site
if [ -h $LCFG_WS_SITES_PATH_ENAB/$LCFG_WS_SITECONF_DEF_HTTPS ]; then
_log_def "Disable default HTTPS site..."
a2dissite $LCFG_WS_SITECONF_DEF_HTTPS || {
_sleepBeforeAbort
}
fi
fi
# --- HTTP setup: create/enable the default HTTP vhost when requested,
# otherwise make sure it is disabled.
if [ "$CF_ENABLE_HTTP" = "true" ]; then
if [ "$CF_CREATE_DEFAULT_HTTP_SITE" = "true" ]; then
# create default HTTP site
_log_def "Create default HTTP site..."
_http_createDefaultSite || {
_sleepBeforeAbort
}
# enable default HTTP site
if [ ! -h $LCFG_WS_SITES_PATH_ENAB/$LCFG_WS_SITECONF_DEF_HTTP ]; then
_log_def "Enable default HTTP site..."
a2ensite $LCFG_WS_SITECONF_DEF_HTTP || {
_sleepBeforeAbort
}
fi
fi
else
# disable default HTTP site
if [ -h $LCFG_WS_SITES_PATH_ENAB/$LCFG_WS_SITECONF_DEF_HTTP ]; then
_log_def "Disable default HTTP site..."
a2dissite $LCFG_WS_SITECONF_DEF_HTTP || {
_sleepBeforeAbort
}
fi
fi
# --- Accounts and PHP-FPM: recreate www-data with the requested IDs and, when
# FPM is present, the wwwphpfpm account, its upload dir and the pool settings.
_log_def "createUserGroup 'www-data'..."
_createUserGroup "www-data" "${CF_WWWDATA_USER_ID}" "${CF_WWWDATA_GROUP_ID}" || {
_sleepBeforeAbort
}
if [ -n "$CF_PHP_FPM_VERSION" ]; then
if [ "$CF_PHPFPM_RUN_AS_WWWDATA" != "true" ]; then
_log_def "createUserGroup 'wwwphpfpm'..."
# wwwphpfpm is also made a member of www-data so both can share files
_createUserGroup "wwwphpfpm" "${CF_WWWFPM_USER_ID}" "${CF_WWWFPM_GROUP_ID}" "www-data" || {
_sleepBeforeAbort
}
[ ! -d /home/wwwphpfpm ] && mkdir /home/wwwphpfpm
chown wwwphpfpm:wwwphpfpm -R /home/wwwphpfpm
chmod 750 /home/wwwphpfpm
fi
_log_def "createPhpFpmUploadDir..."
_createPhpFpmUploadDir || {
_log_err "Error: could not create PHP-FPM Upload dir. Aborting."
_sleepBeforeAbort
}
_log_def "changePhpFpmSettings..."
_changePhpFpmSettings || {
_log_err "Error: could not change PHP-FPM settings. Aborting."
_sleepBeforeAbort
}
fi
# --- Webroot: create the site directory (plus Web/ for Neos), optionally fix
# ownership/permissions, then patch vhost templates and Apache settings.
if [ -n "$CF_WEBROOT" -a -d "$CF_WEBROOT" ]; then
if [ -n "$CF_WEBROOT_SITE" -a ! -d "$CF_WEBROOT/$CF_WEBROOT_SITE" ]; then
_log_def "mkdir '$CF_WEBROOT/$CF_WEBROOT_SITE'..."
mkdir -p "$CF_WEBROOT/$CF_WEBROOT_SITE" || {
_sleepBeforeAbort
}
fi
if [ "$CF_IS_FOR_NEOS_CMS" = "true" ]; then
if [ -n "$CF_WEBROOT_SITE" -a ! -d "$CF_WEBROOT/$CF_WEBROOT_SITE/Web" ]; then
_log_def "mkdir '$CF_WEBROOT/$CF_WEBROOT_SITE/Web'..."
mkdir "$CF_WEBROOT/$CF_WEBROOT_SITE/Web" || {
_sleepBeforeAbort
}
fi
fi
if [ "$CF_SET_OWNER_AND_PERMS_WEBROOT" = "true" ]; then
_log_def "setOwnerPermsWebroot..."
_setOwnerPermsWebroot || {
_log_err "Error: could not set owner/perms of webroot. Aborting."
_sleepBeforeAbort
}
fi
fi
if [ -n "$CF_WEBROOT" -a -d "$CF_WEBROOT" -a \
-n "$CF_WEBROOT_SITE" -a -d "$CF_WEBROOT/$CF_WEBROOT_SITE" ]; then
_log_def "changeApacheWebroot..."
_changeApacheWebroot || {
_sleepBeforeAbort
}
fi
if [ -n "$CF_PROJ_PRIMARY_FQDN" ]; then
_log_def "changeApacheServername..."
_changeApacheServername || {
_sleepBeforeAbort
}
fi
_log_def "changeApacheSettings..."
_changeApacheSettings || {
_log_err "Error: could not change Apache settings. Aborting."
_sleepBeforeAbort
}
# ----------------------------------------------------------
# --- Locale and timezone: export/update-locale for CF_LANG, and wire
# CF_TIMEZONE into /etc/localtime, /etc/timezone and the php.ini files.
if [ -n "$CF_LANG" ]; then
_log_def "Updating locale with '$CF_LANG'..."
export LANG=$CF_LANG
export LANGUAGE=$CF_LANG
export LC_ALL=$CF_LANG
update-locale LANG=$CF_LANG || {
_sleepBeforeAbort
}
update-locale LANGUAGE=$CF_LANG
update-locale LC_ALL=$CF_LANG
# persist for interactive root shells as well
echo "export LANG=$CF_LANG" >> ~/.bashrc
echo "export LANGUAGE=$CF_LANG" >> ~/.bashrc
echo "export LC_ALL=$CF_LANG" >> ~/.bashrc
fi
if [ -n "$CF_TIMEZONE" ]; then
[ ! -f "/usr/share/zoneinfo/$CF_TIMEZONE" ] && {
_log_err "Error: could not find timezone file for '$CF_TIMEZONE'. Aborting."
_sleepBeforeAbort
}
_log_def "Setting timezone to '$CF_TIMEZONE'..."
export TZ=$CF_TIMEZONE
ln -snf /usr/share/zoneinfo/$CF_TIMEZONE /etc/localtime
echo $CF_TIMEZONE > /etc/timezone
#
_changePhpTimezone
fi
# ----------------------------------------------------------
# --- XDebug configuration (PHP-FPM only) ---
# NOTE(review): the second condition looks like it skips combinations for
# which no xdebug package is available (PHP 7.4 outside amd64) -- confirm.
if [ -n "$CF_PHP_FPM_VERSION" ] && \
[ "$CF_PHP_FPM_VERSION" != "7.4" -o "$CF_CPUARCH_DEB_DIST" = "amd64" ]; then
# Point xdebug at the configured remote host (helper defined elsewhere).
if [ -n "$CF_XDEBUG_REMOTE_HOST" ]; then
_changeXdebugRemoteHost || {
_sleepBeforeAbort
}
fi
if [ "$CF_ENABLE_XDEBUG" = "true" ]; then
_log_def "Enabling XDebug..."
phpenmod xdebug
fi
fi
# ----------------------------------------------------------
# for child docker images:
# Hook point: derived images may ship an executable /start-child.sh to run
# additional setup before the services start.
if [ -x /start-child.sh ]; then
_log_def "Calling '/start-child.sh'..."
/start-child.sh || {
_sleepBeforeAbort
}
fi
# ----------------------------------------------------------
# --- Service startup ---
if [ -n "$CF_PHP_FPM_VERSION" ]; then
_log_def "Starting PHP-FPM..."
service php$CF_PHP_FPM_VERSION-fpm start || {
_sleepBeforeAbort
}
fi
if [ "$CF_ENABLE_CRON" = "true" ]; then
#mkdir -p /var/spool/cron/crontabs 2>/dev/null
#chmod +t /var/spool/cron/crontabs
#chown :crontab /var/spool/cron/crontabs
#
# Fix ownership/permissions of any crontab files present (e.g. mounted into
# the container): each file must be owned by the user it is named after.
TMP_FCNT="$(find /var/spool/cron/crontabs -type f | wc -l)"
if [ "$TMP_FCNT" != "0" ]; then
_log_def "chown+chmod '/var/spool/cron/crontabs/*'..."
for FN in /var/spool/cron/crontabs/*; do
chown $(basename "$FN"):crontab "$FN"
chmod 600 "$FN"
done
fi
#
_log_def "Starting cron..."
service cron start || {
_sleepBeforeAbort
}
fi
# Run Apache in the foreground so it stays the container's main process and
# keeps the container alive.
_log_def "Starting apache..."
apachectl -D FOREGROUND
| true
|
948f7ee2428f350f5b0fe0b683b3ffddddab8b9f
|
Shell
|
artisdom/_ebDev
|
/scripts/font/FontForgeFonts2Fonts.sh
|
UTF-8
| 2,555
| 4.21875
| 4
|
[] |
no_license
|
# DESCRIPTION
# Converts all FontForge-compatible files in the current directory of type $1 to type $2.
# DEPENDENCIES
# - FontForge installed and in your PATH
# - getFullPathToFile.sh (to locate FontForgeConvert.pe) in your PATH
# - for Windows, cygpath (which comes with both Cygwin and MSYS2)
# USAGE
# Run with these parameters:
# - $1 Source type to convert from
# - $2 Target type to convert to
# Example that converts all .sfd files to .otf:
# FontForgeFonts2Fonts.sh sfd otf
# CODE
# Require both positional parameters; exit non-zero on misuse so callers can
# detect the failure (previously a bare 'exit' returned 0 here).
if [ ! "$1" ]
then echo 'no source format parameter $1 passed to script; will exit.'
exit 1
else
sourceFormat=$1
echo "source format passed to script: $1"
fi
if [ ! "$2" ]
then echo 'no target format parameter $2 passed to script; will exit.'
exit 1
else
destFormat=$2
echo "target format passed to script: $2"
fi
# get full path to the FontForge conversion script:
fullPathToFFscript=$(getFullPathToFile.sh FontForgeConvert.pe)
# Convert it to a Windows-style path when under Cygwin/MSYS2. ${OS:-} is
# quoted and defaulted so the test neither word-splits nor prints a
# "unary operator expected" error when OS is unset (normal on Linux/macOS).
if [ "${OS:-}" = "Windows_NT" ]
then
# cygpath is also shipped with MSYS2, so this works with cygwin and MSYS2!
# tr strips stray CR / EOF characters from the converted path:
fullPathToFFscript=$(cygpath -w "$fullPathToFFscript" | tr -d '\15\32')
fi
currDir=$(pwd)
# Collect matching files into an array, one element per file. mapfile keeps
# names containing spaces intact, unlike the old $( ) word-splitting.
mapfile -t source_files < <(find . -maxdepth 1 -type f -iname "*.$sourceFormat" -printf '%f\n')
for element in "${source_files[@]}"
do
# If we're running Windows, build a Windows-style path (backslashes);
# otherwise leave the POSIX path as-is:
if [ "${OS:-}" = "Windows_NT" ]
then
# escaping \:
fullPathToSourceFile="$currDir"\\"$element"
fullPathToSourceFile=$(cygpath -w "$fullPathToSourceFile")
else
fullPathToSourceFile="$currDir"/"$element"
fi
# FontForge would not run reliably when the command was issued in-line
# (quoting of paths containing spaces went wrong), so write the command
# into a throwaway script and source it instead. mktemp avoids clobbering
# any real file with a fixed temp-script name.
tmpScript=$(mktemp ./ff2f_tmpScript.XXXXXX)
echo "FontForge -script \"$fullPathToFFscript\" \"$fullPathToSourceFile\" .$destFormat" > "$tmpScript"
chmod +x "$tmpScript"
source "$tmpScript"
rm -- "$tmpScript"
done
# DEV HISTORY:
# 2020-11-15 Updated to use better array creation and command substitution. Re-tested on windows. Corrected some documentation errors.
# 2020-08-29 Revamped to use simpler script.
# 2020-04-30 Rewrote as bash script, parameterizing source and dest format.
# 2020-04-30 Pulled my hair out with problems of spaces in file name and arrays, made "array" with text file and iterated over lines of it instead.
| true
|
1714e27f5ab52e2a3f4b8f3aef49cb7f792632e3
|
Shell
|
jsutlovic/screenrunner
|
/installer.sh
|
UTF-8
| 979
| 3.75
| 4
|
[
"BSD-2-Clause-Views"
] |
permissive
|
#!/usr/bin/env bash
## Setup script for screenrunner
## Installs the main scripts into $HOME/bin, creating it if it doesn't exist,
## and copies the example screen configuration into place.

# Create the per-user bin directory if needed. $HOME is quoted everywhere in
# case it contains spaces.
if [ ! -d "$HOME/bin" ]; then
mkdir -p "$HOME/bin"
fi

# Install the runner scripts. Destination-last form is portable; the previous
# 'cp -t' is a GNU extension unavailable on BSD/macOS cp.
cp scrrnr scrcmd bashrnr "$HOME/bin/"

# Install the example .screenrc without clobbering an existing one.
if [ -f "$HOME/.screenrc" ]; then
echo "You already have a .screenrc, setting ours up in $HOME/.screenrc-scrrnr"
cp screenrc-ex1 "$HOME/.screenrc-scrrnr"
else
cp screenrc-ex1 "$HOME/.screenrc"
fi

# Copy the per-window screenrc directory unless it already exists.
if [ -d "$HOME/.screen" ]; then
echo "$HOME/.screen already exists, not copying"
else
cp -R screenrc "$HOME/.screen"
fi

# Tell the user whether $HOME/bin is already on their PATH.
# -F treats the needle as a literal string (a path is not a regex) and -q
# reports the match via exit status only; printf replaces non-portable
# 'echo -e' for the escaped newlines.
if echo "$PATH" | grep -qF -- "$HOME/bin"; then
printf "Looks like you're good to go!\n"
printf "Try starting with:\n\nscrrnr tester\n"
else
echo "All installed!"
echo "Looks like you're missing $HOME/bin in your \$PATH"
echo "Fix that by adding the line:"
echo ""
echo "PATH=\"\$HOME/bin:\$PATH\""
echo ""
echo "into your \$HOME/.bashrc"
echo ""
echo "Then open up a new terminal and try:"
echo "scrrnr tester"
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.