blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
fe9416da2750bf0aad684523adfb4bb7b7738740
|
Shell
|
kill121/ipxe-helper
|
/compile_ipxe.sh
|
UTF-8
| 361
| 2.71875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Clone (or refresh) the iPXE sources and build undionly.kpxe with an embedded
# bootstrap script.  Relies on log/error/copy_template provided by common.sh.
. common.sh
log "Cloning ipxe source"
# Clone failure is tolerated on purpose: when ./ipxe already exists the
# "git pull" below refreshes the checkout instead.
git clone -q git://git.ipxe.org/ipxe.git
# Guard the cd: previously an unchecked "cd ipxe" meant a missing checkout ran
# git pull / make in the caller's directory.
cd ipxe || error "ipxe checkout is missing"
git pull || error "No git?..."
log "Preparing binary"
cd src || error "ipxe/src is missing"
copy_template bootstrap.ipxe
make bin/undionly.kpxe EMBED=bootstrap.ipxe > /dev/null || error "Could not compile :("
echo
log "Done, copy $(pwd)/bin/undionly.kpxe to the TFTP server"
| true
|
dd8bdb8a9e6e3f6c71aa444a8426c01e5d1b0cd1
|
Shell
|
mihalea/bitwarden-pyro
|
/packaging/deploy.sh
|
UTF-8
| 617
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Travis deploy hook: publish an updated bitwarden-pyro-git PKGBUILD to the AUR.
# Expects TRAVIS_BUILD_DIR, VERSION, TRAVIS_TAG, MAKEPKG_DIR and MAKEPKG_CONF in
# the environment, plus an SSH key authorised for aur.archlinux.org.
set -ex
cd "$TRAVIS_BUILD_DIR/packaging"
# Get the repo
git clone ssh://aur@aur.archlinux.org/bitwarden-pyro-git.git aur
# Update it
cp PKGBUILD aur
cd aur
# Change pkgver to current version
sed -i -r "s/^(pkgver)=(.*)$/\1=${VERSION}/g" PKGBUILD
# Pin the source tarball's "#tag=" fragment to the release tag.
sed -i -r "s/^(source=.*#tag)=(.*)$/\1=${TRAVIS_TAG}\")/g" PKGBUILD
# Create .SRCINFO
/bin/bash "$MAKEPKG_DIR/makepkg" --config="${MAKEPKG_CONF}" --printsrcinfo > .SRCINFO
# Commit
git add PKGBUILD .SRCINFO
git config user.email "deploy@mihalea.ro"
git config user.name "mihalea-deploy"
git commit -m "Release $TRAVIS_TAG"
# Deploy to AUR
git push origin master
| true
|
eddbfbd9487b411ba6e6372af485b845ac0b7e27
|
Shell
|
luispedrosa/vigor
|
/bench/clean.sh
|
UTF-8
| 184
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Clean up the benchmark machines: run the remote tester cleanup on
# $TESTER_HOST (from config.sh), then the local middlebox cleanup.
. ./config.sh
# $1: The middlebox path (so we know what to kill)
echo "[clean] Cleaning machines..."
# Quote expansions: unquoted $TESTER_HOST / $1 would word-split or glob.
ssh "$TESTER_HOST" "~/scripts/clean/tester.sh"
./clean/middlebox.sh "$1"
| true
|
1871123e693b11ff160851eebbd75956ad0c62ac
|
Shell
|
flukso/flm02
|
/tools/fff/www/cgi-bin/status
|
UTF-8
| 1,027
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# status - an fff script returning the last command's status code
#
# Copyright (C) 2015 Bart Van Der Meerssche <bart.vandermeerssche@flukso.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# build the response header first
echo "HTTP/1.1 200 OK"
echo "Content-Type: text/plain"
echo "Transfer-Encoding: chunked"
echo ""
# ... followed by the response body
# NOTE(review): the header advertises chunked transfer-encoding but the body is
# written unchunked — presumably the fff server re-chunks it; confirm.
# HOME is repurposed here as the cgi-bin directory for this request.
HOME=$PWD/cgi-bin
# Disable stdio buffering so the status code streams out immediately.
NOBUF="stdbuf -i0 -o0 -e0"
$NOBUF cat $HOME/code
| true
|
7dd7289aae944dd25c3eb86c4da212d3531d0586
|
Shell
|
bertvv/presentation-clean-bash
|
/examples/connections.sh
|
UTF-8
| 3,370
| 4.46875
| 4
|
[] |
no_license
|
#! /usr/bin/env bash
#
# Author: Bert Van Vreckem <bert.vanvreckem@gmail.com>
#
# Give a list of all open connections, consisting of IP address and host name
# (if reverse DNS lookup succeeds).
#
# https://github.com/bertvv/scripts/blob/master/src/connections.sh
set -o errexit # abort on nonzero exitstatus
set -o nounset # abort on unbound variable
set -o pipefail # don't hide errors in pipes
IFS=$'\t\n' # Set Internal Field Separator
readonly debug='off' # Enable debug mode when set to 'on'
main() {
# Entry point: validate CLI args, ensure dig/whois are available, then print
# one info line per host that currently has an established TCP connection.
debug "Main loop"
check_args "${@}"
check_dependencies
# Unquoted on purpose: IFS=$'\t\n' splits active_hosts output one IP per word.
for ip in $(active_hosts); do
print_host_info "${ip}"
done
}
# {{{ Functions
# Argument checking
# Inspect the command line: zero arguments is fine; a help flag prints the
# usage text and exits; anything else is ignored by design.
check_args() {
debug "Checking arguments: ${@}"
if (( $# == 0 )); then
debug "No arguments given"
return 0
fi
debug "At least one argument given"
help_wanted "${1}" || return 0
usage
exit 0
}
# Succeed exactly when the single argument is one of the help flags.
help_wanted() {
debug "Checking if help is wanted with arg: ${1}"
case "${1}" in
'-h'|'--help'|'-?') return 0 ;;
*) return 1 ;;
esac
}
# Prints help message
# Writes the usage text (with the script name substituted) to stdout.
usage() {
cat << _EOF_
Usage: ${0} [-h|--help|-?]
Print a list of hosts with an open TCP connection, consisting of their IP
address and host name (if a reverse DNS lookup succeeds).
Any other command line arguments are ignored.
_EOF_
}
# Usage: check_dependencies
# Checks whether the commands needed for this script exist
# Exits with status 1 (after an error message) when dig or whois is missing.
check_dependencies() {
debug 'Checking dependencies'
# command -v is the POSIX builtin replacement for the external `which`.
if ! command -v dig > /dev/null 2>&1; then
error "The dig command is not available, install it first!"
exit 1
fi
if ! command -v whois > /dev/null 2>&1; then
error "The whois command is not available, install it first!"
exit 1
fi
}
# Usage: active_hosts
# List all hosts with an active TCP connection
# Emits one unique IPv4 address per line, numerically sorted: ss lists
# established IPv4 TCP sockets, awk keeps the peer "ADDR:PORT" column, and
# strip_port drops the port.
active_hosts() {
debug "Listing hosts with an active TCP Connection"
ss --tcp --numeric --ipv4 \
| awk '/ESTAB/ {print $5}' \
| strip_port \
| sort -n \
| uniq
}
# Usage: CMD | strip_port
# For each stdin line of the form IP_ADDRESS:PORT, drop everything from the
# first ':' onward, leaving only the address; lines without a colon pass
# through unchanged.
strip_port() {
cut -d : -f 1
}
# Usage: print_host_info IP_ADDRESS
# Print the specified IP address, and its associated network name and host
# name (if available).
print_host_info() {
local ip_address="${1}"
debug "Printing host info for ${ip_address}"
# NOTE(review): 'local v=$(cmd)' masks the command's exit status (SC2155);
# with errexit+pipefail in force this also keeps a failing dig/whois from
# aborting the whole script — confirm that is intended before changing.
local host_name=$(reverse_lookup "${ip_address}")
local net_name=$(network_name "${ip_address}")
printf "%16s %20s %s\n" "${ip_address}" "${net_name}" "${host_name}"
}
# Usage: reverse_lookup IP_ADDRESS
# Perform a reverse DNS lookup, only returning the host name
# (first line of dig's short answer; empty when the lookup fails).
reverse_lookup() {
local ip_address="${1}"
debug "Performing reverse lookup for ${ip_address}"
dig -x "${ip_address}" +short \
| head -1
}
# Usage: network_name IP_ADDRESS
# Use whois to find out the network name
# (second field of the first case-insensitive "netname" line).
network_name() {
local ip_address="${1}"
debug "Looking up network name for ${ip_address}"
whois "${ip_address}" \
| grep --ignore-case netname \
| head -1 \
| awk '{print $2}'
}
# Usage: debug "MESSAGE"
# Print a debug message (in cyan) on stderr, but only while the global
# $debug flag is exactly 'on'; otherwise do nothing and succeed.
debug() {
case "${debug}" in
on) printf '\e[0;36m[DBG] %s\e[0m\n' "${*}" 1>&2 ;;
esac
}
# Usage: error "MESSAGE"
# Print an error message (in red) on standard error.
error() {
local message="${*}"
printf '\e[0;31m[ERR] %s\e[0m\n' "${message}" 1>&2
}
main "${@}"
#}}}
| true
|
21bd39539913da0dfb0aaad3733b3885f48382ef
|
Shell
|
ipbit/falcon-plugin
|
/plugin-update.sh
|
UTF-8
| 436
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Refresh the local open-falcon plugin checkout, tell the agent to reload its
# plugins, and make every plugin script executable.
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
PLUGIN_DIR=/usr/local/open-falcon/plugin/
# Spread agents over a 0-2999s window so they don't all hit the remote at once.
RANDOM_SLEEP=$((RANDOM % 3000))
sleep "$RANDOM_SLEEP"
# Abort if the plugin dir is missing — the git resets below are destructive.
cd "$PLUGIN_DIR" || exit 1
git reset --hard
git reset --hard origin/master
curl http://127.0.0.1:1988/plugin/update
# Was: for ... in `find $PLUGIN_DIR -name "*" | grep -E "*.sh$|*.py$"` — a
# malformed ERE (leading '*') plus word-splitting on paths.  Match the
# suffixes in find itself and NUL-delimit so paths with spaces survive.
find "$PLUGIN_DIR" -type f \( -name '*.sh' -o -name '*.py' \) -print0 |
while IFS= read -r -d '' SCRIPT_LIST
do
chmod +x "$SCRIPT_LIST"
done
| true
|
fb50469155f3ad27e06daf1f4599a733910a2a67
|
Shell
|
tiagorol/cloudify-wordpress
|
/scripts/nginx/configure-nginx.sh
|
UTF-8
| 382
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Cloudify lifecycle script: fetch an nginx.conf template, substitute this
# host's public IP for the HOST_NGINX placeholder and install it.
set -e
cd /home/ubuntu/
ctx logger info "Incio da Configuracao do Nginx..."
# Download the nginx.conf template from the project repository.
wget https://raw.githubusercontent.com/tiagorol/cloudify-wordpress/master/resources/nginx/nginx.conf
# Discover this machine's public IP via ipecho.net.
HOST_NG=$(wget http://ipecho.net/plain -O - -q ; echo)
sudo sed -i "s/HOST_NGINX/$HOST_NG/" nginx.conf
sudo cp nginx.conf /etc/nginx/
ctx logger info "Configuracao Nginx com sucesso..."
| true
|
192d462cb42b8ca5da139afc4240d3d21fd90cca
|
Shell
|
brunevikis/Scripts_Queue_CPAS
|
/Local/scripts/GNL/OLD/Altera_Adterm.sh
|
UTF-8
| 3,041
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Rewrite patamar values in adterm.dat using benefit data from a sibling
# month's bengnl.csv.  The current directory name's last 6 chars are taken as
# a YYYYMM "date folder"; every third line of adterm.dat names a plant
# (usina) whose three patamar values may be replaced by values read from
# ../<deck>/<previous month>/bengnl.csv.  Old and new blocks are appended to
# Teste.log; the in-place rewrite of adterm.dat itself is still commented out.
# NOTE(review): heavily position/format dependent — column offsets like
# ${Ben:5:7} and ${Pat:2:2} assume the exact bengnl.csv layout; verify against
# a sample deck before changing anything here.
Alterar_Adterm()
{
i=0
j=0
mes=1
linha_Usina=False
bloco_Antigo=""
bloco_Novo=""
Dir_Atual=$(pwd)
n=6
pos=${#Dir_Atual}
n_5=$((pos-6))
# Last 6 characters of the current directory name, presumably YYYYMM.
Pasta_Data=${Dir_Atual:$n_5:$n}
insertpoint=2
IFS=$'\n'
while read d1
do
i=$((i+1))
j=$((j+1))
linha_AD=$(echo "$d1")
# Skip the two header lines of adterm.dat.
if [[ i -gt 2 ]];
then
if [[ j -eq 3 ]];
then
j=0
# Plant id: first 6 chars with spaces and letters stripped.
usina=$(echo "$d1" | cut -c1-6 | sed "s/ //g" | sed "s/[[:alpha:]]//g")
bloco_Antigo=$bloco_Antigo$d1$'\n'
bloco_Novo="$bloco_Novo""$d1"
else
bloco_Antigo="$bloco_Antigo""$d1"$'\n'
# Alternate between looking two months and one month back.
if [[ mes -eq 1 ]];
then
mes=$((mes+1))
Pasta_Ben=$((Pasta_Data-2))
else
mes=1
Pasta_Ben=$((Pasta_Data-1))
fi
n=6
pos=${#d1}
n_5=$((pos-$n))
# pat1: last 6 characters of the current data line (value to substitute).
pat1=${d1:$n_5:$n}
cd ..
# Deck directory: the sibling whose name does NOT start with a 6-digit date.
dcPaths=$( ls | grep -E '^20[0-9]{4}' -v )
cd $Pasta_Data
if [ -d "../$dcPaths/$Pasta_Ben" ];
then
IFS=";"
while read f1 f2 f3 f4 Usina_Ben Nome Pat Ben Custo
do
if [[ " 1 " == "$f1" ]];
then
# NOTE(review): '>' here is a lexicographic string comparison — confirm the
# CSV fields are fixed-width so this orders like a numeric comparison.
if [[ "$Ben" > "$Custo" ]];
then
Usina_Ben=$(echo "$Usina_Ben" | sed "s/ //g")
if [[ "$Usina_Ben" == "$usina" && ${Pat:2:2} -eq 1 ]];
then
pat1_Ben=${Ben:5:7}
elif [[ "$Usina_Ben" == "$usina" && ${Pat:2:2} -eq 2 ]];
then
pat2_Ben=${Ben:5:7}
elif [[ "$Usina_Ben" == "$usina" && ${Pat:2:2} -eq 3 ]];
then
pat3_Ben=${Ben:5:7}
fi
else
Usina_ben=$(echo "${Usina_Ben:1:3}" | sed "s/ //g")
if [[ "${Usina_Ben:1:3}" == "$usina" && ${Pat:2:2} -eq 1 ]];
then
pat1_Ben="0"
elif [[ "${Usina_Ben:1:3}" == "$usina" && ${Pat:2:2} -eq 2 ]];
then
pat2_Ben="0"
elif [[ "${Usina_Ben:1:3}" == "$usina" && ${Pat:2:2} -eq 3 ]];
then
pat3_Ben="0"
fi
fi
fi
done < ../$dcPaths/$Pasta_Ben/bengnl.csv
# Substitute the three patamar values into the line (sed replaces the first
# remaining occurrence of $pat1 on each pass).
if [[ "$pat1_Ben" != "0" ]];
then
linha_nova=$(echo "$linha_AD" | sed -e "s/${pat1}/${pat1_Ben:0:6}/g" )
linha_nova=$(echo "$linha_nova" | sed -e "s/${pat1}/${pat2_Ben:0:6}/g" )
linha_nova=$(echo "$linha_nova" | sed -e "s/${pat1}/${pat3_Ben:0:6}/g" )
bloco_Novo="$bloco_Novo""$linha_nova"$'\n'
#sed -i "/$linha_AD/{s/$linha_AD/$linha_nova/;:a;N;ba}" adterm.dat
#sed -i "s/$linha_AD/$linha_nova/g" adterm.dat
else
linha_nova=$linha_AD
bloco_Novo="$bloco_Novo""$linha_nova"$'\n'
#linha_nova=$(echo "$linha_GL" | sed -e "s/${pat1}/${pat1_Ben}/" )
teste32=0
fi
else
linha_nova=$linha_AD
bloco_Novo="$bloco_Novo""$linha_nova"$'\n'
fi
fi
fi
done < adterm.dat
echo $bloco_Antigo >> Teste.log
echo $bloco_Novo >> Teste.log
#echo $bloco_Novo > adterm.dat.temp.modif
#sed -i "$insertpoint r ""adterm.dat"".temp.modif" "adterm.dat".temp
# mv "adterm.dat".temp "adterm.dat"
}
Alterar_Adterm
| true
|
e4460078f56afd0addefa06e4274c459438ff06d
|
Shell
|
gilbertoamarcon/sfm
|
/add-cam.sh
|
UTF-8
| 258
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Register a camera in the openMVG sensor-width database and keep it sorted.
#   $1 - camera model name
#   $2 - sensor width / focal value for the database's second column
cam_model=$1
cam_focal_dist=$2
sensor_db=${HOME}/dev/openMVG/src/openMVG/exif/sensor_width_database/sensor_width_camera_database.txt
# Inserting camera into the db.  Quoted: model names may contain spaces, and
# the previous unquoted echo word-split them.
echo "${cam_model};${cam_focal_dist}" >> "$sensor_db"
# Sort the database in place so lookups stay deterministic.
sort -o "$sensor_db" "$sensor_db"
| true
|
0e434b437f01763f5168b6031f8cdff6b7262f34
|
Shell
|
b4/4.3BSD-Quasijarus
|
/usr.bin/diction/SCCS/s.diction.sh
|
UTF-8
| 1,616
| 2.515625
| 3
|
[] |
no_license
|
h51544
s 00001/00001/00027
d D 4.5 82/11/06 17:54:54 rrh 5 4
c fix minor formatting botch
e
s 00006/00002/00022
d D 4.4 82/11/06 17:13:29 rrh 4 3
c Change so default macro package is -me; added -me, -ma and -k flags
c for deroff
e
s 00002/00001/00022
d D 4.3 82/11/06 17:08:06 rrh 3 2
c Change path to deroff so deroff is taken from /usr/bin
e
s 00001/00001/00022
d D 4.2 82/11/06 14:35:35 rrh 2 1
c Bill Jolitz @ Berkeley received this version from Lorindia Cherry
c around September 1981, as the ``most recent version''. Deltas
c seem to be progressive, rather than regressive, although the BTL sid for
c deroff.c is retrograde; this is probably a case of parallel development
e
s 00023/00000/00000
d D 4.1 82/11/06 13:48:39 rrh 1 0
c Oldest available version at Berkeley
e
u
U
t
T
I 1
#! /bin/sh
#
# %W% (Berkeley) %E%
#
I 3
D=/usr/bin
E 3
B=/usr/lib
echo $*
rest=
flag=
nflag=
D 4
mflag=-mm
E 4
I 4
mflag=-me
E 4
D 2
lflag=
E 2
I 2
D 5
lflag= -ml
E 5
I 5
lflag=-ml
E 5
I 4
kflag=
E 4
E 2
file=
for i
do case $i in
-f) flag=-f;shift; file=$1; shift; continue;;
-n) nflag=-n;shift; continue;;
I 4
-k) kflag=-k;shift; continue;;
E 4
-mm) mflag=$1; shift; continue;;
-ms) mflag=$1;shift;continue;;
I 4
-me) mflag=$1;shift;continue;;
-ma) mflag=$1;shift;continue;;
E 4
-ml) lflag=$1;shift;continue;;
*) rest=$*; break;;
esac
done
D 3
$B/deroff $mflag $lflag $rest^$B/dprog -d $nflag $flag $file
E 3
I 3
D 4
$D/deroff $mflag $lflag $rest^$B/dprog -d $nflag $flag $file
E 4
I 4
$D/deroff $kflag $lflag $mflag $rest^$B/dprog -d $nflag $flag $file
E 4
E 3
E 1
| true
|
6de5039558e1d94350e4b81fde30e966e6c7a418
|
Shell
|
edwardsmarkf/fastfeathers
|
/init-mariadb-client.bsh
|
UTF-8
| 4,062
| 3.46875
| 3
|
[] |
no_license
|
#! /bin/bash
# init-mariadb-client.bsh last update: 2018-04-18
# 2018-09-12 -- commented out firewall
# this script builds a mariadb client from scratch for testing with init-mariadb-server.bsh
dbServerIP='XXX.XXX.XXX.XXX'; ## this this from init-mariadb-server.bsh
dbUser='feathersuser';
dbPass='aaaaaa';
dbPort='3306';
dbName='bank';
## Decide whether commands need a sudo prefix.
## (was: if [ '${$(groups)/sudo}' ] — a single-quoted literal, i.e. always
## true, and the root branch *echoed* SUDO='' instead of assigning it)
if groups | grep -qw sudo ;
then SUDO='sudo' ;
elif [ "$(whoami)" = 'root' ] ;
then SUDO='' ;
else
echo 'you either need to be have sudo or be logged in as root!';
exit 1; ## was a bare "exit", which reported success
fi;
FileNameWithExtension=${0##*/} ;
FileNameWithoutExtension=${FileNameWithExtension%.*} ;
TimeStamp=$(date "+%Y-%m-%d %r") ;
rm -Rf ./${FileNameWithoutExtension}/ ; ## just in case one already exists.
## abort if we cannot enter the work directory (was unchecked).
mkdir ./${FileNameWithoutExtension}/ && cd ./${FileNameWithoutExtension}/ || exit 1 ;
## ${SUDO} yum --assumeyes install bind-utils expect firewalld wget ;
## ${SUDO} systemctl start firewalld ;
## ${SUDO} systemctl enable firewalld ;
## ???????????????????????????????????????????????????????????????????????????????
## ${SUDO} firewall-cmd --zone=dmz --add-port=${dbPort}/tcp --permanent ;
## ${SUDO} firewall-cmd --reload ;
${SUDO} yum --assumeyes update ;
${SUDO} yum --assumeyes install expect ;
${SUDO} yum --assumeyes install gcc-c++ make ;
${SUDO} yum --assumeyes install epel-release ;
## create a repo for Mariadb 10.2
## (written via tee so it also works when ${SUDO} is needed — a plain
## redirection into /etc fails for non-root users)
cat <<END_OF_REPO | ${SUDO} tee /etc/yum.repos.d/MariaDB-10.2.repo > /dev/null ;
# MariaDB 10.2 CentOS repository list - created 2018-03-19 16:12 UTC
# http://downloads.mariadb.org/mariadb/repositories/
[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.2/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1
END_OF_REPO
${SUDO} yum --assumeyes install MariaDB-client ; ## notice that MariaDB-server is being skipped.
curl --silent --location https://rpm.nodesource.com/setup_8.x | ${SUDO} bash - ;
${SUDO} yum --assumeyes install nodejs ;
sleep 10 ; ## installing node appeared to work async in one test.
${SUDO} npm -g update npm ; ## update to latest version
echo -n 'node version: ' ; node --version ; ## 8.10.0 used in this writing
echo -n 'npm version: ' ; npm --version ; ## 5.6.0 at the time of this writing
export FileNameWithExtension;
## drive "npm init" non-interactively via expect.
expect <(cat <<'END_OF_NPM_INIT'
set timeout -1
spawn npm init ;
expect -re ".*package name:.*"
send -- "\r"
expect -re ".*version:.*"
send -- "\r"
expect -re ".*description:.*"
send -- "Created using bash script: $env(FileNameWithExtension)\r"
expect -re ".*entry point:.*"
send -- "\r"
expect -re ".*test command:.*"
send -- "\r"
expect -re ".*git repository:.*"
send -- "\r"
expect -re ".*keywords:.*"
send -- "\r"
expect -re ".*author:.*"
send -- "Created using bash script: $env(FileNameWithExtension)\r"
expect -re ".*license:.*"
send -- "\r"
expect -re ".*Is this OK?.*"
send -- "\r"
expect eof
END_OF_NPM_INIT
)
npm install mysql2 --save ;
## written from https://www.sitepoint.com/using-node-mysql-javascript-client/
cat > nodeMariadbTest.js <<END_OF_NODE_SCRIPT ;
// Connect to the "bank" database.
const mysql = require('mysql2');
const connection = new mysql.createConnection({
host: '${dbServerIP}',
user: '${dbUser}',
password: '${dbPass}',
database: '${dbName}',
port: ${dbPort}
});
/* optionally this works too:
const connectionString = 'mysql://${dbUser}:${dbPass}@${dbServerIP}:${dbPort}/${dbName}';
const connection = new mysql.createConnection( connectionString );
*/
connection.connect((err) => {
if (err) throw err;
console.log('Connected!');
});
END_OF_NODE_SCRIPT
cat <<END_OF_SCRIPT;
mysql --user ${dbUser} --password --host ${dbServerIP} ; ## password (twice) ${dbPass}
be sure to do cd ${FileNameWithoutExtension}; node nodeMariadbTest.js ;
END_OF_SCRIPT
| true
|
0b9bd81d36f752cde253422881811cbb293c419a
|
Shell
|
ernestrc/logd
|
/test/helper.sh.in
|
UTF-8
| 7,806
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Test helper for logd; @VAR@ placeholders are substituted at configure time.
LOGD_SLAB_CAP=@LOGD_SLAB_CAP@
LOGD_BUF_INIT_CAP=@LOGD_BUF_INIT_CAP@
LOGD_BUF_MAX_CAP=@LOGD_BUF_MAX_CAP@
# Allow overriding the sed binary (e.g. gsed on BSD/macOS).
if [[ $SED == "" ]]; then
SED=sed
fi
TESTS_SLEEP="$TESTS_SLEEP"
# Settle time between pushing fixture data and asserting on output.
if [[ "$TESTS_SLEEP" == "" ]]; then
TESTS_SLEEP=1
fi
# assert_file_content EXPECTED FILE
# Exit 1 (after dumping $ERR when present) unless FILE's content — with NUL
# bytes stripped and trailing newlines trimmed by the substitution — equals
# EXPECTED.
function assert_file_content {
local __OUT
# Split declaration from assignment (SC2155) and quote "$2" so paths with
# spaces work.
__OUT=$(tr -d '\0' < "$2")
if [ "$__OUT" != "$1" ]; then
echo "$(date "+%Y-%m-%d %H:%M:%S") expected '$1' but found '$__OUT'"
if [ -f "$ERR" ]; then
cat "$ERR"
fi
exit 1;
fi
}
# push_file — append PUSH_FILE_ITER (default 10) copies of a fixed log
# fixture to $IN.  The exact byte content (trailing spaces, the GARBAGE
# line, the blank line, over-long lines that exceed buffer capacities)
# is part of the test contract — do not reformat these lines.
function push_file() {
if [[ "$PUSH_FILE_ITER" == "" ]]; then
PUSH_FILE_ITER=10
fi
for i in $(seq 1 $PUSH_FILE_ITER); do
echo "2018-05-12 12:51:28 ERROR [thread1] clazz a: A, " >>$IN
echo "2018-05-12 12:52:22 WARN [thread2] clazz callType: b: B" >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, " >> $IN
echo "2018-05-12 12:54:22 DEBUG [thread4] clazz callType: b: ," >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:51:28 ERROR [thread1] clazz a: A, " >> $IN
echo "2018-05-12 12:52:22 WARN [thread2] clazz callType: b: B" >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, " >> $IN
echo "2018-05-12 12:54:22 DEBUG [thread4] clazz callType: b: ," >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:51:28 ERROR [thread1] clazz a: A, " >> $IN
echo "2018-05-12 12:52:22 WARN [thread2] clazz callType: b: B" >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: " >> $IN
echo "2018-05-12 12:54:22 DEBUG [thread4] clazz callType: b: ," >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "GARBAGE 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:51:28 ERROR [thread1] clazz a: A, " >> $IN
echo "2018-05-12 12:51:28 ERROR [thread1] clazz a: A, " >> $IN
echo "2018-05-12 12:52:22 WARN [thread2] clazz callType: b: B" >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, " >> $IN
echo "2018-05-12 12:54:22 DEBUG [thread4] clazz callType: b: ," >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:51:28 ERROR [thread1] clazz a: A, " >> $IN
echo "2018-05-12 12:52:22 WARN [thread2] clazz callType: b: B" >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, " >> $IN
echo "2018-05-12 12:54:22 DEBUG [thread4] clazz callType: b: ," >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c:" >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:52:22 WARN [thread2] clazz callType: b: B" >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, " >> $IN
echo "2018-05-12 12:54:22 DEBUG [thread4] clazz callType: b: ," >> $IN
echo "2018-05-12 12:51:28 ERROR [thread1] clazz a: A, " >> $IN
echo "2018-05-12 12:51:28 ERROR [thread1] clazz a: A, " >> $IN
echo "2018-05-12 12:52:22 WARN [thread2] clazz callType: b: B" >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, " >> $IN
echo "2018-05-12 12:51:28 ERROR [thread1] clazz a: A, " >> $IN
echo "2018-05-12 12:52:22 WARN [thread2] clazz callType: b: B" >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, " >> $IN
echo "2018-05-12 12:51:28 ERROR [thread1] clazz a: A, " >> $IN
echo "2018-05-12 12:52:22 WARN [thread2] clazz callType: b: B" >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c:" >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, " >> $IN
echo "" >> $IN
echo "2018-05-12 12:54:22 DEBUG [thread4] clazz callType: b: ," >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:54:22 DEBUG [thread4] clazz callType: b: ," >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:54:22 DEBUG [thread4] clazz callType: b: ," >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:52:22 WARN [thread2] clazz callType: b: B" >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, " >> $IN
echo "2018-05-12 12:54:22 DEBUG [thread4] clazz callType: b: ," >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c:" >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:51:28 ERROR [thread1] clazz a: A, " >> $IN
echo "2018-05-12 12:52:22 WARN [thread2] clazz callType: b: B" >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, " >> $IN
echo "2018-05-12 12:54:22 DEBUG [thread4] clazz callType: b: ," >> $IN
echo "2018-05-12 12:51:28 ERROR [thread1] clazz a: A, " >> $IN
echo "2018-05-12 12:52:22 WARN [thread2] clazz callType: b: B" >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, " >> $IN
echo "2018-05-12 12:54:22 DEBUG [thread4] clazz callType: b: ," >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:53:22 INFO [thread3] clazz callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c: C, callType: c:" >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
echo "2018-05-12 12:55:22 TRACE [thread5] clazz callType: b: c: C, " >> $IN
done
}
# pushdata — append two fixture log lines to $IN, give the reader
# $TESTS_SLEEP seconds to pick them up, then sync when $IN is a regular
# file (a FIFO needs no sync).  Comments must stay outside the heredoc:
# anything between << EOF and EOF is fixture data.
function pushdata() {
cat >>$IN << EOF
2018-05-12 12:51:28 ERROR [thread1] clazz a: A, 
2018-05-12 12:52:22 WARN [thread2] clazz callType: b: B
EOF
sleep $TESTS_SLEEP
STAT=$(stat $IN | grep 'fifo')
if [[ "$STAT" == "" ]]; then
sync $IN
fi
}
# assert_file_contains PATTERN FILE
# Exit 1 (after dumping $ERR when present, then FILE) unless PATTERN matches
# exactly one line of FILE.  PATTERN is a grep regex, as before.
function assert_file_contains {
# grep -c on the file directly (no useless cat); -- protects patterns that
# start with '-'; quoting "$2" protects paths with spaces.
COUNT=$(grep -c -- "$1" "$2")
if [ "$COUNT" != "1" ]; then
echo "expected to find '$1' in file but not found: $COUNT"
if [ -f "$ERR" ]; then
cat "$ERR"
fi
cat "$2"
exit 1;
fi
}
| true
|
2eb727192a9b3d4419464ca96c7dd2c40f1a69e4
|
Shell
|
resmo/voyage-linux
|
/meshlium/plugins/system/Synchronization/data/gps_date.sh
|
UTF-8
| 322
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Print the GPS-derived date when GPS time sync is enabled and gpsd runs.
# Count "on" lines directly with grep -c instead of cat | grep | wc -l.
launch_gps_time=$(grep -c on /etc/gps_time.conf)
if [ "$launch_gps_time" -gt 0 ] ; then
# pgrep cannot accidentally match its own grep process, unlike ps -e | grep.
if pgrep gpsd > /dev/null ; then
date=$(gps_parser.py)
echo "date $date"
else
echo "gpsd not active"
fi
else
echo "gps not configured to actualize date"
fi
| true
|
0c24a83b34c79b106d3c39c5653e7d19f8e394ff
|
Shell
|
inetum-orleans/docker-devbox-scripts
|
/.bash_enter.d/81-git-chmod
|
UTF-8
| 1,720
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# This should apply executable flags defined on git repository based on git ls-files -s output
function _git_chmod {
  # Was: `if $([ -d .git ] || git rev-parse ...)` — command substitution used
  # as a command, which only worked by accident.  Test the condition directly.
  if [ -d .git ] || git rev-parse --git-dir > /dev/null 2>&1; then
    submodule_path=$(_utils_relative_path "$PWD")
    if [[ -z $submodule_path ]]; then
      echo "$(tput setaf 7)Checking executable flags from git repository$(tput sgr0)"
    else
      echo "$(tput setaf 7)Checking executable flags from git repository ($submodule_path)$(tput sgr0)"
    fi
    chmod_changed=
    # Read index paths line by line so filenames containing spaces survive
    # (the previous unquoted for-loops word-split them).
    while IFS= read -r f; do
      [[ -z "$f" ]] && continue
      if [[ ! -x "$f" ]]; then
        echo "$(tput setaf 3)chmod +x \"$f\"$(tput sgr0)"
        chmod_changed=1
        chmod +x "$f"
      fi
    done < <(git ls-files -s | grep -E "^[0-9]+755 +" | cut -f2)
    while IFS= read -r f; do
      [[ -z "$f" ]] && continue
      if [[ -x "$f" ]]; then
        echo "$(tput setaf 3)chmod -x \"$f\"$(tput sgr0)"
        chmod_changed=1
        chmod -x "$f"
      fi
    done < <(git ls-files -s | grep -E "^[0-9]+644 +" | cut -f2)
    if [[ "$chmod_changed" == "1" ]]; then
      echo ""
      echo "$(tput setaf 3)Some chmod commands have been executed from git index metadata.$(tput sgr0)"
      echo "$(tput setaf 3)If it doesn't fit your needs, you should change executable flags inside repository.$(tput sgr0)"
      echo "$(tput setaf 3)Use the following command: git update-index --chmod=<flags> foo.sh$(tput sgr0)"
      echo "$(tput setaf 3)chmod +x foo.sh => git update-index --chmod=+x foo.sh$(tput sgr0)"
      echo "$(tput setaf 3)chmod -x foo.sh => git update-index --chmod=-x foo.sh$(tput sgr0)"
    fi
  fi
}
_git_chmod
# Re-run this same script inside every git submodule listed in .gitmodules.
for submodule in $(git config --file .gitmodules --get-regexp path | awk '{ print $2 }'); do
(cd "$submodule" && source "${BASH_SOURCE[0]}")
done
| true
|
0bc7f12d834cb0e20f16e794c1ff98ac4ea8050e
|
Shell
|
jhuntwork/merelinux
|
/packages/rust/PKGBUILD
|
UTF-8
| 1,957
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# shellcheck disable=SC2034,SC2154,SC2068
# PKGBUILD for the Rust toolchain.  The variables below are consumed by the
# distribution's makepkg, hence the shellcheck suppressions for "unused" and
# "referenced but not assigned" (cd_unpacked_src, pkgdir come from makepkg).
pkgname=(rust)
pkgver=1.66.0
pkgrel=1
pkgdesc='The Rust programming language'
arch=(x86_64)
url='https://www.rust-lang.org'
license=(GPL)
groups=()
depends=()
makedepends=(
cmake
llvm-dev
ninja
openssl-dev
python
zlib-ng-dev
)
options=()
changelog=ChangeLog
source=(
"https://static.rust-lang.org/dist/rustc-${pkgver}-src.tar.xz"
)
sha256sums=(
0dc176e34fae9871f855a6ba4cb30fa19d69c5b4428d29281a07419c4950715c
)
# Configure and build rustc/cargo against the system LLVM for a musl host.
build() {
# FIXME - Add to llvm? or just bootstrap this and have it depend on itself later?
ln -s libunwind.so.1.0 /usr/lib/libgcc_s.so.1.0
ln -s libgcc_s.so.1.0 /usr/lib/libgcc_s.so.1
ln -s libgcc_s.so.1.0 /usr/lib/libgcc_s.so
export CHOST=x86_64-unknown-linux-musl
cd_unpacked_src
# Default to dynamically-linked musl binaries instead of crt-static.
sed -i 's/\(crt_static_default = \)true/\1false/' \
compiler/rustc_target/src/spec/linux_musl_base.rs
./configure \
--prefix=/usr \
--build="${CHOST}" \
--host="${CHOST}" \
--release-channel=stable \
--disable-docs \
--disable-codegen-tests \
--enable-extended \
--enable-option-checking \
--enable-locked-deps \
--enable-vendor \
--disable-llvm-static-stdcpp \
--llvm-root=/usr \
--tools='cargo,src' \
--set="target.${CHOST}.musl-root=/" \
--set="target.${CHOST}.crt-static=false"
./x.py build -v
}
# Install into $pkgdir and prune sources/docs that should not ship.
package() {
depends=(
"ld-musl-$(arch).so.1"
libc++.so.1
libc++abi.so.1
libcrypto.so.1.1
libssl.so.1.1
libunwind.so.1
libz.so.1
)
provides=(
librustc_driver-6df729ebc511072b.so
libstd-1d88b09d0391d938.so
libtest-807256c4868665ab.so
)
cd_unpacked_src
DESTDIR="$pkgdir" ./x.py install -v
rm -rf "${pkgdir}/usr/lib/rustlib/src" \
"${pkgdir}/usr/share/doc" \
"${pkgdir}/usr/share/zsh"
}
| true
|
49a7731f88a16892aadf03a71f3df38415de6e5b
|
Shell
|
KostyaEsmukov/dotfiles
|
/scripts/install_docker.sh
|
UTF-8
| 1,023
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
set -euxo pipefail
# Install Docker: Homebrew cask on macOS, the official apt repo on
# Debian/x86_64; bail out everywhere else.
# Backticks replaced with $(...), substitutions quoted, `which` -> command -v.
if [ "Darwin" = "$(uname)" ]; then
  brew install --cask docker
elif command -v apt > /dev/null; then
  # https://docs.docker.com/install/linux/docker-ce/debian/
  if [ "x86_64" != "$(uname -m)" ]; then
    echo "This script is for x86_64 only."
    echo "Please install docker manually instead."
    exit 1
  fi
  if [ "Debian" != "$(lsb_release -si)" ]; then
    echo "This script is for Debian."
    echo "Please install docker manually instead."
    exit 1
  fi
  sudo apt install \
    apt-transport-https \
    ca-certificates \
    curl \
    gnupg2 \
    software-properties-common
  curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
  sudo add-apt-repository \
    "deb [arch=amd64] https://download.docker.com/linux/debian \
    $(lsb_release -cs) \
    stable"
  sudo apt update
  sudo apt install docker-ce
  # Allow the current user to run docker without sudo.
  sudo gpasswd -a "$(whoami)" docker
else
  echo "Unsupported OS"
  exit 1
fi
| true
|
705d6d00f8a446c3ba19edc0fa051c4c4acafb2d
|
Shell
|
git-hub-lab/reactor
|
/.travis/install_docker.sh
|
UTF-8
| 815
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Travis helper: upgrade the Docker engine and docker-compose on the worker.
set -e
# Add Docker's apt repo and install the newest docker-ce, keeping new config
# files (--force-confnew) so the upgrade never stalls on a dpkg prompt.
install_engine() {
echo "update docker engine"
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get update
sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
docker --version
}
# Replace the preinstalled docker-compose with the pinned release below.
install_compose() {
echo "update docker-compose"
DOCKER_COMPOSE_VERSION=1.23.2
# -f: don't abort the whole script (set -e) when no old binary exists.
sudo rm -f /usr/local/bin/docker-compose
# -f/-S: fail on HTTP errors instead of saving an HTML error page as the binary.
curl -fSL https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m) > docker-compose
chmod +x docker-compose
sudo mv docker-compose /usr/local/bin
docker-compose --version
}
# Install both components, then print the daemon info as a smoke test.
main() {
install_engine
install_compose
docker info
}
main
| true
|
6dc2b0f376cdee47972041fff61e7b6798ad68d2
|
Shell
|
mumahendras3/configs-scripts
|
/scripts/nvidia-driver-recompile
|
UTF-8
| 3,135
| 4.25
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Rebuild and install the Bumblebee/NVIDIA kernel modules after a kernel
# upgrade on Slackware.  Tunables (environment):
#   BBSBPATH - Bumblebee-SlackBuilds checkout (default below)
#   COMPAT32 - build 32-bit compatibility libs, "yes" or "no"
#   NVDVER   - NVIDIA driver version
#   KERNEL   - kernel release to build modules for (default: running kernel)
# Variable initialization
BBSBPATH=${BBSBPATH:-/root/SlackBuilds-extra/Bumblebee-SlackBuilds}
COMPAT32=${COMPAT32:-yes}
NVDVER=${NVDVER:-390.12}
KERNEL=${KERNEL:-$(uname -r)}
# Telling the Bumblebee-Slackbuilds directory that's going to be used
# (was comparing against /root/Downloads/..., which is not the actual default,
# and used the bash-only '==' operator under #!/bin/sh)
if [ "$BBSBPATH" = "/root/SlackBuilds-extra/Bumblebee-SlackBuilds" ]; then
echo "Using $BBSBPATH as the Bumblebee-SlackBuilds directory (default directory)"
else
echo "Using $BBSBPATH as the Bumblebee-SlackBuilds directory"
fi
# Asking for multilib support
while true
do
case $COMPAT32 in
"yes")
echo "Enabling multilib support..."
break ;;
"no")
echo "Disabling multilib support..."
break ;;
*)
# was '"*")', which only matched a literal '*' and looped forever on bad input
printf "Wrong value has been set to the COMPAT32, please specify the correct value (\"yes\" or \"no\"): "
read -r COMPAT32
esac
done
# Telling which NVIDIA driver version is used
if [ "$NVDVER" = "390.12" ]; then
echo "Using the default driver (390.12)"
else
echo "Using driver version $NVDVER"
fi
# Telling which kernel the modules will be built for
if [ "$KERNEL" = "$(uname -r)" ]; then
echo "Building modules for $KERNEL (the currently used kernel)"
KERNELVER=$(uname -r | cut -d '-' -f 1) # This is stored for later use (when installing the nvidia-kernel)
TAG=$(uname -r | cut -d '-' -f 2) # This is stored for later use (when installing the nvidia-kernel)
else
echo "Building modules for $KERNEL"
KERNELVER=$(printf '%s' "$KERNEL" | cut -d '-' -f 1) # This is stored for later use (when installing the nvidia-kernel)
TAG=$(printf '%s' "$KERNEL" | cut -d '-' -f 2) # This is stored for later use (when installing the nvidia-kernel)
fi
# Changing to the Bumblebee-Slackbuilds directory (abort when it is missing)
cd "$BBSBPATH" || exit 1
# Asking if you want to update the Bumblebee-Slackbuilds files first
printf "Do you want to update the Bumblebee-Slackbuilds files first? [y|n] "
read -r choice
if [ "$choice" = "y" ]; then
# Running the download script in case of any update
./download.sh
echo "Do you see any errors when downloading? if you do, say yes here and retry the download script (please input y or n here)"
read -r choice
if [ "$choice" = "y" ]; then
exit 1
fi
fi
# Installing the kernel modules needed after every kernel upgrade
echo 'Installing the kernel modules needed after every kernel upgrade'
echo "Building and installing bbswitch"
cd bbswitch
KERNEL=$KERNEL ./bbswitch.SlackBuild
installpkg /tmp/bbswitch-*$KERNEL*_bbsb.t?z
# Installing nvidia-bumblebee if using other than the default nvidia driver
# NOTE(review): parses `ls /var/log/packages` to extract the installed
# nvidia-bumblebee version — fragile but kept as-is; verify package naming.
if [ "$NVDVER" != "$(ls /var/log/packages | grep "nvidia-bumblebee-*" | cut -d '-' -f 3)" ]; then
echo "Upgrading/downgrading nvidia-bumblebee using Nvidia driver version $NVDVER"
cd ../nvidia-bumblebee
VERSION=$NVDVER COMPAT32=$COMPAT32 ./nvidia-bumblebee.SlackBuild
upgradepkg /tmp/nvidia-bumblebee-"$NVDVER"-"$(uname -m)"-?_bbsb.t?z
fi
# Installing nvidia-kernel
echo "Building and installing nvidia-kernel"
cd ../nvidia-kernel
KERNEL=$KERNEL VERSION=$NVDVER ./nvidia-kernel.SlackBuild
installpkg /tmp/nvidia-kernel-"$NVDVER"_"$KERNELVER"_"$TAG"-$(uname -m)-?_bbsb.t?z
# Finished
echo "Installation is complete, please reboot the computer"
exit 0
| true
|
b2764e8ebb9a6e690bede366bbdaf1e2bf8728b8
|
Shell
|
freebsd/freebsd-ports
|
/net/rp-pppoe/files/04pppoe-server.sh.sample
|
UTF-8
| 555
| 3.3125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#! /bin/sh
# rc.d sample script for pppoe-server: start/stop/restart the daemon.
# use a subshell to support new -CURRENT rc.d structure
(
daemon=pppoe-server
daemon_path=/usr/local/sbin
#daemon_flags=
case $1 in
start)
# Only start when the binary exists and is executable.
if [ -x ${daemon_path}/$daemon ]; then
${daemon_path}/$daemon $daemon_flags
echo -n " $daemon"
fi
;;
stop)
killall $daemon && echo -n " $daemon"
;;
restart)
# Re-invoke this script for stop/start so the logic stays in one place.
$0 stop
sleep 1
$0 start
;;
*)
echo "Usage: `basename $0` {start|stop|restart}" >&2
# 64 = EX_USAGE (sysexits): command line usage error.
exit 64
;;
esac
exit 0
)
| true
|
0918b74fbb2f78b0070fde440e15a3797471ad67
|
Shell
|
JohnCremona/apocrita_scripts
|
/jcontrol
|
UTF-8
| 932
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
JOBNAME=$1
MAXJOBS=$2
function nr () { qstat | grep mpx017 | grep -c " r " ; }
function nq () { qstat | grep " qw " | awk 'BEGIN{s=0;}{a=gensub("-.*$","","g",$9); b=gensub(":.*$","","g",gensub("^.*-","","g",$9)); s+=(1+b-a);}END{print s}' ; }
while true; do
NRUN=`nr`
NQUE=`nq`
NJOBS=`expr $NRUN + $NQUE`;
echo ${NRUN} jobs running, ${NQUE} jobs queued, total ${NJOBS}
if [ "${NJOBS}" -lt "${MAXJOBS}" ]; then
echo Only ${NJOBS} jobs running/queued...
echo ...tidying up running.\* files...
./tidyall > /dev/null
./tidyout > /dev/null
echo ...queuing more jobs...
while [ "${NJOBS}" -lt "${MAXJOBS}" ]; do
echo Only ${NJOBS} jobs running/queued...
echo submitting ${JOBNAME} to queue
qsub ${JOBNAME};
sleep 5;
NRUN=`nr`
NQUE=`nq`
NJOBS=`expr $NRUN + $NQUE`;
done;
qstat
fi;
date
sleep 3600
done;
| true
|
2341d84b49620ad096c4506db1e228414d077965
|
Shell
|
alejandrogallo/hirata
|
/tools/indices.sh
|
UTF-8
| 518
| 2.90625
| 3
|
[] |
no_license
|
holes=(i j k l m n o p)
parti=(a b c d e f g h)
{
for a in h p; do
for b in h p; do
for c in h p; do
for d in h p; do
for e in h p; do
for f in h p; do
for g in h p; do
nholes=0
nparti=0
index=""
for var in a b c d e f g; do
[[ ${!var} = h ]] &&
{ index="${index}${holes[nholes]}"; let nholes+=1; } ||
{ index="${index}${parti[nparti]}"; let nparti+=1; }
done
echo ${a}${b}${c}${d}${e}${f}${g}=\"${index}\"
done
done
done
done
done
done
done
} | sort | uniq
#vim-run: bash % | less
| true
|
09000e845aa1e5450e27622514774c5d2d08dd9b
|
Shell
|
travis-ci/php-src-builder
|
/bin/install-libzip
|
UTF-8
| 1,433
| 3.671875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -o xtrace
set -o errexit
# If PHP < 7.3, exit
if [[ ! $VERSION =~ ^master$ ]] && [[ "$(printf "7.3\n$VERSION" | sort -V | head -n1)" < "7.3" ]]; then
echo 'PHP < 7.3; skip libzip steps'
exit 0
fi
LIBZIP_INSTALL_DIR=$HOME/.phpenv/versions/$VERSION
# uninstall system-wide zip packages
sudo apt-get purge $(dpkg -l | awk '/^ii/ && /libzip/ {print $2}' | cut -f1 -d:)
# get up-to-date cmake
mkdir cmake
pushd cmake
#install cmake specific to host architecture.
if [[ $HOSTTYPE == "powerpc64le" || $HOSTTYPE == "s390x"|| $HOSTTYPE == "aarch64" ]]
then
wget -O - https://cmake.org/files/v3.14/cmake-3.14.0.tar.gz | tar -xz --strip-components=1
#compile cmake
./configure > /dev/null 2>&1 && make > /dev/null 2>&1 && sudo make install > /dev/null 2>&1
else
wget https://github.com/Kitware/CMake/releases/download/v3.14.0/cmake-3.14.0-Linux-x86_64.sh
sudo sh cmake-3.14.0-Linux-x86_64.sh --prefix=/usr --skip-license
fi
popd
# compile libzip
git clone -b rel-1-5-2 https://github.com/nih-at/libzip.git
pushd libzip
cmake -DCMAKE_INSTALL_PREFIX=$LIBZIP_INSTALL_DIR .
make
make install
popd
# add the option in custom_configure_options
if [[ "$(printf "7.4\n$VERSION" | sort -V | head -n1)" < "7.4" ]]; then
echo "--with-libzip=$LIBZIP_INSTALL_DIR" >> $TRAVIS_BUILD_DIR/custom_configure_options
else
echo "--with-zip" >> $TRAVIS_BUILD_DIR/custom_configure_options
fi
| true
|
0c07c108311cce1531eb9fef0b02ec704d0eed35
|
Shell
|
yqtianust/gradingFramework
|
/2test.sh
|
UTF-8
| 614
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
echo "compile and test"
for f in lab_5/*/;
do
echo ${f}
srcfolder=`find ${f} -name Map.java -exec dirname {} \;`
find ${f} -name Map.java -exec javac -cp ${srcfolder} {} -d ${f} \;
srcfolder=`find ${f} -name BadMapException.java -exec dirname {} \;`
find ${f} -name BadMapException.java -exec javac -cp ${srcfolder} {} -d ${f} \;
# find ${f} -name BusCompany.java -exec javac -cp ${srcfolder} {} -d ${f} \;
java -jar ./lib/junit-platform-console-standalone-1.3.1.jar -cp lib:./${f} --scan-classpath lib >${f}/result.txt 2>&1
done
# find ${f} -name Bus.java
# find ${f} -name BusCompany.java
| true
|
9427ec39f9811d35f656c202404365a789d2ac96
|
Shell
|
container-images/boltron-27
|
/diff-mods.sh
|
UTF-8
| 354
| 2.921875
| 3
|
[] |
no_license
|
#! /bin/sh -e
if [ ! -f latest-Fedora-Modular-27.COMPOSE_ID ]; then
echo No known latest mod.
exit 1
fi
if [ ! -f prev-Fedora-Modular-27.COMPOSE_ID ]; then
echo No known prev mod.
exit 1
fi
tests="test-$(cat latest-Fedora-Modular-27.COMPOSE_ID)"
otests="test-$(cat prev-Fedora-Modular-27.COMPOSE_ID)"
diff -u $otests/mods $tests/mods
| true
|
7119430093e547f4417bb6f11d9f4941869772b5
|
Shell
|
janmojzis/curvevpn
|
/debian/service/curvecp/log/run
|
UTF-8
| 472
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh
DEFAULTFILE="/etc/default/curvevpn-server-run"
[ -f "${DEFAULTFILE}" ] && . "${DEFAULTFILE}"
if [ x"${LOGDIR}" = x ]; then echo "\$LOGDIR not set, please edit ${DEFAULTFILE}"; exit 111; fi
if [ x"${LOGUSER}" = x ]; then echo "\$LOGUSER not set, please edit ${DEFAULTFILE}"; exit 111; fi
mkdir -p "${LOGDIR}/curvecp" 2>/dev/null || :
chown -R "${LOGUSER}" "${LOGDIR}/curvecp"
exec setuidgid "${LOGUSER}" multilog t !"gzip -9" n20 s1024000 "${LOGDIR}/curvecp"
| true
|
137727ffa6f36cb5b33884ce3e305802671727f4
|
Shell
|
jg19/scripts-shell-linux
|
/1-basicos/if-elif-else/if-eq-procura-usuario.sh
|
UTF-8
| 404
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Descrição: eemplo de if com teste de igual a 0
echo "Digite o nome do usuário que deseja procurar:"
read nome
echo "Procurando $nome..."
who | grep $nome
if [ $? -eq 0 ]; then
echo "$nome encontrado, este usuário está logado"
else
echo "$nome não foi encontrado"
fi
# Neste script podemos ver:
# - Uso dos comandos who e grep com pipe
# - Uso de if, else para tratar resultado
| true
|
2651d25741f9f5bad49a3f14105cc891342bbd48
|
Shell
|
rdjeddi/typhon
|
/SOURCE/Util/countlines
|
UTF-8
| 268
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
echo "* number of files : "$(ls -l */*.f90 | wc -l)
echo "* total lines : "$(cat */*.f90 | wc -l)
echo "* coded lines : "$(cat */*.f90 | grep -v '^ *$' | grep -v '^ *!' | wc -l)
echo "* comments : "$(cat */*.f90 | grep '^ *!' | wc -l)
| true
|
e126216ed47cb90bce7869178bd04db21425bad0
|
Shell
|
gvtek/MySQLDBSetup
|
/MySQLDBSetup.sh
|
UTF-8
| 779
| 3.328125
| 3
|
[] |
no_license
|
# !/bin/bash
#
# initialize databases from a standard file
# creating databases as needed
DBLIST=$(mysql -e "SHOW DATABASES;" | tail +2)
select DB in $DBLIST "new..."
do
if [[ $DB == "new..." ]]
then
printf "%b" "name for the new db: "
read db test
echo creating new database $DB
mysql -e "CREATE DATABASE IF NOT EXISTS $DB;"
fi
if [ "$DB" ]
then
echo Initializing database: $DB
mysql $DB < ourInit.sql
fi
done
# prompt/variables for username and password should be added for this command depending on the mysql server you are connecting to, which is hopefully flipping secured w/ athentication and not-a-terrible password, will be adding that feature using variable
# shamelessly stolen from the older Oreilly book bash Cookbook by Carl Alhing, JP Vossen, & Cameron Newham
| true
|
c5b2bb031a4b6da619d534f1c752a25ca9cd8792
|
Shell
|
lgtczh/ShellLearning
|
/02SubsAndLookup/merge-sales.sh
|
UTF-8
| 373
| 2.828125
| 3
|
[] |
no_license
|
#! /bin/sh
# merge-sales.sh
# 结合配额与业务员数据
# 删除注释并按姓名排序数据文件
sed '/^#/d' quotas | sort > quotas.sorted
sed '/^#/d' sales | sort -k 2 > sales.sorted
# 以名字作为key进行结合,将结果产生至标准输出
join -2 2 -o 1.1,1.2,2.3 quotas.sorted sales.sorted
# 删除缓存文件
rm quotas.sorted sales.sorted
| true
|
e7b01380ffeb3573dccc83834f5a005d43d24813
|
Shell
|
yaolongli520/rootfs
|
/unit_tests/BAT/bat-gpio-keypad.sh
|
UTF-8
| 4,176
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Tests that the GPIO keys are successfully registered for each board.
# Most boards have a power on/off button, some also have volume up/down, etc.
# Also tests that the state of the GPIO keys is unpressed when running this
# test.
#
set -e
batdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
. $batdir/bat_utils.sh
machine=$(cat /sys/devices/soc0/machine)
case $machine in
'Freescale i.MX6 SoloLite EVK Board')
gpio_possible_devs="kpp"
;;
esac
gpio_possible_devs="snvs-powerkey gpio-keys rpmsg-keys $gpio_possible_devs"
# find supported devs from the list of possible gpio devs
function find_supported_devices
{
devs=""
for dev in $gpio_possible_devs; do
path=$(find /sys/firmware/devicetree/base -name "$dev" -o -name "$dev@?*")
if [ "$path" != "" ]; then
devs="$devs $dev"
fi
done
echo $devs
}
# find event id from sysfs
# $1: event name
function find_event
{
name="$1"
input_devs=$(find /sys/class/input -name "input[0-9]*")
for input_dev in $input_devs; do
if [ $(grep "$name" $input_dev/name) ]; then
event_path=$(ls -d $input_dev/event*)
event_name=$(basename $event_path)
event_id=${event_name//[^0-9]/}
echo $event_id
fi
done
}
# find supported keycodes
# $1: gpio dev name
function find_supported_keycodes
{
dev=$1
keycodes=""
path=$(find /sys/firmware/devicetree/base -name "$dev" -o -name "$dev@?*")
# search for keycodes from linux,keycode
keycodes_path=$(find $path -name linux,keycode -o -name linux,code)
for keycode_path in $keycodes_path; do
code=$(cat $keycode_path)
code_hex=$(echo -ne $code | hexdump -v -e '/1 "%02X"')
code_dec=$(echo "ibase=16;obase=A;${code_hex}" | bc)
keycodes="$keycodes $code_dec"
done
# search for keycodes from linux,keymap
keycodes_path=$(find $path -name linux,keymap)
for keycode_path in $keycodes_path; do
# linux,keypad contains a list of 32-bit integer values
# that map a key matrix; each 32-bit integer value maps
# row and column in the first 16 bits and the actual keycode
# in the last 16 bits.
codes_hex=$(cat $keycode_path | hexdump -v -e '/1 "%02X\n"' | \
awk 'BEGIN { i=0; } { if (i % 4 == 2) printf $1; \
if (i % 4 == 3) printf $1"\n"; i++ }')
for code_hex in $codes_hex; do
code_dec=$(echo "ibase=16;obase=A;${code_hex}" | bc)
keycodes="$keycodes $code_dec"
done
done
echo $keycodes
}
# check the status of the gpio key (pressed, unpressed, error)
# $1: event id
# $2: key type (e.g. EV_KEY)
# $3: key value (e.g. KEY_POWER, KEY_VOLUMEDOWN)
function check_key_unpressed
{
event_id=$1
key_type=$2
key_value=$3
# evtest will return 0 if key is unpressed and 10 if key is pressed
set +e
evtest --query /dev/input/event${event_id} ${key_type} ${key_value}
ret=$?
set -e
if [ $ret -eq 0 ]; then
echo "/dev/input/event${event_id}, ${key_type}, ${key_value}: unpressed"
elif [ $ret -eq 10 ]; then
echo "/dev/input/event${event_id}, ${key_type}, ${key_value}: "\
"invalid state: pressed"
exit 1
else
echo "/dev/input/event${event_id}, ${key_type}, ${key_value}: "\
"invalid state: $ret"
exit 1
fi
}
gpio_devs=$(find_supported_devices)
if [ "$gpio_devs" == "" ]; then
echo "Missing gpio keypad support"
exit 1
fi
for dev in $gpio_devs; do
echo "Testing GPIO dev \"$dev\""
event_id=$(find_event $dev)
if [ "$event_id" == "" ]; then
echo "GPIO dev $dev not found"
exit 1
fi
echo "GPIO dev $dev is registered as /dev/input/event${event_id}"
evtest /dev/input/event${event_id} &
sleep 0.1
pkill evtest
key_values=$(find_supported_keycodes $dev)
if [ "$key_values" == "" ]; then
echo "No supported keycodes found for dev $dev"
exit 1
fi
for key_value in $key_values; do
check_key_unpressed ${event_id} EV_KEY ${key_value}
done
done
| true
|
963268b6cb71ce4944d841a06971f93d8dc03d67
|
Shell
|
mgibson91/geonames-to-json
|
/update/updateGeonamesData.sh
|
UTF-8
| 1,244
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
user=""
password=""
database=""
function usage()
{
echo "./updateGeonamesData.sh -u<user> -p<password> -d<database>"
}
while getopts u:d:p:f: option
do
case "${option}"
in
u) user=${OPTARG};;
p) password=${OPTARG};;
d) database=${OPTARG};;
h) usage;;
esac
done
if [[ -z "${user// }" ]]; then
echo "'--user|-u' field required. Use the '--help|-h' for more information."
exit 1
elif [[ -z "${password// }" ]]; then
echo "'--password|-p' field required. Use the '--help|-h' for more information."
exit 1
elif [[ -z "${database// }" ]]; then
echo "'--database|-p' field required. Use the '--help|-h' for more information."
exit 1
fi
downloadDir="./download"
rm -rf ${downloadDir}
mkdir ${downloadDir}
# Download and unzip country/city info
wget http://download.geonames.org/export/dump/countryInfo.txt -O ${downloadDir}/countryInfo.txt
wget http://download.geonames.org/export/dump/cities15000.zip -O ${downloadDir}/cities15000.zip
unzip ${downloadDir}/cities15000.zip -d ${downloadDir}
mysql -u"$user" -p"$password" "$database" < createGeonameSchema.sql
mysql -u"$user" -p"$password" "$database" < importGeonameDataIntoDatabase.sql
node convertSqlToJson.js
| true
|
5386ca3fd7c931fd49f8864ffa69863844a85e9b
|
Shell
|
rahulyesantharao/dotfiles
|
/bash/prompt.bash
|
UTF-8
| 12,895
| 3
| 3
|
[] |
no_license
|
# taken from: https://github.com/chris-marsh/pureline
# DEFAULTS ############################################
# Color Set
declare -A PL_COLORS=(
[Color_Off]='\[\e[0m\]' # Text Reset
# Foreground
[Default]='\[\e[0;39m\]' # Default
[Black]='\[\e[38;2;40;44;52m\]' # Black
[Red]='\[\e[38;2;224;108;117m\]' # Red
[Green]='\[\e[38;2;152;195;121m\]' # Green
[Yellow]='\[\e[38;2;229;192;123m\]' # Yellow
[Blue]='\[\e[38;2;97;195;239m\]' # Blue
[Purple]='\[\e[38;2;198;120;221m\]' # Purple
[Cyan]='\[\e[38;2;86;182;194m\]' # Cyan
[White]='\[\e[38;2;171;178;191m\]' # White
# Background
[On_Default]='\[\e[49m\]' # Default
[On_Black]='\[\e[48;2;40;44;52m\]' # Black
[On_Red]='\[\e[48;2;224;108;117m\]' # Red
[On_Green]='\[\e[48;2;152;195;121m\]' # Green
[On_Yellow]='\[\e[48;2;229;192;123m\]' # Yellow
[On_Blue]='\[\e[48;2;97;195;239m\]' # Blue
[On_Purple]='\[\e[48;2;198;120;221m\]' # Purple
[On_Cyan]='\[\e[48;2;86;182;194m\]' # Cyan
[On_White]='\[\e[48;2;171;178;191m\]' # White
)
# Character Set
declare -A PL_SYMBOLS=(
[hard_separator]=""
[soft_separator]="│"
[git_branch]="" #"╬"
[git_untracked]="?"
[git_stash]="§"
[git_ahead]="↑"
[git_behind]="↓"
[git_modified]="+"
[git_staged]="•"
[git_conflicts]="*"
[ssh]="╤"
[read_only]="Θ"
[return_code]="x"
[background_jobs]="↨"
[background_jobs]="↔"
[python]="π"
[battery_charging]="■ "
[battery_discharging]="▬ "
)
# Modules
declare -a PL_MODULES=(
#module BG FG
#'time_module Green Black'
'user_module Blue Black'
'ssh_module Green Black'
'virtual_env_module Yellow Black'
'path_module Purple Black'
'git_module White Black'
'return_code_module Red Black'
'newline_module'
'prompt_module Cyan Black'
)
# Options
PL_TIME_SHOW_SECONDS=false
PL_USER_SHOW_HOST=true
PL_USER_SHOW_IP=false
PL_SSH_SHOW_HOST=true
PL_SSH_SHOW_IP=false
PL_PATH_TRIM=0
PL_GIT_DIRTY_FG=Black
PL_GIT_DIRTY_BG=Yellow
PL_GIT_STASH=false
PL_GIT_AHEAD=true
PL_GIT_STAGED=false
PL_GIT_CONFLICTS=false
PL_GIT_MODIFIED=false
PL_GIT_UNTRACKED=false
[[ $(bind -v) =~ "set show-mode-in-prompt off" ]] && PL_ERASE_TO_EOL=true
######################################################
# UTILITIES ##########################################
# -----------------------------------------------------------------------------
# returns a string with the powerline symbol for a section end
# arg: $1 is foreground color of the next section
# arg: $2 is background color of the next section
section_end() {
if [ "$__last_color" == "$2" ]; then
# Section colors are the same, use a foreground separator
local end_char="${PL_SYMBOLS[soft_separator]}"
local fg="$1"
else
# section colors are different, use a background separator
local end_char="${PL_SYMBOLS[hard_separator]}"
local fg="$__last_color"
fi
if [ -n "$__last_color" ]; then
echo "${PL_COLORS[$fg]}${PL_COLORS[On_$2]}$end_char"
fi
}
# -----------------------------------------------------------------------------
# returns a string with background and foreground colours set
# arg: $1 foreground color
# arg: $2 background color
# arg: $3 content
section_content() {
echo "${PL_COLORS[$1]}${PL_COLORS[On_$2]}$3"
}
#------------------------------------------------------------------------------
# Helper function to return normal or super user prompt character
prompt_char() {
[[ ${EUID} -eq 0 ]] && echo "#" || echo "$"
}
######################################################
# MODULES ############################################
# -----------------------------------------------------------------------------
# append to prompt: current time
# arg: $1 foreground color
# arg: $2 background color
# optional variables;
# PL_TIME_SHOW_SECONDS: true/false for hh:mm:ss / hh:mm
time_module() {
local bg_color="$1"
local fg_color="$2"
if [ "$PL_TIME_SHOW_SECONDS" = true ]; then
local content="\t"
else
local content="\A"
fi
PS1+="$(section_end $fg_color $bg_color)"
PS1+="$(section_content $fg_color $bg_color " $content ")"
__last_color="$bg_color"
}
#------------------------------------------------------------------------------
# append to prompt: user@host or user or root@host
# arg: $1 foreground color
# arg: $2 background color
# option variables;
# PL_USER_SHOW_HOST: true/false to show host name/ip
# PL_USER_USE_IP: true/false to show IP instead of hostname
user_module() {
local bg_color="$1"
local fg_color="$2"
local content="\u"
# Show host if true or when user is remote/root
if [ "$PL_USER_SHOW_HOST" = true ]; then
if [ "$PL_USER_USE_IP" = true ]; then
content+="@$(ip_address)"
else
content+="@\h"
fi
fi
PS1+="$(section_end $fg_color $bg_color)"
PS1+="$(section_content $fg_color $bg_color " $content ")"
__last_color="$bg_color"
}
# -----------------------------------------------------------------------------
# append to prompt: indicate if SSH session
# arg: $1 foreground color
# arg: $2 background color
# option variables;
# PL_SSH_SHOW_HOST: true/false to show host name/ip
# PL_SSH_USE_IP: true/false to show IP instead of hostname
ssh_module() {
if [[ "${SSH_CLIENT}" || "${SSH_TTY}" ]]; then
local bg_color="$1"
local fg_color="$2"
local content="${PL_SYMBOLS[ssh]}"
if [ "$PL_SSH_SHOW_HOST" = true ]; then
if [ "$PL_SSH_USE_IP" = true ]; then
content+=" $(ip_address)"
else
content+=" \h"
fi
fi
PS1+="$(section_end $fg_color $bg_color)"
PS1+="$(section_content $fg_color $bg_color " $content ")"
__last_color="$bg_color"
fi
}
# -----------------------------------------------------------------------------
# append to prompt: current directory
# arg: $1 foreground color
# arg; $2 background color
# option variables;
# PL_PATH_TRIM: 0—fullpath, 1—current dir, [x]—trim to x number of dir
path_module() {
local bg_color="$1"
local fg_color="$2"
local content="\w"
if [ "$PL_PATH_TRIM" -eq 1 ]; then
local content="\W"
elif [ "$PL_PATH_TRIM" -gt 1 ]; then
PROMPT_DIRTRIM="$PL_PATH_TRIM"
fi
PS1+="$(section_end $fg_color $bg_color)"
PS1+="$(section_content $fg_color $bg_color " $content ")"
__last_color="$bg_color"
}
# -----------------------------------------------------------------------------
# append to prompt: git branch with indictors for;
# number of; modified files, staged files and conflicts
# arg: $1 foreground color
# arg; $2 background color
# option variables;
# PL_GIT_DIRTY_FG: <color>
# PL_GIT_DIRTY_BG: <color>
# PL_GIT_STASH: true/false
# PL_GIT_AHEAD: true/false
# PL_GIT_STAGED: true/false
# PL_GIT_CONFLICTS: true/false
# PL_GIT_MODIFIED: true/false
# PL_GIT_UNTRACKED: true/false
git_module() {
local git_branch=$(git rev-parse --abbrev-ref HEAD 2> /dev/null)
if [ -n "$git_branch" ]; then
local bg_color="$1"
local fg_color="$2"
local content="${PL_SYMBOLS[git_branch]} $git_branch"
# if [ "$PL_GIT_STASH" = true ]; then
# local number_stash="$(git stash list 2>/dev/null | wc -l)"
# if [ ! "$number_stash" -eq 0 ]; then
# content+="${PL_SYMBOLS[git_stash]}$number_stash"
# fi
# fi
#
if [ "$PL_GIT_AHEAD" = true ]; then
local number_ahead="$(git rev-list --count --right-only '@{upstream}...HEAD' 2>/dev/null)"
local number_behind="$(git rev-list --count --left-only '@{upstream}...HEAD' 2>/dev/null)"
if [ ! "0$number_ahead" -eq 0 -o ! "0$number_behind" -eq 0 ]; then
if [ ! "$number_ahead" -eq 0 ]; then
content+="${PL_SYMBOLS[git_ahead]}$number_ahead"
fi
if [ ! "$number_behind" -eq 0 ]; then
content+="${PL_SYMBOLS[git_behind]}$number_behind"
fi
fi
fi
#
# if [ "$PL_GIT_STAGED" = true ]; then
# local number_staged="$(git diff --staged --name-only --diff-filter=AM 2> /dev/null | wc -l)"
# if [ ! "$number_staged" -eq "0" ]; then
# content+=" ${PL_SYMBOLS[soft_separator]} ${PL_SYMBOLS[git_staged]}$number_staged"
# fi
# fi
#
# if [ "$PL_GIT_CONFLICTS" = true ]; then
# local number_conflicts="$(git diff --name-only --diff-filter=U 2> /dev/null | wc -l)"
# if [ ! "$number_conflicts" -eq "0" ]; then
# content+=" ${PL_SYMBOLS[soft_separator]} ${PL_SYMBOLS[git_conflicts]}$number_conflicts"
# fi
# fi
#
# if [ "$PL_GIT_MODIFIED" = true ]; then
# local number_modified="$(git diff --name-only --diff-filter=M 2> /dev/null | wc -l )"
# if [ ! "$number_modified" -eq "0" ]; then
# content+=" ${PL_SYMBOLS[soft_separator]} ${PL_SYMBOLS[git_modified]}$number_modified"
# fi
# fi
#
# if [ "$PL_GIT_UNTRACKED" = true ]; then
# local number_untracked="$(git ls-files --other --exclude-standard | wc -l)"
# if [ ! "$number_untracked" -eq "0" ]; then
# content+=" ${PL_SYMBOLS[soft_separator]} ${PL_SYMBOLS[git_untracked]}$number_untracked"
# fi
# fi
#
if [ -n "$(git status --porcelain)" ]; then
if [ -n "$PL_GIT_DIRTY_FG" ]; then
fg_color="$PL_GIT_DIRTY_FG"
fi
if [ -n "$PL_GIT_DIRTY_BG" ]; then
bg_color="$PL_GIT_DIRTY_BG"
fi
fi
PS1+="$(section_end $fg_color $bg_color)"
PS1+="$(section_content $fg_color $bg_color " $content ")"
__last_color="$bg_color"
fi
}
# -----------------------------------------------------------------------------
# append to prompt: python virtual environment name
# arg: $1 foreground color
# arg; $2 background color
virtual_env_module() {
# advanced check if in virtualenv
# if command -v python &>/dev/null; then
# INVENV=$(python -c 'import sys; print ("1" if hasattr(sys, "real_prefix") else "0")')
# elif command -v python3 &>/dev/null; then
# INVENV=$(python3 -c 'import sys; print ("1" if hasattr(sys, "real_prefix") else "0")')
# else
# if [ -n "$VIRTUAL_ENV" ]; then
# INVENV=1
# else
# INVENV=0
# fi
if [ -n "$VIRTUAL_ENV" ]; then
local venv="${VIRTUAL_ENV##*/}"
local bg_color="$1"
local fg_color="$2"
local content=" ${PL_SYMBOLS[python]} $venv"
PS1+="$(section_end $fg_color $bg_color)"
PS1+="$(section_content $fg_color $bg_color "$content ")"
__last_color="$bg_color"
fi
}
# -----------------------------------------------------------------------------
# append to prompt: append a '$' prompt with optional return code for previous command
# arg: $1 foreground color
# arg; $2 background color
prompt_module() {
local bg_color="$1"
local fg_color="$2"
local content=" $(prompt_char) "
#PS1+="$(section_end $fg_color $bg_color)"
#PS1+="$(section_content $fg_color $bg_color "$content")"
PS1+="$content"
__last_color="$bg_color"
}
# -----------------------------------------------------------------------------
# append to prompt: append a '$' prompt with optional return code for previous command
# arg: $1 foreground color
# arg; $2 background color
return_code_module() {
if [ ! "$__return_code" -eq 0 ]; then
local bg_color="$1"
local fg_color="$2"
local content=" ${PL_SYMBOLS[return_code]} $__return_code "
PS1+="$(section_end $fg_color $bg_color)"
PS1+="$(section_content $fg_color $bg_color "$content")"
__last_color="$bg_color"
fi
}
# -----------------------------------------------------------------------------
# append to prompt: end the current promptline and start a newline
newline_module() {
if [ -n "$__last_color" ]; then
PS1+="$(section_end $__last_color 'Default')"
fi
PS1+="${PL_COLORS[Color_Off]}"
PS1+="\n"
unset __last_color
}
######################################################
# PURELINE PROMPT ####################################
# -----------------------------------------------------------------------------
function pureline_ps1 {
__return_code=$? # save the return code
PS1="" # reset the command prompt
# load the modules
for module in "${!PL_MODULES[@]}"; do
${PL_MODULES[$module]}
done
# final end point
# if [ -n "$__last_color" ]; then
# PS1+="$(section_end $__last_color 'Default')"
# else
if [ ! -n "$__last_color" ]; then
# No modules loaded, set a basic prompt
PS1="PL | No Modules Loaded: $(prompt_char)"
fi
# cleanup
PS1+="${PL_COLORS[Color_Off]}"
if [ "$PL_ERASE_TO_EOL" = true ]; then
PS1+="\[\e[K\]"
fi
#PS1+=" "
unset __last_color
unset __return_code
}
# grab a snapshot of the systems PROMPT_COMMAND. this can then be
# appended to pureline when sourced without continually appending
# pureline to itself.
if [ -z "$__PROMPT_COMMAND" ]; then
__PROMPT_COMMAND="$PROMPT_COMMAND"
fi
# dynamically set the PS1
[[ ! ${PROMPT_COMMAND} =~ 'pureline_ps1;' ]] && PROMPT_COMMAND="pureline_ps1; $PROMPT_COMMAND" || true
| true
|
1035c25f2626cf1ff9bb37609c290c95cfba3efc
|
Shell
|
replit/polygott
|
/out/share/polygott/phase2.d/ballerina
|
UTF-8
| 973
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -ev
shopt -s dotglob
export HOME=/home/runner
rsync --delete --recursive --links --perms --times /opt/homes/default/ /home/runner
echo 'Setup parents of ballerina'
rsync --recursive --links --perms --times /opt/homes/java/ /home/runner/
chown -R $(id -u):$(id -g) /home/runner
echo 'Setup ballerina'
cd "${HOME}"
wget https://dist.ballerina.io/downloads/1.2.13/ballerina-linux-installer-x64-1.2.13.deb
dpkg -i ballerina-linux-installer-x64-1.2.13.deb
rm -r ballerina-linux-installer-x64-1.2.13.deb
if [[ -n "$(ls -A /home/runner)" ]]; then
echo Storing home for ballerina
mkdir -p /opt/homes/ballerina
rsync --archive --no-specials --no-devices /home/runner/ /opt/homes/ballerina
chown runner:runner -R /opt/homes/ballerina
find /home/runner/ -mindepth 1 -maxdepth 1 -exec rm -rf {} \;
ls -A /opt/homes/ballerina
fi
chown runner:runner -R /home/runner /config /opt/virtualenvs
if [[ -n "$(ls /tmp/)" ]]; then
rm -rf /tmp/*
fi
rm /phase2.sh
| true
|
5b3391fcd66bf49c7f48c2f267e833e63b8ff69d
|
Shell
|
fr34k8/prowler
|
/checks/check312
|
UTF-8
| 3,010
| 2.625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Prowler - the handy cloud security tool (copyright 2019) by Toni de la Fuente
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# Remediation:
#
# https://d1.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf
#
# aws logs put-metric-filter \
# --region us-east-1 \
# --log-group-name CloudTrail/CloudWatchLogGroup \
# --filter-name VPCGatewayConfigChanges \
# --filter-pattern '{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }' \
# --metric-transformations metricName=GatewayEventCount,metricNamespace=CloudTrailMetrics,metricValue=1
#
# aws cloudwatch put-metric-alarm \
# --region us-east-1 \
# --alarm-name VPCGatewayConfigChangesAlarm \
# --alarm-description "Triggered by VPC Customer/Internet Gateway changes." \
# --metric-name GatewayEventCount \
# --namespace CloudTrailMetrics \
# --statistic Sum \
# --comparison-operator GreaterThanOrEqualToThreshold \
# --evaluation-periods 1 \
# --period 300 \
# --threshold 1 \
# --actions-enabled \
# --alarm-actions arn:aws:sns:us-east-1:123456789012:CloudWatchAlarmTopic
CHECK_ID_check312="3.12"
CHECK_TITLE_check312="[check312] Ensure a log metric filter and alarm exist for changes to network gateways"
CHECK_SCORED_check312="SCORED"
CHECK_CIS_LEVEL_check312="LEVEL1"
CHECK_SEVERITY_check312="Medium"
CHECK_ASFF_TYPE_check312="Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
CHECK_ASFF_RESOURCE_TYPE_check312="AwsCloudTrailTrail"
CHECK_ALTERNATE_check312="check312"
CHECK_SERVICENAME_check312="vpc"
CHECK_RISK_check312='Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.'
CHECK_REMEDIATION_check312='It is recommended that a metric filter and alarm be established for unauthorized requests.'
CHECK_DOC_check312='https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html'
CHECK_CAF_EPIC_check312='Logging and Monitoring'
check312(){
check3x '\$\.eventName\s*=\s*CreateCustomerGateway.+\$\.eventName\s*=\s*DeleteCustomerGateway.+\$\.eventName\s*=\s*AttachInternetGateway.+\$\.eventName\s*=\s*CreateInternetGateway.+\$\.eventName\s*=\s*DeleteInternetGateway.+\$\.eventName\s*=\s*DetachInternetGateway'
}
| true
|
8d9595afd73c8e4445e65a3aa97544e84989a0d2
|
Shell
|
nataliamedinat/holberton-system_engineering-devops
|
/0x05-processes_and_signals/4-to_infinity_and_beyond
|
UTF-8
| 114
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Print the same sentence forever, pausing two seconds between lines.
while :; do
    printf '%s\n' "To infinity and beyond"
    sleep 2
done
| true
|
e25dc33a750090da9eb9a099ae2bf2ba06ddd8ec
|
Shell
|
maksbotan/scripts
|
/rotate.sh
|
UTF-8
| 608
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/sh
start(){
gpg -d ~/.vk/pass.bin 2>/dev/null > ~/pass
~/scripts/vk_spy.py > /home/maksbotan/vk_spy.log 2>&1 &
}
stop(){
killall vk_spy.py && sleep 5
}
rotate(){
mv ~/log.json{,-$(date +%F)}
mv ~/vk_spy.debug{,-$(date +%F)}
}
case $1 in
--restart)
stop
start
;;
--rotate)
stop
rotate
start
;;
--keep)
if [ -z "$(ps -Af | grep vk_spy | grep -v grep)" ]; then
mv ~/vk_spy.debug{,-$(date +%T)}
start
fi
;;
*)
echo "Usage: $0 --restart|rotate|keep"
;;
esac
| true
|
775451917cdbbbcc1ce5f43bd661f229b97867c2
|
Shell
|
ginnocen/Run3Analysisvalidation
|
/codeHF/config_input.sh
|
UTF-8
| 2,768
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# shellcheck disable=SC2034 # Ignore unused parameters.
# Input specification for runtest.sh
# (Modifies input parameters.)
INPUT_CASE=2 # Input case
NFILESMAX=1 # Maximum number of processed input files. (Set to -0 to process all; to -N to process all but the last N files.)
# Number of input files per job (Automatic optimisation on if < 1.)
NFILESPERJOB_CONVERT=0 # Conversion
NFILESPERJOB_ALI=0 # AliPhysics
NFILESPERJOB_O2=1 # O2
# Maximum number of simultaneously running O2 jobs
NJOBSPARALLEL_O2=$(python3 -c "print(min(10, round($(nproc) / 2)))")
JSONRUN3="dpl-config_run3.json" # Run 3 tasks parameters
# Run 5 tasks parameters for open HF study
JSONRUN5_HF="dpl-config_run5_hf.json"
# Run 5 tasks parameters for onia studies:
# J/psi and X (higher pt cut on 2-prong decay tracks and no DCA cut on single track)
JSONRUN5_ONIAX="dpl-config_run5_oniaX.json"
JSON="$JSONRUN3"
# Default settings:
# INPUT_FILES="AliESDs.root"
# INPUT_SYS="pp"
# INPUT_RUN=2
# ISINPUTO2=0
# ISALICE3=0
# ISMC=0
# JSON="$JSONRUN3"
INPUT_BASE="/data2/data" # alicecerno2
case $INPUT_CASE in
1)
INPUT_LABEL="Run 2, p-p 5.02 TeV LHC17p, real"
INPUT_DIR="$INPUT_BASE/Run2/pp_5.02TeV/real/LHC17p_pass1_CENT_woSDD";;
2) # reference
INPUT_LABEL="Run 2, p-p 5.02 TeV LHC17p, MC LHC18a4a2_cent"
INPUT_DIR="$INPUT_BASE/Run2/pp_5.02TeV/sim/LHC18a4a2_cent/282099"
ISMC=1;;
3)
INPUT_LABEL="Run 2, p-p 5.02 TeV LHC17p, MC LHC18a4a2_cent"
INPUT_DIR="$INPUT_BASE/Run2/pp_5.02TeV/sim/LHC18a4a2_cent/282341"
ISMC=1;;
4)
INPUT_LABEL="Run 2, Pb-Pb 5.02 TeV LHC15o, real"
INPUT_DIR="$INPUT_BASE/Run2/PbPb_5.02TeV/real/LHC15o"
INPUT_SYS="PbPb";;
5)
INPUT_LABEL="Run 2, Pb-Pb 5.02 TeV LHC15o, MC LHC15k1a3"
INPUT_DIR="$INPUT_BASE/Run2/PbPb_5.02TeV/sim/LHC15k1a3"
INPUT_SYS="PbPb"
ISMC=1;;
6)
INPUT_LABEL="Run 2, p-p 13 TeV LHC16p, MC LHC19g6f3, dedicated Ξc"
INPUT_DIR="$INPUT_BASE/Run2/pp_13TeV/sim/LHC19g6f3"
ISMC=1;;
7)
INPUT_LABEL="Run 3, p-p 13.6 TeV, MC LHC21k6, general purpose"
INPUT_DIR="$INPUT_BASE/Run3/pp_13.6TeV/sim/LHC21k6/302028/AOD"
INPUT_FILES="AO2D.root"
ISINPUTO2=1
INPUT_RUN=3
ISMC=1;;
8)
INPUT_LABEL="Run 2, p-p 13 TeV LHC18f, MC LHC20f4a (ESD)"
INPUT_DIR="$INPUT_BASE/Run2/pp_13TeV/sim/LHC20f4a"
ISMC=1;;
9)
INPUT_LABEL="Run 2, p-p 13 TeV LHC18f, MC LHC20f4a (AO2D)"
INPUT_DIR="$INPUT_BASE/Run2/pp_13TeV/sim_converted/LHC20f4a"
INPUT_FILES="AO2D.root"
ISINPUTO2=1
ISMC=1;;
10)
INPUT_LABEL="Run 2, p-p 13 TeV, LHC17j (AO2D)"
INPUT_DIR="$INPUT_BASE/Run2/pp_13TeV/real_converted/LHC17j_20220601" # converted good AO2Ds
INPUT_FILES="AO2D.root"
ISINPUTO2=1;;
esac
| true
|
cd533f45b8b21d2f917995e6c13d6f7d9fa2ad6f
|
Shell
|
stephanfriedrich/dockerbunker
|
/data/services/seafilepro/containers.sh
|
UTF-8
| 2,027
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
seafilepro_db_dockerbunker() {
docker run -d \
--name=${FUNCNAME[0]//_/-} \
--restart=always \
--network dockerbunker-${SERVICE_NAME} \
--net-alias=db \
-v ${SERVICE_NAME}-db-vol-1:${volumes[${SERVICE_NAME}-db-vol-1]} \
--env MYSQL_ROOT_PASSWORD=${DBROOT} \
--env MYSQL_USER=${DBUSER} \
--env MYSQL_PASSWORD=${DBPASS} \
${IMAGES[db]} >/dev/null
if [[ -z $keep_volumes ]];then
if ! docker exec seafilepro-db-dockerbunker mysqladmin ping -h"127.0.0.1" --silent;then
echo -en "\n\e[3m\xe2\x86\x92 Waiting for Seafile DB to be ready...\n\n"
while ! docker exec seafilepro-db-dockerbunker mysqladmin ping -h"127.0.0.1" --silent;do
sleep 3
done
fi
fi
}
seafilepro_setup_dockerbunker() {
docker run -it --rm \
--name=${FUNCNAME[0]//_/-} \
--network=dockerbunker-${SERVICE_NAME} \
-v ${SERVICE_NAME}-data-vol-1:${volumes[${SERVICE_NAME}-data-vol-1]} \
${IMAGES[service]} $1
}
seafilepro_memcached_dockerbunker() {
docker run --entrypoint memcached -d \
--name=${FUNCNAME[0]//_/-} \
--restart=always \
--net-alias=memcached \
--network dockerbunker-seafilepro \
${IMAGES[memcached]} -m 256 >/dev/null
}
seafilepro_elasticsearch_dockerbunker() {
docker run -d \
--name=${FUNCNAME[0]//_/-} \
--restart=always \
--net-alias=elasticsearch \
--network dockerbunker-seafilepro \
-e discovery.type=single-node \
-e bootstrap.memory_lock=true \
-e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
--ulimit memlock=-1:-1 \
-m 2g \
-v ${SERVICE_NAME}-elasticsearch-vol-1:${volumes[${SERVICE_NAME}-elasticsearch-vol-1]} \
${IMAGES[elasticsearch]} >/dev/null
}
seafilepro_service_dockerbunker() {
docker run -e TZ=Europe/Amsterdam -d \
--name=${FUNCNAME[0]//_/-} \
--restart=always \
--network ${NETWORK} \
--network dockerbunker-seafilepro \
--env-file "${SERVICE_ENV}" \
-e DB_ROOT_PASSWD=${DBROOT} \
-e SEAFILE_SERVER_HOSTNAME=${SERVICE_DOMAIN} \
-v ${SERVICE_NAME}-data-vol-2:${volumes[${SERVICE_NAME}-data-vol-2]} \
${IMAGES[service]} >/dev/null
}
| true
|
d4331b88fee142fa7fc50c5ecc7ccc3befc251bd
|
Shell
|
ldraw-linux/ldraw-library
|
/ldraw-wrapper
|
UTF-8
| 2,094
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash
# Run LDraw application after checking/creating of $LDRAWHOME directory
# This is a wrapper for all the programs that are part of the
# linux-ldraw project. The system LDraw library is installed in
# /usr/share/ldraw To allow users to have their own unofficial
# parts, a custom ldconfig.ldr or other custom files, all the
# programs are configured to use ~/.ldraw as the LDraw library
# path by setting the LDRAWDIR environment variable.
#
# If this directory does not exist, it is created and the
# contents of the system-wide ldraw library is symlinked there.
#
# All the linux-ldraw packages are supposed to suffix their
# binary name with .bin and create a symlink named with the
# original binary name pointing to this script
# Abort with an error message: printed to stderr when attached to a
# terminal, otherwise shown via an xmessage pop-up. Always exits 1.
function die() {
    ERROR="ERROR: $1"
    if [ ! -t 0 ] ; then
        xmessage "$ERROR"
    else
        echo "$ERROR" >&2
    fi
    exit 1
}
# Resolve the real binary: this wrapper is invoked via a symlink named after
# the target program, while the actual executable carries a '.bin' suffix.
APPLICATION="$(which -- "${0##*/}.bin")" || die "unknown application"

# Per-user library directory and the system-wide LDraw library it mirrors.
LDRAWHOME="${HOME}/.ldraw"
LDRAWSYS="/usr/share/ldraw"
# Create the per-user LDraw directory, aborting via die() when that fails.
function create_ldraw_dir() {
    local dir="$1"
    echo "Creating LDraw directory $dir ."
    if ! mkdir "$dir"; then
        die "Cannot create ${dir}. You may want to set the LDRAWDIR environment variable to point to a copy of the LDraw library."
    fi
}
# Populate the user LDraw directory with symlinks to every entry of the
# system library. Existing entries are left alone; a warning is printed when
# an existing entry is not a symlink back into ${LDRAWSYS}.
function fill_ldraw_dir() {
    local LDRAWHOME="$1"
    local d_full d
    for d_full in "${LDRAWSYS}"/*; do
        d="${d_full##*/}"
        if [[ ! -e "${LDRAWHOME}/${d}" ]]; then
            echo "Creating symlink ${LDRAWHOME}/${d} -> ${LDRAWSYS}/${d}:"
            ln -s "${LDRAWSYS}/${d}" "${LDRAWHOME}/${d}" || die "Cannot create symlink ${LDRAWHOME}/${d} ."
        elif [[ ! -L "${LDRAWHOME}/${d}" ]] || [[ $(readlink "${LDRAWHOME}/${d}") != "${LDRAWSYS}/${d}" ]]; then
            echo "Warning: $LDRAWHOME/$d is not a symlink to the system LDraw library."
        fi
    done
    # Bug fix: brace expansion does not happen inside double quotes, so the
    # original 'mkdir -p "${LDRAWHOME}/unofficial/{p,parts}"' created one
    # directory literally named '{p,parts}'. Create both paths explicitly.
    mkdir -p "${LDRAWHOME}/unofficial/p" "${LDRAWHOME}/unofficial/parts"
}
# main()
if [ -z "${LDRAWDIR}" ] ; then
if [ ! -d "$LDRAWHOME" ] ; then
if [ -e "$LDRAWHOME" ] ; then
die "Cannot create $LDRAWHOME directory."
else
create_ldraw_dir "$LDRAWHOME"
fi
fi
fill_ldraw_dir "$LDRAWHOME"
export LDRAWDIR="${LDRAWHOME}"
fi
exec $APPLICATION "$@"
| true
|
929f0c15c0bace41f5d58741191df9c7a9eef3ab
|
Shell
|
miguelrjim/lolmanager
|
/.openshift/action_hooks/build
|
UTF-8
| 332
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# OpenShift build hook: install front-end deps with bower and build with
# gulp, both run from the repo checkout. HOME temporarily points at the
# repo so the tools keep their caches/config there.
OLD_HOME=$HOME
echo "Bower & Gulp"
export HOME=$OPENSHIFT_REPO_DIR
repo=${OPENSHIFT_REPO_DIR}
[ -f "${repo}"/bower.json ] && (cd "${repo}"; node_modules/.bin/bower install)
[ -f "${repo}"/gulpfile.js ] && (cd "${repo}"; node_modules/.bin/gulp build)
export HOME=$OLD_HOME
| true
|
eaf08162e9133dbb6e9e3b632ff10e0835ee7f9d
|
Shell
|
sschuberth/dev-scripts
|
/git/git-log-json
|
UTF-8
| 275
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Convert 'git log' into a JSON array of {commit, author, date, message}.
# See https://til.simonwillison.net/jq/git-log-json.
# %x00 emits NUL bytes as field separators so subjects containing quotes or
# other JSON-hostile characters cannot break the jq parsing below.
git log --pretty=format:'%H%x00%an <%ae>%x00%ad%x00%s%x00' | \
jq -R -s '[split("\n")[:-1] | map(split("\u0000")) | .[] | {
"commit": .[0],
"author": .[1],
"date": .[2],
"message": .[3]
}]'
| true
|
2bf391ae025c660d1b8db3dc0c9d19e13a3f9102
|
Shell
|
akoenig/bldr
|
/bldr
|
UTF-8
| 755
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# bldr
#
# Performs the build process of an application
# in an ad-hoc container-like environment
#
#
# Author: André König <andre.koenig@posteo.de>
#

# Manifest expected in the current working directory.
PKG=package.json
CWD=$(readlink -f .)
MANIFEST=$CWD/$PKG

echo "bldr"

# Bail out early when the project has no package.json.
# NOTE(review): $MANIFEST is unquoted — would misbehave on paths with
# whitespace.
if [ ! -f $MANIFEST ]; then
    echo "Well, there is no '$PKG' in this project. Exiting. Bye bye.
"
    exit 1
fi
# Extract the "build" script command from ./package.json.
# The final unquoted expansion intentionally word-splits and re-joins the
# value, collapsing runs of whitespace exactly like the original pipeline.
getBuildCommand() {
    local raw
    raw=$(grep -Po '"build":.*?[^\\]",' package.json | sed -e 's/\"build"\://g' -e 's/\"//g' -e 's/\,//g')
    echo $raw
}
CMD=$(getBuildCommand)

# No "build" entry found in package.json.
if [ -z "$CMD" ]; then
    echo "No build command defined in the '$PKG'. Bye bye.
"
    exit 1;
fi

echo "Starting the isolated box in which the build process will be executed ..."

# Run the build inside the 'bldr' schroot as the invoking user.
# $CMD is deliberately unquoted so it word-splits into program + arguments.
schroot -c bldr -u $USER -- $CMD
exit 0
| true
|
891ec845d66aaee3a760a00b7ff115ac8e53d972
|
Shell
|
karlmolina/dotfiles
|
/.zshrc
|
UTF-8
| 3,315
| 2.609375
| 3
|
[] |
no_license
|
source ~/zsh-defer/zsh-defer.plugin.zsh
# If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH
export PATH="$PATH:$HOME/flutter/bin"
ZINIT_HOME="${XDG_DATA_HOME:-${HOME}/.local/share}/zinit/zinit.git"
[ ! -d $ZINIT_HOME ] && mkdir -p "$(dirname $ZINIT_HOME)"
[ ! -d $ZINIT_HOME/.git ] && git clone https://github.com/zdharma-continuum/zinit.git "$ZINIT_HOME"
source "${ZINIT_HOME}/zinit.zsh"
# Autocompletions and sources nvm
zinit ice wait'2' lucid
zi snippet OMZP::nvm
# zinit ice wait lucid
# zi snippet OMZP::git
# auto-completion for docker
zinit ice wait lucid
zi snippet OMZP::docker
zinit ice wait lucid
zi snippet OMZP::fzf
zinit ice wait lucid
zi snippet OMZP::z
zinit ice wait lucid
zi snippet OMZP::golang
# Autocomplete for sdkman
zi snippet OMZP::sdk
# Load pyenv if it's found
zinit ice wait lucid
zi snippet OMZP::pyenv
# completion support for awscli
zinit ice wait lucid
zi snippet OMZP::aws
zi snippet OMZP::vi-mode
VI_MODE_SET_CURSOR=true
VI_MODE_RESET_PROMPT_ON_MODE_CHANGE=true
VI_MODE_CURSOR_INSERT=5
zicompinit
zicdreplay
zi load Aloxaf/fzf-tab
zi load zsh-users/zsh-autosuggestions
### Fix slowness of pastes with zsh-syntax-highlighting.zsh
# pasteinit() {
# OLD_SELF_INSERT=${${(s.:.)widgets[self-insert]}[2,3]}
# zle -N self-insert url-quote-magic # I wonder if you'd need `.url-quote-magic`?
# }
#
# pastefinish() {
# zle -N self-insert $OLD_SELF_INSERT
# }
# zstyle :bracketed-paste-magic paste-init pasteinit
# zstyle :bracketed-paste-magic paste-finish pastefinish
### Fix slowness of pastes
timezsh() {
shell=${1-$SHELL}
for i in $(seq 1 10); do /usr/bin/time $shell -i -c exit; done
}
# Fzf options
# So we can see files beginning with '.'
export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"
# Bat theme
export BAT_THEME="Coldark-Cold"
# Set editor as vim
export EDITOR='vim'
# Source alias files which source other alias files
source ~/.zsh_aliases
source ~/.bash_aliases
source ~/.extra.sh
# Source other function files
source ~/.fzfgitfunctions.zsh
# Less won't be used if it fits in the page
export LESS="-F -X $LESS"
# zmodload zsh/complist
# zstyle ':completion:*' menu select
# use the vi navigation keys in menu completion
# bindkey -M menuselect '^h' vi-backward-char
# bindkey -M menuselect '^k' vi-up-line-or-history
# bindkey -M menuselect '^l' vi-forward-char
# bindkey -M menuselect '^j' vi-down-line-or-history
# bindkey -M menuselect '?' history-incremental-search-forward
# Easier bindings than going to cmd mode then pressing j or k
bindkey -M main '^k' up-history
bindkey -M main '^j' down-history
bindkey -M main '^l' end-of-line
bindkey -M vicmd '^k' up-history
bindkey -M vicmd '^j' down-history
bindkey -M vicmd '^l' end-of-line
autoload -U url-quote-magic
zle -N self-insert url-quote-magic
# stop globing with ? and *
unsetopt nomatch
# show hidden dotfiles when using tab completion
setopt globdots
# set golang path
export GOPATH=$(go env GOPATH)
# add golang bin path to PATH
export PATH=$PATH:$(go env GOPATH)/bin
zsh-defer eval "$(pyenv init -)"
zsh-defer source /usr/local/opt/chruby/share/chruby/chruby.sh
zsh-defer source /usr/local/opt/chruby/share/chruby/auto.sh
# chruby ruby-3.1.2
zsh-defer source "$HOME/.sdkman/bin/sdkman-init.sh"
eval "$(starship init zsh)"
| true
|
84b056fa1e7a12f8ec438da853172b7b599e7078
|
Shell
|
taiji4444/cc
|
/DHshell/mkfs_mount.sh
|
UTF-8
| 530
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Format a raw disk with XFS and register it in /etc/fstab by UUID.
#
# Usage:   mkfs_mount.sh <device> <mount-point>
# Example: mkfs_mount.sh sdb /data
#
# Must run as root; <device> is given without the /dev/ prefix.

if [ $# -ne 2 ];then
    echo "argument error"
    echo "example: sh $0 sdb /data"
    exit 1
fi

disk=/dev/$1
datadir=$2

# Create the mount point if needed.
if [ ! -d "$datadir" ];then
    mkdir -p "$datadir"
fi

# Bug fix: the original required 'fdisk -l | grep | wc -l' to equal exactly
# 1, which wrongly rejects disks that already carry partitions (they print
# several matching lines). Checking for a block-device node is correct.
if [ ! -b "$disk" ];then
    echo "$disk does not exist"
    exit 1
fi

# Bug fix: the original tested $? of the enclosing 'if' instead of mkfs.xfs
# and kept going even when formatting failed.
if ! mkfs.xfs "$disk"; then
    echo "format disk failed: $disk"
    exit 1
fi

# Bug fix: 'blkid $disk' prints '<dev>: UUID="..." TYPE="..."', so the
# original fstab line came out as 'UUID=/dev/sdb: UUID="..." ...'.
# Ask blkid for the bare UUID value instead.
uuid=$(blkid -s UUID -o value "$disk")
if [ -z "$uuid" ]; then
    echo "cannot read UUID of $disk"
    exit 1
fi

# Drop any stale fstab entry mentioning the mount point, editing in place.
# (The original sed lacked -i — it only printed to stdout — and built an
# invalid expression for absolute paths, so /etc/fstab was never cleaned.)
sed -i "\|$datadir|d" /etc/fstab

echo "UUID=$uuid $datadir xfs defaults 0 0" >> /etc/fstab
# TODO(review): the script never mounts the filesystem despite its name;
# run 'mount -a' afterwards or add it here if desired.
| true
|
87bb4d85c9128a26b4db8f3c9abe8e0be41e9543
|
Shell
|
asyrjasalo/kongman
|
/make_dc_and_examples
|
UTF-8
| 835
| 2.75
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
set -e
unset KONG_ADMIN_URL KONG_ADMIN_KEY
### recreate env and install package from source
make --silent dc_rm dc install
### create or upgrade Kong Admin API loopback requiring auth
kong-incubator --yaml examples/kadmin.yaml --output None
admin_key=$(kong-incubator --key-auth root --output key)
echo "Send header 'apikey:$admin_key' to authenticate http://localhost:8000/kadmin"
### example to create a new service via the authenticated Admin API
KONG_ADMIN_URL=http://localhost:8000/kadmin kong-incubator \
--admin-key=$admin_key \
--yaml examples/mockbin.yaml \
--output None
mockbin_key=$(KONG_ADMIN_KEY=$admin_key kong-incubator \
--admin-url=http://localhost:8000/kadmin \
--key-auth mocker \
--output key)
echo "Send header 'apikey:$mockbin_key' to authenticate http://localhost:8000"
| true
|
d45ebf32255a0823bdd2a17e82d402320fd2be54
|
Shell
|
jamescostian/.config
|
/update-setup.sh
|
UTF-8
| 1,037
| 3.296875
| 3
|
[
"ISC"
] |
permissive
|
#!/usr/bin/env bash
# setup.sh takes the existing configuration and writes it, it does _not_ absorb the configuration on your machine; it just overwrites it.
# But what if you have changes on your machine that are worth keeping? Run this script to prevent setup.sh from messing up your setup.
if [[ "$(uname)" = "Darwin" ]]; then
brew bundle dump --force --file="$HOME/.config/Brewfile$MULTITENANT_SUFFIX"
else
~/.config/scripts$MULTITENANT_SUFFIX/helpers/list-apt-packages > ~/.config/apt-packages
~/.config/scripts$MULTITENANT_SUFFIX/helpers/list-snaps-installed > ~/.config/snaps-installed
dconf dump /desktop/ibus/ > ~/ibus.dconf
fi
code --list-extensions > ~/.config/Code/User/extensions
# Update the version of ~/.ssh/config in 1Password
if [[ -s ~/.ssh/config ]]; then
eval $(op signin --account costian.1password.com)
# Remove the old SSH config (1Password's CLI doesn't support updating documents :/)
op document delete "SSH Config"
cd ~/.ssh
op document create config --title "SSH Config"
fi
# TODO: export wifi networks
| true
|
7b60f4cade6527ee045571748150fc95be055e70
|
Shell
|
DarcyChang/MyProjects
|
/Senao/UTM-T55/wg/T55_MFG/tpm_test.sh
|
UTF-8
| 1,784
| 3.03125
| 3
|
[] |
no_license
|
#! /bin/bash
# TPM self-test for the T55 MFG suite: run tpm_selftest plus the key
# verification helper, then append a timestamped PASS/FAIL line to the
# shared result file and archive the tool logs.
#source /root/automation/Library/path.sh

# Pull configured paths out of the mfg_version manifest (field 2 of each
# matching line).
# NOTE(review): several of these variables are unused in this script; they
# appear to be a boilerplate preamble shared across the MFG test scripts.
test_result_path=$(cat /root/automation/T55_MFG/mfg_version | grep "test_result_path" | awk '{print $2}')
test_result_failure_path=$(cat /root/automation/T55_MFG/mfg_version | grep "test_result_failure_path" | awk '{print $2}')
all_test_done_path=$(cat /root/automation/T55_MFG/mfg_version | grep "all_test_done_path" | awk '{print $2}')
memory_stress_test_path=$(cat /root/automation/T55_MFG/mfg_version | grep "memory_stress_test_path" | awk '{print $2}')
log_backup_path=$(cat /root/automation/T55_MFG/mfg_version | grep "log_backup_path" | awk '{print $2}')
log_path=$(cat /root/automation/T55_MFG/mfg_version | grep "log_path" | awk '{print $2}')
log_folder_path=$(cat /root/automation/T55_MFG/mfg_version | grep "log_folder_path" | awk '{print $2}')
time_path=$(cat /root/automation/T55_MFG/mfg_version | grep "time_path" | awk '{print $2}')
tmp_path=$(cat /root/automation/T55_MFG/mfg_version | grep "tmp_path" | awk '{print $2}')
tmp_golden_path=$(cat /root/automation/T55_MFG/mfg_version | grep "tmp_golden_path" | awk '{print $2}')

# Run the self-test, tee-ing output to the persistent log and a scratch file.
tpm_selftest -l debug | tee -a $log_path | tee $tmp_path
# Second whitespace-separated field of the "succeeded" line.
get_tpm_status=$( cat $tmp_path | grep "succeeded" | cut -d " " -f 2)
get_tpm_success_num=$(grep -c "success" $tmp_path)
# Key verification reuses the same scratch file (overwrites it).
/root/automation/T55_MFG/tpmtools/verify_tpm_keys.sh -w | tee $tmp_path
get_tpm_key=$(grep -c "KEY" $tmp_path)

# Pass criteria: selftest reports "succeeded", exactly 7 "success" lines,
# and 4 "KEY" lines from the verifier.
if [[ $get_tpm_status == "succeeded" ]] && [[ $get_tpm_success_num == "7" ]] && [[ $get_tpm_key == "4" ]] ;then
echo "$(date '+%Y-%m-%d %H:%M:%S') TPM_TEST: PASS" >> $test_result_path
else
echo "$(date '+%Y-%m-%d %H:%M:%S') TPM_TEST: FAIL" >> $test_result_path
fi

cp -rf /root/automation/T55_MFG/tpmtools/logs $log_folder_path
| true
|
a43d53344846d495c67e2038709633265ea85106
|
Shell
|
iacabezasbaculima/UNIX_ShellScripting
|
/for.sh
|
UTF-8
| 357
| 3.9375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Read a whitespace-separated list of numbers from stdin and report, for
# each token, whether it is a valid integer between 1 and 100 inclusive.

echo "Please enter a list of number between 1 and 100."
# -r keeps backslashes literal (fix: plain 'read' mangles input like '\1').
read -r NUMBERS

# Iterate over each whitespace-separated token (intentionally unquoted).
for NUM in $NUMBERS
do
    # Fix: reject non-numeric tokens up front. The original passed them
    # straight to '[ -lt ]', which errors out (status 2 -> both conditions
    # false) and then wrongly printed "<token> is valid.".
    case $NUM in
        ''|*[!0-9]*)
            echo "Invalid Number ($NUM) - Must be between 1 and 100!"
            continue
            ;;
    esac
    if [ "$NUM" -lt 1 ] || [ "$NUM" -gt 100 ]; then
        echo "Invalid Number ($NUM) - Must be between 1 and 100!"
    else
        echo "$NUM is valid."
    fi
done
| true
|
54bb557ce202f3903c8be3a27cfcf85c7624e5b8
|
Shell
|
tsarpaul/linux-bootloader
|
/build-initramfs.sh
|
UTF-8
| 1,092
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/sh
# To create a 32-bit image I used an AWS instance.
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root"
exit 1
fi
mkdir --parents ./initramfs/{bin,dev,etc,lib,lib64,mnt/root,proc,root,sbin,sys}
cp --archive /dev/{null,console,tty,sda1} ./initramfs/dev/
yum install -y busybox
cp --archive $(which busybox) ./initramfs/bin/busybox
cp /lib/ld-linux.so.2 ./initramfs/lib/
ldd $(which sh) $(which busybox) | grep "=> /" | awk '{print $3}' | xargs -I '{}' cp -v '{}' ./initramfs/lib/
cat >./initramfs/init <<'EOF'
#!/bin/busybox sh
# Mount the /proc and /sys filesystems.
mount -t proc none /proc
mount -t sysfs none /sys
# Do your stuff here.
echo "This script just mounts and boots the rootfs, nothing else!"
# Mount the root filesystem.
mount -o ro /dev/sda1 /mnt/root
# Clean up.
umount /proc
umount /sys
exec /bin/sh
EOF
chmod +x ./initramfs/init
find ./initramfs -print0 | cpio --null --create --verbose --format=newc > custom-initramfs
echo "Now just copy it to the bootloader directory, and set size to: $(stat --format "%s" custom-initramfs)"
| true
|
eb2ca0e6fd1b5560e7b7dcc91ba086d2a1a58038
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/dxreminders/PKGBUILD
|
UTF-8
| 552
| 2.609375
| 3
|
[] |
no_license
|
# Contributor: David Vachulka <arch_dvx@users.sourceforge.net>
# Arch Linux PKGBUILD for dxReminders, a wxWidgets-based reminder program.
pkgname=dxreminders
pkgver=1.10.1
pkgrel=1
pkgdesc="A simple program for reminders"
arch=('i686' 'x86_64')
url="http://dxreminders.dxsolutions.org"
license=('GPL')
depends=('wxgtk2')
makedepends=('cmake')
install=${pkgname}.install
source=(http://files.dxsolutions.org/$pkgname-$pkgver.tar.gz)
md5sums=('ccf770278ec8c580f8ca16c1b7ce6ad0')

# Configure with CMake and compile inside the unpacked source tree.
build() {
    cd "${srcdir}/${pkgname}-${pkgver}"
    cmake -DCMAKE_INSTALL_PREFIX=/usr
    make
}

# Stage the compiled files into the package root.
package() {
    cd "${srcdir}/${pkgname}-${pkgver}"
    make DESTDIR="$pkgdir" install
}
| true
|
0d64bfe570e8c49fd18055dcf907a2faa4cfe30e
|
Shell
|
praser/restos-a-pagar
|
/backend/build.sh
|
UTF-8
| 875
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build a production zip of the backend.
#
# Usage: ./build.sh -t <tag>
# Produces restos-a-pagar-backend-<tag>.zip in the project root.

while getopts t: flag
do
    case "${flag}" in
        t) tag=${OPTARG};;
        *) echo "Usage: $0 -t <tag>" >&2; exit 2;;
    esac
done

# Fail fast when no tag was supplied (the zip name would be malformed).
if [ -z "$tag" ]; then
    echo "Usage: $0 -t <tag>" >&2
    exit 2
fi

echo "Gerando build com a tag: $tag...";

# Remover build pre existente
rm -rf ./build

# copiar a pasta do projeto
rsync -ax ./ ./build

# acessar a pasta do build
# Safety fix: guard the cd — if it failed, the rm/zip below would run
# against the project root instead of the build copy.
cd build || exit 1

# instalar dependências de produção
rm -rf vendor && composer install --no-dev

# remover os arquivos desnecessários para a produção
# (bug fix: 'compose.lock' was a typo for 'composer.lock', so the lock file
# was being shipped inside the production artifact)
rm -rf \
    .dockerignore \
    CONTRIBUTING.md \
    composer.json \
    composer.lock \
    phpcs.xml \
    .env \
    Dockerfile \
    phpstan.neon.dist \
    var \
    .DS_Store \
    .gitignore \
    README.md \
    logs \
    phpunit.xml \
    .coveralls.yml \
    .vscode \
    phinx.php \
    src/Infrastructure/Database/Migrations \
    src/Infrastructure/Database/Seeds

# compactar
zip -r "restos-a-pagar-backend-$tag.zip" .

# remover a pasta da build
cd .. && mv build/*.zip . && rm -rf build
| true
|
7a398f71543fa78c3e80cd8dccfa2d066e7a1441
|
Shell
|
DannyBen/rush-repo
|
/gpg2/main
|
UTF-8
| 289
| 3.421875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install gnupg2 if missing and drop the bashrc.d configuration snippet.
# Relies on helpers sourced from the repo library: command_exist, say,
# package_install. NOTE(review): $DISTRO is presumably exported by lib.sh
# or the calling environment — confirm.
source "$REPO_PATH/lib.sh"

if command_exist gpg2 ; then
    say "gpg2 is already installed"
else
    # The package name differs between distributions.
    case "$DISTRO" in
        "ubuntu") package_install gnupg2 ;;
        "arch") package_install gnupg ;;
    esac

    say "copying bashrc.d config"
    cp gpg2.bashrc ~/.bashrc.d/
fi
| true
|
e3738206c757f51045baf6793e4f2b66146ab38d
|
Shell
|
daoleviethoang/lthdt-18clc6-group16
|
/login
|
UTF-8
| 1,316
| 2.515625
| 3
|
[] |
no_license
|
// NOTE(review): despite the extensionless path and the dataset's "shell"
// label, this is C++ source. It depends on symbols defined elsewhere in the
// project: PS (a container of account objects exposing getUser()/getPass()),
// inputpassword(), createNewUser(), changePassword(), savePassword().
//
// Shows a login menu and returns a role/status code:
//   1 = admin login, 2 = user whose name starts with "NV", 3 = other user,
//   4 = password changed, 5 = new account created.
// The matched username is written back through the 'user' reference.
int Login(string &user)
{
	char pass[50];
	int check = 0;   // 1 once credentials match an entry in PS
	int check1 = 0;  // index of the matching account in PS
	int temp = 0;    // menu choice (1..3)
	int index = 0;   // 1 after a failed login attempt
	// Menu loop: repeat until the choice is within range.
	do
	{
		cout << " |=========================|" << endl;
		cout << " | (1) - LOGIN |" << endl;
		cout << " | (2) - CHANGE PASSWORD |" << endl;
		cout << " | (3) - CREATE ACOUNT |" << endl;
		cout << " |=========================|" << endl;
		cout << "\n ENTER: ";
		cin >> temp;
		cout << "__________________________\n" << endl;
	} while (temp < 1 || temp > 3);
	if (temp == 3)
	{
		createNewUser();
		return 5;
	}
	else
	{
		// Prompt for credentials until they match; after the first failure
		// 'index' stays 1, so the loop exits only via the returns below.
		do
		{
			cout << "USERNAME: ";
			cin >> user;
			cout << "PASSWORD: ";
			inputpassword(pass);
			cout << "__________________________\n" << endl;
			// Linear scan of the account list for a username/password match.
			for (int i = 0; i < PS.size(); i++)
			{
				if (user == PS[i].getUser() && pass == PS[i].getPass())
				{
					check = 1;
					check1 = i;
					break;
				}
			}
			if (check == 1)
			{
				if (temp == 1)
				{
					// Role derived from the username: "NV" prefix or "admin".
					if (user[0] == 'N' && user[1] == 'V')
						return 2;
					else if (user == "admin")
						return 1;
					else return 3;
				}
				else if (temp == 2)
				{
					changePassword(check1);
					savePassword();
					return 4;
				}
			}
			else index = 1;
		} while (index == 1 && cout << "Password account is incorrect" << endl);
	}
	// NOTE(review): control can fall off the end of this non-void function
	// (undefined behaviour) if the loop condition ever becomes false;
	// consider returning an explicit error code here.
}
| true
|
6348bd7a29333ab51b2b6a635b6772a4ebb560db
|
Shell
|
minbrowser/min
|
/resources/postinst_script
|
UTF-8
| 451
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Debian maintainer post-installation script for the Min browser package.
set -e
if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ]; then
    # Register Min as an x-www-browser alternative with priority 80.
    update-alternatives --install /usr/bin/x-www-browser \
        x-www-browser /opt/Min/min 80
    # The Chromium sandbox helper must be root-owned and setuid root.
    chown root /opt/Min/chrome-sandbox
    chmod 4755 /opt/Min/chrome-sandbox
    echo "Run sudo xdg-settings set default-web-browser min.desktop && sudo update-alternatives --config x-www-browser to set Min as your default browser."
    # Convenience symlink so 'min' is on PATH.
    ln -sf /opt/Min/min /usr/bin/min
fi
| true
|
dcf47f8c4eeee81b79d5c92f81f617ef44dcb85c
|
Shell
|
alejandropachecovizuet/node-api-rest-elastic
|
/extras/bin/testApp.sh
|
UTF-8
| 1,105
| 3.15625
| 3
|
[] |
no_license
|
# Smoke-test the REST API at http://<IP>:<PORT>/<CONTEXT>: log in, then
# exercise the ping/fail/fix endpoints to verify the health toggle.
#
# Usage: ./testApp.sh <IP> <PORT> <CONTEXT>

IP=$1
PORT=$2
CONTEXT=$3
me=$(basename "$0")

if [[ "$IP" != "" && "$PORT" != "" && "$CONTEXT" != "" ]];
then
    echo "--------------------------------"
    echo "login:$(curl -X POST -H "Accept: application/json" -d '{"username":"chingon", "pwd":"123456"}' http://$IP:$PORT/$CONTEXT)"
    echo ""
    echo "ping:"
    curl -I -X POST -H "Accept: application/json" -d '{"username":"chingon", "pwd":"123456"}' http://$IP:$PORT/$CONTEXT/ping
    echo ""
    echo "fail:"
    curl -X POST -H "Accept: application/json" -d '{"username":"chingon", "pwd":"123456"}' http://$IP:$PORT/$CONTEXT/fail
    echo ""
    echo "ping:"
    curl -X POST -H "Accept: application/json" -d '{"username":"chingon", "pwd":"123456"}' http://$IP:$PORT/$CONTEXT/ping
    echo ""
    echo "fix:"
    curl -X POST -H "Accept: application/json" -d '{"username":"chingon", "pwd":"123456"}' http://$IP:$PORT/$CONTEXT/fix
    echo ""
    echo "ping:"
    curl -X POST -H "Accept: application/json" -d '{"username":"chingon", "pwd":"123456"}' http://$IP:$PORT/$CONTEXT/ping
    echo ""
else
    # Bug fix: the usage message printed the literal '%me' instead of
    # expanding $me.
    echo "USO: ./$me IP PORT CONTEXT";
fi
| true
|
97dd7345e97dfa25a47735f7465db88477fa08e9
|
Shell
|
laaners/progetto-labiagi_pick_e_delivery
|
/catkin_ws/src/srrg_cmake_modules/ci_scripts/build.sh
|
UTF-8
| 1,333
| 3.578125
| 4
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#ds check input parameters
if [ "$#" -ne 3 ]; then
echo "ERROR: call as $0 BUILD_DIRECTORY PROJECT_NAME CMAKE_BUILD_TYPE"
exit -1
fi
#ds parameters
BUILD_DIRECTORY="$1"
PROJECT_NAME="$2"
CMAKE_BUILD_TYPE="$3"
echo -e "\e[1;96m--------------------------------------------------------------------------------\e[0m"
echo -e "\e[1;96mrunning build.sh|bash version: '${BASH_VERSION}'\e[0m"
cd "${BUILD_DIRECTORY}"
catkin config
#ds set complete cmake build flags
CMAKE_BUILD_FLAGS="-j4 --no-status --summary -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DCMAKE_CXX_FLAGS=-fdiagnostics-color=always"
#ds test build flags
CMAKE_BUILD_TESTS_FLAGS="-j4 --catkin-make-args tests"
#ds choose build system (currently only catkin)
BUILD_COMMAND="catkin build"
#ds build stack
echo "${BUILD_COMMAND} ${PROJECT_NAME} ${CMAKE_BUILD_FLAGS}"
${BUILD_COMMAND} ${PROJECT_NAME} ${CMAKE_BUILD_FLAGS}
#ds build tests
echo "${BUILD_COMMAND} ${PROJECT_NAME} ${CMAKE_BUILD_FLAGS} ${CMAKE_BUILD_TESTS_FLAGS}"
${BUILD_COMMAND} ${PROJECT_NAME} ${CMAKE_BUILD_FLAGS} ${CMAKE_BUILD_TESTS_FLAGS}
DEVEL_SPACE="${BUILD_DIRECTORY}devel"
for LIB in ${SRRG_RDEPS}; do
echo ". ${DEVEL_SPACE}/${LIB}/setup.sh" >> "${DEVEL_SPACE}/setup.sh";
done
echo -e "\e[1;96m--------------------------------------------------------------------------------\e[0m"
| true
|
c8f2014535610540e9787bede0c39936f62a2ce7
|
Shell
|
mrfarstad/thesis
|
/scripts/test_all_configurations.sh
|
UTF-8
| 1,168
| 3.34375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
project_folder=$(echo ${PWD} | sed 's/thesis.*/thesis/')
source $project_folder/constants.sh
host=yme
out_path=results/out.txt
gpus=(1 2 4)
stencils=(1 2 4 8 16 32 64 128)
for g in "${gpus[@]}"
do
:
if [[ $g -eq 1 ]] ; then
versions=(base smem coop) # coop_smem)
else
versions=(base smem)
fi
for v in "${versions[@]}"
do
:
for d in "${stencils[@]}"
do
:
bash $project_folder/scripts/set_run_configuration.sh $v $g $d
bash $project_folder/scripts/run.sh prod yme | tee ${out_path}
error=$(awk '/reading solution/{getline;print;}' ${out_path})
if [[ ! -z $(echo "$error" | awk '!/rms error = 0.000000/') ]] ; then
echo "#############################"
echo "ERROR"
echo "$g GPU[s] $v RADIUS=$d"
echo "$error"
echo "#############################"
exit
fi
rm ${out_path}
done
done
done
echo "#############################"
echo "CONGRATULATIONS!"
echo "NO ERRORS IN TESTED CONFIGURATIONS"
echo "#############################"
exit 0
| true
|
fe98a3f5e987cd9ccee4881a69e32d2270507bd7
|
Shell
|
madharjan/docker-base
|
/services/syslog-ng/syslog-ng.sh
|
UTF-8
| 833
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
export LC_ALL=C
export DEBIAN_FRONTEND=noninteractive
if [ "${DEBUG}" = true ]; then
set -x
fi
SYSLOG_NG_BUILD_PATH=/build/services/syslog-ng
## Install a syslog daemon.
apt-get install -y --no-install-recommends syslog-ng-core
mkdir -p /var/lib/syslog-ng
cp $SYSLOG_NG_BUILD_PATH/syslog-ng-default /etc/default/syslog-ng
touch /var/log/syslog
chmod u=rw,g=r,o= /var/log/syslog
cp $SYSLOG_NG_BUILD_PATH/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
## Install logrotate.
apt-get install -y --no-install-recommends logrotate
cp $SYSLOG_NG_BUILD_PATH/logrotate-syslog-ng /etc/logrotate.d/syslog-ng
## Install syslog to "docker logs" forwarder.
mkdir -p /etc/service/syslog-forwarder
cp $SYSLOG_NG_BUILD_PATH/syslog-forwarder.runit /etc/service/syslog-forwarder/run
chmod 750 /etc/service/syslog-forwarder/run
| true
|
de5b25c33febd4993ddd0d487d70c11c4aa65b1b
|
Shell
|
xavim/android_device_infotm_imapx800
|
/InfotmMedia/collect.sh
|
UTF-8
| 5,526
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
### sam_ye, 2012/06/02 ###
### to fix the current path
LOCAL_PATH=`pwd`
cd ..
TMP_PATH=`pwd`
TMP_PATH=${TMP_PATH/%\/}
cd $LOCAL_PATH
if [ "$TMP_PATH/" = "${LOCAL_PATH/%InfotmMedia}" ]
then
cd ${0/collect.sh}
fi
IM_ROOT=`pwd`
state_file="env_state_must_not_delete"
config_file="./config.mk"
IM_EXTERNAL_ROOT=$IM_ROOT/external/project/
DRV_ROOT="../../../../../../../"
WP_PATH="../../work_copy/"
ext_file_list="change_file_list.txt"
###########################################################
### error print
print_err()
{
echo "error: " $1 " failed " $2 "!"
}
copy_file()
{
cp $1 $2
if [ $? -ne 0 ] ; then
print_err "copy file " $1
exit 1
fi
}
# Move $1 to $2; report via print_err() and abort the whole script on
# failure. (Arguments are left unquoted, matching the original behaviour.)
move_file()
{
    if ! mv $1 $2; then
        print_err "move file " $1
        exit 1
    fi
}
### user help
print_help()
{
echo "####################################################################################"
echo "$0 [help/module/external_module]"
echo " collect the indicated module envirionment, if not set, modules in config.mk will collect."
echo " module: optional, module folder."
echo " external_module: optionanl, external module folder."
echo "example:"
echo " $0"
echo " $0 foundations"
echo " $0 external_hwcursor"
echo "###################################################################################"
}
### check the module state : $1 = "$dir", $2 = "$str"
check_state()
{
dir=$1
str=$2
if [ $# -ne 2 ]; then
echo "error: check state parameters error "
exit 1
fi
if [ ! -f $state_file ] ; then
echo "warn: $dir not built yet , check it"
return 1
fi
line=`grep $str $state_file`
if [ "$line" = "" ]; then
echo "warn: $dir not built yet , check it"
return 1
else
curr_state=`echo $line | awk '{print $NF}'`
fi
if [ $curr_state -eq 0 ] ; then
echo "warn: $dir already collected , please check it"
return 1
fi
return 0
}
### compiler module enviroment build
comp_collect()
{
exclude=".svn"
for dir in `ls -l | grep ^d | awk '{print $NF}'`
do
if [ $1 = "all" ] || [ $dir = $1 ]; then
### check the state
if [ ! -f $state_file ] ; then
echo "warn: $dir not built yet , check it"
continue
fi
str="IM_"$dir"_state"
line=`grep $str $state_file`
if [ "$line" = "" ]; then
echo "warn: $dir not built yet , check it"
continue
else
curr_state=`echo $line | awk '{print $NF}'`
fi
if [ $curr_state -eq 0 ] ; then
echo "warn: $dir already collected , please check it"
continue
fi
## execute the script if needed.
cd ../../../$dir
if [ -f collectenv.sh ] ; then
./collectenv.sh
if [ $? -ne 0 ]; then
print_err $dir " collectenv"
fi
fi
cd - 1>/dev/null
sed -i "s/$str = 1/$str = 0/g" $state_file
fi
done
}
### real external module
ext_change_file_list_collect()
{
if [ ! -f $ext_file_list ] ; then
return 0
fi
exec 5<> $ext_file_list
cnt=0
while read line <&5
do {
len=`echo $line | awk '{print length()}'`
if [ $len -eq 0 ] || [ "$line" != "${line/"#"}" ];then
continue
else
file=$line
fi
move_file $DRV_ROOT/$file $WP_PATH/$file
if [ -f $WP_PATH/$file.tmpbak ] ; then
move_file $WP_PATH/$file.tmpbak $DRV_ROOT/$file
fi
}
done
exec 5>&-
}
### external module enviroment build
### should check the state first, then call the truely dealing function
ext_collect()
{
for dir in `ls -l $IM_EXTERNAL_ROOT | grep ^d | awk '{print $NF}'`
do
if [ $1 = "all" ] || [ $dir = $1 ]; then
if [ ! -f $state_file ] ; then
echo $dir " not built yet , check it"
continue
fi
str="IM_EXTERNAL_"$dir"_state"
line=`grep $str $state_file`
if [ "$line" = "" ]; then
echo "warn: $dir not built yet , check it"
continue
else
curr_state=`echo $line | awk '{print $NF}'`
fi
if [ $curr_state -eq 0 ] ; then
echo "warn: ${dir} already collected, please check it"
continue
fi
cd $IM_EXTERNAL_ROOT/$dir
if [ -f collectenv.sh ]
then
echo "execute " $dir "collectenv.sh"
./collectenv.sh
if [ $? -ne 0 ]
then
print_err "external collectenv.sh " $dir
exit 1
fi
fi
echo "external collect: " $dir
ext_change_file_list_collect
cd - 1>/dev/null
sed -i "s/$str = 1/$str = 0/g" $state_file
fi
done
}
collect_env()
{
exec 104<> $config_file
cnt=0
while read line <&104
do {
((cnt++))
len=`echo $line | awk '{print length()}'`
if [ $len -ne 0 ] && [ "$line" = "${line/"#"}" ]
then
COM_index=`echo $line | awk '{print index($line, "IM_SUPPORT_")}'`
EXT_index=`echo $line | awk '{print index($line, "IM_SUPPORT_EXTERNAL_")}'`
if [ $COM_index -eq 1 ] && [ $EXT_index -eq 0 ]
then
dir=`echo $line | awk '{a=12; b=index($line, ":="); print substr($line,a,b-a)}'`
yes_no=`echo $line | awk '{a=index($line,":="); print substr($line,a+3,4)}'`
if [ $yes_no = "true" ]
then
comp_collect $dir
fi
#elif [ $EXT_index -eq 1 ]
#then
# dir=`echo $line | awk '{a=21; b=index($line, ":="); print substr($line,a,b-a)}'`
# yes_no=`echo $line | awk '{a=index($line,":="); print substr($line,a+3,4)}'`
# if [ $yes_no = "true" ]
# then
# ext_collect $dir
# fi
fi
fi
}
done
exec 104>&-
}
############################################################
if [ $# -eq 1 ] && [ $1 = "help" ] ; then
print_help
exit 0
fi
## no params, build all the enviroment
if [ $# -eq 0 ] ; then
echo
collect_env
echo "##########collect system enviroment success##########"
echo
exit 0
fi
echo
exit 0
| true
|
521dd076dee018d02a175945e0804db301a84742
|
Shell
|
Eric2018/curl-ndk
|
/jni/configure.sh
|
UTF-8
| 2,263
| 3.15625
| 3
|
[
"curl",
"NTP",
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# the path of curl and c_ares
SOURCE=/home/android
# the path of android ndk
ANDROID_NDK="/home/android/android-ndk-r8b"
# toolchain version, change them if necessary
# android-ndk r8b supports 4.4.3 and 4.6
TOOLCHAIN_VERSION="4.4.3"
# platform version, i.e. api level
PLATFORM_VERSION=8
# target
TARGET=arm-linux-androideabi
# path
PATH=$ANDROID_NDK/toolchains/$TARGET-$TOOLCHAIN_VERSION/prebuilt/linux-x86/bin:$PATH
# the fullpath of libgcc.a
LIBGCCA=`ls $ANDROID_NDK/toolchains/$TARGET-$TOOLCHAIN_VERSION/prebuilt/linux-x86/lib/gcc/$TARGET/*/thumb/libgcc.a`
# the path of openssl
OPENSSL_PREFIX=/home/android/external/openssl
# the path of libcrypto.so libssl.so, can get it from /system/lib
OPENSSL_LIBDIR=/home/android/out/target/product/generic/system/lib
# the version of curl and c_ares
CURL_VERSION=7.27.0
C_ARES_VERSION=1.9.1
CURL_EXTRA="--disable-ftp --disable-file --disable-ldap --disable-ldaps --disable-rtsp --disable-proxy --disable-dict --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smtp --disable-gopher --disable-sspi"
pushd `dirname $0`
rm -rf curl curl-$CURL_VERSION
tar xf $SOURCE/curl-$CURL_VERSION.tar.*
mv curl-$CURL_VERSION curl
mkdir -p curl/ares
rm -rf ares c-ares-$C_ARES_VERSION
tar xf $SOURCE/c-ares-$C_ARES_VERSION.tar.*
mv c-ares-$C_ARES_VERSION ares
pushd curl
./configure CC=$TARGET-gcc --host=arm-linux \
CPPFLAGS="-DANDROID -I$ANDROID_NDK/platforms/android-$PLATFORM_VERSION/arch-arm/usr/include " \
CFLAGS="-fno-exceptions -Wno-multichar -mthumb-interwork -mthumb -nostdlib " \
LIBS="-lc -ldl -lz $LIBGCCA " \
LDFLAGS="-L$ANDROID_NDK/platforms/android-$PLATFORM_VERSION/arch-arm/usr/lib -L$OPENSSL_LIBDIR " \
--enable-ipv6 --disable-manual --with-random=/dev/urandom \
--with-ssl=$OPENSSL_PREFIX --without-ca-bundle --without-ca-path \
--with-zlib --enable-ares $CURL_EXTRA || exit 1
popd
pushd ares
./configure CC=$TARGET-gcc --host=arm-linux \
CPPFLAGS="-DANDROID -I$ANDROID_NDK/platforms/android-$PLATFORM_VERSION/arch-arm/usr/include " \
CFLAGS="-fno-exceptions -Wno-multichar -mthumb-interwork -mthumb -nostdlib " \
LIBS="-lc -ldl " \
LDFLAGS="-L$ANDROID_NDK/platforms/android-$PLATFORM_VERSION/arch-arm/usr/lib " \
--with-random=/dev/urandom || exit 1
popd
popd
| true
|
5fc00aa5349243092a23caaef1ea45308b4f2e21
|
Shell
|
h01ger/piuparts
|
/custom-scripts/scripts-debug-problemresolver/pre_install_debug_problemresolver
|
UTF-8
| 381
| 3
| 3
|
[] |
no_license
|
#!/bin/sh
set -e
test "$PIUPARTS_TEST" = "install" || exit 0
if [ ! -f /etc/apt/apt.conf.d/piuparts-debug-problemresolver ]
then
echo "Enabling Debug::pkgProblemResolver"
echo 'Debug::pkgProblemResolver "true";' >> /etc/apt/apt.conf.d/piuparts-debug-problemresolver
echo 'Debug::pkgProblemResolver::ShowScores "true";' >> /etc/apt/apt.conf.d/piuparts-debug-problemresolver
fi
| true
|
5a3b7c59e79c69cd298229020206d3da8bfef678
|
Shell
|
magos-linux/magos-linux
|
/make_modules/add-modules/make_mod_deps.sh
|
UTF-8
| 1,150
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Лицензия: GPL последней версии
# Описание: Генерирует файлы с зависимостями пакетов модулей для MagOS
# Дата модификации: 03.12.2012
# Автор: Горошкин Антон
if [ "`id -u`" != "0" ] ;then
echo "Нужны права root"
exit 1
fi
if [ -f .config ] ;then
. .config
else
echo "Не вижу файла .config" ; exit 1
fi
rm work/errors
for mod in `ls -1 $MOD_NAMES_DIR/??-*` ;do
echo "Генерация файла зависимостей для модуля $(basename $mod)"
#--------------
[ -f $MOD_NAMES_DIR/deps_$(basename $mod) ]&&rm $MOD_NAMES_DIR/deps_$(basename $mod)
# urpmq -d --no-suggests --urpmi-root=$MOD_PREV --root=$MOD_PREV `cat $mod` |sort -u >$MOD_NAMES_DIR/deps_$(basename $mod)
urpmq -d --no-suggests --auto-select --force --urpmi-root=$MOD_PREV --root=$MOD_PREV `cat $mod` 2>> $MOD_NAMES_DIR/urpmi_deps_errors |sort -u >$MOD_NAMES_DIR/deps_$(basename $mod)
#--------------
echo -ne \\n "---> OK."\\n
done
cat $MOD_NAMES_DIR/deps_* |sort -u >$MOD_NAMES_DIR/full_deps
| true
|
44c99e879ccc7419ee7bea8d41ab690f07946481
|
Shell
|
Otus-DevOps-2020-02/DmitryKorlas_infra
|
/VPN/setupvpn.sh
|
UTF-8
| 766
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# pritunl
sudo tee /etc/apt/sources.list.d/pritunl.list << EOF
deb http://repo.pritunl.com/stable/apt xenial main
EOF
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com --recv 7568D9BB55FF9E5287D586017AE645C0CF8E292A
# mongodb
wget -qO - https://www.mongodb.org/static/pgp/server-4.2.asc | sudo apt-key add -
sudo apt-get install gnupg
wget -qO - https://www.mongodb.org/static/pgp/server-4.2.asc | sudo apt-key add -
echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/4.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-4.2.list
apt-get --assume-yes update
apt-get --assume-yes upgrade
apt-get --assume-yes install pritunl mongodb-org
systemctl start pritunl mongod
systemctl enable pritunl mongod
| true
|
a0ccb41d5f52904ad203c2c2aa870a37488ddd05
|
Shell
|
x11remix/portable-pypy
|
/build_deps
|
UTF-8
| 2,824
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
if [[ $ABI == "32" ]]; then
export CFLAGS="-m32 $CFLAGS"
export CPPFLAGS=$CFLAGS
export LDFLAGS="-m32 $LDFLAGS"
fi
env
set -x
wget http://ftp.gnu.org/gnu/ncurses/ncurses-6.0.tar.gz -O - | tar xz
cd ncurses-6.0
./configure --prefix=/opt/prefix --without-cxx --with-termlib --without-normal --with-shared --enable-database --with-terminfo-dirs=/lib/terminfo:/usr/share/terminfo
echo "#define NCURSES_USE_DATABASE 1" >> include/ncurses_cfg.h
make
make install
cd -
wget http://ftp.vim.org/security/openssl/openssl-1.0.2j.tar.gz -O - | tar xz
cd openssl-1.0.2j
if [[ $ABI == "32" ]]; then
setarch i386 ./config --prefix=/opt/prefix -m32 shared
else
./config --prefix=/opt/prefix shared
fi
sed -i "s#^SHARED_LDFLAGS=\\(.*\\)#SHARED_LDFLAGS=$LDFLAGS \\1#" Makefile
make
make install
cd -
wget http://tukaani.org/xz/xz-5.2.2.tar.gz -O - | tar xz
cd xz-5.2.2
./configure --prefix=/opt/prefix
make -j4
make install
cd -
wget http://nixos.org/releases/patchelf/patchelf-0.9/patchelf-0.9.tar.gz -O - | tar xz
cd patchelf-0.9
if [[ $ABI == "32" ]]; then
LDFLAGS="$LDFLAGS -Wl,-rpath,/opt/devtools-6.2/lib" ./configure --prefix=/opt/prefix
else
LDFLAGS="$LDFLAGS -Wl,-rpath,/opt/devtools-6.2/lib64" ./configure --prefix=/opt/prefix
fi
make -j4
make install
cd -
wget https://sqlite.org/2016/sqlite-autoconf-3150100.tar.gz -O - | tar xz
cd sqlite-autoconf-3150100
./configure --prefix=/opt/prefix
make -j4
make install
cd -
wget ftp://sourceware.org/pub/libffi/libffi-3.2.1.tar.gz -O - | tar xz
cd libffi-3.2.1
./configure --prefix=/opt/prefix
make -j4
make install
cd -
cd /opt/prefix/lib
find . -name ffi.h | xargs -i ln -sf ../lib/{} ../include
find . -name ffitarget.h | xargs -i ln -sf ../lib/{} ../include
cd -
wget http://downloads.sourceforge.net/project/expat/expat/2.2.0/expat-2.2.0.tar.bz2 -O - | tar xj
cd expat-2.2.0
./configure --prefix=/opt/prefix
make -j4
make install
cd -
wget http://ftp.gnu.org/gnu/gdbm/gdbm-1.12.tar.gz -O - | tar xz
cd gdbm-1.12
./configure --prefix=/opt/prefix
make -j4
make install
cd -
wget http://prdownloads.sourceforge.net/tcl/tcl8.6.6-src.tar.gz -O - | tar xz
cd tcl8.6.6/unix
./configure --prefix=/opt/prefix
make -j4
make install
cd -
wget http://prdownloads.sourceforge.net/tcl/tk8.6.6-src.tar.gz -O - | tar xz
cd tk8.6.6/unix
./configure --prefix=/opt/prefix
make -j4
make install
cd -
wget https://www.python.org/ftp/python/2.7.12/Python-2.7.12.tgz -O - | tar xz
cd Python-2.7.12
./configure --prefix=/opt/prefix/cpython-2.7
make -j4
make install
cd -
ln -sf /opt/prefix/lib/libtcl8.6.so /opt/prefix/lib/libtcl.so
ln -sf /opt/prefix/lib/libtk8.6.so /opt/prefix/lib/libtk.so
if [ -d /opt/prefix/lib64 ]; then
cp /opt/prefix/lib64/* /opt/prefix/lib
fi
ln -sf /opt/prefix/lib/libexpat.so /opt/prefix/lib/libexpat.so.0
| true
|
a11956fba5d4c81d827c969090bceec726767011
|
Shell
|
ubes-exped/goaty
|
/serve
|
UTF-8
| 392
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
export RABBITMQ_NODE_IP_ADDRESS=127.0.0.1
export ERL_EPMD_ADDRESS=127.0.0.1
export RABBITMQ_NODENAME=rabbit@localhost
cleanup() {
rabbitmqctl shutdown
trap - INT
kill -s INT "$$"
}
trap cleanup INT
wait_and_start() {
sleep 5
rabbitmqctl await_startup
yarn start
rabbitmqctl shutdown
}
rabbitmq-server -detached 2> /dev/null && wait_and_start
| true
|
7d5ef4f06946e8dc878d299824740d16a14745c8
|
Shell
|
xjdsdm/youtube-video
|
/share.sh
|
UTF-8
| 890
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
baseDir=/usr/share/nginx/html
page=$baseDir/share.html
egrep "singlemessage|groupmessage" /var/log/nginx/access.log | awk '{ print $1","$7 }' | cut -d'?' -f1 | sed 's/"//g' | grep 'htm' | sort | uniq | cut -d',' -f2 | sort | uniq -c | sort -nr > ll.txt
cat > $page << EOF
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-s">
<style>
body {
padding: 5px;
}
th, td {
padding: 3px;
}
</style>
<b>
</b><br/><br/>
<table border='1px' cellspacing='0'>
<tr><th>分享链接</th><th>分享量</th></tr>
EOF
total=0
while read line; do
count=$(echo $line | awk '{ print $1 }')
key=$(echo $line | awk '{ print $2 }')
total=$(($total + $count))
title=$(grep $key channels.csv | cut -d',' -f1)
echo "<tr><td>$key</td><td>$count</td></tr>" >> $page
done < ll.txt
sed -i "s/total_count/$total/" $page
head -n 10 ll.txt
| true
|
c161af2c444d3284ef044a80271a2dc1bac8dac0
|
Shell
|
contiln/dotfiles
|
/bash/bash_profile
|
UTF-8
| 166
| 2.921875
| 3
|
[] |
no_license
|
# Source ~/.profile
if [ -f "$HOME"/.profile ] ; then
source "$HOME"/.profile
fi
# Source ~/.bashrc
if [ -f "$HOME"/.bashrc ] ; then
source "$HOME"/.bashrc
fi
| true
|
c92c6141cb2027d514c4ef721b40cbda03e8d84d
|
Shell
|
intelsdi-x/snap-plugin-publisher-graphite
|
/scripts/medium.sh
|
UTF-8
| 1,251
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# File managed by pluginsync
set -e
set -u
set -o pipefail
__dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
__proj_dir="$(dirname "$__dir")"
__proj_name="$(basename "$__proj_dir")"
# shellcheck source=scripts/common.sh
. "${__dir}/common.sh"
_verify_docker() {
type -p docker > /dev/null 2>&1 || _error "docker needs to be installed"
docker version >/dev/null 2>&1 || _error "docker needs to be configured/running"
}
_verify_docker
[[ -f "${__proj_dir}/build/linux/x86_64/${__proj_name}" ]] || (cd "${__proj_dir}" && make)
SNAP_VERSION=${SNAP_VERSION:-"latest"}
OS=${OS:-"alpine"}
PLUGIN_PATH=${PLUGIN_PATH:-"${__proj_dir}"}
DEMO=${DEMO:-"false"}
TASK=${TASK:-""}
if [[ ${DEBUG:-} == "true" ]]; then
cmd="cd /plugin/scripts && rescue rspec ./test/*_spec.rb"
else
cmd="cd /plugin/scripts && rspec ./test/*_spec.rb"
fi
_info "running medium test"
#Starting docker with graphite
_docker_ps_id="$(docker run -d -p 80:80 -p 2003:2003 -p 2004:2004 -p 4444:4444/udp -p 8126:8126 -p 8086:8086 hopsoft/graphite-statsd)"
if ./scripts/medium_tests.sh ; then
_info "medium test ended: succeeded"
else
_info "medium test ended: failed"
fi
docker kill $_docker_ps_id >/dev/null && docker rm $_docker_ps_id >/dev/null
| true
|
f739515db67157b5bc8acae741623cdf55d3fab2
|
Shell
|
ericseong/wlan-monitor
|
/test/send-logs.sh
|
UTF-8
| 751
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
function send_logs()
{
# get kernel log
now=$(date +'%Y%m%d-%H%M')
dmesg > "${now}-dmesg.txt"
systemctl -n 100 status wlanmonitor > "${now}-systemctl_status.txt"
tail -n 500 /var/log/syslog > "${now}-syslog.txt"
tail -n 100 /var/log/mail.log > "${now}-mail-log.txt"
tail -n 100 /var/log/mail.err > "${now}-mail-err.txt"
# Now, let's send those files to mail. It seems that there's no way we can send body + attachment with mail utility and thus only attachments are sent without message body.
echo '' | mail -s "Sent from send_logs()" \
-A "${now}-dmesg.txt" \
-A "${now}-systemctl_status.txt" \
-A "${now}-syslog.txt" \
-A "${now}-mail-log.txt" \
-A "${now}-mail-err.txt" \
account@myemail.com
}
send_logs
# eof
| true
|
40247caba37c9eaf0f87401b6c2abc9da19e8d4c
|
Shell
|
jlevon/grot
|
/git/git-fixup
|
UTF-8
| 163
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# squash current diff, staged, or current commit into a specific commit
base=${2:-master}
git commit -a --fixup $1
git rebase --autosquash -i $base
| true
|
ad4742227cc186f70157fbcdbb830b478e3d1a23
|
Shell
|
dns301/conll-2012
|
/setup_training.sh
|
UTF-8
| 1,339
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
OUTDIR=$1
dlx() {
wget $1/$2
tar -xvzf $2
rm $2
}
conll_url=http://conll.cemantix.org/2012/download
dlx $conll_url conll-2012-train.v4.tar.gz
dlx $conll_url conll-2012-development.v4.tar.gz
dlx $conll_url/test conll-2012-test-key.tar.gz
dlx $conll_url/test conll-2012-test-official.v9.tar.gz
dlx $conll_url conll-2012-scripts.v3.tar.gz
dlx http://conll.cemantix.org/download reference-coreference-scorers.v8.01.tar.gz
mv reference-coreference-scorers conll-2012/scorer
ontonotes_path=/shared/corpora/corporaWeb/multi-mode/multi/ontonotes-release-5.0
bash conll-2012/v3/scripts/skeleton2conll.sh -D $ontonotes_path/data/files/data conll-2012
function compile_partition() {
rm -f $2.$5.$3$4
cat conll-2012/$3/data/$1/data/$5/annotations/*/*/*/*.$3$4 >> $2.$5.$3$4
}
function compile_language() {
compile_partition development dev v4 _auto_conll $1
compile_partition train train v4 _auto_conll $1
compile_partition test test v4 _gold_conll $1
}
compile_language english
python2 minimize.py
mv conll-2012 $OUTDIR/conll-2012
mv train.english.jsonlines $OUTDIR/train.english.jsonlines
mv dev.english.jsonlines $OUTDIR/dev.english.jsonlines
mv test.english.jsonlines $OUTDIR/test.english.jsonlines
rm train.english.v4_auto_conll
rm dev.english.v4_auto_conll
rm test.english.v4_gold_conll
rm *.pyc
| true
|
954182a0b4fcec9401e5b61854dba889572485d9
|
Shell
|
BCLab-UNM/Swarmathon-Deployment
|
/scripts/buildAllTeams.sh
|
UTF-8
| 691
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
shopt -s extglob
for filename in ./*.tar.gz; do
tar -xvzf "$filename"
dir=$(basename "$filename" .tar.gz)
mv $dir rover_workspace/
if [ -d rover_workspace ]; then
mv rover_workspace/ ~/rover_workspace/
cd ~/rover_workspace/
catkin clean --yes
catkin build
source /home/swarmie/rover_workspace/devel/setup.bash
export GAZEBO_MODEL_PATH=/home/swarmie/rover_workspace/simulation/models
export GAZEBO_PLUGIN_PATH=${GAZEBO_PLUGIN_PATH}:/home/swarmie/rover_workspace/devel/lib/
cd ~/
tar -cvzf "$filename" rover_workspace/
rm -rf rover_workspace/
mv "$filename" physicalTeams2017/"$filename"
cd physicalTeams2017/
fi
done
| true
|
3f350ebf293f7274fb97bfdb2f7c620fba81fcac
|
Shell
|
wawltor/models
|
/PaddleCV/caffe2fluid/examples/imagenet/tools/run.sh
|
UTF-8
| 2,012
| 3.9375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#function:
# a tool used to:
# 1, convert a caffe model
# 2, do inference(only in fluid) using this model
#
#usage:
# cd caffe2fluid/examples/imagenet && bash run.sh resnet50 ./models.caffe/resnet50 ./models/resnet50
#
#set -x
if [[ $# -lt 3 ]];then
echo "usage:"
echo " bash $0 [model_name] [cf_model_path] [pd_model_path] [only_convert]"
echo " eg: bash $0 resnet50 ./models.caffe/resnet50 ./models/resnet50"
exit 1
else
model_name=$1
cf_model_path=$2
pd_model_path=$3
only_convert=$4
fi
proto_file=$cf_model_path/${model_name}.prototxt
caffemodel_file=$cf_model_path/${model_name}.caffemodel
weight_file=$pd_model_path/${model_name}.npy
net_file=$pd_model_path/${model_name}.py
if [[ ! -e $proto_file ]];then
echo "not found prototxt[$proto_file]"
exit 1
fi
if [[ ! -e $caffemodel_file ]];then
echo "not found caffemodel[$caffemodel_file]"
exit 1
fi
if [[ ! -e $pd_model_path ]];then
mkdir $pd_model_path
fi
PYTHON=`which cfpython`
if [[ -z $PYTHON ]];then
PYTHON=`which python`
fi
$PYTHON ../../convert.py \
$proto_file \
--caffemodel $caffemodel_file \
--data-output-path $weight_file\
--code-output-path $net_file
ret=$?
if [[ $ret -ne 0 ]];then
echo "failed to convert caffe model[$cf_model_path]"
exit $ret
else
echo "succeed to convert caffe model[$cf_model_path] to fluid model[$pd_model_path]"
fi
if [[ -z $only_convert ]];then
PYTHON=`which pdpython`
if [[ -z $PYTHON ]];then
PYTHON=`which python`
fi
imgfile="data/65.jpeg"
#FIX ME:
# only look the first line in prototxt file for the name of this network, maybe not correct
net_name=`grep "name" $proto_file | head -n1 | perl -ne 'if(/^name\s*:\s*\"([^\"]+)\"/){ print $1."\n";}'`
if [[ -z $net_name ]];then
net_name="MyNet"
fi
cmd="$PYTHON ./infer.py dump $net_file $weight_file $imgfile $net_name"
echo $cmd
eval $cmd
ret=$?
fi
exit $ret
| true
|
3f97b535f3f533ad17087630116cf9ecfefa0c8f
|
Shell
|
temptemp3/hackerrank.com
|
/test-tail-of-text-file-1.sh
|
UTF-8
| 580
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
## test-tail-of-text-file-1
## version 0.0.1 - initial
##################################################
test-tail-of-text-file-1() {
tail -n 20 /dev/stdin
}
##################################################
if [ ${#} -eq 0 ]
then
true
else
exit 1 # wrong args
fi
##################################################
test-tail-of-text-file-1
##################################################
## generated by create-stub2.sh v0.1.1
## on Mon, 04 Feb 2019 08:23:19 +0900
## see <https://github.com/temptemp3/sh2>
##################################################
| true
|
635987725297e827e82e0bee7d006d4c1985de34
|
Shell
|
cih9088/dotfiles
|
/config/simplebar/skhd_mode
|
UTF-8
| 575
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
modefile=$TMPDIR/current_skhd_mode
if [[ -r $modefile ]]; then
mod="$(cat "$modefile")"
mod="$(echo "$mod" | awk '{print toupper($0)}')"
if [[ -n $mod ]]; then
skhd_mode="$mod"
if [[ $skhd_mode == 'FOCUS' ]]; then
menu="✧"
elif [[ $skhd_mode == 'WARP' ]]; then
menu="⎌"
elif [[ $skhd_mode == 'RESIZE' ]]; then
menu="✠"
elif [[ $skhd_mode == 'PREFIX' ]]; then
menu="⚑"
fi
else
menu="✌"
fi
else
menu=""
fi
echo $menu
| true
|
793d8c7e7850ac8d66c0936c49c289c9c5379482
|
Shell
|
ericosur/ericosur-snippet
|
/perl/bash/cnt.sh
|
UTF-8
| 484
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# use bash shell script to count numbers
#
# 2006/12/27 by ericosur
#
LIMIT=50
for ((i=0; i <= LIMIT; i++))
do
if [ $i -lt 10 ] ; then
echo -n "00$i "
else
echo -n "0$i "
fi
done
echo ; echo
#echo $STR
i=0
variable2=`while [ "$i" -le 50 ]
do
if [ $i -lt 10 ] ; then
echo -n "lk00$i "
else
echo -n "lk0$i "
fi
let "i += 1" # Increment.
done`
echo "variable2 = $variable2"
mycmd=`echo -n "cat $variable2 > x.tgz"`
echo $mycmd
`$mycmd`
| true
|
09e91742c472ec0875b2e325c3b8546b829801ec
|
Shell
|
mr-segfault/mme
|
/odroid-mitm/masquerade.sh
|
UTF-8
| 1,480
| 3.671875
| 4
|
[
"WTFPL"
] |
permissive
|
#!/bin/sh
UPSTREAM_LAN_INTERFACE=eth0
UPSTREAM_WIFI_INTERFACE=eth0
DOWNSTREAM_WIFI_INTERFACE=wlan1
DOWNSTREAM_LAN_INTERFACE=eth1
# Detect which interface is actually connected to the internet
ifconfig $UPSTREAM_LAN_INTERFACE | grep 'inet ' &> /dev/null
if [ $? -eq 0 ]; then
echo "Using LAN interface for Internet access"
UPSTREAM_INTERFACE=$UPSTREAM_LAN_INTERFACE
else
echo "Using WiFi interface for Internet access"
UPSTREAM_INTERFACE=$UPSTREAM_WIFI_INTERFACE
fi
# Flush all previous rules so we know we're starting from a clean slate
sudo /sbin/iptables -F
# Default policies are allow everything all the time, woohoo!
sudo iptables -P INPUT ACCEPT
sudo iptables -P OUTPUT ACCEPT
sudo iptables -P FORWARD ACCEPT
# Masquerage packets coming out of the LAN interface
sudo /sbin/iptables -t nat -A POSTROUTING -o "$UPSTREAM_INTERFACE" -j MASQUERADE
# Connections which are established or related are allowed from downstream interfaces
sudo /sbin/iptables -A FORWARD -i "$UPSTREAM_INTERFACE" \
-o "$DOWNSTREAM_WIFI_INTERFACE" -m state --state RELATED,ESTABLISHED -j ACCEPT
sudo /sbin/iptables -A FORWARD -i "$UPSTREAM_INTERFACE" \
-o "$DOWNSTREAM_LAN_INTERFACE" -m state --state RELATED,ESTABLISHED -j ACCEPT
# Outbound connections are allowed from downstream interfaces
sudo /sbin/iptables -A FORWARD -i "$DOWNSTREAM_WIFI_INTERFACE" -o "$UPSTREAM_INTERFACE" -j ACCEPT
sudo /sbin/iptables -A FORWARD -i "$DOWNSTREAM_LAN_INTERFACE" -o "$UPSTREAM_INTERFACE" -j ACCEPT
| true
|
b48949257b12c37e1c5c661ff278e1ba392ca966
|
Shell
|
ohmycode/backbone.js-boilerplate-setup-script
|
/omc_bin/setup.sh
|
UTF-8
| 696
| 2.75
| 3
|
[] |
no_license
|
#!/bin/sh
BOILERPLATE_VERSION=`php ./get_boilerplate_version.php`
wget "http://github.com/h5bp/html5-boilerplate/zipball/"$BOILERPLATE_VERSION"stripped" --no-check-certificate .
unzip $BOILERPLATE_VERSION"stripped" -d ./omc_temp
TEMPNAME=`ls ./omc_temp/`
mv omc_temp/$TEMPNAME ./web
wget -P ./web/js/libs/ http://backbonejs.org/backbone.js
wget -P ./web/js/libs/ http://backbonejs.org/backbone-min.js
wget -P ./web/js/libs/ http://documentcloud.github.com/underscore/underscore.js
wget -P ./web/js/libs/ http://documentcloud.github.com/underscore/underscore-min.js
# move everything outside the bin dir
mv ./web ../rename_me
# CLEAN UP
rm -rf ../omc_bin
rm ../start_backbone_project.command
| true
|
2399a25ac57e60c20ce1f6112e1b11db5e6c3528
|
Shell
|
ulissigroup/MTT-MD
|
/SDS/adsorption/simulations/output_umbrella/submit_all.sh
|
UTF-8
| 235
| 2.6875
| 3
|
[] |
no_license
|
if (( $# != 1 )); then {
echo "usage: $0 <num_window>"
exit -1
}; fi
for (( i=0; i<$1; ++i )); do cd $i; sbatch submit_calc.sh; cd ../;done
#for (( i=0; i<$1; ++i )); do cd ../;done
#do sbatch submit_calc.sh
#do cd ../} #done
| true
|
e9f009e891698664a385f51882c4523edf8d4dcf
|
Shell
|
dashkb/buildpack-cmd
|
/bin/compile
|
UTF-8
| 162
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
export BUILD_DIR=$1
export CACHE_DIR=$2
export ENV_DIR=$3
cd "$BUILD_DIR"
cmd=$(cat "$ENV_DIR/BUILDPACK_CMD")
echo "Executing $cmd"
$cmd
| true
|
acbfeff296b20cd02e8701a98fe8f05924b1bf60
|
Shell
|
stori-es/stori_es
|
/tool/db_update_triggers.sh
|
UTF-8
| 777
| 3.75
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
clear
MAIN_PATH=`dirname $0`/..
MYSQL='mysql --no-defaults'
MDB="stories"
GREP=$(which grep)
AWK=$(which awk)
source $MAIN_PATH/aws-settings.prop
TRIGGERS=$MAIN_PATH/site/WEB-INF/schemaUpdates/2012-08-24_systwo-1100-stories_triggers.sql
echo "...Stories db Update Triggers Manually... Started..."
# make sure we can connect to server
$MYSQL -u $LOCAL_DB_USER -e "use $MDB" &>/dev/null
if [ $? -ne 0 ]
then
$ECHO "Error - Cannot connect to mysql server using given username or database does not exits!"
exit 1
fi
FILENAME=${TRIGGERS##*/}
$MYSQL -u $LOCAL_DB_USER $MDB < $TRIGGERS
if [ $? = 0 ]
then
echo "Script Executed Successful " $TRIGGERS
else
echo "Error Running script " $TRIGGERS
fi
echo "...Stories db Update Triggers Manually... Finished..."
| true
|
6153df743d554070c8b537f8a0633c6b3a9da524
|
Shell
|
derek-dalle/config-dalle
|
/bin/gettokens
|
UTF-8
| 363
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Get the user name
if [ $# -lt 1 ]; then
# Default: the Unix username
user=$USER
else
# username argument
user=$1
fi
# First get the Kerberos version 5 tokens
kinit -5 -l 30d "$user@UMICH.EDU"
# Then get the AFS tokens
aklog -cell umich.edu
# Now change the current directory to match that one.
cd /afs/umich.edu/user/${user:0:1}/${user:1:1}/$user
| true
|
b366371299a2cfce9018652d0adfdbf0c49edc4c
|
Shell
|
irepan/NearTest
|
/bin/stack-commons.sh
|
UTF-8
| 3,946
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
export scriptDir=$(cd `dirname $0` ; pwd)
export REGION=$(aws configure get region)
function test_stack {
typeset var local STACK_NAME=$1
aws cloudformation describe-stacks --stack-name $STACK_NAME >/dev/null 2>&1
exitSts=$?
if [ $exitSts -eq 0 ] ; then
echo 0
else
echo 1
fi
}
function stack_exists {
typeset var local STACK_NAME=$1
[ $(test_stack "$STACK_NAME") -eq 0 ]
}
function get_stack_output_file {
typeset var local STACK_NAME=$1
echo "$scriptDir/../$STACK_NAME.outputs.json"
}
function get_stack_outputs {
typeset var local STACK_NAME=$1
typeset var local FILE_NAME=$(get_stack_output_file $STACK_NAME)
if [ ! -f $FILE_NAME ] ; then
aws cloudformation describe-stacks --stack-name $STACK_NAME | jq -r '[.Stacks[0].Outputs[] | {key: .OutputKey, value: .OutputValue}] | from_entries' > $FILE_NAME
fi
cat $FILE_NAME
}
function create_stack {
typeset var local STACK_NAME=$1
typeset var local STACK_BODY=$2
typeset var local STACK_FILE_NAME=$(get_stack_output_file $STACK_NAME)
if ! stack_exists $STACK_NAME ; then
aws cloudformation create-stack \
--template-body file://${STACK_BODY} \
--stack-name ${STACK_NAME}
aws cloudformation wait stack-create-complete \
--stack-name ${STACK_NAME}
fi
#aws cloudformation describe-stacks --stack-name $STACK_NAME
if [ ! -f $STACK_FILE_NAME ] ; then
rm -f $STACK_FILE_NAME
fi
get_stack_outputs $STACK_NAME
}
function create_or_update_stack {
typeset var local STACK_NAME=$1
typeset var local STACK_BODY=$2
typeset var local STACK_PARAMETERS=
typeset var local STACK_FILE_NAME=$(get_stack_output_file $STACK_NAME)
if [ $# -gt 2 ] ; then
STACK_PARAMETERS="$3"
fi
typeset var local exitSts=0
if stack_exists $STACK_NAME ; then
echo "updating stack $STACK_NAME"
aws cloudformation update-stack \
--template-body file://${STACK_BODY} \
--stack-name ${STACK_NAME} \
$STACK_PARAMETERS \
2>/dev/null
exitSts=$?
#No update needed
if [ $exitSts -eq 0 ] ; then
aws cloudformation wait stack-update-complete \
--stack-name ${STACK_NAME}
else
echo "No updates needed for stack $STACK_NAME"
fi
else
echo "creating stack $STACK_NAME"
aws cloudformation create-stack \
--template-body file://${STACK_BODY} \
--stack-name ${STACK_NAME} \
$STACK_PARAMETERS
aws cloudformation wait stack-create-complete \
--stack-name ${STACK_NAME}
fi
#aws cloudformation describe-stacks --stack-name $STACK_NAME
if [ ! -f $STACK_FILE_NAME ] ; then
rm -f $STACK_FILE_NAME
fi
get_stack_outputs $STACK_NAME
}
function wait_for_stack_operation {
typeset var local STACK_NAME=$1
typeset var local exitSts=0
typeset var local STACK_OPERATION=$(aws cloudformation describe-stacks --stack-name MysfitsCognitoStack | jq ' .Stacks[0].StackStatus ' | grep -o -e 'CREATE' -e 'UPDATE' -e 'DELETE')
case $STACK_OPERATION in
'CREATE')
aws cloudformation wait stack-create-complete --stack-name $STACK_NAME
;;
'UPDATE')
aws cloudformation wait stack-update-complete --stack-name $STACK_NAME
;;
esac
}
function getTaskOutputsValue {
typeset var local STACK_NAME=$1
typeset var local VALUE=$2
get_stack_outputs $STACK_NAME | jq ". | .$VALUE" | sed 's/.*"\([^"]*\)".*/\1/'
}
function test_command {
typeset var local COMMAND=$1
typeset var local exitSts=0
which $COMMAND >/dev/null 2>&1
exitSts=$?
if [ $exitSts -eq 0 ] ; then
echo 0
else
echo 1
fi
}
function command_exists {
typeset var local COMMAND="$1"
[ $(test_command "$COMMAND") -eq 0 ]
}
| true
|
6b275ce06538a08672893d7bd79d18f45381c6c5
|
Shell
|
iCodeIN/reaveros
|
/loader/uefi/run.sh
|
UTF-8
| 728
| 2.90625
| 3
|
[
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
if [[ -z "$mkfs" ]]
then
mkfs_fat=mkfs.fat
fi
if ! which $mkfs_fat
then
if [[ -x /sbin/mkfs.fat ]]
then
mkfs_fat=/sbin/mkfs.fat
else
echo "Can't find mkfs.fat; please install it, or provide via an environment variable $$mkfs_fat."
exit 1
fi
fi
set -e
make
dd if=/dev/zero of=fat.img bs=1474560 count=1
$mkfs_fat fat.img
mmd -i fat.img EFI EFI/BOOT
mcopy -o -i fat.img BOOTX64.EFI ::/EFI/BOOT/BOOTX64.EFI
mcopy -o -i fat.img config/reaveros.conf ::/EFI/BOOT/reaveros.conf
qemu-system-x86_64 -no-kvm -bios ../../deps/ovmf/OVMF.fd -hda fat.img -monitor stdio -parallel file:/dev/stdout -cpu qemu64,+sse3,+sse4.1,+sse4.2 -m 2048 -smp 4 -vga std -M pc-i440fx-2.1
| true
|
353383082d0e362922fcf22d311ba3580f1939c4
|
Shell
|
Horgix/salt-states
|
/users/files/bashrc
|
UTF-8
| 423
| 2.703125
| 3
|
[] |
no_license
|
# HGXonf
# by Alexis 'Horgix' Chotard
# https://bitbucket.org/Horgix/
# .bashrc for BASH
# Loads every needed configuration file for bash
for file in ~/.shell-config/*; do
source $file;
done
for file in ~/.bash/*; do
source $file;
done
if [ -d ~/.local.bash/ ]; then
for file in ~/.local.bash/*; do
source $file;
done
fi
if [ -f ~/.extra-paths ]; then
source ~/.extra-paths
fi
true
# EOF
| true
|
8e42e8c2f438642fc8e0038e5f5b182364b768da
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/dtach-git/PKGBUILD
|
UTF-8
| 869
| 2.625
| 3
|
[] |
no_license
|
# $Id$
# Maintainer: John Lane <archlinux@jelmail.com>
# Contributor: Chris Brannon <cmbrannon79@gmail.com>
# Contributor: Allan McRae <allan@archlinux.org>
# Contributor: Adam Vogt <vogt.adam@gmail.com>
pkgname=dtach-git
pkgver=r40.c794d06
pkgrel=1
pkgdesc="emulates the detach feature of screen"
arch=('i686' 'x86_64')
url="http://dtach.sourceforge.net/"
license=('GPL')
depends=('glibc')
provides=('dtach')
conflicts=('dtach')
source=(${pkgname}::git+https://github.com/crigler/dtach.git)
md5sums=('SKIP')
build() {
cd "${srcdir}/${pkgname}"
./configure CFLAGS="$CFLAGS -Wno-unused-result" --prefix=/usr
make
}
package() {
cd "${srcdir}/${pkgname}"
install -Dm755 dtach "${pkgdir}/usr/bin/dtach"
install -Dm644 dtach.1 "${pkgdir}/usr/share/man/man1/dtach.1"
}
pkgver() {
cd "${srcdir}/${pkgname}"
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
# vim:set ts=2 sw=2 et:
| true
|
156c027bb4e3b5da9b90c34a45a0f94cad86c9c9
|
Shell
|
bellmit/swarm
|
/scripts/client/build_js.sh
|
UTF-8
| 424
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# "Builds" all the JS, taking minified GWT output, and cat-ing that with minified native support JS
# like History.JS, CodeMirror, Modernizr, etc. Then updates resource version in HTML (or JSP) file.
APP_JS=$1
JS_MIN_OUT=$2
HTML_FILE=$3
MODULE=$4
sh minify_js.sh
SUPPORT_JS="../../bin/dependencies.min.js"
cat $SUPPORT_JS $APP_JS > "$MODULE/$JS_MIN_OUT"
sh update_resource_version.sh $HTML_FILE $JS_MIN_OUT
| true
|
8e18e0c63d19820af9d1b0eb75078686afcee7c6
|
Shell
|
qianwenluo/biosys-analytics
|
/lectures/04-shell-scripting/examples/while.sh
|
UTF-8
| 98
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
FILE=${1:-'srr.txt'}
while read -r LINE; do
echo "LINE \"$LINE\""
done < "$FILE"
| true
|
f352b75631b7ce11d0a6f9c526f0947c29a4d42f
|
Shell
|
wakafengfan/CPM-1-Finetune
|
/scripts/few-shot_generate_dialog.sh
|
UTF-8
| 1,188
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
DATA_DIR="data/STC/"
CHECKPOINT_PATH="data/checkpoints/CPM-large"
RESULTS_DIR="results/few-shot_dialog/"
TOKENIZER_PATH="bpe_3w_new/"
MPSIZE=2
NLAYERS=32
NHIDDEN=2560
NATT=32
MAXSEQLEN=1024
#SAMPLING ARGS
TEMP=0.9
#If TOPK/TOPP are 0 it defaults to greedy sampling, top-k will also override top-p
TOPK=0
TOPP=0.9
script_path=$(realpath $0)
script_dir=$(dirname $script_path)
config_json="$script_dir/ds_dialog_config.json"
python -m torch.distributed.launch --nproc_per_node 2 --master_port 1255 generate_samples_dialog_fewshot.py \
--data_dir ${DATA_DIR} \
--model-parallel-size $MPSIZE \
--num-layers $NLAYERS \
--hidden-size $NHIDDEN \
--load $CHECKPOINT_PATH \
--num-attention-heads $NATT \
--seq-length $MAXSEQLEN \
--max-position-embeddings 1024 \
--tokenizer-type GPT2BPETokenizer \
--fp16 \
--cache-dir cache \
--out-seq-length 50 \
--temperature $TEMP \
--top_k $TOPK \
--top_p $TOPP \
--tokenizer-path ${TOKENIZER_PATH} \
--vocab-size 30000 \
--deepspeed \
--deepspeed_config ${config_json} \
--results_dir ${RESULTS_DIR}
| true
|
5c2b88d73757245bfd5010d4e54866576b29a2e9
|
Shell
|
ThonyDroidYT/Herramientas
|
/shell-obsh.sh
|
UTF-8
| 1,773
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive menu for shell-script obfuscation: lets the user choose between
# SHC and bash-obfuscate, then downloads and runs the matching helper script.

# Blue separator bar (alternative styles kept below, commented out)
#barra="\033[1;34m======================================================\033[0m"
#barra="\033[1;34m**********************************************************\033[0m"
barra="\033[1;34m+++++++++++++++++++++++++++++++++++++++++++++++++++++++\033[0m"
#barra="\033[1;34m###############################################\033[0m"
#barra="\033[1;34m_____________________________________________________\033[0m"

# NOTE(review): this function shares its name with the $barra variable above
# and is never called in this script; it re-executes an external menu script.
barra () {
cd /etc/newadm && bash menu --barra
}
# When invoked as "<script> --barra", print a separator and exit.
# NOTE(review): "msg" is not defined in this file — presumably supplied by the
# external menu environment; verify.
[[ "$1" = "--barra" ]] && {
msg -bar
exit 0
}
# Menu item numbers (green index, red arrow)
num0='\033[1;32m [0] \033[1;31m>'
num1='\033[1;32m [1] \033[1;31m>'
num2='\033[1;32m [2] \033[1;31m>'
num3='\033[1;32m [3] \033[1;31m>'
num4='\033[1;32m [4] \033[1;31m>'
num5='\033[1;32m [5] \033[1;31m>'
# Foreground colors
blan="\033[1;37m"
plain="\033[0m"
red="\033[1;31m"
green="\033[1;32m"
yellow="\033[1;33m"
blue="\033[1;34m"
purple="\033[1;35m"
cyan="\033[1;36m"
# Background colors
Gris="\033[1;100m"
Rojo="\033[1;41m"
Azul="\033[44m"
# Update system packages
# NOTE(review): fun_update is defined but never called in this script.
fun_update () {
apt-get update -y
apt-get upgrade -y
dpkg --configure -a
clear
}
# Main menu
echo -e "${barra}"
echo -e "${Rojo} ${cyan} SHELL COMPILER - ENCRIPTADOR BASH ${green}[BY: @THONY_DROIDYT]${plain}"
echo -e "${barra}"
echo -e "${num1} ${cyan}USAR SHC ${plain}"
echo -e "${num2} ${cyan}USAR BASH_OBFUSCATE ${plain}"
echo -e "${num0} ${red}EXIT SCRIPT ${plain}"
echo -e "${barra}"
#echo -e "${blue}SELECIONE UNA OPCIÓN: 》 ${yellow}"; read multiscripts
read -p "SELECIONE UNA OPCIÓN: 》" script
# Dispatch on the chosen option; 1/2 stream the selected tool from the web.
case $script in
0)
clear
exit;;
1)bash <(curl -Ls https://thonydroidyt.github.io/Herramientas/TD_SHC.sh);;
2)bash <(curl -Ls https://thonydroidyt.github.io/Herramientas/bash.sh);;
*)echo -e "${red}¡POR FAVOR SELECIONE EL NÚMERO CORRECTO! ${plain}"
exit ;;
esac
| true
|
cda18349a0a10515457241385e971159cccb88dc
|
Shell
|
lday0321/uDepot
|
/scripts/benchmarking/ubench-1core.sh
|
UTF-8
| 6,218
| 2.875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2020 International Business Machines
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Authors: Kornilios Kourtis (kou@zurich.ibm.com, kornilios@gmail.com)
#
# Record the benchmark environment: device under test, visible NVMe devices,
# and the kernel's network busy-polling settings (all written under $dname).
echo "device:$DEVNAME SN:$DEV_SN NUMA_NODE:$DEV_NODE CPUS:${DEV_CPUS[@]}" > $dname/device
sudo nvme list 2>&1 > $dname/nvme-devices
grep -H . /proc/sys/net/core/busy_poll /proc/sys/net/core/busy_read > $dname/net_busy_polling
# Benchmark uDepot over TRT with the Linux AIO backend (-u 5), sweeping the
# number of TRT tasks over $QD. Each configuration runs twice: once under
# perf record, and once plain, because profiling perturbs throughput.
function run_trt_aio {
  for trt_tasks in $QD; do
    rdir=$(printf "$dname/trt-aio-ntasks-%03g\n" $trt_tasks)
    mkdir $rdir
    # Profiled run: record with perf, save command and output alongside.
    cmd="sudo $PERF_RECORD_TRT_AIO -o $rdir/perf.data $TASKSET $UDEPOT_TEST -f $DEVNAME -u 5 -t 1 --trt-ntasks $trt_tasks $COMMON_OPTS"
    echo $cmd | tee $rdir/perf-cmd
    $cmd 2>&1 | tee $rdir/perf-log
    # NB: There seems to be a significant change in the measured throughput
    # even with when doing sampling (-F 1000). Do a run without profiling.
    cmd="sudo $TASKSET $UDEPOT_TEST -f $DEVNAME -u 5 -t 1 --trt-ntasks $trt_tasks $COMMON_OPTS"
    echo $cmd | tee $rdir/cmd
    $cmd 2>&1 | tee $rdir/log
  done
}
# Benchmark uDepot with plain POSIX O_DIRECT I/O (-u 3), sweeping the thread
# count over $QD. Like run_trt_aio, each point runs once profiled and once
# plain since profiling skews the measured throughput.
function run_linux_directio {
  for threads in $QD; do
    rdir=$(printf "$dname/linux-directio-nthreads-%03g\n" $threads)
    mkdir $rdir
    cmd="sudo $PERF_RECORD_POSIX_ODIRECT -o $rdir/perf.data $TASKSET $UDEPOT_TEST -f $DEVNAME -u 3 -t $threads $COMMON_OPTS"
    echo $cmd | tee $rdir/perf-cmd
    $cmd 2>&1 | tee $rdir/perf-log
    # NB: There seems to be a significant change in the measured throughput
    # even with when doing sampling (-F 1000). Do a run without profiling.
    cmd="sudo $TASKSET $UDEPOT_TEST -f $DEVNAME -u 3 -t $threads $COMMON_OPTS"
    echo $cmd | tee $rdir/cmd
    $cmd 2>&1 | tee $rdir/log
  done
}
# Benchmark uDepot with buffered POSIX I/O (-u 2), sweeping the thread count
# over $QD. Same profiled + unprofiled double-run pattern as the others.
function run_linux_buffered {
  for threads in $QD; do
    rdir=$(printf "$dname/linux-buffered-nthreads-%03g\n" $threads)
    mkdir $rdir
    cmd="sudo $PERF_RECORD_POSIX -o $rdir/perf.data $TASKSET $UDEPOT_TEST -f $DEVNAME -u 2 -t $threads $COMMON_OPTS"
    echo $cmd | tee $rdir/perf-cmd
    $cmd 2>&1 | tee $rdir/perf-log
    # NB: There seems to be a significant change in the measured throughput
    # even with when doing sampling (-F 1000). Do a run without profiling.
    cmd="sudo $TASKSET $UDEPOT_TEST -f $DEVNAME -u 2 -t $threads $COMMON_OPTS"
    echo $cmd | tee $rdir/cmd
    $cmd 2>&1 | tee $rdir/log
  done
}
#
# Benchmark uDepot over TRT with the SPDK backend (-u 6), addressing the
# device by serial number ($DEV_SN) since SPDK detaches it from the kernel.
# Sweeps TRT task count over $QD, with one profiled and one plain run each.
function run_trt_spdk {
  for trt_tasks in $QD; do
    rdir=$(printf "$dname/trt-spdk-ntasks-%03g\n" $trt_tasks)
    mkdir $rdir
    cmd="sudo $PERF_RECORD_TRT_SPDK -o $rdir/perf.data $TASKSET_SPDK $UDEPOT_TEST -u 6 -f $DEV_SN -t 1 --trt-ntasks $trt_tasks $COMMON_OPTS"
    echo $cmd | tee $rdir/perf-cmd
    $cmd 2>&1 | tee $rdir/perf-log
    # NB: There seems to be a significant change in the measured throughput
    # even with when doing sampling (-F 1000). Do a run without profiling.
    cmd="sudo $TASKSET_SPDK $UDEPOT_TEST -u 6 -f $DEV_SN -t 1 --trt-ntasks $trt_tasks $COMMON_OPTS"
    echo $cmd | tee $rdir/cmd
    $cmd 2>&1 | tee $rdir/log
  done
}
# Baseline sweep with SPDK's own perf tool over $QD for randread / write /
# randwrite, pinned to the device's first CPU. First pass collects latency
# statistics (-ll -L), second pass measures throughput only.
function run_spdk_perf {
  # CPU mask with only the device's first CPU set.
  # NOTE(review): this is Python 2 syntax ("print hex(...)"); on a
  # python3-only host the mask computation fails — confirm the environment.
  SPDK_MASK=$(python -c "print hex(1<<${DEV_CPUS[0]})")
  for qd in $QD; do
    for wload in randread write randwrite; do
      cmd="sudo ./trt/external/spdk/examples/nvme/perf/perf -c $SPDK_MASK -ll -L -s 4096 -q $qd -t $((5*60)) -w $wload -r '$SPDK_DEV'"
      echo $cmd
      eval $cmd
    done
  done > $dname/spdk-perf-1core-latencies
  for qd in $QD; do
    for wload in randread write randwrite; do
      cmd="sudo ./trt/external/spdk/examples/nvme/perf/perf -c $SPDK_MASK -s 4096 -q $qd -t $((5*60)) -w $wload -r '$SPDK_DEV'"
      echo $cmd
      eval $cmd
    done
  done > $dname/spdk-perf-1core
}
# Shared server/client options for the networked (*_net) benchmark variants;
# the client connects to the local server at $SRV_ADDR:5555.
COMMON_SRV_OPTS="-w 0 -r 0 --force-destroy --grain-size 32 --server-conf *:5555"
COMMON_CLI_OPTS="--grain-size 32 -u 2 -w $NWRS -r $NRDS --val-size 4000 -f /dev/shm/deleteme --force-destroy --thin --zero-copy -m $SRV_ADDR:5555"
#nix-copy-closure $CLI_ADDR $PERF_BINARY
# Networked benchmark: start a 1-core TRT+AIO uDepot server locally, then
# drive it over ssh from $CLI_ADDR with $qd client threads. Each queue depth
# runs twice (perf=1: client profiled with perf; perf=0: plain run).
function run_trt_aio_net {
  for qd in $QD; do
    rdir=$(printf "$dname/trt-aio-net-qd-%03g\n" $qd)
    mkdir $rdir
    echo $SRV_ADDR >$rdir/srv-addr
    echo $CLI_ADDR >$rdir/cli-addr
    for perf in 1 0; do
      if [ $perf == "1" ]; then
        cli_fname="$rdir/perf-cli"
        srv_fname="$rdir/perf-srv"
      else
        cli_fname="$rdir/cli"
        srv_fname="$rdir/srv"
      fi
      # Build and launch the server command in the background.
      srv_cmd="sudo"
      #srv_cmd="$srv_cmd $PERF -o $rdir/perf-srv.data"
      srv_cmd="$srv_cmd $TASKSET"
      srv_cmd="$srv_cmd $UDEPOT_TEST -f $DEVNAME"
      srv_cmd="$srv_cmd -u 5"
      srv_cmd="$srv_cmd -t 1"
      srv_cmd="$srv_cmd $COMMON_SRV_OPTS"
      echo $srv_cmd > ${srv_fname}-cmd
      ($srv_cmd 2>&1 | tee ${srv_fname}-log) &
      # NOTE(review): SRV_PID is captured but never waited on; the server is
      # stopped via the pkill below — confirm that is intended.
      SRV_PID=$!
      sleep 1s
      # Build and run the client command remotely; stdout goes to the log.
      cli_cmd="sudo"
      if [ $perf == "1" ]; then
        cli_cmd="$cli_cmd $PERF_RECORD_TRT_AIO -o $rdir/perf-cli.data"
      fi
      cli_cmd="$cli_cmd $UDEPOT_TEST"
      cli_cmd="$cli_cmd $COMMON_CLI_OPTS"
      cli_cmd="$cli_cmd -t $qd"
      echo $cli_cmd >${cli_fname}-cmd
      ssh $CLI_ADDR "cd ~/wibm/src/udepot.git; $cli_cmd" 2>&1 1>${cli_fname}-log
      sudo pkill udepot-test
    done
  done
}
# Networked benchmark, SPDK variant: identical structure to run_trt_aio_net
# but the server uses the SPDK backend (-u 6) and addresses the device by
# serial number ($DEV_SN) with the SPDK CPU pinning.
function run_trt_spdk_net {
  for qd in $QD; do
    rdir=$(printf "$dname/trt-spdk-net-qd-%03g\n" $qd)
    mkdir $rdir
    echo $SRV_ADDR >$rdir/srv-addr
    echo $CLI_ADDR >$rdir/cli-addr
    for perf in 1 0; do
      if [ $perf == "1" ]; then
        cli_fname="$rdir/perf-cli"
        srv_fname="$rdir/perf-srv"
      else
        cli_fname="$rdir/cli"
        srv_fname="$rdir/srv"
      fi
      # Build and launch the SPDK-backed server in the background.
      srv_cmd="sudo"
      #srv_cmd="$srv_cmd $PERF -o $rdir/perf-srv.data"
      srv_cmd="$srv_cmd $TASKSET_SPDK"
      srv_cmd="$srv_cmd $UDEPOT_TEST -f $DEV_SN"
      srv_cmd="$srv_cmd -u 6"
      srv_cmd="$srv_cmd -t 1"
      srv_cmd="$srv_cmd $COMMON_SRV_OPTS"
      echo $srv_cmd > ${srv_fname}-cmd
      ($srv_cmd 2>&1 | tee ${srv_fname}-log) &
      # NOTE(review): SRV_PID is captured but never waited on; the server is
      # stopped via the pkill below — confirm that is intended.
      SRV_PID=$!
      sleep 1s
      # Build and run the client command remotely; stdout goes to the log.
      cli_cmd="sudo"
      if [ $perf == "1" ]; then
        cli_cmd="$cli_cmd $PERF_RECORD_TRT_SPDK -o $rdir/perf-cli.data"
      fi
      cli_cmd="$cli_cmd $UDEPOT_TEST"
      cli_cmd="$cli_cmd $COMMON_CLI_OPTS"
      cli_cmd="$cli_cmd -t $qd"
      echo $cli_cmd >${cli_fname}-cmd
      ssh $CLI_ADDR "cd ~/wibm/src/udepot.git; $cli_cmd" 2>&1 1>${cli_fname}-log
      sudo pkill udepot-test
    done
  done
}
# Benchmark driver: kernel-I/O benchmarks first, then switch the device over
# to SPDK (which detaches it from the kernel) and run the SPDK benchmarks.
run_linux_directio
run_trt_aio
#run_linux_buffered
#run_trt_aio_net
# Bind the NVMe device to SPDK and reserve hugepages before the SPDK runs.
sudo NRHUGE=1024 trt/external/spdk/scripts/setup.sh
run_trt_spdk
run_spdk_perf
#run_trt_spdk_net
| true
|
57267f48874350aedabe3f34b903ea9b1d1eed7f
|
Shell
|
doohee323/tz-tajo
|
/scripts/master01.sh
|
UTF-8
| 3,382
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision the Tajo master node (master01) of a Vagrant-based cluster:
# register cluster host names, install Oracle Java 8, set up environment
# variables and passwordless SSH, then install Hadoop 2.7.2 and Tajo 0.11.1.

# change hosts
echo '' >> /etc/hosts
echo '# for vm' >> /etc/hosts
echo '192.168.82.170 master01' >> /etc/hosts
echo '192.168.82.171 slave01' >> /etc/hosts
echo '192.168.82.172 slave02' >> /etc/hosts
echo '192.168.82.173 slave03' >> /etc/hosts

echo "Reading config...." >&2
source /vagrant/setup.rc

# Install Oracle Java 8 from the webupd8team PPA, auto-accepting the license.
apt-get -y -q update
apt-get install software-properties-common python-software-properties -y
add-apt-repository ppa:webupd8team/java -y
apt-get -y -q update
echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections
apt-get -y -q install oracle-java8-installer
apt-get purge openjdk* -y
# NOTE(review): unlike the surrounding apt-get calls this one lacks -y and may
# prompt interactively — confirm.
apt-get install oracle-java8-set-default
apt-get install wget curl unzip -y

# NOTE(review): `su - vagrant` spawns a login shell here; in a non-interactive
# provisioner this does not switch the user for the remainder of the script —
# verify intent.
su - vagrant

# Release names, install locations and Tajo heap sizing for this node.
export NODE=tajo-0.11.1
export PROJ_DIR=/home/vagrant
export SERVERS=/vagrant/servers
#export SERVERS=/Users/dhong/Documents/workspace/etc/tz-tajo/servers
export JAVA_HOME=/usr/lib/jvm/java-8-oracle
export HADOOP_HOME=/vagrant/servers/hadoop-2.7.2
export TAJO_HOME=/vagrant/servers/${NODE}
export TAJO_MASTER_HEAPSIZE=1000
export TAJO_WORKER_HEAPSIZE=5000
export TAJO_PID_DIR=${TAJO_HOME}/pids
export TAJO_LOG_DIR=${TAJO_HOME}/logs

# Persist the same environment for future vagrant login shells.
echo '' >> $PROJ_DIR/.bashrc
echo 'export SERVERS=/vagrant/servers' >> $PROJ_DIR/.bashrc
echo 'export JAVA_HOME='$JAVA_HOME >> $PROJ_DIR/.bashrc
echo 'export NODE='$NODE >> $PROJ_DIR/.bashrc
echo 'export PROJ_DIR='$PROJ_DIR >> $PROJ_DIR/.bashrc
echo 'export SERVERS='$SERVERS >> $PROJ_DIR/.bashrc
echo 'export HADOOP_HOME='$HADOOP_HOME >> $PROJ_DIR/.bashrc
echo 'export TAJO_HOME='$TAJO_HOME >> $PROJ_DIR/.bashrc
echo 'export TAJO_MASTER_HEAPSIZE='$TAJO_MASTER_HEAPSIZE >> $PROJ_DIR/.bashrc
echo 'export TAJO_WORKER_HEAPSIZE='$TAJO_WORKER_HEAPSIZE >> $PROJ_DIR/.bashrc
echo 'export TAJO_PID_DIR='$TAJO_PID_DIR >> $PROJ_DIR/.bashrc
echo 'export TAJO_LOG_DIR='$TAJO_LOG_DIR >> $PROJ_DIR/.bashrc
echo 'export HADOOP_PREFIX=/vagrant/servers/hadoop-2.7.2' >> $PROJ_DIR/.bashrc
echo 'export PATH=$PATH:.:$SERVERS/apache-storm-0.10.0/bin:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin' >> $PROJ_DIR/.bashrc

# ssh setting: passwordless key pair plus relaxed host-key checking so the
# master can reach the slaves without prompts.
mkdir -p $PROJ_DIR/.ssh
ssh-keygen -t dsa -P '' -f $PROJ_DIR/.ssh/id_dsa
cat $PROJ_DIR/.ssh/id_dsa.pub >> $PROJ_DIR/.ssh/authorized_keys
echo '' >> /etc/ssh/ssh_config
echo '    ForwardX11 no' >> /etc/ssh/ssh_config
echo '    StrictHostKeyChecking no' >> /etc/ssh/ssh_config
sudo service ssh restart
#ssh vagrant@slave01 "mkdir -p ~/.ssh"
#scp ~/.ssh/authorized_keys slave01:~/.ssh/.
#ssh vagrant@slave01 "chmod 755 ~/.ssh; chmod 644 ~/.ssh/authorized_keys"

mkdir -p $SERVERS/tmp/${NODE}
cd $SERVERS/tmp/${NODE}

# hadoop download (skipped if the tarball is already cached)
if [ ! -f "hadoop-2.7.2.tar.gz" ]; then
    wget http://apache.arvixe.com/hadoop/common/hadoop-2.7.2/hadoop-2.7.2.tar.gz
fi
tar xvf hadoop-2.7.2.tar.gz
rm -Rf $SERVERS/hadoop-2.7.2
mv hadoop-2.7.2 $SERVERS
cp -Rf $SERVERS/configs/hadoop/etc/hadoop/* $SERVERS/hadoop-2.7.2/etc/hadoop

# tajo download (skipped if the tarball is already cached)
if [ ! -f "tajo-0.11.1.tar.gz" ]; then
    wget http://apache.mirror.cdnetworks.com/tajo/tajo-0.11.1/tajo-0.11.1.tar.gz
fi
tar xvf tajo-0.11.1.tar.gz
rm -Rf $SERVERS/${NODE}
mv $SERVERS/tmp/${NODE}/${NODE} $SERVERS
cd $SERVERS/${NODE}
cp -Rf $SERVERS/configs/tajo/conf/* $SERVERS/${NODE}/conf
# Substitute the literal placeholder text '${NODE}' in tajo-env.sh with the
# actual release name (single quotes keep the pattern unexpanded).
# NOTE(review): `-ie` makes sed treat 'e' as a backup suffix, leaving a
# "tajo-env.she" backup file behind — confirm that is intended.
sed -ie 's/${NODE}/'${NODE}'/g' $SERVERS/${NODE}/conf/tajo-env.sh
chown -Rf vagrant:vagrant $SERVERS
ln -s $SERVERS/hadoop-2.7.2 $PROJ_DIR/hadoop-2.7.2
exit 0
| true
|
7d782218113291a1a85442392b91eea2cc70694f
|
Shell
|
rosethepose/username
|
/username.sh
|
UTF-8
| 401
| 3.34375
| 3
|
[] |
no_license
|
# Prompt for a username and re-prompt until it is valid: 3-12 characters,
# starting with a lower-case letter, containing only lower-case letters,
# digits and underscore.
echo '''Enter a username with at least 3 and no more that 12 characters
It must start with a lower case letter
and the only charcaters that can be used are lower case letters, digits, and underscore.'''
read -r USERNAME
# BUG FIX: the original pattern "^[a-z]+[0-9,a-z,_]{3,12}$" accepted literal
# commas (the "," inside the bracket is not a separator) and enforced a total
# length of 4-13 characters. The stated rule is one leading letter followed
# by 2-11 more characters from [a-z0-9_].
while printf '%s\n' "$USERNAME" | grep -E -v "^[a-z][0-9a-z_]{2,11}$" > /dev/null 2>&1
do
    echo "You must have a valid username"
    echo "Enter a username"
    # Abort instead of looping forever when stdin is exhausted (EOF).
    read -r USERNAME || exit 1
done
echo "Thank You"
| true
|
a9d049149dd1ef76157f7c455575b69f7a426e83
|
Shell
|
rsenn/scripts
|
/sh/rcat.sh
|
UTF-8
| 623
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
NL="
"
#
# rcat.sh:
#
# $Id: rcat.sh 538 2008-08-18 19:20:49Z enki $
# -------------------------------------------------------------------------

# pushv VAR WORD...: append WORD(s) to the list variable named VAR, using the
# first IFS character as separator. Implemented via eval; callers must pass a
# plain variable name as VAR.
pushv()
{
 eval "shift;$1=\"\${$1+\"\$$1\${IFS%\"\${IFS#?}\"}\"}\$*\""
}

# rcat [options] [files...]
#
# A recursive 'cat' through 'grep': matching every line ('^') makes grep act
# as cat while keeping grep's option handling (e.g. -r for recursion).
# With no file arguments, grep filters stdin.
# ---------------------------------------------------------------------------
rcat()
{
 (IFS="
"; OPTS= ARGS=
  while [ -n "$1" ]; do
    case $1 in
      # BUG FIX: the catch-all '*' arm originally came first, making this
      # '-*' arm unreachable so options were collected as file names.
      # Dash-prefixed arguments must be tested before the catch-all.
      -*) pushv OPTS "$1" ;;
      *) pushv ARGS "$1" ;;
    esac
    shift
  done

  ${GREP-grep
-a
--line-buffered
--color=auto} --color=no $OPTS '^' $ARGS)
}

rcat "$@"
| true
|
6f27ba9d06780e290cdb5c28b92a35b59e20d199
|
Shell
|
gruntwork-io/terraform-aws-couchbase
|
/modules/couchbase-commons/couchbase-common.sh
|
UTF-8
| 10,068
| 4.03125
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Common helper functions for operating a Couchbase cluster from provisioning
# scripts. Depends on the Gruntwork bash-commons libraries sourced below
# (log_*, string_contains, assert_*, aws_wrapper_*).

set -e

source "/opt/gruntwork/bash-commons/log.sh"
source "/opt/gruntwork/bash-commons/string.sh"
source "/opt/gruntwork/bash-commons/assert.sh"
source "/opt/gruntwork/bash-commons/aws-wrapper.sh"

# Standard Couchbase install locations.
readonly COUCHBASE_BASE_DIR="/opt/couchbase"
readonly COUCHBASE_BIN_DIR="$COUCHBASE_BASE_DIR/bin"
readonly COUCHBASE_CLI="$COUCHBASE_BIN_DIR/couchbase-cli"
# Run the Couchbase CLI, forwarding all arguments verbatim and tolerating
# non-zero exit codes.
#
# Arguments: forwarded as-is to couchbase-cli.
# Outputs:   whatever couchbase-cli writes.
# Returns:   always 0; failures are deliberately ignored because callers make
#            decisions based on stdout instead of the exit code.
function run_couchbase_cli {
  # BUG FIX: "$@" must stay quoted; the original unquoted $@ re-split any
  # argument containing whitespace (e.g. passwords with spaces). Also use
  # "local -r" — "local readonly x" declares a stray local named "readonly"
  # instead of marking x read-only.
  local -r args=("$@")

  # couchbase-cli may exit with an error even on expected conditions, so
  # temporarily disable exit-on-error around the invocation.
  set +e
  "$COUCHBASE_CLI" "${args[@]}"
  set -e
}
# Run the Couchbase CLI and retry until its stdout contains the expected
# message or the retry budget is exhausted.
#
# Arguments:
#   $1 - human-readable description of the command (for logging)
#   $2 - substring that must appear in couchbase-cli stdout to count as success
#   $3 - maximum number of attempts
#   $4 - seconds to sleep between attempts
#   $5.. - arguments forwarded to couchbase-cli
# Returns: 0 on success; exits the whole script with status 1 after $3 failures.
function run_couchbase_cli_with_retry {
  local -r cmd_description="$1"
  local -r expected_message="$2"
  local -r max_retries="$3"
  local -r sleep_between_retries_sec="$4"
  shift 4
  # BUG FIX: "$@" must stay quoted; the original unquoted $@ re-split
  # arguments containing whitespace.
  local -r args=("$@")

  local i
  local out
  for (( i=0; i<max_retries; i++ )); do
    out=$(run_couchbase_cli "${args[@]}")
    if string_contains "$out" "$expected_message"; then
      log_info "Success: $cmd_description."
      return
    fi
    log_warn "Failed to $cmd_description. Will sleep for $sleep_between_retries_sec seconds and try again. couchbase-cli output:\n$out"
    sleep "$sleep_between_retries_sec"
  done

  log_error "Failed to $cmd_description after $max_retries retries."
  exit 1
}
# True (0) iff the cluster at $1 (credentials $2/$3) has been initialized:
# an initialized cluster reports at least one "healthy active" node in its
# server list.
function cluster_is_initialized {
  local url="$1"
  local user="$2"
  local pass="$3"

  local status
  status=$(get_cluster_status "$url" "$user" "$pass")

  string_contains "$status" "healthy active"
}
# True (0) iff the Couchbase process at node $1 responds to queries at all.
# Deliberately accepts both an initialized node ("healthy active") and a
# freshly booted, un-clustered one ("unknown pool"); it does NOT imply the
# node has joined the cluster and is active.
function couchbase_is_running {
  local node="$1"
  local user="$2"
  local pass="$3"

  # The status query may fail outright while the server is booting, so
  # suspend exit-on-error around it.
  set +e
  local status
  status=$(get_cluster_status "$node" "$user" "$pass")
  set -e

  string_contains "$status" "healthy active" || string_contains "$status" "unknown pool"
}
# Query the cluster's server list via couchbase-cli. When the cluster is
# initialized the output has one row per node of the form:
#
#   ns_1@172.19.0.2 172.19.0.2:8091 healthy active
#
# Otherwise couchbase-cli's error text (e.g. "unknown pool") is returned.
function get_cluster_status {
  local url="$1"
  local user="$2"
  local pass="$3"

  log_info "Looking up server status in $url"

  run_couchbase_cli \
    "server-list" \
    "--cluster=$url" \
    "--username=$user" \
    "--password=$pass"
}
# True (0) iff node $4 has been added (server-add) to the cluster at $1: it
# appears as "healthy" in the server list. This does NOT imply the node is
# active — that additionally requires a rebalance (see
# node_is_active_in_cluster).
function node_is_added_to_cluster {
  local url="$1"
  local user="$2"
  local pass="$3"
  local node="$4"

  local status
  status=$(get_cluster_status "$url" "$user" "$pass")

  string_multiline_contains "$status" "$node healthy"
}
# True (0) iff node $4 has been added to the cluster at $1 AND activated by a
# rebalance: it appears as "healthy active" in the server list.
function node_is_active_in_cluster {
  local url="$1"
  local user="$2"
  local pass="$3"
  local node="$4"

  local status
  status=$(get_cluster_status "$url" "$user" "$pass")

  string_multiline_contains "$status" "$node healthy active"
}
# Returns true (0) if the cluster is balanced and false (1) otherwise
# ("balanced" = no rebalance operation currently in progress).
# NOTE(review): "local readonly x" declares a stray local named "readonly"
# rather than marking x read-only; "local -r" was probably intended.
function cluster_is_balanced {
  local readonly cluster_url="$1"
  local readonly cluster_username="$2"
  local readonly cluster_password="$3"

  log_info "Checking if cluster $cluster_url is currently rebalancing..."

  local server_list_args=()
  server_list_args+=("rebalance-status")
  server_list_args+=("--cluster=$cluster_url")
  server_list_args+=("--username=$cluster_username")
  server_list_args+=("--password=$cluster_password")

  local out
  out=$(run_couchbase_cli "${server_list_args[@]}")

  # rebalance-status returns JSON; .status is "running" while a rebalance is
  # in progress, so anything else counts as balanced.
  local status
  status=$(echo "$out" | jq -r '.status')

  [[ "$status" != "running" ]]
}
# Succeed (0) iff bucket $4 exists in the cluster at $1 (credentials $2/$3).
#
# bucket-list prints each bucket name on its own line followed by indented
# attribute lines:
#
#   <BUCKET_NAME>
#     bucketType: membase
#     numReplicas: 1
#     ...
#
# so an exact whole-line match on the bucket name is sufficient.
function has_bucket {
  local url="$1"
  local user="$2"
  local pass="$3"
  local bucket="$4"

  log_info "Checking if bucket $bucket exists in $url"

  local listing
  listing=$(run_couchbase_cli "bucket-list" "--cluster=$url" "--username=$user" "--password=$pass")

  string_multiline_contains "$listing" "^$bucket$"
}
# Block until the cluster at $1 reports ready (initialized and not
# rebalancing), polling every 5 seconds. Gives up and exits the whole script
# with status 1 after 200 attempts.
function wait_for_couchbase_cluster {
  local url="$1"
  local user="$2"
  local pass="$3"
  local max_attempts=200
  local poll_interval=5

  for (( i=0; i<max_attempts; i++ )); do
    if cluster_is_ready "$url" "$user" "$pass"; then
      log_info "Cluster $url is ready!"
      return
    fi
    log_warn "Cluster $url is not yet ready. Will sleep for $poll_interval seconds and check again."
    sleep "$poll_interval"
  done

  log_error "Cluster $url still not initialized after $max_attempts retries."
  exit 1
}
# True (0) iff the cluster at $1 is both initialized and not mid-rebalance;
# logs the reason and returns 1 otherwise.
function cluster_is_ready {
  local url="$1"
  local user="$2"
  local pass="$3"

  if cluster_is_initialized "$url" "$user" "$pass"; then
    if cluster_is_balanced "$url" "$user" "$pass"; then
      return 0
    fi
    log_warn "Cluster $url is currently rebalancing."
    return 1
  fi

  log_warn "Cluster $url is not yet initialized."
  return 1
}
# Block until bucket $4 shows up in the cluster at $1, polling every 5
# seconds. Gives up and exits the whole script with status 1 after 200
# attempts.
function wait_for_bucket {
  local url="$1"
  local user="$2"
  local pass="$3"
  local bucket="$4"
  local max_attempts=200
  local poll_interval=5

  for (( i=0; i<max_attempts; i++ )); do
    if has_bucket "$url" "$user" "$pass" "$bucket"; then
      log_info "Bucket $bucket exists in cluster $url."
      return
    fi
    log_warn "Bucket $bucket does not yet exist in cluster $url. Will sleep for $poll_interval seconds and check again."
    sleep "$poll_interval"
  done

  log_error "Bucket $bucket still does not exist in cluster $url after $max_attempts retries."
  exit 1
}
# Identify the server to use as a "rally point." This is the "leader" of the cluster that can be used to initialize
# the cluster and kick off replication. We use a simple technique to identify a unique rally point in each ASG: look
# up all the Instances in the ASG and select the one with the oldest launch time. If there is a tie, pick the one with
# the lowest Instance ID (alphabetically). This way, all servers will always select the same server as the rally point.
# If the rally point server dies, all servers will then select the next oldest launch time / lowest Instance ID.
#
# Arguments:
#   $1 - AWS region
#   $2 - Auto Scaling Group name
#   $3 - "true" to return the public DNS name, anything else for private
# Outputs: the chosen instance's hostname on stdout (no trailing newline).
function get_rally_point_hostname {
  local readonly aws_region="$1"
  local readonly asg_name="$2"
  local readonly use_public_hostname="$3"

  log_info "Looking up rally point for ASG $asg_name in $aws_region"

  local instances
  instances=$(aws_wrapper_wait_for_instances_in_asg "$asg_name" "$aws_region")
  assert_not_empty_or_null "$instances" "Fetch list of Instances in ASG $asg_name"

  # Deterministic pick: sort all instances by (LaunchTime, InstanceId) and
  # take the first, so every server independently selects the same leader.
  local rally_point
  rally_point=$(echo "$instances" | jq -r '[.Reservations[].Instances[]] | sort_by(.LaunchTime, .InstanceId) | .[0]')
  assert_not_empty_or_null "$rally_point" "Select rally point server in ASG $asg_name"

  local hostname_field=".PrivateDnsName"
  if [[ "$use_public_hostname" == "true" ]]; then
    hostname_field=".PublicDnsName"
  fi

  local hostname
  hostname=$(echo "$rally_point" | jq -r "$hostname_field")
  assert_not_empty_or_null "$hostname" "Get hostname from field $hostname_field for rally point in $asg_name: $rally_point"

  echo -n "$hostname"
}
| true
|
72d583aac01f2ccb6db5a4b0b6279f1afb79a49b
|
Shell
|
anarabanana/Dictionary-English_Kazakh
|
/dictionary_ui
|
UTF-8
| 1,254
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/sh
# Dialog-based front end for the "dictionary" service. With no arguments the
# local `dictionary` binary is used; given host (and optionally port) — or
# when invoked as `ndictionary_ui` — queries go over the network via nc.

CALC=dictionary
HOST=localhost
PORT=1234
FILE1=/tmp/calc1-$$       # dialog input capture
FILE2=/tmp/calc2-$$       # backend output
ERROR=/tmp/calc-err-$$    # backend error output
GETTEXT="gettext -d dictionary_ui"

# Optional HOST/PORT overrides from per-user or system-wide config.
if [ -r ~/.dictionary.conf ]
then
    . ~/.dictionary.conf
elif [ -r /etc/dictionary.conf ]
then
    . /etc/dictionary.conf
fi

# Prefer the GTK dialog when available. BUG FIX: redirect stdout as well —
# the original leaked the gdialog path onto stdout.
which gdialog > /dev/null 2>&1 && DIALOG=gdialog || DIALOG=dialog

help () {
    echo "Usage: $0 [ host [port] | --help ]"
}

# Remove temp files and quit; the common exit path for the dialog loop.
end () {
    rm -f $FILE1 $FILE2 $ERROR
    exit
}

if [ $# -eq 1 ]
then
    # BUG FIX: the original used the numeric comparator -eq against the
    # string '--help', which always errored and fell through to the else
    # branch (setting HOST=--help). Also use a portable positive exit code
    # ("exit -1" is rejected by dash).
    if [ "$1" = '--help' ]
    then
        help
        exit 1
    else
        HOST=$1
    fi
elif [ $# -eq 2 ]
then
    # BUG FIX: with two arguments the original ignored $1 and kept
    # HOST=localhost, contradicting the "host [port]" usage.
    HOST=$1
    PORT=$2
fi

# Network mode when arguments were given or when invoked as ndictionary_ui.
# (Deprecated test operators -o/-a replaced by shell-level || and &&.)
if [ $# -ge 1 ] || [ "`basename $0`" = 'ndictionary_ui' ]
#if [ $# -ge 1 -o `basename $0` = '' ]
then
    if [ -n "$HOST" ] && [ -n "$PORT" ]
    then
        # CALC is expanded unquoted below on purpose: it must word-split
        # into "nc <host> <port>".
        CALC="nc $HOST $PORT"
    else
        echo "You must set HOST and PORT"
        exit 2
    fi
fi

# Main loop: prompt for a word number, feed it to the backend, then show the
# translation (or the error) and ask whether to continue.
while true
do
    $DIALOG --inputbox "`$GETTEXT \"Enter the word number:\"`" 8 35 2> $FILE1 || end
    if cat $FILE1 | $CALC > $FILE2 2>$ERROR
    then
        MSG="`$GETTEXT \"Translation:\"` `cat $FILE2`\\n\\n`$GETTEXT \"Continue?\"`"
        $DIALOG --yesno "$MSG" 7 20 || end
    else
        MSG="`$GETTEXT \"Error:\"`\\n\\n`cat $ERROR`\\n\\n`$GETTEXT \"Continue?\"`"
        $DIALOG --yesno "$MSG" 10 35 || end
    fi
done
| true
|
51ef567e645af02ad5f479692590b31f5d733b94
|
Shell
|
realBjornRoden/cognition
|
/azure/pre-request.sh
|
UTF-8
| 2,555
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#
# pre-request.sh
#
# Copyright (c) 2019 B.Roden
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Build the JSON request body for an Azure Cognitive Services call.
#   $1 - operation name (vision-*, face-*, base64*)
#   $2 - input: an image URL for URL-based operations, or a local file path
#        for the file-based ones (vision-ocr*, vision-tag)
# On success prints the request file name ($REQ); exits 1 on bad input.
REQ=request.json
[[ -z "$2" ]] && { echo "***ENOFILE"; exit 1; }
INPUT=$2
case $1 in
vision-pdf)
cat <<-EOD > $REQ
{ "url" : "$INPUT" }
EOD
#[[ -f "$INPUT" ]] || { echo "***ENOFILE"; exit 1; }
#file $INPUT
#exit 0
;;
# File-based operations: only validate that the local file exists; the
# binary is posted directly, so no JSON body is produced (early exit 0).
vision-ocr*)
[[ -f "$INPUT" ]] || { echo "***ENOFILE"; exit 1; }
file $INPUT
exit 0
;;
vision-tag)
[[ -f "$INPUT" ]] || { echo "***ENOFILE"; exit 1; }
file $INPUT
exit 0
;;
vision-objects)
cat <<-EOD > $REQ
{ "url" : "$INPUT" }
EOD
;;
vision-landmark)
cat <<-EOD > $REQ
{ "url" : "$INPUT" }
EOD
;;
face-identify)
cat <<-EOD > $REQ
{ "url" : "$INPUT" }
EOD
;;
face-detect)
# model _02 Does not return face attributes or face landmarks but has improved accuracy on small, side-view, and blurry faces
# https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model
cat <<-EOD > $REQ
{ "url" : "$INPUT", "recognitionModel" : "recognition_02", "detectionModel" : "detection_02" }
EOD
;;
face-detect-details)
cat <<-EOD > $REQ
{ "url" : "$INPUT" }
EOD
;;
base64*)
# NOTE(review): the inner `case $1 in face)` below can never match because
# $1 already matched base64*, so no request file is written and the script
# ends with ***ENOREQ. Looks like dead/incomplete code — confirm intent.
B64=base64.tmp
base64 -i $INPUT -o $B64
[[ -f "$B64" ]] || { echo "***ENOBASE64"; exit 1; }
case $1 in
face)
cat <<-EOD > $REQ
{ "url" : "https://cloud.google.com/vision/docs/images/faces.png" }
EOD
;;
esac
rm -f $B64
;;
*) exit 1;;
esac
# Final sanity check: a request file must exist; print its name for callers.
[[ -f "$REQ" ]] || { echo "***ENOREQ"; exit 1; }
echo $REQ
| true
|
887e5533aad705d0e8852f893123ec62ca0e7e0a
|
Shell
|
tmlye/dotfiles
|
/createSymlinks.sh
|
UTF-8
| 2,221
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Symlink dotfiles from ~/.dotfiles into their expected locations, backing up
# pre-existing targets first. All expansions are quoted so the script is
# robust against unusual $HOME values.

# Create .config directory if needed
mkdir -p "$HOME/.config"

# Targets that may already exist and should be preserved as *.backup.
# NOTE(review): several link targets created below (e.g. .config/sway/config,
# .config/nvim, .config/alacritty) are not in this list and get no backup —
# confirm whether that is intentional.
declare -a links=(.gitconfig .zlogin .zshrc .vim .vimrc .config/zathura/zathurarc .config/htop/htoprc .config/gtk-3.0 .config/ranger .tmux.conf .config/user-dirs.dirs .config/waybar)

# If files already exist create backups
for i in "${links[@]}"
do
  if [ -e "$HOME/$i" ]
    then mv "$HOME/$i" "$HOME/$i.backup"
  fi
done

# sway
mkdir -p "$HOME/.config/sway"
ln -s "$HOME/.dotfiles/sway/config" "$HOME/.config/sway/config"

# set environment variables so firefox uses wayland and xdg-desktop-portal-wlr works
mkdir -p "$HOME/.config/environment.d"
ln -s "$HOME/.dotfiles/various/env.conf" "$HOME/.config/environment.d/env.conf"

# waybar
ln -s "$HOME/.dotfiles/waybar" "$HOME/.config/waybar"

# alacritty
mkdir -p "$HOME/.config/alacritty"
ln -s "$HOME/.dotfiles/alacritty/alacritty.yml" "$HOME/.config/alacritty/alacritty.yml"

# mako
mkdir -p "$HOME/.config/mako"
ln -s "$HOME/.dotfiles/mako/config" "$HOME/.config/mako/config"

# X
ln -s "$HOME/.dotfiles/X/Xresources" "$HOME/.Xresources"

# zsh
ln -s "$HOME/.dotfiles/zsh/zshrc" "$HOME/.zshrc"
ln -s "$HOME/.dotfiles/zsh/zlogin" "$HOME/.zlogin"

# vim
ln -s "$HOME/.dotfiles/vim/vim/" "$HOME/.vim"
ln -s "$HOME/.dotfiles/vim/vimrc" "$HOME/.vimrc"

# nvim
ln -s "$HOME/.dotfiles/nvim" "$HOME/.config/nvim"

# gtk
ln -s "$HOME/.dotfiles/gtk" "$HOME/.config/gtk-3.0"

# ranger
ln -s "$HOME/.dotfiles/ranger/" "$HOME/.config/ranger"

# tmux
ln -s "$HOME/.dotfiles/various/tmux.conf" "$HOME/.tmux.conf"

# zathura
mkdir -p "$HOME/.config/zathura"
ln -s "$HOME/.dotfiles/various/zathurarc" "$HOME/.config/zathura/zathurarc"

# htop
mkdir -p "$HOME/.config/htop"
ln -s "$HOME/.dotfiles/various/htoprc" "$HOME/.config/htop/htoprc"

# vscode
mkdir -p "$HOME/.config/Code - OSS/User"
ln -s "$HOME/.dotfiles/vscode/settings.json" "$HOME/.config/Code - OSS/User/settings.json"
ln -s "$HOME/.dotfiles/vscode/keybindings.json" "$HOME/.config/Code - OSS/User/keybindings.json"

# git
ln -s "$HOME/.dotfiles/various/gitconfig" "$HOME/.gitconfig"

# imv
mkdir -p "$HOME/.config/imv"
ln -s "$HOME/.dotfiles/various/imv_config" "$HOME/.config/imv/config"

# mpv
ln -s "$HOME/.dotfiles/mpv/" "$HOME/.config/mpv"

# don't use Desktop, use desktop
ln -s "$HOME/.dotfiles/various/user-dirs.dirs" "$HOME/.config/user-dirs.dirs"
| true
|
c9f8a4818b415c4fda8a32b54c41bab06d97de80
|
Shell
|
ArcticDevs/ev-server
|
/githooks/pre-commit
|
UTF-8
| 458
| 2.875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# Git pre-commit hook: auto-fix lint issues and run the unit-test suite
# before each commit.
# Instructions:
# Put this file into your .git/hooks folder and set as executable
# - for Windows (attrib +x pre-commit)
# - for *nix (chmod +x pre-commit)

# echo "Starting the server"
# npm run start:dev

echo "Running linter fix:"
npm run eslint:fix

echo "Running unit tests (do not forget to start the server before with 'npm run start:dev'):"
# npm run start:email &
npm run mochatest

# Do not abort the commit because of errors
# NOTE(review): the unconditional exit 0 means lint/test failures never block
# the commit — confirm this is intentional.
exit 0
| true
|
5165fad325200e50ba2e33fb1e7be078dc709201
|
Shell
|
gregorybrancq/shellScripts
|
/loadDiffWallpaper.bash
|
UTF-8
| 558
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Change the GNOME desktop wallpaper whenever the active workspace changes:
# watches _NET_CURRENT_DESKTOP via `xprop -spy` and maps each desktop index
# to one image from the list below.

desktop_dir="/home/greg/Wallpaper/Home/" # absolute path to the wallpaper directory
desktop_img[0]="desk_1.jpg"
desktop_img[1]="desk_2.jpg"
desktop_img[2]="desk_3.png"
desktop_img[3]="desk_4.jpg"
desktop_img[4]="desk_5.jpg"

# Set the GNOME background to the given file name (relative to desktop_dir).
setdesktop() {
  gsettings set org.gnome.desktop.background picture-uri "file://$desktop_dir$1"
}

# xprop -spy emits one line per workspace switch; the last character of each
# line is taken as the desktop index.
# NOTE(review): this only works for desktops 0-9 — a two-digit index would be
# truncated to its last digit; confirm the desktop count stays below 10.
xprop -root -spy _NET_CURRENT_DESKTOP | (
  while read -r; do
    desk=${REPLY:${#REPLY}-1:1}
    setdesktop ${desktop_img[$desk]}
  done
)
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.