blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
3cb80dcfe9e9625e2dab4b86a34982000fc1a577
|
Shell
|
swordhui/xgstage0
|
/scripts_tools/P030gcc
|
UTF-8
| 2,143
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# LFS-style build script for gcc-4.4.3 "pass 2": bundles GMP and MPFR into
# the gcc source tree, patches gcc to use /tools, then builds, installs and
# sanity-checks the result.
# Helper functions (check_and_download, unpack_and_enter, err_check,
# package_leave) and variables ($LFS_SOURCE, $LFS_TEMP) come from the
# sourced functions_pub library -- they are not defined in this file.
. functions_pub
PackageName=gcc-4.4.3
PackageURL=http://ftp.gnu.org/gnu/gcc/$PackageName/$PackageName.tar.bz2
GMP_PACKNAME=gmp-4.3.2
GMP_URL=ftp://ftp.gnu.org/gnu/gmp/$GMP_PACKNAME.tar.bz2
MPFR_PACKNAME=mpfr-2.4.2
MPFR_URL=http://www.mpfr.org/$MPFR_PACKNAME/$MPFR_PACKNAME.tar.bz2
case "${1}" in
download)
check_and_download $PackageURL
check_and_download $GMP_URL
check_and_download $MPFR_URL
;;
build)
#step0: unpack.
unpack_and_enter $PackageName $PackageURL -pass2
#unpack GMP.
tar xf $LFS_SOURCE/`basename $GMP_URL`
# Brace expansion runs before variable expansion, so this is
# "mv gmp-4.3.2 gmp" (gcc builds ./gmp in-tree when present).
mv {$GMP_PACKNAME,gmp}
err_check "[Error] unpack gmp failed."
#unpack MPFR
tar xf $LFS_SOURCE/`basename $MPFR_URL`
mv {$MPFR_PACKNAME,mpfr}
err_check "[Error] unpack mpfr failed."
# Neutralise the fixincludes step by replacing it with "-c true".
cp -v gcc/Makefile.in{,.orig}
sed 's@\./fixinc\.sh@-c true@' gcc/Makefile.in.orig > gcc/Makefile.in
cp -v gcc/Makefile.in{,.tmp}
sed 's/^XCFLAGS =$/& -fomit-frame-pointer/' gcc/Makefile.in.tmp \
> gcc/Makefile.in
# Retarget the dynamic linker and default include paths at /tools.
for file in $(find gcc/config -name linux64.h -o -name linux.h)
do
cp -uv $file{,.orig}
sed -e 's@/lib\(64\)\?\(32\)\?/ld@/tools&@g' \
-e 's@/usr@/tools@g' $file.orig > $file
echo "
#undef STANDARD_INCLUDE_DIR
#define STANDARD_INCLUDE_DIR 0" >> $file
touch $file.orig
done
#step3: configure (out-of-tree build in $LFS_TEMP/build).
mkdir -p $LFS_TEMP/build
cd $LFS_TEMP/build
err_check "enter directory $LFS_TEMP/build failed"
../$PackageName/configure --prefix=/tools \
--with-local-prefix=/tools --enable-clocale=gnu \
--enable-shared --enable-threads=posix \
--enable-__cxa_atexit --enable-languages=c,c++ \
--disable-libstdcxx-pch --disable-bootstrap
err_check "[Error] configure failed."
#step4: make.
make
err_check "[Error] make failed."
#step5: install
make install
err_check "[Error] install failed."
#check. Smoke test: the new compiler must produce binaries linked via /tools.
echo 'main(){}' > dummy.c
cc dummy.c
err_check "[Error] gcc-pass2: compile failed."
readelf -l a.out | grep ': /tools'
err_check "[Error] gcc-pass2: not link to /tools."
#leave.
package_leave $PackageName $PackageURL -pass2
;;
*)
exit 1
;;
esac
#end
| true
|
ac52f95eb08bd1cee410ac058f0dac6cf7338483
|
Shell
|
lnevo/radb
|
/src/bin/update_ra.sh
|
UTF-8
| 958
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/sh
# Poll a Reef Angel controller and publish its status/label/memory dumps.
# NOTE(review): assumes /www/radb_data exists and is writable.
RAURL="http://xena.easte.net:2000"
LABELURL="http://forum.reefangel.com/status/labels.aspx?id=lnevo"
WGET=/usr/bin/wget
cd /www/radb_data
# Fetch to *.new and rename only on success so readers never see a
# half-written file.
$WGET -t3 -T3 -O r99.txt.new $RAURL/r99 && mv r99.txt.new r99.txt
$WGET -O ra_labels.txt.new $LABELURL && mv ra_labels.txt.new ra_labels.txt
$WGET -t3 -T3 -O mr_full_mem.txt.new $RAURL/mr100,400 && mv mr_full_mem.txt.new mr_full_mem.txt
FULL_MEM=`cat mr_full_mem.txt`
# Strip the last 5 characters of r99.txt (presumably a closing </RA>
# tag, re-appended below after the memory dump -- TODO confirm).
RA_DATA=`cat r99.txt | sed 's/.....$//g'`
echo "${RA_DATA}${FULL_MEM}</RA>" > all_data.txt.new && mv all_data.txt.new all_data.txt
#eval $(/root/bin/r99.sh 2>/dev/null)
# Import raw controller readings (PHE, PH, T1, SAL, WL) as shell vars.
eval $(curl -s http://www.easte.net/radb/r99.php)
# Scale the raw integer readings into human units.
ALK=`echo "$PHE" | awk '{ val=$1*17.86 / 100;printf "%3.0f\n",val}'`
PH=`echo "$PH" | awk '{ val=$1 / 100;printf "%.2f\n",val}'`
TEMP=`echo "$T1" | awk '{ val=$1 / 10;printf "%.1f\n",val}'`
SAL=`echo "$SAL" | awk '{ val=$1 / 10;printf "%.1f\n",val}'`
WL=`echo "$WL"`
echo "$TEMP $PH $SAL $ALK $WL END" > status.txt
| true
|
b60c6b5e5f7c57d0368ab6c10c9e9db10688b4ad
|
Shell
|
SecMeant/sets
|
/linux/.local/bin/godefiled
|
UTF-8
| 177
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/sh
# Open an SSH session to defil3d@localhost on the port published through
# the environment; bail out early if the port variable is unset.
[ -z "$DEFILED_SSH_PORT" ] && { echo "\$DEFILED_SSH_PORT var is not set. Did u initialized env correctly?"; exit 1; }
# Quote the port so an unexpected value cannot be word-split into extra
# ssh arguments (the original left it unquoted).
ssh -p "$DEFILED_SSH_PORT" defil3d@localhost
| true
|
55da1348e974e74d6c237de21f3f3472fa5ccf7a
|
Shell
|
petronny/aur3-mirror
|
/pacman-glib-git/PKGBUILD
|
UTF-8
| 1,067
| 3.15625
| 3
|
[] |
no_license
|
# Maintainer: PirateJonno <j@skurvy.no-ip.org>
# PKGBUILD for the git snapshot package of pacman-glib (Arch Linux).
_pkgname=pacman-glib
pkgname="$_pkgname-git"
pkgver=20100620
pkgrel=1
pkgdesc='GLib library for Arch Linux package management'
arch=('i686' 'x86_64')
url="http://github.com/PirateJonno/pacman-glib"
license=('GPL')
depends=('pacman>=3.4.0' 'pacman<3.5.0' 'glib2')
makedepends=('git' 'gnome-common' 'gtk-doc' 'intltool')
provides=("$_pkgname=3.4.0")
conflicts=("$_pkgname")
_gitroot='git://github.com/PirateJonno/pacman-glib.git'
_gitname='pacman-glib'
# makepkg build step: clone or update the upstream repo, build from a
# throwaway copy, and install into $pkgdir ($srcdir/$pkgdir/msg are
# provided by makepkg).
build() {
cd "$srcdir"
msg "Connecting to GIT server...."
# Reuse a cached checkout when present, otherwise clone fresh.
if [ -d "$_gitname" ]; then
cd "$_gitname" && git pull origin
msg "The local files are updated."
else
git clone "$_gitroot" "$_gitname"
fi
msg "GIT checkout done or server timeout"
msg "Starting make..."
# Always build in a disposable copy so the cached checkout stays clean.
[ -d "$srcdir/$_gitname-build" ] && rm -rf "$srcdir/$_gitname-build"
git clone "$srcdir/$_gitname" "$srcdir/$_gitname-build"
cd "$srcdir/$_gitname-build"
./autogen.sh --prefix=/usr --sysconfdir=/etc --localstatedir=/var || return 1
make -s || return 1
make -s DESTDIR="$pkgdir" install
}
| true
|
16eec494fc49058a1617536ea92e2e23cf006f6f
|
Shell
|
critterandguitari/Organelle_Patches_Dev
|
/Update-OS-v2.1/root/scripts/killpd.sh
|
UTF-8
| 335
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/sh
# Stop Pure Data (pd) with escalating force, reset the LED, and recreate
# the scratch state directory.
# quit Pd in 3 steps
# give pd a chance to shut itself off
oscsend localhost 4000 /quitpd i 1
sleep .12
# kill pd SIGTERM
killall pd
sleep .1
# and kill SIGKILL
killall -s 9 pd
# turn off led, just to be sure
oscsend localhost 4001 /led i 0
# clean up
# remove old state directory
rm -fr /tmp/state
mkdir /tmp/state
| true
|
76661a7e79b360450997c04617330aa460aab95b
|
Shell
|
OrdiNeu/Terminal
|
/qsub_manual.sh
|
UTF-8
| 285
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
#$ -q hoffmangroup
#$ -cwd
# Manually emulate an SGE array job: for every i in [TSTART, TEND],
# export SGE_TASK_ID=i and run the remaining arguments as a command.
# Usage: qsub_manual.sh TSTART TEND command [args...]
# verbose failures
set -o nounset -o pipefail -o errexit
TSTART=$1
TEND=$2
shift
shift
# Individual subtasks may fail; report and keep going.
set +o errexit
for i in $(seq "$TSTART" "$TEND"); do
export SGE_TASK_ID=$i
# BUG FIX: quote "$@" so arguments containing whitespace are passed
# through intact (the original unquoted $@ re-split every word).
"$@"
if [ "$?" -ne "0" ]; then
echo "subtask $i failed"
fi
done
| true
|
17026092f55e75c6bf766af193876d4175fe0173
|
Shell
|
riwsky/mo-pomo
|
/deploy.sh
|
UTF-8
| 264
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Merge master to gh-pages, rebuild the prod cljs, and push.
set -e
git checkout gh-pages
git merge master
# BUG FIX: use -f -- the artifact may not exist on a fresh checkout, and
# a failing bare `rm` would abort the whole deploy under `set -e`.
rm -f resources/public/cljs/main.js
lein cljsbuild once prod
# The build artifact is normally ignored, so force-add it on this branch.
git add -f resources/public/cljs/main.js
git commit -m "Update gh-pages js"
git push
| true
|
137216a76d5070ad0ff6da9cfba40e982eecbd12
|
Shell
|
Open-GP/opengp-distro
|
/docker/web/startup.sh
|
UTF-8
| 1,208
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash -eux
# Container entrypoint: render the OpenMRS runtime properties from the
# environment, wait for MySQL, then run Tomcat (with optional JPDA debug).
# Required env: DB_HOST, DB_DATABASE, DB_USERNAME, DB_PASSWORD.
DB_CREATE_TABLES=${DB_CREATE_TABLES:-false}
DB_AUTO_UPDATE=${DB_AUTO_UPDATE:-false}
MODULE_WEB_ADMIN=${MODULE_WEB_ADMIN:-true}
DEBUG=${DEBUG:-false}
cat > /usr/local/tomcat/openmrs-server.properties << EOF
install_method=auto
connection.url=jdbc\:mysql\://${DB_HOST}\:3306/${DB_DATABASE}?autoReconnect\=true&sessionVariables\=default_storage_engine\=InnoDB&useUnicode\=true&characterEncoding\=UTF-8
connection.username=${DB_USERNAME}
connection.password=${DB_PASSWORD}
has_current_openmrs_database=true
create_database_user=false
module_web_admin=${MODULE_WEB_ADMIN}
create_tables=${DB_CREATE_TABLES}
auto_update_database=${DB_AUTO_UPDATE}
EOF
echo "------ Starting distribution -----"
cat /root/openmrs-distro.properties
echo "-----------------------------------"
# wait for mysql to initialise
/usr/local/tomcat/wait-for-it.sh --timeout=3600 ${DB_HOST}:3306
# BUG FIX: `[ $DEBUG ]` was always true, because DEBUG defaults to the
# non-empty string "false"; compare against "true" explicitly.
if [ "$DEBUG" = "true" ]; then
export JPDA_ADDRESS="1044"
export JPDA_TRANSPORT=dt_socket
fi
# start tomcat in background
/usr/local/tomcat/bin/catalina.sh jpda run &
# trigger first filter to start data importation
sleep 15
curl -L http://localhost:8080/openmrs/
sleep 15
# bring tomcat process to foreground again
wait ${!}
| true
|
34c0a9040c90f40f694adbc1c8714b3454144d47
|
Shell
|
danjulio/lepton
|
/pocketbeagle/pru_rpmsg_fb/scripts/setup_pru.sh
|
UTF-8
| 813
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Bring both PRUs up and make /dev/rpmsg_pru31 accessible to non-root
# processes. Must be run as root; intended as a debugging aid.
#
RPROC=/sys/class/remoteproc
# Open the remoteproc control files for user access
for dev in remoteproc1 remoteproc2; do
  chmod 666 ${RPROC}/${dev}/state
  chmod 666 ${RPROC}/${dev}/firmware
done
# Select the firmware image for each PRU core
echo "am335x-pru0-fw" > ${RPROC}/remoteproc1/firmware
echo "am335x-pru1-fw" > ${RPROC}/remoteproc2/firmware
# Boot both cores
for dev in remoteproc1 remoteproc2; do
  echo "start" > ${RPROC}/${dev}/state
done
# Give the RPMsg channel a moment to appear, then open it for user access
sleep 1
chmod 666 /dev/rpmsg_pru31
| true
|
57998b43f8fe751ecbb9f5f5c56faa2230cef9ee
|
Shell
|
AlexHilson/timewatch_server
|
/terraform/modules/node_bootstrap/files/bootstrap.tpl
|
UTF-8
| 477
| 2.890625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Node bootstrap for a fresh Debian/Ubuntu host (terraform template):
# installs git, unpacks Node.js v6.9.4 under /opt/node, symlinks the
# binaries into /usr/local/bin, and installs pm2 globally.
sudo apt-get update && sudo apt-get upgrade -y
sudo apt-get install -y git
cd ~
wget https://nodejs.org/dist/v6.9.4/node-v6.9.4-linux-x64.tar.xz
mkdir node
# --strip-components=1 drops the versioned top-level directory.
tar xvf node-v*.tar.?z --strip-components=1 -C ./node
# Removes both the downloaded tarball and any leftover node-v* dirs.
rm -rf node-v*
mkdir node/etc
# Route npm's global installs under /usr/local.
echo 'prefix=/usr/local' > node/etc/npmrc
sudo mv node /opt/
sudo chown -R root: /opt/node
sudo ln -s /opt/node/bin/node /usr/local/bin/node
sudo ln -s /opt/node/bin/npm /usr/local/bin/npm
sudo npm install -g pm2
| true
|
b801b82700d53690bfbc993b234794e2fb89d4d3
|
Shell
|
twhtanghk/server.dns
|
/script/afterConnected.sh
|
UTF-8
| 556
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# VPN client hook: on connect, publish this host's VPN address as a DNS A
# record and register the VPN nameserver; on disconnect, undo both.
# ${reason} and the INTERNAL_IP4_* variables are supplied by the caller
# (vpnc/openconnect environment -- TODO confirm).
export CISCO_SPLIT_INC=0
root=/root/git/server.dns
HOSTNAME=`hostname`
cd ${root}
case "${reason}" in
connect)
script/record.coffee -u ${oauth2user} -p ${oauth2pass} --add vpn.net ${HOSTNAME} A ${INTERNAL_IP4_ADDRESS}
echo "nameserver ${INTERNAL_IP4_DNS}" |resolvconf -a tun0.vpn
;;
disconnect)
echo "nameserver ${INTERNAL_IP4_DNS}" |resolvconf -d tun0.vpn
script/record.coffee -u ${oauth2user} -p ${oauth2pass} --del vpn.net ${HOSTNAME} A
;;
esac
# Unset before chaining to the stock vpnc-script, presumably so it does
# not re-apply the DNS server itself -- verify.
unset INTERNAL_IP4_DNS
. /usr/share/vpnc-scripts/vpnc-script
| true
|
807d309d17a5cdb8e881eafdd76ea121808423cc
|
Shell
|
scudiero/tools
|
/src/goto.sh
|
UTF-8
| 3,983
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
#==================================================================================================
version=2.0.55 # -- dscudiero -- Fri 03/23/2018 @ 14:34:08.50
#==================================================================================================
# Change directory into a courseleaf client site, or ssh to it when the
# site is hosted remotely.  Relies on a sourced framework for TrapSigs,
# Import, ParseArgsStd, GetDefaultsData, RunSql, dump, Error and
# Terminate -- none of those are defined in this file.
TrapSigs 'on'
myIncludes=""
Import "$standardInteractiveIncludes $myIncludes"
originalArgStr="$*"
scriptDescription="Goto courseleaf site"
myDebug=true
## Copyright (c) 2015 David Scudiero -- all rights reserved.
#==================================================================================================
# local functions
#==================================================================================================
#==================================================================================================
# parse script specific arguments
#==================================================================================================
function goto-ParseArgsStd {
#myArgs+=("shortToken|longToken|type|scriptVariableName|<command to run>|help group|help textHelp")
myArgs+=('co|courseleaf|switch||target='/web/courseleaf'|script|Go to .../web/courseleaf')
myArgs+=('db|db|switch||target='/db'|script|Go to .../db')
}
#==================================================================================================
# Standard arg parsing and initialization
#==================================================================================================
helpSet='script,client,env'
GetDefaultsData $myName
ParseArgsStd $originalArgStr
[[ $env == '' ]] && env='dev'
dump -r
dump -l myName client env target
#==================================================================================================
## lookup client in clients database
#==================================================================================================
sqlStmt="select hosting from $clientInfoTable where name=\"$client\""
RunSql $sqlStmt
if [[ ${#resultSet[@]} -eq 0 ]]; then
Error "Client '$client', Env '$env' not found in warehouse.$clientInfoTable table";
return
fi
hosting=${resultSet[0]}
dump -l hosting
if [[ $hosting == 'leepfrog' ]]; then
## lookup client in database
# pvt maps onto the dev record; test appends "-test" to the client name.
whereClause="name=\"$client\" and env=\"$env\" and host=\"$hostName\""
[[ $env = 'pvt' ]] && whereClause="name=\"$client\" and env=\"dev\" and host=\"$hostName\""
[[ $env = 'test' ]] && client="$client-test" && whereClause="name=\"$client\" and env=\"test\" and host=\"$hostName\""
sqlStmt="select share,hosting from $siteInfoTable where $whereClause"
dump -l sqlStmt
RunSql $sqlStmt
if [[ ${#resultSet[@]} -eq 0 ]]; then
Error "Client '$client', Env '$env' not found in warehouse.$siteInfoTable table";
return
fi
share=${resultSet[0]}
dump -l share
# Build the target directory from the share and environment.
if [[ $env == 'dev' ]]; then
tgtDir=/mnt/$share/web/$client
elif [[ $env == 'pvt' ]]; then
tgtDir=/mnt/$share/web/$client-$userName
else
tgtDir=/mnt/$share/$client/$env
fi
dump -l tgtDir target
# "debug"/"clone" targets run a tool instead; any other target is
# treated as a subdirectory suffix when it exists.
if [[ $target != "" ]]; then
if [[ $target == 'debug' ]]; then wizdebug $client -$env
elif [[ $target == 'clone' ]]; then clone $client -$env -nop
elif [[ -d $tgtDir$target ]]; then tgtDir=$tgtDir$target;
fi
fi
cd $tgtDir
else
# Remotely hosted site: fetch ssh credentials from the user's ~/.pw2
# file (one line per client: client user password host -- TODO confirm).
pwFile=/home/$userName/.pw2
unset pwRec
if [[ -r $pwFile ]]; then
pwRec=$(grep "^$client" $pwFile)
if [[ $pwRec != '' ]]; then
read -ra tokens <<< "$pwRec"
remoteUser=${tokens[1]}
remotePw=${tokens[2]}
remoteHost=${tokens[3]}
sshpass -p $remotePw ssh $remoteUser@$remoteHost
fi
else
Terminate "Remote site and could not retrieve login information from file: \n^$pwFile."
fi
fi
return
## Wed Apr 27 15:17:07 CDT 2016 - dscudiero - Switch to use RunSql
## Mon Jun 6 09:30:11 CDT 2016 - dscudiero - Added support for remote sites
## Thu Jul 14 15:08:29 CDT 2016 - fred - Switch LOGNAME for userName
## 03-22-2018 @ 12:36:17 - 2.0.54 - dscudiero - Updated for Msg3/Msg, RunSql2/RunSql, ParseArgStd/ParseArgStd2
## 03-23-2018 @ 15:34:59 - 2.0.55 - dscudiero - D
| true
|
d7cc05f16f19d402371f417ed5c380c9f7a34a3c
|
Shell
|
battlesnake/qbasic-games
|
/extract.sh
|
UTF-8
| 167
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Unpack every *.zip in the current directory into a folder named after
# the archive, then prune any bundled phatcode.exe files.
set -eu
declare target=
for archive in *.zip; do
  target=${archive%.zip}
  # The subshell keeps the cd local to this iteration.
  (
    mkdir -p "$target"
    cd "$target"
    unzip "../$archive"
  )
done
find -iname phatcode.exe -delete
| true
|
f6b71fec963a2217bcfaaec2ec392acc5cfd1e5c
|
Shell
|
friendlyarm/debian_nanopi2
|
/build.sh
|
UTF-8
| 1,169
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
#----------------------------------------------------------
# Build flashable NanoPi2 Debian images: pack ./boot and ./rootfs into
# ext4 images, then bundle them with the partition map and an md5 hash.
ROOTIMGSIZE=2097152000 # 2G
BOOTIMGSIZE=67108864  # 64M
MKE4FS=./tools/make_ext4fs
OUTDIR=sd-fuse_nanopi2/debian
TOPDIR=`pwd`
ROOTDIR=rootfs
BOOTDIR=boot
IMGFILE=debian-jessie-images.tgz
[ -d ${BOOTDIR} ] || { echo "Error: ./${BOOTDIR}: not found"; exit 1; }
[ -d ${ROOTDIR} ] || { echo "Error: ./${ROOTDIR}: not found"; exit 1; }
#----------------------------------------------------------
# Execute an action, echoing it first; abort on failure.
FA_DoExec() {
echo "${@}"
eval $@ || exit $?
}
#----------------------------------------------------------
# Make ext4 image
mkdir -p ${OUTDIR}
FA_DoExec ${MKE4FS} -s -l ${BOOTIMGSIZE} -a root -L boot ${OUTDIR}/boot.img ${BOOTDIR}
FA_DoExec ${MKE4FS} -s -l ${ROOTIMGSIZE} -a root -L rootfs ${OUTDIR}/rootfs.img ${ROOTDIR}
#----------------------------------------------------------
# Create package & md5sum
# BUG FIX: the original tested for "partma.txt" (typo) which never exists,
# so the copy ran unconditionally; test for the file actually shipped in
# the tarball below.
if [ ! -f ${OUTDIR}/partmap.txt ]; then
cp config/partmap.txt ${OUTDIR}/ -avf
fi
FA_DoExec "(cd ${OUTDIR} && tar cvzf ${TOPDIR}/${IMGFILE} rootfs.img boot.img partmap.txt)"
FA_DoExec "md5sum ${IMGFILE} > ${IMGFILE}.hash.md5"
ls -l ${IMGFILE}*
echo "...done."
| true
|
0e51359d34a506df7b734fc394d67c30264ef490
|
Shell
|
AlexKuchynskyi/Bash-Scripts
|
/find_IP_MAC.sh
|
UTF-8
| 1,300
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# script gets a directory name as an input parameter,
# then finds mentioning of IP-addresses in all the files
# (regular files only) located in the specified directory
# BTW the script checks if the file is the regular one.
# if flag --mac is specified so the script also finds
# mentioning of MAC-addresses
# make sure about correct input
if [ -z "$1" ]
then
echo "Usage: script_name dir_name (--mac flag additionally for MAC-addresses)"
elif [ -z "$2" ]
then
# specify a directory to find files in
DIR=$1
# find needed expression and write the result to the file
echo "Seeking for IP-addresses in $DIR ... and writing the result to the file..."
find "$DIR" -type f -exec grep -ER "([0-9]{1,3}\.){3}[0-9]{1,3}" '{}' \; > IP_1.log
# BUG FIX: the original used the *integer* comparison `-ne` against the
# string "--mac"; `[` then failed with "integer expression expected" and
# always fell through to the MAC branch, whatever $2 was. Use the string
# operator `!=` instead.
elif [ "$2" != "--mac" ]
then
echo "Usage: script_name dir_name (--mac flag additionally for MAC-addresses)"
else
# specify a directory to find files in
DIR=$1
# find needed expression and write the result to the file
echo "Seeking for IP- and MAC-addresses in $DIR ... and writing the result to the files IP.log and MAC.log ..."
find "$DIR" -type f -exec grep -ER "([0-9]{1,3}\.){3}[0-9]{1,3}" '{}' \; > IP_1.log
find "$DIR" -type f -exec grep -ER "([[:xdigit:]]{2}:){5}[[:xdigit:]]" '{}' \; > MAC_1.log
fi
| true
|
b83da67617adf658897a9768e65cad0027ebcc92
|
Shell
|
arifsetwn/injen-cctv
|
/script/cekmotion.sh
|
UTF-8
| 154
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Check whether the "motion" process is running: prints "1" when pgrep
# finds at least one matching PID, "0" otherwise.
pids=$(pgrep motion)
if [[ -n "$pids" ]]; then
  echo "1"
else
  echo "0"
fi
| true
|
3ae7bfa5b71e5cfba0438ff102767ec766347515
|
Shell
|
enovella/hashcode2020
|
/run-all
|
UTF-8
| 675
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the solver ("node script <input> <output>") for every hashcode
# dataset in parallel, one tmux pane per dataset.
function start_tmux() {
set -e
tmux new-window -n 'RUN-ALL'
# Each split-window creates a new active pane; the following send-keys
# types the command into it.
tmux send-keys "clear && node script input/b_read_on.txt output/b_read_on.out" C-m
tmux split-window -h
tmux send-keys "clear && node script input/c_incunabula.txt output/c_incunabula.out" C-m
tmux split-window -fv
tmux send-keys "clear && node script input/d_tough_choices.txt output/d_tough_choices.out" C-m
tmux split-window -h -p 66
tmux send-keys "clear && node script input/e_so_many_books.txt output/e_so_many_books.out" C-m
tmux split-window -h
tmux send-keys "clear && node script input/f_libraries_of_the_world.txt output/f_libraries_of_the_world.out" C-m
}
start_tmux
| true
|
581665d6a0aced6aab097a4bdba753623a86ace1
|
Shell
|
jkbonfield/mpeg_misc
|
/CE5/old/loop.sh
|
UTF-8
| 506
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Round-trip test for the name tokeniser: tokenise $1 into per-block
# streams, compress/pack each block, report total packed size, then
# unpack/uncompress/detokenise and compare against the original input.
in=$1
tok=./tokenise_name2
# Remove artefacts from any previous run.
rm -rf $in.*
$tok $in >/dev/null
# Compress and pack each tokenised block (named <in>.blk_NNNNNN.*).
for blk in $in.blk_??????.0_0
do
b=`echo $blk | sed 's/\.0_0$//'`
./try_comp.sh $b $b.comp > /dev/null
./pack_dir $b.comp $b > $b.comp.pack
done
# Total packed size in bytes.
cat $in.blk_??????.comp.pack | wc -c
# Reverse the pipeline and rebuild the original name stream.
(for blk in $in.blk_??????.comp.pack
do
b=`echo $blk | sed 's/\.comp.pack//'`
./unpack_dir $b.unpack $b < $b.comp.pack
./try_uncomp.sh $b.unpack $b.uncomp 2>/dev/null
$tok -d $b.uncomp/$b
done) > $in.new
# Verify the round trip was lossless (cmp exits non-zero on mismatch).
cmp $in $in.new
| true
|
4c10c15d205419b0348fea9b8f1313877cc1fe03
|
Shell
|
lakomiec/random
|
/svc_check.sh
|
UTF-8
| 1,774
| 4.46875
| 4
|
[] |
no_license
|
#!/bin/bash
# (re)Start a service on a remote machine if it doesn't respond on a given list of ports
# Author: Jacek Lakomiec
# Date: 09 Nov 2014
# Version: 0.1
# Make sure the script has enough parameters and if not, present expected usage
# (checked before any argument is used).
if [ $# -lt 2 ]; then
echo "Usage: $0 <IP> <port1,port2,portN>"
exit 1
fi
# Define basic variables. Make sure you replace SSH_USER and SSH_KEY values
IP="$1"
PORTS=$( echo $2 | sed -e "s#,# #g")
LOGFILE="log_svc_check-`date +%Y%m%d`.log"
SSH_USER=vagrant
SSH_KEY=vagrant-debian64
# Check if nc binary is present on the system
if [ ! -x /bin/nc ]; then
echo "This script requires netcat (nc) binary installed on this system"
exit 1
fi
# Iterate through the list of ports, check if there is any response from remote host on that port and if not,
# login via ssh and restart a service name expected to be running on that port.
for port in $PORTS; do
nc -z $IP $port
RES=$?
if [ $RES -eq 0 ]; then
# BUG FIX: the original wrote `2>&1 >> $LOGFILE`, which duplicates stderr
# to the terminal *before* stdout is redirected, so stderr never reached
# the log; redirect to the file first, then merge stderr into it.
echo "Port: $port on IP $IP is open for connections" >> $LOGFILE 2>&1
else
echo "The port $port on IP $IP seems down. I will login to $IP and restart the service running on port ${port}." >> $LOGFILE 2>&1
# port number <> service name mapping is required in order to restart appropriate services on the remote machine
case $port in
21)
service_name="pure-ftpd"
;;
25)
service_name="exim4"
;;
80|443)
service_name="apache2"
;;
3306)
service_name="mysql"
;;
*)
# BUG FIX: unknown ports previously reused the last (or an empty)
# service_name and restarted the wrong service; skip them instead.
echo "No service mapping for port $port - skipping restart." >> $LOGFILE 2>&1
continue
;;
esac
ssh -i $SSH_KEY -l $SSH_USER $IP "sudo /etc/init.d/${service_name} restart" >> $LOGFILE 2>&1
fi
done
exit 0
| true
|
86f85daff6e8486c91d22b472a8308a0353b0237
|
Shell
|
Myralllka/UCU_operating_systems
|
/lab6_myshell_3/compile.sh
|
UTF-8
| 256
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Out-of-source CMake build: compile in ./build, then place myshell in the
# repo root and all helper binaries (mycat, mycp, a copy of myshell) in ./bin.
set -o errexit
set -o nounset
set -o pipefail
mkdir -p bin build
# Subshell keeps the cd confined to the build step.
(
  cd build
  cmake -G"Unix Makefiles" ..
  make
  cp ./myshell ../bin
  mv ./myshell ../
  mv ./programs/mycat/mycat ../bin
  mv ./programs/mycp/mycp ../bin
)
#rm -r build
| true
|
df3c0531d3a8b170881546cef7adb91d6c0273ef
|
Shell
|
zachwolf/circleci-docker-goss
|
/entrypoint.sh
|
UTF-8
| 1,530
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Adjust builder pre-allocation counts in SCALING_FILE according to the
# rules in SCHEDULE_FILE that match the current day and hour.
# Schedule line format (whitespace-separated): <day|w> <hour> <count> <type>
#   day 1-7 or "w" for weekdays only; type is "docker" or "machine".
# Blank lines and #-comment lines are ignored.
# Any changes are written to the SCALING_FILE
#
set -euo pipefail
SCALING_FILE=$1
SCHEDULE_FILE=$2
CURRENT_HOUR=$(date +"%H") # 0-23
CURRENT_DAY=$(date +"%u") # 1-7
# Args: $1 day, $2 hour, $3 count, $4 type (docker|machine).
function update_scale_config(){
# doesnt update actual scale, but adjusts number in config file.
# Config file is read every minute by scaler entrypoint
cp ${SCALING_FILE} ${SCALING_FILE}.mod
if [ "$4" == docker ];then
sed -i.bak 's/\(l1.medium default true\).*/\1 '$3'/' ${SCALING_FILE}.mod
echo -e "\tUpdated docker preallocation count to $3"
elif [ "$4" == machine ];then
sed -i.bak 's/\(l1.medium default false\).*/\1 '$3'/' ${SCALING_FILE}.mod
echo -e "\tUpdated machine preallocation count to $3"
fi
# Write back with cat so the original file is truncated in place rather
# than replaced (presumably to keep its inode stable -- verify).
cat ${SCALING_FILE}.mod > ${SCALING_FILE}
rm ${SCALING_FILE}.mod
}
cat "$SCHEDULE_FILE" | grep -v -e '^ *$' | grep -v -e '^#' | while read line
do
tokens=( $line )
if [[ "${tokens[0]}" == "w" && ${CURRENT_DAY} -ge 6 ]];then
#echo -e "\tSkip weekday rule"
continue; #rule is weekday, its the weekend, do nothing
elif [[ "${tokens[0]}" != "w" && "${tokens[0]}" != "${CURRENT_DAY}" ]];then
#echo -e "\tSkip non-matching day"
continue; #days don' match
fi
if [ "${tokens[1]}" != "${CURRENT_HOUR}" ];then
#echo -e "\tSkip non-matching hour"
continue; #days don' match
fi
echo -e "\tMatching rule - Day: ${tokens[0]}, Hour: ${tokens[1]}, Type : ${tokens[3]}, Count: ${tokens[2]}"
update_scale_config ${tokens[*]}
done
echo "schedule updated"
| true
|
5c48390a21cf12800c43089e475eb42beb8c7783
|
Shell
|
BIMIB-DISCo/SARS-CoV-2-early-detection
|
/1_variant_calling/variant_calling.sh
|
UTF-8
| 2,770
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Per-sample SARS-CoV-2 variant-calling pipeline: for every accession
# listed in list_samples_ids.txt, map paired-end reads with bwa, trim
# primer sequences with ivar, call variants with ivar, and dump per-base
# coverage with samtools.
# REQUIRED TOOLS:
# bwa 0.7.17-r1188
# ivar 1.3.1
# samtools 1.10
filename="list_samples_ids.txt"
while read line; do
echo $line
#====B0====#  paths and per-sample configuration
echo "performing variant calling"
sampleName=$line
projectName="SARS-CoV-2"
dataDir="/data/SARS-CoV-2/raw_data/${projectName}/"
genomeFa="/data/SARS-CoV-2/reference/SARS-CoV-2-ANC.fasta"
primersBed="/data/SARS-CoV-2/primers/nCoV-2019_v3.bed"
resultsDir="/data/SARS-CoV-2/results/${projectName}/"
bamDir=${resultsDir}bamDir/
vcfDir=${resultsDir}vcfDir/
coverageDir=${resultsDir}coverage/
jobs=1
mkdir -p $resultsDir
if [ ! -d "$resultsDir" ]; then
echo "Error mkdir"
exit 1
fi
#====E0====#
#====B1====#  read mapping
echo "bwa mem -- mapping reads to SARS-CoV-2 reference"
mkdir -p $bamDir
if [ ! -d "$bamDir" ]; then
echo "Error mkdir"
exit 1
fi
/path_to_conda/conda/envs/SARS-CoV-2/bin/bwa mem -t $jobs $genomeFa ${dataDir}${sampleName}_1.fastq.gz ${dataDir}${sampleName}_2.fastq.gz > ${bamDir}${sampleName}_aln.sam
#====E1====#
#====B2====#  SAM -> sorted BAM (dropping unmapped, -F 4, and supplementary, -F 2048, reads)
echo "samtools -- building sorted bam"
/path_to_conda/conda/envs/SARS-CoV-2/bin/samtools view -b -F 4 -F 2048 -T $genomeFa ${bamDir}${sampleName}_aln.sam > ${bamDir}${sampleName}_aln.bam
/path_to_conda/conda/envs/SARS-CoV-2/bin/samtools sort ${bamDir}${sampleName}_aln.bam > ${bamDir}${sampleName}_aln.sorted.bam
rm ${bamDir}${sampleName}_aln.sam
rm ${bamDir}${sampleName}_aln.bam
#====E2====#
#====B3====#  primer trimming
echo "ivar trim -- trimming off the primer sequences"
/path_to_conda/conda/envs/SARS-CoV-2/bin/ivar trim -i ${bamDir}${sampleName}_aln.sorted.bam -b $primersBed -e -m 30 -q 20 -s 4 -p ${bamDir}${sampleName}_trimmed
rm ${bamDir}${sampleName}_aln.sorted.bam
rm ${bamDir}${sampleName}_aln.sorted.bam.bai
#====E3====#
#====B4====#  sort + index the trimmed BAM
echo "samtools -- building and indexing trimmed sorted bam"
/path_to_conda/conda/envs/SARS-CoV-2/bin/samtools sort ${bamDir}${sampleName}_trimmed.bam > ${bamDir}${sampleName}.bam
/path_to_conda/conda/envs/SARS-CoV-2/bin/samtools index ${bamDir}${sampleName}.bam
rm ${bamDir}${sampleName}_trimmed.bam
#====E4====#
#====B5====#  variant calling (min qual 20, min frequency 0.03)
echo "ivar variants -- calling variants"
mkdir -p $vcfDir
if [ ! -d "$vcfDir" ]; then
echo "Error mkdir"
exit 1
fi
/path_to_conda/conda/envs/SARS-CoV-2/bin/samtools mpileup -A -d 0 --reference $genomeFa -Q 0 ${bamDir}${sampleName}.bam | /path_to_conda/conda/envs/SARS-CoV-2/bin/ivar variants -p ${vcfDir}${sampleName} -q 20 -t 0.03
#====E5====#
#====B6====#  per-base coverage
mkdir -p $coverageDir
if [ ! -d "$coverageDir" ]; then
echo "Error mkdir"
exit 1
fi
echo "samtools depth -- extracting coverage information"
/path_to_conda/conda/envs/SARS-CoV-2/bin/samtools depth -a ${bamDir}${sampleName}.bam > ${coverageDir}${sampleName}.txt
#====E6====#
done < $filename
| true
|
791e8acba7ec76668228a55f0f48736f359577b0
|
Shell
|
paulyc/IncludeOS
|
/etc/bochs_installation.sh
|
UTF-8
| 1,382
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Build and install Bochs 2.6.6 from source with the internal debugger,
# X11 GUI, USB support and disassembly enabled.
# Upgrade packages to solve dependencies
origdir=$(pwd)
sudo apt-get clean
sudo apt-get dist-upgrade
bochs_version="2.6.6"
bochs_link="http://downloads.sourceforge.net/project/bochs/bochs/2.6.6/bochs-2.6.6.tar.gz?r=http%3A%2F%2Fsourceforge.net%2Fprojects%2Fbochs%2Ffiles%2Fbochs%2F2.6.6%2F&ts=1410367455&use_mirror=heanet"
bochs_file="bochs-$bochs_version.tar.gz"
# Create a directory (update this path if you don't like the location)
mkdir -p ~/src/
cd ~/src
wget -c --trust-server-name $bochs_link
tar -xf bochs*.gz
# Bochs configure asks for this
sudo apt-get install -y pkg-config
sudo apt-get install -y libgtk2.0-dev
# Bochs configuration:
# - Enable the internal debugger
# - Use X graphics; works over terminal
# - Enable USB (might be useful for USB-stick support)
# - Enable disassembly; sounded useful for assembly-parts of IncludeOS
cd bochs-$bochs_version
./configure --enable-debugger --with-x11 --enable-usb --enable-disasm
# - I also tried using sdl-graphics for GUI (Ubuntu doesn't use X anymore):
#./configure --enable-debugger --with-sdl --enable-usb --enable-disasm
# ... But this caused a linking error, so switched to x11, which works fine after all
# PATCH Makefile: also link libpthread wherever freetype is linked
sed 's/lfreetype/lfreetype -lpthread/' Makefile > Makefile.tmp
mv Makefile.tmp Makefile
make
sudo make install
cd "$origdir"
cp .bochsrc ~/
printf '\nDONE! (hopefully)\n\n'
| true
|
33dac7c355f39f8e98bd97ec556e8ee73c688566
|
Shell
|
cloudfoundry/cf-k8s-logging
|
/hack/rollout.sh
|
UTF-8
| 475
| 3.140625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Bump cf-for-k8s to this repo's latest logging component, re-render the
# deployment manifests with ytt, and redeploy the "cf" app with kapp.
set -ex
export CF_FOR_K8s_DIR="${CF_FOR_K8s_DIR:-${HOME}/workspace/cf-for-k8s/}"
export SCRIPT_DIR="$(cd $(dirname $0) && pwd -P)"
export BASE_DIR="${SCRIPT_DIR}/.."
# Foundation name defaults to the text after the last '_' in the current
# kube context (the rev|cut|rev trick takes the last underscore field).
export CF_NAME=${CF_NAME:-$(kubectl config current-context | rev | cut -d '_' -f -1 | rev)}
$SCRIPT_DIR/../scripts/bump-cf-for-k8s.sh
pushd "${CF_FOR_K8s_DIR}"
# Expects /tmp/$CF_NAME.yml to exist (presumably written by the bump
# script above -- verify).
ytt -f config -f /tmp/$CF_NAME.yml > /tmp/${CF_NAME}-rendered.yml
kapp deploy -a cf -f /tmp/${CF_NAME}-rendered.yml
popd
| true
|
c2912af5cd771822d3d436e7ab56634b3c683152
|
Shell
|
wangpanqiao/VDR_CHIPEXO
|
/do_vcf2diploid_cluster.sh
|
UTF-8
| 3,513
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/sh
#java -jar vcf2diploid.jar -id sample_id -chr file.fa ... [-vcf file.vcf ...]
#where sample_id is the ID of individual whose genome is being constructed
#(e.g., NA12878), file.fa is FASTA file(s) with reference sequence(s), and
#file.vcf is VCF4.0 file(s) with variants. One can specify multiple FASTA and
#VCF files at a time. Splitting the whole genome in multiple files (e.g., with
#one FASTA file per chromosome) reduces memory usage.
#Amount of memory used by Java can be increased as follows
#java -Xmx4000m -jar vcf2diploid.jar -id sample_id -chr file.fa ... [-vcf file.vcf ...]
#Important notes
#===============
#
#All characters between '>' and first white space in FASTA header are used
#internally as chromosome/sequence names. For instance, for the header
#
#>chr1 human
#
#vcf2diploid will upload the corresponding sequence into the memory under the
#name 'chr1'.
#Chromosome/sequence names should be consistent between FASTA and VCF files but
#omission of 'chr' at the beginning is allows, i.e. 'chr1' and '1' are treated as
#the same name.
#
#The output contains (file formats are described below):
#1) FASTA files with sequences for each haplotype.
#2) CHAIN files relating paternal/maternal haplotype to the reference genome.
#3) MAP files with base correspondence between paternal-maternal-reference
#sequences.
#File formats:
#* FASTA -- see http://www.ncbi.nlm.nih.gov/blast/fasta.shtml
#* CHAIN -- http://genome.ucsc.edu/goldenPath/help/chain.html
#* MAP file represents block with equivalent bases in all three haplotypes
#(paternal, maternal and reference) by one record with indices of the first
#bases in each haplotype. Non-equivalent bases are represented as separate
#records with '0' for haplotypes having non-equivalent base (see
#clarification below).
#
#Pat Mat Ref MAP format
#X X X ____
#X X X \
#X X X --> P1 M1 R1
#X X - -------> P4 M4 0
#X X - ,---> 0 M6 R4
#- X X --' ,-> P6 M7 R5
#X X X ----'
#X X X
#X X X
# BUG FIX: `[ ! $# == 3 ]` relies on the `==` bashism inside `[ ]`, but the
# script declares #!/bin/sh; use the POSIX numeric operator instead.
if [ "$#" -ne 3 ]; then
echo "Usage: `basename $0` <VCF_PATH> <REF> <DESC>"
echo "<VCF_PATH> path to dir containing .vcf.gz files, one per sample, or one per trio"
echo "<REF> FASTA file with reference sequence (can be full/chr masked/nonmasked)"
echo "<DESC> short 1-word description of the ref/vcf (eg hg19)"
echo "THIS DOESNT WORK FOR TRIO VCF FILES. ID MUST BE THE CHILD'S ID ONLY USE TO CREATE SCRIPT THEN SEND MANUALLY TO GSUB"
exit
fi
PDATA=$1;
PREF=$2;
PCODE="/net/isi-scratch/giuseppe/tools/vcf2diploid";
PDESC=$3;
POUT1=${PDATA}/VCF2DIPLOID_${PDESC};
# -p makes reruns idempotent (plain mkdir errored if the dir existed).
mkdir -p ${POUT1};
# For each sample VCF: generate a small per-sample job script that runs
# vcf2diploid, submit it to the cluster, then delete the script.
for FILE in ${PDATA}/*.vcf.gz;
do BASENAME=`basename ${FILE} ".vcf.gz"`;
ID=`echo ${FILE} | egrep -o "NA[0-9]*"`; #single
#ID=`echo ${FILE} | egrep -o "YRI_Y[0-9]*"`; #trios
POUT=${POUT1}/${ID};
mkdir -p ${POUT};
SCRIPT=vcf2diploid_${BASENAME}.sh;
echo '#!/bin/bash' >>${POUT}/${SCRIPT};
echo '' >>${POUT}/${SCRIPT};
#unzip vcf
echo "gunzip -f ${FILE}" >>${POUT}/${SCRIPT};
echo "cd ${POUT}" >> ${POUT}/${SCRIPT};
echo "java -jar ${PCODE}/vcf2diploid.jar \\
-id ${ID} \\
-chr ${PREF} \\
-vcf ${PDATA}/${BASENAME}.vcf" >>${POUT}/${SCRIPT};
#zip vcf
echo "gzip ${PDATA}/${BASENAME}.vcf" >> ${POUT}/${SCRIPT};
nice -5 qsub -e ${POUT1}/vcf2diploid_${ID}.err -o ${POUT1}/vcf2diploid_${ID}.out -q medium_jobs.q ${POUT}/${SCRIPT};
rm ${POUT}/${SCRIPT};
unset POUT;
done
| true
|
081a5481137471a4914a0d0d61b62bc5f70055cc
|
Shell
|
toulousain79/MySB
|
/inc/lang/fr/Questions.lng
|
UTF-8
| 17,197
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# ----------------------------------
# __/\\\\____________/\\\\___________________/\\\\\\\\\\\____/\\\\\\\\\\\\\___
# _\/\\\\\\________/\\\\\\_________________/\\\/////////\\\_\/\\\/////////\\\_
# _\/\\\//\\\____/\\\//\\\____/\\\__/\\\__\//\\\______\///__\/\\\_______\/\\\_
# _\/\\\\///\\\/\\\/_\/\\\___\//\\\/\\\____\////\\\_________\/\\\\\\\\\\\\\\__
# _\/\\\__\///\\\/___\/\\\____\//\\\\\________\////\\\______\/\\\/////////\\\_
# _\/\\\____\///_____\/\\\_____\//\\\____________\////\\\___\/\\\_______\/\\\_
# _\/\\\_____________\/\\\__/\\_/\\\______/\\\______\//\\\__\/\\\_______\/\\\_
# _\/\\\_____________\/\\\_\//\\\\/______\///\\\\\\\\\\\/___\/\\\\\\\\\\\\\/__
# _\///______________\///___\////__________\///////////_____\/////////////_____
# By toulousain79 ---> https://github.com/toulousain79/
#
######################################################################
#
# Copyright (c) 2013 toulousain79 (https://github.com/toulousain79/)
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# --> Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
#
##################### FIRST LINE #####################################
Upgrade_MainUserPasswordConfirm="
${CYELLOW}Merci de confirmer le mot de passe de l'utilisateur principal.
Vous pouvez utiliser l'ancien mot de passe ou en utiliser un nouveau.${CEND}"
Upgrade_LanguageSelect="
${CYELLOW}Quelle langue voulez-vous utiliser ? Tapez${CEND} ${CGREEN}fr${CEND} ${CYELLOW}pour du français OU ${CGREEN}en${CEND} ${CYELLOW}pour de l'anglais.
What language do you want to use ? Type${CEND} ${CGREEN}en${CEND} ${CYELLOW}for english OR${CEND} ${CGREEN}fr${CEND} ${CYELLOW}for french.${CEND}"
Questions_LanguageSelect="Quelle est la langue que vous souhaitez utiliser ? (en/fr) "
Questions_Category_MainUserInfos="${CYELLOW}#### Informations concernant l'utilisateur principal ####${CEND}"
Questions_Category_ServerInfos="${CYELLOW}#### Informations du serveur ####${CEND}"
Questions_Category_Services="${CYELLOW}#### Services ####${CEND}"
Questions_Category_Security="${CYELLOW}#### Securité ####${CEND}"
Questions_Category_Statistics="${CYELLOW}#### Statistiques ####${CEND}"
Questions_NeedSomeInfos="${CGREEN}############################################################${CEND}
${CGREEN}#${CEND} ${CYELLOW}Maintenant, il est nécessaire de répondre à quelques questions.${CEND}
${CGREEN}############################################################${CEND}"
Questions_MainUserUsername="Nom d'utilisateur pour l'utilisateur principal de votre seedbox: "
Questions_MainUserPassword="Mot de passe pour l'utilisateur principal (SANS espaces, PAS de caractères spéciaux): "
Questions_Message_LetsEncrypt="${CYELLOW}Il semble que l'adresse IP de ce serveur (<gsSrvIpAddress>) soit différente de votre IP publique (<gsSrvIpAddressExt>).
Pour obtenir un certificat signé et validé par Let's Encrypt, le port 80 doit être redirigé vers vôtre serveur si celui-ci se trouve derrière un firewall.
Cette rêgle est nécessaire et obligatoire.
Dans le cas contraire, vous n'obtiendrez qu'un certificat auto-signé.${CEND}"
Questions_HttpOpened="Confirmez-vous que le port 80 (http) est bel et bien redirigé vers vôtre serveur ?"
Questions_Message_IpRestriction="${CYELLOW}Si vous ne souhaitez pas activer la restriction par IP pour l'accès à votre serveur, tapez simplement: ${CEND}${CGREEN}NON${CEND}
${CRED}Mais ce n'est pas une très bonne idée !${CEND}
${CYELLOW}Toutefois, vous aurez toujours besoin d'ajouter au moins une adresse IP à la question suivante.
Rappelez-vous simplement que Fail2Ban et PeerGuardian peuvent vous bloquer si vous n'activez pas cette fonction...${CEND}"
Questions_UseIprestriction="Voulez-vous pour activer la restriction IP ?"
Questions_Message_AddAddresses="${CYELLOW}Ajouter maintenant, au moins une de vos adresses IP (publiques ou privées) qui seront autorisées à se connecter à votre serveur pour tous les services (liste blanche).
Vous serez en mesure de gérer cette liste plus tard sur le portail, et aussi d'ajouter des adresses IP dynamiques gérées avec des noms d'hôtes (DynDNS, No-IP, ...).${CEND}"
Questions_AddAddresses="Quelles sont les adresses IP publiques que vous souhaitez ajouter dès maintenant ? (ex: x.x.x.x,y.y.y.y)"
Questions_Message_SMTP_0="${CYELLOW}Voulez-vous utiliser un serveur SMTP externe ? (assure l'acheminement des e-mails envoyés par le serveur)${CEND}
${CYELLOW}Choisissez entre ${CGREEN}LOCAL${CEND}${CYELLOW} ou l'un des fournisseurs suivants ${CEND}${CGREEN}FREE${CEND}${CYELLOW}|${CEND}${CGREEN}OVH${CEND}${CYELLOW}|${CEND}${CGREEN}GMAIL${CEND}${CYELLOW}|${CEND}${CGREEN}YAHOO${CEND}${CYELLOW}|${CEND}${CGREEN}ZOHO${CEND}${CYELLOW} . Seules des transactions SSL seront utilisées (SSL / 465).${CEND}
${CYELLOW}Il est recommandé d'utiliser un serveur SMTP externe. Cela réduit le risque que les mails envoyés par le serveur soit considérés comme spam.
Vous pouvez modifier votre choix plus tard avec le portail.${CEND}"
Questions_SMTP="Quel est votre fournisseur de SMTP ? "
Questions_SMTP_Username="Quel est le nom d'utilisateur pour ce serveur SMTP ?"
Questions_SMTP_Password="Quel est le mot de passe pour ce serveur SMTP ?"
Questions_Message_SMTP_1="${CYELLOW}Soyez prudent lorsque vous saisissez votre adresse e-mail !
Si l'adresse saisie est fausse, vous ne recevrez pas l'email de confirmation à la fin de l'installation...
Si vous avez sélectionné un autre fournisseur que ${CGREEN}LOCAL${CEND}${CYELLOW} à la question précédente,
vous devez saisir ici l'adresse email correspondant au compte SMTP que vous avez spécifié.${CEND}"
Questions_EmailAddress="Quelle est votre adresse e-mail ? "
Questions_TimeZone="Quel est votre fuseau horaire ? (ex: Europe/Paris): "
Questions_PrimaryNetwork="Quelle est votre interface réseau primaire ? (ex: eth0, nic0, ...): "
Questions_BoxIpAddress="Quelle est l'adresse IP locale de ce serveur ? "
Questions_BoxIpAddressAdditional="Adresses IP supplémentaires détectées: "
Questions_BoxIpAddressExt="Quelle est l'adresse IP publique de ce serveur ? "
Questions_PortHTTP="Port HTTP pour NginX (habituellement 80): "
Questions_PortHTTPs="Port HTTPs pour NginX (habituellement 443): "
Questions_PortSSH="Port SSH (habituellement 22): "
Questions_PortFTPs="Port FTPs (habituellement 990): "
Questions_PortFTPData="Port FTP Active Data (habituellement 20): "
Questions_Message_ServerProvider="${CYELLOW}Peut-être souhaitez-vous surveiller votre serveur avec un service proposé par votre fournisseur ?${CEND}
${CYELLOW}Cela permettra à votre fournisseur d'effectuer des pings.${CEND}
${CRED}Cela n'est pas recommandé...
Donc, si vous n'utilisez pas ce service, désactivez le dès maintenant dans l'interface de votre fournisseur...
Si vous n'activez pas cette fonction maintenant ET que cette fonction est tout de même activée dans l'interface de votre fournisseur,
votre serveur peut être redémarré en mode de sauvetage/rescue par la plateforme de supervision de votre fournisseur (cas rencontré pour des serveurs OVH)...${CEND}
${CYELLOW}Il serait plus sage de désactiver la fonction de surveillance maintenant ET dans l'interface de votre fournisseur.${CEND}
${CRED}Dans le cas où vous souhaitez tout de même utiliser cette fonction, désactivez d'abord le service MAINTENANT dans l'interface de votre fournisseur.
Vous pourrez le réactiver à la fin de l'installation de MySB, une fois que vous aurez reçu le mail de confirmation.${CEND}
${CYELLOW}Choisissez entre ${CGREEN}${Global_None}${CEND}${CYELLOW} ou l'un des fournisseurs suivants ${CEND}${CGREEN}OVH${CEND}|${CGREEN}ONLINE${CEND}|${CGREEN}DIGICUBE${CEND}|${CGREEN}HETZNER${CEND}${CYELLOW} .${CEND}"
Questions_ServerProvider="Quelle est le fournisseur de votre serveur ? "
Questions_InstallSeedboxManager="Voulez-vous installer Seedbox-Manager ? (${Global_YES}/${Global_NO})"
Questions_InstallCakeBox="Voulez-vous installer CakeBox Light ? (${Global_YES}/${Global_NO}) "
Questions_InstallPlexMedia="Voulez-vous installer Plex Media Server ? (${Global_YES}/${Global_NO}) "
Questions_InstallWebmin="Voulez-vous installer Webmin ? (${Global_YES}/${Global_NO}) "
Questions_PortWebmin="Port pour Webmin (habituellement 10000): "
Questions_InstallOpenVPN="Voulez-vous installer OpenVPN ? (${Global_YES}/${Global_NO}) "
Questions_PortOpenvpnTUNwithGW="Port OpenVPN avec redirection du trafic (TUN): "
Questions_PortOpenvpnTUNwithoutGW="Port OpenVPN sans redirection du trafic (TUN): "
Questions_PortOpenvpnTAPwithoutGW="Port OpenVPN sans redirection du trafic (TAP): "
Questions_ProtoOpenVPN=("Quel est le protocole à utiliser (UDP si vous avez une connexion de bonne qualité, TCP autrement), 'UDP' ou 'TCP' ? ")
Questions_InstallNextCloud="Voulez-vous installer NextCloud ? (${Global_YES}/${Global_NO}) "
# Fix: this prompt was left in English inside the French locale file.
Questions_ContinueInstall="Voulez-vous continuer l'installation de MySB sans OpenVPN ? Appuyer sur entrer pour annuler l'installation, ou tapez ${Global_YES} pour continuer."
Questions_InstallLogwatch="Voulez-vous installer Logwatch ? (${Global_YES}/${Global_NO}) "
Questions_InstallFail2Ban="Voulez-vous installer Fail2ban (recommandé) ? (${Global_YES}/${Global_NO}) "
Questions_Message_Blocklists="${CYELLOW}Comment voulez-vous gérer les listes de blocage ?${CEND}
${CYELLOW}Avec${CEND} ${CGREEN}PeerGuardian${CEND}${CYELLOW}, vous protégez complètement votre serveur. Vous utiliserez également moins de RAM.${CEND}
${CYELLOW}Tandis qu'avec${CEND} ${CGREEN}rTorrent${CEND}${CYELLOW}, vous protègerez seulement votre utilisation de rTorrent. Et la consommation de mémoire sera en fonction du nombre d'utilisateurs utilisant votre seedbox.${CEND}
${CYELLOW}Choisissez ${CEND}${CGREEN}${Global_None}${CEND}${CYELLOW} si vous ne voulez pas utiliser de liste de blocage. Ceci est déconseillé !${CEND}"
Questions_Message_Blocklists_0="${CYELLOW}Votre serveur dispose de ${CRED}${gsMemory}${CEND} ${CYELLOW}MB de RAM. Il est recommandé d'utiliser PeerGuardian.${CEND}"
Questions_Message_Blocklists_1="${CYELLOW}Votre serveur dispose de ${CRED}$gsMemory${CEND} ${CYELLOW}MB de RAM. Vous pouvez choisir entre PeerGuardian et rTorrent.${CEND}"
Questions_InstallBlockList_0=("'PeerGuardian' OU 'rTorrent' OU '${Global_None}' ? ")
Questions_InstallDNScrypt="Voulez-vous installer DNScrypt-proxy (recommandé) ? (${Global_YES}/${Global_NO}) "
Questions_Message_Statistics="${CYELLOW}Voulez-vous me signaler votre installation ?
Les informations collectées sont:
1/ Le numéro de version de MySB (${gsCurrentVersion})
2/ Le pays où se trouve votre serveur (${sCountry})
3/ L'ID de votre serveur chiffré avec sha256 ($(sha256sum <<</etc/machine-id | awk '{ print $1 }'))
Aucune autre information n'est envoyée.
Les logs d'accès ne sont pas conservés, l'adresse IP de votre serveur n'est donc pas enregistrée.
Le but de cette démarche est uniquement de savoir si je dois continuer à me casser le cul à maintenir MySB ;-)
Si vous souhaitez un support (ex: Discord), vous devez signaler votre installation.
Vous devrez fournir le résultat de la commande ${CEND}${CGREEN}sha256sum <<</etc/machine-id${CEND}${CYELLOW} pour toute demande de support.${CEND}"
Questions_SendStatistics="Voulez-vous signaler votre installation à toulousain79 ? "
#### SUMMARY
# MAIN USER INFO
Summary_Language="${CBLUE}Langue : ${CEND}"
Summary_MainUser="${CBLUE}Utilisateur principal : ${CEND}"
Summary_MainUserPass="${CBLUE}Mot de passe : ${CEND}"
Summary_SmtpProvider="${CBLUE}Fournisseur SMTP : ${CEND}"
Summary_SmtpUsername="${CBLUE}Utilisateur SMTP : ${CEND}"
Summary_SmtpPassword="${CBLUE}Mot de passe SMTP : ${CEND}"
Summary_MainUserMail="${CBLUE}Votre adresse e-mail : ${CEND}"
# SERVER INFO
Summary_TimeZone="${CBLUE}Fuseau horaire : ${CEND}"
Summary_PrimaryInet="${CBLUE}Interface principale : ${CEND}"
Summary_SrvIpAddress="${CBLUE}Adresse IP locale du serveur : ${CEND}"
Summary_SrvIpAddressExt="${CBLUE}Adresse IP publique du serveur : ${CEND}"
Summary_gsHostNameFqdn="${CBLUE}Nom d'hôte FQDN : ${CEND}"
Summary_PortHTTPs="${CBLUE}Port pour HTTPs : ${CEND}"
Summary_Port_SSH="${CBLUE}Port pour SSH : ${CEND}"
Summary_Port_FTP="${CBLUE}Port pour FTPs : ${CEND}"
#### SERVICES
Summary_ServerProvider="${CBLUE}Fournisseur (supervision) : ${CEND}"
Summary_SeedboxManager="${CBLUE}Installer Seedbox-Manager : ${CEND}"
Summary_Cakebox="${CBLUE}Installer Cakebox-Light : ${CEND}"
Summary_PlexMedia="${CBLUE}Installer Plexmedia Server : ${CEND}"
Summary_Webmin="${CBLUE}Installer Webmin : ${CEND}"
Summary_WebminPort="${CBLUE}Port pour Webmin : ${CEND}"
Summary_NextCloud="${CBLUE}Installer NextCloud : ${CEND}"
Summary_OpenVPN="${CBLUE}Installer OpenVPN : ${CEND}"
Summary_gsOpenVPN_Proto="${CBLUE}Protocol OpenVPN : ${CEND}"
Summary_OpenVPN_WithGW="${CBLUE}Port VPN avec passerelle : ${CEND}"
Summary_OpenVPN_WithoutGW="${CBLUE}Port VPN sans passerelle : ${CEND}"
Summary_OpenVPN_TAP_WithoutGW="${CBLUE}Port VPN TAP sans passerelle : ${CEND}"
# SECURITY
Summary_HttpPortOpend="${CBLUE}Port 80 ouvert pour Let's Encrypt : ${CEND}"
Summary_IpRestriction="${CBLUE}Restriction par IP : ${CEND}"
Summary_MainUserIPs="${CBLUE}Vos adresses IP : ${CEND}"
Summary_InstallLogWatch="${CBLUE}Installer LogWatch : ${CEND}"
Summary_InstallFail2Ban="${CBLUE}Installer Fail2Ban : ${CEND}"
Summary_InstallDNScrypt="${CBLUE}Installer DNScrypt-proxy : ${CEND}"
Summary_PeerBlock="${CBLUE}Blocage par listes : ${CEND}"
#### STATISTICS
Summary_Statistics="${CBLUE}Statistiques : ${CEND}"
# Confirm
Summary_ConfirmMessage="${CYELLOW}En acceptant, l'installation pourra continuer.
Dans le cas contraire, vous retournerez au questionnaire.${CEND}"
Summary_Confirm="Voulez-vous continuer et confirmer ces valeurs ? (${Global_YES}/${Global_NO}) "
export Upgrade_MainUserPasswordConfirm Upgrade_LanguageSelect Questions_LanguageSelect Questions_Category_MainUserInfos Questions_Category_ServerInfos
export Questions_Category_Services Questions_Category_Security Questions_Category_Statistics Questions_NeedSomeInfos Questions_MainUserUsername Questions_MainUserPassword
export Questions_Message_LetsEncrypt Questions_HttpOpened Questions_Message_IpRestriction Questions_UseIprestriction Questions_Message_AddAddresses Questions_AddAddresses
export Questions_Message_SMTP_0 Questions_SMTP Questions_SMTP_Username Questions_SMTP_Password Questions_Message_SMTP_1 Questions_EmailAddress Questions_TimeZone
export Questions_PrimaryNetwork Questions_BoxIpAddress Questions_BoxIpAddressAdditional Questions_BoxIpAddressExt
export Questions_PortHTTP Questions_PortHTTPs Questions_PortSSH Questions_PortFTPs Questions_PortFTPData Questions_Message_ServerProvider
export Questions_ServerProvider Questions_InstallSeedboxManager Questions_InstallCakeBox Questions_InstallPlexMedia Questions_InstallWebmin Questions_PortWebmin
export Questions_InstallOpenVPN Questions_PortOpenvpnTUNwithGW Questions_PortOpenvpnTUNwithoutGW Questions_PortOpenvpnTAPwithoutGW Questions_ProtoOpenVPN
export Questions_InstallNextCloud Questions_ContinueInstall Questions_InstallLogwatch Questions_InstallFail2Ban Questions_Message_Blocklists
export Questions_Message_Blocklists_0 Questions_Message_Blocklists_1 Questions_InstallBlockList_0 Questions_InstallBlockList_1 Questions_InstallDNScrypt
export Questions_Message_Statistics Questions_SendStatistics
export Summary_Language Summary_MainUser Summary_MainUserPass Summary_SmtpProvider Summary_SmtpUsername Summary_SmtpPassword Summary_MainUserMail
export Summary_TimeZone Summary_PrimaryInet Summary_SrvIpAddress Summary_SrvIpAddressExt Summary_gsHostNameFqdn Summary_PortHTTPs Summary_Port_SSH Summary_Port_FTP
export Summary_ServerProvider Summary_SeedboxManager Summary_Cakebox Summary_PlexMedia Summary_Webmin Summary_WebminPort Summary_NextCloud Summary_OpenVPN
export Summary_gsOpenVPN_Proto Summary_OpenVPN_WithGW Summary_OpenVPN_WithoutGW Summary_OpenVPN_TAP_WithoutGW
export Summary_HttpPortOpend Summary_IpRestriction Summary_MainUserIPs Summary_InstallLogWatch Summary_InstallFail2Ban Summary_InstallDNScrypt Summary_PeerBlock
export Summary_Statistics Summary_ConfirmMessage Summary_Confirm
##################### LAST LINE ######################################
| true
|
4bd5c799497073b0afbb1166bbd190cd04463361
|
Shell
|
ralsallaq/metaTx
|
/scripts/jplaceStats.sh
|
UTF-8
| 960
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Submit one LSF array job that extracts pplacer statistics (CSV) from each
# .jplace file passed on the command line.
#requires python
module load python/2.7.13
# Output directory for the per-sample CSV files (idempotent).
mkdir -p analysis
# Split the argument list: sample inputs (*.jplace) vs. *.csv arguments.
# Anchored patterns so e.g. 'x.jplace.bak' is not picked up.
# NOTE(review): outFs is collected but never used below — confirm intent.
files=`echo "$@" | tr " " "\n" | grep '\.jplace$'`
outFs=`echo "$@" | tr " " "\n" | grep '\.csv$'`
#number of sample files
nFs=`echo $files | wc -w`
echo "$nFs"
echo $files
##go through the sample files
# func1 J FILELIST — process the J-th (1-based) file of the space-separated
# list; runs inside each LSF array element (see bsub below).
function func1 {
local j=$1
local fls=$2
file=`echo $fls | cut -f "$j" -d " "`
outPrefix=`basename "$file" .jplace | sed -e 's/sample_//g'`
echo "save a csv file for pplacer statistics from jplace file for $outPrefix"
echo "the command is python ./scripts/jplaceStats.py $file analysis/'$outPrefix'_pplaceStats.csv"
python ./scripts/jplaceStats.py "$file" --csv analysis/"$outPrefix"_pplaceStats.csv
echo "$outPrefix"
}
export LSB_JOB_REPORT_MAIL="N"
# Export func1 so the bsub-spawned bash can call it.
export -f func1
bsub -q "standard" -n 1 -R "span[hosts=1] rusage[mem=4000]" -P microbiome -J "jplaceStats[1-$nFs]" -oo "logging/jplaceStats_%I.log" "func1 \$LSB_JOBINDEX '$files'"
| true
|
bc6a3665795de0f60ce483e9c3eda3b362445db1
|
Shell
|
rdickey/aws-tools
|
/lib/util.bash
|
UTF-8
| 1,669
| 4.25
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
###################
# ARRAY UTILITIES #
###################
# Print 'true' (status 0) if the first argument equals any of the remaining
# arguments; otherwise print 'false' (status 1).
function containElementInArray()
{
    local needle="${1}"
    local candidate=''

    for candidate in "${@:2}"
    do
        if [[ "${candidate}" = "${needle}" ]]
        then
            echo 'true'
            return 0
        fi
    done

    echo 'false'
    return 1
}
#################
# AWS UTILITIES #
#################
# Emit the space-separated whitelist of supported AWS regions on stdout.
function getAllowRegions()
{
    local -r allowed='ap-northeast-1 ap-southeast-1 ap-southeast-2 eu-central-1 eu-west-1 sa-east-1 us-east-1 us-west-1 us-west-2'

    printf '%s\n' "${allowed}"
}
# Print 'true'/'false' depending on whether $1 is one of the allowed
# AWS regions (see getAllowRegions). Always exits 0, like the original.
function isValidRegion()
{
    local candidate="${1}"
    local allowed=($(getAllowRegions))

    echo "$(containElementInArray "${candidate}" "${allowed[@]}")"
}
####################
# STRING UTILITIES #
####################
# Percent-encode $1 for use in a URL: RFC 3986 unreserved characters pass
# through, a space becomes '+', everything else becomes %XX (uppercase hex).
# NOTE(review): walks characters via ${1:i:1}; multi-byte UTF-8 characters
# are not split into bytes — assumes ASCII input, confirm with callers.
function encodeURL()
{
    local length="${#1}"
    local i=0
    local walker=''

    for ((i = 0; i < length; i++))
    do
        walker="${1:i:1}"

        case "${walker}" in
            [a-zA-Z0-9.~_-])
                # Fix: never use data as the printf FORMAT string.
                printf '%s' "${walker}"
                ;;
            ' ')
                printf '+'
                ;;
            *)
                # "'c" makes printf %X print the character's code point.
                printf '%%%X' "'${walker}"
                ;;
        esac
    done
}
# Print the message in bold red on stderr (-e enables the ANSI escapes).
function error()
{
    local message="${1}"

    echo -e "\033[1;31m${message}\033[0m" 1>&2
}
# Print the message via error(), then abort the whole script with status 1.
function fatal()
{
    local message="${1}"

    error "${message}"
    exit 1
}
# Normalize a path: collapse every run of '/' into a single '/' and strip
# a trailing slash. 's/\/\/*/\//g' matches one-or-more slashes, so a single
# global pass suffices — the original grep/while re-scan loop was redundant.
function formatPath()
{
    printf '%s\n' "${1}" | sed -e 's/\/\/*/\//g' -e 's/\/$//g'
}
# Print 'true' when $1 is empty after trimming spaces, 'false' otherwise.
# ($1 is deliberately left unquoted, matching the original call style.)
function isEmptyString()
{
    case "$(trimString ${1})" in
        '')
            echo 'true'
            ;;
        *)
            echo 'false'
            ;;
    esac
}
# Strip leading and trailing spaces from $1 (tabs are left untouched).
function trimString()
{
    echo "${1}" | sed 's/^ *//; s/ *$//'
}
| true
|
bfe2e9fd5d8281b5444e5994136efd3aaeef52a3
|
Shell
|
auriocus/iNotes-exporter
|
/packageinotes.sh
|
UTF-8
| 488
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
target=/tmp/inotes-install
srcdir=~/bin
destdir=$(pwd)
rm -rf $target
mkdir $target
cd ~/bin
cp $srcdir/iNotes-exporter-1.8-jar-with-dependencies.jar $srcdir/webmailtrust $target
sed 's/keyStorePassword=.*/keyStorePassword=####### \\/' $srcdir/start-inotes-cert.sh > $target/start-inotes-cert.sh
chmod +x $target/start-inotes-cert.sh
touch $target/webmailclientcert.p12
cd $(dirname $target)
tar -cvzf $destdir/inotes-install.tgz -C $(dirname $target) $(basename $target)
| true
|
c527007a29e2165a92f44e6184263ae02f99bb12
|
Shell
|
martinalberto/Projects
|
/HeatingSystem/utemper/software/utemper/start_rele.sh
|
UTF-8
| 478
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Watchdog for the 'rele' process: restart it up to 15 times; if it keeps
# exiting unexpectedly, log the failure and reboot the machine.
echo -e "\tIniciar start_rele.sh"
count=1
while [ $count -le 15 ]
do
# si el programa esta arrancado lo cerramos.
killall rele 2>/dev/null
/home/pi/utemper/rele
# BUG FIX: the loop tests 'count' but the original incremented an unrelated
# variable 'x', so the loop never terminated and the reboot path below was
# unreachable.
count=$(( count + 1 ))
sleep 30
echo $(date +"%F_%T")";4;start_rele.sh;salida inesperada de rele KO">>/var/utemp/logs.log
done
echo $(date +"%F_%T")";5;start_rele.sh;max errores KO">>/var/utemp/logs.log
echo $(date +"%F_%T")";5;start_rele.sh;Rebbot OK">>/var/utemp/logs.log
sleep 60
reboot
| true
|
51cf420bf8bf9dbaa781dab2d82fc7b0a085126e
|
Shell
|
containers/podman
|
/test/system/170-run-userns.bats
|
UTF-8
| 5,844
| 3.234375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bats -*- bats -*-
# shellcheck disable=SC2096
#
# Tests for podman build
#
# bats file_tags=distro-integration
#
load helpers
# Skip the current bats test unless the active OCI runtime is crun
# (--group-add keep-groups is a crun-specific feature).
function _require_crun() {
runtime=$(podman_runtime)
if [[ $runtime != "crun" ]]; then
skip "runtime is $runtime; keep-groups requires crun"
fi
}
@test "podman --group-add keep-groups while in a userns" {
skip_if_rootless "chroot is not allowed in rootless mode"
skip_if_remote "--group-add keep-groups not supported in remote mode"
_require_crun
run chroot --groups 1234 / ${PODMAN} run --rm --uidmap 0:200000:5000 --group-add keep-groups $IMAGE id
is "$output" ".*65534(nobody)" "Check group leaked into user namespace"
}
@test "podman --group-add keep-groups while not in a userns" {
skip_if_rootless "chroot is not allowed in rootless mode"
skip_if_remote "--group-add keep-groups not supported in remote mode"
_require_crun
run chroot --groups 1234,5678 / ${PODMAN} run --rm --group-add keep-groups $IMAGE id
is "$output" ".*1234" "Check group leaked into container"
}
@test "podman --group-add without keep-groups while in a userns" {
skip_if_cgroupsv1 "run --uidmap fails on cgroups v1 (issue 15025, wontfix)"
skip_if_rootless "chroot is not allowed in rootless mode"
skip_if_remote "--group-add keep-groups not supported in remote mode"
run chroot --groups 1234,5678 / ${PODMAN} run --rm --uidmap 0:200000:5000 --group-add 457 $IMAGE id
is "$output" ".*457" "Check group leaked into container"
}
@test "rootful pod with custom ID mapping" {
skip_if_cgroupsv1 "run --uidmap fails on cgroups v1 (issue 15025, wontfix)"
skip_if_rootless "does not work rootless - rootful feature"
random_pod_name=$(random_string 30)
run_podman pod create --uidmap 0:200000:5000 --name=$random_pod_name
run_podman pod start $random_pod_name
run_podman pod inspect --format '{{.InfraContainerID}}' $random_pod_name
run_podman inspect --format '{{.HostConfig.IDMappings.UIDMap}}' $output
is "$output" ".*0:200000:5000" "UID Map Successful"
# Remove the pod and the pause image
run_podman pod rm $random_pod_name
run_podman rmi -f $(pause_image)
}
@test "podman --remote --group-add keep-groups " {
if ! is_remote; then
skip "this test only meaningful under podman-remote"
fi
run_podman 125 run --rm --group-add keep-groups $IMAGE id
is "$output" ".*not supported in remote mode" "Remote check --group-add keep-groups"
}
@test "podman --group-add without keep-groups " {
run_podman run --rm --group-add 457 $IMAGE id
is "$output" ".*457" "Check group leaked into container"
}
@test "podman --group-add keep-groups plus added groups " {
run_podman 125 run --rm --group-add keep-groups --group-add 457 $IMAGE id
is "$output" ".*the '--group-add keep-groups' option is not allowed with any other --group-add options" "Check group leaked into container"
}
@test "podman userns=auto in config file" {
skip_if_remote "userns=auto is set on the server"
if is_rootless; then
grep -E -q "^$(id -un):" /etc/subuid || skip "no IDs allocated for current user"
else
grep -E -q "^containers:" /etc/subuid || skip "no IDs allocated for user 'containers'"
fi
cat > $PODMAN_TMPDIR/userns_auto.conf <<EOF
[containers]
userns="auto"
EOF
# First make sure a user namespace is created
CONTAINERS_CONF_OVERRIDE=$PODMAN_TMPDIR/userns_auto.conf run_podman run -d $IMAGE sleep infinity
cid=$output
run_podman inspect --format '{{.HostConfig.UsernsMode}}' $cid
is "$output" "private" "Check that a user namespace was created for the container"
run_podman rm -t 0 -f $cid
# Then check that the main user is not mapped into the user namespace
CONTAINERS_CONF_OVERRIDE=$PODMAN_TMPDIR/userns_auto.conf run_podman 0 run --rm $IMAGE awk '{if($2 == "0"){exit 1}}' /proc/self/uid_map /proc/self/gid_map
}
@test "podman userns=auto and secrets" {
ns_user="containers"
if is_rootless; then
ns_user=$(id -un)
fi
grep -E -q "${ns_user}:" /etc/subuid || skip "no IDs allocated for user ${ns_user}"
test_name="test_$(random_string 12)"
secret_file=$PODMAN_TMPDIR/secret$(random_string 12)
secret_content=$(random_string)
echo ${secret_content} > ${secret_file}
run_podman secret create ${test_name} ${secret_file}
run_podman run --rm --secret=${test_name} --userns=auto:size=1000 $IMAGE cat /run/secrets/${test_name}
is "$output" "$secret_content" "Secrets should work with user namespace"
run_podman secret rm ${test_name}
}
@test "podman userns=nomap" {
if is_rootless; then
ns_user=$(id -un)
baseuid=$(grep -E "${ns_user}:" /etc/subuid | cut -f2 -d:)
test ! -z ${baseuid} || skip "no IDs allocated for user ${ns_user}"
test_name="test_$(random_string 12)"
run_podman run -d --userns=nomap $IMAGE sleep 100
cid=${output}
run_podman top ${cid} huser
is "${output}" "HUSER.*${baseuid}" "Container should start with baseuid from /etc/subuid not user UID"
run_podman rm -t 0 --force ${cid}
else
run_podman 125 run -d --userns=nomap $IMAGE sleep 100
is "${output}" "Error: nomap is only supported in rootless mode" "Container should fail to start since nomap is not supported in rootful mode"
fi
}
@test "podman userns=keep-id" {
user=$(id -u)
run_podman run --rm --userns=keep-id $IMAGE id -u
is "${output}" "$user" "Container should run as the current user"
}
@test "podman userns=keep-id in a pod" {
user=$(id -u)
run_podman pod create --userns keep-id
pid=$output
run_podman run --rm --pod $pid $IMAGE id -u
is "${output}" "$user" "Container should run as the current user"
run_podman rmi -f $(pause_image)
}
| true
|
526e599f3cec4df4f8ca65bf5eeab4fccdec997a
|
Shell
|
ESAI-CEU-UCH/kaggle-epilepsy
|
/test.sh
|
UTF-8
| 4,638
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This file is part of ESAI-CEU-UCH/kaggle-epilepsy (https://github.com/ESAI-CEU-UCH/kaggle-epilepsy)
#
# Copyright (c) 2014, ESAI, Universidad CEU Cardenal Herrera,
# (F. Zamora-Martínez, F. Muñoz-Malmaraz, P. Botella-Rocamora, J. Pardo)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
. settings.sh
. scripts/configure.sh
if ! ./preprocess.sh; then
exit 10
fi
###############################################################################
MLP_TEST_SCRIPT=scripts/MODELS/test_one_subject_mlp.lua
KNN_TEST_SCRIPT=scripts/MODELS/test_one_subject_knn.lua
TEST_OUTPUT=$(mktemp --tmpdir=$SUBMISSIONS_PATH/ test.XXXXXX.txt)
BASE=$(basename $TEST_OUTPUT)
# Remove the temporary submission file.
cleanup()
{
rm -f -- "$TEST_OUTPUT"
}
# control-c execution: clean up the temp file before aborting.
control_c()
{
echo -en "\n*** Exiting by control-c ***\n"
cleanup
exit 10
}
# BUG FIX: the handler above was defined but never installed.
trap control_c SIGINT
test_subject()
{
script=$1
model=$2
subject=$3
fft=$4
cor=$5
mkdir -p $model.TEST
$APRIL_EXEC $script $model $subject $model.TEST/validation_$subject.test.txt $fft $cor &&
cp -f $model/validation_$subject.txt $model.TEST/
return $?
}
test_mlp()
{
model=$1
subject=$2
fft=$3
cor=$4
test_subject $MLP_TEST_SCRIPT $model $subject $fft $cor
return $?
}
test_knn()
{
model=$1
subject=$2
fft=$3
cor=$4
test_subject $KNN_TEST_SCRIPT $model $subject $fft $cor
return $?
}
bmc_ensemble()
{
echo "Computing BMC ensemble result"
$APRIL_EXEC scripts/ENSEMBLE/bmc_ensemble.lua \
$ANN2P_PCA_CORW_RESULT.TEST $ANN5_PCA_CORW_RESULT.TEST $ANN2_ICA_CORW_RESULT.TEST \
$KNN_ICA_CORW_RESULT.TEST $KNN_PCA_CORW_RESULT.TEST \
$KNN_CORG_RESULT.TEST $KNN_COVRED_RESULT.TEST > $1
return $?
}
###############################################################################
# CHECK TRAINED MODELS
if [[ ! -e $ANN5_PCA_CORW_RESULT ]]; then
echo "Execute train.sh before test.sh"
exit 10
fi
if [[ ! -e $ANN2P_PCA_CORW_RESULT ]]; then
echo "Execute train.sh before test.sh"
exit 10
fi
if [[ ! -e $ANN2_ICA_CORW_RESULT ]]; then
echo "Execute train.sh before test.sh"
exit 10
fi
if [[ ! -e $KNN_PCA_CORW_RESULT ]]; then
echo "Execute train.sh before test.sh"
exit 10
fi
if [[ ! -e $KNN_ICA_CORW_RESULT ]]; then
echo "Execute train.sh before test.sh"
exit 10
fi
if [[ ! -e $KNN_CORG_RESULT ]]; then
echo "Execute train.sh before test.sh"
exit 10
fi
if [[ ! -e $KNN_COVRED_RESULT ]]; then
echo "Execute train.sh before test.sh"
exit 10
fi
#############################################################################
for subject in $SUBJECTS; do
echo "# $subject"
if ! test_mlp $ANN5_PCA_CORW_RESULT $subject $FFT_PCA_PATH $WINDOWED_COR_PATH; then
cleanup
exit 10
fi
if ! test_mlp $ANN2P_PCA_CORW_RESULT $subject $FFT_PCA_PATH $WINDOWED_COR_PATH; then
cleanup
exit 10
fi
if ! test_mlp $ANN2_ICA_CORW_RESULT $subject $FFT_ICA_PATH $WINDOWED_COR_PATH; then
cleanup
exit 10
fi
if ! test_knn $KNN_PCA_CORW_RESULT $subject $FFT_PCA_PATH $WINDOWED_COR_PATH; then
cleanup
exit 10
fi
if ! test_knn $KNN_ICA_CORW_RESULT $subject $FFT_ICA_PATH $WINDOWED_COR_PATH; then
cleanup
exit 10
fi
if ! test_knn $KNN_CORG_RESULT $subject $CORG_PATH; then
cleanup
exit 10
fi
if ! test_knn $KNN_COVRED_RESULT $subject $COVRED_PATH; then
cleanup
exit 10
fi
done
mkdir -p $BMC_ENSEMBLE_RESULT
if ! bmc_ensemble $TEST_OUTPUT; then
cleanup
exit 10
fi
echo "The BMC ensemble result is located at $TEST_OUTPUT"
| true
|
245c7a99496cea19783447adfc7f7cbfe5ed1373
|
Shell
|
lukchen/csye6225-spring2018
|
/infrastructure/aws/scripts/csye6225-aws-networking-teardown.sh
|
UTF-8
| 1,162
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactively tear down an AWS VPC: delete one of its route tables,
# detach and delete its internet gateway, then delete the VPC itself.
echo Deleting vpc, below is the Vpcs:
aws ec2 describe-vpcs --query 'Vpcs[*].{VpcId:VpcId,Default:IsDefault}'
read -p "Please enter the VpcId you want to delete: " vpcId
# Grab the first two route tables attached to the chosen VPC.
# NOTE(review): routeTable1 is captured but never used, and routeTable2 is
# overwritten by the interactive prompt just below — confirm intent.
routeTable1=`aws ec2 describe-route-tables --filters "Name=vpc-id,Values=$vpcId" --query 'RouteTables[0].RouteTableId' --output text`
routeTable2=`aws ec2 describe-route-tables --filters "Name=vpc-id,Values=$vpcId" --query 'RouteTables[1].RouteTableId' --output text`
aws ec2 describe-route-tables
read -p "Please enter the RoutetableId you want to delete: " routeTable2
echo Start to delete Route Table $routeTable2 associated with $vpcId ...
aws ec2 delete-route-table --route-table-id $routeTable2
# Internet gateway currently attached to the VPC.
internetGateway=`aws ec2 describe-internet-gateways --filters "Name=attachment.vpc-id,Values=$vpcId" --query 'InternetGateways[*].InternetGatewayId' --output text`
echo Start to delete Internet Gateway $internetGateway associated with $vpcId ...
# The gateway must be detached before it can be deleted.
aws ec2 detach-internet-gateway --internet-gateway-id $internetGateway --vpc-id $vpcId
aws ec2 delete-internet-gateway --internet-gateway-id $internetGateway
aws ec2 delete-vpc --vpc-id $vpcId
echo Vpc $vpcId is deleted!
| true
|
11d3c86fb72db16c5fb6766a58de27e8c61c8993
|
Shell
|
petronny/aur3-mirror
|
/youtube2mp3/PKGBUILD
|
UTF-8
| 909
| 2.75
| 3
|
[] |
no_license
|
# Maintainer: Patryk Rzucidlo (@PTKDev) <ptkdev@gmail.com>
# Contributor: [Vitaliy Berdinskikh](mailto:ur6lad@archlinux.org.ua) aka UR6LAD
pkgname=youtube2mp3
pkgver=0.1.4
pkgrel=1
pkgdesc="A simple system to convert YouTube music videos to MP3"
arch=('any')
url="https://github.com/ur6lad/youtube2mp3"
license=('GPL3')
depends=('ffmpeg' 'lame' 'youtube-dl' 'zenity')
source=(http://github.com/ur6lad/${pkgname}/archive/${pkgver}.tar.gz)
changelog=${pkgname}.ChangeLog.markdown
package() {
install -d -m 755 "$pkgdir"/usr/bin
install -d -m 755 "$pkgdir"/usr/share/{applications,pixmaps}
cd "$srcdir"/${pkgname}-${pkgver}
install -m 755 youtube2mp3.sh "$pkgdir"/usr/bin/youtube2mp3
install -m 644 YouTube\ Downloader.desktop "$pkgdir"/usr/share/applications/
install -m 644 img/youtube.png "$pkgdir"/usr/share/pixmaps/
}
md5sums=('daac38dc66b3a6d4e3bf3c637a146a8c')
sha256sums=('01402bb8f687b73691f884de387acb47c297d7d4a877bab8d9dc7314ac58db7f')
| true
|
0569070ce9893f8228a2c6ae808d3ac6f02803cb
|
Shell
|
wesm87/dotfiles
|
/completions.zsh.sh
|
UTF-8
| 485
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
# shellcheck shell=bash disable=1090
# Source every completion script under ~/.dotfiles/completions that the
# external `can-source-file` helper approves of. No-op when the folder
# does not exist.
function __dotfiles_completions_zsh() {
  local -r completions_dir="${HOME}/.dotfiles/completions"

  # Nothing to load when the completions folder is absent.
  [ -d "$completions_dir" ] || return 0

  local script
  for script in "${completions_dir}/"*.zsh.sh; do
    if can-source-file "$script"; then
      source "$script"
    fi
  done
}
__dotfiles_completions_zsh
| true
|
e1c049067dcc85279e5ad6778b8a68e4cea773f1
|
Shell
|
shwethasparmar/Shell-scripts
|
/migrationScript.sh
|
UTF-8
| 1,202
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Migrate a Git repository from one remote to another, preserving every
# branch and tag.
#
# Flow: clone the old repo -> fetch -> create a local branch for every
# remote-tracking branch -> add the new remote -> push all branches and
# tags -> make the new remote the "origin".

echo "-----------------------Enter old repo url-----------------------"
read -r oldRepoUrl
echo "-----------------------Enter new repo url-----------------------"
read -r newRepoUrl

echo "-----------------------CLONING OLD GIT REPO-----------------------"
git clone "$oldRepoUrl"

echo "-----------------------FETCHING FROM OLD GIT REPO-----------------------"
# Derive the clone directory name from the URL (second capture group).
urlRe='https://github.homedepot.com/(.*)/(.*).git'
if [[ $oldRepoUrl =~ $urlRe ]]
then
	folderName=${BASH_REMATCH[2]}
	# Abort rather than running the git commands below in the wrong directory.
	cd "$folderName" || exit 1
	git fetch origin
	echo "-----------------------CHECKING OUT ALL BRANCHES FROM OLD GIT REPO-----------------------"
	# Check out each remote-tracking branch so a local branch exists for it.
	branchRe='remotes/origin/(.*)'
	for eachBranch in $(git branch -a); do
		if [[ $eachBranch =~ $branchRe ]]
		then git checkout "${BASH_REMATCH[1]}"
		fi
	done
	echo "-----------------------ALL THE LOCAL BRANCHES-----------------------"
	git branch -a
fi

echo "-----------------------MOVING TO THE NEW REPO-----------------------"
git remote add new-origin "$newRepoUrl"
git push --all "$newRepoUrl"
git push --tags "$newRepoUrl"
git remote -v
git remote rm origin
git remote rename new-origin origin
echo "-----------------------DONE SUCCESSFULLY-----------------------"
| true
|
2761b9698cdcc736dee5ead20a9efd6b998d1eb2
|
Shell
|
simwir/Aut-LTL-Thesis
|
/run_benchkit.sh
|
UTF-8
| 824
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH --time=1:15:00
#SBATCH --mail-type=FAIL,END
#SBATCH --mem=16500
# Usage: tool verifypn_binary examination [partition]
# SLURM array-job wrapper: runs one MCC-2020 model (selected by the array
# index) through a BenchKit head script with a given verifypn binary.
#   $1 - subdirectory of ./scripts containing BenchKit_head.sh
#   $2 - verifypn binary name under ./parallel-bin
#   $3 - examination name (exported as BK_EXAMINATION)
#   $4 - fallback array index when not launched as a SLURM array job

# Allow manual runs: take the array index from $4 if SLURM did not set it.
if [[ -z $SLURM_ARRAY_TASK_ID ]] ; then
	SLURM_ARRAY_TASK_ID=$4
fi
export BK_TOOL=tapaal
export PREFIX="$(pwd)/scripts/$1"
export VERIFYPN="$(pwd)/parallel-bin/$2"
# Pick the Nth model directory (1-based) from ./mcc2020.
MODEL=$(ls "./mcc2020" | sed -n "${SLURM_ARRAY_TASK_ID}p")
export BK_EXAMINATION=$3
export BK_TIME_CONFINEMENT=3600
# Scratch directory; the model files are copied here and it is the CWD
# while BenchKit runs.
export TEMPDIR=$(mktemp -d)
export LD_LIBRARY_PATH="$(pwd)/parallel-bin"
export MODEL_PATH=$TEMPDIR
mkdir -p BENCHKIT/$1/$2/
mkdir -p $TEMPDIR
# Result file: one output per (tool, binary, model, examination) tuple.
F="$(pwd)/BENCHKIT/$1/$2/${MODEL}.${3}"
cp ./mcc2020/$MODEL/* $TEMPDIR
cd $TEMPDIR
# 16 GiB virtual-memory cap — currently disabled (ulimit commented out).
let "m=16*1024*1024"
#ulimit -v $m
echo "$PREFIX/BenchKit_head.sh &> $F"
# Skip models that already have a non-empty result file ("No Redo").
if [ -s "$F" ]
then
	echo "No Redo!"
else
	$PREFIX/BenchKit_head.sh &> $F
fi
rm -r $TEMPDIR
| true
|
d903b3267f557f0f4067ca60a6947318f40712d2
|
Shell
|
YaoStriveCode/home
|
/bin/sshscreen
|
UTF-8
| 983
| 4
| 4
|
[] |
no_license
|
#!/bin/sh
# Small utility to intelligently start a (sub)screen session to a remote
#
# Usage: sshscreen [user@]host[:port]
# Attaches (creating if needed) a remote GNU screen session over ssh.
# When already running inside a local screen, the remote session is opened
# in a new nested window with a distinct escape key so the two screens do
# not fight over keystrokes.

NESTED_ESCAPE="^Ee"
TOP_ESCAPE="^Uu"

REMOTE=$1
if [ -z "$REMOTE" ]; then
	echo "Missing argument, expecting [user@]host[:port]" >&2
	exit 1
fi

# Split the optional user@ prefix.
case $REMOTE in *@*)
	USERNAME=${REMOTE%@*}
	REMOTE=${REMOTE#*@}
	;;
esac
# Split the optional :port suffix.
case $REMOTE in *:*)
	PORT=${REMOTE#*:}
	REMOTE=${REMOTE%:*}
	;;
esac

# If the "sss" helper is installed, ensure its master channel to the
# remote is up before attaching (status 1 means "not started").
sss=$(command -v sss 2>/dev/null)
if [ -n "$sss" ]; then
	"$sss" "$REMOTE" status >/dev/null
	if [ $? -eq 1 ]; then
		echo -n "Opening master channel to $REMOTE..."
		"$sss" "$REMOTE" start
		if [ $? -ne 0 ]; then
			echo "failed."
			exit 1
		fi
		echo "done."
	fi
fi

# Build the ssh + screen command: -D -R detaches any other attachment and
# reattaches here, creating the session if none exists.
CMD="ssh -t $REMOTE"
[ -n "$USERNAME" ] && CMD="$CMD -l $USERNAME"
[ -n "$PORT" ] && CMD="$CMD -p $PORT"
CMD="$CMD screen -D -R -e $TOP_ESCAPE"

# Inside an existing screen: run the remote session as a nested window
# titled user@host:port, with its own escape key.
if [ "$TERM" = "screen" ]; then
	NAME=$REMOTE
	[ -n "$USERNAME" ] && NAME="$USERNAME@$NAME"
	[ -n "$PORT" ] && NAME="$NAME:$PORT"
	CMD="screen -t $NAME $CMD -e $NESTED_ESCAPE"
fi
$CMD
| true
|
361e210a1c230d39752a5515381c7cf6b60000a9
|
Shell
|
l2dy/split-cue
|
/split-cue-flac
|
UTF-8
| 1,692
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/zsh
# Split single-file FLAC album images (> 80 MB) into per-track FLACs using
# the accompanying .cue sheet.
#
# Usage: split-cue-flac [-c cue_encoding]
#   -c  source encoding of the .cue files; they are converted to UTF-8.
#
# For every image found under the current directory: shnsplit cuts the
# tracks into a scratch dir, cuetag copies the cue metadata onto them, and
# if the file name contains "cd N"/"disk N" the tracks land in a CDn/
# subdirectory. The original image is kept, renamed to *.flac.image.

CUETAG_CMD=`dirname $0`/cuetag
ICONV_CUE_F=
ICONV_CUE_T=UTF8
while getopts 'hc:' OPTION
do
	case $OPTION in
	c)
		ICONV_CUE_F=$OPTARG
		;;
	h)
		printf "Usage: %s: [-c cue_encoding]\n" $(basename $0) >&2
		exit 2
		;;
	esac
done

DISKNUM=
# Extract a disc number from "...cd 2..." / "...disk 2..." style names.
# perl leaves the input unchanged when the pattern does not match, so an
# unchanged result means "no disc number".
test_disk_image() {
	DISKNUM=`echo "$1" | perl -pe 's/.*(?:cd|disk)\s*(\d+).*/$1/i'`
	if [ "x$DISKNUM" = "x$1" ]; then
		DISKNUM=""
	fi
}

SAVEIFS=$IFS
# Split on newlines only so paths containing spaces survive. $'\n' is a
# real newline; the previous IFS='\n' was the two literal characters
# backslash and "n", which mangled any file name containing them.
IFS=$'\n'
WORKDIR=.tmp-split-cue-flac
find ./ -name '*.flac' -size +80M | while read -r image
do
	echo "$image"
	DIR=`dirname "$image"`
	BASE=`basename "$image"`
	BASE=`echo "$BASE" | sed -e 's/.flac//'`
	CUE=$BASE.cue
	IMAGE=$BASE.flac
	cd "$DIR"
	echo "Splitting with shnsplit"
	rm -rf $WORKDIR
	mkdir $WORKDIR
	# Normalize the cue sheet to UTF-8 when an input encoding was given.
	if [ "x$ICONV_CUE_F" != "x" ]; then
		echo "Converting cue $CUE"
		iconv -f $ICONV_CUE_F -t $ICONV_CUE_T "$CUE" -o "$WORKDIR/$CUE"
	else
		cp "$CUE" "$WORKDIR/$CUE"
	fi && \
	shnsplit -d $WORKDIR -o flac -f "$WORKDIR/$CUE" -t "%n – %t" "$IMAGE" && \
	echo "Moving $IMAGE -> ${IMAGE}.image" && \
	mv "$IMAGE" "${IMAGE}.image" && \
	echo "Fixing tags with cuetag" && \
	$CUETAG_CMD "$WORKDIR/$CUE" ./$WORKDIR/*.flac && \
	test_disk_image "$IMAGE" && \
	DSK= && \
	if [ "x$DISKNUM" != "x" ]; then
		DSK="CD$DISKNUM"
		echo "Creating $DSK dir"
		rm -rf $DSK
		mkdir $DSK
	fi && \
	mv $WORKDIR/*.flac ./$DSK && \
	rm -r $WORKDIR
	# Step back up unless the image was found at the top level.
	if [ "x$DIR" != "x." ]; then
		cd ../
	fi
done
IFS=$SAVEIFS
| true
|
9999ebcb1f3edd12f5acb9ae26f53eb7c065cde6
|
Shell
|
operasoftware/ssh-key-authority
|
/services/init.d/keys-sync
|
UTF-8
| 849
| 3.546875
| 4
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
#!/bin/sh
### BEGIN INIT INFO
# Provides: keys-sync
# Required-Start: mysql
# Required-Stop: mysql
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: SSH key synchronization daemon
### END INIT INFO

# SysV init script for the SSH Key Authority synchronization daemon.
# Runs syncd.php as the unprivileged keys-sync user via start-stop-daemon.

. /lib/lsb/init-functions

SCRIPT=/srv/keys/scripts/syncd.php   # daemon entry point
USER=keys-sync                       # account the daemon runs as
PIDFILE=/var/run/keys-sync.pid

# Nothing to do when the daemon script is not installed.
test -f $SCRIPT || exit 0

case "$1" in
	start)
		log_daemon_msg "Starting keys-sync daemon"
		start-stop-daemon --start --quiet --pidfile $PIDFILE --startas $SCRIPT --user $USER --
		log_end_msg $?
		;;
	stop)
		log_daemon_msg "Stopping keys-sync daemon"
		# NOTE(review): stop matches by process name (--name syncd.php),
		# presumably because the daemon does not write the pidfile — confirm.
		start-stop-daemon --stop --quiet --pidfile $PIDFILE --name syncd.php --user $USER
		log_end_msg $?
		rm -f $PIDFILE
		;;
	restart)
		$0 stop && $0 start
		;;
	*)
		log_action_msg "Usage: /etc/init.d/keys-sync {start|stop|restart}"
		exit 2
		;;
esac

exit 0
| true
|
5e7ab3689583b82a4cf541237dc0654ad912d1d2
|
Shell
|
nnalpas/Proteogenomics_reannotation
|
/src/Genome4Cactus.sh
|
UTF-8
| 4,285
| 3.09375
| 3
|
[] |
no_license
|
# Recipe for preparing bacterial genomes and running Cactus whole-genome
# alignment (synteny analysis). This is a command log rather than a robust
# script: paths, taxa and dates are hard-coded, and the cactus invocations
# at the bottom were run/retried manually.

# Retrieve a list of all available genome
wget ftp://ftp.ncbi.nlm.nih.gov/genomes/genbank/bacteria/assembly_summary.txt
# Filter the list of geneome based on specific taxon ID
# (keep header rows, plus rows whose column 6 or 7 taxid appears in taxonomy_result.txt)
awk -F '\t' 'FNR==NR{k[$1]=1;next;} FNR==1 || FNR==2 || k[$6] || k[$7]' taxonomy_result.txt assembly_summary.txt > assembly_summary_taxon.txt
# Only keep the complete genome
awk -F '\t' '{if($12=="Complete Genome") print}' assembly_summary_taxon.txt > assembly_summary_taxon_complete_genomes.txt
# Download all genomes (column 20 is the assembly's FTP directory URL)
for path in $(cut -f 20 ./work/Synechocystis_6frame/Synteny/assembly_summary_taxon_complete_genomes.txt); do
	wget -R "*_from_*" -P ./work/Synechocystis_6frame/Synteny/ "${path}/*genomic.fna.gz"
done
# Extract all data
cd ./work/Synechocystis_6frame/Synteny/; gunzip ./*.gz; cd -;
# Collect the info of each downloaded assembly
# (prefix each matching summary row with the local file path)
for infile in `find /home/tu/tu_tu/tu_kxmna01/work/Synechocystis_6frame/Synteny -name "GCA*"`; do
	mypattern=`basename $infile | cut -f1,2 -d_`
	grep $mypattern assembly_summary_taxon_complete_genomes.txt | awk -v f=$infile '{print f "\t" $0}' >> downloaded_assembly.txt
done
# Use vim to clean up the organism name (remove parentheses '()' or '[]', slashes '/', equal '=' and make all names unique (i.e. 'Synechococcus sp. WH 8101', 'Synechocystis sp. PCC 6803'). This can be automatised as well.
# Replace all space by underscore in organisms name (column 9)
awk -F "\t" '{OFS = "\t"; gsub(/ /,"_",$9); print}' downloaded_assembly.txt > downloaded_assembly_format.txt
# Create the Newick file required for Cactus
# (star tree: every organism at distance 1.0; sed joins names with ":1.0,")
echo -n "(" > evolverCyanobacteria.txt
cut -f9 downloaded_assembly_format.txt | sed ':a;N;$!ba;s/\n/:1.0,/g' >> evolverCyanobacteria.txt
sed -i "s/$/:1.0);/" evolverCyanobacteria.txt
echo "" >> evolverCyanobacteria.txt
# Keep only the first sequence for each organism (make sure this is the chromosome sequence and not plasmid) and format the header within each fasta
# Then append "name /abs/path.fasta" lines, as Cactus expects after the tree.
cut -f1,9 downloaded_assembly_format.txt |
while IFS=$'\t' read -r -a myArray; do
	file=`basename ${myArray[0]} | sed "s/\\.gz$//"`
	outfile="`pwd`/${myArray[1]}.fasta"
	awk -v RS='>' 'NR>1 { gsub("\n", ";", $0); sub(";$", "", $0); print ">"$0 }' "${file}" | head -n 1 | tr ';' '\n' | grep -vE "^>" > ${myArray[1]}.fasta
	sed -i "1i >${myArray[1]}" "${myArray[1]}.fasta"
	echo "${myArray[1]} ${outfile}" >> evolverCyanobacteria.txt
done
# Attempt to run Cactus on my data
# (successive attempts: native, then under Singularity, with/without --restart)
mkdir ./Synteny/TMP
nohup cactus ./Synteny/jobStore ./Synteny/evolverCyanobacteria.txt ./Synteny/evolverCyanobacteria.hal --stats --binariesMode local --logDebug --workDir ./Synteny/TMP --buildAvgs --defaultMemory 4Gi --defaultCores 2 --defaultDisk 100Gi --restart > ./Synteny/cactus.log 2>&1 &
nohup cactus ./Synteny_2021-03-02/jobStore ./Synteny_2021-03-02/evolverCyanobacteria.txt ./Synteny_2021-03-02/evolverCyanobacteria.hal --stats --binariesMode local --logDebug --workDir ./Synteny_2021-03-02/TMP --buildAvgs --defaultMemory 12Gi --defaultCores 2 --defaultDisk 100Gi > ./Synteny_2021-03-02/cactus.log 2>&1 &
nohup singularity exec ./Cactus_docker_1.3.0.sif cactus ./Synteny_2021-03-02/jobStore ./Synteny_2021-03-02/evolverCyanobacteria.txt ./Synteny_2021-03-02/evolverCyanobacteria.hal --stats --binariesMode local --logDebug --workDir ./Synteny_2021-03-02/TMP --buildAvgs > ./Synteny_2021-03-02/cactus.log 2>&1 &
nohup singularity exec ./Cactus_docker_1.3.0.sif cactus ./Synteny_2021-03-02/jobStore ./Synteny_2021-03-02/evolverMycobacterium.txt ./Synteny_2021-03-02/evolverMycobacterium.hal --stats --binariesMode local --logDebug --debugWorker --maxMemory 30Gi --maxDisk 600Gi --maxCores 6 --workDir ./Synteny_2021-03-02/TMP --buildAvgs > ./Synteny_2021-03-02/cactus.log 2>&1 &
nohup singularity exec ./Cactus_docker_1.3.0.sif cactus ./Synteny_2021-03-02/jobStore ./Synteny_2021-03-02/evolverMycobacterium.txt ./Synteny_2021-03-02/evolverMycobacterium.hal --stats --binariesMode local --logDebug --debugWorker --maxMemory 30Gi --maxDisk 600Gi --maxCores 6 --workDir ./Synteny_2021-03-02/TMP --buildAvgs --restart > ./Synteny_2021-03-02/cactus.log 2>&1 &
singularity exec /mnt/vol1000/Cactus_docker_1.3.0.sif cactus-prepare examples/evolverMammals.txt --outDir steps-output --outSeqFile steps-output/evolverMammals.txt --outHal steps-output/evolverMammals.hal --jobStore jobstore
| true
|
7f9cc5ebbc1cccd30370e1914a3e9bc770d1f02e
|
Shell
|
phatblat/dotfiles
|
/.dotfiles/xcode/dsym.zsh
|
UTF-8
| 704
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#-------------------------------------------------------------------------------
#
# xcode/dsym.zsh
# Functions for locating DWARF debug information (dSYM).
#
#-------------------------------------------------------------------------------

# Print the dSYM UUIDs of every .dSYM bundle in the current directory, one
# per line: query Spotlight metadata, then strip quotes and spaces.
alias dsym_uuid="mdls -name com_apple_xcode_dsym_uuids -raw *.dSYM | grep -e \\\" | sed 's/[ |\\\"]//g'"

# https://docs.fabric.io/ios/crashlytics/advanced-setup.html#uploading-dsyms-manually
# Locate the dSYM bundle matching a DWARF UUID via Spotlight.
#   $1 - build UUID (as printed by `dwarfdump -u`)
# Prints usage and returns 1 when not given exactly one argument.
function finddsym {
  if (( $# != 1 )); then
    echo "Usage: finddsym uuid"
    return 1
  fi
  local uuid=$1
  mdfind "com_apple_xcode_dsym_uuids == <$uuid>"
}
# Dump the DWARF UUID info for a dSYM bundle.
#   $1 - path to the .dSYM (quoted below: bundle paths often contain spaces)
# Prints usage and returns 1 when not given exactly one argument.
function dsyminfo {
  if [[ $# -ne 1 ]]; then
    echo "Usage: dsyminfo path/to/dsym"
    return 1
  fi
  dwarfdump -u "$1"
}
| true
|
cdd38163143a84fc607a0246fdf2a7d0046ef337
|
Shell
|
isabella232/vogl
|
/bin/ninja
|
UTF-8
| 643
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Wrapper for the bundled ninja binary: prints a colorful timestamp banner
# via toilet, then runs the architecture-specific ninja under
# /usr/bin/time so every build reports its wall-clock duration.
#
# toilet website: http://caca.zoy.org/wiki/toilet
# list all fonts in figlet
# for font in `ls -1 /usr/share/figlet | grep .flf | cut -d . -f 1`; do echo "$font:"; figlet -f $font Hello World; done
# time and date in terminal
# while true; do echo -e "$(date '+%D %T' | toilet -f term -F border --gay)\033[3A"; sleep 1; done
date '+%b %d %H:%M' | toilet -t -F border --gay

# time without /usr/bin does built-in time command (help time)
# for /usr/bin/time (man time)
SCRIPT=$(readlink -f "$0")
SCRIPTPATH=$(dirname "${SCRIPT}")
# Pick the ninja binary matching the hardware platform (uname -i).
NINJA_APP=${SCRIPTPATH}/$(uname -i)/ninja
# Quote the binary path: unquoted it word-splits if the path has spaces.
/usr/bin/time -f "\ntime:%E for '%C'" "${NINJA_APP}" "$@"
| true
|
01cda3f6642464b74db888c8988c30db07e338ff
|
Shell
|
gmalysa/dotfiles
|
/scripts/fstr_comp
|
UTF-8
| 230
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Bash completion for the 'fed' alias: offers the candidate words stored
# (one per whitespace-separated token) in /tmp/fstr_options.
_fstr_comp()
{
	local cur options
	COMPREPLY=()
	cur="${COMP_WORDS[COMP_CWORD]}"
	# $(< file) reads the word list without spawning cat.
	options=$(< /tmp/fstr_options)
	COMPREPLY=($(compgen -W "${options}" -- "${cur}"))
}

alias fed='vim'
complete -F _fstr_comp fed
| true
|
1538eba818712f7678ec37f8e3b9be9dedeef51c
|
Shell
|
ghuntley/monorepo
|
/third_party/git/t/perf/p0100-globbing.sh
|
UTF-8
| 868
| 3.5625
| 4
|
[
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"GPL-3.0-only",
"GPL-2.0-only",
"MIT"
] |
permissive
|
#!/bin/sh

test_description="Tests pathological globbing performance

Shows how Git's globbing performance performs when given the sort of
pathological patterns described in at https://research.swtch.com/glob
"

. ./perf-lib.sh

# Pattern sizes to exercise; only the small set is used below.
test_globs_big='10 25 50 75 100'
test_globs_small='1 2 3 4 5 6'

test_perf_fresh_repo

# Setup: build a 100-character "aaa...a" ref name, and files refglob.N
# each holding the pattern (a*)^N followed by "b".
test_expect_success 'setup' '
	for i in $(test_seq 1 100)
	do
		printf "a" >>refname &&
		for j in $(test_seq 1 $i)
		do
			printf "a*" >>refglob.$i
		done &&
		echo b >>refglob.$i
	done &&
	test_commit test $(cat refname).t "" $(cat refname).t
'

# Time the pathological patterns against the ref-globbing code path.
for i in $test_globs_small
do
	test_perf "refglob((a*)^nb) against tag (a^100).t; n = $i" '
		git for-each-ref "refs/tags/$(cat refglob.'$i')b"
	'
done

# Same patterns against the pathspec/file-globbing code path.
for i in $test_globs_small
do
	test_perf "fileglob((a*)^nb) against file (a^100).t; n = $i" '
		git ls-files "$(cat refglob.'$i')b"
	'
done

test_done
| true
|
17d46411fe33c2741bdbceb1f4ff7769bd507cbd
|
Shell
|
ToQoz/bin
|
/ghq-git-check-all-repo
|
UTF-8
| 626
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Audit every ghq-managed repository for local state that is not on origin:
#   - missing "origin" remote
#   - uncommitted changes to tracked files
#   - untracked files
#   - differences between master and origin/master
# Prints one diagnostic line per offending repository.

# Read paths line-by-line so repository paths containing spaces survive.
ghq list --full-path | while IFS= read -r repo; do
	# Skip (and report) paths we cannot enter instead of silently running
	# the git commands below in the previous repository's directory.
	cd "$repo" || { echo "$repo: cannot enter directory"; continue; }

	if ! git remote -v | grep -q origin; then
		echo "$repo: origin is not found"
		continue
	fi

	if [ -n "$(git diff --shortstat)" ]; then
		echo "$repo: repo has uncommitted files"
		continue
	fi

	if [ -n "$(git ls-files --others --exclude-standard)" ]; then
		echo "$repo: repo has untracked files"
		continue
	fi

	# A failing diff (e.g. missing master or origin/master) is reported
	# separately from a non-empty one.
	if ! DIFF=$(git diff master origin/master); then
		echo "$repo: repo has error"
		continue
	fi
	if [ -n "$DIFF" ]; then
		echo "$repo: repo has unpushed commits"
		continue
	fi
done
| true
|
be2c0a3f7aa55b145c57d13925f97f0652cf0178
|
Shell
|
Muuo/RPiVideoLooper
|
/startvideos.sh
|
UTF-8
| 539
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Raspberry Pi video looper: build a playlist of every .mp4 in the videos
# folder (SD card or USB stick, per /boot/looperconfig.txt) and keep
# omxplayer looping over it forever, restarting it whenever it exits.

#Make a newline a delimiter instead of a space
SAVEIFS=$IFS
IFS=$(echo -en "\n\b")

# looperconfig.txt line "usb=1" selects the USB stick as the video source.
usb=`cat /boot/looperconfig.txt | grep usb | cut -c 5- | tr -d '\r' | tr -d '\n'`
FILES=/home/pi/videos/
if [[ $usb -eq 1 ]]; then
	FILES=/media/USB/videos/
fi

# Assemble the playlist of .mp4 files as omxplayer arguments.
var_1=""
for f in `ls $FILES | grep ".mp4$"`
do
	var_1="$var_1 $FILES$f"
done

#Reset the IFS
IFS=$SAVEIFS

# Watchdog loop: restart omxplayer whenever it is not running. The sleep
# keeps the check from busy-spinning at 100% CPU while playback is active.
while true; do
	if pgrep omxplayer > /dev/null
	then
		echo 'running'
		sleep 1
	else
		/home/pi/omxplayer-dist/usr/bin/omxplayer --loop -r -o hdmi $var_1
	fi
done
| true
|
5c0c97791b6627e444268e193b829030f76ef411
|
Shell
|
isabella232/reference-wallet
|
/scripts/run_dmw_test.sh
|
UTF-8
| 1,802
| 3.40625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0

# CI driver for the Diem Mini-Wallet (DMW) test suite against the Diem
# Reference Wallet (DRW): boots DRW via docker, starts a proxy in front of
# it, then runs `dmw test` against the proxy. Logs land in ../output.

# Kill every background job (DRW launcher, proxy) on exit, pass or fail.
trap 'jobs -p | xargs kill -9' EXIT

script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
output_dir="$script_dir/../output"
backend_dir="$script_dir/../backend"

# ifconfig is used below to discover the host's non-loopback IP.
if ! command -v ifconfig &> /dev/null
then
	sudo apt install net-tools
fi

# First non-loopback IPv4 address. NOTE(review): used for the stub server
# URL handed to dmw — presumably it must be reachable from inside the
# containers, hence host IP rather than localhost; confirm.
host_ip=$(ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1' | grep -v -e'^$' | head -n 1)
if [ -z "$host_ip" ]
then
	echo "COULD NOT GET HOST IP!"
	exit 1
fi
echo "Networks:"
ip addr
echo "Host IP: $host_ip"

cd "$script_dir/../"

# Regenerate backend/.env for this run.
rm -rf backend/.env
VASP_BASE_URL=http://localhost:8080/api/offchain PIPENV_PIPFILE=backend/Pipfile pipenv run python3 ./scripts/set_env.py
echo "Env:"
cat backend/.env

# Prepare output dir
mkdir -p "$output_dir"
rm -rf "$output_dir/*"
set +e
# Launch DRW
echo "Launching DRW..."
./scripts/lrw.sh develop 8080 > "$output_dir/docker.txt" 2>&1 &
# Wait for server to report readiness
./scripts/wait_for_server_ready.sh 6
# Wait additionally as waiting for log lines is a poor indicator of readiness
sleep 15
# Launch Proxy
cd "$backend_dir"
echo "Launching Proxy..."
DRW_URL_PREFIX=http://localhost:8080 MW_DRW_PROXY_PORT=3150 MW_DRW_PROXY_HOST=0.0.0.0 pipenv run python3 ./tests/mw_drw_proxy/proxy.py > "$output_dir/proxy.txt" 2>&1 &
sleep 5
# Write pre output
docker-compose -f ../docker/docker-compose.yaml -f ../docker/dev.docker-compose.yaml logs > "$output_dir/pre_test_docker.txt" 2>&1
# Start testing!
echo "Starting Test..."
pipenv run dmw -- test --verbose --target http://localhost:3150 --stub-bind-host=0.0.0.0 --stub-bind-port 4542 --stub-diem-account-base-url "http://$host_ip:4542" | tee "$output_dir/test.txt" 2>&1 || true
| true
|
0f4a286add307faac7d78be6b8f3cd7feb7959c2
|
Shell
|
yamasampo/treeEstimator
|
/src/bootstrap_dnapars.sh
|
UTF-8
| 865
| 3.78125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/local/bin/zsh
## Commands to run dnapars with bootstrapped data
## Pipeline: FASTA -> Phylip -> seqboot (bootstrap replicates) -> dnapars
## (parsimony trees) -> consense (consensus tree), then collect outputs.
##   $1 - input FASTA alignment
##   $2 - output directory (a subdirectory named after the FASTA is created)

# Specify virtual environment for BioPython
source activate biopython

# Make output directory
fasta=$1
outdir=$2
fname=`basename $fasta`
# Sub-directory name = FASTA basename without its extension.
subdir="$(cut -d'.' -f1 <<<"$fname")"
dirname="$outdir/$subdir"
mkdir "$dirname"
echo "Input FASTA: $fasta"
echo "Output directory: $dirname"

# Convert FASTA to Phylip format
python ./fasta2phylip.py "$fasta" "infile_seqboot.phy"

# Bootstrap an alignment
# NOTE(review): the PHYLIP tools appear to read .ctl control files and
# write fixed names ("outfile"/"outtree"), hence the renames between
# steps — confirm the .ctl files point at the right inputs.
seqboot < seqboot.ctl
# mv infile infile_seqboot
mv outfile outfile_seqboot

# Infer tree for bootstrap data
dnapars < dnapars.ctl
mv outfile outfile_dnapars
mv outtree outtree_dnapars

# Find consensus tree
consense < consense.ctl
mv outfile outfile_consense
mv outtree outtree_consense

# Move data into sub directory
cp $fasta "$dirname"
mv infile_seqboot.phy "$dirname"
mv out* "$dirname"
| true
|
aecc15b0e0601b2970d3118e647079a2d1190e29
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/kst-git/PKGBUILD
|
UTF-8
| 1,244
| 2.625
| 3
|
[] |
no_license
|
# PKGBUILD for the development (git HEAD) build of Kst.
_pkgname="kst"
pkgname="${_pkgname}-git"
pkgrel=1
pkgver=2.0.8r3268.540a599a
pkgdesc="Fast real-time large-dataset viewing and plotting tool for KDE"
arch=('i686' 'x86_64')
url="http://kst-plot.kde.org"
license=('GPL')
depends=('gsl' 'qt5-base' 'qt5-svg' 'muparser' 'python2-scipy>=0.9' 'python2-numpy>=1.6' 'cfitsio' 'python2-pyside')
optdepends=(
	'getdata: provides support for files in the Dirfile format'
	'libmatio: provides support for Matlab binary files'
)
makedepends=('cmake')
#install=$pkgname.install
source=("git://github.com/Kst-plot/kst.git")
# VCS source: checksum skipped by design.
md5sums=('SKIP' )

# Version string = base release + commit count + short hash of HEAD.
pkgver() {
	cd "${srcdir}/${_pkgname}"
	printf "2.0.8r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}

conflicts=("kst")

build() {
	cd "${srcdir}/${_pkgname}"
	# Ship the Python bindings alongside the sources before configuring.
	cp -r ./cmake/pyKst/* ./pyKst/
	cmake ./ \
		-Dkst_release=2 \
		-Dkst_version_string=2.0.8 \
		-Dkst_svnversion=0 \
		-Dkst_python=1 \
		-DPYTHON_EXECUTABLE=/usr/bin/python2.7 \
		-Dkst_python_prefix=/usr/lib/python2.7 \
		-Dkst_install_prefix=/usr \
		-Dkst_qt5=ON
}

# NOTE(review): prepare() currently only cd's — kept as a placeholder hook.
prepare(){
	cd "${srcdir}/${_pkgname}"
}

package() {
	cd "${srcdir}/${_pkgname}"
	make -j$(nproc) DESTDIR="${pkgdir}" install
	install -D -m644 "COPYING" "$pkgdir/usr/share/licenses/${_pkgname}/LICENSE"
}
| true
|
3017e2a66fc9357df00737ef0977a040ccf0e1ed
|
Shell
|
fffilo/gitsound
|
/gitsound.sh
|
UTF-8
| 3,519
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# gitsound installer: sets up a global git template whose hook scripts play
# a sound from ~/.git_sounds whenever the corresponding git hook fires.
# Steps: find a media player and a downloader, write the template hooks,
# create the sounds directory, then fetch a few default sounds.

#echo "Config..."
TEMPLATE="${HOME}/.git_template"
SOUNDS="${HOME}/.git_sounds"
# Every git hook a sound can be attached to.
HOOKS=( "applypatch-msg" "pre-applypatch" "post-applypatch" "pre-commit" "prepare-commit-msg" "commit-msg" "post-commit" "pre-rebase" "post-checkout" "post-merge" "pre-push" "pre-receive" "update" "post-receive" "post-update" "pre-auto-gc" "post-rewrite" "rebase" )
# Candidate audio players, in order of preference.
PLAYERS=( "afplay" "aplay" "mplayer" "ffplay" "cvlc" "nvlc" "mocp" "play" "powershell.exe" )
# Candidate download commands with their output-file flag.
DOWNLOADERS=( "curl -o" "wget -O" )

#echo "Searching for git package..."
git --version > /dev/null 2>&1
if [[ $? -ne 0 ]]; then
	echo "Git package not found on this system."
	exit 100
fi

#echo "Searching for media player..."
# Pick the first installed player; keep any extra args in PLAYERCMD.
PLAYER=""
PLAYERCMD=""
for ITEM in "${PLAYERS[@]}"; do
	ARR=($ITEM)
	CMD=${ARR[0]}
	PRM=${ARR[@]:1}
	if [[ "$PLAYER" == "" ]] && `which ${CMD} >/dev/null 2>&1`; then
		PLAYER=`which ${CMD}`
		PLAYERCMD="${PLAYER} ${PRM} "
	fi
done
if ! `which $PLAYER >/dev/null 2>&1`; then
	echo "Unable do determine media player."
	exit 200
fi

#echo "Searching for downloader..."
# Same first-match logic for the downloader.
DOWNLOADER=""
DOWNLOADERCMD=""
for ITEM in "${DOWNLOADERS[@]}"; do
	ARR=($ITEM)
	CMD=${ARR[0]}
	PRM=${ARR[@]:1}
	if [[ "$DOWNLOADER" == "" ]] && `which ${CMD} >/dev/null 2>&1`; then
		DOWNLOADER="`which ${CMD}`"
		DOWNLOADERCMD="${DOWNLOADER} ${PRM} "
	fi
done
if ! which $DOWNLOADER >/dev/null 2>&1; then
	echo "Unable do determine downloader."
	exit 300
fi

# Warn before overwriting the user's init.templatedir setting, and show
# how to restore the previous value.
echo "This action will create git template directory and change global init.templatedir configuration."
CONFIG=`git config --global init.templatedir`
if [[ $CONFIG != $TEMPLATE ]]; then
	echo -e "You can revert old config by executing: \e[93mgit config --global init.templatedir \"${CONFIG}\"\e[0m"
fi
while true; do
	read -p "Do you wish to continue [y/n]? " yn
	case $yn in
		[Yy]* ) break;;
		[Nn]* ) echo "Canceling..."; exit;;
		* ) echo "Please answer yes or no.";;
	esac
done

echo "Creating git template directory..."
mkdir -p ${TEMPLATE}/hooks
git config --global init.templatedir ${TEMPLATE}

echo "Creating git hooks..."
# Each generated hook plays ~/.git_sounds/<hook>.wav in the background if
# it exists. The PowerShell branch builds a Windows-style invocation;
# NOTE(review): the ${SOUNDS:1:1}:${SOUNDS:2} munging presumably turns
# /c/Users/... into c:/Users/... — confirm under WSL/MSYS.
for HOOK in "${HOOKS[@]}"; do
	echo "#!/bin/sh" > ${TEMPLATE}/hooks/${HOOK}
	echo "" >> ${TEMPLATE}/hooks/${HOOK}
	echo "if [ -f ${SOUNDS}/${HOOK}.wav ]; then" >> ${TEMPLATE}/hooks/${HOOK}
	if [[ "$PLAYER" == *powershell.exe ]]; then
		echo -e "\t$PLAYERCMD -c '(New-Object Media.SoundPlayer \"${SOUNDS:1:1}:${SOUNDS:2}/${HOOK}.wav\").PlaySync();' </dev/null >/dev/null 2>&1 &" >> ${TEMPLATE}/hooks/${HOOK}
	else
		echo -e "\t$PLAYERCMD ${SOUNDS}/${HOOK}.wav </dev/null >/dev/null 2>&1 &" >> ${TEMPLATE}/hooks/${HOOK}
	fi
	echo "fi" >> ${TEMPLATE}/hooks/$HOOK
	chmod 755 ${TEMPLATE}/hooks/$HOOK
done

echo "Creating git sound directory..."
mkdir -p ${SOUNDS}
echo "Add wav file for each git hook:" > ${SOUNDS}/README
for HOOK in "${HOOKS[@]}"; do
	echo -e "\t${HOOK}.wav" >> ${SOUNDS}/README
done
chmod 644 ${SOUNDS}/README

echo "Downloading sounds..."
# Default Super Mario World sounds for the most common hooks.
$DOWNLOADERCMD ${SOUNDS}/post-commit.wav http://themushroomkingdom.net/sounds/wav/smw/smw_egg_hatching.wav &> /dev/null
chmod 644 ${SOUNDS}/post-commit.wav
$DOWNLOADERCMD ${SOUNDS}/post-checkout.wav http://themushroomkingdom.net/sounds/wav/smw/smw_1-up.wav &> /dev/null
chmod 644 ${SOUNDS}/post-checkout.wav
$DOWNLOADERCMD ${SOUNDS}/post-merge.wav http://themushroomkingdom.net/sounds/wav/smw/smw_power-up.wav &> /dev/null
chmod 644 ${SOUNDS}/post-merge.wav
$DOWNLOADERCMD ${SOUNDS}/pre-push.wav http://themushroomkingdom.net/sounds/wav/smw/smw_power-up_appears.wav &> /dev/null
chmod 644 ${SOUNDS}/pre-push.wav
echo "Done."
| true
|
f1bb676ed116e6d152916b680c896f24133a8543
|
Shell
|
venoodkhatuva12/pem_key_generator
|
/pemkey.sh
|
UTF-8
| 1,418
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
#author: Vinood NK
#script to Add user and generate Pem file for Login
#Usage : Adduser , generate pemfile, and add in sudoer.

# Root is required for adduser, chown and editing /etc/sudoers.
if [ "$(id -u)" != "0" ]; then
	echo "This script must be run as root" 1>&2
	exit 1
fi

read -r -p "Please Enter the Username : " user

# Anchor the match on "^user:" so e.g. "bob" does not also count "bobby"
# or a name appearing in another account's GECOS field.
user_present=$(grep -c "^$user:" /etc/passwd)
if [ "$user_present" == "1" ]; then
	echo -e "\nUser $user already present No need to create .. "
	echo -e "\nGenerating keys for $user ... "
else
	adduser "$user"
fi

read -r -p "Please Enter the Hostname : " hostname

# Generate an RSA key pair named after the user and install the public
# half as the account's authorized key.
ssh-keygen -t rsa -f "$user"
mkdir "/home/$user/.ssh"
cat "$user.pub" > "/home/$user/.ssh/authorized_keys"
chmod -R 700 "/home/$user/.ssh/"
chown -R "$user:$user" "/home/$user/.ssh/"
# The private key becomes the downloadable .pem, namespaced by hostname.
mv "$user" "/tmp/$hostname-$user.pem"

read -r -p "Do you want to add this User to Sudoer(Yes/No)? : " response
# Anchor at start of line so only a real sudoers entry for this user counts.
sudoers_present=$(grep -c "^$user " /etc/sudoers)
if [ "$sudoers_present" -ge "1" ]; then
	echo -e "\nEntry for user in sudoers already exists !!"
else
	if [[ $response =~ ^([yY][eE][sS]|[yY])$ ]]
	then
		# Append the entry; inserting at a hard-coded line number silently
		# corrupts sudoers files shorter than that line count.
		echo "$user ALL=(ALL) ALL" >> /etc/sudoers
	else
		exit
	fi
fi

rm -f "$user" "$user.pub"
echo -e "\n Keys generated successfully ...\n"
echo -e "\n Please find pem for user $user at /tmp/$hostname-$user.pem"
| true
|
5c228ef4714ecdaeebb2b879cd960d9eecbd76e0
|
Shell
|
1-cool/Linux
|
/view-connections.sh
|
UTF-8
| 621
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
############################################################################
# List the clients currently connected to the local v2ray server and look
# up each peer IP's registered location via cip.cc.
############################################################################
# Read the v2ray listen port from the config (digits of the "port" line).
PORT=$(grep \"port /etc/v2ray/config.json | tr -cd "[0-9]")
# Collect the unique peer IPs of ESTABLISHED connections on that port
# (lsof column 9 is "local->peer"; split off the peer address, drop port).
LIST=$(lsof -i -n -P | grep ESTABLISHED | grep ${PORT} | awk '{print $9}' | awk -F '->' '{print $2}' | awk -F ':' '{print $1}' | sort -u)
if [ -z "${LIST}" ];
then
	echo "当前无连接"
else
	for IP in ${LIST}
	do
		# Query the IP and its geolocation.
		curl https://cip.cc/${IP}
	done
fi
| true
|
77df756e94a7e48e6b6ed45a1f122619fbe7b83d
|
Shell
|
Alexxfromgit/task10_12_1
|
/task10_12_1.sh
|
UTF-8
| 6,332
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
######################################## PREPARE #######################################
source $(dirname $0)/config
mkdir -p networks /var/lib/libvirt/images/$VM1_NAME /var/lib/libvirt/images/$VM2_NAME config-drives/$VM1_NAME-config config-drives/$VM2_NAME-config
echo 1 > /proc/sys/net/ipv4/ip_forward
echo "$VM1_MANAGEMENT_IP $VM1_NAME
$VM2_MANAGEMENT_IP $VM2_NAME" >> /etc/hosts
VM1_MAC=52:54:00:`(date; cat /proc/interrupts) | md5sum | sed -r 's/^(.{6}).*$/\1/; s/([0-9a-f]{2})/\1:/g; s/:$//;'`
VIRT_TYPE=$(egrep -c '(vmx|svm)' /proc/cpuinfo)
if (( $VIRT_TYPE > 0 )); then VIRT_TYPE="kvm"; else VIRT_TYPE="qemu"; fi
####################################### CLOUD INIT #########################################
mkdir -p $(dirname $SSH_PUB_KEY)
yes "y" | ssh-keygen -t rsa -N "" -f $(echo $SSH_PUB_KEY | rev | cut -c5- | rev)
###### vm1 user-data ######
cat << EOF > config-drives/$VM1_NAME-config/user-data
#cloud-config
ssh_authorized_keys:
- $(cat $SSH_PUB_KEY)
apt_update: true
apt_sources:
packages:
runcmd:
- echo 1 > /proc/sys/net/ipv4/ip_forward
- iptables -A INPUT -i lo -j ACCEPT
- iptables -A FORWARD -i $VM1_EXTERNAL_IF -o $VM1_INTERNAL_IF -j ACCEPT
- iptables -t nat -A POSTROUTING -o $VM1_EXTERNAL_IF -j MASQUERADE
- iptables -A FORWARD -i $VM1_EXTERNAL_IF -m state --state ESTABLISHED,RELATED -j ACCEPT
- iptables -A FORWARD -i $VM1_EXTERNAL_IF -o $VM1_INTERNAL_IF -j REJECT
- ip link add $VXLAN_IF type vxlan id $VID remote $VM2_INTERNAL_IP local $VM1_INTERNAL_IP dstport 4789
- ip link set vxlan0 up
- ip addr add $VM1_VXLAN_IP/24 dev vxlan0
EOF
###### vm2 user-data ######
cat << EOF > config-drives/$VM2_NAME-config/user-data
#cloud-config
ssh_authorized_keys:
- $(cat $SSH_PUB_KEY)
apt_update: true
apt_sources:
packages:
- apt-transport-https
- ca-certificates
- curl
- software-properties-common
runcmd:
- ip link add $VXLAN_IF type vxlan id $VID remote $VM1_INTERNAL_IP local $VM2_INTERNAL_IP dstport 4789
- ip link set vxlan0 up
- ip addr add $VM2_VXLAN_IP/24 dev vxlan0
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
- add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
- apt update
- apt install docker-ce docker-compose -y
EOF
###### vm1 meta-data ######
echo "hostname: $VM1_NAME
local-hostname: $VM1_NAME
network-interfaces: |
auto $VM1_EXTERNAL_IF
iface $VM1_EXTERNAL_IF inet dhcp
dns-nameservers $VM_DNS
auto $VM1_INTERNAL_IF
iface $VM1_INTERNAL_IF inet static
address $VM1_INTERNAL_IP
netmask $INTERNAL_NET_MASK
auto $VM1_MANAGEMENT_IF
iface $VM1_MANAGEMENT_IF inet static
address $VM1_MANAGEMENT_IP
netmask $MANAGEMENT_NET_MASK" > config-drives/$VM1_NAME-config/meta-data
###### vm2 meta-data ######
echo "hostname: $VM2_NAME
local-hostname: $VM2_NAME
network-interfaces: |
auto $VM2_INTERNAL_IF
iface $VM2_INTERNAL_IF inet static
address $VM2_INTERNAL_IP
netmask $INTERNAL_NET_MASK
gateway $VM1_INTERNAL_IP
dns-nameservers $EXTERNAL_NET_HOST_IP $VM_DNS
auto $VM2_MANAGEMENT_IF
iface $VM2_MANAGEMENT_IF inet static
address $VM2_MANAGEMENT_IP
netmask $MANAGEMENT_NET_MASK" > config-drives/$VM2_NAME-config/meta-data
###### MK ISO ######
mkisofs -o $VM1_CONFIG_ISO -V cidata -r -J --quiet config-drives/$VM1_NAME-config
mkisofs -o $VM2_CONFIG_ISO -V cidata -r -J --quiet config-drives/$VM2_NAME-config
######################################## CONF NETWORK ##############################################
###### EXTERNAL ######
echo "
<network>
<name>$EXTERNAL_NET_NAME</name>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<ip address='$EXTERNAL_NET_HOST_IP' netmask='$EXTERNAL_NET_MASK'>
<dhcp>
<range start='$EXTERNAL_NET.2' end='$EXTERNAL_NET.254'/>
<host mac='$VM1_MAC' name='vm1' ip='$VM1_EXTERNAL_IP'/>
</dhcp>
</ip>
</network>" > networks/$EXTERNAL_NET_NAME.xml
###### INTERNAL ######
echo "
<network>
<name>$INTERNAL_NET_NAME</name>
</network>" > networks/$INTERNAL_NET_NAME.xml
###### MANAGEMENT ######
echo "
<network>
<name>$MANAGEMENT_NET_NAME</name>
<ip address='$MANAGEMENT_HOST_IP' netmask='$MANAGEMENT_NET_MASK'/>
</network>" > networks/$MANAGEMENT_NET_NAME.xml
###### APPLY XML ######
virsh net-destroy default
virsh net-undefine default
virsh net-define networks/$EXTERNAL_NET_NAME.xml
virsh net-start $EXTERNAL_NET_NAME
virsh net-autostart $EXTERNAL_NET_NAME
virsh net-define networks/$INTERNAL_NET_NAME.xml
virsh net-start $INTERNAL_NET_NAME
virsh net-autostart $INTERNAL_NET_NAME
virsh net-define networks/$MANAGEMENT_NET_NAME.xml
virsh net-start $MANAGEMENT_NET_NAME
virsh net-autostart $MANAGEMENT_NET_NAME
####################################### VIRT INSTALL ##################################################
wget -O /var/lib/libvirt/images/ubunut-server-16.04.qcow2 $VM_BASE_IMAGE
###### VM1 CREATE ######
cp /var/lib/libvirt/images/ubunut-server-16.04.qcow2 /var/lib/libvirt/images/$VM1_NAME/$VM1_NAME.qcow2
qemu-img resize /var/lib/libvirt/images/$VM1_NAME/$VM1_NAME.qcow2 +3GB
virt-install \
--name $VM1_NAME\
--ram $VM1_MB_RAM \
--vcpus=$VM1_NUM_CPU \
--$VM_TYPE \
--os-type=linux \
--os-variant=ubuntu16.04 \
--disk path=$VM1_HDD,format=qcow2,bus=virtio,cache=none \
--disk path=$VM1_CONFIG_ISO,device=cdrom \
--graphics vnc,port=-1 \
--network network=$EXTERNAL_NET_NAME,mac=\'$VM1_MAC\' \
--network network=$INTERNAL_NET_NAME \
--network network=$MANAGEMENT_NET_NAME \
--noautoconsole \
--quiet \
--virt-type $VM_VIRT_TYPE \
--import
virsh autostart $VM1_NAME
sleep 300
###### VM2 CREATE ######
cp /var/lib/libvirt/images/ubunut-server-16.04.qcow2 /var/lib/libvirt/images/$VM2_NAME/$VM2_NAME.qcow2
qemu-img resize /var/lib/libvirt/images/$VM2_NAME/$VM2_NAME.qcow2 +3GB
virt-install \
--name $VM2_NAME\
--ram $VM2_MB_RAM \
--vcpus=$VM2_NUM_CPU \
--$VM_TYPE \
--os-type=linux \
--os-variant=ubuntu16.04 \
--disk path=$VM2_HDD,format=qcow2,bus=virtio,cache=none \
--disk path=$VM2_CONFIG_ISO,device=cdrom \
--graphics vnc,port=-1 \
--network network=$INTERNAL_NET_NAME \
--network network=$MANAGEMENT_NET_NAME \
--noautoconsole \
--quiet \
--virt-type $VM_VIRT_TYPE \
--import
virsh autostart $VM2_NAME
virsh list
echo '###### ALL DONE ######'
exit
| true
|
31a6e4aa9602b18a08338d77bb09d362049dbc6f
|
Shell
|
couchbasedeps/erlang
|
/scripts/build-docker-otp
|
UTF-8
| 336
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build an Erlang/OTP Docker image for the requested word size (32 or 64)
# and run it, mounting ./logs from the host into the container.
#
# Usage: build-docker-otp 32|64 [command] [arg]...

# Fail fast: a failed archive or image build must not fall through to
# 'docker run' (the original script kept going on errors).
set -e

if [ $# -lt 1 ]; then
    echo "Usage $0 32|64 [command] [arg]..."
    exit 1
fi

ARCH="$1"
shift

# Pack the current HEAD as the build-context payload; the per-arch
# Dockerfile unpacks it under /buildroot/otp.
git archive --format=tar.gz --prefix=otp/ HEAD >scripts/otp.tar.gz
docker build -t otp --file "scripts/Dockerfile.$ARCH" scripts
rm scripts/otp.tar.gz

# Forward the remaining arguments to the container.  Plain "$@" is safe
# in every modern shell; the original ${1+"$@"} idiom was a workaround
# for ancient Bourne shells.
docker run --volume="$PWD/logs:/buildroot/otp/logs" -i --rm otp "$@"
| true
|
8f734d1727178a7dba3b412983118d2af0cb2236
|
Shell
|
kliment-olechnovic/voronota
|
/document.bash
|
UTF-8
| 4,668
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generates the Voronota documentation from the built-in help output of
# the 'voronota' binaries plus static markdown resources: an HTML page
# (index.html), a Markdown README and a troff man page.
cd $(dirname "$0")
# Throwaway working directory, removed on any exit path by the trap.
TMPLDIR=$(mktemp -d)
trap "rm -r $TMPLDIR" EXIT
################################################################################
# Assemble the complete documentation as Markdown by reformatting the
# tools' --help output with sed, and collect it in the braces group.
{
cat ./resources/texts/intro.markdown
echo -e "# Command reference"
./voronota \
| grep 'Commands:' -A 999999 \
| sed 's/^Commands:/## List of all commands/' \
| sed 's/^\([[:alpha:]]\S*\)/* \1/'
./voronota --help \
| grep 'Command ' -A 999999 \
| sed "s/^Command\s\+'\(\S\+\)'.*/## Command '\1'\n\n### Command line arguments:\n\nCOMMAND_OPTIONS_TABLE_HEADER1\nCOMMAND_OPTIONS_TABLE_HEADER2/" \
| sed 's/COMMAND_OPTIONS_TABLE_HEADER1/ Name Type Description/' \
| sed 's/COMMAND_OPTIONS_TABLE_HEADER2/ ------------------------------- ------ ---- ------------------------------------------------------------------------/' \
| sed 's/^\(--[[:alpha:]]\S*\)/ \1/' \
| sed 's/^stdin <-\s*/\n### Input stream:\n\n /' \
| sed 's/^stdout ->\s*/\n### Output stream:\n\n /' \
| sed 's/^\s\+(\(.\+\))/\n \1/'
echo -e "# Wrapper scripts"
echo -e "\n## VoroMQA method script\n"
cat << EOF
'voronota-voromqa' script is an implementation of VoroMQA (Voronoi diagram-based Model Quality Assessment) method using Voronota.
The script interface is presented below:
EOF
./voronota-voromqa -h 2>&1 | tail -n +3 | sed 's/^/ /'
echo -e "\n### Full list of options for the VoroMQA method script\n"
cat << EOF
All options of 'voronota-voromqa' script, in alphabetical order:
EOF
./voronota-voromqa --list-all-options 2>&1 | tail -n +3 | sed 's/^/ /'
echo -e "\n## CAD-score method script\n"
cat << EOF
'voronota-cadscore' script is an implementation of CAD-score (Contact Area Difference score) method using Voronota.
The script interface is presented below:
EOF
./voronota-cadscore -h 2>&1 | tail -n +3 | sed 's/^/ /'
echo -e "\n## Contacts calculation convenience script\n"
cat << EOF
'voronota-contacts' script provides a way for calculating and querying interatomic contacts with just one command (without the need to construct a pipeline from 'voronota' calls).
EOF
./voronota-contacts -h 2>&1 | tail -n +4 | sed 's/^/ /'
echo -e "\n## Volumes calculation convenience script\n"
cat << EOF
'voronota-volumes' script provides a way for calculating and querying atomic volumes with just one command (without the need to construct a pipeline from 'voronota' calls).
EOF
./voronota-volumes -h 2>&1 | tail -n +4 | sed 's/^/ /'
echo -e "\n## Pocket analysis script\n"
cat << EOF
'voronota-pocket' script provides a way for identifying and describing pockets, cavities and channels using the Voronoi tessellation vertices.
EOF
./voronota-pocket -h 2>&1 | tail -n +4 | sed 's/^/ /'
echo -e "\n## Membrane fitting script\n"
cat << EOF
'voronota-membrane' script provides a way for fitting a membrane for a protein struture using VoroMQA-based surface frustration analysis.
EOF
./voronota-membrane -h 2>&1 | tail -n +4 | sed 's/^/ /'
} > $TMPLDIR/documentation.markdown
################################################################################
# CSS injected into the pandoc <head>; the quoted 'EOF' prevents expansion.
cat > $TMPLDIR/include_in_header.html << 'EOF'
<style type="text/css">
a { color: #0000CC; }
td { padding-right: 1em; }
pre { background-color: #DDDDDD; padding: 1em; }
div#TOC > ul > li > ul > li ul { display: none; }
</style>
EOF
# Page preamble (title, quick links, TOC heading) inserted before the body.
{
echo "<h1>$(./voronota | head -1)</h1>"
cat << 'EOF'
<h2>Quick links</h2>
<ul>
<li><a href="./expansion_js/index.html">Voronota-JS</a> (for advanced scripting using JavaScript)</li>
<li><a href="./expansion_gl/index.html">Voronota-GL</a> (for advanced scripting and visualization)</li>
<li><a href="./expansion_gl/web/index.html">Web Voronota-GL</a> (online version of Voronota-GL)</li>
</ul>
<h2>Table of contents</h2>
EOF
} \
> $TMPLDIR/include_before_body.html
# Render the standalone HTML page with a table of contents.
pandoc $TMPLDIR/documentation.markdown -f markdown -t html --toc -H $TMPLDIR/include_in_header.html -B $TMPLDIR/include_before_body.html -s -o ./index.html
################################################################################
# Derive README.md from the same markdown, rewriting html links to README links.
cat $TMPLDIR/documentation.markdown \
| sed 's|\./index.html|./README.md|g' \
| sed 's|expansion_js/index.html|expansion_js/README.md|g' \
| sed 's|expansion_gl/index.html|expansion_gl/README.md|g' \
> ./README.md
################################################################################
# Generate the man page from the dedicated manpage markdown source.
pandoc -s -t man ./resources/texts/manpage.markdown -o "$TMPLDIR/manpage.troff"
mv "$TMPLDIR/manpage.troff" "./voronota.man"
################################################################################
# Recurse into the expansion sub-projects so their docs stay in sync.
./expansion_js/document.bash
./expansion_gl/document.bash
################################################################################
| true
|
1a121ef6ef61299c14d99d5a6c46e1528f78832d
|
Shell
|
OmeGak/dotfiles
|
/git/bin/git-author
|
UTF-8
| 448
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# Prints the Git author information from context.

# Prefer the GIT_AUTHOR_* environment variables set by Git hooks.
name="${GIT_AUTHOR_NAME}"
email="${GIT_AUTHOR_EMAIL}"

# Fall back to the repository/global configuration when either value
# is missing from the environment.
if [ -z "${name}" ] || [ -z "${email}" ]; then
  name="$(git config --get user.name)"
  email="$(git config --get user.email)"
fi

# With no usable source of author information, fail loudly.
if [ -z "${name}" ] || [ -z "${email}" ]; then
  echo 'Error: Missing author information'
  exit 1
fi

echo "${name} <${email}>"
| true
|
6a7dc11c1faf60b67dc69d6fd5c7168a205b2cbd
|
Shell
|
lliicchh/chips
|
/intall_scripts/install-centos.sh
|
UTF-8
| 1,831
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a CentOS 7 development box: switch to a fast mirror, update,
# and install the common tool chain and utilities.  Runs package-manager
# commands only; re-runnable.
# Switch to the 163 (NetEase) mirror
yum install -y wget
sudo mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
#wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.163.com/.help/CentOS7-Base-163.repo
# 1. Rebuild the yum metadata cache for the new mirror
yum clean all
yum makecache
# 2. Update the system
sudo yum -y update
sudo yum -y upgrade
# 3. Install common software
sudo yum install -y git vim tree
sudo yum install -y net-tools lsof tcpdump telnet nc
# 4.gcc
sudo yum install -y gcc g++ make cmake
sudo yum -y install gcc-go
sudo yum install centos-release-scl -y
sudo yum install devtoolset-3-toolchain -y
sudo scl enable devtoolset-3 bash
# 5. Grant the account sudo rights
echo 'ethan ALL=(ALL) ALL'>> /etc/sudoers # append the rule to the sudoers file
# 6. Install small utilities:
sudo yum -y install lrzsz screen
sudo yum -y install socat nc nmap
sudo yum -y install tree
sudo yum -y install man-pages libstdc++-docs # install development manuals
sudo yum install tcl.x86_64
sudo yum -y install pcre-devel openssl openssl-devel
# 7. java
sudo yum install -y java java-1.8.0-openjdk-devel.x86_64
# others
yum -y install yum-utils
sudo yum-config-manager --add-repo https://openresty.org/yum/cn/centos/OpenResty.repo
yum -y install openresty
# goaccess (web log analyzer) — built from source with UTF-8 and GeoIP
sudo yum install geoip-devel ncurses ncurses-devel glib2-devel -y
wget https://tar.goaccess.io/goaccess-1.3.tar.gz
tar -xzvf goaccess-1.3.tar.gz
cd goaccess-1.3/
./configure --enable-utf8 --enable-geoip=legacy
make
make install
# httpie — easier to read than curl
# yum install httpie -y
sudo yum install epel-release -y
sudo yum install snapd -y
sudo systemctl enable --now snapd.socket
sudo ln -s /var/lib/snapd/snap /snap
sudo snap install http
# Network management tools
sudo yum install sar ss iftop sysstat -y
sudo yum install strace
| true
|
ef9a41fbfa5bff68e4e8cde116afdcca2b6303c5
|
Shell
|
ShadeLab/RhizobiaPanGenome
|
/Plasmids/template.sb
|
UTF-8
| 820
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash --login
# SLURM batch job: plasmid assembly of merged reads with SPAdes.
########## Define Resources Needed with SBATCH Lines ##########
#SBATCH --time=1:00:00 # limit of wall clock time - how long the jo$
#SBATCH --ntasks=1 # number of tasks - how many tasks (nodes) $
#SBATCH --cpus-per-task=4 # number of CPUs (or cores) per task (same $
#SBATCH --mem=200G # memory required per node - amount of me$
#SBATCH --job-name spades_test1 # you can give your job a name for easier $
########## Command Lines to Run ##########
cd /mnt/research/ShadeLab/Bezemek/sra/ ### change to the directory$
# Load the compiler/MPI stack required by this SPAdes module build.
module load GCC/5.4.0-2.26 OpenMPI/1.10.3
module load SPAdes/3.13.0
# Plasmid-only assembly of the pre-merged reads into ./spadesout2.
spades.py --plasmid --merged merged.fastq -o spadesout2
scontrol show job $SLURM_JOB_ID ### write job information to output file
| true
|
1bd67c4749a2dfcfac6954d566064b3afe6b9c77
|
Shell
|
civo/cli
|
/install.sh
|
UTF-8
| 4,484
| 4.25
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
set -e
# Copyright Civo Ltd 2020, all rights reserved
# Installer for the Civo CLI: downloads the latest GitHub release for
# this platform and installs it into BINLOCATION.
# Set VERIFY_CHECKSUM=1 to verify the sha256 of the download (check_hash).
export VERIFY_CHECKSUM=0
export OWNER="civo"
export REPO="cli"
# Command executed after installation to confirm the binary works.
export SUCCESS_CMD="$OWNER version"
# Destination directory for the installed binary.
export BINLOCATION="/usr/local/bin"
###############################
# Get the last version #
###############################
# Resolve the latest release tag by following the GitHub "latest"
# redirect; sets VERSION (tag, e.g. v1.2.3) and VERSION_NUMBER (1.2.3).
# Prints manual installation instructions and exits 1 when the lookup
# fails (e.g. no network).
get_last_version() {
VERSION=""
echo "Finding latest version from GitHub"
# 'curl -sI' fetches only headers; the Location header's last path
# component is the release tag.
VERSION=$(curl -sI https://github.com/$OWNER/$REPO/releases/latest | grep -i "location:" | awk -F"/" '{ printf "%s", $NF }' | tr -d '\r')
VERSION_NUMBER=$(echo "$VERSION" | cut -d "v" -f 2)
echo "$VERSION_NUMBER"
if [ ! "$VERSION" ]; then
echo "Failed while attempting to install $REPO. Please manually install:"
echo ""
echo "1. Open your web browser and go to https://github.com/$OWNER/$REPO/releases"
echo "2. Download the latest release for your platform. Call it '$REPO'."
echo "3. chmod +x ./$REPO"
echo "4. mv ./$REPO $BINLOCATION"
if [ -n "$ALIAS_NAME" ]; then
echo "5. ln -sf $BINLOCATION/$REPO /usr/local/bin/$ALIAS_NAME"
fi
exit 1
fi
}
###############################
# Check for curl #
###############################
# Abort early when curl is unavailable.  'command -v' replaces the
# original 'which' + "$?" dance: it is POSIX, built in, and silent,
# whereas 'which' may itself be missing (returning 127, which the old
# '[ "$?" = "1" ]' test silently ignored).
hasCurl() {
    if ! command -v curl >/dev/null 2>&1; then
        echo "You need curl to use this script."
        exit 1
    fi
}
# --- set arch and suffix, fatal if architecture not supported ---
# Maps the machine architecture onto the release-asset name fragment:
# ARCH becomes "-amd64", "-arm64" or "-arm"; SUFFIX is cleared for the
# amd64/arm cases so setup_verify_os re-detects the OS part.
setup_verify_arch() {
    # Default to the machine hardware name when ARCH was not preset.
    if [ -z "$ARCH" ]; then
        ARCH=$(uname -m)
    fi
    case $ARCH in
        amd64 | x86_64)
            ARCH=-amd64
            SUFFIX=
            ;;
        arm64 | aarch64)
            ARCH=-arm64
            ;;
        arm*)
            ARCH=-arm
            SUFFIX=
            ;;
        *)
            fatal "Unsupported architecture $ARCH"
            ;;
    esac
}
# Maps the kernel name onto the release-asset OS fragment: "-darwin",
# "-linux", or "windows" for MSYS/MinGW environments.  Fails via fatal()
# for anything else.
setup_verify_os() {
    # Detect the OS only when SUFFIX was not already set by the caller.
    if [ -z "$SUFFIX" ]; then
        SUFFIX=$(uname -s)
    fi
    case $SUFFIX in
        Darwin)  SUFFIX="-darwin" ;;
        MINGW*)  SUFFIX="windows" ;;
        Linux)   SUFFIX="-linux" ;;
        *)       fatal "Unsupported OS $SUFFIX" ;;
    esac
}
# Download the release tarball for the resolved version/OS/arch, extract
# it to /tmp, and install the binary into BINLOCATION (printing manual
# instructions instead when BINLOCATION is not writable).
download() {
    URL=https://github.com/$OWNER/$REPO/releases/download/$VERSION/$OWNER-$VERSION_NUMBER$SUFFIX$ARCH.tar.gz
    TARGETFILE="/tmp/$OWNER-$VERSION_NUMBER$SUFFIX$ARCH.tar.gz"
    echo "Downloading package $URL to $TARGETFILE"
    curl -sSL "$URL" --output "$TARGETFILE"
    if [ "$VERIFY_CHECKSUM" = "1" ]; then
        check_hash
    fi
    tar -xf "$TARGETFILE" -C /tmp
    chmod +x "/tmp/$OWNER"
    echo "Download complete."
    if [ ! -w "$BINLOCATION" ]; then
        echo
        echo "============================================================"
        echo " The script was run as a user who is unable to write"
        echo " to $BINLOCATION. To complete the installation the"
        echo " following commands may need to be run manually."
        echo "============================================================"
        echo
        echo " sudo mv /tmp/civo $BINLOCATION/$OWNER"
        if [ -n "$ALIAS_NAME" ]; then
            echo " sudo ln -sf $BINLOCATION/$OWNER $BINLOCATION/$ALIAS_NAME"
        fi
        echo
    else
        echo
        echo "Running with sufficient permissions to attempt to move $OWNER to $BINLOCATION"
        if [ ! -w "$BINLOCATION/$OWNER" ] && [ -f "$BINLOCATION/$OWNER" ]; then
            echo
            echo "================================================================"
            echo " $BINLOCATION/$OWNER already exists and is not writeable"
            echo " by the current user. Please adjust the binary ownership"
            echo " or run sh/bash with sudo."
            echo "================================================================"
            echo
            exit 1
        fi
        # Test the mv status directly instead of sampling "$?" afterwards.
        if mv "/tmp/$OWNER" "$BINLOCATION/$OWNER"; then
            echo "New version of $OWNER installed to $BINLOCATION"
        fi
        # BUG FIX: the original tested and removed the literal name
        # 'TARGETFILE' (missing '$'), so the downloaded archive was
        # never cleaned up.
        if [ -e "$TARGETFILE" ]; then
            rm -f -- "$TARGETFILE"
            rm -f -- "/tmp/$OWNER"
        fi
        ${SUCCESS_CMD}
    fi
}
# Verify the downloaded archive against the published sha256 checksums.
# Exits 1 (after removing the archive) on mismatch; silently skips the
# check when no sha256 tool is available.
check_hash() {
    # Prefer GNU coreutils sha256sum, fall back to BSD/macOS shasum.
    SHACMD="sha256sum"
    if ! command -v sha256sum >/dev/null 2>&1; then
        SHACMD="shasum -a 256"
    fi
    # Resolve only the first word: "$SHACMD" may carry arguments
    # ("shasum -a 256"), which the original `command -v "$SHACMD"` could
    # never resolve — so verification was silently skipped on systems
    # without sha256sum.
    if command -v "${SHACMD%% *}" >/dev/null 2>&1; then
        TARGETFILEDIR=${TARGETFILE%/*}
        # Run from the download directory so the file names listed in the
        # checksum manifest resolve correctly.
        if ! (cd "$TARGETFILEDIR" && curl -sSL https://github.com/$OWNER/$REPO/releases/download/$VERSION/$OWNER-$VERSION_NUMBER-checksums.sha256 | $SHACMD -c >/dev/null); then
            # BUG FIX: the original removed the literal file 'TARGETFILE'.
            rm -f -- "$TARGETFILE"
            echo "Binary checksum didn't match. Exiting"
            exit 1
        fi
    fi
}
# Error: Show error message in red and exit
# The message is passed as a printf *argument*, not as the format
# string, so '%' or '\' sequences in it cannot corrupt the output
# (the original interpolated "${1}" into the format).
fatal() {
    printf 'Error: \033[31m%s\033[39m\n' "$1"
    exit 1
}
{
# Main flow: require curl, resolve platform (arch then OS), look up the
# latest release, then download and install it.
hasCurl
setup_verify_arch
setup_verify_os
get_last_version
download
}
| true
|
71108c5f61df6ba02a76108a6ffbcc4a2a89f69f
|
Shell
|
jdelStrother/BBOSC
|
/lib/vvosc/scripts/installSDK.sh
|
UTF-8
| 334
| 3.0625
| 3
|
[] |
no_license
|
# Install the freshly built SDK under ~/Library/SDKs/$PRODUCT_NAME,
# replacing any previous copy.

# Ensure the parent directory chain exists.  The original created the
# product directory itself (when missing) only to delete it on the very
# next line; creating just the parent has the same end state.
mkdir -p "${HOME}/Library/SDKs"

# remove the existing SDK (if there is one)
rm -rf "${HOME}/Library/SDKs/${PRODUCT_NAME}"

# install the newly-compiled SDK
cp -RfH "build/${BUILD_STYLE}/SDKs/${PRODUCT_NAME}" "${HOME}/Library/SDKs/${PRODUCT_NAME}"

# NOTE(review): 'exit 0' masks a failed copy from the build system;
# kept for backward compatibility with existing build phases.
exit 0
| true
|
6e635612d2cd3e7d6b0b21c8b166eeabe6bf3e7b
|
Shell
|
fsfe/fsfe-website
|
/tools/update_localmenus.sh
|
UTF-8
| 2,814
| 4.3125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# -----------------------------------------------------------------------------
# Update local menus
# -----------------------------------------------------------------------------
# This script is called from the phase 1 Makefile and updates all the
# .localmenu.*.xml files containing the local menus.
# -----------------------------------------------------------------------------
set -e
echo "* Updating local menus"
# -----------------------------------------------------------------------------
# Get a list of all source files containing local menus
# -----------------------------------------------------------------------------
all_files=$(
find * -name "*.xhtml" -not -name "*-template.*" \
| xargs grep -l "</localmenu>" \
| sort
)
# -----------------------------------------------------------------------------
# Split that list by localmenu directory
# -----------------------------------------------------------------------------
declare -A files_by_dir
for file in ${all_files}; do
# A page may declare its menu directory in markup; otherwise fall back
# to the directory the page lives in.
dir=$(xsltproc build/xslt/get_localmenu_dir.xsl ${file})
dir=${dir:-$(dirname ${file})}
files_by_dir[${dir}]="${files_by_dir[${dir}]} ${file}"
done
# -----------------------------------------------------------------------------
# If any of the source files has been updated, rebuild all .localmenu.*.xml
# -----------------------------------------------------------------------------
for dir in ${!files_by_dir[@]}; do
for file in ${files_by_dir[${dir}]}; do
# '-nt': rebuild when any source page is newer than the generated
# English menu (the English file is the freshness reference).
if [ "${file}" -nt "${dir}/.localmenu.en.xml" ]; then
# Find out which languages to generate.
languages=$(
ls ${files_by_dir[${dir}]} \
| sed 's/.*\.\(..\)\.xhtml/\1/' \
| sort --uniq
)
# Compile the list of base filenames of the source files.
basefiles=$(
ls ${files_by_dir[${dir}]} \
| sed 's/\...\.xhtml//' \
| sort --uniq
)
# For each language, create the .localmenu.${lang}.xml file.
for lang in $languages; do
echo "* Creating ${dir}/.localmenu.${lang}.xml"
{
echo "<?xml version=\"1.0\"?>"
echo ""
echo "<feed>"
for basefile in ${basefiles}; do
# Fall back to the English page when no translation exists.
if [ -f "${basefile}.${lang}.xhtml" ]; then
file="${basefile}.${lang}.xhtml"
else
file="${basefile}.en.xhtml"
fi
xsltproc \
--stringparam "link" "/${basefile}.html" \
build/xslt/get_localmenu_line.xsl \
"${file}"
echo ""
done | sort
echo "</feed>"
} > "${dir}/.localmenu.${lang}.xml"
done
# The local menu for this directory has been built, no need to check
# further source files.
break
fi
done
done
| true
|
33fd802c0e3612e5bfbca5ec48cc5eea345caf63
|
Shell
|
perfecto25/salt_cis_centos7
|
/cis/rules/files/6_2_9
|
UTF-8
| 764
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# 6.2.9 Ensure users own their home directories
# Description:
# The user home directory is space defined for the particular user to set local environment
# variables and to store personal files.
# Rationale:
# Since the user is accountable for files stored in the user home directory, the user must be
# the owner of the directory.
#
# Iterate over login-capable, non-system accounts and report home
# directories that are missing or not owned by their user.
# (grep reads /etc/passwd directly — no 'cat |' — and 'grep -E'
# replaces the deprecated 'egrep'; 'read -r' keeps backslashes intact.)
grep -E -v '^(root|halt|sync|shutdown|bin|adm|daemon)' /etc/passwd \
| awk -F: '($7 !="/sbin/nologin" && $7 != "/bin/false") { print $1 " " $6 }' \
| while read -r user dir; do
    if [ ! -d "$dir" ]; then
        echo -e "\n>>> $dir - does not exist."
    else
        owner=$(stat -L -c "%U" "$dir")
        if [ "$owner" != "$user" ]; then
            echo -e "\n>>> $dir is owned by: $owner."
        fi
    fi
done
| true
|
7e0e04d336e8bba2ab0361b03f407b9abd836915
|
Shell
|
remetremet/FreeBSD-Munin-plugins
|
/ipfw.sh
|
UTF-8
| 897
| 3.234375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
#
# Munin plugin: graphs IPFW routed/NAT'ed packets per second on FreeBSD
# by summing the packet counters of 'ipfw -a list' rules.
# 'autoconf' — munin asks whether this plugin can run here.
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
# 'config' — emit the munin graph/field definitions; DERIVE makes munin
# compute a per-second rate from the monotonically growing counters.
if [ "$1" = "config" ]; then
echo 'graph_title IPFW routed packets per second'
echo 'graph_category security'
echo 'graph_args --base 1000 -l 0'
echo 'graph_vlabel Routing pps'
echo 'packets.label Packets'
echo 'packets.type DERIVE'
echo 'packets.min 0'
echo 'nat.label NAT'
echo 'nat.type DERIVE'
echo 'nat.min 0'
exit 0
fi
#now=$(date +%s)
#boot=$(sysctl kern.boottime | sed 's/,//' | awk '{print $5}')
#uptime=$(( ${now} - ${boot} ))
# Sum the packet counters of all " count " rules (zero counters dropped
# before the paste/bc addition); ${cnt:-0} guards the no-rules case.
cnt=$(ipfw -a list | grep " count " | awk '{print $2}' | grep -v "^0$" | paste -s -d + - | bc)
#cnt=$(echo "scale=2; ${cnt}/${uptime}" | bc)
echo "packets.value ${cnt:-0}"
# Same aggregation for the " nat " rules.
cnt=$(ipfw -a list | grep " nat " | awk '{print $2}' | grep -v "^0$" | paste -s -d + - | bc)
#cnt=$(echo "scale=2; ${cnt}/${uptime}" | bc)
echo "nat.value ${cnt:-0}"
| true
|
1bd277c2595fbf1c8b23fcd45ff05ea1243a755c
|
Shell
|
anatol-karlinski/Studia-Linux
|
/bash9.sh
|
UTF-8
| 176
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# spell WORD [DICT]
# Prints 1 when WORD appears in the dictionary file, 0 otherwise.
# DICT defaults to the system Polish word list, so the original
# one-argument call keeps working.
function spell()
{
	local dict="${2:-/usr/share/dict/polish}"
	# grep -qxF: quiet whole-line fixed-string match — replaces the
	# original O(words) shell loop over `cat`'s word-split output.
	# The -n guard keeps the empty word from matching (an empty -F
	# pattern would match every line).
	if [ -n "$1" ] && grep -qxF -- "$1" "$dict" 2>/dev/null; then
		echo 1
	else
		echo 0
	fi
}
# Read the word to check from stdin (quoted to survive whitespace).
read -r lolz
spell "$lolz"
| true
|
0ec17014a9cda215142ec50e46e0161a3e652475
|
Shell
|
jbodah/AnberPorts
|
/AnberPorts/scripts/streaming/Moonlight-Embedded
|
UTF-8
| 427
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Gamestream client for embedded systems
# Release: 2017 Genre: Streaming
# Download and unpack the Moonlight-Embedded build for RG351P into the
# parent directory; progress messages are written (in green, \e[32m)
# straight to the console on /dev/tty1.
printf "\e[32mDownloading Moonlight-Embedded ..." > /dev/tty1
wget -q 'https://github.com/krishenriksen/moonlight-embedded-rg351p/releases/download/1.0.0/moonlight-embedded.zip'
printf "\e[32mExtracting ..." > /dev/tty1
unzip -qq -o moonlight-embedded.zip -d ../
printf "\e[32mCleaning up ..." > /dev/tty1
rm -rf moonlight-embedded.zip
| true
|
334d93ab050ea1b560129997d97d1d3f449294ae
|
Shell
|
radical-experiments/AIMES-Swift
|
/Swift_Experiments/bin/test_ssh.sh
|
UTF-8
| 304
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Smoke-test password-less SSH connectivity to the experiment hosts.

stampede="stampede.tacc.utexas.edu"
gordon="gordon.sdsc.edu"

# check <host>
# Run 'hostname -f' on the host over SSH and print a PASS/FAIL line.
# Testing the ssh command directly replaces the fragile '$?' sampling,
# and $site is used consistently (the original mixed $1 and $site).
check() {
    local site=$1
    if ssh "$site" "hostname -f" &> /dev/null; then
        echo "SSH: to $site [PASS]"
    else
        echo "SSH: to $site [FAIL]"
    fi
}

check "$stampede"
check "$gordon"
| true
|
fff7e0f8968e571578a5bae698f76a37097db675
|
Shell
|
pstray/image-updaters
|
/install
|
UTF-8
| 620
| 3.984375
| 4
|
[] |
no_license
|
#! /bin/bash
# Install the updater scripts from this directory into $PREFIX,
# remembering the chosen prefix in ./.config for later runs.
cd $(dirname $0)
PREFIX=
# Load a previously saved PREFIX, if the user configured one.
[ -f .config ] && . .config
default_PREFIX="$HOME/images"
# First run: ask interactively (with the default pre-filled) and persist
# the answer.
if [ -z "$PREFIX" ]; then
read -e -i "$default_PREFIX" -p "Install dir: " PREFIX
PREFIX=${PREFIX%/};
echo "PREFIX='$PREFIX'" >> .config
fi
shopt -s nullglob
# 'foo-update' installs to $PREFIX/foo/update ('-' becomes a path
# separator via the ${script//-/\/} substitution).
for script in *-update; do
target="$PREFIX/${script//-/\/}"
echo "Installing $script to $target"
install -D "$script" "$target"
done
# 'update*' scripts install under their own names; editor backups ('*~')
# are skipped.
for script in update*; do
case "$script" in
*~)
:
;;
*)
target="$PREFIX/$script"
echo "Installing $script to $target"
install -D "$script" "$target"
;;
esac
done
| true
|
25eb388f750711affd449dad423bb779b8881d75
|
Shell
|
gjvanoldenborgh/climexp_data
|
/KNMIData/update_potwind.sh
|
UTF-8
| 3,343
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Update the KNMI "potential wind" station data: download the zip
# archives, unpack them into tmp_potwind, derive station coordinates via
# the RDNAPTRANS2008 converter, and regenerate the upx* data files.
mkdir -p tmp_potwind
# With DEBUG=true all downloads/unpacks are echoed instead of executed.
DEBUG=false
if [ "$DEBUG" != true ]; then
wget=wget
unzip=unzip
else
echo "DEBUG, NO DATA RETRIEVED"
wget="echo wget"
unzip="echo unzip"
fi
# new data
base=http://projects.knmi.nl/klimatologie/onderzoeksgegevens/potentiele_wind-sigma/
$wget -O list.html $base
# Scrape the .zip hrefs from the index page and mirror each archive.
files=`fgrep .zip list.html | sed -e 's/^.*href=["]//' -e 's/["].*$//'`
for file in $files; do
$wget -q -N $base$file
done
base=http://projects.knmi.nl/klimatologie/onderzoeksgegevens/potentiele_wind/up_upd/
# old data
histfile=20140307_update_up.zip
$wget -q -N $base/$histfile
# older data
oldfile=20090515_update_up.zip
$wget -q -N $base/$oldfile
cd tmp_potwind
for file in ../potwind*.zip; do
$unzip -o $file
done
$unzip -o ../$histfile
$unzip -o ../$oldfile
cd ..
# Station ids are the middle component of the potwind_<id>_<year> names.
stations=`ls tmp_potwind | sed -e 's/potwind_//' -e 's/_.*$//' | sort | uniq`
nstations=`echo $stations | wc -w`
((nstations = nstations - 10)) # skipped files below
# Write the header line of each station list (all/sea/coast/land).
for ext in "" _sea _coast _land; do
# this will be wrong when stations are added but it is purely cosmetic.
case $ext in
_sea) nstations=8;;
_coast) nstations=11;;
_land) nstations=36;;
esac
cat > list_upx$ext.txt <<EOF
located $nstations stations in 50.0N:56.5N, 1.5E:8.0E
EOF
nstations=""
done
for station in $stations; do
# stations with two numbers
extrastation=XXX
extrastation1=XXX
# Map main station ids onto their secondary ids; a blanked $station
# marks secondary-only entries that are merged into their primary.
case $station in
252) extrastation=550;;
254) extrastation=554;;
269) extrastation=041;extrastation1=008;; # just a few months
277) extrastation=605;;
279) extrastation=615;;
321) extrastation=553;;
343) extrastation=609;;
356) extrastation=604;;
008) station=;;
041) station=;;
271) station=;; # second Stavoren station with no added value.
550) station=;;
553) station=;;
554) station=;;
604) station=;;
605) station=;;
609) station=;;
615) station=;;
esac
if [ -n "$station" ]; then
# Compute lat/lon once per station from the RD (Dutch grid) X/Y
# coordinates found in the newest data file.
if [ ! -s latlon_wind$station.txt ]; then
if [ ! -x RDNAPTRANS2008/rdnaptrans2008 ]; then
cd RDNAPTRANS2008
c++ -o rdnaptrans2008 rdnaptrans2008.cpp
cd ..
fi
if [ ! -x RDNAPTRANS2008/rdnaptrans2008 ]; then
echo "Please compile rdnaptrans2008.cpp"
exit -1
fi
lastfile=`ls tmp_potwind/potwind_${station}_???? | tail -n 1`
line=`fgrep COORDINATES $lastfile`
x=`echo $line | sed -e 's/^.*X ://' -e 's/;.*$//'`
y=`echo $line | sed -e 's/^.*Y ://'`
cd RDNAPTRANS2008
./rdnaptrans2008 <<EOF > ../aap.txt
2
$x
$y
0
0
EOF
cd ..
lat=`cat aap.txt | fgrep phi | sed -e 's/phi *//'`
lon=`cat aap.txt | fgrep lambda | sed -e 's/lambda *= *//'`
cat > latlon_wind$station.txt <<EOF
$station
$lat N
$lon E
EOF
fi
# Convert the raw yearly files (primary + secondary stations) into
# the upx data format, then compress for publication.
./txt2dat_potwind tmp_potwind/potwind_${station}_???? tmp_potwind/potwind_${extrastation}_???? tmp_potwind/potwind_${extrastation1}_???? >> list_upx.txt
gzip -c upx$station.dat > upx$station.gz
fi
done
echo "==============================================" >> list_upx.txt
# Publish the regenerated files.
$HOME/NINO/copyfiles.sh upx* list_upx*.txt
| true
|
d04b95abbfead5257e67f58ad854b316fa0767ec
|
Shell
|
hankshz/dockers
|
/hbase-cluster/script/build-hadoop.sh
|
UTF-8
| 889
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Docker build step: install Java + sshd, unpack Hadoop from /raw into
# $HADOOP_BUILD, apply the cluster configuration, and format the
# namenode.  Expects HADOOP_VERSION, HADOOP_BUILD and JAVA_HOME set.
set -e
apt-get update
apt-get install -y default-jdk openssh-server
java -version
# setup sshd and skip password
mkdir /var/run/sshd
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 0600 ~/.ssh/authorized_keys
cp /raw/ssh_config ~/.ssh/config
# Unpack the Hadoop distribution into the build location.
mkdir -p /build
tar xvf /raw/hadoop-$HADOOP_VERSION.tar.gz -C /build
mv /build/hadoop-$HADOOP_VERSION $HADOOP_BUILD
# Pin JAVA_HOME in hadoop-env.sh (the stock file re-reads ${JAVA_HOME}).
sed -i 's@export JAVA_HOME=${JAVA_HOME}@export JAVA_HOME='"$JAVA_HOME"'@g' $HADOOP_BUILD/etc/hadoop/hadoop-env.sh
# Install the cluster site configuration baked into the image context.
cp /raw/hadoop/hdfs-site.xml $HADOOP_BUILD/etc/hadoop/hdfs-site.xml
cp /raw/hadoop/core-site.xml $HADOOP_BUILD/etc/hadoop/core-site.xml
cp /raw/hadoop/mapred-site.xml $HADOOP_BUILD/etc/hadoop/mapred-site.xml
cp /raw/hadoop/yarn-site.xml $HADOOP_BUILD/etc/hadoop/yarn-site.xml
cp /raw/hadoop/slaves $HADOOP_BUILD/etc/hadoop/slaves
# Initialize the HDFS namenode metadata.
hdfs namenode -format
| true
|
8b41e7244c7fa975358b21452b525c2df6431567
|
Shell
|
shubhamlokhande66/Week2
|
/arry.sh
|
UTF-8
| 119
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Populate a[0..17] with pseudo-random integers in the range 1..100.
n=0
while [ "$n" -lt 18 ]; do
  a[n]=$(( RANDOM % 100 + 1 ))
  n=$(( n + 1 ))
done
| true
|
baea9975934bf94a0dc6c4a4db2dfe4b6a417cb6
|
Shell
|
whichrakesh/Course-Projects
|
/CS296-Software-Lab/scripts/g26_gen_data_csv.sh
|
UTF-8
| 1,123
| 3.21875
| 3
|
[] |
no_license
|
# Benchmark driver: run cs296_26_exe for iteration values 1..1200, 150
# reruns each, collect the numeric fields into a CSV, then post-process.
PROJECT_ROOT=.
mkdir -p $PROJECT_ROOT/data
BINDIR=$PROJECT_ROOT/mybins
DATA=$PROJECT_ROOT/data
# Start from a clean CSV so reruns do not append to stale data.
if test -f $DATA/g26_lab05data_02.csv;
then
rm $DATA/g26_lab05data_02.csv
fi
#
#gXXout-<iteration_value>-<rerun_number>.txt
# Beginning of outer loop.
for ((itr_val=1; itr_val <= 1200 ; itr_val++)) # Double parentheses, and naked "LIMIT"
do
echo -n "$itr_val "
# ===============================================
# Beginning of inner loop.
for ((rerun=1; rerun <= 150 ; rerun++)) # Double parentheses, and naked "LIMIT
do
echo -n "$rerun "
# Strip everything but digits/dots from the program output, collapse
# newlines, and word-split the numbers into array 'a'.
TEMP=$($BINDIR/cs296_26_exe $itr_val)
TEMP1=$(sed "s/[^0-9.]//g" <<< "${TEMP}")
VAR=$(tr -s '\n' ' ' <<< "${TEMP1}")
a=($VAR)
# CSV row: iteration, rerun number, then the five timing fields.
TEMP=${a[0]}","$rerun","${a[1]}","${a[2]}","${a[3]}","${a[4]}","${a[5]}
echo $TEMP >> $DATA/g26_lab05data_02.csv
done
# End of inner loop.
# ===============================================
echo
done
# End of outer loop.
# Aggregate the raw CSV into the plotting inputs.
cat ./data/g26_lab05data_02.csv | ./scripts/process.sh >./scripts/temp.dat
cat ./data/g26_lab05data_02.csv | ./scripts/process2.sh >./scripts/temp2.dat
| true
|
135c65efc054025dfd953627d680a86445d0d6c3
|
Shell
|
plark-app/plark-website
|
/docker/build.sh
|
UTF-8
| 830
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Build (and optionally push) the plark/plark-website Docker image.
# Flags: --push          also push the built tags to the registry
#        --version=X.Y.Z override the version read from package.json
NODE_ENV=production
UN_ENV=production
##### Constants
APPLICATION_VERSION=$(jq -r ".version" package.json)
WITH_PUSH=0
# NOTE(review): 'shift' inside a 'for arg in "$@"' loop does not affect
# the iteration list — it is a no-op here; the loop already visits every
# argument.
for arg in "$@"
do
case $arg in
--push*)
WITH_PUSH=1
shift
;;
--version=*)
APPLICATION_VERSION="${arg#*=}"
shift
;;
esac
done
# Build the image, tagging it with both the version and 'latest'.
build_docker()
{
yarn locales:import
docker build \
--file ./Dockerfile \
--tag plark/plark-website:$APPLICATION_VERSION \
--tag plark/plark-website:latest .
# end of build_docker
}
# Push both tags to the registry.
push_docker()
{
docker push plark/plark-website:$APPLICATION_VERSION
docker push plark/plark-website:latest
# end of push_docker
}
echo " ========= Start building ========= "
build_docker
if [[ $WITH_PUSH = "1" ]]
then
echo " ========= Start pushing ========= "
push_docker
fi
| true
|
bae3f3ef5cdd59c0cdfdd873aaebbceb6dfde5d6
|
Shell
|
piiih/git-issues
|
/git-issues
|
UTF-8
| 2,399
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# git-issues — list or pick GitHub issues for the current repository.
# Usage: git-issues (list|pick) [--label=<name>]

# Validate the action and exit non-zero on misuse: a bare 'exit' after
# the echo returned 0, so callers could not detect the error.
if [[ $1 != "list" ]] && [[ $1 != "pick" ]]
then
    echo "Invalid action '$1'"
    exit 1
fi

action=$1
shift

# Parse long options of the --label=<value> form via getopts' '-' trick.
while getopts ":l-:" opt; do
    case ${opt} in
        -)
            case ${OPTARG} in
                "label"=*)
                    # Keep everything after the first '=' (the original
                    # 'cut -f2' truncated labels containing '=').
                    label=${OPTARG#*=}
            esac
    esac
done
# Map a hex colour ("#rrggbb" or "rrggbb") to the nearest xterm-256 palette
# index, printed as a zero-padded three-digit number (e.g. "016", "196").
function 256fromHex() {
    local hex=$1
    # Strip a single leading '#' if present.
    [[ $hex == "#"* ]] && hex=${hex:1}
    # Extract each channel as a "0xNN" string; the arithmetic context
    # below evaluates these as hexadecimal numbers.
    local r g b
    r=$(printf '0x%0.2s' "$hex")
    g=$(printf '0x%0.2s' "${hex#??}")
    b=$(printf '0x%0.2s' "${hex#????}")
    # Project each channel onto the 6x6x6 colour cube, offset by the 16
    # reserved ANSI colours.
    printf '%03d\n' "$(( (r<75?0:(r-35)/40)*36 + (g<75?0:(g-35)/40)*6 + (b<75?0:(b-35)/40) + 16 ))"
}
output=''
# A personal access token is required for the GitHub API calls below.
if [[ $GITHUBACCESSTOKEN == '' ]]
then
echo 'Must set GITHUBACCESSTOKEN environment with your Github Personal Token'
exit
fi
# Resolve the "owner/repo" slug: an explicit $GITHUB_REPO wins, otherwise
# it is parsed from the first `git remote -v` entry.
# NOTE(review): the cut pipeline assumes an SSH-style remote
# (git@github.com:owner/repo.git) — confirm behaviour with HTTPS remotes.
if [[ $GITHUB_REPO != '' ]]
then
url=$GITHUB_REPO
else
repo_remotes=`git remote -v 2> /dev/null`
if [[ $repo_remotes == '' ]]
then
echo 'Must be in git directory or set GITHUB_REPO with a github repository url'
exit
fi
IFS=$'\n'
for repo_url in $repo_remotes
do
url=`echo $repo_url | cut -d ":" -f 2 | cut -d "." -f 1`
break
done
fi
# Fetch the open issues (uses the deprecated access_token query parameter).
response=`curl -s -H "Content-Type: application/json" https://api.github.com/repos/${url}/issues?access_token=$GITHUBACCESSTOKEN`
output=$response
# Optional label filter.
if [[ $label != '' ]]
then
output=`echo $output | jq --raw-output 'map(select(.labels[].name == "'"$label"'"))'`
fi
# Drop pull requests and render "<number> - <title> [label]..." lines with
# each label's "#rrggbb" colour embedded, to be replaced by ANSI codes below.
output=`echo $output | jq --raw-output 'map(select(.pull_request == null)) | .[] | "\\\e[38;5;15m \(.number) - \(.title) \([["#" + .labels[].color], ["["+.labels[].name + "] \\\e[38;5;15m"]] | transpose | flatten | reduce .[] as $item (""; . + $item)) "'`
colors=`echo $response | jq --raw-output 'map(select(.pull_request == null)) | .[] | ["#" + .labels[].color + ";"] | reduce .[] as $item (""; . + $item)' | sed -e "s/\;\#/\; \#/g"`
# Replace every distinct "#rrggbb" occurrence with its xterm-256 escape.
for color in $(echo "$colors" | grep -o "\#.*\;" | sed -e "s/\;//g" | uniq) ; do
color256=`256fromHex "$color"`
output=`echo "$output" | sed -e "s/${color}/\\\\\e[38;5;${color256}m/g"`
novasCores[$index]="$color"
done
# list: print the colourised issues.
# pick: print them, prompt for an issue number, and open its page in Chrome.
if [[ $action == "list" ]]
then
if [[ $output != '' ]]
then
echo -e "$output"
fi
elif [[ $action == "pick" ]]
then
echo -e "$output"
printf 'issue number to open: '
read issue_number
google-chrome-stable -s `echo $response | jq "map(select(.number == ${issue_number})) | .[0].html_url" | sed -e "s/\"//g"` &> /dev/null
fi
| true
|
d5b4b4a0ab26b36c876789106569adaf7f38f0ce
|
Shell
|
Schwarzam/usvirtualobservatory
|
/usvao/prototype/dalserver/branches/prodv1/prototype/ensureDatabase.sh
|
UTF-8
| 1,057
| 3.90625
| 4
|
[] |
no_license
|
#! /bin/bash
#
# Usage: ensureDatabase.sh muser mpass [dbname] [recreate] [host]
#   muser:    the mysql user name
#   mpass:    the mysql user's password
#   dbname:   the name of the database to create (default: siav2proto)
#   recreate: if "f" or not specified, the database will not be recreated
#             if it already exists
#   host:     the hostname of the platform running the database
set -e
usernm=$1
passwd=$2
dbname=$3
reload=$4
host=$5
[ -z "$usernm" ] && {
    echo "Missing username argument"
    exit 1
}
[ -z "$passwd" ] && {
    echo "Missing password argument"
    exit 1
}
[ -n "$host" ] && host="-h$host"
[ -n "$dbname" ] || dbname="siav2proto"
echo Ensuring database $dbname
# Build the SQL once so the echoed preview and the statements actually sent
# to mysql can never drift apart (they were previously duplicated inline and
# had already diverged in capitalisation).
if [ -z "$reload" -o "$reload" = "f" ]; then
    sql="CREATE DATABASE IF NOT EXISTS $dbname;"
else
    sql="DROP DATABASE IF EXISTS $dbname;
CREATE DATABASE $dbname;"
fi
# Show what will be executed, then execute it.
# NOTE(review): passing the password via -p$passwd exposes it in `ps`;
# consider --defaults-extra-file on shared hosts.
echo "$sql"
mysql $host -u$usernm -p$passwd <<EOF
$sql
EOF
| true
|
4f8c977640fa2953124099264e1c40daabb05476
|
Shell
|
tibotiber/greeny-oms
|
/api/policies/generate_policies.sh
|
UTF-8
| 820
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/sh
# Generate one policy file per row of policies.csv by copying the p_xxx.js
# template and replacing its "xxx" line with a role check built from the
# CSV's 0/1 permission columns.
#
# NOTE: IFS=, does double duty here — it makes `read` split the CSV on
# commas AND it stops the unquoted ${TEST} (which contains spaces but no
# commas) from word-splitting in the sed command at the bottom.
# Fragile but load-bearing: do not "fix" the quoting without retesting.
IFS=,
{
# Discard the CSV header row.
read line;
while read policy accrec accpay documentation packing qualitycheck purchasing sales manager
do
cp p_xxx.js p_$policy.js
# Accumulate the JavaScript condition, one role per enabled column.
TEST=' if(admin'
if [ $accrec -eq 1 ]; then
TEST=${TEST}' || accounts_receivable'
fi
if [ $accpay -eq 1 ]; then
TEST=${TEST}' || accounts_payable'
fi
if [ $documentation -eq 1 ]; then
TEST=${TEST}' || documentation'
fi
if [ $packing -eq 1 ]; then
TEST=${TEST}' || packing'
fi
if [ $qualitycheck -eq 1 ]; then
TEST=${TEST}' || quality_check'
fi
if [ $purchasing -eq 1 ]; then
TEST=${TEST}' || purchasing'
fi
if [ $sales -eq 1 ]; then
TEST=${TEST}' || sales'
fi
if [ $manager -eq 1 ]; then
TEST=${TEST}' || manager'
fi
TEST=${TEST}')'
# Replace the template's "xxx" marker line with the assembled condition.
sed -i '/xxx/c\'${TEST} p_$policy.js
done
} < policies.csv
| true
|
04e5db709d5e0ea17d5415bde61716611ecbbe59
|
Shell
|
AdamDS/personal_helpful
|
/new_py
|
UTF-8
| 1,812
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
#a bash script to create a template python script (older comments in this
#file said "perl" — leftover from the script this was adapted from)
#20151004 Kuan Huang
usage="new_python <new python script> #number of inputs#"
#author of the generated script, parsed from the finger(1) Name field
name=`finger $USER | grep Name | cut -f 3 -d':' | sed s'/ //'`
if [ $# -gt 0 ]; then
#"shebang" for python
echo "#!/bin/python" > $1
#file creation & authorship
echo -n "#" >> $1
timestamp=$(date "+%d %B %Y")
echo -n "$timestamp" >> $1
echo " - $name - " >> $1
echo "" >> $1
# file description
#echo "\"\"\""
#echo "\"\"\""
#package use
echo "import sys" >> $1
echo "import argparse" >> $1
# echo "import Bio" >> $1" # biopython if needed
echo "" >> $1
#generated script usage
echo "INPUT = \"input\"" >> $1
echo "" >> $1
echo "def usage():" >> $1
echo " helpText = \"USAGE: $1 [-h] --\" + INPUT + \" <input file>\\n\"" >> $1
echo " helpText += \"\\t-h this message\\n\"" >> $1
echo " helpText += \"\\t--\" + INPUT + \" input file\\n\"" >> $1
echo " return helpText" >> $1
echo "" >> $1
# get arguments
echo "def getArguments():" >> $1
echo " filename = None" >> $1
echo " parser = argparse.ArgumentParser( description = usage() )" >> $1
echo " parser.add_argument( \"--\" + INPUT , \"-i\" )" >> $1
echo " return vars( parser.parse_args() )" >> $1
echo "" >> $1
echo "def readFile( inFile ):" >> $1
echo " #open input file" >> $1
echo " try:" >> $1
echo " with open( inFile , \"r\" ) as f:" >> $1
echo " for line in f:" >> $1
echo " line = line.strip()" >> $1
echo " fields = line.split( \"\\t\" )" >> $1
echo " except:" >> $1
echo " print( \"ERROR: Bad input file\\n\" )" >> $1
echo " print( usage() )" >> $1
echo " sys.exit()" >> $1
echo "" >> $1
echo "def main():" >> $1
echo " data = getArguments()" >> $1
echo " readFile( data[INPUT] )" >> $1
echo "" >> $1
echo "sys.exit( main() )" >> $1
fi
exit
| true
|
535bc853753210965090cf576d82f6babacaabfd
|
Shell
|
jduepmeier/scripts
|
/display_mode.sh
|
UTF-8
| 2,522
| 4.40625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# display mode switches between primary and other display configurations
# for sway (driven by swaymsg); an interactive picker uses wofi.
# PRIMARY_DISPLAY may be overridden from the environment (default eDP-1).
PRIMARY_DISPLAY="${PRIMARY_DISPLAY:-eDP-1}"
set -euo pipefail
# Print all arguments to stderr (keeps stdout clean for substitutions).
log() {
( >&2 echo "$@" )
}
# Ask the user to choose a mode via a wofi dmenu; prints the selection.
select_mode() {
wofi --show=dmenu <<-EOF
primary
secondary
extended
extended-right
extended-left
EOF
}
# Sends the command to sway.
# First parameter is the output name,
# second parameter is the cmd (e.g. enable or disable).
output_cmd() {
output="${1}"
cmd="${2}"
log "${output}" "${cmd}"
swaymsg "output '${output}' ${cmd}"
}
# Enables the primary display and disables all
# other displays.  Iterates the newline-separated $output_names global
# prepared by main().
mode_primary() {
while read output
do
if [[ "${output}" == "${PRIMARY_DISPLAY}" ]]
then
mode='enable'
else
mode='disable'
fi
output_cmd "${output}" "${mode}"
done <<<"${output_names}"
}
# Disables the primary display and enables every other output.
# Iterates the newline-separated $output_names global prepared by main().
mode_secondary() {
while read output
do
case "${output}" in
"${PRIMARY_DISPLAY}") mode='disable' ;;
*) mode='enable' ;;
esac
output_cmd "${output}" "${mode}"
done <<<"${output_names}"
}
# Extends the primary display.
# With no argument all outputs are simply enabled (sway picks placement);
# with "left"/"right" the two outputs are positioned side by side.
# NOTE(review): the placement logic keeps only one secondary output — with
# more than two outputs, the last one read wins.
mode_extended() {
direction="${1:-}"
while read output
do
output_cmd "${output}" "enable"
done <<<"${output_names}"
if [[ -n "${direction}" ]]
then
# Re-query the outputs: current widths are needed for the x offsets.
outputs=$(swaymsg -t get_outputs -r)
output_names="$(jq -r '.[].name' <<<"${outputs}")"
while read output
do
width="$(output="${output}" jq -r '.[] | select(.name == env.output) | .current_mode.width' <<<"$outputs")"
log "found width ${width} for output ${output}"
if [[ "${output}" == "${PRIMARY_DISPLAY}" ]]
then
primary_width="${width}"
primary_output="${output}"
else
secondary_width="${width}"
secondary_output="${output}"
fi
done <<<"${output_names}"
# Place the requested output at x=0 and the other just past its width.
case "${direction}" in
left)
swaymsg "output '${secondary_output}' pos 0 0"
swaymsg "output '${primary_output}' pos ${secondary_width} 0"
;;
right)
swaymsg "output '${primary_output}' pos 0 0"
swaymsg "output '${secondary_output}' pos ${primary_width} 0"
;;
esac
fi
}
# Entry point: mode comes from $1 or, if absent, from the wofi picker.
main() {
mode="${1:-}"
# Cache the output list for the mode_* helpers.
outputs=$(swaymsg -t get_outputs -r)
output_names=$(jq -r '.[].name' <<<"${outputs}")
if [[ -z "${mode}" ]]
then
mode="$(select_mode)"
fi
case "${mode}" in
primary)
mode_primary
;;
secondary)
mode_secondary
;;
extended)
mode_extended
;;
extended-left)
mode_extended left
;;
extended-right)
mode_extended right
;;
*)
log "unkown mode: ${mode}"
exit 2
;;
esac
}
main "$@"
| true
|
914100935081098a7bdfe984bc47afa8416d2cee
|
Shell
|
dimitar-asenov/Envision
|
/misc/version-control/scripts/import_and_merge.sh
|
UTF-8
| 5,855
| 3.5625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# This script takes 3 java files as input: base master dev + an optional flag '-quick-match'
#
# It creates a repo in /tmp/EnvisionVC/TestMerge and merges the files using Envision.
# It does the same using the Git merge in /tmp/EnvisionVC/Git.
# If run manually, this script is intended for the files in Envision/FilePersistence/test/persisted/version-control/manual/
# The script is also used in the diff_envision_dev.sh script.
#
# If the '-quick-match' flag is used, then instead of GumTree, a simple text-comparison is used to match IDs from different versions.
# Tool locations, resolved relative to this script's own directory.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ENVISION_ROOT="$( cd "$SCRIPT_DIR/../../.." && pwd )"
JavaImportTool="$SCRIPT_DIR/../JavaImportToolBin/JavaImportTool/bin/JavaImportTool"
gumtree="$SCRIPT_DIR/../gumtree_bin/gumtree-2.1.0-SNAPSHOT/bin/gumtree -c Clients.experimental true -c match.gt.minh 1 -c match.bu.sim 0.2 envdmp -g envision -m gumtree"
quick_match="$SCRIPT_DIR/quick-match.py"
QUICK_MATCH_ARG=$4
idpatcher=$SCRIPT_DIR/patch_ids.py
idsync=$SCRIPT_DIR/sync_branch_inserted_ids.py
repoScript=$ENVISION_ROOT/FilePersistence/test/persisted/version-control/create-test-git-repo.py
# Scratch layout under /tmp; the two *ReadyFile paths are a handshake with
# a separately-running Envision instance (see the wait loop further down).
testdir="/tmp/EnvisionVC"
scriptReadyFile="/tmp/EnvisionVC/scriptReady"
envisionReadyFile="/tmp/EnvisionVC/envisionReadyFile"
base="${testdir}/base"
master="${testdir}/master"
dev="${testdir}/dev"
envRepoSrc="${testdir}/envSrc"
gitRepoSrc="${testdir}/gitSrc"
envRepo="${testdir}/TestMerge"
gitRepo="${testdir}/Git"
# $1 is base
# $2 is master
# $3 is dev
# $4 is the optional -quick-match
# Import the files into Envision
# Start from a clean scratch area and stage the three input versions.
rm -rf $testdir
mkdir $testdir
mkdir $base
mkdir $master
mkdir $dev
cp $1 $base
cp $2 $master
cp $3 $dev
mkdir $gitRepoSrc
cp $1 "${gitRepoSrc}/master_a_(base)_TestMerge"
cp $2 "${gitRepoSrc}/master_b_(master)_TestMerge"
cp $3 "${gitRepoSrc}/dev_a_master_a_(dev)_TestMerge"
mkdir $envRepoSrc
MERGE_AND_FILE_DIR="$( pwd )"
cd $testdir
# Block until the process with PID $1 has exited (polls every 50ms).
function waitFor () {
    until ! kill -0 "$1" 2> /dev/null; do
        sleep 0.05
    done
}
echo "-------------------- Importing into Envision --------------------"
# Import Java to Envision
# The base import runs in the background; later steps wait on its PID.
$JavaImportTool TestMerge base base -force-single-pu -no-size-estimation &
baseImportPID=$!
# Move to Envision test directory
function copyBase ()
{
waitFor $baseImportPID
cp base/TestMerge/TestMerge "${envRepoSrc}/master_a_(base)_TestMerge"
}
copyBase &
copyBasePID=$!
$JavaImportTool TestMerge master master -force-single-pu -no-size-estimation &
masterImportPID=$!
# Match master's node IDs against base (GumTree or the quick matcher),
# then rewrite master's file with the matched IDs.
function matchMaster ()
{
waitFor $baseImportPID
waitFor $masterImportPID
echo "-------------------- Matching Master to Base --------------------"
if [ "$QUICK_MATCH_ARG" == "-quick-match" ]; then
$quick_match base/TestMerge/TestMerge master/TestMerge/TestMerge > master/TestMerge/TestMerge.idpatch
else
$gumtree base/TestMerge/TestMerge master/TestMerge/TestMerge
fi
echo "-------------------- Patching Master IDs --------------------"
$idpatcher master/TestMerge/TestMerge
}
matchMaster &
matchMasterPID=$!
# In some cases both the master and dev branch of a file are identical, but they are both different to base
# In such cases create matching ids and skip unnecessary computation
DEV_MASTER_DIFF=$(diff dev/dev.java master/master.java)
function processDevBranch ()
{
if [[ $DEV_MASTER_DIFF ]]; then
$JavaImportTool TestMerge dev dev -force-single-pu -no-size-estimation
waitFor $baseImportPID
echo "-------------------- Matching Dev to Base --------------------"
if [ "$QUICK_MATCH_ARG" == "-quick-match" ]; then
$quick_match base/TestMerge/TestMerge dev/TestMerge/TestMerge > dev/TestMerge/TestMerge.idpatch
else
$gumtree base/TestMerge/TestMerge dev/TestMerge/TestMerge
fi
echo "-------------------- Patching Dev IDs --------------------"
$idpatcher dev/TestMerge/TestMerge
waitFor $matchMasterPID
echo "-------------------- Syncing branch IDs --------------------"
# Additionally match some newly introduced IDs
# Match dev to master
$idsync base/TestMerge/TestMerge master/TestMerge/TestMerge dev/TestMerge/TestMerge > dev/TestMerge/TestMerge.idpatch.sync
mv -f dev/TestMerge/TestMerge.idpatch.sync dev/TestMerge/TestMerge.idpatch
$idpatcher dev/TestMerge/TestMerge
# Match master to dev
$idsync base/TestMerge/TestMerge dev/TestMerge/TestMerge master/TestMerge/TestMerge > master/TestMerge/TestMerge.idpatch.sync
mv -f master/TestMerge/TestMerge.idpatch.sync master/TestMerge/TestMerge.idpatch
$idpatcher master/TestMerge/TestMerge
# Copy
cp master/TestMerge/TestMerge "${envRepoSrc}/master_b_(master)_TestMerge"
cp dev/TestMerge/TestMerge "${envRepoSrc}/dev_a_master_a_(dev)_TestMerge"
else
# dev == master: reuse master's matched file for both branch snapshots.
waitFor $matchMasterPID
cp master/TestMerge/TestMerge "${envRepoSrc}/dev_a_master_a_(dev)_TestMerge"
cp master/TestMerge/TestMerge "${envRepoSrc}/master_b_(master)_TestMerge"
fi
}
processDevBranch &
processDevBranchPID=$!
# Barrier: all background work must be done before building the repos.
waitFor $copyBasePID
waitFor $processDevBranchPID
echo "-------------------- Creating repositories --------------------"
$repoScript $envRepoSrc $envRepo &> /dev/null
# Move Java files to Git test directory
$repoScript $gitRepoSrc $gitRepo &> /dev/null
(
cd $gitRepo
git merge dev -m "merge dev"
)
echo "-------------------- Merging in Envision --------------------"
# Handshake with the externally-running Envision: signal readiness and
# poll until Envision signals back.
rm -rf $envisionReadyFile
touch $scriptReadyFile
while [ ! -f $envisionReadyFile ] ;
do
sleep 0.1
done
echo "-------------------- Copying merged files and info --------------------"
# Copy envision repo sources and summary
cp -rf $envRepoSrc $MERGE_AND_FILE_DIR/.
declare -a filesToCopy=("direct_conflicts" "remaining_changes" "soft_conflicts")
for f in "${filesToCopy[@]}"
do
if [ -f $f ] ; then
cp -f $f $MERGE_AND_FILE_DIR/.
rm $f
else
rm -f $MERGE_AND_FILE_DIR/$f
fi
done
# Cleanup of unnecessary files
rm -rf $envRepoSrc
rm -rf $gitRepoSrc
| true
|
cc0e5cd04ed7f944c28a73ce8d8bc552008e3710
|
Shell
|
ewxrjk/ljdump
|
/build
|
UTF-8
| 310
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Package an ljdump release: stage the distributable files in a versioned
# directory, then produce matching .tar.gz and .zip archives plus checksums.
# The version is parsed from the "Version" line inside ljdump.py itself.
VERSION=$(grep Version ljdump.py | cut -d' ' -f3)
DIR="ljdump-$VERSION"
# Stage a clean copy of the release files.
rm -rf "$DIR"
mkdir "$DIR"
cp ChangeLog README.txt ljdump.py ljdump.config.sample "$DIR"/
# Tarball.  Fix: use rm -f so the first-ever run does not print an error
# when no previous archive exists.
TARGZ="ljdump-$VERSION.tar.gz"
rm -f "$TARGZ"
tar czf "$TARGZ" "$DIR"/*
# Zip archive.
ZIP="ljdump-$VERSION.zip"
rm -f "$ZIP"
zip "$ZIP" "$DIR"/*
rm -rf "$DIR"
# NOTE(review): `md5` is the BSD/macOS tool name; on Linux this would be
# md5sum — confirm the intended platform before changing it.
md5 "$TARGZ"
md5 "$ZIP"
| true
|
6a69057530b29ffc732a72243898cd214258858e
|
Shell
|
xlab-si/intersight-sdk-ruby
|
/bin/oas_generator/regex-syntax-fix.sh
|
UTF-8
| 500
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Post-process the OpenAPI-generated Ruby sources, patching regex escaping
# problems in-place across every .rb file.
# MacOS uses BSD sed which has a different CLI, so on Mac prefer gnu-sed.
# gnu-sed can be installed on MacOS with `brew install gnu-sed`
cmd="$([[ "$OSTYPE" == "darwin"* ]] && echo "gsed" || echo "sed")"
set -e
# On Regexp.new lines, reduce doubled-escaped slashes (\\/) to \/.
find . -name "*.rb" -exec $cmd -i -e '/Regexp.new/ s|\\\\/|\\/|g' {} +
# Replace \xFF escapes with the \u00FF form (presumably because \xFF is
# invalid inside UTF-8 Ruby regex literals — TODO confirm).
find . -name "*.rb" -exec $cmd -i -e 's|\\xFF|\\u00FF|g' {} +
# Escape the literal "[\]" sequence that follows a '#'.
find . -name "*.rb" -exec $cmd -i -e 's|#\[\\\]|#\\\[\\\]|g' {} +
# Escape the trailing '-' after character ranges so it is unambiguous.
find . -name "*.rb" -exec $cmd -i -e 's|a-zA-Z0-9-|a-zA-Z0-9\\-|g' {} +
| true
|
d9d86d234b09c21a2053ad9d8ddcfebbfc9895a5
|
Shell
|
robertkraig/Leela
|
/PKGBUILD
|
UTF-8
| 839
| 3.046875
| 3
|
[] |
no_license
|
# Maintainer: "Trilby" <jmcclure [at] cns [dot] umass [dot] edu>
# Arch Linux PKGBUILD for leela, built from the upstream git HEAD.
pkgname=leela-git
pkgver=20120605
pkgrel=2
pkgdesc="CLI frontend to poppler-glib of PDF tools"
url="http://github.com/TrilbyWhite/leela.git"
arch=('any')
license=('GPLv3')
depends=('poppler-glib' 'ghostscript')
makedepends=('git')
_gitroot="git://github.com/TrilbyWhite/Leela.git"
_gitname="leela"
# Clone (or update) the upstream repo, then build from a pristine local
# clone so the cached checkout stays untouched.
build() {
cd "$srcdir"
msg "Connecting to GIT server...."
if [ -d $_gitname ] ; then
cd $_gitname && git pull origin
msg "The local files are updated."
else
git clone $_gitroot $_gitname
fi
msg "GIT checkout done or server timeout"
msg "Starting make..."
rm -rf "$srcdir/$_gitname-build"
git clone "$srcdir/$_gitname" "$srcdir/$_gitname-build"
cd "$srcdir/$_gitname-build"
make
}
# Install the freshly built tree into the package staging directory.
package() {
cd "$srcdir/$_gitname-build"
make DESTDIR="$pkgdir" install
}
| true
|
a01f6e44dd6dc90e6a43ad143f663ae0cf42ae9c
|
Shell
|
bylee/cantata-jsbind
|
/build
|
UTF-8
| 2,753
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
# Cross-compile the tizenair Node.js native addon (.node shared object) for
# Tizen, compiling every src/*.cpp and src/*.cc and linking against the
# Tizen/OSP libraries.  Relies on externally-set toolchain variables:
# $CXX, $LD, $SYSROOT, $INC_TIZEN/$INC_OSP/$INC_NODE, $NODE_DEPS, etc.
SCRIPT=`readlink -f $0`
DIR_HOME=`dirname $SCRIPT`
# Output directory: ./target by default, overridable via $1.
TARGET=$DIR_HOME/target
if [ -n "$1" ];
then
TARGET=$1
fi
echo "Target: $TARGET"
#cd $DIR_HOME/src
#node-waf configure
#node-waf build
SRC_LIST=`find src -name *.cpp`
O_LIST=""
echo "NODE :$INC_NODE"
echo "NODE DEPS:$NODE_DEPS"
# Compile each .cpp to a position-independent object, collecting the .o
# names for the link step.
for SRC_NAME in $SRC_LIST
do
D_NAME="${SRC_NAME%.*}.d"
O_NAME="${SRC_NAME%.*}.o"
O_LIST="$O_LIST $O_NAME"
$CXX -I"pch" -D_DEBUG -I"$DIR_HOME/inc" -O0 -g3 -Wall -c -fmessage-length=0 -fPIC -I"$INC_TIZEN" -I"$INCXML" -I"$TIZEN_HOME/library" -I"$INC_OSP" -I"$INC_NODE" -I"$NODE_DEPS/uv/include" -I"$NODE_DEPS/v8/include" --sysroot="$SYSROOT" -D_APP_LOG -MMD -MP -MF"$D_NAME" -MT"$D_NAME" -o "$O_NAME" "$SRC_NAME"
done;
# Same compile pass for the .cc sources.
SRC_LIST=`find src -name *.cc`
for SRC_NAME in $SRC_LIST
do
D_NAME="${SRC_NAME%.*}.d"
O_NAME="${SRC_NAME%.*}.o"
O_LIST="$O_LIST $O_NAME"
$CXX -I"pch" -D_DEBUG -I"$DIR_HOME/inc" -O0 -g3 -Wall -c -fmessage-length=0 -fPIC -I"$INC_TIZEN" -I"$INCXML" -I"$TIZEN_HOME/library" -I"$INC_OSP" -I"$INC_NODE" -I"$NODE_DEPS/uv/include" -I"$NODE_DEPS/v8/include" --sysroot="$SYSROOT" -D_APP_LOG -MMD -MP -MF"$D_NAME" -MT"$D_NAME" -o "$O_NAME" "$SRC_NAME"
done;
echo "Linking..."
# The first echo merely logs the (slightly different) command line; only
# the second line actually links.
echo "$LD -o"$TARGET/tizenair.node" $O_LIST -L"$DIR_HOME/lib" -Xlinker --as-needed -pie -lpthread --sysroot="$SYSROOT" -L"$LIB_TIZEN" -L"$LIB_OSP" -losp-appfw -losp-uifw -losp-image -losp-json -losp-ime -losp-net -lpthread -losp-content -losp-locations -losp-telephony -losp-uix -losp-media -losp-messaging -losp-web -losp-social -losp-wifi -losp-bluetooth -losp-nfc -losp-face -losp-speech-tts -losp-speech-stt -losp-shell -losp-shell-core -lxml2 -losp-vision -ldl -shared"
$LD -o"$TARGET/tizenair.node" $O_LIST -L"$DIR_HOME/lib" -Xlinker --as-needed -lpthread --sysroot="$SYSROOT" -L"$LIB_TIZEN" -L"$LIB_OSP" -losp-appfw -losp-uifw -losp-image -losp-json -losp-ime -losp-net -lpthread -losp-content -losp-locations -losp-telephony -losp-uix -losp-media -losp-messaging -losp-web -losp-social -losp-wifi -losp-bluetooth -losp-nfc -losp-face -losp-speech-tts -losp-speech-stt -losp-shell -losp-shell-core -lxml2 -losp-vision -shared
#i386-linux-gnueabi-g++ -o"$TARGET/cantata.exe" $O_LIST -L"$DIR_HOME/lib" -Xlinker --as-needed -pie -lpthread -Xlinker -rpath=\$ORIGIN/../lib --sysroot="$SYSROOT" -L"$LIB_TIZEN" -L"$LIB_OSP" -losp-appfw -losp-uifw -losp-image -losp-json -losp-ime -losp-net -lpthread -losp-content -losp-locations -losp-telephony -losp-uix -losp-media -losp-messaging -losp-web -losp-social -losp-wifi -losp-bluetooth -losp-nfc -losp-face -losp-speech-tts -losp-speech-stt -losp-shell -losp-shell-core -lxml2 -losp-vision -ldl
#if ! [ -e $TARGET ];
#then
#	mkdir -p $TARGET
#fi
#cp $DIR_HOME/src/build/Release/tizen-native.node $TARGET
| true
|
f94164d0b2185ad7df4d081927ac51efce28a3d0
|
Shell
|
Stavrosfil/server-dots
|
/.zshrc
|
UTF-8
| 4,617
| 2.671875
| 3
|
[] |
no_license
|
# If you come from bash you might have to change your $PATH.
export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
# export ZSH="$HOME/.oh-my-zsh"
# Plugin management is done through antigen, vendored in this repo.
source $HOME/server-dots/antigen.zsh
# Export sync script thingy
export already_setup=true
# Load the oh-my-zsh's library.
antigen use oh-my-zsh
# Bundles from the default repo (robbyrussell's oh-my-zsh).
antigen bundle zsh-users/zsh-autosuggestions
antigen bundle git
antigen bundle command-not-found
antigen bundle ael-code/zsh-colored-man-pages
antigen bundle "MichaelAquilina/zsh-you-should-use"
# Syntax highlighting bundle.
antigen bundle zsh-users/zsh-syntax-highlighting
# Load the theme (pure prompt, with its async dependency).
antigen bundle mafredri/zsh-async
antigen bundle sindresorhus/pure
# Tell Antigen that you're done.
antigen apply
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load?
# Standard plugins can be found in ~/.oh-my-zsh/plugins/*
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
# source $ZSH/oh-my-zsh.sh
# GEOMETRY_PROMPT=(geometry_status geometry_path geometry_git) # redefine left prompt
# GEOMETRY_RPROMPT=(geometry_exec_time pwd) # append exec_time and pwd right prompt
# Custom cd functionality
# Change directory and immediately list the target's contents; installed
# over the plain `cd` by the alias below.
custom_cd() { cd $1; ls; }
alias cd="custom_cd"
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
# ssh
# export SSH_KEY_PATH="~/.ssh/rsa_id"
# Extra PATH entries for local tooling (flutter appears twice — harmless).
export PATH="$PATH:/usr/sbin"
export PATH="$HOME/tools/cubemx/STM32CubeMX/:$PATH"
export PATH="$HOME/tools/android-studio/bin:$PATH"
export PATH="$HOME/tools/flutter/bin:$PATH"
export PATH="$HOME/.scripts:$PATH"
export PATH="$HOME/tools/flutter/bin:$PATH"
export PATH="${PATH}:${HOME}/.local/bin/"
# export JAVA_HOME="${HOME}/tools/jdk-13/"
export ANDROID_HOME="$HOME/android-sdk"
# export IDF_PATH="$HOME/esp/esp-idf"
export IDF_PATH="$HOME/esp/ESP8266_RTOS_SDK"
export TERM=xterm-256color
# export TERM=xterm-kitty
# export TERM=xterm
export INFLUXDB_CONFIG_PATH="$HOME/config/influxdb.conf"
# Aliases
alias cl="clear"
# alias cubemx="tools/cubemx/STM32CubeMX"
alias zcon="vi $HOME/.zshrc"
alias cfi="vi $HOME/.config/i3/config"
alias ser="ssh stavrosfil@23.97.181.92 -p 1999"
alias dot="$HOME/repos/dotfiles/"
# List packages installed manually (i.e. not part of the initial install).
alias userpackages="comm -23 <(apt-mark showmanual | sort -u) <(gzip -dc /var/log/installer/initial-status.gz | sed -n 's/^Package: //p' | sort -u)"
alias sync="bash $HOME/repos/dotfiles/sync.sh"
alias pip="pip3"
alias mexec="chmod a+x"
alias vi="vim"
### file browsing
# list files
alias ll="ls -Ahl"
# fast find
ff() {
find . -name "$1"
}
### file manipulation
# remove
alias rm="rm -rfv"
alias rmi="rm -rfvi"
# copy
alias cp="cp -rfv"
# move
alias mv="mv -fv"
alias mvi="mv -fvi"
# History directory navigation
# NOTE(review): these assign plain variables, not aliases — it looks like
# the `alias` keyword was dropped (and `1=` .. `9=` are not valid variable
# names).  Confirm and prefix with `alias` if so.
d='dirs -v | head -10'
1='cd -'
2='cd -2'
3='cd -3'
4='cd -4'
5='cd -5'
6='cd -6'
7='cd -7'
8='cd -8'
9='cd -9'
# Set a pywal colourscheme and tile the image as wallpaper via feh.
wal-tile() {
wal -n -i "$@"
feh --bg-tile "$(<"${HOME}/.cache/wal/wal")"
}
# Open files with zathura and disown
za() {
zathura $1 &
disown
kill $PPID
}
# Automatically start a tmux session when connecting with SSH
if [[ -z "$TMUX" ]] && [ "$SSH_CONNECTION" != "" ]; then
tmux -u attach-session -t ssh_tmux || tmux -u new-session -s ssh_tmux
fi
alias tmux="tmux -u"
# Fix repeating characters on tab completion
export LC_ALL="en_US.UTF-8"
# Base16 Shell
BASE16_SHELL="$HOME/.config/base16-shell/"
[ -n "$PS1" ] && \
[ -s "$BASE16_SHELL/profile_helper.sh" ] && \
eval "$("$BASE16_SHELL/profile_helper.sh")"
| true
|
8b144813e3011adf45201f134af81dae7dbbdb7f
|
Shell
|
symmetryinvestments/autowrap
|
/examples/csharp/build.sh
|
UTF-8
| 334
| 2.609375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Build the C# interop example for both x86 and x64, generate the C#
# binding sources, and run the .NET example against the fresh libraries.
#
# Fix: the original shebang was #!/bin/sh, but ${BASH_SOURCE[0]} below is a
# bashism that breaks on POSIX shells such as dash — run under bash.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
# Start from a clean slate.
rm -f csharp
rm -f libcsharp*.so
# 32-bit build.
dub build --arch=x86 --force
mv libcsharp.so libcsharp.x86.so
# 64-bit build (output suppressed).
dub build --arch=x86_64 --force > /dev/null 2>&1
mv libcsharp.so libcsharp.x64.so
rm -f libcsharp.so
# Emit the C# bindings, then run the example with this directory on the
# library path so the native libraries are found.
dub run --config=emitCSharp
LD_LIBRARY_PATH=$DIR dotnet run
| true
|
94c8eb63ce42620ea0cae2d9916a4554d95886ce
|
Shell
|
xoqhdgh1002/SHELL_SCRIPT
|
/TEST/switch.sh
|
UTF-8
| 516
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Signal a process by name: by default send signal 18 (SIGCONT on x86
# Linux — resume a stopped process); with -x send SIGKILL (9) instead.
# Usage: switch.sh [-x] process_name
x_tag=0
while getopts "x" option
do
case $option in
x)
x_tag=1
;;
\?)
echo "Usage : button.sh [-x] process_name" 1>&2
exit 1
;;
esac
done
# Drop the parsed options so $1 is the process name.
shift $(expr $OPTIND - 1)
process_name="$1"
# NOTE(review): `read var` consumes only the first matching PID, so only
# one process is signalled even when pgrep matches several.
if [ $x_tag -eq 1 ]; then
pgrep -f $process_name | { read var; kill -9 $var ;}
else
pgrep -f $process_name | { read var; kill -18 $var ;}
fi
| true
|
d17d96dbd99b0d1838515ea3785480503003fc1f
|
Shell
|
appfirst/poll-aroid
|
/check_newrelic.sh
|
UTF-8
| 1,158
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Nagios-style wrapper that polls NewRelic metrics via the AfPoller plugin.
# Fill in the three configuration values below before use.
# Include path so we can execute commands like 'python'
PATH=$PATH:/usr/bin
export PATH
# script location
AfPath=/usr/share/appfirst/plugins/libexec/poll-aroid/AfPoller
## NewRelic Specific Values
accessKey=""
## you can find your application ID at https://rpm.newrelic.com/api/explore/applications/list
accessAppId=""
## Application Name
## Name metrics are sorted with in statsd buckets
appName=""
## From "Applications > Metric Names copy metric name"
## https://rpm.newrelic.com/api/explore/applications/names
## for example "Agent/MetricsReported/count"
## or separated by comma Apdex, Agent/MetricsReported/count
## or get just one value from metric Agent/MetricsReported/count%min_response_time
## or Apdex%count, Apdex%score, Agent/MetricsReported/count%min_response_time
metricPath=""
## any additional program execution flags, IE --test or --dry-run (or both)
## NOTE(review): $flags is defined but never passed to the command below —
## confirm whether it should be appended.
flags="--test --dry-run"
python $AfPath/AfPoller.py --plugin=newrelic --newrelic-access-key-id=$accessKey --newrelic-access-app-id=$accessAppId -m "$metricPath" -a $appName
## Output basic nagios-format "OK" so the polled data script knows this executed
echo "$0 OK"
| true
|
0de4916a5f671c067798a2a67b3f57447ab8e4a2
|
Shell
|
tdfischer/buildenv
|
/lib/buildenv/fs.sh
|
UTF-8
| 1,481
| 4.09375
| 4
|
[] |
no_license
|
source $BUILDENV_HOME/lib/load.sh
_buildenv_lib_include buildenv/debug.sh
# Sources a file if it exists
# Usage:
# _buildenv_source_file /path/to/file
#
# Returns 0 on success, 1 on failure.
# The file is sourced into the caller's shell, so any functions/variables
# it defines become visible to the caller.
function _buildenv_source_file() {
if [ -f "$1" ];then
_buildenv_debug "Sourcing $1"
source $1
return 0
fi
return 1
}
# Searches for and loads a file
# Checks the following locations:
# $BUILDENV_HOME/$file
# $BUILDENV_HOME/config/$BUILDENV_CONFIG/$file
# ~/.buildenv/$file
# ~/.buildenv/config/$BUILDENV_CONFIG/$file
# All matching files are sourced in that order, so definitions in
# later (user-level) files override earlier ones.
function _buildenv_load_file() {
_buildenv_source_file $BUILDENV_HOME/$1
_buildenv_source_file $BUILDENV_HOME/config/$BUILDENV_CONFIG/$1
_buildenv_source_file ~/.buildenv/$1
_buildenv_source_file ~/.buildenv/config/$BUILDENV_CONFIG/$1
}
# Print the path of the current master environment, or of a file beneath
# it when an argument is given.
function _buildenv_environment_path() {
    local _suffix=""
    [ -n "$1" ] && _suffix="/$1"
    echo "${BUILDENV_HOME}/environments/${BUILDENV_MASTER}${_suffix}"
}
# Symlinks $HOME/$1 to config/$BUILDENV_CONFIG/dotfiles/$1.
# Refuses to touch $HOME/$1 if it is a real (non-symlink) file; replaces
# an existing symlink.
function _buildenv_config_symlink() {
local _src=$HOME/$1
local _dst=$BUILDENV_HOME/config/$BUILDENV_CONFIG/dotfiles/$1
# Fix: the debug message referenced the undefined $_dest (typo for $_dst).
_buildenv_debug "Linking $_src to $_dst"
if [ -L $_src -o ! -e $_src ]; then
if [ -e $_src ]; then
unlink $_src || echo "Warning: Could not unlink $_src"
fi
ln -s $_dst $_src || echo "Warning: Could not link $_src to $_dst"
else
echo "Warning: $_src is a real file! Not symlinking to $_dst"
fi
}
| true
|
e5da0fdd5efa7e5c15bb26f65b9ac455ebfce7b3
|
Shell
|
geeky2/awsdev
|
/misc-scripts/create_key_pair.sh
|
UTF-8
| 310
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Create an EC2 key pair named after the first CLI argument and save the
# AWS response to <name>.pem in the current directory.
AWS_HOME_DIR=
WORK_DIR=.
PEM_FILE=bar
KEY_PAIR_NAME=$1
# Create the key pair $1 via the AWS CLI, teeing the response into
# ${NAME}.pem.  Exits 100 if the AWS call fails.
function CREATE_KEY_PAIR {
NAME=$1
PEM_FILE=${NAME}.pem
# Fix 1: tee into the computed $PEM_FILE instead of the hard-coded
#        /home/mark/aws_development/lori.pem debugging leftover.
# Fix 2: `$?` previously tested tee's status, not the aws call's;
#        check the first pipeline stage via PIPESTATUS instead.
aws ec2 create-key-pair --key-name "${NAME}" | tee "${PEM_FILE}"
if [ "${PIPESTATUS[0]}" -ne 0 ];
then
echo "bad name"
exit 100
fi
}
CREATE_KEY_PAIR "$KEY_PAIR_NAME"
| true
|
ce559f5edbe75cd5a11afcf5ae86880a3ba5b659
|
Shell
|
sjapalucci/Random-GIS-Scripts
|
/padep-update.sh
|
UTF-8
| 2,845
| 4.09375
| 4
|
[] |
no_license
|
#! /bin/bash
################################################################
# Simple Utility to automate the download and reprojection
# of the PA Department of Environmental Protections locational
# data set on Oil & Gas Wells
################################################################
SELFPATH=$(readlink -f $0)
SELFDIR=`dirname ${SELFPATH}`
SELF=`basename ${SELFPATH}`
LOGGER="${SELF}.log"
#Spatial Reference
SPROJ=4269 #The coordinate system the DEP uses
DPROJ=26915 #The coordinate system that the software prefers
#URL to Open GIS Data Access for the Commonwealth of Pennsylvania.
#Updated monthly
URL='ftp://ftp.pasda.psu.edu/pub/pasda/dep/'
#The filename is usually appended with the 4 digit year and 2 digit month
#(so the default only works while the current month's file is published).
FILENAME="OilGasLocations_ConventionalUnconventional$(date +%Y_%m)"
#Destination Filename
PRODFILE='PADEPConvUnconv'
DESTDIR=`pwd`
# Print help text and exit with status 1.
function usage {
echo "
usage: $SELF [options]
-h Print this help message
-q Suppress screen output
-i <filename> filename to grab from PASDA
-p <production_filename> The processed filename
-l <log> optional print errors and output to this file.
default ${SELF}.log
-d <destdir> store output here.
default is current directory"
exit 1
}
# Timestamped logger: always appends to $LOGGER; additionally echoes to
# the screen unless $QUIET is "true".
function rlog {
case "$QUIET" in
true) echo $(date) $1 >> $LOGGER ;;
*)    echo $(date) $1 | tee -a $LOGGER ;;
esac
}
while getopts :hqfi:p:o:l: args
do
case $args in
h) usage ;;
q) QUIET='true' ;; ## Suppress messages, just log them.
l) LOGGER="$OPTARG" ;;
i) FILENAME="$OPTARG" ;;
p) PRODFILE="$OPTARG" ;;
o) DESTDIR="$OPTARG" ;;
:) rlog "The argument -$OPTARG requires a parameter" ;;
*) usage ;;
esac
done
# Main workflow: download this month's zip from PASDA, unzip it, filter
# to the eight south-western PA counties of interest, reproject, and
# install the result into $DESTDIR with a spatial index.
function main {
rlog "retrieving $FILENAME from PASDA"
curl -o $DESTDIR/$FILENAME.zip $URL/$FILENAME.zip
rlog "Decompressing $FILENAME.zip to $DESTDIR/"
unzip -od $DESTDIR $DESTDIR/$FILENAME.zip
rlog "Filtering and Reprojecting $FILENAME"
mkdir -p $DESTDIR/temp
# First pass: SQL filter to the target counties; the temp shapefile
# name embeds $$ (this shell's PID) to avoid collisions between runs.
ogr2ogr -f "ESRI Shapefile" $DESTDIR/temp/$PRODFILE$$.shp $DESTDIR/$FILENAME.shp -sql "SELECT * FROM $FILENAME WHERE COUNTY IN ('Westmoreland','Allegheny','Washington','Greene','Fayette','Butler','Beaver','Armstrong') ORDER BY COUNTY DESC"
# Second pass: reproject from the DEP's CRS ($SPROJ) to the target CRS ($DPROJ).
ogr2ogr -f "ESRI Shapefile" $DESTDIR/temp/$PRODFILE.shp $DESTDIR/temp/$PRODFILE$$.shp -t_srs EPSG:$DPROJ -s_srs EPSG:$SPROJ
rlog "Moving file to destination and cleaning up any remaining waste"
# Move the finished shapefile component set into place, then remove
# the temp working directory.
mv $DESTDIR/temp/$PRODFILE.* $DESTDIR/
find $DESTDIR/temp -type f -exec rm -f {} \;
rmdir $DESTDIR/temp
rlog "Set Permissions and Creating index on $PRODFILE"
chown nobody:nobody $DESTDIR/$PRODFILE.*
chmod 644 $DESTDIR/$PRODFILE.*
# shptree presumably builds a spatial (.qix) index on the shapefile —
# confirm against the consuming software's requirements.
shptree $DESTDIR/$PRODFILE
rlog "###############################################"
# NOTE(review): echo (used by rlog) does not interpret \n without -e,
# so the escapes below are printed literally.
rlog "Complete\n\\n"
}
main "$@"
| true
|
5dc580fea2ef3ce74c64e5ef754139f80b70e524
|
Shell
|
txemaleon/dotfiles
|
/install/installer.sh
|
UTF-8
| 1,110
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# Bootstrap a machine: link dotfiles into $HOME, install Homebrew and
# its bundle, restore app settings via mackup, install npm tools, and
# set up oh-my-zsh with extra plugins. macOS-oriented (iCloud paths).
# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until `.macos` has finished
while true; do
sudo -n true
sleep 60
# kill -0 probes the parent shell; exit the refresher once it is gone.
kill -0 "$$" || exit
done 2>/dev/null &
# Install dotfiles
# ${0:a:h} is zsh for "absolute directory of this script", so DOTFILES
# is its parent — presumably the repository root; confirm layout.
DOTFILES=$(dirname ${0:a:h})
# Symlink every file in config/ into $HOME as a dotfile, replacing any
# existing file or link of the same name (destructive rm -rf).
for FILE in $DOTFILES/config/*; do
f=$(basename $FILE)
rm -rf ~/.$f
echo "Linking $FILE => .$f"
ln -s $FILE ~/.$f
done
# Install HomeBrew
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
# Install packages; brew bundle presumably reads a Brewfile from the
# working directory — confirm.
brew bundle
# Link mackup
# Mackup config is kept in iCloud Drive so settings sync between machines.
ln -s ~/Library/Mobile\ Documents/com~apple~CloudDocs/.config/.mackup ~/.mackup
ln -s ~/Library/Mobile\ Documents/com~apple~CloudDocs/.config/.mackup.cfg ~/.mackup.cfg
mackup restore
# Install node tools
# Strip '#' comments from Npmfile, install each remaining entry globally.
sed 's/#.*//' Npmfile | xargs npm install -g
# Install ssh key
./gitconfig.sh
# Configure macos
# Sourced (not executed) so anything it exports affects this shell.
. ./macos.sh
# Oh-my-zsh
sh -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
# Clone third-party plugins into oh-my-zsh's custom plugin directory.
cd ~/.oh-my-zsh/custom/plugins
gh repo clone Aloxaf/fzf-tab
gh repo clone zsh-users/zsh-autosuggestions
gh repo clone zsh-users/zsh-syntax-highlighting
| true
|
3fc0dd17b50ba6250387a2116a104cfa3d14dc4c
|
Shell
|
NeoTim/skia-buildbot
|
/skolo/bash/skolo.sh
|
UTF-8
| 1,682
| 2.71875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Utilities for accessing the skolo.
# Should be sourced from $HOME/.bashrc
# SSH shortcuts to each skolo jumphost (100.115.95.x).
# NOTE(review): several aliases share an address (rack1/rpi -> .143,
# rack2/win2 -> .133, rack3/win3 -> .134) — presumably alternate names
# for the same host; confirm before editing.
alias skolo_internal='ssh chrome-bot@100.115.95.131'
alias skolo_rack1='ssh chrome-bot@100.115.95.143'
alias skolo_rack2='ssh chrome-bot@100.115.95.133'
alias skolo_rack3='ssh chrome-bot@100.115.95.134'
alias skolo_rpi='ssh chrome-bot@100.115.95.143'
alias skolo_rpi2='ssh chrome-bot@100.115.95.135'
alias skolo_win2='ssh chrome-bot@100.115.95.133'
alias skolo_win3='ssh chrome-bot@100.115.95.134'
# Sets up port-forwarding to the Router.
# Each alias opens Chrome at https://localhost:8888 and then tunnels
# local port 8888 to the rack router's HTTPS interface (192.168.1.1:443)
# through the corresponding jumphost. (The browser opens first; reload
# the tab once the ssh tunnel is up.)
alias skolo_internal_router='google-chrome https://localhost:8888; ssh -L 8888:192.168.1.1:443 chrome-bot@100.115.95.131'
alias skolo_rack1_router='google-chrome https://localhost:8888; ssh -L 8888:192.168.1.1:443 chrome-bot@100.115.95.143'
alias skolo_rack2_router='google-chrome https://localhost:8888; ssh -L 8888:192.168.1.1:443 chrome-bot@100.115.95.133'
alias skolo_rack3_router='google-chrome https://localhost:8888; ssh -L 8888:192.168.1.1:443 chrome-bot@100.115.95.134'
alias skolo_rpi_router='google-chrome https://localhost:8888; ssh -L 8888:192.168.1.1:443 chrome-bot@100.115.95.143'
alias skolo_rpi2_router='google-chrome https://localhost:8888; ssh -L 8888:192.168.1.1:443 chrome-bot@100.115.95.135'
alias skolo_win2_router='google-chrome https://localhost:8888; ssh -L 8888:192.168.1.1:443 chrome-bot@100.115.95.133'
alias skolo_win3_router='google-chrome https://localhost:8888; ssh -L 8888:192.168.1.1:443 chrome-bot@100.115.95.134'
# Connects to both the router and the switch.
# Two local ports are forwarded: 8888 -> router, 8889 -> PoE switch.
alias skolo_rpi2_network='google-chrome https://localhost:8888; google-chrome https://localhost:8889; ssh -L 8888:192.168.1.1:443 -L 8889:rack4-shelf1-poe-switch:443 chrome-bot@100.115.95.135'
| true
|
106b4aab26612cd9dff0539634ce5252817ac8ee
|
Shell
|
STROMANZ/AoC
|
/day01/day01.sh
|
UTF-8
| 415
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Advent of Code 2020 day 1: find the entries in input.txt that sum to
# 2020 and print their product (part 1: a pair, part 2: a triple).
numbs=$(cat input.txt)

# Part 1: for each number, grep the list for its exact complement
# (anchored ^...$); both members of a matching pair land in sum2020.
# NOTE(review): a lone 1010 would match itself — assumed not to occur
# in the puzzle input.
sum2020=$(for nr in ${numbs}
do
    # $((...)) replaces the original `expr` call (one fork per number).
    grep "^$((2020 - nr))$" <<< "${numbs}"
done)
a=$(echo ${sum2020} | awk '{print $1}')
b=$(echo ${sum2020} | awk '{print $2}')
echo $((a * b))

# Part 2: brute-force all ordered triples; print the product of the
# first one that sums to 2020 and stop.
for k in ${numbs}
do
    for l in ${numbs}
    do
        for m in ${numbs}
        do
            if [ $((k + l + m)) -eq 2020 ]; then
                echo "$((k * l * m))"
                exit
            fi
        done
    done
done
| true
|
06ef6135edc6b96e84e41e893ca1d49b7e919494
|
Shell
|
rgabriana/Work
|
/old_work/svn/gems/branches/dev_branch_sppa/debs/adr/opt/enLighted/adr/adr_tracker.sh
|
UTF-8
| 280
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Watchdog for the ADR service: start the jar if no JVM is running it.
{
    # pgrep -f matches against the full command line, replacing the
    # fragile `ps -ef | grep` + [[ =~ ]] idiom (pgrep never matches
    # its own process, so no self-match filtering is needed).
    if pgrep -f "adr\.jar" > /dev/null
    then
        echo "ADR is running"
    else
        echo "Starting ADR job"
        # Launch in the background so the watchdog returns immediately.
        java -Djava.util.logging.config.file=/opt/enLighted/adr/logging.properties -jar /opt/enLighted/adr/adr.jar &
    fi
}
| true
|
0559099136b77e7da5ac9250a7b12a5bfbfe865e
|
Shell
|
bryanskene/sudoku
|
/runall
|
UTF-8
| 821
| 2.75
| 3
|
[] |
no_license
|
#! /bin/bash
# Run the solver over every board in boards/ and summarize recursion counts.
# FUTURE: analyze output of each run, find easy/hardest, etc.

# Enumerate board names by globbing instead of parsing `ls` output
# (word-splitting `$(ls ...)` breaks on unusual filenames); nullglob
# makes an empty boards/ directory yield an empty list rather than the
# literal pattern.
shopt -s nullglob
boards=()
for path in boards/*; do
  boards+=("${path##*/}")
done
#boards="
# AA-c AA-d AA-e AA-f AA-h
# Cessi-1
# F-7
# b1
# b2-symmetry
# dom dom1 dom3 dom3b
# evil
# evil2
# k2
# killer1
#"
for b in "${boards[@]}"; do
  echo "===> DOING: $b"
  ./sudoku -r 3 -b "$b"
done
# Count i_recurse hits per .out file. -print0/-0 keeps odd filenames
# intact; -r stops xargs from running grep with no file arguments
# (which would read stdin and hang) when no .out files exist.
find . -name "*.out" -print0 | xargs -0 -r grep -c i_recurse
# ./sudoku -r 3 -d -b AA-c -d
# ./sudoku -r 3 -d -b AA-d
# ./sudoku -r 3 -d -b AA-e
# ./sudoku -r 3 -d -b AA-f
# ./sudoku -r 3 -d -b AA-g
# ./sudoku -r 3 -d -b AA-h
# ./sudoku -r 3 -d -b Cessi-1
# ./sudoku -r 3 -d -b F-7
# ./sudoku -r 3 -d -b b1
# ./sudoku -r 3 -d -b dom
# ./sudoku -r 3 -d -b dom1
# ./sudoku -r 3 -d -b dom3
# ./sudoku -r 3 -d -b dom3b
# ./sudoku -r 3 -d -b evil
# ./sudoku -r 3 -d -b evil2
# ./sudoku -r 3 -d -b k2
# ./sudoku -r 3 -d -b killer1
| true
|
9f0c10d6b5c8cec7eba38c9fe5c4893111aa503c
|
Shell
|
kamala421/SignLanguageRecognition
|
/scripts/changePersonID.sh
|
UTF-8
| 1,309
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# $Id: changePersonID.sh,v 1.2 2005/10/04 09:28:21 dreuw Exp $
#
# Rename every video/image/sequence file belonging to one person ID to
# a new ID, and rewrite the paths stored inside the per-class .seq files.
#set -xv

# cmdline input variables
PERSON_ID=$1;
NEW_PERSON_ID=$2;

if [ -z "${PERSON_ID}" ] || [ -z "${NEW_PERSON_ID}" ]; then
  echo "usage: changePersonID.sh <PERSON_ID> <NEW_PERSON_ID>";
  exit 1;  # was a bare `exit` (status 0); usage errors should be non-zero
else
  echo "changing ID of person nr ${PERSON_ID} into ${NEW_PERSON_ID}";
fi

# Gesture classes are numbered 1..35.
for i in `seq 1 35`; do
  echo "changing videos for class $i";
  for file in /u/image/I6-Gesture/data/video/${i}/${PERSON_ID}_${i}_*; do
    # Compute the new name once (replace the leading "/<old id>_" path
    # component) instead of running sed twice per file.
    newfile=`echo ${file} | sed -e "s/\/${PERSON_ID}_/\/${NEW_PERSON_ID}_/"`
    echo ${file} ${newfile}
    mv ${file} ${newfile}
  done;
  echo "changing images for class $i";
  for file in /u/image/I6-Gesture/data/image/${i}/${PERSON_ID}_${i}_*; do
    newfile=`echo ${file} | sed -e "s/\/${PERSON_ID}_/\/${NEW_PERSON_ID}_/"`
    echo ${file} ${newfile}
    mv ${file} ${newfile}
  done;
  echo "changing paths in sequence file(s) for class $i";
  for file in /u/image/I6-Gesture/data/image/${i}/${NEW_PERSON_ID}_${i}_*.seq; do
    # Truncate (>) instead of append (>>): appending to a stale .tmp
    # left by an earlier run would duplicate its content.
    sed -e "s/\/${PERSON_ID}_/\/${NEW_PERSON_ID}_/" $file > $file.tmp
  done;
  for file in /u/image/I6-Gesture/data/image/${i}/${NEW_PERSON_ID}_${i}_*.seq.tmp ; do
    # ${file%.tmp} strips only the trailing ".tmp"; the original
    # ${file//.tmp} removed *every* ".tmp" substring in the path.
    mv $file ${file%.tmp} ;
  done
done;
| true
|
8d671d640bfcb3e754eaed4d2710b20f2a411f0c
|
Shell
|
rojinnew/alpha_yelper
|
/SW/import.sh
|
UTF-8
| 826
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/sh
# Bulk-load the Yelp dataset CSVs from the current directory into a
# fresh Neo4j graph database, then add uniqueness constraints.
NEO4J_HOME=./neo4j-community-2.3.3
TARGET=$NEO4J_HOME/data/graph.db  # destination graph database directory
SOURCE_DIR=$(realpath .)  # absolute path to the CSV inputs
# Offline bulk importer: creates Business/User nodes and the
# friends_with/reviewed/tipped relationships from the CSV files.
$NEO4J_HOME/bin/neo4j-import \
--into $TARGET \
--multiline-fields=true \
--id-type string \
--nodes:Business $SOURCE_DIR/business.csv \
--nodes:User $SOURCE_DIR/user.csv \
--relationships:friends_with $SOURCE_DIR/friends.csv \
--relationships:reviewed $SOURCE_DIR/review.csv \
--relationships:tipped $SOURCE_DIR/tip.csv
# Run the constraint DDL below; '-file -' reads it from stdin (heredoc).
$NEO4J_HOME/bin/neo4j-shell -path $TARGET -file - <<EOF
// add constraints; note that unique will also add an index to that property,
// so we don't need to do it separately
CREATE CONSTRAINT ON (u:User) ASSERT u.user_id IS UNIQUE;
CREATE CONSTRAINT ON (b:Business) ASSERT b.business_id IS UNIQUE;
CREATE CONSTRAINT ON (r:reviewed) ASSERT r.review_id IS UNIQUE;
EOF
| true
|
c7dadbb640fa88f4d8c1734e7f3ed1c4c275eb2e
|
Shell
|
dyoungwd/ManjaroWiki
|
/wiki/wiki.sh
|
UTF-8
| 1,117
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Open an Arch wiki page (named by the positional arguments) in a
# console web browser.
if [ "--help" == "$1" ]; then
  this=$(basename "$0")
  echo
  echo "Usage: $this archwiki page name"
  echo
  echo " where archwiki page name is title of page on wiki.archlinux.org"
  echo
  echo "Examples:"
  echo " $this ssh"
  echo " $this the arch way"
  echo " $this beginners guide"
  echo
  exit 0
fi
# try to detect a console browser — search PATH via `command -v`
# instead of the old hardcoded /usr/bin/* checks, so browsers installed
# elsewhere are found too. Order preserved: Lynx first because of the
# pretty color output, Elinks second because it is the newer fork of
# the original Links.
run_browser=
for candidate in lynx elinks links; do
  if run_browser=$(command -v "$candidate"); then
    break
  fi
done
if [ -z "$run_browser" ]; then # no console browser found -> exit
  echo "Please install one of the following packages to use this script: elinks links lynx"
  exit 1
fi
query="$*" # get all params into single query string
query=${query// /_} # substitute spaces with underscores in the query string
# load ArchWiki page with automatic redirect to the correct URL:
"$run_browser" "https://wiki.archlinux.org/index.php/Special:Search/${query}"
exit $? # return browser's exit code
| true
|
196cb6729d3d62b2f571721c25655842819a8671
|
Shell
|
robstwd/dotfiles
|
/bspwm/panel
|
UTF-8
| 597
| 2.859375
| 3
|
[] |
no_license
|
#! /bin/sh
# Launch the bspwm status bar: funnel window-manager state, window
# title, and custom status output through a FIFO into `bar`.
# NOTE(review): PANEL_FIFO is expected from the environment (it is
# never set here) — confirm the session startup exports it.
# NOTE(review): PANEL_HEIGHT is not referenced below.
PANEL_HEIGHT=10
# Refuse to start a second copy (this process itself counts as one,
# hence the > 1 comparison).
if [ $(pgrep -cx panel) -gt 1 ] ; then
printf "%s\n" "The panel is already running." >&2
exit 1
fi
# The panel can now be properly killed with `pkill -x panel`.
# On any exit path, kill the whole process group so the background
# producers below die with the panel.
trap 'trap - TERM; kill 0' INT TERM QUIT EXIT
# Recreate the FIFO that all producers write into.
[ -e "$PANEL_FIFO" ] && rm "$PANEL_FIFO"
mkfifo "$PANEL_FIFO"
# Producers: wm state reports, window title (prefixed 'T'), custom status.
bspc subscribe report > "$PANEL_FIFO" &
xtitle -sf 'T%s' > "$PANEL_FIFO" &
bspwm_status2 > "$PANEL_FIFO" &
#~ clock -sf 'S%a %d %b %H:%M' > "$PANEL_FIFO" &
# Consumer: format the merged stream and feed it to the bar program.
cat "$PANEL_FIFO" | panel_bar_current | bar -f "-*-dejavu sans mono for powerline-medium-r-normal-*-17-*-*-*-*-*-*-*" -u 2 &
# Block until the background pipeline exits (or a signal fires the trap).
wait
| true
|
e5ff3df52e4a5fafc6a5b5723bcc03cc75434c91
|
Shell
|
BaliStarDUT/hello-world
|
/code/shell/until_ping.sh
|
UTF-8
| 266
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Sweep 192.168.1.30 .. 192.168.1.129 and report which hosts answer a
# single ping (1 second deadline per host).
NETWORK=192.168.1
IP=30
until [ "$IP" -ge "130" ]
do
  printf '%s' "Testing machine at IP address :${NETWORK}.${IP}..."
  # Test ping's exit status directly instead of inspecting $? afterwards.
  if ping -c1 -w1 "${NETWORK}.${IP}" > /dev/null 2>&1
  then
    echo "OK"
  else
    echo "Failed"
  fi
  IP=$((IP + 1))  # arithmetic expansion instead of the legacy `let`
done
exit 0
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.