blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f735eac0f674c4a91a6c45dc318b2f3b8d67e18b | Shell | amolsaudar123/asimio-postgres | /db_dvdrental/scripts/db-restore.sh | UTF-8 | 485 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# Restore the dvdrental sample database from its tar backup into $DB_NAME,
# then grant the application role $DB_USER the usual CRUD privileges.
# Expects POSTGRES_USER, DB_NAME and DB_USER to be set in the environment
# (as they are inside the official postgres Docker image init hooks).
echo "Importing data into DB $DB_NAME"
# Fail fast: without this check a broken restore still reported success
# and permissions were granted on a half-restored database.
pg_restore -U "$POSTGRES_USER" -d "$DB_NAME" /tmp/data/db_dvdrental/dvdrental.tar \
  || { echo "pg_restore failed for DB $DB_NAME" >&2; exit 1; }
echo "$DB_NAME DB restored from backup"
echo "Granting permissions in DB '$DB_NAME' to role '$DB_USER'."
# ON_ERROR_STOP makes psql exit non-zero on the first failing statement,
# so init-script runners notice a failed GRANT.
psql -v ON_ERROR_STOP=on -U "$POSTGRES_USER" -d "$DB_NAME" <<-EOSQL
GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO $DB_USER;
GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO $DB_USER;
EOSQL
echo "Permissions granted"
| true |
b9e936bb07c510cd56492d48fe5e38770b805fa0 | Shell | stones/iocage-plugins | /phlex/setup.sh | UTF-8 | 1,331 | 2.5625 | 3 | [] | no_license | # Create the jail
# Provision the "phlex" jail: create it, wire PHP-FPM to a unix socket,
# deploy the Phlex web app from GitHub, and start nginx + php-fpm.
jail=phlex

# Create the jail
iocage create -n "$jail" -p ./pkg.json -r 11.2-RELEASE ip4_addr="vnet0|192.168.1.62/24" defaultrouter="192.168.1.1" vnet="on" allow_raw_sockets="1" boot="on"

# Point PHP-FPM at a unix socket owned by the web server user
for fpm_line in 'listen=/var/run/php-fpm.sock' 'listen.owner=www' 'listen.group=www' 'listen.mode=0660'; do
  iocage exec "$jail" "echo '$fpm_line' >> /usr/local/etc/php-fpm.conf"
done

# Update the PHP ini from the production template
iocage exec "$jail" cp /usr/local/etc/php.ini-production /usr/local/etc/php.ini
iocage exec "$jail" sed -i '' -e 's?;date.timezone =?date.timezone = "Universal"?g' /usr/local/etc/php.ini
iocage exec "$jail" sed -i '' -e 's?;cgi.fix_pathinfo=1?cgi.fix_pathinfo=0?g' /usr/local/etc/php.ini

# Pull down the latest Phlex source (shallow clone)
iocage exec "$jail" git clone https://github.com/d8ahazard/Phlex.git /usr/local/www/phlex --depth 1

# Hand the web root to the nginx/php user
iocage exec "$jail" chown -R www:www /usr/local/www

# Copy site configuration into the jail's filesystem from the host side
cp ./site.nginx /mnt/iocage/jails/$jail/root/usr/local/etc/nginx/nginx.conf

# Enable both services at boot, then start them now
for rc_knob in nginx_enable php_fpm_enable; do
  iocage exec "$jail" sysrc "$rc_knob=YES"
done
iocage exec "$jail" service nginx start
iocage exec "$jail" service php-fpm start
| true |
d25a8a6fdd4f868683249d0ec3a464e7649a05ee | Shell | freducom/handballyolo | /01getdata.sh | UTF-8 | 551 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Download a handball match from YouTube and rip one frame every 5 seconds
# (up to 1000 frames) into $DATAFOLDER/$INPUTFILE/, then delete the video.
# Requires youtube-dl and ffmpeg on the PATH.
# Abort on any failed command / unset variable / failed pipeline stage,
# so ffmpeg and rm never run against a video that failed to download.
set -euo pipefail

#Config
INPUTFILE=handballgame01
DATAFOLDER=dataset
YOUTUBEVIDEO=4pOkwP-ZqyI

#Create Datafolder (-p: no error when it already exists on a re-run)
mkdir -p "$DATAFOLDER"

#Download YouTube video
printf 'DOWNLOADING YOUTUBEVIDEO %s\n' "$YOUTUBEVIDEO"
# %(ext)s is a youtube-dl output template, not a shell expansion
youtube-dl "$YOUTUBEVIDEO" -o "${DATAFOLDER}/${INPUTFILE}.%(ext)s" --quiet

#Rip images from video: fps=1/5 = one frame every 5 seconds
printf 'RIPPING IMAGES FROM VIDEO\n'
mkdir -p "$DATAFOLDER/$INPUTFILE"
ffmpeg -i "$DATAFOLDER/$INPUTFILE.mp4" -vframes 1000 -vf fps=1/5 "$DATAFOLDER/$INPUTFILE/thumb%04d.jpg" -hide_banner -loglevel panic

#Delete video
rm "$DATAFOLDER/$INPUTFILE.mp4"
| true |
cce77e16328204d95257d00e279e3973de70639e | Shell | craftofelectronics/ardu-see | /racket/tvm/macosx/bin/kroc | UTF-8 | 35,181 | 3.046875 | 3 | [] | no_license | #! /bin/sh
#{{{ make sure we have bash
if [ "$BASH_VERSION" = "" ]; then
BASH=
# search for "bash" in the PATH environment
for dir in `printf '%s' "$PATH" | tr ':' '\n'`; do
if [ -f $dir/bash ] && [ -x $dir/bash ]; then
BASH=$dir/bash
break
fi
done
if [ "$BASH" = "" ]; then
printf 'could not find bash!\n'
exit 1
fi
exec $BASH "$0" "$@"
fi
#}}}
# should be in bash when we get here
#
# occam for all (ofa) project KRoC (Kent Retargetable occam Compiler)
# KRoC driver script
# Copyright (C) 1997 D.J. Beckett
# Modifications copyright (C) 2000-2004 Fred Barnes <frmb@kent.ac.uk>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# If this file has the name "kroc.in" then it is a template for the kroc
# driver program; to generate the actual kroc script, use:
# ./build --force-configure configure-kroc make-kroc [install-kroc]
# in the KRoC source tree
#
#{{{ set $SED and $progname
SED="sed"
progname=$(printf '%s' "$0" | $SED 's/^.*\///')
#}}}
#{{{ setup various variables
CC=gcc
LD=ld
ARCH_LDFLAGS="-lpthread"
ARCH_ASFLAGS=""
# Only the 'kroc' program need be in the users path
#PATH=$KROC/bin:$PATH
package_version="1.5.0-pre5"
if [ "x" = "x" ]; then
dversion='2.0'
else
dversion='2.0 (RMoX)'
fi
target_canonical="i386-apple-darwin10.6.0"
target_cpu="i386"
target_os="darwin10.6.0"
# -kc : cgr-mp CCSP interface
octranopts="-kc -mp"
ccopts="-fomit-frame-pointer -fno-defer-pop -DHOSTOS_DARWIN -mdynamic-no-pic -DKROC_USES_PTHREADS"
deplibs=""
ocllibs=""
ocrtslibs="-lm "
extcflags="-fomit-frame-pointer -fno-defer-pop -DHOSTOS_DARWIN -mdynamic-no-pic -DKROC_USES_PTHREADS"
dtraces=0
case "$target_os" in
cygwin*)
octranopts="$octranopts -nsymops --cnpfx _"
;;
darwin*)
octranopts="$octranopts -nsymops --cnpfx _"
;;
solaris*)
# solaris stock assembler doesn't eat from stdin (go via tempfile)
octranopts="$octranopts -es"
;;
esac
fatal=
defaultlibraries=1
#}}}
#{{{ options to occ21
# Warnings:
# -nwca : No warnings for CHAN OF ANY
# -nwgy : No warnings on GUY construct
# -nwu : No Unused Name Warnings
# -nwp : No Unused Parameter Warnings
occ_no_warn_opts="-nwca -nwgy -nwp -nwu"
# -walign : Warn on alignment checks
# -wall : Enable all warnings
# -wd : Provide descoped name warnings
# -wo : Provide overlap check warnings
occ_warn_opts="-walign -wall -wd -wo -wqual"
# Processor class: -tX argument to occ21 and .kXX temporary file suffix
proc_class='t8'
tsuffix='tce'
#
# -w : Full Code Insertion
# -y : Disable Library I/O
# -etc : Enable ETC code generation.
# -znd : No Debug Information At All
# -znec : Allow comments anywhere (makes folded source legal in more cases)
# -udo : User defined operators
# -init : INITIAL declarations
## -s : run in STOP error mode
##>> these now handled seperately <<##
### -revalt : use reversed ALT disable where possible (more efficient)
### -zen : enhanced ALT enables (more efficient)
### -zep : ALT pre-enabling sequence (very efficient)
#
# -xin : Enable extended input
# Default: medium warnings
occwopts=
occopts="-etc -w -y -znd -znec -udo -zncc -init -xin -mobiles -mpt -zrpe "
case "$target_cpu" in
i386 | i486 | i586 | i686 | x86_64)
occopts="$occopts -zcxdiv -zcxrem -zzs"
;;
sparc*)
occopts="$occopts -tbe"
;;
powerpc64)
occopts="$occopts"
;;
mips|mipsbe|mipsle)
occopts="$occopts"
;;
esac
#}}}
#{{{ extra CC options
# necessary for dynamic procs (makes all synbols appear in the dynamic symbol table)
case "$target_os" in
darwin*)
extraccopts=${EXTRACCOPTS:--read_only_relocs suppress -arch i386}
;;
solaris*)
extraccopts=${EXTRACCOPTS:-}
;;
*)
extraccopts=${EXTRACCOPTS:--Xlinker -E}
;;
esac
#}}}
#{{{ setup cc, as and ld
cc=${CC:-gcc}
as=${AS:-as}
asflags=""
ld=${LD:-ld}
case "$target_os" in
darwin*)
ldopts="-dynamiclib"
libext=".dylib"
;;
*)
ldopts="-shared"
libext=".so"
;;
esac
# Assembler options needed A1 and A2 are used to define alignment.
# Different versions of the gnu assembler require different values:
# try A1=2, A=1 (powers rather than absolutes).
asflags="$asflags --defsym A1=4 --defsym A2=2 -W"
#}}}
#{{{ path temporaries build up from options
opts_libs=""
opts_libpaths=""
opts_incpaths=""
#}}}
#{{{ various local settings
intree=
use_occ21_revalt=1
use_occ21_zen=1
use_occ21_zep=1
default_errormode_stop=0
showall=0
srcs=
verbose=
makelib=
compile=yes
translate=yes
link=yes
rmoxlink=
outfile=
dorun=yes
showversion=
showlibpath=
showincpath=
showascc=
showcflags=
showcc=
showld=
showas=
showldflags=
showasflags=
showkrocpath=
showautovars=
delete_temp=yes
enable_udc=
skip_link=
debugkroc=0
dumpxml=
usenocc=
nocc="/nocc"
noccopts="--specs-file /nocc.specs.xml"
linkstatic=0
#}}}
#{{{ process command-line arguments
while :
do
case $# in
0)
break
;;
esac
option=$1
shift
orig_option=$option # Save original for error messages
# Split out the argument for options that take them
case $option in
--*=*)
optarg=$(printf '%s' "$option" | $SED -e 's/^[^=]*=//')
;;
# These options have mandatory values. Since we didn't find an = sign,
# the value must be in the next argument
--in-tree | --octran-opts | --oc-opts | --cc-opts | --nocc-opts | --linkcc)
optarg=$1
shift
;;
esac
case "$option" in
#{{{ --in-tree* build using tools in KRoC source tree
--in-tree*)
intree="$optarg"
;;
#}}}
#{{{ -b, --brief brief warnings
-b | --brief)
occopts="$occopts -b"
;;
#}}}
#{{{ -c, --compile compile only
-c | --compile)
link=
octranopts="$octranopts -c"
noccopts="$noccopts -c"
;;
#}}}
#{{{ -e, --enable-udc enable user-defined channel checks
-e | --enable-udc)
enable_udc=yes
# can't use enhanced enabling with UDCs
use_occ21_zen=0
;;
#}}}
#{{{ -p, --pre-compile precompile sources
-p | --pre-compile)
translate=
link=
;;
#}}}
#{{{ -n, --dry-run do dry-run
-n | --dry-run)
dorun=
verbose=yes
;;
#}}}
#{{{ --dryrun dry-run (obsolete)
--dryrun)
dorun=
printf '%s: --dryrun is obsolete, please use --dry-run\n' "$progname" 1>&2
verbose=yes
;;
#}}}
#{{{ --no-checks disable run-time checks
--no-checks)
printf '%s: warning: disabling run-time checks\n' "$progname"
octranopts="$octranopts -n"
;;
#}}}
#{{{ -io, --inline-io inline channel I/O
-io | --inline-io)
octranopts="$octranopts -ii -io -ia"
;;
#}}}
#{{{ -is, --inline-sched inline scheduler operations
-is | --inline-sched)
octranopts="$octranopts -is"
;;
#}}}
#{{{ -it, --inline-ldtimer inline timer operations
-it | --inline-ldtimer)
octranopts="$octranopts -it"
;;
#}}}
#{{{ -o output file
-o)
outfile=$1
shift
;;
#}}}
#{{{ -o* output file
-o*)
outfile=$(printf '%s' "$option" | $SED -e 's/^..//')
;;
#}}}
#{{{ -s, --strict strict compilation mode
-s | --strict)
occopts="$occopts -strict"
;;
#}}}
#{{{ --rmox building for RMoX
--rmox)
octranopts="$octranopts -kr --exporttab ."
;;
#}}}
#{{{ --rmox* building RMoX module of some type
--rmox*)
value=$(printf '%s' "$option" | $SED -e 's/^......[-]*//')
link=
case $value in
app | application)
octranopts="$octranopts -kr --rmoxmode app"
occopts="$occopts -zdme"
rmoxlink=app
;;
drv | driver)
octranopts="$octranopts -kr --rmoxmode drv"
occopts="$occopts -zdme"
rmoxlink=drv
;;
srv | service)
octranopts="$octranopts -kr --rmoxmode srv"
occopts="$occopts -zdme"
rmoxlink=srv
;;
fs | filesystem)
octranopts="$octranopts -kr --rmoxmode fs"
occopts="$occopts -zdme"
rmoxlink=fsm
;;
net | network)
octranopts="$octranopts -kr --rmoxmode net"
occopts="$occopts -zdme"
rmoxlink=net
;;
*)
printf '%s: unknown RMoX build mode: %s\n' "$progname" "$value" 1>&2
exit 1
esac
;;
#}}}
#{{{ --nfw no wait for globally FORKed processes
--nfw)
# fix: was "octranotps=..." (typo), which silently discarded the --nfw
# translator flag; assign to octranopts so it reaches the translator,
# matching the --new arm below.
octranopts="$octranopts --nfw"
;;
#}}}
#{{{ --new no external wait for blocking syscalls
--new)
octranopts="$octranopts --new"
;;
#}}}
#{{{ --octran-opts* options for the translator
--octran-opts*)
octranopts="$octranopts $optarg"
;;
#}}}
#{{{ --oc-opts* options for the occam compiler
--oc-opts*)
occopts="$occopts $optarg"
;;
#}}}
#{{{ --cc-opts* options for the C compiler
--cc-opts*)
ccopts="$ccopts $optarg"
;;
#}}}
#{{{ --linkcc* compiler (for linking)
--linkcc*)
cc="$optarg"
;;
#}}}
#{{{ --nocc-opts* options for NOCC
--nocc-opts*)
noccopts="$noccopts $optarg"
;;
#}}}
#{{{ -nd, --no-dynmem disable dynamic memory
-nd | --no-dynmem)
occopts="$occopts -ndm"
octranopts="$octranopts -nd"
;;
#}}}
#{{{ -l, --library building dynamic library
-l | --library)
# set library (and non-main) for tranpc, and no vectorspace for compiler
octranopts="$octranopts -c -l"
occopts="$occopts -v"
makelib=yes
;;
#}}}
#{{{ -l* adding library (arch-specific)
-l*)
opts_libs="$opts_libs ${option/-l/-loccam_}"
;;
#}}}
#{{{ -L* adding path to libraries
-L*)
dir=$(printf '%s' $option | $SED -e 's:/$::' -e 's:^..::')
opts_libpaths="$dir:$opts_libpaths"
;;
#}}}
#{{{ -I* adding path to includes
-I*)
dir=$(printf '%s' "$option" | $SED -e 's:/$::' -e 's:^..::')
opts_incpaths="$dir:$opts_incpaths"
;;
#}}}
#{{{ -v, --verbose, --v, --verb* verbose operation
-v | --verbose | --v | --verb*)
verbose=yes
octranopts="$octranopts -v"
;;
#}}}
#{{{ -k, --keep-temp keep temporary files
-k | --keep-temp)
delete_temp=
octranopts="$octranopts -es"
;;
#}}}
#{{{ -V, --version show version
-V | --version)
showversion=yes
;;
#}}}
#{{{ --libpath show occam compiler library paths
--libpath)
showlibpath=yes
;;
#}}}
#{{{ --incpath show occam compiler include paths
--incpath)
showincpath=yes
;;
#}}}
#{{{ --cclibpath show C compiler library paths (-L...)
--cclibpath)
showlibpath=yes
showascc=yes
;;
#}}}
#{{{ --ccincpath show C compiler include paths (-I...)
--ccincpath)
showincpath=yes
showascc=yes
;;
#}}}
#{{{ --cflags show C compiler flags
--cflags)
showcflags=yes
;;
#}}}
#{{{ --cc show C compiler
--cc)
showcc=yes
;;
#}}}
#{{{ --ld show dynamic linker
--ld)
showld=yes
;;
#}}}
#{{{ --ldflags show dynamic linker flags
--ldflags)
showldflags=yes
;;
#}}}
#{{{ --as show assembler
--as)
showas=yes
;;
#}}}
#{{{ --asflags show assembler flags
--asflags)
showasflags=yes
;;
#}}}
#{{{ --rtslibs show run-time system libraries
--rtslibs)
showrtslibs=yes
;;
#}}}
#{{{ --tranflags show translator flags
--tranflags)
showtranflags=yes
;;
#}}}
#{{{ --tran show path to translator
--tran)
showtran=yes
;;
#}}}
#{{{ -h, --help show help
-h | --help | --he*)
fatal=yes
;;
#}}}
#{{{ -w, --warn enable warnings
-w | --warn)
occwopts=$occ_warn_opts
;;
#}}}
#{{{ -nw, --no-warn disable warnings
-nw | --no-warn)
occwopts=$occ_no_warn_opts
;;
#}}}
#{{{ -nowarn no warnings (obsolete)
--nowarn)
printf '%s: --nowarn is obsolete, please use --no-warn\n' "$progname" 1>&2
occwopts=$occ_no_warn_opts
;;
#}}}
#{{{ -t2, -t4, -t8, -t9 processor class
-t2 | -t4 | -t8 | -t9 )
proc_class=$(printf '%s' $option | $SED -e 's/^.//')
tsuffix="k$proc_class"
printf '%s: Option %s is experimental: setting processor class to %s\n' "$progname" "$option" "$proc_class" 1>&2
;;
#}}}
#{{{ -X1 enable ETC output (dummy)
-X1 )
printf '%s: Already using ETC. Ignorning.\n' "$progname" 1>&2
;;
#}}}
#{{{ -X2 enable user-defined operators (dummy)
-X2 )
printf '%s: User defined operators already enabled. Ignoring.\n' "$progname" 1>&2
;;
#}}}
#{{{ -X3 enable INITIAL decls (dummy)
-X3 )
printf '%s: INITIAL declarations already enabled. Ignoring.\n' "$progname" 1>&2
;;
#}}}
#{{{ -X5 enable mobile types (dummy)
-X5 )
printf '%s: MOBILE types now enabled by default\n' "$progname" 1>&2
;;
#}}}
#{{{ -X6 enable extended input for unhandled CASE tags
-X6 )
printf '%s: enabling extended input for unhandled CASE tags\n' "$progname" 1>&2
occopts="$occopts -xtag"
;;
#}}}
#{{{ -X7 enable mobile processes (dummy)
-X7 )
printf '%s: mobile process types now enabled by default\n' "$progname" 1>&2
;;
#}}}
#{{{ -d post-mortem debugging
-d )
printf '%s: Selecting post-mortem debugging\n' "$progname" 1>&2
octranopts="$octranopts -dX"
;;
#}}}
#{{{ -dt, --dtraces enable debugging traces
-dt | --dtraces)
printf '%s: Enabling debugging traces\n' "$progname" 1>&2
dtraces=1
occopts="$occopts -dtraces"
octranopts="$octranopts -dt"
;;
#}}}
#{{{ -di insert debugging
-di )
printf '%s: Selecting insert debugging\n' "$progname" 1>&2
octranopts="$octranopts -di"
;;
#}}}
#{{{ -dm dynamic memory debugging
-dm )
printf '%s: Selecting dynamic memory debugging\n' "$progname" 1>&2
octranopts="$octranopts -dm"
;;
#}}}
#{{{ -ds enable memory-usage summary
-ds )
printf '%s: Enabling memory usage summary\n' "$progname" 1>&2
octranopts="$octranopts -ds"
;;
#}}}
#{{{ -O, --optimise enable optimisations (dummy)
-O | --optimise )
printf '%s: Optimisations already enabled\n' "$progname" 1>&2
;;
#}}}
#{{{ -P, --pause enable scheduler-check at loopend
-P | --pause )
printf '%s: Enabling scheduler check at loop-end/back-jump\n' "$progname" 1>&2
octranopts="$octranopts -p"
;;
#}}}
#{{{ -H halt error-mode
-H | --halterror )
printf '%s: Compiling in HALT error mode\n' "$progname" 1>&2
octranopts="$octranopts -H"
;;
#}}}
#{{{ -S stop error-mode
-S | --stoperror )
printf '%s: Compiling in STOP error mode\n' "$progname" 1>&2
octranopts="$octranopts -S"
;;
#}}}
#{{{ --force-tlp force top-level interface to KYB, SCR, ERR
--force-tlp )
octranopts="$octranopts --tlp-kybscrerr"
;;
#}}}
#{{{ --skip-link skip link into final exec.
--skip-link )
printf '%s: skipping final link into executable\n' "$progname" 1>&2
skip_link=yes
;;
#}}}
#{{{ --no-zen no enhanced pre-enabling
--no-zen)
use_occ21_zen=0
;;
#}}}
#{{{ --use-zen use enhanced pre-enabling
--use-zen)
use_occ21_zen=1
;;
#}}}
#{{{ --no-revalt no reversed ALT disabling
--no-revalt)
use_occ21_revalt=0
;;
#}}}
#{{{ --use-revalt use reversed ALT disabling
--use-revalt)
use_occ21_revalt=1
;;
#}}}
#{{{ --no-zep no ALT preenabling
--no-zep)
use_occ21_zep=0
;;
#}}}
#{{{ --use-zep use ALT pre-enabling
--use-zep)
use_occ21_zep=1
;;
#}}}
#{{{ --ncl no compiler libraries
--ncl)
defaultlibraries=0
;;
#}}}
#{{{ --xml dump XML
--xml)
dumpxml=1
;;
#}}}
#{{{ --nocc use new occam-pi compiler
--nocc)
if [ ! -x $nocc ]; then
printf '%s: not configured to use NOCC\n' "$progname" 1>&2
exit 1
fi
usenocc=1
;;
#}}}
#{{{ --static link to static binary
--static)
linkstatic=1
;;
#}}}
#{{{ --krocdir show directory where KRoC data files are kept
--krocdir)
showkrocpath=1
;;
#}}}
#{{{ --autovars show variables autoconf needs
--autovars)
showautovars=1
;;
#}}}
#{{{ -z, --all-help show all options
-z | --all-help)
showall=1
;;
#}}}
#{{{ --debugkroc KRoC debugging enable (not for users)
--debugkroc)
debugkroc=1
;;
#}}}
#{{{ --*, -* unrecognised options
--* | -*)
printf '%s: Unrecognized option: "%s"; use --help for usage.\n' "$progname" "$orig_option" 1>&2
exit 1
;;
#}}}
#{{{ * source file
*) srcs="$srcs $option"
;;
#}}}
esac
done
#}}}
#{{{ directory configuration
kroc_archbindir="/Users/clj/work/kroc/kroc-trunk/distribution/osx/install/bin"
kroc_archlibdir="/Users/clj/work/kroc/kroc-trunk/distribution/osx/install/lib"
kroc_archincdir="/Users/clj/work/kroc/kroc-trunk/distribution/osx/install/include/kroc"
kroc_mandir="/Users/clj/work/kroc/kroc-trunk/distribution/osx/install/share/man"
kroc_krocdir="/Users/clj/work/kroc/kroc-trunk/distribution/osx/install/share/kroc"
#}}}
#{{{ path-dependant options
if [ "$intree" = "" ]; then
# Pull paths from env in preference of hardcoded path
occ21=${OCC21:-"$kroc_archbindir/occ21"}
octran=${OCTRAN:-"$kroc_archbindir/tranx86"}
else
occ21="$intree/tools/occ21/occ21"
octran="$intree/tools/tranx86/tranx86"
fi
if [ $dtraces -eq 1 ]; then
ocrts="-lkrocif -lccsptrace"
else
ocrts="-lkrocif -lccsp"
fi
#}}}
#{{{ build colon-separated search path for libraries
if [ "$intree" = "" ]; then
ocspath="$kroc_krocdir/vtlib:$kroc_krocdir/vtinclude"
else
ocspath="$intree/modules/inmoslibs/libsrc/forall"
fi
if [ "X$OCSEARCH" != "X" ]; then
ocspath="$OCSEARCH:$ocspath"
fi
ocsearch=
if [ "$intree" = "" ]; then
oclibpath="-L$kroc_archlibdir"
cincludepath="-I$kroc_archincdir"
else
oclibpath="-L/Users/clj/work/kroc/kroc-trunk/distribution/osx/build/kroc-ccsp/runtime/ccsp -L/Users/clj/work/kroc/kroc-trunk/distribution/osx/build/kroc-ccsp/runtime/libkrocif -L$intree/modules/inmoslibs/libsrc/forall"
cincludepath="-I/Users/clj/work/kroc/kroc-trunk/distribution/osx/build/kroc-ccsp/runtime/ccsp/include -I/Users/clj/work/kroc/kroc-trunk/runtime/ccsp/include -I/Users/clj/work/kroc/kroc-trunk/distribution/osx/build/kroc-ccsp/modules/cif/libsrc -I/Users/clj/work/kroc/kroc-trunk/modules/cif/libsrc"
fi
#}}}
#{{{ use "ocspath" to add entries to "oclibpath", "ocsearch" and "cincludepath"
# ocspath is a COLON-SEPARATED list now, better process slightly differently
XIFS="$IFS"
IFS=':'
for dir in $ocspath; do
dir=$(printf '%s' "$dir" | $SED -e 's:/$::')
ocsearch="$ocsearch:$dir"
done
IFS="$XIFS"
#}}}
#{{{ add given command-line entries to "oclibpath", "ocsearch" and "cincludepath"
for lname in $opts_libs; do
ocllibs="$ocllibs $lname"
done
XIFS="$IFS"
IFS=':'
for dir in $opts_libpaths; do
oclibpath="-L$dir $oclibpath"
ocsearch="$dir:$ocsearch"
done
for dir in $opts_incpaths; do
cincludepath="-I$dir $cincludepath"
ocsearch="$dir:$ocsearch"
done
IFS="$XIFS"
#}}}
#{{{ set extra flags from options
if [ $use_occ21_revalt -eq 1 ]; then
occopts="$occopts -revalt"
fi
if [ $use_occ21_zen -eq 1 ]; then
occopts="$occopts -zen"
fi
if [ $use_occ21_zep -eq 1 ]; then
occopts="$occopts -zep"
fi
#}}}
#{{{ add current directory as first in search paths
oclibpath="-L. $oclibpath"
ocsearch=".:$ocsearch"
cincludepath="-I. $cincludepath"
procopts="-$proc_class"
occopts="$procopts $occopts"
#}}}
#{{{ add flags for user-defined channels if enabled
if [ $enable_udc ]; then
# enable MOBILE size-field
occopts="$occopts -msf"
else
# disallow PLACE'd channels and don't generate external channel checks
occopts="$occopts -npc"
octranopts="$octranopts -nec"
fi
#}}}
#{{{ show library paths and exit if requested
if [ $showlibpath ]; then
if [ $showascc ]; then
printf '%s\n' "$oclibpath"
else
printf '%s\n' "$ocsearch" | $SED -e 's:^ ::g' -e 's: $::g' -e 's/ /:/g' -e 's/::/:/g'
fi
exit 0
fi
#}}}
#{{{ show include paths and exit if requested
if [ $showincpath ]; then
if [ $showascc ]; then
printf '%s\n' "$cincludepath"
else
printf '%s \n' "$ocsearch" | $SED -e 's:^ ::' -e 's: $::' -e 's/ /:/g' -e 's/::/:/g'
fi
exit 0
fi
#}}}
if [ $showcflags ]; then
printf '%s\n' "$extcflags"
exit 0
fi
if [ $showcc ]; then
printf '%s\n' "$CC"
exit 0
fi
if [ $showrtslibs ]; then
printf '%s\n' "$ocrtslibs"
exit 0
fi
if [ $showtranflags ]; then
printf '%s\n' "$octranopts"
exit 0
fi
if [ $showtran ]; then
printf '%s\n' "$octran"
exit 0
fi
if [ $showld ]; then
printf '%s\n' "$ld"
exit 0
fi
if [ $showldflags ]; then
printf '%s\n' "$ARCH_LDFLAGS"
exit 0
fi
if [ $showas ]; then
printf '%s\n' "$as"
exit 0
fi
if [ $showasflags ]; then
printf '%s\n' "$ARCH_ASFLAGS"
exit 0
fi
if [ $showkrocpath ]; then
printf '%s\n' "$kroc_krocdir"
exit 0
fi
if [ $showautovars ]; then
printf 'KROC_CCSP_CFLAGS="%s";\n' "-fomit-frame-pointer -fno-defer-pop -DHOSTOS_DARWIN -mdynamic-no-pic -DKROC_USES_PTHREADS"
printf 'KROC_CCSP_CINCPATH="%s";\n' "-I/Users/clj/work/kroc/kroc-trunk/distribution/osx/build/kroc-ccsp/runtime/ccsp/include -I/Users/clj/work/kroc/kroc-trunk/runtime/ccsp/include -I/Users/clj/work/kroc/kroc-trunk/distribution/osx/build/kroc-ccsp/modules/cif/libsrc -I/Users/clj/work/kroc/kroc-trunk/modules/cif/libsrc"
printf 'KROC_CCSP_OCCFLAGS="%s";\n' ""
printf 'KROC_CCSP_TRANFLAGS="%s";\n' "-mp"
printf 'KROC_CCSP_ASFLAGS="%s";\n' ""
printf 'KROC_CCSP_LDFLAGS="%s";\n' "-lpthread"
printf 'KROC_CCSP_LIBPATH="%s";\n' "-L/Users/clj/work/kroc/kroc-trunk/distribution/osx/build/kroc-ccsp/runtime/ccsp -L/Users/clj/work/kroc/kroc-trunk/distribution/osx/build/kroc-ccsp/runtime/libkrocif"
printf 'KROC_CCSP_LIBS="%s";\n' ""
printf 'KROC_CCSP_ENABLE_PTHREADS="%s";\n' "yes"
printf 'KROC_CCSP_ENABLE_MP="%s";\n' "yes"
printf 'KROC_CCSP_ENABLE_CTTD="%s";\n' "no"
printf 'KROC_CCSP_ENABLE_PONY="%s";\n' "no"
printf 'KROC_CCSP_ENABLE_DYNPROC="%s";\n' "yes"
printf 'KROC_CCSP_ENABLE_SSE2="%s";\n' "yes"
printf 'KROC_CCSP_ENABLE_CPUTIMERS="%s";\n' ""
printf 'KROC_RMOX="%s";\n' ""
exit 0
fi
if [ $makelib ]; then
if [ "X$link" = "X" ]; then
printf '%s: -c and -l are mutually exclusive (dropping -c)\n' "$progname" 1>&2
link=yes
fi
fi
if [ "X${fatal}" != "X" -o "X$srcs" = "X" -o "X$showversion" != "X" ]; then
exec 1>&2
printf 'KRoC version %s targeting %s (driver V%s)\n' "$package_version" "$target_canonical" "$dversion"
if [ $verbose ]; then
showversion=yes
fi
if [ $showversion ]; then
cat $kroc_krocdir/doc/txt/AUTHORS
exit 0
fi
exec 1>&2
printf 'Usage: %s [options] [occam sources/pre-compiled sources]\n' "$progname"
cat << EOF
Options:
-b, --brief Give brief occ21 error messages
-c, --compile Compile source to objects, do not link
-s, --strict Strict checking mode
EOF
if [ $default_errormode_stop -eq 1 ]; then
printf ' -H, --halterror Compile in HALT error mode\n'
else
printf ' -S, --stoperror Compile in STOP error mode\n'
fi
cat << EOF
-d Enable post-mortem debugging
-di Enable insert debugging
-e Enable user-defined channels
-h, --help Print this message and exit
-IDIRECTORY Search DIRECTORY for occam libs & includes, C headers
-k, --keep-temp Keep temporary files
-V, --version Print version
-l, --library Generate .so shared library (for dynamically loaded processes)
-lNAME Link with native library libNAME.a / libNAME.so
-LDIRECTORY Search DIRECTORY for native libraries
-n, --dry-run Do not run any commands; just print them.
--oc-opts=opts Use these occam compiler ($occ21) options
--oc-opts opts \"\"
--cc-opts=opts Use these C compiler ($cc) options
--cc-opts opts \"\"
-nw, --no-warn Give no warnings from $occ21
-nd, --no-dynmem Disable dynamic memory support
-oFILE, -o FILE Place output in file FILE
-p, --pre-compile Pre-compile occam source to .tce files
-P --pause Event/schedule check at loop-ends and backward jumps
-io, --inline-io Inline input/output kernel calls
-is, --inline-sched Inline parts of the scheduler
-it, --inline-ldtimer Inline load-timer instruction
-v, --verbose Show commands run during compilation
-w, --warn Additional warnings from $occ21
-z, --all-help Show additional options
--nfw Do not wait for globally FORKed processes
--new Do not wait for blocking syscalls
-X6 Extended input for unhandled CASE inputs (experimental)
EOF
if [ $showall -eq 1 ]; then
cat << EOF
Exclusive options:
--cc Print compiler for use with external C code
--cflags Print compiler flags for use with external C code
--tran Print translator for .etc binaries
--tranflags Print translator flags
--libpath Print path to occam libraries
--incpath Print path to occam include files
--cclibpath Print path to occam libraries in CC -L.. form
--ccincpath Print path to occam include files in CC -I.. form
--ld Print path to dynamic linker (for generating shared libraries)
--ldflags Print linker flags to be used when building shared libraries
--krocdir Print path to kroc data files
--autovars Print variables for autoconf
Additional options:
-dm Enable dynamic memory debugging
-ds Enable memory usage summary
-dt Enable debugging traces
-t2, -t4, -t8, -t9 Set processor compilation class (default $proc_class)
-O --optimise Enable optimisations [now default]
--rmox Compile for RMoX
--force-tlp Force top-level PROC interface to (CHAN BYTE kyb?, scr!, err!)
--ncl Do not link in default compiler libraries
--xml Generate XML tree
--nocc Use NOCC compiler (experimental)
--linkcc <prog> Use <prog> to link instead of $cc
--static Link to static binary
EOF
if [ $use_occ21_revalt -eq 1 ]; then
printf ' --no-revalt Do not use reversed ALT disabling sequence (default enabled)\n'
else
printf ' --use-revalt Use reversed ALT disabling sequence (default disabled)\n'
fi
if [ $use_occ21_zen -eq 1 ]; then
printf ' --no-zen Do not use enhanced ALT enabling (default enabled)\n'
else
printf ' --use-zen Use enhanced ALT enabling (default disabled)\n'
fi
if [ $use_occ21_zep -eq 1 ]; then
printf ' --no-zep Do not use ALT pre-enabling (default enabled)\n'
else
printf ' --use-zep Use ALT pre-enabling (default disabled)\n'
fi
if [ $default_errormode_stop -eq 1 ]; then
printf ' -S, --stoperror Compile in STOP error mode (default)\n'
else
printf ' -H, --halterror Compile in HALT error mode (default)\n'
fi
fi
exit 0
fi
#{{{ parse_pragmas
# Scan a compiled ETC file for compiler-emitted directives:
#  - each ".USELIB name" appends "-loccam_name" to the global deplibs list
#    (skipping names already present),
#  - each ".LDFLAGS ..." appends its words to the global extraccopts.
# Reads globals: octran, deplibs, verbose, progname.
# Writes globals: deplibs, extraccopts.
parse_pragmas () {
local etcfile="$1" uselibs wanted flag known new_flag ldopts
# Libraries requested via .USELIB — dedup against what we already link.
uselibs=$($octran -C "$etcfile" | awk '/^\.USELIB / { ORS=" "; print $2 }')
for wanted in $uselibs; do
flag="-loccam_$wanted"
new_flag=1
for known in $deplibs; do
if [ "$known" = "$flag" ] ; then
new_flag=0
fi
done
if [ "$new_flag" = "1" ] ; then
if [ "$verbose" ] ; then
printf '%s: Using library %s\n' "$progname" "$wanted" 1>&2
fi
deplibs="$deplibs $flag"
fi
done
# Extra linker words requested via .LDFLAGS — forwarded to the C compiler
# when the final executable is linked.
ldopts=$($octran -C "$etcfile" | awk '/^\.LDFLAGS / { ORS=" "; for (i = 2; i <= NF; i++) { print $i } }')
if [ "$verbose" -a "$ldopts" != "" ] ; then
printf '%s: Using extra linker options %s\n' "$progname" "$ldopts" 1>&2
fi
extraccopts="$extraccopts $ldopts"
}
#}}}
objs=
tmp_objs=
efile=
for src in $srcs; do
origsrc=$src
suffix=$(printf '%s' "$src" | $SED -e 's/^.*\.\(.*\)$/\1/')
# Handle file. (no suffix)
if [ "X$suffix" = "X" ]; then
nsrc="${src}occ"
if [ -r $nsrc ]; then
printf '%s: Assuming you meant to compile %s\n' "$progname" "$nsrc" 1>&2
src=$nsrc
suffix=occ
else
printf '%s: Ignoring %s (no %s found)\n' "$progname" "$src" "$nsrc" 1>&2
continue
fi
elif [ "$suffix" = "$src" ]; then # Handle file (no . at all)
if [ -r "$src.occ" ]; then
printf '%s: Assuming you meant to compile %s.occ\n' "$progname" "$src" 1>&2
suffix=occ
else
printf '%s: Ignoring %s (no %s.occ found)\n' "$progname" "$src" "$src" 1>&2
continue
fi
fi
dir=$(printf '%s' "$src" | $SED -e 's%[^/][^/]*$%%')
if [ "X$dir" = "X$src" ]; then
dir=
else
src=$(printf '%s' "$src" | $SED -e 's%^.*/%%')
fi
base=$(printf '%s' "$src" | $SED -e 's/\.[^\.]*$//')
# Pick output name, either from -o name if given or basename of name
if [ "X$link" != "X" -a "X$efile" = "X" ]; then
if [ "X$outfile" = "X" ]; then
efile=$base
else
efile=$outfile
outfile=
fi
elif [ "X$link" = "X" -a "X$rmoxlink" != "X" -a "X$efile" = "X" ]; then
if [ "X$outfile" = "X" ]; then
efile="$base.$rmoxlink"
else
efile=$outfile
outfile=
fi
fi
# if the file does not exist, abort
if [ ! -f $dir$base.$suffix ]; then
printf '%s: cannot find file %s\n' "$progname" "$dir$base.$suffix" 1>&2
exit 1
fi
if [ $debugkroc -eq 1 ]; then
printf ' origsrc = %s\n' "$origsrc"
printf ' src = %s\n' "$src"
printf ' dir = %s\n' "$dir"
printf ' base = %s\n' "$base"
printf ' suffix = %s\n' "$suffix"
printf ' efile = %s\n' "$efile"
printf ' outfile = %s\n' "$outfile"
fi
# Using pre-compiled file? FIXME: Handle this better
if [ $suffix = "etc" ] || [ $suffix = "kt2" ] || [ $suffix = "kt4" ] || [ $suffix = "kt8" ] || [ $suffix = "kt9" ]; then
compile=
tfile="$dir$base.$suffix"
tcefile="$dir$base.tce"
if [ ! -r $tfile ]; then
printf '%s: Cannot find pre-compiled file %s\n' "$progname" "$tfile" 1>&2
exit 1;
fi
if [ ! -r $tcefile ]; then
printf '%s: Cannot find pre-compiled TCE file %s\n' "$progname" "$tcefile" 1>&2
exit 1;
fi
fi
if [ $suffix != "occ" ] && [ $suffix != "co" ] && [ $suffix != "mcsp" ] && [ "X$compile" != "X" ]; then
objs="$objs $origsrc"
if [ "$suffix" = "o" ] ; then
# Look for a corresponding .tce/.etc file.
for tcesuffix in .tce .etc; do
tcefile="$base$tcesuffix"
if [ -e "$tcefile" ] ; then
parse_pragmas "$tcefile"
fi
done
fi
continue
fi
if [ $suffix = "co" ]; then
src="$dir$base.co"
elif [ $suffix = "mcsp" ]; then
src="$dir$base.mcsp"
else
src="$dir$base.occ"
fi
if [ ! -r $src ]; then
printf '%s: Cannot find occam source file %s\n' "$progname" "$src" 1>&2
exit 1
fi
if [ $link ]; then
if [ $efile = $src ]; then
efile="$efile.out";
printf '%s: Compiling %s to %s\n' "$progname" "$src" "$efile" 1>&2
fi
fi
tmpfiles=
# .ktX files and TCO (and ETC) files
if [ $compile ]; then
if [ $usenocc ]; then
tcefile="$base.etc"
tsuffix="etc"
else
tcefile="$base.tce"
fi
if [ "X$translate" != "X" -a "X$outfile" != "X" ]; then
tfile=$outfile
else
tfile="$base.$tsuffix"
fi
tmpfiles="$tmpfiles $tcefile $tfile"
fi
# Native assembler file
sfile="$base.s"
tmpfiles="$tmpfiles $sfile"
# Native object file
if [ "X$link" != "X" -a "X$outfile" != "X" ]; then
ofile=$outfile
else
ofile="$base.o"
fi
tmpfiles="$tmpfiles $ofile"
if [ $delete_temp ]; then
trap "rm -f $tmpfiles" 1 2 3 9 15
fi
ISEARCH=$ocsearch; export ISEARCH
if [ $verbose ]; then
printf '%s: Search path for native libraries:\n' "$progname" 1>&2
printf ' %s\n' "$oclibpath" | $SED -e 's:-L::g' 1>&2
printf '%s: Search path for occam libraries and includes:\n' "$progname" 1>&2
printf ' %s\n' "$ocsearch" 1>&2
fi
if [ $compile ]; then
if [ $dorun ]; then
rm -f $tfile #$tcefile
fi
if [ $usenocc ]; then
cmd="$nocc $noccopts $src -o $tfile"
else
cmd="$occ21 $occopts $occwopts $src -o $tfile"
if [ $dumpxml ]; then
xmlfile=$base.xml
cmd="$cmd -ztx -xf $xmlfile"
fi
fi
if [ $verbose ]; then
printf '%s\n' "$cmd" 1>&2
fi
if [ $dorun ]; then
$cmd
if [ $? -ne 0 ]; then
if [ $usenocc ]; then
printf '%s: %s failed to compile %s to %s\n' "$progname" "$nocc" "$src" "$tfile" 1>&2
else
printf '%s: %s failed to compile %s to %s\n' "$progname" "$occ21" "$src" "$tfile" 1>&2
fi
exit 1;
fi
if [ $link ]; then
if [ $delete_temp ]; then
tmp_objs="$tmp_objs $tfile $tcefile"
fi
fi
fi
fi
if [ $translate ]; then
if [ $dorun ]; then
rm -f $ofile
fi
cmd="$octran $octranopts $tfile" # $sfile"
if [ $verbose ]; then
printf '%s\n' "$cmd" 1>&2
fi
if [ $dorun ]; then
$cmd
if [ $? -ne 0 ]; then
printf '%s: %s failed to translate %s to %s\n' "$progname" "$octran" "$tfile" "$ofile" 1>&2
exit 1;
fi
fi
# don't assemble anymore -- tranx86 dumps .o files
# if [ $dorun ]; then
# rm -f $ofile
# fi
# cmd="$as $asflags $sfile -o $ofile"
# if [ $verbose ]; then
# echo $cmd 1>&2
# fi
# if [ $dorun ]; then
# $cmd
# if [ $? -ne 0 ]; then
# echo "$progname: $as failed to assemble $sfile to $ofile ($?)" 1>&2
# exit 1;
# fi
# fi
objs="$objs $ofile"
tmp_objs="$tmp_objs $ofile"
if [ $dorun ]; then
if [ $delete_temp ]; then
rm -f $sfile
fi
fi
fi
parse_pragmas "$tcefile"
done
#{{{ add deplibs to ocllibs
# We have to add the libraries in reverse order to get the link order right on
# platforms that care about that (such as Cygwin).
for lib in $deplibs; do
ocllibs="$lib $ocllibs"
done
#}}}
if [ $defaultlibraries -eq 1 ]; then
ocllibs="$ocllibs -loccam_forall"
fi
if [ "X$link" = "X" ] && [ "X$rmoxlink" = "X" ]; then
exit 0
fi
if [ "X$efile" = "X" ]; then
printf '%s: No output executable file name found (and no -oname)\n' "$progname" 1>&2
exit 1
fi
if [ $dorun ]; then
rm -f $efile
fi
if [ $makelib ]; then
efile=$efile$libext
cmd="$cc $ccopts $oclibpath $ocrtslibs $ocllibs $ocrts $extraccopts $ARCH_LDFLAGS $ldopts -o $efile $objs"
elif [ $skip_link ]; then
test
elif [ "X$rmoxlink" != "X" ]; then
cmd="$ld -o $efile -r $objs"
else
if [ $linkstatic -eq 1 ]; then
# need to add in pthread library if we're using that
USEPTHREAD=0
printf '%s\n' "$extcflags" | grep KROC_USES_PTHREADS > /dev/null && USEPTHREAD=1
if [ $USEPTHREAD -eq 1 ]; then
cmd="$cc $ccopts $objs $oclibpath $cincludepath $ocllibs $ocrts $ocrtslibs $extraccopts -static -o $efile -lpthread"
else
cmd="$cc $ccopts $objs $oclibpath $cincludepath $ocllibs $ocrts $ocrtslibs $extraccopts -static -o $efile"
fi
else
cmd="$cc $ccopts $objs $oclibpath $cincludepath $ocllibs $ocrts $ocrtslibs $extraccopts -o $efile"
fi
fi
if [ $verbose ]; then
printf '%s\n' "$cmd" 1>&2
fi
if [ $dorun ]; then
$cmd
if [ $? -ne 0 ]; then
printf '%s: %s failed to link %s\n' "$progname" "$cc" "$efile" 1>&2
exit 1;
else
if [ "$rmoxlink" = "app" ]; then
chmod +x $efile
fi
fi
fi
if [ $delete_temp ]; then
if [ "X$tmp_objs" != "X" ]; then
rm -f $tmp_objs
fi
fi
exit 0
| true |
cb59eb807da00fc1c9f18738192eafcdebf059b9 | Shell | BioMAs/SWD-infrastructure-omics | /install_dependencies.sh | UTF-8 | 568 | 3.84375 | 4 | [] | no_license | #!/bin/bash
# Stop on error
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
MAIN_ENV_NAME=srp
ENVS=$(conda env list | awk '{print $1}' )
FOUND=1
for ENV in ${ENVS}
do
if [ "${ENV}" == "${MAIN_ENV_NAME}" ]; then
FOUND=0
fi
done
# Creation of main conda environment.
if [ ${FOUND} -eq 0 ]; then
echo "${MAIN_ENV_NAME} already created"
else
echo "Creating env ${MAIN_ENV_NAME}"
conda env create -n ${MAIN_ENV_NAME} -f ${DIR}/CONDA/srp.yml
source activate ${MAIN_ENV_NAME}
Rscript ${DIR}/CONDA/installDeEnv.R
source deactivate
fi | true |
852f682b856039b94d732800e0a360a888ad372b | Shell | PaaS-TA/PAAS-TA-CONTAINER-SERVICE-PROJECTS-RELEASE | /jobs/docker-images/templates/bin/post-deploy.erb | UTF-8 | 3,752 | 3.328125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -xe
echo "docker images load..jenkins"
/var/vcap/packages/docker/bin/docker --host unix:///var/vcap/sys/run/docker/docker.sock load -i /var/vcap/packages/docker-images/paasta-jenkins.tar.gz
echo "docker images load..registry"
/var/vcap/packages/docker/bin/docker --host unix:///var/vcap/sys/run/docker/docker.sock load -i /var/vcap/packages/docker-images/paasta-registry.tar.gz
jenkins_id=$(/var/vcap/packages/docker/bin/docker --host unix:///var/vcap/sys/run/docker/docker.sock images --filter=reference=<%= link('private-image-repository-link').instances[0].address %>:<%= link('private-image-repository-link').p('image_repository.port') %>/paasta_jenkins:latest --format "{{.ID}}")
if [ -z "$jenkins_id" ]; then
echo "docker images Change repositories and tags...jenkins"
/var/vcap/packages/docker/bin/docker --host unix:///var/vcap/sys/run/docker/docker.sock tag $(/var/vcap/packages/docker/bin/docker --host unix:///var/vcap/sys/run/docker/docker.sock images | grep paastateam/paasta_jenkins | awk '{print $3}') <%= link('private-image-repository-link').instances[0].address %>:<%= link('private-image-repository-link').p('image_repository.port') %>/paasta_jenkins:latest
fi
registry_id=$(/var/vcap/packages/docker/bin/docker --host unix:///var/vcap/sys/run/docker/docker.sock images --filter=reference=<%= link('private-image-repository-link').instances[0].address %>:<%= link('private-image-repository-link').p('image_repository.port') %>/registry:latest --format "{{.ID}}")
if [ -z "$registry_id" ]; then
echo "docker images Change repositories and tags...registry"
/var/vcap/packages/docker/bin/docker --host unix:///var/vcap/sys/run/docker/docker.sock tag $(/var/vcap/packages/docker/bin/docker --host unix:///var/vcap/sys/run/docker/docker.sock images | grep registry | awk '{print $3}') <%= link('private-image-repository-link').instances[0].address %>:<%= link('private-image-repository-link').p('image_repository.port') %>/registry:latest
fi
echo "daemon.json create"
cat > daemon.json << EOF
{
"insecure-registries" : ["<%= link('private-image-repository-link').instances[0].address %>:<%= link('private-image-repository-link').p('image_repository.port') %>"]
}
EOF
echo "daemon.json /etc/docker/daemon.json move"
mv daemon.json /etc/docker/daemon.json
echo "docker stop - waiting docker auto start"
#monit stop docker - docker auto start
/var/vcap/jobs/docker/bin/ctl stop
i=1
while [ $i -le 12 ]
do
echo "Count..."$i
if [ -e "/var/vcap/sys/run/docker/docker.sock" ]; then
echo "private docker repository login"
/var/vcap/packages/docker/bin/docker --host unix:///var/vcap/sys/run/docker/docker.sock login -u <%= link('private-image-repository-link').p('image_repository.auth.username') %> -p <%= link('private-image-repository-link').p('image_repository.auth.password_string') %> <%= link('private-image-repository-link').instances[0].address %>:<%= link('private-image-repository-link').p('image_repository.port') %>
echo "Register the docker image in the private docker repository...jenkins"
/var/vcap/packages/docker/bin/docker --host unix:///var/vcap/sys/run/docker/docker.sock push <%= link('private-image-repository-link').instances[0].address %>:<%= link('private-image-repository-link').p('image_repository.port') %>/paasta_jenkins:latest
echo "Register the docker image in the private docker repository...registry"
/var/vcap/packages/docker/bin/docker --host unix:///var/vcap/sys/run/docker/docker.sock push <%= link('private-image-repository-link').instances[0].address %>:<%= link('private-image-repository-link').p('image_repository.port') %>/registry:latest
echo "Success"
exit 0
fi
i=$(($i+1))
sleep 10
done
echo "Fail"
exit 1
| true |
4ee150f71a1693cadb10d4779ddc1004e10b9720 | Shell | Jenova7/electra-core | /contrib/init/electrad.init | UTF-8 | 1,300 | 3.6875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# electrad The Electra server.
#
#
# chkconfig: 345 80 20
# description: electrad
# processname: electrad
#
# Source function library.
. /etc/init.d/functions
# you can override defaults in /etc/sysconfig/electrad, see below
if [ -f /etc/sysconfig/electrad ]; then
. /etc/sysconfig/electrad
fi
RETVAL=0
prog=electrad
# you can override the lockfile via BITCOIND_LOCKFILE in /etc/sysconfig/electrad
lockfile=${BITCOIND_LOCKFILE-/var/lock/subsys/electrad}
# electrad defaults to /usr/bin/electrad, override with BITCOIND_BIN
bitcoind=${BITCOIND_BIN-/usr/bin/electrad}
# electrad opts default to -disablewallet, override with BITCOIND_OPTS
bitcoind_opts=${BITCOIND_OPTS}
start() {
echo -n $"Starting $prog: "
daemon $DAEMONOPTS $bitcoind $bitcoind_opts
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && touch $lockfile
return $RETVAL
}
stop() {
echo -n $"Stopping $prog: "
killproc $prog
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && rm -f $lockfile
return $RETVAL
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status $prog
;;
restart)
stop
start
;;
*)
echo "Usage: service $prog {start|stop|status|restart}"
exit 1
;;
esac
| true |
dea68939df323b0330a53d84ea03b2c5a398b6f0 | Shell | laurahuckins/Snippets | /vcf_to_gen.sh | UTF-8 | 926 | 2.953125 | 3 | [] | no_license |
for chr in {1..22}
do
cat Amanda_vcfids | tr ':' ' ' | awk -v c=$chr '$1==c {print $1":"$2}' > Amanda_vcfids.$chr # Get ids in format for qctool
qctool -g chr$chr.vcf.gz -og Amanda.$chr.gen -incl-rsids Amanda_vcfids.$chr
cat CMC_eqtl_SVA_Caucasian_combined+BP_chrS_cis_conditional.out.bp | awk -v c=$chr '$1==c {print $1":"$3, $2}' > Amanda.mapping.$chr # get mapping file to upate chr:pos to rsids
while read line; do
echo $line
old=$(echo $line | awk '{print $1}')
new=$(echo $line | awk '{print $2}')
sed -i "s/$old/$new/1" Amanda.$chr.gen
done<Amanda.mapping.$chr
##### OR if sig diff between mapping and gen files:
while read line; do
echo $line
old=$(echo $line | awk '{print $2}')
new=$(grep $old Laura.mapping.$chr | awk '{print $2}' | head -1)
sed -i "s/$old/$new/1" Laura.$chr.gen
done<Laura.$chr.gen
done
| true |
91f62db2854c6f2527d92972b446e0f77d1e3d0a | Shell | tamirko/cfyApps | /bs/scripts/startJBoss.sh | UTF-8 | 723 | 3.109375 | 3 | [] | no_license | #!/bin/bash
ctx logger info "Running $0 ..."
ctx logger info "pwd is `pwd` "
ctx logger info "id is `id` "
cd ~
ctx logger info "pwd2 is `pwd` "
ctx logger info "hostname is `hostname` "
JBoss_download_url=$(ctx node properties JBoss_download_url)
jbossFileName=$(basename "$JBoss_download_url")
jbossHomeDir=${jbossFileName%.*}
cd $jbossHomeDir/bin
currStatus=$?
ctx logger info "Starting JBoss in $jbossHomeDir/bin... - previous action status is ${currStatus}"
nohup sudo ./standalone.sh -Djboss.bind.address=0.0.0.0 -Djboss.bind.address.management=0.0.0.0 > ~/jboss_start.log 2>&1 &
currStatus=$?
ctx logger info "Ran nohup sudo ./standalone.sh - previous action status is ${currStatus}"
ctx logger info "End of $0"
| true |
e290775a0c7f20ffbdd8d57478dc979f3edc9f4a | Shell | TylersDurden/Crypto | /LiveBuild.sh | UTF-8 | 1,680 | 3.3125 | 3 | [] | no_license | #!/usr/bin
echo 'Attempting to get Project from git onto USB'
echo 'From there, Need to compile and run it (all on USB).'
# Create a live running directory on the USB
cd /media/root/UNTITLED/CRYPTO
# Fetch Crypto Repository from GitHub
git config --global user.name "{}"
git config --global user.email "{}"
git clone https://www.github.com/TylersDurden/Crypto
#git location: cd /media/root/UNTITLED/CRYPTO/Crypto/src
# Before java Build, put EOWL into txt file
# (it will be zipped for portability)
cd /media/root/UNTITLED/Crypto2.0/src/DATA
chmod +x install.sh
su root ./install.sh
# Now put unzipped files into a txt file for reading w Java Apps
cd /media/root/UNTITLED/CRYPTO/Crypto/src/DATA/Words/EOWL-v1.1.2
echo 'EOWL unzipped. Relocating to a txt file...'
cd CSV\ Format
cat AWords.csv >> words.txt
cat B\ Words.csv >> words.txt
cat C\ Words.csv >> words.txt
cat D\ Words.csv >> words.txt
cat E\ Words.csv >> words.txt
cat F\ Words.csv >> words.txt
cat G\ Words.csv >> words.txt
cat H\ Words.csv >> words.txt
cat I\ Words.csv >> words.txt
cat J\ Words.csv >> words.txt
cat K\ Words.csv >> words.txt
cat L\ Words.csv >> words.txt
cat M\ Words.csv >> words.txt
cat N\ Words.csv >> words.txt
cat O\ Words.csv >> words.txt
cat P\ Words.csv >> words.txt
cat Q\ Words.csv >> words.txt
cat R\ Words.csv >> words.txt
cat S\ Words.csv >> words.txt
cat T\ Words.csv >> words.txt
cat U\ Words.csv >> words.txt
cat V\ Words.csv >> words.txt
cat W\ Words.csv >> words.txt
cat X\ Words.csv >> words.txt
cat Y\ Words.csv >> words.txt
cat Z\ Words.csv >> words.txt
# Now Build and Run Java Applications
cd/media/root/UNTITLED/CRYPTO/Crypto/src/
chmod +x run.sh
su root ./run.sh
| true |
a3e4c9de8ea498c3d2a2bd1f10e2fac93b14bb74 | Shell | PradeepMaddimsetti/Bash-scripts | /case.sh | UTF-8 | 347 | 3.046875 | 3 | [] | no_license | #case statements
#getting aurgment
digit=$1
case $digit in
1 )
echo "hello" ;;
2 )
echo "how are u" ;;
* )
echo "no matter" ;;
esac
#usingAlphabits
alp=$2
case $alp in
b )
echo "hello" ;;
a )
echo "how are u" ;;
* )
echo "no matter" ;;
esac
| true |
0dbc7e1d72d2bf26bf6ef98b52e910582c404341 | Shell | joznia/mst | /1shot | UTF-8 | 504 | 3.703125 | 4 | [] | no_license | #! /bin/sh
# mst AIO installer
SRCDIR="${HOME}/mst-1shot"
SRC="./mst"
DEST="/usr/bin/mst"
die () {
if [ $? -gt 0 ]; then
echo "$*" 1>&2 ; exit 1;
fi
}
echo "installing mst ..."
echo "cloning repository to ${SRCDIR} ..." ; git clone https://github.com/joznia/mst.git ${SRCDIR} > /dev/null 2>&1
cd ${SRCDIR}
echo "copying mst to /usr/bin ..." ; sudo cp $SRC $DEST
echo "setting permissions ..." ; sudo chmod 0755 $DEST
cd ${SRCDIR}/..
echo "removing repository ..." ; rm -rf ${SRCDIR}
| true |
a64f97b1b7b337d84b011bbf64be239fc498c5a4 | Shell | d0whc3r/rancher-catalog | /consul-certs-generator.sh | UTF-8 | 1,995 | 3.46875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
caname="myca"
fileconf="${caname}.conf"
dircerts="certs"
certca="ca.cert"
keyca="privkey.pem"
domain="yourdoamin.tld"
maxconsul=3
cleanFiles() {
rm -rf serial certindex ${fileconf} cert* *.pem *.old ${dircerts}
}
readPrint() {
echo "[+] CA Cert:"
cat ${dircerts}/${certca}
echo ""
for i in `seq 1 ${maxconsul}`; do
echo "[+] Consul${i} Cert:"
cat ${dircerts}/consul${i}.cert
echo ""
done
for i in `seq 1 ${maxconsul}`; do
echo "[+] Consul${i} Key:"
cat ${dircerts}/consul${i}.key
echo ""
done
echo "[+] Gossip:"
docker run --rm consul keygen
}
if [ "$1" = "--remove" ]; then
cleanFiles
exit 0
fi
if [ "$1" = "--show" ]; then
readPrint
exit 0
fi
cleanFiles
mkdir -p ${dircerts}
echo "000a" > serial
touch certindex
cat << EOF > ${fileconf}
[ ca ]
default_ca = ${caname}
[ ${caname} ]
unique_subject = no
new_certs_dir = .
certificate = ${dircerts}/${certca}
database = certindex
private_key = ${dircerts}/${keyca}
serial = serial
default_days = 3650
default_md = sha1
policy = ${caname}_policy
x509_extensions = ${caname}_extensions
[ ${caname}_policy ]
commonName = supplied
stateOrProvinceName = supplied
countryName = supplied
emailAddress = optional
organizationName = supplied
organizationalUnitName = optional
[ ${caname}_extensions ]
basicConstraints = CA:false
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always
keyUsage = digitalSignature,keyEncipherment
extendedKeyUsage = serverAuth,clientAuth
EOF
echo -e '\n\n\n\n\nlocalhost\n\n\n' | openssl req -newkey rsa:2048 -days 3650 -x509 -nodes -out ${dircerts}/${certca} -keyout ${dircerts}/${keyca}
for i in `seq 1 ${maxconsul}`; do
echo -e '\n\n\n\n\nlocalhost\n\n\n' | openssl req -newkey rsa:1024 -nodes -out ${dircerts}/consul${i}.csr -keyout ${dircerts}/consul${i}.key
openssl ca -batch -config ${fileconf} -notext -in ${dircerts}/consul${i}.csr -out ${dircerts}/consul${i}.cert
done
readPrint
| true |
6e73babea2635ad9c2292ca3fa04779af194d8d0 | Shell | BillFarber/shibboleth | /environment/ipaUsersAndGroups.sh | UTF-8 | 2,623 | 3 | 3 | [] | no_license | ################################################################
#
# 1) You must have an admin ticket before running this script
# 2) All the passwords are set to: changeme
# 3) You must change all the user passwords after running this script
# The easiest way is to use kinit to request a ticket for the user
# ie: kinit BigTopAdmin
#
################################################################
################################################################
# Create BigTop groups
################################################################
# BigTopAdminGroup
ipa group-add bigtopadmingroup --desc "Group granting users the privilege to write and update MarkLogic BigTop documents"
# BigTopUsers
ipa group-add bigtopusers --desc "Group granting users access to the MarkLogic BigTop REST server."
# BigTopReaderRedGroup
ipa group-add bigtopreaderredgroup --desc "Group granting users access to BigTop "red" documents."
# BigTopReaderBlueGroup
ipa group-add bigtopreaderbluegroup --desc "Group granting users access to BigTop "blue" documents."
################################################################
# Create BigTop users
################################################################
# BigTopAdmin
echo "changeme" | ipa user-add bigtopadmin --first=Admin --last=BigTop --password
# BigTopReaderRed
echo "changeme" | ipa user-add bigtopreaderred --first=Readerred --last=BigTop --password
# BigTopReaderBlue
echo "changeme" | ipa user-add bigtopreaderblue --first=Readerblue --last=BigTop --password
# BigTopReaderBoth
echo "changeme" | ipa user-add bigtopreaderboth --first=Readerboth --last=BigTop --password
# BigTopReaderNone
echo "changeme" | ipa user-add bigtopreadernone --first=Readernone --last=BigTop --password
# SomeOtherPerson
echo "changeme" | ipa user-add someotherperson --first=Person --last=Someother --password
################################################################
# Add users to the appropriate groups
################################################################
# Add admin user to admin group
ipa group-add-member bigtopadmingroup --users={bigtopadmin}
# Add everybody to the bigtopusers group
ipa group-add-member bigtopusers --users={bigtopadmin,bigtopreaderred,bigtopreaderblue,bigtopreaderboth,bigtopreadernone}
# Add admin and the red user to the bigtopreaderredgroup group
ipa group-add-member bigtopreaderredgroup --users={bigtopadmin,bigtopreaderred,bigtopreaderboth}
# Add admin and the blue user to the bigtopreaderbluegroup group
ipa group-add-member bigtopreaderbluegroup --users={bigtopadmin,bigtopreaderblue,bigtopreaderboth}
| true |
a75fd1eb3087d57eca823a8d33de2e1abc550a45 | Shell | fedyfausto/QuMedical | /admin/scripts/stop.sh | UTF-8 | 923 | 3.09375 | 3 | [] | no_license | #!/bin/bash
function query {
{
if [ $# -lt 1 ] # Il numero di argomenti passati allo script è corretto?
then
/home/binding/orientdb/bin/console.sh "
connect remote:localhost/System root root;
SET ignoreErrors TRUE;
SET echo FALSE;
$1;
DISCONNECT;
";
else
/home/binding/orientdb/bin/console.sh "
connect remote:localhost/$1 root root;
SET ignoreErrors TRUE;
SET echo FALSE;
$2;
DISCONNECT;
";
fi
}>/dev/null
}
function queryfile {
{
/home/binding/orientdb/bin/console.sh $(cat $1);
}>/dev/null
}
PID=$$;
NUM_ARG=1;
if [ $# -lt 1 ] # Il numero di argomenti passati allo script è corretto?
then
echo "Non hai passato un PID valido";
exit $E_ERR_ARG;
fi
PID_PROCESS=$(cat "./$1/pid");
kill -9 $PID_PROCESS;
echo "Processo killato";
query "UPDATE Task SET status = -1, percentage = 100 WHERE name = '$1';" > /dev/null;
query "DROP DATABASE $1;" > /dev/null;
rm -rf $1;
| true |
b4d1cf646d4ea79adc5206ec2e35d8de414a87c3 | Shell | S-Sam-Sin/Linux-Automation | /lau_system.sh | UTF-8 | 750 | 3.9375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Check user permission for Root
function Permission()
{
# Root user
local ROOT_UID=0
# Non root user exit
local E_NONROOT=87
# script must be execute as root
if [ ${UID} != ${ROOT_UID} ]
then
echo "The script is not executed as root"
echo "try using sudo "
exit
fi
}
function Install()
{
local CMD=$(apt install $1) ### install the package
${CMD}
}
# Double check if PPA already exists
function PPA_Exist()
{
if [ ! -f ${1} ]; then
### Add PPA
sh -c ${2}
fi
}
function Close()
{
killall -HUP ${1} # Close program
}
function Open()
{
xdg-open ${1} # File or program
}
function Shutdown()
{
shutdown ${1} ### give time like
} | true |
c28fbb4e7db273ff81a936d9ef77a9bf959e89d0 | Shell | JohnOmernik/zetapkgs_old | /usershell/start_instance.sh | UTF-8 | 8,189 | 3.75 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
CLUSTERNAME=$(ls /mapr)
APP_NAME="usershell"
. /mapr/${CLUSTERNAME}/zeta/kstore/env/zeta_shared.sh
. /mapr/${CLUSTERNAME}/zeta/shared/preinst/general.inc.sh
. ${APP_HOME}/instance_include.sh
CURUSER=$(whoami)
if [ "$CURUSER" != "$ZETA_IUSER" ]; then
echo "Must use $ZETA_IUSER: User: $CURUSER"
fi
echo "This script both installs and starts new users in the $APP_ID instance of $APP_NAME"
echo ""
read -e -p "What is the username you wish to install this instance of usershell for? " APP_USER
echo ""
APP_USER_ID=$(id $APP_USER)
if [ "$APP_USER_ID" == "" ]; then
echo "We could not determine the ID for the user $APP_USER"
echo "We cannot proceed if we can't figure out the user"
exit 1
fi
APP_USER_HOME="/mapr/$CLUSTERNAME/user/$APP_USER"
if [ ! -d "$APP_USER_HOME" ]; then
echo "A user home directory for $APP_USER should be located at $APP_USER_HOME"
echo "We could not see that directory"
echo "We need this to proceed"
exit 1
fi
APP_MARATHON_FILE="${APP_HOME}/marathon/user_shell_${APP_USER}_marathon.json"
if [ -f "$APP_MARATHON_FILE" ]; then
echo "There already is a marathon file for this instance at $APP_MARATHON_FILE"
echo "Exiting"
exit 1
fi
APP_USER_PATH="${APP_USER_HOME}/bin"
mkdir -p ${APP_USER_PATH}
DEF_FILES="profile nanorc bashrc"
echo ""
echo "Copying default $DEF_FILES to $APP_USER_HOME"
echo ""
for DFILE in $DEF_FILES; do
SRCFILE="${DFILE}_template"
DSTFILE=".${DFILE}"
if [ -f "${APP_USER_HOME}/${DSTFILE}" ]; then
read -e -p "${APP_USER_HOME}/${DSTFILE} exists, should we replace it with the default $DSTFILE? " -i "N" CPFILE
else
CPFILE="Y"
fi
if [ "$CPFILE" == "Y" ]; then
sudo cp ${APP_HOME}/$SRCFILE ${APP_USER_HOME}/$DSTFILE
sudo chown $APP_USER:zetaadm ${APP_USER_HOME}/$DSTFILE
fi
done
INSTRUCTIONS=$(grep "Zeta User Shell" ${APP_USER_HOME}/.profile)
if [ "$INSTRUCTIONS" == "" ]; then
sudo tee -a ${APP_USER_HOME}/.profile << EOF
CLUSTERNAME=\$(ls /mapr)
echo ""
echo "**************************************************************************"
echo "Zeta Cluster User Shell"
echo ""
echo "This simple shell is a transient container that allows you to do some basic exploration of the Zeta Environment"
echo ""
echo "Components to be aware of:"
echo "- If a Drill Instance was installed with this shell, you can run a Drill Command Line Shell (SQLLine) by simply typing 'zetadrill' and following the authentication prompts"
echo "- If a Spark instance was installed with this shell, you can run a Spark pyspark interactive shell by by simply typing 'zetaspark'"
echo "- Java is in the path and available for use"
echo "- Python is installed and in the path"
echo "- The hadoop client (i.e. hadoop fs -ls /) is in the path and available"
echo "- While the container is not persistent, the user's home directory IS persistent. Everything in /home/$USER will be maintained after the container expires"
echo "- /mapr/\$CLUSTERNAME is also persistent. This is root of the distributed file system. (I.e. ls /mapr/\$CLUSTERNAME has the same result as hadoop fs -ls /)"
echo "- The user's home directory is also in the distributed filesystem. Thus, if you save a file to /home/\$USER it also is saved at /mapr/\$CLUSTERNAME/user/\$USER. THis is usefule for running distributed drill queries."
echo ""
echo "This is a basic shell environment. It does NOT have the ability to run docker commands, and we would be very interested in other feature requests."
echo ""
echo "**************************************************************************"
echo ""
EOF
fi
echo ""
echo "Linking Hadoop Client for use in Container"
ln -s /opt/mapr/hadoop/hadoop-2.7.0/bin/hadoop ${APP_USER_PATH}/hadoop
read -e -p "What port should the instace of usershell for $APP_USER run on? " -i "31022" APP_PORT
APP_MAR_ID="${APP_ROLE}/${APP_ID}/${APP_USER}usershell"
echo ""
echo "You can customize your usershell env to utilize already established instances of some packages. You can skip this step if desired"
echo ""
read -e -p "Do you wish to skip instance customization? Answering anything except Y will run through some additional questions: " -i "N" SKIPCUSTOM
echo ""
if [ "$SKIPCUSTOM" != "Y" ]; then
echo "The first package we will be offering for linking into the env is Apache Drill"
PKG="drill"
read -e -p "Enter instance name of $PKG you wish to associate with this usershell instance (blank if none): " PKG_ID
read -e -p "What role is this instance of $PKG in? " PKG_ROLE
if [ "$PKG_ID" != "" ]; then
DRILL_PKG_HOME="/mapr/$CLUSTERNAME/zeta/$PKG_ROLE/$PKG/$PKG_ID"
if [ ! -d "${DRILL_PKG_HOME}" ]; then
echo "Instance home not found, skipping"
else
ln -s ${DRILL_PKG_HOME}/zetadrill ${APP_USER_PATH}/zetadrill
fi
fi
echo "The first package we will be offering for linking into the env is Apache Spark"
PKG="spark"
read -e -p "Enter instance name of $PKG you wish to associate with this usershell instance (blank if none): " PKG_ID
read -e -p "What role is this instance of $PKG in? " PKG_ROLE
if [ "$PKG_ID" != "" ]; then
SPARK_PKG_HOME="/mapr/$CLUSTERNAME/zeta/$PKG_ROLE/$PKG/$PKG_ID"
if [ ! -d "${SPARK_PKG_HOME}" ]; then
echo "Instance home not found, skipping"
else
cat > ${APP_USER_PATH}/zetaspark << EOS
#!/bin/bash
SPARK_HOME="/spark"
cd \$SPARK_HOME
bin/pyspark
EOS
chmod +x ${APP_USER_PATH}/zetaspark
fi
fi
fi
if [ -d "$SPARK_PKG_HOME" ]; then
SPARK_HOME_SHORT=$(ls -1 ${SPARK_PKG_HOME}|grep -v "run\.sh")
SPARK_HOME="${SPARK_PKG_HOME}/$SPARK_HOME_SHORT"
echo "Using $SPARK_HOME for spark home"
cat > $APP_MARATHON_FILE << EOM
{
"id": "${APP_MAR_ID}",
"cpus": $APP_CPU,
"mem": $APP_MEM,
"cmd": "sed -i \"s/Port 22/Port ${APP_PORT}/g\" /etc/ssh/sshd_config && /usr/sbin/sshd -D",
"instances": 1,
"labels": {
"CONTAINERIZER":"Docker"
},
"container": {
"type": "DOCKER",
"docker": {
"image": "${APP_IMG}",
"network": "HOST"
},
"volumes": [
{ "containerPath": "/opt/mapr", "hostPath": "/opt/mapr", "mode": "RO"},
{ "containerPath": "/opt/mesosphere", "hostPath": "/opt/mesosphere", "mode": "RO"},
{ "containerPath": "/home/$APP_USER", "hostPath": "/mapr/$CLUSTERNAME/user/$APP_USER", "mode": "RW"},
{ "containerPath": "/home/zetaadm", "hostPath": "/mapr/$CLUSTERNAME/user/zetaadm", "mode": "RW"},
{ "containerPath": "/mapr/$CLUSTERNAME", "hostPath": "/mapr/$CLUSTERNAME", "mode": "RW"},
{ "containerPath": "/spark", "hostPath": "${SPARK_HOME}", "mode": "RW"}
]
}
}
EOM
else
cat > $APP_MARATHON_FILE << EOU
{
"id": "${APP_MAR_ID}",
"cpus": $APP_CPU,
"mem": $APP_MEM,
"cmd": "sed -i \"s/Port 22/Port ${APP_PORT}/g\" /etc/ssh/sshd_config && /usr/sbin/sshd -D",
"instances": 1,
"labels": {
"CONTAINERIZER":"Docker"
},
"container": {
"type": "DOCKER",
"docker": {
"image": "${APP_IMG}",
"network": "HOST"
},
"volumes": [
{ "containerPath": "/opt/mapr", "hostPath": "/opt/mapr", "mode": "RO"},
{ "containerPath": "/opt/mesosphere", "hostPath": "/opt/mesosphere", "mode": "RO"},
{ "containerPath": "/home/$APP_USER", "hostPath": "/mapr/$CLUSTERNAME/user/$APP_USER", "mode": "RW"},
{ "containerPath": "/home/zetaadm", "hostPath": "/mapr/$CLUSTERNAME/user/zetaadm", "mode": "RW"},
{ "containerPath": "/mapr/$CLUSTERNAME", "hostPath": "/mapr/$CLUSTERNAME", "mode": "RW"}
]
}
}
EOU
fi
read -e -p "Do you wish to start the process now? " -i "Y" STARTAPP
if [ "$STARTAPP" == "Y" ]; then
echo ""
echo "Submitting ${APP_ID} to Marathon then pausing 20 seconds to wait for start and API usability"
echo ""
curl -X POST $ZETA_MARATHON_SUBMIT -d @${APP_MARATHON_FILE} -H "Content-type: application/json"
echo ""
echo ""
fi
echo ""
echo ""
echo "When this instance of usershell is running, it can be accessed with the following ssh command"
echo ""
echo "ssh -p $APP_PORT ${APP_USER}@${APP_USER}usershell-${APP_ID}-${APP_ROLE}.marathon.slave.mesos"
echo ""
echo ""
| true |
3a64a054e56d5d7f3780ba565e5bd4090d0cf010 | Shell | theadityapathak/k8s-prow-test | /hack/update-file-perms.sh | UTF-8 | 121 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
find . -name "*.sh" \! -perm /a+x -exec chmod +x {} + | true |
858d92e099e749afe129d8eb77edd9fe927a0406 | Shell | TatteredMagic/meowdots | /install | UTF-8 | 478 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
CONFIG="install.conf.yaml"
DOTBOT_DIR="dotbot"
DOTBOT_BIN="bin/dotbot"
BASEDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
echo "Standing up dotbot-brew..."
cd $BASEDIR && git submodule update --init dotbot-brew/
echo "Standing up Vundle..."
cd $BASEDIR && git submodule update --init vim/bundle/Vundle.vim
echo "Handing off to dotbot!"
"${BASEDIR}/${DOTBOT_DIR}/${DOTBOT_BIN}" -d "${BASEDIR}" --plugin-dir dotbot-brew -c "${CONFIG}" "${@}"
| true |
2b49944e9bc9121949306e31985bde4146773218 | Shell | mosinu/puppet | /modules/users/files/edi.sh | UTF-8 | 793 | 3.265625 | 3 | [] | no_license | #!/bin/bash
PATH=$PATH
#####################
# Group accounts
#####################
# Dedicated group for the EDI bot application account.
/usr/sbin/groupadd -g 5017 gpibots
######################
# GUI access
######################
# The X session runs as root, and root cannot read the NAS directory by
# default, so root is added to the application group.
/usr/sbin/usermod -G gpibots root
# Give the group changes a moment to settle before touching user accounts.
sleep 10
#####################
# Application accounts
#####################
# Create the application user on first run; on later runs just re-apply the
# expected settings (primary group, locked password, home directory).
if ! id -u gpibots >/dev/null 2>&1 ; then
    /usr/sbin/useradd -u 5017 -g gpibots -p '!!' -c "EDI BOT App user" -d /home/gpibots gpibots
else
    /usr/sbin/usermod -g gpibots -p '!!' -d /home/gpibots gpibots
fi
| true |
b788ad482b218131af53ceccc123aa4efd019e42 | Shell | OrrAvrech/MAMAT_Exercises | /Ex4/Ex4/Ex4/references/hw4/Bash Tests - HW4/bash_test_0 | UTF-8 | 197 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# Test 0: running change_extensions on an empty directory must leave it empty.

# Clean up any leftover scratch directory from a previous run.
if [[ -n "$(find empty_dir 2> /dev/null)" ]]; then
    rm -r empty_dir
fi

mkdir empty_dir
./change_extensions empty_dir old_nothing new_nothing

# Capture the (expected empty) listing, then remove the scratch directory.
ls empty_dir > bash_out_0
rm -r empty_dir
| true |
fc0f2f79e1d7ed7f7aeaf15835ebe726c1137883 | Shell | djoslin0/shabby-lang | /build.sh | UTF-8 | 2,363 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Build every stage of the shabby-lang toolchain.  With a single source-file
# argument, each stage is also run on that file as soon as it is built.
if [ "$#" -eq 1 ]; then src_file=$(realpath "$1"); fi

cd src || exit 1

# Close any image viewer left over from a previous run.  pidof may print
# several PIDs, so the substitution is intentionally left unquoted.
# shellcheck disable=SC2046
kill $(pidof display) 2> /dev/null

rm -rf ../bin/*
mkdir ../bin/compilation/

set -e

# Warning flags and include path shared by every compiler invocation.
CFLAGS=(-I include -Wall -Wextra -Werror -Wpedantic)

# Source groups reused by several stages.
UTILS=(utils/symbols.c utils/file.c)
NODE_UTILS=("${UTILS[@]}" utils/nodes.c)
TYPE_UTILS=("${NODE_UTILS[@]}" utils/types.c utils/variables.c)

# banner TITLE -- print TITLE inside a '#' box sized to fit it.
banner() {
    local title="# $1 #"
    local border
    border=$(printf '%*s' "${#title}" '' | tr ' ' '#')
    echo "$border"
    echo "$title"
    echo "$border"
}

# stage TITLE OUT SRC... -- print the banner, compile SRC... to ../bin/OUT,
# then run the binary on $src_file when one was supplied.
stage() {
    local title="$1" out="$2"
    shift 2
    banner "$title"
    gcc "$@" "${CFLAGS[@]}" -o "../bin/$out"
    if [ -n "${src_file:-}" ]; then "../bin/$out" "$src_file"; fi
}

stage "Tokenizer" tokenizer tokenizer.c "${UTILS[@]}"
echo ""
stage "Parser" parser parser.c "${NODE_UTILS[@]}"
echo ""
stage "Symgen" symgen symgen.c "${TYPE_UTILS[@]}"
echo ""
stage "Typechecker" typec typechecker.c "${TYPE_UTILS[@]}"

# only generates graph if graphviz is installed
if [ "$#" -eq 1 ]; then
    if hash dot 2>/dev/null; then
        echo ""
        stage "Graph" graph graphviz.c "${NODE_UTILS[@]}"
        dot -Tpng ../bin/compilation/out.dot > ../bin/compilation/out.png
    fi
    if hash display 2>/dev/null; then
        display ../bin/compilation/out.png &
    fi
fi

echo ""
stage "Codegen" codegen codegen.c "${TYPE_UTILS[@]}"
echo ""
stage "Jump Resolver" jumpr jumpresolver.c utils/file.c
echo ""
stage "VM" vm vm.c "${UTILS[@]}"
| true |
5396216363d0ea42ddf7179b6fee558061a9febe | Shell | nareshganesan/k8s-app-templates | /examples/basics_tutorials.bash | UTF-8 | 8,456 | 3.625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
## https://kubernetes.io/docs/tutorials/kubernetes-basics/
#
# NOTE(review): this file is a transcript of the upstream "Kubernetes Basics"
# interactive tutorial.  It is meant to be read or pasted step by step, not
# executed as a whole: `kubectl proxy` blocks the terminal,
# `kubectl exec -ti ... bash` opens an interactive shell, and `host01` is the
# tutorial VM's hostname -- substitute your own node when running elsewhere.
### Creating a Cluster
# to get cluster information
kubectl cluster-info
# get nodes in the cluster
kubectl get nodes
### Create a Deployment
# run command creates a new deployment
# requires deployment name, app image location --image, --port
kubectl run kubernetes-bootcamp --image=docker.io/jocatalin/kubernetes-bootcamp:v1 --port=8080
# list your deployments in the cluster
kubectl get deployments
# a proxy that will forward communications into the cluster-wide, private network
# see all those APIs hosted through the proxy endpoint
kubectl proxy
# see all those APIs hosted through the proxy endpoint
curl http://localhost:8001/version
# get the Pod name
# (the go-template prints every pod name, one per line; with a single pod the
# variable ends up holding just that pod's name)
export POD_NAME=$(kubectl get pods -o go-template --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}')
echo Name of the Pod: $POD_NAME
# make an HTTP request to the application running in that pod
# url is the route to the API of the Pod
curl http://localhost:8001/api/v1/proxy/namespaces/default/pods/$POD_NAME/
### Exploring Your App
# look for existing Pods
kubectl get pods
# to view what containers are inside that Pod and what images are used to build those containers
# details about the Pod’s container: IP address, the ports used and a list of events related
# to the lifecycle of the Pod
kubectl describe pods
# Anything that the application would normally send to STDOUT becomes logs for the container within the Pod
kubectl logs $POD_NAME
# execute commands directly on the container once the Pod is up and running
# Let’s list the environment variables
kubectl exec $POD_NAME env
# let’s start a bash session in the Pod’s container
kubectl exec -ti $POD_NAME bash
# open the source code of the app is in the server.js
cat server.js
# check that the application is up by running a curl
curl localhost:8080
# to close container connection
exit
### Exposing Your App to public
# let’s list the current Services from our cluster:
kubectl get services
# create a new service and expose it to external traffic we’ll use the expose command with NodePort as parameter
kubectl expose deployment/kubernetes-bootcamp --type="NodePort" --port 8080
# check again for the services
kubectl get services
# To find out what port was opened externally (by the NodePort option)
kubectl describe services/kubernetes-bootcamp
# Create an environment variable called NODE_PORT that has as value the Node port
export NODE_PORT=$(kubectl get services/kubernetes-bootcamp -o go-template='{{(index .spec.ports 0).nodePort}}')
echo NODE_PORT=$NODE_PORT
# test that the app is exposed outside of the cluster using curl &
# the IP of the Node and the externally exposed port
# we get a response from the server. The Service is exposed
curl host01:$NODE_PORT
# Deployment created automatically a label for our Pod
# describe deployment command you can see the name of the label
kubectl describe deployment
# Let’s use this label to query our list of Pods
kubectl get pods -l run=kubernetes-bootcamp
# do the same to list the existing services
kubectl get services -l run=kubernetes-bootcamp
# To apply a new label we use the label command followed by the object type, object name and the new label:
# apply a new label to our Pod (we pinned the application version to the Pod),
kubectl label pod $POD_NAME app=v1
# check it with the describe pod command
kubectl describe pods $POD_NAME
# label is attached now to our Pod. And we can query now the list of pods using the new label
kubectl get pods -l app=v1
# To delete Services you can use the delete service
kubectl delete service -l run=kubernetes-bootcamp
# Confirm that the service is gone
kubectl get services
# To confirm that route is not exposed anymore you can curl the previously exposed IP and port
curl host01:$NODE_PORT
# You can confirm that the app is still running with a curl inside the pod
kubectl exec -ti $POD_NAME curl localhost:8080
### Scale your app
# list your deployments
kubectl get deployments
# We should have 1 Pod. If not, run the command again. This shows:
# The DESIRED state is showing the configured number of replicas
# The CURRENT state show how many replicas are running now
# The UP-TO-DATE is the number of replicas that were updated to match the desired (configured) state
# The AVAILABLE state shows how many replicas are actually AVAILABLE to the users
# let’s scale the Deployment to 4 replicas
kubectl scale deployments/kubernetes-bootcamp --replicas=4
# check the deployments
# change was applied, and we have 4 instances of the application available
kubectl get deployments
# There are 4 Pods now, with different IP addresses
kubectl get pods -o wide
# To check that, use the describe command
kubectl describe deployments/kubernetes-bootcamp
# Let’s check that the Service is load-balancing the traffic.
# To find out the exposed IP and Port we can use the describe service
kubectl describe services/kubernetes-bootcamp
# Create an environment variable called NODE_PORT that has as value the Node port
export NODE_PORT=$(kubectl get services/kubernetes-bootcamp -o go-template='{{(index .spec.ports 0).nodePort}}')
echo NODE_PORT=$NODE_PORT
# curl to the the exposed IP and port. Execute the command multiple times
# We hit a different Pod with every request. This demonstrates that the load-balancing is working
curl host01:$NODE_PORT
# To scale down the Service to 2 replicas, run again the scale command:
kubectl scale deployments/kubernetes-bootcamp --replicas=2
# List the Deployments to check if the change was applied
kubectl get deployments
# List the number of Pods
# This confirms that 2 Pods were terminated
kubectl get pods -o wide
### Updating Your App
# list your deployments
kubectl get deployments
# list the running Pods
kubectl get pods
# To view the current image version of the app, run a describe command against the Pods (look at the Image field)
kubectl describe pods
# To update the image of the application to version 2, use the set image command,
# followed by the deployment name and the new image version
# The command notified the Deployment to use a different image for your app and initiated a rolling update
kubectl set image deployments/kubernetes-bootcamp kubernetes-bootcamp=jocatalin/kubernetes-bootcamp:v2
# Check the status of the new Pods, and view the old one terminating with the get pods command
kubectl get pods
# let’s check that the App is running. To find out the exposed IP and Port we can use describe service
kubectl describe services/kubernetes-bootcamp
# Create an environment variable called NODE_PORT that has as value the Node port
export NODE_PORT=$(kubectl get services/kubernetes-bootcamp -o go-template='{{(index .spec.ports 0).nodePort}}')
echo NODE_PORT=$NODE_PORT
# we’ll do a curl to the the exposed IP and port
# We hit a different Pod with every request and we see that all Pods are running the latest version (v2).
curl host01:$NODE_PORT
# The update can be confirmed also by running a rollout status command:
kubectl rollout status deployments/kubernetes-bootcamp
# To view the current image version of the app, run a describe command against the Pods
# We run now version 2 of the app
kubectl describe pods
## Rollback update
# Let’s perform another update, and deploy image tagged as v10
kubectl set image deployments/kubernetes-bootcamp kubernetes-bootcamp=jocatalin/kubernetes-bootcamp:v10
# Use get deployments to see the status of the deployment
kubectl get deployments
# something is wrong… We do not have the desired number of Pods available. List the Pods again
kubectl get pods
# A describe command on the Pods should give more insights
# There is no image called v10 in the repository.
kubectl describe pods
# Let’s roll back to our previously working version. We’ll use the rollout undo command
# The rollout command reverted the deployment to the previous known state (v2 of the image)
# Updates are versioned and you can revert to any previously know state of a Deployment
kubectl rollout undo deployments/kubernetes-bootcamp
# List again the Pods
# Four Pods are running
kubectl get pods
# Check again the image deployed on the them
# We see that the deployment is using a stable version of the app (v2).
# The Rollback was successful.
kubectl describe pods
# Delete the deployment by name
kubectl delete deployment kubernetes-bootcamp
| true |
2856c2f706393754323abb8e0a0e896972fadbf8 | Shell | songlibo/page-speed | /chromium_extension/branches/chromium_update/src/makenacl.sh | UTF-8 | 1,452 | 3.359375 | 3 | [] | no_license | #!/bin/bash
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Allow callers to override the nacl home directory.
DEFAULT_NACL_HOME="/home/$USER/bin/native_client_sdk"
if [ -z "${NACL_HOME:-}" ]; then NACL_HOME=$DEFAULT_NACL_HOME; fi

if [ ! -d "$NACL_HOME" ]; then
  echo "Unable to find NACL_HOME at $NACL_HOME."
  exit 1
fi

# Use the 32-bit toolchain by default.
NACL_ARCH_DIR="nacl"
# Switch to the 64-bit toolchain if arch is x86_64.
# (command substitution quoted so an unexpected multi-word uname result
# cannot break the test)
if [ "$(uname -m)" = "x86_64" ]; then NACL_ARCH_DIR="nacl64"; fi

NACL_BIN=$NACL_HOME/toolchain/linux_x86/$NACL_ARCH_DIR/bin
if [ ! -d "$NACL_BIN" ]; then
  echo "Unable to find NACL_BIN at $NACL_BIN."
  exit 1
fi

# Override the target toolchain used in the gyp-generated Makefile.
# "$@" is quoted so extra make arguments given by the caller are forwarded
# without being re-split (the original unquoted $@ broke arguments with
# spaces, e.g. BUILDTYPE="Debug Foo").
make CC.target="$NACL_BIN/gcc" \
     CXX.target="$NACL_BIN/g++" \
     LINK.target="$NACL_BIN/g++" \
     AR.target="$NACL_BIN/ar" \
     RANLIB.target="$NACL_BIN/ranlib" \
     "$@"
| true |
a2d1274309d6480cf2b21c58ae94e8aa737e39ab | Shell | Nidheshp/Linux-Exercises- | /Scripting2.sh | UTF-8 | 341 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# Show current system status, append a snapshot to logfile.txt, then offer
# to delete the log file.
date
who
uptime
echo "-------------------------------------------------------------"

# Append a timestamped snapshot to the log.
touch logfile.txt
date >> logfile.txt
uptime >> logfile.txt

echo "Would you like to delete the older file?"
echo "Yes (y), No (n)"
# -r keeps backslashes in the answer literal.
read -r reply

# [[ ... ]] with a quoted variable is safe even for an empty answer; the
# original `[ $reply == n ]` printed a "unary operator expected" error when
# the user just pressed Enter.
if [[ "$reply" == "n" ]]; then
	echo "Keeping Old File"
elif [[ "$reply" == "y" ]]; then
	sudo rm logfile.txt
fi
| true |
9b8271c61449cbaee95273c4bac9f98d08e68aa4 | Shell | kozk/so17p | /p1/p1.sh | UTF-8 | 300 | 2.828125 | 3 | [] | no_license | #!/bin/bash
echo "SISTEMAS OPERATIVOS - PRACTICA 1 - PROBLEMA 1"
echo "INFORMACION DE CPU Y MEMORIA EN TIEMPO REAL"
# -b (batch mode) is required when top's output is piped: without it top
# refuses to run because stdout is not a terminal.  -n 1 takes a single
# snapshot; -i hides idle tasks, as in the original invocation.
# The CPU figure is %user + %system summed with paste/bc.
cpu_total=$(top -b -n 1 -i | head -3 | grep '%Cpu' | awk '{print $2 "\n" $4}' | paste -sd + | bc)
mem_used=$(top -b -n 1 -i | head -4 | grep Mem | awk '{print $6}')
echo "USO TOTAL DE CPU: ${cpu_total}"
echo "USO TOTAL DE MEMORIA EN KB: ${mem_used}"
| true |
213db3a3d5602cc21e0a1b990929be1994b6deb2 | Shell | antmicro/zephyr | /tests/bsim/bluetooth/host/gatt/settings/test_scripts/run_gatt_settings_2.sh | UTF-8 | 2,114 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Copyright 2022 Nordic Semiconductor ASA
# SPDX-License-Identifier: Apache-2.0

set -eu
bash_source_dir="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"

source "${bash_source_dir}/_env.sh"
source "${ZEPHYR_BASE}/tests/bsim/sh_common.source"

simulation_id="${test_name}_2"
verbosity_level=2
EXECUTE_TIMEOUT=120

cd "${BSIM_OUT_PATH}/bin"

# Remove the files used by the custom SETTINGS backend
TO_DELETE=("${simulation_id}_server_2.log" "${simulation_id}_client_2.log")
echo "remove settings files ${TO_DELETE[*]}"
rm "${TO_DELETE[@]}" || true

Execute ./bs_2G4_phy_v1 -v=${verbosity_level} -s="${simulation_id}" -D=8 -sim_length=30e6 "$@"

# Only one `server` device is really running at a time. This is a necessary hack
# because bsim doesn't support plugging devices in and out of a running
# simulation, but this test needs a way to power-cycle the `server` device a few
# times.
#
# Each device will wait until the previous instance (called 'test round') has
# finished executing before starting up.  Rounds 0..6 are identical apart
# from the device number / round index, so run them from a loop.
for round in 0 1 2 3 4 5 6; do
    Execute "$test_2_exe" -v=${verbosity_level} \
        -s="${simulation_id}" -d="${round}" -testid=server -RealEncryption=1 \
        -argstest "${round}" 6 "server_2"
done

Execute "$test_2_exe" -v=${verbosity_level} \
    -s="${simulation_id}" -d=7 -testid=client -RealEncryption=1 -argstest 0 0 "client_2"

wait_for_background_jobs
| true |
6eb9b0c0e811b13dce2bb927ea16f7bf0ab2e609 | Shell | gentoo-mirror/haskell | /scripts/build-random-haskell-pkgs.bash | UTF-8 | 4,686 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# Copyright 1999-2022 Gentoo Authors
# Copyright 2022 hololeap
# Distributed under the terms of the GNU General Public License v2
# gentoo-haskell random install script
# v0.2.1
#
# Repeatedly tries to install a random package from ::haskell (that is not
# already installed and has at least one unmasked version)
#
# - Notes successes, errors, packages that try to downgrade, and packages that
# fail to resolve
# - Rudimentary logger to files: /tmp/random-pkg-*
# - Runs `haskell-updater` after every attempt -- Aborts script if it fails
# - Aborts on on SIGINT (Ctrl+C)
# External tools, kept as arrays so each command's fixed options travel
# with it and can be expanded safely as "${CMD[@]}".
EIX=(/usr/bin/eix)
EMERGE=(/usr/bin/emerge --ignore-default-opts --verbose --quiet-build --deep --complete-graph --oneshot)
HASKELL_UPDATER=(/usr/sbin/haskell-updater -- --ignore-default-opts --quiet-build)

# Result buckets: each element is "<date>: <category/package>" (the
# timestamp prefix is why random_pkg matches with `grep "${l}\$"`).
declare -a completed=()
declare -a failed=()
declare -a tried_to_downgrade=()
declare -a resolve_failed=()
# Package currently being processed, and the captured `emerge --pretend`
# output for it (both written by the main loop / capture_portage_output).
declare pkg=''
declare portage_output=''
log_portage_output() {
  # Append one log record: timestamp, package atom, raw portage output,
  # then a blank separator line.
  local atom="${1}"
  local emerge_output="${2}"
  date
  echo "${atom}"
  echo "${emerge_output}"
  echo
}
finish() {
  # Summarise the run: dump each result bucket under its heading.  Output
  # is byte-for-byte what the echo-based version produced.
  printf '\n--------\n\nResults:\n\n'
  printf 'Completed:\n'
  printf '%s\n' "${completed[@]}"
  printf '\nFailed:\n'
  printf '%s\n' "${failed[@]}"
  printf '\nThese packages tried to downgrade:\n'
  printf '%s\n' "${tried_to_downgrade[@]}"
  printf '\nThese packages failed to resolve:\n'
  printf '%s\n' "${resolve_failed[@]}"
  printf '\n'
}
error() {
  # SIGINT / fatal-error handler: report what was being processed, print
  # the run summary, and abort the script with status 1.
  (
  echo
  # The original tested `[[ "${pkg}" == "\n" ]]`, which can never match a
  # real package atom, so the informative branch was unreachable.  Test for
  # "non-empty" instead so the in-progress package is actually reported.
  if [[ -n "${pkg}" ]]; then
    echo "Cancelling while trying ${pkg}"
  else
    echo "Cancelling"
  fi
  ) >&2
  finish
  exit 1
}
# Get a random unmasked package from ::haskell that is not installed
random_pkg() {
  # Print one random candidate atom on stdout -- the caller captures stdout
  # as the chosen package, so every diagnostic below must go to stderr.
  # Candidates: packages in the ::haskell overlay that are not installed
  # (-! -I) and have at least one unmasked version.
  out="$("${EIX[@]}" --in-overlay haskell --and -\( -\! -I -\) --and --non-masked -#)"
  ret="$?"
  case "${ret}" in
    127)
      echo "Received exit code 127 from eix. Is it installed?" >&2
      return "${ret}"
      ;;
    1)
      echo "${out}" >&2
      echo "No package matches found" >&2
      return "${ret}"
      ;;
    0)
      ;;
    *)
      echo "${out}" >&2
      echo "eix exited with unsuccessful code ${ret}" >&2
      return "${ret}"
      ;;
  esac

  # Skip packages that have already been tried.  Each of the arrays we check
  # has a timestamp at the front of each element (hence `grep -q "${l}\$"`).
  # The while loop runs in a process-substitution subshell, so skip/l/c/...
  # do not leak into the caller.
  readarray -t pkgs < <(echo "${out}" | while read -r l; do
    skip=0
    for c in "${completed[@]}"; do
      echo "$c" | grep -q "${l}\$" && skip=1
    done
    for f in "${failed[@]}"; do
      echo "$f" | grep -q "${l}\$" && skip=1
    done
    for d in "${tried_to_downgrade[@]}"; do
      echo "$d" | grep -q "${l}\$" && skip=1
    done
    for r in "${resolve_failed[@]}"; do
      echo "$r" | grep -q "${l}\$" && skip=1
    done
    [[ "${skip}" -eq 0 ]] && echo "$l"
  done)

  pool_size="${#pkgs[@]}"
  if [[ "${pool_size}" -eq 0 ]]; then
    # Diagnostic on stderr: the original printed this to stdout, where the
    # caller would capture it as the "package name".
    echo "Pool is empty! Exiting..." >&2
    return 1
  fi

  echo "Choosing from pool of ${#pkgs[@]} packages..." >&2

  # Pick one candidate uniformly at random.
  for p in "${pkgs[@]}"; do echo "$p"; done | sort -R | head -1

  return "${ret}"
}
capture_portage_output() {
  # Run `emerge --pretend` for the current ${pkg}, log the exact command and
  # its exit status to stderr, and store the combined stdout+stderr in the
  # global ${portage_output}.  Returns emerge's exit status.
  # `local -a`: the original leaked this throwaway array as a global `cmd`.
  local -a cmd=("${EMERGE[@]}" --pretend --nospinner "${pkg}")
  echo "${cmd[@]}" >&2
  portage_output="$("${cmd[@]}" 2>&1)"
  local pretend_return=$?
  echo "pretend_return: \"${pretend_return}\"" >&2
  return "${pretend_return}"
}
check_for_downgrades() {
  # Scan the captured `emerge --pretend` output for any "[ebuild ...D...]"
  # line -- i.e. a package portage wants to downgrade.  Matching lines are
  # echoed to stderr.  Returns 1 when a downgrade is present, 0 otherwise.
  if [[ -z "${portage_output}" ]]; then
    echo "No portage output!" >&2
    error
  fi

  if ! grep '\[ebuild[^\[]*D[^\[]*\]' >&2 <<<"${portage_output}"; then
    echo "No downgrade detected" >&2
    return 0
  fi
  echo "Downgrade detected: ${pkg}" >&2
  return 1
}
# Abort cleanly (summary + exit 1) on Ctrl+C.
trap error SIGINT

# Main loop: pick a random not-yet-tried package, pretend-resolve it with
# portage, then either install it or record why it was skipped.
while true; do
  (
  echo
  echo --------
  echo
  echo "Looking for a random package from ::haskell that is not installed..."
  ) >&2

  # Note: after a plain assignment, $? is the exit status of the command
  # substitution, so this correctly propagates random_pkg's status.
  pkg="$(random_pkg)"
  [[ "$?" -eq 0 ]] || error

  echo "Trying ${pkg}" >&2

  # NOTE(review): "No matches found" is presumably a literal message that can
  # end up on random_pkg's stdout -- verify against the eix output format.
  if [[ "${pkg}" == "No matches found" ]]; then
    finish
    exit 0
  fi

  echo -n "Checking for downgrades... " >&2

  if capture_portage_output; then
    echo "${portage_output}" >&2
  else
    # emerge --pretend failed: keep the output in a log file, record the
    # package as unresolvable, and move on to the next pick.
    echo "${portage_output}" >&2
    log_file="/tmp/random-pkg-resolve-failed-${pkg//\//-}.log"
    echo "Failure while resolving with portage: ${pkg}" >&2
    echo "Saving output to ${log_file}" >&2
    log_portage_output "${pkg}" "${portage_output}" >> "${log_file}"
    resolve_failed+=( "$(date): ${pkg}" )
    continue
  fi

  if check_for_downgrades; then
    # No downgrade wanted: actually install the package.
    if "${EMERGE[@]}" --keep-going=y "${pkg}"; then
      completed+=( "$(date): ${pkg}" )
    else
      failed+=( "$(date): ${pkg}" )
    fi
    # Per the file header, a failed haskell-updater run aborts the script.
    "${HASKELL_UPDATER[@]}" || error
  else
    # Portage wanted to downgrade something: record it and keep the log.
    log_file="/tmp/random-pkg-downgrade-${pkg//\//-}.log"
    echo "Saving output to ${log_file}" >&2
    log_portage_output "${pkg}" "${portage_output}" >> "${log_file}"
    tried_to_downgrade+=( "$(date): ${pkg}" )
  fi
done
| true |
1c0b2a3c172b067a9526bf0878ad4a5477a87265 | Shell | usf-cs326-fa21/P2-Tests | /03-Basic-Builtins-1.sh | UTF-8 | 684 | 3.1875 | 3 | [] | no_license | source "${TEST_DIR}/lib/funcs.bash"
run_timeout=5

# Drive every basic-builtins input script through both the reference shell
# and the shell under test, then diff the two outputs.  Returns the status
# of the last failing comparison (0 when everything matched).
run_scripts() {
    fail=0
    for case_script in "${TEST_DIR}"/inputs/basic-builtins/*.sh; do
        printf -- '-> Running script: %s\n\n' "${case_script}"
        echo "Script Contents: ----------------------------------"
        cat "${case_script}"
        printf -- '---------------------------------------------------\n\n'

        reference_run sh "${case_script}" 2> /dev/null
        run ./$SHELL_NAME < <(cat "${case_script}")
        printf '\n\n'
        echo "-> Comparing outputs"
        compare_outputs || fail=${?}
        printf '\n\n'
    done
    return ${fail}
}

test_start "Basic Builtins (cd, comments, exit)"
run_scripts
test_end
| true |
5329b988732fd524f6ef1e6adcd2b186d0dc5a12 | Shell | frisbee23/docker-i-doit | /1.9/initialize-db-once.sh | UTF-8 | 552 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# SECURITY(review): hardcoded root password; it is also interpolated into the
# docker command line, so it is visible to anyone on the host via `ps`.
# Consider reading it from the environment or a secrets file instead.
MYSQL_ROOT_PASSWORD=changeme
###### this is part of the idoit installation, only needed with a fresh database
# Re-enable classic password login for root (MariaDB defaults to socket
# auth), because the i-doit setup needs it.
docker exec -t idoit-mariadb sh -c "echo \"UPDATE mysql.user SET plugin = 'mysql_native_password' WHERE User = 'root'; FLUSH PRIVILEGES;\" | mysql -p$MYSQL_ROOT_PASSWORD"
# Request a full ("slow") InnoDB shutdown on the next stop, so the InnoDB
# files can be moved away afterwards (i-doit setup / performance tuning).
docker exec -t idoit-mariadb sh -c "mysql -uroot -p$MYSQL_ROOT_PASSWORD -e\"SET GLOBAL innodb_fast_shutdown = 0\""
| true |
5034a6cb06f3cdbccb5769160e39ede1ddee6469 | Shell | matwarg/warg | /scripts/ugrade_artifactory.sh | UTF-8 | 2,226 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Migrate data and configuration from a legacy Artifactory layout
# ($ARTIFACTORY_HOME) into the new JFrog home layout ($JFROG_HOME).

# Fail fast if either root is unset/empty: without this guard the unquoted
# originals silently copied into wrong absolute paths like "/artifactory/...".
: "${JFROG_HOME:?JFROG_HOME must be set}"
: "${ARTIFACTORY_HOME:?ARTIFACTORY_HOME must be set}"

# copy_tree SRC DST -- create DST and copy the *contents* of SRC into it,
# preserving permissions and timestamps (cp -rp SRC/. DST).
copy_tree() {
    mkdir -p "$2"
    cp -rp "$1/." "$2"
}

echo "start copy files to new JFrog Home"

# Artifactory data
copy_tree "$ARTIFACTORY_HOME/data" "$JFROG_HOME/artifactory/var/data/artifactory/"

# Access data
copy_tree "$ARTIFACTORY_HOME/access/data" "$JFROG_HOME/artifactory/var/data/access/"

# Replicator data
# Note: If you've have never used the Artifactory Replicator
# your $ARTIFACTORY_HOME/replicator/ directory will be empty
copy_tree "$ARTIFACTORY_HOME/replicator/data" "$JFROG_HOME/artifactory/var/data/replicator/"

# Artifactory config
copy_tree "$ARTIFACTORY_HOME/etc" "$JFROG_HOME/artifactory/var/etc/artifactory/"

# Access config
copy_tree "$ARTIFACTORY_HOME/access/etc" "$JFROG_HOME/artifactory/var/etc/access/"

# Replicator config
copy_tree "$ARTIFACTORY_HOME/replicator/etc" "$JFROG_HOME/artifactory/var/etc/replicator/"

# master.key
mkdir -p "$JFROG_HOME/artifactory/var/etc/security/"
cp -p "$ARTIFACTORY_HOME/etc/security/master.key" "$JFROG_HOME/artifactory/var/etc/security/master.key"

# server.xml
mkdir -p "$JFROG_HOME/artifactory/var/work/old"
cp -p "$ARTIFACTORY_HOME/tomcat/conf/server.xml" "$JFROG_HOME/artifactory/var/work/old/server.xml"

# artifactory.defaults
cp -rp "$ARTIFACTORY_HOME/bin/artifactory.default" "$JFROG_HOME/artifactory/var/work/old/artifactory.default"
#or, if Artifactory was installed a service
#cp -rp "$ARTIFACTORY_HOME/etc/default" "$JFROG_HOME/artifactory/var/work/old/artifactory.default"

# Remove logback.xml with old links. Please consider migrating manually anything that is customized here
rm -f "$JFROG_HOME/artifactory/var/etc/artifactory/logback.xml"
rm -f "$JFROG_HOME/artifactory/var/etc/access/logback.xml"

# Move Artifactory logs
copy_tree "$ARTIFACTORY_HOME/logs" "$JFROG_HOME/artifactory/var/log/archived/artifactory/"
echo "All Done"
| true |
7da47bd24ae80e50142275e7ed8190034e6362e9 | Shell | ver2point0/quick-uaa-deployment-cf | /ci/scripts/test-up.sh | UTF-8 | 545 | 2.65625 | 3 | [] | no_license | #!/bin/bash
set -eu

# Smoke test: deploy quaa (UAA) to Cloud Foundry, hit its login endpoint,
# and always tear the deployment down again via the EXIT trap.

# Quoted (the original unquoted `cd ${REPO_ROOT:?required}` word-splits a
# path with spaces); :?required aborts with a message when unset.
cd "${REPO_ROOT:?required}"

# Evaluate the environment emitted by `bin/quaa env`.
eval "$(bin/quaa env)"

# Cleanup handler: dump recent logs, then delete the deployment.  Runs on
# Ctrl+C, SIGTERM and every normal/errexit exit.
u_down() {
  set +e
  cf logs uaa --recent
  echo
  echo
  echo "Cleaning up..."
  quaa down -f
}
trap u_down SIGINT SIGTERM EXIT

cf api "${CF_URL:?required}"
cf auth "${CF_USERNAME:?required}" "${CF_PASSWORD:?required}"
cf target -o "${CF_ORGANIZATION:?required}" -s "${CF_SPACE:?required}"

# Database service instance the UAA deployment binds to.
cf cs elephantsql turtle uaa-db

quaa up --route "${CF_TEST_ROUTE:?required}"

# curl -f returns non-zero on HTTP errors, failing the test under set -e.
curl -f "https://${CF_TEST_ROUTE}/login" -H "Accept: application/json"

quaa auth-client
uaa users
| true |
7d2b2e4f7295afecc5e436c43b1d95243bd019bd | Shell | GNHua/pw | /scripts/start_mongodb.sh | UTF-8 | 311 | 2.890625 | 3 | [] | no_license | #! /bin/bash
# Launch mongod for this project, forked into the background.

# Repository root: two levels above the directory containing this script.
script_dir="$( cd "$(dirname "$0")" ; pwd -P )"
ROOT="$( cd "${script_dir}/../.." ; pwd -P )"
# Timestamp giving each run its own log file.
TIMESTAMP="$(date +%Y.%m.%d.%H%M%S)"

# start mongodb: localhost only, authentication required, detach via --fork.
mongod --dbpath "${ROOT}/data/db" \
       --bind_ip 127.0.0.1 --port 27017 \
       --logpath "${ROOT}/data/log/mongo_${TIMESTAMP}.log" \
       --auth \
       --fork
echo "MongoDB Started" | true |
8b91eb7b22fad45bed04871383ae5bd3f911f75a | Shell | petervolvowinz/beamylabs-start | /scripts/trigger-upgrade.sh | UTF-8 | 285 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Watch the configuration/ tree and run an upgrade whenever a file whose
# name ends in "upgrade" is created in (or moved into) it.
inotifywait -m -r configuration/ -e create -e moved_to |
    # -r keeps backslashes in names literal (plain `read` mangles them);
    # inotifywait -m prints "<path> <events> <file>" per event.
    while read -r watched_path action file; do
        if [[ "$file" =~ upgrade$ ]]; then # time to upgrade
            echo "trying to upgrade system"
            git pull
            /bin/bash upgrade.sh
        fi
    done
| true |
d1b09f9ce15b2465ef3aa5b52140e87b41542f4c | Shell | ATrump/hive-ff-linux | /hive/sbin/hive-passwd | UTF-8 | 996 | 4 | 4 | [] | no_license | #!/usr/bin/env bash
#Change password for SSH and VNC
#Parameter: new_password or -conf to get password from rig.conf

#[ -t 1 ] &&
. colors

# Check parameter: with no argument, print usage and bail out.
if [[ -z $1 ]]; then
	echo -e "${CYAN}Changes system password for \"user\" (SSH and VNC also)${NOCOLOR}"
	echo -e "Specify password as an argument"
	echo -e "Run with \"-conf\" to get a password from rig.conf"
	exit 1
fi

# Resolve the new password: from rig.conf (-conf) or directly from $1.
if [[ $1 == "-conf" ]]; then
	if [[ ! -f $RIG_CONF ]]; then
		echo -e "${RED}File $RIG_CONF not found${NOCOLOR}"
		exit 1
	fi
	. "$RIG_CONF"
	if [[ -z $RIG_PASSWD ]]; then
		echo -e "${RED}Variable RIG_PASSWD is empty${NOCOLOR}"
		exit 1
	fi
	new_psw=$RIG_PASSWD
	echo -e "Got password from $RIG_CONF"
else
	new_psw=$1
fi

#Change passwords and set SET_RIG_PASS to 0
# printf (not `echo -e`) so a password containing backslash sequences is fed
# to passwd verbatim instead of being escape-expanded.
printf '%s\n%s\n' "$new_psw" "$new_psw" | passwd user > /dev/null 2>&1
# Replace the first line of the VNC password file.  The original used
# sed -i "1s/.*/$new_psw/", which corrupts the file when the password
# contains '/', '&' or '\'.  Rewriting via a temp file and `cat >` handles
# any password and keeps the file's owner/permissions intact. #only 1 line
vnc_tmp=$(mktemp)
{ printf '%s\n' "$new_psw"; tail -n +2 "$VNC_PASSWD"; } > "$vnc_tmp"
cat "$vnc_tmp" > "$VNC_PASSWD"
rm -f "$vnc_tmp"
#no need
#sed -i "s/^SET_RIG_PASS=.*/SET_RIG_PASS=/" $RIG_CONF
echo -e "${GREEN}Password changed successfully${NOCOLOR}"
exit 0 | true |
e619d720fffa8cd427b53d90cbe406e1810828b0 | Shell | yagikiyoshi/MS_summer_school2021 | /3.1_water/run1.sh | UTF-8 | 276 | 2.75 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Run Gaussian16 on the water input deck, then extract MOs 1-7 as cube files.
module load gaussian

MOL=water

# Per-job scratch directory; $$ keeps concurrent jobs from colliding.
export GAUSS_SCRDIR="/scr/$USER/${MOL}.$$"
mkdir -p "$GAUSS_SCRDIR"

(time g16 < "${MOL}.com") > "${MOL}.out" 2>&1

# ${GAUSS_SCRDIR:?} aborts instead of running `rm -r` on an empty path.
rm -r "${GAUSS_SCRDIR:?}"

formchk "${MOL}.chk"

# Dump molecular orbitals 1-7 from the formatted checkpoint.
for i in {1..7}; do
  cubegen 0 "MO=${i}" "${MOL}.fchk" "${MOL}.MO${i}.cub" 0 h
done
| true |
6328aa63b1f9a7adb00371744292e7b5982a09fa | Shell | okpluscoin/scribe-masternode-setup | /new-vps-scribe-install.sh | UTF-8 | 5,541 | 3.203125 | 3 | [] | no_license | #!/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
# NOTE(review): the ">> ~/01vpsmnlog.txt 2>&1" on these three assignments
# redirects the (empty) output of the assignment itself; apart from creating
# the log file it is effectively a no-op -- presumably the author meant to
# capture the commands' errors.  Verify before relying on the log.
# Short hostname, later embedded in the generated masternode.conf line.
THISHOST=$(hostname -s) >> ~/01vpsmnlog.txt 2>&1
# Address reported by `hostname -i`, used as the node's external IP.
EXIP=$(hostname -i) >> ~/01vpsmnlog.txt 2>&1
# Current chain height from the public explorer; used to detect sync completion.
BLKS=$(curl https://explorer.scribe.network/api/getblockcount) >> ~/01vpsmnlog.txt 2>&1
checkrunning () {
    # Poll the wallet daemon until its RPC interface answers, logging a
    # heartbeat marker every 20 seconds while waiting.
    until ~/scribe/scribe-cli getinfo; do
        echo "....." >> ~/01vpsmnlog.txt 2>&1
        sleep 20
    done
}
# A previous install leaves ~/.scribecore behind; in that case only drop a
# marker file and do nothing else.  Otherwise perform the full install.
if [ -e /home/scribeuser/.scribecore ]
then
printf "ALREADY INSTALLED or CURRENTLY RUNNING" > ~/ALREADYINSTALLED
else
printf "
#############################
#   Welcome to your new     #
#    Scribe Masternode      #
#############################
The install has started, follow the progress by
typing the following on the command line:
tail -f /home/scribeuser/01vpsmnlog.txt
#############################
#  Install log follows:...  #
#############################
" > ~/01vpsmnlog.txt 2>&1
cd ~ >> ~/01vpsmnlog.txt 2>&1
# --- Fetch and unpack the prebuilt wallet binaries ---
wget -O scribe.tar.gz https://github.com/scribenetwork/scribe/releases/download/v0.2/scribe-ubuntu-16.04-x64.tar.gz >> ~/01vpsmnlog.txt 2>&1
tar -xvzf scribe.tar.gz >> ~/01vpsmnlog.txt 2>&1
mkdir ~/scribe 2>&1
# NOTE(review): ">> ~/scribe/scribe-cli" appends cp's (empty) stdout to the
# destination path instead of copying the binary there; combined with
# "cp ... ." the real binaries land in ~ while ~/scribe/* end up as empty
# files.  This looks accidental -- verify the binaries in ~/scribe.
cp scribe-ubuntu-16.04-x64/usr/local/bin/scribe-cli . >> ~/scribe/scribe-cli 2>&1
cp scribe-ubuntu-16.04-x64/usr/local/bin/scribed . >> ~/scribe/scribed 2>&1
chmod +x ~/scribe/scribe-cli
chmod +x ~/scribe/scribed
rm -rf scribe-ubuntu-16.04-x64 >> ~/01vpsmnlog.txt 2>&1
rm scribe.tar.gz >> ~/01vpsmnlog.txt 2>&1
mkdir ~/.scribecore >> ~/01vpsmnlog.txt 2>&1
# Random RPC credentials for the wallet configuration.
RPCU=$(pwgen -1 4 -n) >> ~/01vpsmnlog.txt 2>&1
PASS=$(pwgen -1 14 -n) >> ~/01vpsmnlog.txt 2>&1
printf "rpcuser=rpc$RPCU\nrpcpassword=$PASS\nrpcport=8899\nrpcthreads=8\nrpcallowip=127.0.0.1\nbind=$EXIP:8800\nmaxconnections=8\ngen=0\nexternalip=$EXIP\ndaemon=1\n\n" > ~/.scribecore/scribe.conf
# Start the daemon once to generate a masternode key, then restart with the
# masternode settings appended to the config.
~/scribe/scribed -daemon >> ~/01vpsmnlog.txt 2>&1
checkrunning
echo "Generating Masternode Key..." >> ~/01vpsmnlog.txt 2>&1
sleep 15
echo "....." >> ~/01vpsmnlog.txt 2>&1
MKEY=$(~/scribe/scribe-cli masternode genkey) >> ~/01vpsmnlog.txt 2>&1
~/scribe/scribe-cli stop >> ~/01vpsmnlog.txt 2>&1
printf "masternode=1\nmasternodeprivkey=$MKEY\n\n" >> ~/.scribecore/scribe.conf
sleep 25
echo "Starting up..." >> ~/01vpsmnlog.txt 2>&1
sleep 10
echo "....." >> ~/01vpsmnlog.txt 2>&1
~/scribe/scribed -daemon >> ~/01vpsmnlog.txt 2>&1
sleep 5
echo "Downloading Blockchain..." >> ~/01vpsmnlog.txt 2>&1
sleep 10
echo "....." >> ~/01vpsmnlog.txt 2>&1
checkrunning
# --- Install Sentinel and schedule it from cron every minute ---
cd ~
git clone https://github.com/scribenetwork/sentinel.git >> ~/01vpsmnlog.txt 2>&1
cd sentinel >> ~/01vpsmnlog.txt 2>&1
virtualenv ./venv >> ~/01vpsmnlog.txt 2>&1
./venv/bin/pip install -r requirements.txt >> ~/01vpsmnlog.txt 2>&1
crontab -l > mycron
echo "* * * * * cd /home/scribeuser/sentinel && ./venv/bin/python bin/sentinel.py >/dev/null 2>&1" >> mycron
crontab mycron
rm mycron
sleep 10
cd ~/sentinel >> ~/01vpsmnlog.txt 2>&1
./venv/bin/py.test ./test >> ~/01vpsmnlog.txt 2>&1
# Clear masternode caches and reindex before the final sync.
rm ~/.scribecore/mncache.dat >> ~/01vpsmnlog.txt 2>&1
rm ~/.scribecore/mnpayments.dat >> ~/01vpsmnlog.txt 2>&1
~/scribe/scribed -daemon -reindex >> ~/01vpsmnlog.txt 2>&1
checkrunning
# Wait until the local wallet has caught up with the explorer's block height.
while true; do
WALLETBLOCKS=$(~/scribe/scribe-cli getblockcount) >> ~/01vpsmnlog.txt 2>&1
#if (( $(echo "$WALLETBLOCKS < $BLKS" | bc -l) ))
if [ "$WALLETBLOCKS" -lt "$BLKS" ]
then
echo " ..." >> ~/01vpsmnlog.txt 2>&1
echo " Blocks so far: $WALLETBLOCKS / $BLKS" >> ~/01vpsmnlog.txt 2>&1
sleep 5
else
echo " Complete!..." >> ~/01vpsmnlog.txt 2>&1
echo " Blocks so far: $WALLETBLOCKS / $BLKS" >> ~/01vpsmnlog.txt 2>&1
sleep 5
break
fi
done
# Wait until `mnsync status` reports AssetID >= 999 (sync finished).
while true; do
ARRAY=$(~/scribe/scribe-cli mnsync status)
echo "$ARRAY" > getinfo.json
AssetID=$(jq '.AssetID' getinfo.json)
#if (( $(echo "$AssetID < 999" | bc -l) ))
if [ "$AssetID" -lt 999 ]
then
echo " ..." >> ~/01vpsmnlog.txt 2>&1
echo " AssetID: $AssetID" >> ~/01vpsmnlog.txt 2>&1
sleep 5
else
echo " Complete!..." >> ~/01vpsmnlog.txt 2>&1
echo " AssetID: $AssetID" >> ~/01vpsmnlog.txt 2>&1
sleep 5
break
fi
done
echo " " >> ~/01vpsmnlog.txt 2>&1
echo " " >> ~/01vpsmnlog.txt 2>&1
~/scribe/scribe-cli mnsync status >> ~/01vpsmnlog.txt 2>&1
echo " " >> ~/01vpsmnlog.txt 2>&1
sleep 3
# Emit the masternode.conf line for the user's cold wallet.
echo "This is what should go on your windows wallet masternode.conf" >> ~/01vpsmnlog.txt 2>&1
echo " " >> ~/01vpsmnlog.txt 2>&1
echo "$THISHOST $EXIP:8800 $MKEY TXID VOUT" >> ~/01vpsmnlog.txt 2>&1
echo " " >> ~/01vpsmnlog.txt 2>&1
echo "Your server hostname is $THISHOST and you can change it to MN1 or MN2 or whatever you like" >> ~/01vpsmnlog.txt 2>&1
echo " " >> ~/01vpsmnlog.txt 2>&1
sleep 3
echo " " >> ~/01vpsmnlog.txt 2>&1
echo " - wait for 15 confirmations then you can Start Alias in the windows wallet!" >> ~/01vpsmnlog.txt 2>&1
echo " " >> ~/01vpsmnlog.txt 2>&1
echo " Thanks for using the 01VPS.net install service" >> ~/01vpsmnlog.txt 2>&1
echo " " >> ~/01vpsmnlog.txt 2>&1
echo " -- END --" >> ~/01vpsmnlog.txt 2>&1
echo " " >> ~/01vpsmnlog.txt 2>&1
echo " Pres Ctrl+c to stop tailing this log file" >> ~/01vpsmnlog.txt 2>&1
echo " " >> ~/01vpsmnlog.txt 2>&1
echo "$THISHOST $EXIP:8800 $MKEY TXID VOUT" >> ~/01vpsmnlog.txt 2>&1
echo " " >> ~/01vpsmnlog.txt 2>&1
echo "$THISHOST $EXIP:8800 $MKEY TXID VOUT" > ~/masternode.conf 2>&1
# curl --data "exip=$EXIP&mkey=$MKEY&assetid=$AssetID&mn=SCRIBE&hostname=$THISHOST&port=8800&" http://01vps.net/mninstalled.php >> ~/01vpsmnlog.txt 2>&1
fi
# Self-delete the installer script.
rm new01vpsscribeinstall.sh
| true |
37cc82f928ddb852e1d7333da54bc14e3a6596f7 | Shell | kindlehl/Shell | /scripts/blame | UTF-8 | 486 | 3.65625 | 4 | [] | no_license | #!/bin/bash
# Lists the number of OpenStack instances each OSL member is using in our test environment
# Useful for public shaming over IRC

# Use mktemp instead of predictable /tmp/$$ names, and remove both scratch
# files on exit — the original leaked /tmp/$$vmlist.txt on every run.
vmlist=$(mktemp) || exit 1
counts=$(mktemp) || exit 1
trap 'rm -f "$vmlist" "$counts"' EXIT

USERS="$(openstack user list -f value -c Name --project OSL)"

echo "User Openstack VM counts are as follows:"

# Snapshot the instance list once; every user is counted against this file
# instead of hitting the API per user.
nova list > "$vmlist"
for name in $USERS; do
    # grep -c counts matching lines directly (no cat | grep | wc -l pipeline).
    echo "$name: $(grep -c -- "$name" "$vmlist")" >> "$counts"
done

# Sort by count, descending: swap the columns, human-numeric sort, swap back.
awk '{print $2,$1}' "$counts" | sort -hr | awk '{print $2,$1}'
| true |
0cf165a2f8c090e23e44d8c1ffb7b4d8a14c2fcd | Shell | salloumzzzz/T1P2_F18_Salloum | /script1.sh | UTF-8 | 203 | 3.109375 | 3 | [] | no_license | #!/bin/bash
echo "enter a regular expression"
read foo
var=$(echo $foo | tr "{a-z}" "{A-Z} ")
var=$(echo $foo | tr "{I}" "{A} ")
# {a-z} Matches a through z
# {A-Z} matches A through Z
echo $var
| true |
c248cd7ff1133beacd6d10b69f711e2e0aef9fcd | Shell | sunxifa/myLinux | /shell/shellscripts/build_in/timeout/timed-input.sh | UTF-8 | 938 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Timed input: prompt for an answer but give up after TIMELIMIT seconds.
# A background 'sleep && kill' delivers signal 14 (SIGALRM) to this script,
# whose trap handler substitutes the TIMEOUT marker for the answer.
#TMOUT=3
TIMELIMIT=3 # 3 seconds in this example; other values can be set too.

# Report the answer (or the TIMEOUT marker) and kill the background timer.
PrintAnswer()
{
#[ -z "$answer" ]&&(echo "Input cannot be null";kill $!;exit 1;)
[ -z "$answer" ]&&{ echo "Input cannot be null";kill $!;exit 1; }
if [ "$answer" = TIMEOUT ]
then
echo $answer
else
echo "Your favorite veggie is $answer"
kill $!
# kill: the TimerOn function running in the background is no longer needed.
# $! is the PID of the last job started in the background.
fi
}

# Arm the timer: after TIMELIMIT seconds, signal this script ($$).
TimerON()
{
sleep $TIMELIMIT && kill -s 14 $$ &
# Wait TIMELIMIT seconds, then send a signal to the script.
}

# Handler for signal 14: record the timeout and report it.
Int14Vector()
{
answer="TIMEOUT"
PrintAnswer
exit 14
}

trap Int14Vector 14 # For our purposes, the alarm interrupt (14) is repurposed.
echo "What is your favorite vegetabls"
TimerON
read answer
PrintAnswer

# Obviously, this is a makeshift implementation.
#+ Using the "-t" option of "read" would simplify this task.
# See "t-out.sh", below.
exit 0
| true |
07e4c157f80a2586f801acef1c80b7f7279fb43e | Shell | filcab/Dot-files | /.shells/bin/rot13 | UTF-8 | 119 | 2.859375 | 3 | [] | no_license | #!/bin/sh
# ROT13: filter stdin when no arguments are given, otherwise encode the
# joined arguments.  The brackets belong to both tr sets, so they simply
# map onto themselves.
rot() {
	tr '[n-za-mN-ZA-M]' '[a-zA-Z]'
}

if [ $# -gt 0 ]; then
	echo "$@" | rot
else
	rot
fi
| true |
bb0f8b55ad4984e6bfd520a60c79899cc957895e | Shell | grafioschtrader/grafioschtrader | /util/shellscripts/gtupdate.sh | UTF-8 | 865 | 3.046875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Update the Grafioschtrader checkout: stop the service, preserve the local
# application property files, pull the latest sources, then merge the
# properties back and kick off the frontend/backend rebuild.
. ~/gtvar.sh

# Abort early if the remote version check fails.
if ! ./checkversion.sh; then
  exit 1
fi

sudo systemctl stop grafioschtrader.service

cd "$builddir" || exit 1
GT_PROF=application.properties
GT_PROF_PROD=application-production.properties
GT_PROF_PATH=grafioschtrader/backend/grafioschtrader-server/src/main/resources

# Save the current property files before git discards local changes.
cp "$GT_PROF_PATH/$GT_PROF" .
if [ -e "$GT_PROF_PATH/$GT_PROF_PROD" ]; then
  cp "$GT_PROF_PATH/$GT_PROF_PROD" .
fi

cd grafioschtrader/ || exit 1
rm -fr frontend
git reset --hard origin/master
git pull --rebase
cd "$builddir" || exit 1

# Merge the saved properties with the freshly pulled template.
if [ -f "$GT_PROF" ]; then
  mv "$GT_PROF_PATH/$GT_PROF" "${GT_PROF}.new"
  ~/merger.sh -i "$GT_PROF" -s "${GT_PROF}.new" -o "$GT_PROF_PATH/$GT_PROF"
fi
# BUGFIX: the original tested the undefined variable $GT_PROD, so the saved
# production properties were never copied back into the source tree.
if [ -f "$GT_PROF_PROD" ]; then
  cp "$GT_PROF_PROD" "$GT_PROF_PATH/."
fi

cd ~ || exit 1
cp "$builddir"/grafioschtrader/util/shellscripts/gtup{front,back}*.sh .
cp "$builddir"/grafioschtrader/util/shellscripts/checkversion.sh .
~/gtupfrontback.sh
| true |
2a6984027faffda22e7215a7ae180e5c00ed5836 | Shell | fkautz/docker-gluster | /start-gluster.sh | UTF-8 | 807 | 3.4375 | 3 | [] | no_license | # Make sure rpcbind is started - GlusterNFS would complain
## RPC bind service will complain about Locale files but ignore
service rpcbind start

## On docker surprisingly hostnames are mapped to IP's :-)
IPADDR=$(hostname -i)

## Change this to your name if necessary
VOLUME=myvolume

## Start Gluster Management Daemon
service glusterd start

if [ -z "$VOLUME" ]; then
    ## Volume name is empty - nothing to serve, so shut glusterd down again.
    service glusterd stop
    exit 255
fi

if [ ! -d "/var/lib/glusterd/vols/$VOLUME" ]; then
    ## Always create a sub-directory inside a mount-point
    gluster --mode=script --wignore volume create "$VOLUME" "$IPADDR:/mnt/vault/$VOLUME"
fi

gluster --mode=script --wignore volume start "$VOLUME" force

shutdown_gluster()
{
    service glusterd stop
    exit $?
}

## Stop the daemon cleanly on Ctrl-C *and* on 'docker stop', which sends
## SIGTERM (the original only trapped SIGINT, so containers were killed
## without stopping glusterd).
trap shutdown_gluster SIGINT SIGTERM

## Keep the container's foreground process alive while glusterd runs
## in the background.
while true; do sleep 1; done
| true |
096eab9861ead4c4038b4e46dce9aa23ebf6ec48 | Shell | heartshare/docker-lsyncd | /image-lsyncd/usr/bin/docker-entrypoint.sh | UTF-8 | 2,586 | 3.828125 | 4 | [] | no_license | #!/bin/bash
error() { echo "ERROR:" "$@" >&2; }
fail() { [ $# -gt 0 ] && error "$@"; exit 1; }
modmkdir() { local MOD="$1"; shift; mkdir "$@" && chmod "$MOD" "$@"; }
isset() { local i; for i; do [ -z "${!i}" ] && error "Missine environment variable $i." && return 1; done; return 0; }
fixTrailingSlash() { [ "${1:${#1}-1}" = "/" ] && echo "${1::-1}" || echo "$1"; }
boolanOrString() { [ "$1" != "true" ] && [ "$1" != "false" ] && echo '"'"$1"'"' || echo "$1"; }
# Append the text on stdin to file $1 unless the file is writable and
# already contains the first stdin line verbatim.  The awk program exits
# with status 1 when it sees a matching line, so '! awk' succeeds exactly
# when the marker line is already present (and the append is skipped).
append-if-missing() {
	local C="$(cat)";
	[ -w "$1" ] && ! awk -v c="$(echo "$C" | head -1)" '$0==c{exit(1)}' "$1" || echo "$C" >> "$1";
}
# Ensure ~/.ssh exists with owner-only permissions.
[ -d ~/.ssh ] || modmkdir 0700 ~/.ssh

# If the private key is passed inline via SSH_KEY, materialize it at
# ~/.ssh/id_rsa; SSH_KEY and SSH_KEY_FILE may not be combined.
if isset SSH_KEY 2>/dev/null; then
	isset SSH_KEY_FILE 2>/dev/null && fail "Environment variables SSH_KEY and SSH_KEY_FILE are mutually exclusive."
	export SSH_KEY_FILE=~/.ssh/id_rsa
	echo "$SSH_KEY" > "$SSH_KEY_FILE"
	chmod 0600 "$SSH_KEY_FILE"
fi

# These must all be supplied by the container environment.
isset TARGET_USER TARGET_HOST TARGET_PATH SSH_KEY_FILE || fail

# Write an ssh_config entry for the target host (once); the Port line is
# emitted only when TARGET_SSH_PORT is set, via the ':+' expansion.
append-if-missing ~/.ssh/config <<EOF
Host $TARGET_HOST
	User $TARGET_USER${TARGET_SSH_PORT:+
	Port $TARGET_SSH_PORT}
	IdentityFile $SSH_KEY_FILE
	CheckHostIP no
EOF

# known_hosts entries use the "[host]:port" prefix for non-default ports.
[ "${TARGET_SSH_PORT:-22}" -eq 22 ] \
	&& HOST_KEY_PREFIX="$TARGET_HOST" \
	|| HOST_KEY_PREFIX="[$TARGET_HOST]:$TARGET_SSH_PORT"

# AK=1 means "host key not pinned; accept-new must be used later".
AK=
if [ -z "$HOST_KEY" ]
# NOTE(review): this awk has no file argument, so it scans stdin —
# presumably ~/.ssh/known_hosts was intended; confirm before relying on it.
then ! awk -v k="$HOST_KEY_PREFIX" '$1==k{exit(1)}' \
	|| ssh "$TARGET_HOST" -o StrictHostKeyChecking=accept-new true 2>/dev/null >&2 \
	|| AK=1
else append-if-missing ~/.ssh/known_hosts <<<"$HOST_KEY_PREFIX $(echo $HOST_KEY | awk '{print $1" "$2}')"
fi

# Generate the lsyncd exclude list and Lua configuration from the environment.
echo "${EXCLUDES:-"*~"}" > ~/lsyncd.excludes

cat > ~/lsyncd.conf.lua <<EOF
settings {
	statusFile = "$HOME/lsyncd.status",
	nodaemon = true,
	insist = true,
	inotifyMode = "${INOTIFY_MODE:-CloseWrite}",
}
sync {
	default.rsyncssh,
	source = "$(fixTrailingSlash "${SOURCE_PATH:-/var/source}")/",
	host = "$TARGET_HOST",
	targetdir = "$(fixTrailingSlash "$TARGET_PATH")/",
	delay = ${SYNC_DELAY:-0},
	excludeFrom = "$HOME/lsyncd.excludes",
	delete = $(boolanOrString "${DELETE:-running}"),
	rsync = {
		archive = ${RSYNC_ARCHIVE:-true},
		compress = ${RSYNC_COMPRESS:-false},
		-- excludeFrom = "$HOME/lsyncd.excludes",
	}
}
EOF

# Seed shell history with handy commands for interactive debugging sessions.
for f in ~/.ash_history ~/.bash_history; do
	append-if-missing "$f" <<<"lsyncd $HOME/lsyncd.conf.lua"
	append-if-missing "$f" <<<"ssh $TARGET_HOST"
	[ -z "$AK" ] || append-if-missing "$f" <<<"ssh $TARGET_HOST -o StrictHostKeyChecking=accept-new"
done

# Hand off to the container command.
exec "$@"
| true |
70c05d6aecfc380400e629043e4ee6c8c2c37556 | Shell | lisijia6/German-Traffic-Sign-Recognition | /tuning.sh | UTF-8 | 2,159 | 2.78125 | 3 | [] | no_license | # C:\Windows\System32\bash.exe -c "./tuning.sh"
echo "Running Hyperparameter Tuning..."
# 04-03 experiments
echo "Running simpler architecture..."
echo "Model d"
for feature in 25 30 35; do
echo "Changing number of features to $feature $((feature * 2))"
python /home/siyun/MyUnityProjects/CNN/APS360/train.py --batch_size=32 --learning_rate=0.001 --num_epochs=15 --cnn 4 $feature $((feature * 2)) $((feature * 2)) 200
done
# for LR in 0.001 0.002 0.003; do
# echo "Changing learning rate to $LR"
# python /home/siyun/MyUnityProjects/CNN/APS360/train.py --batch_size=32 --learning_rate=$LR --num_epochs=15 --cnn 4
# done
# 04-01 experiments
# Default parameters
# echo "Running with default hyperparameters..."
# python /home/siyun/MyUnityProjects/CNN/APS360/train.py --batch_size=32 --learning_rate=0.001 --num_epochs=10 --cnn 0
# echo "Trying different learning rates..."
# for LR in 0.0005 0.005; do
# echo "Changing learning rate to $LR..."
# python /home/siyun/MyUnityProjects/CNN/APS360/train.py --batch_size=32 --learning_rate=$LR --num_epochs=10 --cnn 0
# done
# echo "Trying different batch sizes..."
# for BS in 16 64; do
# echo "Changing batch size to $BS..."
# python /home/siyun/MyUnityProjects/CNN/APS360/train.py --batch_size=$BS --learning_rate=0.001 --num_epochs=10 --cnn 0
# done
# echo "Trying different architectures..."
# echo "Default architecture: classifier"
# for feature1 in 50 100; do
# echo "Changing feature1 to $feature1..."
# for feature2 in 100 150; do
# echo "Changing feature2 to $feature2..."
# python /home/siyun/MyUnityProjects/CNN/APS360/train.py --cnn 0 $feature1 $feature2 $feature2 200 100
# done
# done
# for hidden1 in 400 100; do
# echo "Changing hidden1 to $hidden1..."
# python /home/siyun/MyUnityProjects/CNN/APS360/train.py --cnn 0 50 100 100 $hidden1 $((hidden1 / 2))
# done
# echo "Model A..."
# python /home/siyun/MyUnityProjects/CNN/APS360/train.py --cnn 1
# echo "Model B..."
# python /home/siyun/MyUnityProjects/CNN/APS360/train.py --cnn 2
# echo "Model C..."
# python /home/siyun/MyUnityProjects/CNN/APS360/train.py --cnn 3
| true |
113bb9b2c9ec2b4aef35eef08ccaaddcff5e702d | Shell | dpshen/vigilant | /daemon2/getdeps.sh | UTF-8 | 1,103 | 2.53125 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -e

# Download and build all third-party dependencies into _deps/_builds.
rm -rf _deps
mkdir _deps
pushd _deps

rm -rf _builds
mkdir _builds
pushd _builds
BUILD_DIR=$(pwd)
popd

# Fetch a tarball, unpack it and run the standard autotools build,
# installing into $BUILD_DIR.  Quoting the prefix keeps the build working
# even when the checkout path contains spaces.
build_autotools() {
    local url=$1 tarball=$2 dir=$3
    wget "$url"
    tar zxvf "$tarball"
    pushd "$dir"
    ./configure --prefix="$BUILD_DIR"
    make
    make install
    popd
}

# zlog ships a plain Makefile (PREFIX= instead of ./configure).
wget https://github.com/HardySimpson/zlog/archive/latest-stable.tar.gz
tar zxvf latest-stable.tar.gz
pushd zlog-latest-stable
make PREFIX="$BUILD_DIR"
make install PREFIX="$BUILD_DIR"
popd

build_autotools ftp://ftp.mirrorservice.org/pub/i-scream/libstatgrab/libstatgrab-0.91.tar.gz \
    libstatgrab-0.91.tar.gz libstatgrab-0.91
build_autotools https://github.com/google/protobuf/releases/download/v2.6.1/protobuf-2.6.1.tar.gz \
    protobuf-2.6.1.tar.gz protobuf-2.6.1
build_autotools https://sourceforge.net/projects/levent/files/libevent/libevent-2.0/libevent-2.0.22-stable.tar.gz \
    libevent-2.0.22-stable.tar.gz libevent-2.0.22-stable
build_autotools http://www.digip.org/jansson/releases/jansson-2.7.tar.gz \
    jansson-2.7.tar.gz jansson-2.7

popd
a2ae86a716c35aacd2271f86a2830b921bd50b71 | Shell | polybar/polybar-scripts | /polybar-scripts/system-gpu-optimus/system-gpu-optimus.sh | UTF-8 | 823 | 3.796875 | 4 | [
"Unlicense"
] | permissive | #!/bin/sh
# Icons shown in the bar for each GPU mode.  "#1" etc. look like
# placeholders — presumably meant to be replaced with real glyphs or
# polybar format tags; confirm against the bar configuration.
icon_intel="#1"
icon_nvidia="#2"
icon_hybrid="#3"
# When set to 1, switching away from nvidia goes to hybrid instead of intel.
hybrid_switching=0

# Print the active mode name (intel/nvidia/hybrid): the 5th space-separated
# word of 'optimus-manager --print-mode', with all whitespace stripped.
gpu_current() {
    mode=$(optimus-manager --print-mode)
    echo "$mode" | cut -d ' ' -f 5 | tr -d '[:space:]'
}
gpu_switch() {
    # Work out which mode follows the active one, then ask optimus-manager
    # to apply it without prompting for confirmation.
    active=$(gpu_current)
    case "$active" in
        intel)
            next="nvidia"
            ;;
        nvidia)
            # Optionally detour through hybrid instead of going straight
            # back to intel (see the hybrid_switching flag).
            if [ "$hybrid_switching" = 1 ]; then
                next="hybrid"
            else
                next="intel"
            fi
            ;;
        hybrid)
            next="nvidia"
            ;;
    esac
    optimus-manager --switch "$next" --no-confirm
}
gpu_display(){
    # Map the active GPU mode to its configured status icon.
    case "$(gpu_current)" in
        intel)  echo "$icon_intel" ;;
        nvidia) echo "$icon_nvidia" ;;
        hybrid) echo "$icon_hybrid" ;;
    esac
}
case "$1" in
--switch)
gpu_switch
;;
*)
gpu_display
;;
esac
| true |
d949080fcf83f1546aaab7f23b43c41ef6dd660e | Shell | banskt/quasi-laplace-simulation | /pipeline/02_run_snptest_meta.sh | UTF-8 | 1,348 | 2.984375 | 3 | [] | no_license | #!/bin/bash
source PATHS
RANDSTRING=`cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 4 | head -n 1`
THIS_SIMDIR="${BASEDIR}/metaanalysis"
SNPTEST_JOBSUBDIR="${JOBSUBDIR}/metaanalysis"
for STUDY in ${STUDYNAMES[@]}; do
SAMPLEDIR="${THIS_SIMDIR}/samples/${STUDY}"
if [ ! -d ${SAMPLEDIR} ]; then mkdir -p ${SAMPLEDIR}; fi
cp ${DOSAGEDIR}/${STUDY}/${STUDY}${SAMPLEPREFIX}.sample ${SAMPLEDIR}/phenotypes.sample
done
if [ ! -d ${SNPTEST_JOBSUBDIR} ]; then mkdir -p ${SNPTEST_JOBSUBDIR}; fi
cd ${SNPTEST_JOBSUBDIR}
SNPTEST_JOBNAME="snptest_${RANDSTRING}"
for STUDY in ${STUDYNAMES[@]}; do
JOBNAME="${SNPTEST_JOBNAME}_${STUDY}"
sed "s|_JOBNAME|${JOBNAME}|g;
s|_SIMDIR_|${THIS_SIMDIR}|g;
s|_GSTUDY_|${STUDY}|g;
s|_SNPTEST|${SNPTEST}|g;
s|_LOCIDIR|${DOSAGEDIR}|g;
s|_USELOCI|${LOCUSNAMES}|g;
" ${MASTER_BSUBDIR}/snptest.bsub > ${JOBNAME}.bsub
bsub < ${JOBNAME}.bsub
done
META_JOBNAME="meta_${RANDSTRING}"
sed "s|_JOBNAME|${META_JOBNAME}|g;
s|_SIMDIR_|${THIS_SIMDIR}|g;
s|_STUDYN_|\"${STUDYNAMES[*]}\"|g;
s|_SAMPLES|\"${STUDYSAMPLES[*]}\"|g;
s|_LOCUSN_|${LOCUSNAMES}|g;
s|_SCRIPT_|${GENINF}|g;
s|__META__|${META}|g;
" ${MASTER_BSUBDIR}/meta.bsub > ${META_JOBNAME}.bsub
bsub -w "done(${SNPTEST_JOBNAME}*)" < ${META_JOBNAME}.bsub
cd ${CURDIR}
| true |
db3f5367c32ea67614482a0b027e5e46b44977fb | Shell | Mic92/xfstests-cntr | /tests/generic/425 | UTF-8 | 1,736 | 3.65625 | 4 | [] | no_license | #! /usr/bin/env bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2017 Oracle, Inc. All Rights Reserved.
#
# FS QA Test No. 425
#
# Check that FIEMAP produces some output when we require an external
# block to hold extended attributes.
#
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 7 15
_cleanup()
{
cd /
rm -rf $tmp.*
wait
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
. ./common/attr
# real QA test starts here
_supported_fs generic
_require_scratch
_require_attrs
_require_xfs_io_command "fiemap" "-a"
echo "Format and mount"
_scratch_mkfs > $seqres.full 2>&1
_scratch_mount >> $seqres.full 2>&1
testdir=$SCRATCH_MNT/test-$seq
mkdir $testdir
echo "Create the original files"
testfile=$testdir/attrfile
touch $testfile
blk_sz=$(_get_file_block_size $SCRATCH_MNT)
# Assume each attr eats at least 20 bytes. Try to fill 2 fs blocks.
max_attrs=$((2 * blk_sz / 20))
i=0
while [ $i -lt $max_attrs ]; do
n="$(printf "%010d" $i)"
$SETFATTR_PROG -n "user.$n" -v "$n" $testfile > $seqres.full 2>&1 || break
i=$((i + 1))
done
sync
echo "Check attr extent counts"
f1=$(_count_attr_extents $testfile)
echo "$f1 xattr extents" >> $seqres.full
$XFS_IO_PROG -c 'fiemap -a -v' $testfile >> $seqres.full
test $f1 -gt 0 || echo "Expected at least one xattr extent."
_scratch_cycle_mount
echo "Check attr extent counts after remount"
f1=$(_count_attr_extents $testfile)
echo "$f1 xattr extents" >> $seqres.full
$XFS_IO_PROG -c 'fiemap -a -v' $testfile >> $seqres.full
test $f1 -gt 0 || echo "Expected at least one xattr extent."
# success, all done
status=0
exit
| true |
dffd1f95ae254901d2893c55f1048ce4d75375ed | Shell | goude/yaprox | /yaprox.sh | UTF-8 | 3,992 | 4.3125 | 4 | [
"MIT"
] | permissive | # Yaprox uses code from https://github.com/andsens/homeshick
# Print usage and option summary for yaprox (the text is one printf
# literal, so it must stay exactly as the user should see it).
function yaprox_help() {
	printf "yaprox sets your http_proxy and https_proxy environment variables.
Usage: yaprox [options] [user] server
	(if server is a number N, use line N from ~/.yaproxrc)
Runtime options:
	-c, [--clear]    # Unset http_proxy and https_proxy
	-q, [--quiet]    # Quiet mode
	-h, [--help]     # Display this help and exit
	-l, [--list]     # Show relevant environment variables
	-s, [--stash]    # Unset proxy variables, but save for later use in a temporary environment variable
	-u, [--unstash]  # Set proxy variables using stashed value; clear temporary environment variable
Note:
	If you wish to specify a domain USER, use the format <domain>\\\\\\<user>
	(note the double backslash).
Warning:
	Use wisely, the environment will store your plaintext password for
	the duration of the session. Always inspect the source code when you download
	something from the internet, including this script.
"
}
# Remove every proxy-related variable from the current environment.
function _yaprox_clear() {
  local v
  for v in http_proxy HTTP_PROXY https_proxy HTTPS_PROXY NO_PROXY no_proxy; do
    unset "$v"
  done
}
# Set (or manage) the http(s)_proxy environment variables.
# With USER SERVER: prompts for a password and builds user:pass@server.
# With SERVER only: uses it verbatim, or — when it is a single digit N —
# takes line N of ~/.yaproxrc as the server.
function yaprox() {
  local proxy_user proxy_server proxy_pwd proxy_url
  local TALK=true

  # Option parsing from andsens/homeshick
  while [[ $# -gt 0 ]]; do
    if [[ $1 =~ ^- ]]; then
      # Convert combined short options into multiples short options (e.g. `-qb' to `-q -b')
      if [[ $1 =~ ^-[a-z]{2,} ]]; then
        param=$1
        shift
        set -- ${param:0:2} -${param:2} $@
        unset param
      fi
      case $1 in
        -h | --help) yaprox_help ; return ;;
        -q | --quiet) TALK=false ; shift; continue ;;
        -l | --list)
          env | grep -i prox
          return
          ;;
        -c | --clear)
          _yaprox_clear
          unset _yaprox_stash
          if $TALK; then
            echo "Proxy variables cleared."
          fi
          return
          ;;
        -s | --stash)
          # Remember the current proxy URL in _yaprox_stash, then clear.
          export _yaprox_stash=$http_proxy
          _yaprox_clear
          if $TALK; then
            echo "Proxy variables stashed."
          fi
          return
          ;;
        -u | --unstash)
          # Re-apply the stashed URL by recursing with it as the server.
          yaprox -q $_yaprox_stash
          unset _yaprox_stash
          if $TALK; then
            echo "Proxy variables unstashed."
          fi
          return
          ;;
        *)
          echo "Unknown option '$1'"
          return 1
          ;;
      esac
    else
      break
    fi
  done

  if [[ "$#" -eq 2 ]]; then
    proxy_user=$1
    proxy_server=$2
    # -s: password is read without echoing; it ends up embedded in the
    # proxy URL stored in this shell's environment (see the help warning).
    echo -n "Enter password for $proxy_user@$proxy_server: "
    read -s proxy_pwd
    proxy_url=$proxy_user:$proxy_pwd@$proxy_server
  elif [[ "$#" -eq 1 ]]; then
    # A bare digit N selects line N of ~/.yaproxrc.
    if [[ "$1" =~ ^[1-9]$ ]]; then
      proxy_server=$(sed "$1q;d" ~/.yaproxrc)
    else
      proxy_server=$1
    fi
    proxy_url=$proxy_server
  else
    yaprox_help
    return
  fi

  # git seems to work best with lower case environment variable names
  export http_proxy=$proxy_url
  export HTTP_PROXY=$proxy_url
  export https_proxy=$proxy_url
  export HTTPS_PROXY=$proxy_url

  if $TALK; then
    echo
    echo "Now using proxy server $proxy_server. Run yaprox --clear when done to unset."
  fi
}
# EXPERIMENTAL - execute a single command using the preferred proxy,
# which we define to be the first one listed in .yaproxrc
# BUGFIX: "$@" is now quoted so arguments containing whitespace reach the
# command as single words instead of being re-split by the shell.
function yap() {
  proxy_server=$(sed "1q;d" ~/.yaproxrc)
  http_proxy=$proxy_server https_proxy=$proxy_server "$@"
}
| true |
82166a013c719a81ee7b6be42c7ba328d3b8437a | Shell | eerimoq/monolinux-example-project | /3pp/linux/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh | UTF-8 | 3,649 | 3.828125 | 4 | [
"Linux-syscall-note",
"GPL-2.0-only",
"MIT"
] | permissive | #!/bin/bash
#
# Multiqueue: Using pktgen threads for sending on multiple CPUs
#  * adding devices to kernel threads which are in the same NUMA node
#  * bound devices queue's irq affinity to the threads, 1:1 mapping
#  * notice the naming scheme for keeping device names unique
#  * nameing scheme: dev@thread_number
#  * flow variation via random UDP source port
#
# The pg_ctrl/pg_set/pg_thread helpers and err/info come from functions.sh;
# command-line handling ($DEV, $THREADS, $F_THREAD, ...) from parameters.sh.
basedir=`dirname $0`
source ${basedir}/functions.sh
root_check_run_with_sudo "$@"
#
# Required param: -i dev in $DEV
source ${basedir}/parameters.sh

# Base Config
DELAY="0"        # Zero means max speed
[ -z "$COUNT" ]     && COUNT="20000000"   # Zero means indefinitely
[ -z "$CLONE_SKB" ] && CLONE_SKB="0"

# Flow variation random source port between min and max
UDP_SRC_MIN=9
UDP_SRC_MAX=109

node=`get_iface_node $DEV`
irq_array=(`get_iface_irqs $DEV`)
cpu_array=(`get_node_cpus $node`)

# Each thread needs both a dedicated IRQ and a CPU on the NUMA node.
[ $THREADS -gt ${#irq_array[*]} -o $THREADS -gt ${#cpu_array[*]} ] && \
	err 1 "Thread number $THREADS exceeds: min (${#irq_array[*]},${#cpu_array[*]})"

# (example of setting default params in your script)
if [ -z "$DEST_IP" ]; then
    [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
fi
[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
if [ -n "$DEST_IP" ]; then
    validate_addr${IP6} $DEST_IP
    read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
fi
if [ -n "$DST_PORT" ]; then
    read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
    validate_ports $UDP_DST_MIN $UDP_DST_MAX
fi

# General cleanup everything since last run
pg_ctrl "reset"

# Threads are specified with parameter -t value in $THREADS
for ((i = 0; i < $THREADS; i++)); do
    # The device name is extended with @name, using thread number to
    # make then unique, but any name will do.
    # Set the queue's irq affinity to this $thread (processor)
    # if '-f' is designated, offset cpu id
    thread=${cpu_array[$((i+F_THREAD))]}
    dev=${DEV}@${thread}
    echo $thread > /proc/irq/${irq_array[$i]}/smp_affinity_list
    info "irq ${irq_array[$i]} is set affinity to `cat /proc/irq/${irq_array[$i]}/smp_affinity_list`"

    # Add remove all other devices and add_device $dev to thread
    pg_thread $thread "rem_device_all"
    pg_thread $thread "add_device" $dev

    # select queue and bind the queue and $dev in 1:1 relationship
    queue_num=$i
    info "queue number is $queue_num"
    pg_set $dev "queue_map_min $queue_num"
    pg_set $dev "queue_map_max $queue_num"

    # Notice config queue to map to cpu (mirrors smp_processor_id())
    # It is beneficial to map IRQ /proc/irq/*/smp_affinity 1:1 to CPU number
    pg_set $dev "flag QUEUE_MAP_CPU"

    # Base config of dev
    pg_set $dev "count $COUNT"
    pg_set $dev "clone_skb $CLONE_SKB"
    pg_set $dev "pkt_size $PKT_SIZE"
    pg_set $dev "delay $DELAY"

    # Flag example disabling timestamping
    pg_set $dev "flag NO_TIMESTAMP"

    # Destination
    pg_set $dev "dst_mac $DST_MAC"
    pg_set $dev "dst${IP6}_min $DST_MIN"
    pg_set $dev "dst${IP6}_max $DST_MAX"

    if [ -n "$DST_PORT" ]; then
	# Single destination port or random port range
	pg_set $dev "flag UDPDST_RND"
	pg_set $dev "udp_dst_min $UDP_DST_MIN"
	pg_set $dev "udp_dst_max $UDP_DST_MAX"
    fi

    # Setup random UDP port src range
    pg_set $dev "flag UDPSRC_RND"
    pg_set $dev "udp_src_min $UDP_SRC_MIN"
    pg_set $dev "udp_src_max $UDP_SRC_MAX"
done

# start_run (blocks until the run completes or is interrupted)
echo "Running... ctrl^C to stop" >&2
pg_ctrl "start"
echo "Done" >&2

# Print results
for ((i = 0; i < $THREADS; i++)); do
    thread=${cpu_array[$((i+F_THREAD))]}
    dev=${DEV}@${thread}
    echo "Device: $dev"
    cat /proc/net/pktgen/$dev | grep -A2 "Result:"
done
| true |
ca7fc962b833f77e886fe64f73bae8c4ad2170b0 | Shell | bryanl/ksonnet-registry | /scripts/test/list_packages.sh | UTF-8 | 346 | 3 | 3 | [] | no_license | #!/bin/bash
set -e

# Normalize the arguments with getopt and re-set the positional parameters
# to its output, so forms like '--host=URL' and reordered options parse too.
# BUGFIX: the original discarded getopt's output (no 'eval set --'), and
# omitted "-o ''", which makes GNU getopt consume the first real argument
# as the short-options string.
OPTS=$(getopt -o '' --long host:,namespace:,package: -n 'parse-options' -- "$@")
eval set -- "$OPTS"

HOST="http://localhost:9000"
# NAMESPACE="ksonnet"

while true; do
  case "$1" in
    --host ) HOST="$2"; shift; shift; ;;
    # --namespace ) NAMESPACE="$2"; shift; shift; ;;
    -- ) shift; break ;;
    * ) break ;;
  esac
done
curl -sSL -XGET \
${HOST}/api/v1/packages | true |
abb0a2bc32bb63b4d7b8131d61a39fa37bdb76ba | Shell | danielv7/dotfiles | /dots-config.sh | UTF-8 | 2,638 | 3.1875 | 3 | [
"MIT"
] | permissive | # Up/Down Dotfile "Migrations" via Dots
dots_config_up() {
# Personal
if [[ "$HOSTNAME" == "MacBook-Pro-Justin" ]] ; then
ln -sfn "$DOTFILES_DIR"/src/personal/home/.gitconfig "$HOME"/.gitconfig
ln -sfn "$DOTFILES_DIR"/src/personal/home/.zlogin "$HOME"/.zlogin
ln -sfn "$DOTFILES_DIR"/src/emacs/.emacs "$HOME"/.emacs
echo ". $DOTFILES_DIR/src/personal/home/.zshrc" >> "$HOME"/.zshrc
# EasyPost local
elif [[ "$HOSTNAME" == "MacBook-Pro-Justin-EasyPost" ]] ; then
ln -sfn "$DOTFILES_DIR"/src/personal/home/.gitconfig "$HOME"/.gitconfig
ln -sfn "$DOTFILES_DIR"/src/personal/home/.zlogin "$HOME"/.zlogin
ln -sfn "$DOTFILES_DIR"/src/easypost/ssh/config "$HOME"/.ssh/config
ln -sfn "$DOTFILES_DIR"/src/emacs/.emacs "$HOME"/.emacs
echo ". $DOTFILES_DIR/src/personal/home/.zshrc" >> "$HOME"/.zshrc
echo ". $DOTFILES_DIR/src/easypost/.zshrc" >> "$HOME"/.zshrc
# EasyPost AWS
elif [[ "$HOSTNAME" == "oregon1" ]] ; then
ln -sfn "$DOTFILES_DIR"/src/easypost/.gitconfig-easypost-aws "$HOME"/.gitconfig
ln -sfn "$DOTFILES_DIR"/src/emacs/.emacs "$HOME"/.emacs
echo ". $DOTFILES_DIR/src/easypost/.zshrc-aws" >> "$HOME"/.zshrc
# Mac mini Server
elif [[ "$HOSTNAME" == "Server" ]] ; then
ln -sfn "$DOTFILES_DIR"/src/personal/home/.gitconfig "$HOME"/.gitconfig
ln -sfn "$DOTFILES_DIR"/src/personal/home/.zlogin "$HOME"/.zlogin
crontab - < "$DOTFILES_DIR"/src/server/crontab
ln -sfn "$DOTFILES_DIR"/src/emacs/.emacs "$HOME"/.emacs
echo ". $DOTFILES_DIR/src/personal/home/.zshrc" >> "$HOME"/.zshrc
else
echo "HOSTNAME doesn't match any config."
fi
}
dots_config_down() {
# Personal
if [[ "$HOSTNAME" == "MacBook-Pro-Justin" ]] ; then
rm -i "$HOME"/.gitconfig
rm -i "$HOME"/.zlogin
rm -i "$HOME"/.emacs
# .zshrc taken care of by Dots
# EasyPost local
elif [[ "$HOSTNAME" == "MacBook-Pro-Justin-EasyPost" ]] ; then
rm -i "$HOME"/.gitconfig
rm -i "$HOME"/.zlogin
rm -i -rf "$HOME"/.ssh/config
rm "$HOME"/.emacs
# .zshrc taken care of by Dots
# EasyPost AWS
elif [[ "$HOSTNAME" == "oregon1" ]] ; then
rm -i "$HOME"/.gitconfig
rm "$HOME"/.emacs
# .zshrc taken care of by Dots
# Mac mini Server
elif [[ "$HOSTNAME" == "Server" ]] ; then
rm -i "$HOME"/.gitconfig
rm -i "$HOME"/.zlogin
crontab -r
rm "$HOME"/.emacs
# .zshrc taken care of by Dots
else
echo "HOSTNAME doesn't match any config."
fi
}
| true |
24cba95f4b96051b3e920ce38bf2715e64e1dbd5 | Shell | CapnBry/HeaterMeter | /openwrt/package/linkmeter/targets/bcm2708/usr/bin/wifi-client | UTF-8 | 2,762 | 3.75 | 4 | [
"GPL-3.0-only",
"MIT"
] | permissive | #!/bin/sh
FUNC=`basename $0`
[ $# -eq 0 ] && {
cat <<EOF
Usage: $FUNC [-s SSID] [-p password] [-c channel] [-e encryption] [-m mode] [-b band]
encryption = none wep psk psk2 (default)
mode = ap sta
band = 2 5
EOF
exit 1
}
ENC=psk2
MODE=sta
[ "$FUNC" = "wifi-client" ] && MODE="sta" && TAG="wifi_client"
[ "$FUNC" = "wifi-ap" ] && MODE="ap" && TAG="wifi_ap"
while getopts "s:p:c:e:m:b:" arg ; do
case $arg in
s) SSID=$OPTARG ;;
p) PASS=$OPTARG ;;
c) CHANNEL=$OPTARG ;;
e) ENC=$OPTARG ;;
m) MODE=$OPTARG ;;
b) BAND=${OPTARG:0:1} ;;
esac
done
shift $((OPTIND-1))
# Remaining positional arguments (after the option flags): SSID and passphrase.
[ $# -gt 0 ] && SSID=$1
[ $# -gt 1 ] && PASS=$2
# Escape single quotes so the values can be embedded inside the single-quoted
# UCI option values written below.
SSID=`echo $SSID | sed "s/\'/\\\\\'/g"`
PASS=`echo $PASS | sed "s/\'/\\\\\'/g"`
# A non-empty passphrase becomes a complete "option key '...'" config line;
# an empty PASS leaves the key line out entirely (open network).
[ -n "$PASS" ] && PASS=" option key '$PASS'"
# Map the single-digit band selector (-b) to the hwmode string used by
# /etc/config/wireless.
case $BAND in
2) BAND="11g" ;;
5) BAND="11a" ;;
*) BAND="11n" ;; # The Pi3B+ will find the proper band in STA mode if set to 11n
esac
# Default AP mode to channel 36 for 5GHz, channel 6 for 2.4GHz, channel auto for STA
[ "$MODE" == "ap" -a "$BAND" == "11a" -a -z "$CHANNEL" ] && CHANNEL="36"
[ "$MODE" == "ap" -a -z "$CHANNEL" ] && CHANNEL="6"
[ -z "$CHANNEL" ] && CHANNEL="auto"
# On platforms with just one network connection (PiA, Zero, ZeroW) the platform will assign the wifi to the
# lan interface. In that case, do not create a new interface, just write to the lan interface definition
IFACE="wwan"
[ "$(uci -q get network.lan.ifname)" = "wlan0" ] && IFACE="lan"
# Rewrite the wireless config from scratch.  The heredoc is unquoted, so
# $BAND/$CHANNEL/$TAG/$IFACE/$MODE/$SSID/$ENC/$PASS all expand here.
# NOTE(review): $TAG and $ENC are expected to be set earlier in the script
# (before the option parsing shown above) — confirm.
cat > /etc/config/wireless << EOF
config wifi-device 'radio0'
	option type 'mac80211'
	option phy 'phy0'
	option hwmode '$BAND'
	option channel '$CHANNEL'
	#option htmode 'HT20'
	#list ht_capab 'SHORT-GI-20'
	#list ht_capab 'SHORT-GI-40'
	#list ht_capab 'DSSS_CCK-40'
	#list ht_capab 'RX-STBC1'
	#option country '00'

config wifi-iface '$TAG'
	option device 'radio0'
	option network '$IFACE'
	option mode '$MODE'
	option ssid '$SSID'
	option encryption '$ENC'
	#option powersave '1'
	$PASS
EOF
# Make sure the chosen interface exists and has a DHCP section to configure.
uci set network.$IFACE=interface
uci set dhcp.$IFACE=dhcp
uci set dhcp.$IFACE.interface=$IFACE
# delete 'bridge' type, if the connection is already brought up as a bridge then this may require a reboot
uci -q delete network.$IFACE.type
if [ "$MODE" = "sta" ] ; then
# Client (station) mode: the upstream network hands out addresses, so turn
# off our own DHCP/RA services and switch the interface to DHCP client.
uci set dhcp.$IFACE.ignore=1
uci set dhcp.$IFACE.dhcpv6=disabled
uci set dhcp.$IFACE.ra=disabled
uci set network.$IFACE.proto=dhcp
uci -q delete network.$IFACE.ipaddr
uci -q delete network.$IFACE.netmask
else
# Access-point mode: serve DHCP/RA ourselves on a fixed private subnet.
uci set dhcp.$IFACE.ignore=0
uci set dhcp.$IFACE.dhcpv6=server
uci set dhcp.$IFACE.ra=server
uci set network.$IFACE.proto=static
#uci -q get network.$IFACE.ipaddr > /dev/null || {
uci set network.$IFACE.ipaddr=192.168.201.1
uci set network.$IFACE.netmask=255.255.255.0
#}
fi
# Persist all staged uci changes and flush to disk.
uci commit
sync
| true |
533e15a8cf944d9358a0712ca8a0f0d6938bd282 | Shell | eudaldgr/KISS-kde | /extra/qt5-webengine/build | UTF-8 | 1,009 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/bin/sh -e
# Keep failing fast even when the script is invoked as `sh build`
# (the -e option in the shebang line is lost in that case).
set -e

# Apply all local patches.  Guard against an empty glob: with no *.patch
# files present the literal pattern '*.patch' would be passed through and
# `patch` would abort on a nonexistent input file.
for patch in *.patch; do
    [ -e "$patch" ] || continue
    patch -p1 < "$patch"
done

# Fix nasm hardcoded glibcism
sed -i '/CANONICALIZE_FILE_NAME/d' \
    src/3rdparty/chromium/third_party/nasm/config/config-linux.h

# Remove glibc header.
sed -i '/execinfo.h/d' \
    src/3rdparty/chromium/base/debug/stack_trace_posix.cc

# The build fails if qtwebengine is already installed: force the in-tree
# include directory to the front of every qmake project/feature file.
# IFS= keeps any leading/trailing whitespace in file names intact.
find . -name '*.pr[fio]' | while IFS= read -r file; do
    sed -i "s#INCLUDEPATH += #&\$\$QTWEBENGINE_ROOT/include #" "$file"
done

# Configure against system libraries where possible; icu and glib stay bundled.
qmake -- \
    -feature-webengine-system-ninja \
    -feature-webengine-system-zlib \
    -feature-webengine-system-harfbuzz \
    -feature-webengine-system-png \
    -feature-webengine-system-libevent \
    -feature-webengine-system-libvpx \
    -feature-webengine-system-opus \
    -feature-webengine-system-libwebp \
    -feature-webengine-system-ffmpeg \
    -feature-webengine-proprietary-codecs \
    -no-feature-webengine-system-icu \
    -no-feature-webengine-system-glib

make
# $1 is the destination root provided by the package manager.
make INSTALL_ROOT="$1" install
make INSTALL_ROOT="$1" install
| true |
99b5a9570dc14df5e1178df2962ecb2b719070f5 | Shell | muhkuh-sys/com.github.brimworks-lua-zlib | /.lxc_build_windows.sh | UTF-8 | 2,990 | 3.40625 | 3 | [] | no_license | #! /bin/bash
set -e

# This is the name of the working container.
# FIXME: generate something unique to avoid collisions if more than one build is running.
CONTAINER=c0

# Get the project directory.
PRJDIR=$(pwd)

# Run a single shell command inside the build container.
in_container() {
    lxc exec ${CONTAINER} -- bash -c "$1"
}

# Pack the install trees for all Lua versions of one architecture and hand
# the resulting archives back to the user that owns the project directory.
#   $1 - architecture tag used in the artifact name (x86 / x86_64)
#   $2 - build folder inside build/ (windows32 / windows64)
pack_results() {
    local arch="$1"
    local folder="$2"
    local luaver
    for luaver in lua5.1 lua5.2 lua5.3; do
        # The \$(...) is escaped so that `stat` runs inside the container,
        # exactly as the original backtick form did.
        in_container "tar --create --file /tmp/work/build/build_windows_${arch}_${luaver}.tar.gz --gzip --directory /tmp/work/build/${folder}/${luaver}/lua-zlib/install ."
        in_container "chown \$(stat -c %u:%g /tmp/work) /tmp/work/build/build_windows_${arch}_${luaver}.tar.gz"
    done
}

# Make sure that the "build" folder exists.
# NOTE: do not remove it, maybe there are already components.
mkdir -p ${PRJDIR}/build

# Start the container and mount the project folder.
lxc launch mbs-ubuntu-1604-x64 ${CONTAINER} -c security.privileged=true
lxc config device add ${CONTAINER} projectDir disk source=${PRJDIR} path=/tmp/work
sleep 5

# Update the package list to prevent "not found" messages.
in_container 'apt-get update --assume-yes'

# Install the project specific packages.
in_container 'apt-get install --assume-yes lua5.1 lua-filesystem lua-expat lua51-mhash lua-curl lua-zip'

# Build the 32bit version.
in_container 'export PATH=/usr/mingw-w64-i686/bin:${PATH} && cd /tmp/work && bash .build01_windows32.sh'
pack_results x86 windows32

# Build the 64bit version.
in_container 'export PATH=/usr/mingw-w64-x86_64/bin:${PATH} && cd /tmp/work && bash .build02_windows64.sh'
pack_results x86_64 windows64

# Stop and remove the container.
lxc stop ${CONTAINER}
lxc delete ${CONTAINER}
| true |
314f32a59fd5290f87b780b3aeb1dca3c0dde612 | Shell | chadr123/vpp | /docker/vpp-vswitch/prod/vswitch/vppctl | UTF-8 | 1,479 | 4.03125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Find the most recent container running vpp, and execute the vpp cli that resides within.

set -euo pipefail

# Print the ID of the newest running container whose image matches $1.
# More than one vswitch container may exist while an upgrade/redeploy is in
# flight, so the listing is ordered newest-first and only the top entry kept.
get_vswitch_container_id() {
  local image_pattern=$1
  docker ps --format '{{.CreatedAt}} {{.Image}} {{.ID}}' \
    | grep -E " ${image_pattern}@?[^ ]* [^ ]+$" \
    | sort -nr \
    | head -n 1 \
    | sed -e 's|.* ||g'
}
IMAGE="contivvpp/vswitch"

# Test and see if we need more access.
if ! docker info >/dev/null 2>&1
then
    echo "Error connecting to the Docker daemon. Please try rerunning with sudo." >&2
    exit 1
fi

# In K8s 1.10 and above only Image IDs are displayed by 'docker ps' instead of Image
# names. If we can't get the vswitch container by its image name, exclude pause containers
# and lookup a container with name containing 'contiv-vswitch'.
if ! ID=$(get_vswitch_container_id "$IMAGE") || [ -z "$ID" ]
then
    if ! ID=$(docker ps --format '{{.CreatedAt}}|{{.ID}}|{{.Names}}'| grep -a contiv-vswitch | grep -a -v pause | sort -nr | head -n 1 | cut -f 2 -d '|') || [ -z "$ID" ]
    then
        echo "Error finding a $IMAGE image." >&2
        exit 1
    fi
fi

# Allocate a TTY only when stdin is a terminal.  An array (instead of an
# unquoted string) keeps the argument list well-formed when it is empty, so
# docker never receives a spurious empty-string argument.
DOCKER_ARGS=()
if [ -t 0 ]
then
    DOCKER_ARGS=(-t)
fi

# The ${arr[@]+...} guard keeps the expansion safe under `set -u` on bash
# versions older than 4.4, where expanding an empty array is an error.
exec docker exec -i ${DOCKER_ARGS[@]+"${DOCKER_ARGS[@]}"} "$ID" /usr/bin/vppctl "$@"
| true |
994370538a971cbf4fe59421755ec5983b7b7242 | Shell | openenergymonitor/EmonScripts | /update/atmega_firmware_upload.sh | UTF-8 | 2,486 | 3.90625 | 4 | [] | no_license | #!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd $DIR
source load_config.sh
serial_port=$1
firmware_key=$2
if [ $serial_port == "none" ]; then
echo "no serial port selected or available"
exit
fi
if [ $firmware_key == "emonpi" ]; then
firmware_key="emonPi_discrete_jeelib"
fi
echo "-------------------------------------------------------------"
echo "$firmware_key Firmware Upload"
echo "-------------------------------------------------------------"
if [ ! -d $openenergymonitor_dir/data/firmware ]; then
mkdir $openenergymonitor_dir/data/firmware
fi
result=$(./get_firmware_download_url.py $firmware_key)
if [ "$result" != "firmware not found" ]; then
result=($result)
download_url=${result[0]}
baud_rate=${result[1]}
core=${result[3]}
hexfile=$openenergymonitor_dir/data/firmware/$firmware_key.hex
echo "Downloading firmware from: "
echo $download_url
wget -q $download_url -O $hexfile
echo
echo "Downloaded file: "
ls -lh $hexfile
if [ -f $hexfile ]; then
state=$(systemctl show emonhub | grep ActiveState)
if [ $state == "ActiveState=active" ]; then
echo
echo "EmonHub is running, stopping EmonHub"
sudo systemctl stop emonhub
emonhub_stopped_by_script=1
else
emonhub_stopped_by_script=0
fi
echo
echo "Uploading $firmware_key on serial port $serial_port"
for attempt in {1..1}
do
echo "Attempt $attempt..."
echo
avrdude -Cavrdude.conf -v -p$core -carduino -D -P/dev/$serial_port -b$baud_rate -Uflash:w:$hexfile:i
# Find output logfile
output_log_file=$( lsof -p $$ -a -d 1 -F n | awk '/^n/ {print substr($1, 2)}' )
# double check that it exists
if [ -f $output_log_file ]; then
# check for completion status
not_in_sync=$(cat $output_log_file | grep "not in sync" | wc -l)
flash_verified=$(cat $output_log_file | grep "flash verified" | wc -l)
# if [ "$not_in_sync" == "0" ]; then
if [ $flash_verified == "1" ]; then
echo "SUCCESS: flash verifed"
break;
else
echo "ERROR: Not in sync"
fi
fi
done
if [ $emonhub_stopped_by_script == 1 ]; then
echo
echo "Restarting EmonHub"
sudo systemctl start emonhub
fi
else
echo "Firmware download failed...check network connection"
fi
else
echo "Firmware not found: $firmware_key"
fi
| true |
3cec69810838be2e5d51438d3ab108f9ef5dec17 | Shell | ps-aux/spa-container | /ci/container/build.sh | UTF-8 | 241 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env bash
cd "$(dirname "$0")"
set -e
name="abspro/spa-container-ci"
tag=$1
if [[ -z ${tag} ]];then
echo "Tag not provided"
exit 1
fi
image_name=${name}:${tag}
docker build . -t ${image_name}
docker push ${image_name}
| true |
8b0bf04515e2ba6300ce4ea24d1026f36c2809cd | Shell | natchi92/ARCH-COMP | /2017/HPWC/DistributedController/gen_dist_hydi.sh | UTF-8 | 24,206 | 3.484375 | 3 | [] | no_license | #!/bin/bash
#
# Script that generates instance of the fisher model with n processes.
#
# usage: bash dist_controller.sh -lower <lower> -upper <upper>
# Generates all the models with a number of processes in the range
# <lower>..<upper>.
#
# For example bash dist_controller.sh -lower 2 -upper 10 generates 8 models, from
# 2 to 10 processes.
#
# Output models are called dist_controller_n.hydi, where n is the total number
# of processes.
#
#------------------------------------------------------------------------------#
# usage
#
# Print command-line usage on stdout, then abort the whole script with
# status 1 (this function never returns).
usage() {
cat <<EOF
usage: $0 -lower <lower_bound>
          -upper <upper_bound>
EOF
# Non-zero exit: every caller relies on usage terminating the script.
exit 1
}
# Validate one bound argument: it must be present and consist only of
# digits.  On any violation a diagnostic is printed and usage is invoked,
# which terminates the script with status 1.
check_bound() {
    local value=$1

    if [ -z "${value}" ]; then
        echo "No bound provided!"
        usage
    fi

    case "${value}" in
        *[!0-9]*)
            echo "${value} is not a number!"
            usage
            ;;
    esac
}
# Append the common head of the HyDI model to the file named by $1:
# the `MODULE main` header plus the #define'd timing parameters.
print_common_part() {
    local dest=${1}

    cat <<EOF >> "${dest}"
MODULE main
-- set the parametrs
#define def_sampletime_par 1
#define def_lost_packet_time_par 0.25
#define def_inity_par 1
#define invarscheduler_par 0.25
#define def_computation_offset_par 0.3125
#define boundcontroller_par 1
#define waittime_par 0.25
#define waittime2_par 0.25
#define computationtime_par 1
#define mincomputationtime_par 1.25
#define invar_on_rec_par 0.25
EOF
    # An earlier variant (kept in version history) declared these values as
    # FROZENVARs of type real, constrained with INVARs such as
    # `def_sampletime > 0 & def_sampletime = def_inity` and then pinned to
    # the same constants used above; the #define form supersedes it.
}
# Append the (process-count independent) SensorType module definition to the
# file named by $1.  The module is instantiated once per sensor by main.
print_sensor_definition() {
    local dest=${1}

    cat <<EOF >> "${dest}"
-- definition of a sensor
-- lost_packet_time: threshold before a packet is lost
MODULE SensorType(lost_packet_time, inity, sampletime)
VAR
location : {done, read, wait, send};
y : continuous; -- stopwatch

EVENT request_evt, read_evt, send_evt, ack_evt;
INIT
location = done & y = inity;
FLOW location in {done, wait} -> der(y) = 1;
FLOW location in {read, send} -> der(y) = 0;
TRANS
(EVENT = request_evt ->
(
(location = done & y >= sampletime & next(location) = read & next(y) = y) |
(location = wait & y >= lost_packet_time & next(location) = read & next(y) = y)
)
) &
(EVENT = read_evt ->
(
(location = read & next(location) = wait & next(y) = 0)
)
) &
(EVENT = send_evt ->
(
(location = wait & next(location) = send & next(y) = y)
)
) &
(EVENT = ack_evt ->
(
(location = send & next(location) = done & next(y) = 0)
)
);
INVAR
(location = done -> y <= sampletime) &
(location = wait -> y <= lost_packet_time);
EOF
}
# Append, one per line, the frame conditions " & next(<v>_i) = <v>_i" to
# file $1 for variable base name $2 and every process index 1..$4, skipping
# the index given in $3 (pass "" to skip none).
print_fc_var() {
    local dest=${1}
    local vname=${2}
    local skip=${3}
    local count=${4}
    local idx

    for (( idx = 1; idx <= count; idx++ )); do
        [ "${idx}" = "${skip}" ] && continue
        echo " & next(${vname}_${idx}) = ${vname}_${idx}" >> "${dest}"
    done
}
# print the definition of the scheduler
# Appends a SchedulerType module, generated for $2 processes, to file $1.
# The scheduler owns one stopwatch x_k per process and one preemption flag
# wait_k per process except the last (the highest-priority process, which is
# never made to wait).  Higher process index means higher priority.
print_scheduler_definition() {
local out_file=${1}
local number_of_processes=${2}
cat >>${out_file} <<EOF
-- Module definition of a scheduler.
MODULE SchedulerType(invarscheduler, def_computation_offset)
EOF
# 1. Print variables
echo "VAR" >> ${out_file};
# print location
echo "location : {idle " >>${out_file};
for ((k=1; k<=${number_of_processes}; k++)); do
echo ",loc_sensor_${k}" >>${out_file};
done
echo "};" >>${out_file};
# print wait and continuous vars
for ((k=1; k<=${number_of_processes}; k++)); do
if [ "${k}" != "${number_of_processes}" ]; then
# the last process will not wait!
echo "wait_${k} : boolean;" >>${out_file}
fi
echo "x_${k} : continuous;" >>${out_file}
done
# 2. EVENT definition
# Comma handling: the first pair gets no leading ", ".
echo "EVENT " >> ${out_file};
for ((k=1; k<=${number_of_processes}; k++)); do
if [ "${k}" != 1 ]; then
echo ", read_${k}, request_${k}" >>${out_file};
else
echo "read_${k}, request_${k}" >>${out_file};
fi
done
echo ";" >> ${out_file};
# 3. INIT
# Start idle, with no pending waits and all stopwatches at zero.
echo "INIT location = idle " >> ${out_file};
for ((k=1; k<=${number_of_processes}; k++)); do
if [ "${k}" != "${number_of_processes}" ]; then
echo "& !wait_${k}" >>${out_file};
fi
echo "& x_${k} = 0" >>${out_file};
done
echo ";" >>${out_file};
# 4. FLOW
# Only the stopwatch of the currently scheduled process runs.
echo "FLOW " >> ${out_file};
for ((k=1; k<=${number_of_processes}; k++)); do
if [ "${k}" != 1 ]; then
echo "& " >>${out_file};
fi
echo "(location = loc_sensor_${k} -> der(x_${k}) = 1) &" >>${out_file};
echo "(location != loc_sensor_${k} -> der(x_${k}) = 0)" >>${out_file};
done
echo ";" >>${out_file};
# 5. INVAR
for ((k=1; k<=${number_of_processes}; k++)); do
# echo "INVAR location = loc_sensor_${k} -> (x_${k} <= ${k});" >> ${out_file};
# debug
# NOTE: the per-process bound above is disabled; the uniform 4*n bound below
# is the "debug" variant currently in use.
echo "INVAR location = loc_sensor_${k} -> (x_${k} <= $((4*number_of_processes)));" >> ${out_file};
done
# 6. TRANS
# Print trans:
# - request
# - read
for ((k=1; k<=${number_of_processes}; k++)); do
echo "TRANS " >>${out_file};
echo "(EVENT = request_${k} -> (" >> ${out_file};
echo "(location = idle & next(location) = loc_sensor_${k} & next(x_${k}) = 0 " >> ${out_file};
print_fc_var "${out_file}" "x" "${k}" "${number_of_processes}";
print_fc_var "${out_file}" "wait" "" "$((number_of_processes-1))"; # last process does not have preemption flag
echo ")" >> ${out_file} # end of request event
# set the "wait" flag if ${k} requests the scheduler and we are in an higher priority process"
for ((j=$((k+1)); j<=${number_of_processes}; j++)); do
echo "| (location = loc_sensor_${j} & next(location) = location & next(wait_${k}) & next(x_${k}) = 0" >> ${out_file};
print_fc_var "${out_file}" "x" "${k}" "${number_of_processes}";
# other preemption flags stay as they are
print_fc_var "${out_file}" "wait" "${k}" "$((number_of_processes-1))"; # last process does not have preemption flag
echo ")" >> ${out_file} # end of request event
done
# preemption if we are in a lower process priority
for ((j=1; j<k; j++)); do
echo " | (location = loc_sensor_${j} & next(location) = loc_sensor_${k} & next(wait_${j}) & next(x_${k}) = 0" >> ${out_file};
print_fc_var "${out_file}" "x" "${k}" "${number_of_processes}";
print_fc_var "${out_file}" "wait" "${j}" "$((number_of_processes-1))"; # last process does not have preemption flag
echo ")" >> ${out_file} # end of request event
done
echo ")) & " >> ${out_file} # end of request event
# read
echo "(EVENT = read_${k} -> " >> ${out_file};
# Guard: process k may be read only after k * def_computation_offset time.
local guard_time="${k}*def_computation_offset";
# local guard_time="0.5${k}*0.5";
echo "(location = loc_sensor_${k} & x_${k} >= ${guard_time} & " >>${out_file}
echo "case " >>${out_file}
# preemption
# On read completion, hand control back to the highest-priority waiting
# process (scanned from k-1 downward), clearing its wait flag.
for ((j=$((k-1)); j>0; j--)); do
echo "wait_${j} : next(location) = loc_sensor_${j} & !next(wait_${j}) " >> ${out_file};
print_fc_var "${out_file}" "x" "${k}" "${number_of_processes}";
print_fc_var "${out_file}" "wait" "${j}" "$((number_of_processes-1))";
echo ";" >> ${out_file};
done
# if no preemption, then turn back to idle
echo "TRUE : next(location) = idle " >> ${out_file};
print_fc_var "${out_file}" "x" "" "${number_of_processes}";
print_fc_var "${out_file}" "wait" "" "$((number_of_processes-1))";
echo ";esac" >> ${out_file}
# echo "(location = loc_sensor_${k} & x_${k} >= ${guard_time} & next(location) = idle " >>${out_file}
# # add wait condition for processes with less prioriry
# for ((j=1; j<k; j++)); do
# echo "& !wait_${j} " >>${out_file};
# done
# print_fc_var "${out_file}" "x" "" "${number_of_processes}";
# echo ")" >> ${out_file} # end of request event
# # read returns control to the highest priority sensor
# echo "| case " >> ${out_file};
# for ((j=1; j<k; j++)); do
# echo "wait_${j} : next(location) = loc_sensor_${j} & !next(wait_${j}) & next(x_${k}) = x_${k} " >> ${out_file};
# print_fc_var "${out_file}" "x" "${k}" "${number_of_processes}";
# echo ";" >> ${out_file};
# done
# echo "TRUE: TRUE; esac" >> ${out_file};
echo "));" >> ${out_file} # end of request event
done
}
# Append, one per line, the frame conditions " & next(received[i]) =
# received[i]" to file $1 for every process index 1..$3, skipping the index
# given in $2 (pass "" — or any non-index token — to skip none).
print_fc_received() {
    local dest=${1}
    local skip=${2}
    local count=${3}
    local idx

    for (( idx = 1; idx <= count; idx++ )); do
        [ "${idx}" = "${skip}" ] && continue
        echo " & next(received[${idx}]) = received[${idx}]" >> "${dest}"
    done
}
# Appends a ControllerType module, generated for $2 processes, to file $1.
# The controller collects one send/ack handshake per sensor; once all
# received[] flags are set it moves to "compute", otherwise a timeout
# ("expire") drops it back to "rest".
# NOTE(review): the heredoc header below says "scheduler" — looks like a
# copy-paste slip from print_scheduler_definition; it is only a comment in
# the generated model.
print_controller_definition() {
local out_file=${1}
local number_of_processes=${2}
cat >>${out_file} <<EOF
-- Module definition of a scheduler.
MODULE ControllerType(invar_on_rec, boundcontroller, waittime, waittime2, computationtime, mincomputationtime)
VAR
location : {rest, rec, wait, compute};
z : continuous;
received : array 1 .. ${number_of_processes} of boolean;
EOF
# 1. EVENT
echo -n "EVENT signal, expire " >>${out_file};
for ((k=1; k<=${number_of_processes}; k++)); do
echo -n ", send_${k}, ack_${k}" >>${out_file};
done
echo ";" >>${out_file};
# 2. INIT
echo -n "INIT location = rest & z = 0" >>${out_file};
for ((k=1; k<=${number_of_processes}; k++)); do
echo -n " & (! received[${k}])" >>${out_file};
done
echo ";" >>${out_file};
# 3. FLOW
# The clock z is stopped in "rest" and runs everywhere else.
# echo "FLOW der(z) = 1;" >>${out_file};
cat >> ${out_file} <<EOF
FLOW
(location = rest) -> der(z) = 0;
FLOW
(location != rest) -> der(z) = 1;
EOF
# 4. INVAR
cat >>${out_file} <<EOF
INVAR
(location = rec -> z <= 1) &
(location = wait -> z <= boundcontroller) & -- would be $((number_of_processes*20))
(location = compute -> z <= computationtime); -- 10 z <= 56
EOF
# 5. TRANS
# Per-process transitions: send_k (first packet leaves rest, later packets
# leave wait) and ack_k (either keep waiting or, once all received, compute).
for ((k=1; k<=${number_of_processes}; k++)); do
# send_k
echo "TRANS " >>${out_file};
echo "(EVENT = send_${k} -> (" >> ${out_file};
echo "(location = rest & " >> ${out_file};
echo "(! received[${k}]) & " >> ${out_file};
echo "next(z) = 0 & next(location) = rec & next(received[${k}]) " >> ${out_file};
print_fc_received "${out_file}" "${k}" "${number_of_processes}";
echo " & TRUE)" >> ${out_file}
echo "| (location = wait & " >> ${out_file};
echo "(! received[${k}]) & " >> ${out_file};
echo "next(z) = 0 & next(location) = rec & next(received[${k}]) " >> ${out_file};
print_fc_received "${out_file}" "${k}" "${number_of_processes}";
echo ")" >> ${out_file}
echo "));" >> ${out_file}
# ack_k
echo "TRANS " >>${out_file};
echo "(EVENT = ack_${k} -> (" >> ${out_file};
echo "(location = rec & " >> ${out_file};
echo "(received[${k}]) " >> ${out_file};
# ! received[j], j != k
# first_elem tracks whether an " & " separator is needed.  NOTE(review):
# first_elem, j and k are not declared local, so they leak into the
# caller's scope — harmless here but worth knowing.
echo -n "& ! (" >> ${out_file}
first_elem="1"
for ((j=1; j<=${number_of_processes}; j++)); do
if [ $j != $k ]; then
if [ "${first_elem}X" != "1X" ]; then
echo -n " & " >> ${out_file}
fi
echo -n "received[${j}]" >> ${out_file};
first_elem=""
fi
done
echo -n ") " >> ${out_file}
echo "& z >= waittime & next(z) = z & next(location) = wait " >> ${out_file};
print_fc_received "${out_file}" "" "${number_of_processes}";
echo ")" >> ${out_file}
echo "| (location = rec " >> ${out_file};
#big and event
for ((j=1; j<=${number_of_processes}; j++)); do
echo -n "& received[${j}]" >> ${out_file};
done
echo "& z >= waittime2 & next(z) = 0 & next(location) = compute" >> ${out_file};
# "$" is a sentinel that matches no index, i.e. frame all received[] flags.
print_fc_received "${out_file}" "$" "${number_of_processes}";
echo ")" >> ${out_file}
echo "));" >> ${out_file}
done # end of loop on each process for TRANS constraints
# expire
# Timeout in "wait" with an incomplete received[] set: reset all flags and
# return to rest without touching z.
echo "TRANS " >>${out_file};
echo "(EVENT = expire -> (" >> ${out_file};
echo "(location = wait & " >> ${out_file};
#big and event
echo -n "!(" >> ${out_file};
for ((j=1; j<=${number_of_processes}; j++)); do
if [ "${j}" != "1" ]; then
echo -n "& received[${j}]" >> ${out_file};
else
echo -n "received[${j}]" >> ${out_file};
fi
done
echo -n ")" >> ${out_file};
echo "& next(z) = z " >> ${out_file};
echo "& next(location) = rest " >> ${out_file};
for ((j=1; j<=${number_of_processes}; j++)); do
echo -n "& !next(received[${j}])" >> ${out_file};
done
echo ")" >> ${out_file}
echo "));" >> ${out_file}
# signal
# Computation finished (after at least mincomputationtime): clear the
# received[] flags and go back to rest.
echo "TRANS " >>${out_file};
echo "(EVENT = signal -> (" >> ${out_file};
echo "(location = compute & " >> ${out_file};
echo "z >= mincomputationtime & next(location) = rest " >> ${out_file};
echo " & next(z) = z " >> ${out_file};
for ((j=1; j<=${number_of_processes}; j++)); do
echo -n "& !next(received[${j}])" >> ${out_file};
done
echo ")" >> ${out_file}
echo "));" >> ${out_file}
}
# print the safe invariant
# Appends an INVARSPEC to file $1 stating that no two of the $2 sensors are
# ever simultaneously in the "send" location.
# NOTE: the "sametime" string built below is dead code — it supports the
# stronger, commented-out property that additionally requires all local
# times to coincide; only the plain ")" terminator is emitted today.
# NOTE(review): the loop index i is not declared local and leaks into the
# caller's scope (the caller's own i-loops have finished by then).
print_safe_invar()
{
local out_file=$1
local c=$2
local sametime=
local last=
echo -n "INVARSPEC !(" >> ${out_file}
for (( i=1; i<=$c; i++ )); do
if [ "${i}" == "1" ]; then
echo -n "sensor_${i}.location = send" >> "${out_file}"
else
echo -n " & sensor_${i}.location = send " >> "${out_file}"
sametime="${sametime} & sensor_${i}.time = sensor_$(($i-1)).time";
fi
last=$i
done
sametime="${sametime} & sensor_${last}.time = sensor_$(($last-1)).time";
sametime="${sametime} & sensor_${last}.time = controller.time & controller.time = scheduler.time";
# echo " ${sametime})" >> ${out_file}
echo " )" >> ${out_file}
}
# Parse the command line: both -lower <n> and -upper <n> are required, and
# upper must be >= lower.  Any malformed input ends in usage(), which exits.
lower=
upper=
while [ -n "${1}" ]; do
    opt="${1}"
    shift
    case "${opt}" in
        -lower)
            if [ -z "${1}" ]; then
                echo "No lower bound provided!"
                usage
            fi
            lower="${1}"
            shift
            ;;
        -upper)
            if [ -z "${1}" ]; then
                echo "No upper bound provided!"
                usage
            fi
            upper="${1}"
            shift
            ;;
        *)
            echo "Unknown parameter ${opt}"
            usage
            ;;
    esac
done

check_bound "${lower}"
check_bound "${upper}"

if [ "${upper}" -lt "${lower}" ]; then
    echo "Upper bound is less than lower bound!"
    usage
fi
# Main generation loop: for every process count c in [lower, upper], write a
# fresh dist_controller_<c>.hydi composed of the common head, per-sensor
# instantiations, synchronization constraints, the safety INVARSPEC, MIRROR
# hints and the three module definitions.
for (( c=$lower; c<=$upper; c++ )); do
if [ $c -lt 10 ]; then
# Zero-pad single-digit counts so the files sort lexicographically.
prefix="0";
else
prefix=""
fi
out_file="dist_controller_${prefix}${c}.hydi";
# Start from an empty file: every writer below appends.
if [ -e "${out_file}" ]; then
rm ${out_file}
fi
echo "processing ${c}/${upper}";
date=`date`
echo "-- Generated on ${date}" >> ${out_file}
# print common part in main
print_common_part "${out_file}"
# print var definition
echo "VAR" >> "${out_file}"
for (( i=1; i<=$c; i++ )); do
# Sensor i gets a lost-packet threshold of 4*i time units.
to_limit=$((i*4));
echo "sensor_${i} : SensorType(${to_limit}, def_inity_par, def_sampletime_par);" >> "${out_file}"
done
echo "scheduler: SchedulerType(invarscheduler_par, def_computation_offset_par);" >> ${out_file}
echo "controller: ControllerType(invar_on_rec_par, boundcontroller_par, waittime_par, waittime2_par, computationtime_par, mincomputationtime_par);" >> ${out_file}
echo "" >> "${out_file}"
echo "-- Synchronization constraints" >> ${out_file}
# print sync actions
for (( i=1; i<=$c; i++ )); do
echo "-- synch of sensor_${i} with scheduler" >> ${out_file}
echo "SYNC sensor_${i}, scheduler EVENTS request_evt, request_${i};" >>${out_file};
echo "SYNC sensor_${i}, scheduler EVENTS read_evt, read_${i};" >> ${out_file}
echo "-- synch of sensor_${i} with controller" >> ${out_file}
# removed urgency condition
# echo "SYNC sensor_${i}, controller EVENTS send_evt, send_${i} CONDITION u, sensors_u_must_be_0;" >> ${out_file}
echo "SYNC sensor_${i}, controller EVENTS send_evt, send_${i};" >> ${out_file}
echo "SYNC sensor_${i}, controller EVENTS ack_evt, ack_${i};" >> ${out_file}
done
echo "" >> "${out_file}"
# Disabled LTLSPEC experiments (hard-coded for 2 processes) kept below for
# reference; they are not emitted into the generated model.
# cat >> ${out_file} <<EOF
# LTLSPEC G !(sensor_1.location = send & sensor_2.location = send )
# LTLSPEC (G F ((controller.received[1] & controller.received[2]))) -> (G ((controller.received[1] & controller.received[2]) -> F (controller.location = compute) ));
# -- automatically generated
# LTLSPEC G (rest = controller.location -> F rest = controller.location)
# LTLSPEC G (rest = controller.location -> F rec = controller.location)
# LTLSPEC G (rest = controller.location -> F wait = controller.location)
# LTLSPEC G (rest = controller.location -> F compute = controller.location)
# LTLSPEC G (rec = controller.location -> F rest = controller.location)
# LTLSPEC G (rec = controller.location -> F rec = controller.location)
# LTLSPEC G (rec = controller.location -> F wait = controller.location)
# LTLSPEC G (rec = controller.location -> F compute = controller.location)
# LTLSPEC G (wait = controller.location -> F rest = controller.location)
# LTLSPEC G (wait = controller.location -> F rec = controller.location)
# LTLSPEC G (wait = controller.location -> F wait = controller.location)
# LTLSPEC G (wait = controller.location -> F compute = controller.location)
# LTLSPEC G (compute = controller.location -> F rest = controller.location)
# LTLSPEC G (compute = controller.location -> F rec = controller.location)
# LTLSPEC G (compute = controller.location -> F wait = controller.location)
# LTLSPEC G (compute = controller.location -> F compute = controller.location)
# LTLSPEC G (idle = scheduler.location -> F idle = scheduler.location)
# LTLSPEC G (idle = scheduler.location -> F loc_sensor_1 = scheduler.location)
# LTLSPEC G (idle = scheduler.location -> F loc_sensor_2 = scheduler.location)
# LTLSPEC G (loc_sensor_1 = scheduler.location -> F idle = scheduler.location)
# LTLSPEC G (loc_sensor_1 = scheduler.location -> F loc_sensor_1 = scheduler.location)
# LTLSPEC G (loc_sensor_1 = scheduler.location -> F loc_sensor_2 = scheduler.location)
# LTLSPEC G (loc_sensor_2 = scheduler.location -> F idle = scheduler.location)
# LTLSPEC G (loc_sensor_2 = scheduler.location -> F loc_sensor_1 = scheduler.location)
# LTLSPEC G (loc_sensor_2 = scheduler.location -> F loc_sensor_2 = scheduler.location)
# LTLSPEC G (done = sensor_2.location -> F done = sensor_2.location)
# LTLSPEC G (done = sensor_2.location -> F read = sensor_2.location)
# LTLSPEC G (done = sensor_2.location -> F wait = sensor_2.location)
# LTLSPEC G (done = sensor_2.location -> F send = sensor_2.location)
# LTLSPEC G (read = sensor_2.location -> F done = sensor_2.location)
# LTLSPEC G (read = sensor_2.location -> F read = sensor_2.location)
# LTLSPEC G (read = sensor_2.location -> F wait = sensor_2.location)
# LTLSPEC G (read = sensor_2.location -> F send = sensor_2.location)
# LTLSPEC G (wait = sensor_2.location -> F done = sensor_2.location)
# LTLSPEC G (wait = sensor_2.location -> F read = sensor_2.location)
# LTLSPEC G (wait = sensor_2.location -> F wait = sensor_2.location)
# LTLSPEC G (wait = sensor_2.location -> F send = sensor_2.location)
# LTLSPEC G (send = sensor_2.location -> F done = sensor_2.location)
# LTLSPEC G (send = sensor_2.location -> F read = sensor_2.location)
# LTLSPEC G (send = sensor_2.location -> F wait = sensor_2.location)
# LTLSPEC G (send = sensor_2.location -> F send = sensor_2.location)
# LTLSPEC G (done = sensor_1.location -> F done = sensor_1.location)
# LTLSPEC G (done = sensor_1.location -> F read = sensor_1.location)
# LTLSPEC G (done = sensor_1.location -> F wait = sensor_1.location)
# LTLSPEC G (done = sensor_1.location -> F send = sensor_1.location)
# LTLSPEC G (read = sensor_1.location -> F done = sensor_1.location)
# LTLSPEC G (read = sensor_1.location -> F read = sensor_1.location)
# LTLSPEC G (read = sensor_1.location -> F wait = sensor_1.location)
# LTLSPEC G (read = sensor_1.location -> F send = sensor_1.location)
# LTLSPEC G (wait = sensor_1.location -> F done = sensor_1.location)
# LTLSPEC G (wait = sensor_1.location -> F read = sensor_1.location)
# LTLSPEC G (wait = sensor_1.location -> F wait = sensor_1.location)
# LTLSPEC G (wait = sensor_1.location -> F send = sensor_1.location)
# LTLSPEC G (send = sensor_1.location -> F done = sensor_1.location)
# LTLSPEC G (send = sensor_1.location -> F read = sensor_1.location)
# LTLSPEC G (send = sensor_1.location -> F wait = sensor_1.location)
# LTLSPEC G (send = sensor_1.location -> F send = sensor_1.location)
# EOF
# echo "-- no deadlock - not proved" >> ${out_file}
# echo -n "LTLSPEC" >> ${out_file}
# echo -n " (G F sensor_1.location = done)" >> ${out_file}
# for (( i=2; i<=$c; i++ )); do
# echo -n " | (G F sensor_${i}.location = done)" >> ${out_file}
# done
# echo "" >> ${out_file}
# echo -n "LTLSPEC" >> ${out_file}
# echo -n " (G F (controller.received[1] " >> ${out_file}
# for (( i=2; i<=$c; i++ )); do
# echo -n " & controller.received[${i}] " >> ${out_file}
# done
# echo -n ")) -> " >> ${out_file}
# echo -n "(G F (sensor_1.location = done " >> ${out_file}
# for (( i=2; i<=$c; i++ )); do
# echo -n " & sensor_${i}.location = done" >> ${out_file}
# done
# echo "))" >> ${out_file}
# for (( i=1; i<=$c; i++ )); do
# echo "LTLSPEC (G F controller.location = compute) -> (G F sensor_${i}.location = done)" >> ${out_file}
# done
print_safe_invar "${out_file}" "${c}"
# print mirrors for the locations
echo "" >> "${out_file}"
# c sensors
for (( i=1; i<=$c; i++ )); do
echo "-- MIRROR sensor_${i}.location" >> "${out_file}"
done
# controller
echo "-- MIRROR controller.location" >> "${out_file}"
# scheduler
echo "-- MIRROR scheduler.location" >> "${out_file}"
echo "" >> "${out_file}"
# print process definition
print_sensor_definition "${out_file}"
# print scheduler var definition
print_scheduler_definition "${out_file}" "${c}"
# print the controller definition
print_controller_definition "${out_file}" "${c}"
done
| true |
93dc84d49e345e1b92cc077155042789ac3fada1 | Shell | enricosada/falanx | /test/examples/mock1/falanx-mock | UTF-8 | 248 | 3.15625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if [ -f $SCRIPT_DIR/falanx-args.txt ] ; then
rm $SCRIPT_DIR/falanx-args.txt
fi
for ARG in "$@"
do
echo $ARG >> $SCRIPT_DIR/falanx-args.txt
done
"$REAL_FALANX" $@
| true |
29f7ad99b4e0ba96ffcff040b8c91870affecfe0 | Shell | irvhsu/cs273b-project | /notebooks/interpretation/interpret.sh | UTF-8 | 775 | 3.140625 | 3 | [] | no_license | # This script accepts a bed file and creates a bed file with sequence
# level and/or base level activity scores / importance scores
HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
IN=$1
FASTA=$2 # must be absolute path
MODEL=$3 # must be absolute path
OUT=$3
############################################
mkdir $OUT
cp $IN $OUT/in.bed
cd $OUT
############################################
python $HERE/fix_bed_lengths.py in.bed corrected_lengths.bed
############################################
bedtools getfasta -fi $FASTA -bed corrected_lengths.bed > in.fa
############################################
python score_regions.py in.fa > sequence_level.bed
############################################
python score_nucleotides.py in.fa > nucleotide_level.bed
| true |
3ffa001175e86fab51476a96b775cacf74c01924 | Shell | natasha-arshad/jondobrowser-build | /projects/snowflake/build | UTF-8 | 1,370 | 2.9375 | 3 | [] | no_license | #!/bin/bash
[% c("var/set_default_env") -%]
[% pc('go', 'var/setup', { go_tarfile => c('input_files_by_name/go') }) %]
[% pc(c('var/compiler'), 'var/setup', { compiler_tarfile => c('input_files_by_name/' _ c('var/compiler')) }) %]
[% IF c("var/linux") %]
tar -C /var/tmp/dist -xf $rootdir/[% c('input_files_by_name/binutils') %]
export PATH="/var/tmp/dist/binutils/bin:$PATH"
[% END -%]
distdir=/var/tmp/dist/[% project %]
[% IF c("var/osx") %]
PTDIR="$distdir/Contents/MacOS/Tor/PluggableTransports"
DOCSDIR="$distdir/Contents/Resources/TorBrowser/Docs/snowflake"
[% ELSE %]
PTDIR="$distdir/TorBrowser/Tor/PluggableTransports"
DOCSDIR="$distdir/TorBrowser/Docs/snowflake"
[% END %]
mkdir -p $PTDIR $DOCSDIR
tar -C /var/tmp/dist -xf [% c('input_files_by_name/go-webrtc') %]
tar -C /var/tmp/dist -xf [% c('input_files_by_name/uniuri') %]
tar -C /var/tmp/dist -xf [% c('input_files_by_name/goptlib') %]
mkdir -p /var/tmp/build
tar -C /var/tmp/build -xf [% project %]-[% c('version') %].tar.gz
cd /var/tmp/build/[% project %]-[% c('version') %]
cd client
go build -ldflags '-s'
cp -a client[% IF c("var/windows") %].exe[% END %] $PTDIR/snowflake-client[% IF c("var/windows") %].exe[% END %]
cd ..
cp -a README.md LICENSE $DOCSDIR
cd $distdir
[% c('tar', {
tar_src => [ '.' ],
tar_args => '-czf ' _ dest_dir _ '/' _ c('filename'),
}) %]
| true |
d2bc2c0161561b894833ff4b64e8984c16492c46 | Shell | linuxgazette/lg | /issue47/misc/tips/show_denied_packets.sh.txt | UTF-8 | 817 | 3.671875 | 4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/sh
# sh_denied_packets.sh
# Print the non-local DENY entries from a firewall log file.
# Usage: show_denied_packets.sh [logfile]   (default: /var/log/messages)
# the local network
LOCAL_LAN="192.168.1"
# path to the program to resolve the ip addys
LOG_RESOLVE="/home/marc/bin/logresolve"
# path to script to pull out extra info
LOG_STRIP="/home/marc/bin/strip_log.pl"
# if the first argument is not a readable file, use /var/log/messages
if [ ! -r "$1" ]
then LOG_TO_CHECK="/var/log/messages";
else LOG_TO_CHECK=$1;
fi
# grep for DENY, but not for my lan, remove the data up to the ip addy,
# resolve the first ip addy, sort it, and remove the dupes.
# Quoting the variables keeps paths with spaces working, and 'sort -u'
# replaces the extra 'uniq' process with identical output.
grep DENY "$LOG_TO_CHECK" | grep -v 127.0.0 | grep -v "$LOCAL_LAN" \
| "$LOG_STRIP" | "$LOG_RESOLVE" | sort -u
| true |
72210b8ea812367209e93a596d94eb9cef4abd42 | Shell | wwaites/tissue-analysis | /scripts/entropy_plot | UTF-8 | 334 | 3.078125 | 3 | [] | no_license | #!/bin/sh
# Extract (time, entropy) rows from <run_dir>/summary.db into a
# whitespace-separated .dat file under the shared dataset directory.
name=$(basename -- "$1")
db="$1/summary.db"
# Work on an unpredictable private copy (mktemp, not $$) so sqlite3
# never touches the live database and temp names cannot collide.
dbc=$(mktemp --suffix=.db /tmp/summary.XXXXXX)
dataset=scaling
outdir="/home/ww/shared/three-cells/$dataset"
mkdir -p "$outdir"
# The original read 'echo $processing $name' with $processing undefined;
# the literal word "processing" was almost certainly intended.
echo "processing $name"
cp -- "$db" "$dbc"
echo "SELECT time, entropy FROM meshfiles ORDER BY time;" | sqlite3 "$dbc" | sort -n | sed 's/|/ /' > "$outdir/$name.entropy.dat"
rm -f -- "$dbc"
| true |
c185a8f31dad8139a140a0cb664f80bdff411218 | Shell | trustwallet/wallet-core | /samples/go/compile.sh | UTF-8 | 830 | 3.515625 | 4 | [
"BSD-3-Clause",
"LicenseRef-scancode-protobuf",
"LGPL-2.1-only",
"Swift-exception",
"MIT",
"BSL-1.0",
"Apache-2.0"
] | permissive | #!/bin/bash
# Regenerate every Go protobuf binding from src/proto.
# Run as ./compile.sh from this directory.

# Start from an empty output tree.
rm -rf protos
mkdir protos

PROTO_PATH=../../src/proto

for proto_file in "$PROTO_PATH"/*.proto; do
    # Package name: the file stem, lowercased (e.g. Bitcoin.proto -> bitcoin).
    file_name="${proto_file#"$PROTO_PATH"/}"
    pkg="${file_name%.proto}"
    pkg="${pkg,,}"

    # Generate Go protobuf files.
    #
    # The manual --go_opt=M... declarations exist because of
    # dependencies between some proto files.
    mkdir protos/"$pkg"
    protoc -I="$PROTO_PATH" --go_out=protos/"$pkg" \
        --go_opt=paths=source_relative \
        --go_opt=M"$file_name"=tw/protos/"$pkg" \
        --go_opt=MCommon.proto=tw/protos/common \
        --go_opt=MBitcoin.proto=tw/protos/bitcoin \
        --go_opt=MEthereum.proto=tw/protos/ethereum \
        --go_opt=MBinance.proto=tw/protos/binance \
        "$PROTO_PATH"/"$file_name"
done
| true |
167a48497df0f2964bc4b297ea343597827e214a | Shell | Shekharrajak/dotfiles | /bash_profile | UTF-8 | 659 | 2.8125 | 3 | [] | no_license | # Globals
# Load global shell settings shared between shells.
source "$HOME/.shellrc"
# Source every snippet under ~/.bash; the -e guard skips the literal
# glob pattern when the directory is empty or missing, and quoting
# protects file names containing spaces.
for file in "$HOME"/.bash/*; do
  [ -e "$file" ] && source "$file"
done
# Path to the bash it configuration
export BASH_IT="/Users/kristian/.bash_it"
# Lock and Load a custom theme file
# location /.bash_it/themes/
export BASH_IT_THEME='powerline-multiline'
# Don't check mail when opening terminal.
unset MAILCHECK
# Set this to false to turn off version control status checking within the prompt for all themes
export SCM_CHECK=true
# Set vcprompt executable path for scm advance info in prompt (demula theme)
# https://github.com/xvzf/vcprompt
#export VCPROMPT_EXECUTABLE=~/.vcprompt/bin/vcprompt
# Load Bash It
source "$BASH_IT/bash_it.sh"
| true |
fb91f4b37459c8f18e76596f4e30a8852cb49d5f | Shell | allixender/smart-csw-ingester | /docker-preps.sh | UTF-8 | 524 | 2.625 | 3 | [
"CC-BY-SA-4.0",
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Extract the application name and version from build.sbt: take the text
# after the first '=', then strip all quotes and spaces in one pass
# (grep -E/tr replace the deprecated egrep and the cat|...|sed|sed chain).
APPNAME=$(grep -E "^name := " build.sbt | cut -d "=" -f 2 | tr -d '" ')
APPVERSION=$(grep -E "^version := " build.sbt | cut -d "=" -f 2 | tr -d '" ')
pwd
ls -lh target/universal
# Stage the Docker/Kubernetes descriptors next to the packaged app.
cp Dockerfile target/universal
cp smart-csw-ingester.k8s.yaml target/universal
ls -lh target/universal
# Bundle the sbt "universal" tarball with the Dockerfile for CI, but only
# when the expected artifact exists; quoting guards against empty or odd
# values parsed from build.sbt.
cd target/universal && test -f "${APPNAME}-${APPVERSION}.tgz" && tar -cvzf "${APPNAME}-${TRAVIS_BUILD_NUMBER}-docker.tgz" "${APPNAME}-${APPVERSION}.tgz" Dockerfile
ls -lh
b548db91a798aae17ea7b70df5a690a53b7caaeb | Shell | lhhunghimself/LINCS_RNAseq_cpp | /scripts/docker_fast_run-alignment-analysis_profiler.sh | UTF-8 | 2,885 | 3.4375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# This script converts a series of mRNA sequencing data file in FASTQ format
# to a table of UMI read counts of human genes in multiple sample conditions.
# Usage: $1 is the host directory containing data/LINCS, bind-mounted
# into each container as /data.
# 1 Parameters
#Docker parameters
NWELLS=96
# NOTE(review): PROFILEDIR is not set yet at this point, so this first
# DOCKERCMD is built with an empty profile path; it is rebuilt below
# (with PROFILEDIR set) before every actual docker invocation — confirm
# this line can be dropped.
DOCKERCMD="docker run --rm -it -v $PROFILEDIR/profile_data:/.cprofiles -v $1/data/LINCS:/data -e NWELLS=$NWELLS biodepot/rnaseq-umi-cpp:profiler "
# 1.1 Global
TOP_DIR=/data
# 1.2 Dataset
SERIES="20150409"
SAMPLE_ID="RNAseq_${SERIES}"
LANES=6
DATA_DIR=${TOP_DIR}
SEQ_DIR="${DATA_DIR}/Seqs"
ALIGN_DIR="${DATA_DIR}/Aligns"
COUNT_DIR="${DATA_DIR}/Counts"
UMITOOLS_DIR="${TOP_DIR}"
# 1.3 Reference data (paths are inside the container, under /data)
REF_DIR="$DATA_DIR/References/Broad_UMI"
SPECIES_DIR="${REF_DIR}/Human_RefSeq"
REF_SEQ_FILE="${SPECIES_DIR}/refMrna_ERCC_polyAstrip.hg19.fa"
SYM2REF_FILE="${SPECIES_DIR}/refGene.hg19.sym2ref.dat"
ERCC_SEQ_FILE="${REF_DIR}/ERCC92.fa"
BARCODE_FILE="${REF_DIR}/barcodes_trugrade_96_set4.dat"
# 1.4 Program
PROG_DIR="$DATA_DIR/Programs/Broad-DGE"
BWA_ALN_SEED_LENGTH=24
BWA_SAM_MAX_ALIGNS_FOR_XA_TAG=20
THREAD_NUMBER=8
# 2 Computation
# 2.1 Alignment
# Align sequence fragments to reference genome library.
let "IDX = 1"
SEQ_FILES="";
#get files
#change this loop to use scripts on different files
# Build one space-separated string with the paired R1/R2 FASTQ files of
# every lane; the string is word-split ON PURPOSE when passed to umisplit
# below, so the individual paths must not contain spaces.
while [ "$IDX" -le "${LANES}" ]; do
    SUBSAMPLE_ID="Lane$IDX"
    SEQ_FILE_R1="${SEQ_DIR}/${SAMPLE_ID}_${SUBSAMPLE_ID}_R1.fastq.gz"
    SEQ_FILE_R2="${SEQ_DIR}/${SAMPLE_ID}_${SUBSAMPLE_ID}_R2.fastq.gz"
    SEQ_FILES="${SEQ_FILES} ${SEQ_FILE_R1} ${SEQ_FILE_R2}"
    let "IDX = $IDX + 1"
done
#split into wells
#use tight checking no mismatch no ambiguities to match original - default is the looser setting of mismatch =1 and missing N=1
# Each stage mounts its own PROFILEDIR as /.cprofiles so the profiling
# output of the three tools stays separate; DOCKERCMD is rebuilt for each
# stage and left unquoted on purpose so it splits into command words.
PROFILEDIR="${PWD}/profile_split"
DOCKERCMD="docker run --rm -it -v $PROFILEDIR:/.cprofiles -v $1/data/LINCS:/data -e NWELLS=$NWELLS biodepot/rnaseq-umi-cpp:profiler "
echo "$DOCKERCMD umisplit -v -l 16 -m 0 -N 0 -t $THREAD_NUMBER -b $BARCODE_FILE $SEQ_FILES"
$DOCKERCMD umisplit -v -l 16 -m 0 -N 0 -o $ALIGN_DIR -t $THREAD_NUMBER -b $BARCODE_FILE $SEQ_FILES
# Alignment stage: run bwa over the demultiplexed wells.
PROFILEDIR="${PWD}/profile_align"
DOCKERCMD="docker run --rm -it -v $PROFILEDIR:/.cprofiles -v $1/data/LINCS:/data -e NWELLS=$NWELLS biodepot/rnaseq-umi-cpp:profiler "
echo "$DOCKERCMD multibwa.sh $TOP_DIR $REF_DIR $SPECIES_DIR $ALIGN_DIR $BWA_ALN_SEED_LENGTH $BWA_SAM_MAX_ALIGNS_FOR_XA_TAG $THREAD_NUMBER"
$DOCKERCMD multibwa.sh $TOP_DIR $REF_DIR $SPECIES_DIR $ALIGN_DIR $BWA_ALN_SEED_LENGTH $BWA_SAM_MAX_ALIGNS_FOR_XA_TAG $THREAD_NUMBER
# Merge stage: collapse UMIs into per-gene counts.
PROFILEDIR="${PWD}/profile_merge"
DOCKERCMD="docker run --rm -it -v $PROFILEDIR:/.cprofiles -v $1/data/LINCS:/data -e NWELLS=$NWELLS biodepot/rnaseq-umi-cpp:profiler "
echo "$DOCKERCMD umimerge_parallel -i $SAMPLE_ID -s $SYM2REF_FILE -e $ERCC_SEQ_FILE -b $BARCODE_FILE -a $ALIGN_DIR -o $COUNT_DIR -t $THREAD_NUMBER"
$DOCKERCMD umimerge_parallel -i $SAMPLE_ID -s $SYM2REF_FILE -e $ERCC_SEQ_FILE -b $BARCODE_FILE -a $ALIGN_DIR -o $COUNT_DIR -t $THREAD_NUMBER
| true |
ace7aa8b2bc9556092785633f3b1923072195f06 | Shell | cloudogu/reveal.js-docker-example | /printPdf.sh | UTF-8 | 1,650 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env bash
COMPRESS=${COMPRESS:-'false'}
set -o errexit -o nounset -o pipefail
PRINTING_IMAGE='arachnysdocker/athenapdf:2.16.0'
# For compression
GHOSTSCRIPT_IMAGE='minidocks/ghostscript:9'
pdf=$(mktemp --suffix=.pdf)
pdfCompressed=${pdf//.pdf/.min.pdf}
image=$(docker build -q . )
container=$(docker run --rm -d "$image")
address=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "${container}")
sleep 1
rm "${pdf}" || true
# When images are not printed, increase --delay
docker run --rm --shm-size=4G ${PRINTING_IMAGE} \
athenapdf --delay 2000 --stdout "http://${address}:8080/?print-pdf" \
> "${pdf}"
if [[ $COMPRESS == "true" ]]; then
# Compress defensively, using best quality PDFSETTING printer.
# Still five times smaller thant original
# Other dPDFSETTINGS: printer > default > ebook > screen
# https://askubuntu.com/a/256449/
docker run --rm -v /tmp:/tmp $GHOSTSCRIPT_IMAGE \
-sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/printer -dNOPAUSE -dQUIET -dBATCH \
-sOutputFile=- "${pdf}" \
> "${pdfCompressed}"
fi
finalPdf="$(if [[ $COMPRESS == "true" ]]; then echo "${pdfCompressed}"; else echo "${pdf}"; fi)"
if [ -t 1 ] ; then
# When running in terminal print both PDF with size and opn
ls -lah "${pdf//.pdf/}"*
xdg-open "${finalPdf}"
else
# For headless use only output path to PDF
echo "${finalPdf}"
fi
# Dont leak PDFs, containers or images
if [[ $COMPRESS == "true" ]]; then rm "${pdf}"; fi
docker rm -f "${container}" > /dev/null
# Image might still be in use, but at least try to clean up image
docker rmi "${image}" > /dev/null || true
| true |
85a5b9a670161a4f0c7db9ab3c09adea19cbb2b4 | Shell | fcoambrozio/slackbuilds | /discover/discover.SlackBuild | UTF-8 | 8,564 | 3.453125 | 3 | [] | no_license | #!/bin/sh
# $Id: discover.SlackBuild,v 1.3 2023/02/18 20:44:02 root Exp root $
# Copyright 2022, 2023 Eric Hameleers, Eindhoven, NL
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software for
# any purpose with or without fee is hereby granted, provided that
# the above copyright notice and this permission notice appear in all
# copies.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHORS AND COPYRIGHT HOLDERS AND THEIR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# Slackware SlackBuild script
# ===========================
# By: Eric Hameleers <alien@slackware.com>
# For: discover
# Descr: Plasma application management GUI
# URL: https://kde.org/plasma-desktop/
# Build needs:
# Needs: appstream, flatpak
# Changelog:
# 5.26.1-1: 22/oct/2022 by Eric Hameleers <alien@slackware.com>
# * Initial build.
# 5.26.4-1: 30/nov/2022 by Eric Hameleers <alien@slackware.com>
# * Update.
# 5.27.0-1: 18/feb/2023 by Eric Hameleers <alien@slackware.com>
# * Update.
#
# Run 'sh discover.SlackBuild' to build a Slackware package.
# The package (.t?z) and .txt file as well as build logs are created in /tmp .
# Install the package using 'installpkg' or 'upgradepkg --install-new'.
#
# -----------------------------------------------------------------------------
# Package name and the two candidate Plasma versions: VER150 is used on
# Slackware 15.0 stable, VERCUR everywhere else (selection below).
PRGNAM=discover
VERCUR=${VERCUR:-5.27.7}
VER150=${VER150:-5.23.5}
BUILD=${BUILD:-1}
NUMJOBS=${NUMJOBS:-" -j$(nproc) "}
TAG=${TAG:-alien}
DOCS="LICENSES"
# Pick the version from /etc/os-release unless the caller exported
# VERSION explicitly.
if [ -z "$VERSION" ]; then
  eval $(grep -E '(^VERSION_ID=|^VERSION_CODENAME=)' /etc/os-release)
  if [ "$VERSION_ID" == "15.0" ] && [ "$VERSION_CODENAME" == "stable" ]; then
    VERSION=$VER150
  else
    VERSION=$VERCUR
  fi
fi
# Where do we look for sources?
SRCDIR=$(cd $(dirname $0); pwd)
# Place to build (TMP) package (PKG) and output (OUTPUT) the program:
TMP=${TMP:-/tmp/build}
PKG=$TMP/package-$PRGNAM
OUTPUT=${OUTPUT:-/tmp}
SOURCE="$SRCDIR/${PRGNAM}-${VERSION}.tar.xz"
SRCURL="https://download.kde.org/stable/plasma/${VERSION}/${PRGNAM}-${VERSION}.tar.xz"
##
## --- with a little luck, you won't have to edit below this point --- ##
##
# Automatically determine the architecture we're building on:
if [ -z "$ARCH" ]; then
  case "$(uname -m)" in
    i?86) ARCH=i586 ;;
    arm*) readelf /usr/bin/file -A | egrep -q "Tag_CPU.*[4,5]" && ARCH=arm || ARCH=armv7hl ;;
    # Unless $ARCH is already set, use uname -m for all other archs:
    *) ARCH=$(uname -m) ;;
  esac
  export ARCH
fi
# Set CFLAGS/CXXFLAGS and LIBDIRSUFFIX:
case "$ARCH" in
  i?86)      SLKCFLAGS="-O2 -march=${ARCH} -mtune=i686"
             SLKLDFLAGS=""; LIBDIRSUFFIX=""
             ;;
  x86_64)    SLKCFLAGS="-O2 -fPIC"
             SLKLDFLAGS="-L/usr/lib64"; LIBDIRSUFFIX="64"
             ;;
  armv7hl)   SLKCFLAGS="-O2 -march=armv7-a -mfpu=vfpv3-d16"
             SLKLDFLAGS=""; LIBDIRSUFFIX=""
             ;;
  *)         SLKCFLAGS=${SLKCFLAGS:-"-O2"}
             SLKLDFLAGS=${SLKLDFLAGS:-""}; LIBDIRSUFFIX=${LIBDIRSUFFIX:-""}
             ;;
esac
# Build target triplet for the selected architecture:
case "$ARCH" in
  arm*)    TARGET=$ARCH-slackware-linux-gnueabi ;;
  *)       TARGET=$ARCH-slackware-linux ;;
esac
# Exit the script on errors:
set -e
trap 'echo "$0 FAILED at line ${LINENO}" | tee $OUTPUT/error-${PRGNAM}.log' ERR
# Catch unitialized variables:
set -u
# First command-line argument (defaults to 1); "--download" only fetches
# the source tarball and exits.
P1=${1:-1}
# Save old umask and set to 0022:
_UMASK_=$(umask)
umask 0022
# Create working directories:
mkdir -p $OUTPUT          # place for the package to be saved
mkdir -p $TMP/tmp-$PRGNAM # location to build the source
mkdir -p $PKG             # place for the package to be built
rm -rf $PKG/*             # always erase old package's contents
rm -rf $TMP/tmp-$PRGNAM/* # remove the remnants of previous build
rm -rf $OUTPUT/{checkout,configure,make,install,error,makepkg,patch}-$PRGNAM.log
                          # remove old log files
# Source file availability:
if ! [ -f ${SOURCE} ]; then
  echo "Source '$(basename ${SOURCE})' not available yet..."
  # Check if the $SRCDIR is writable at all - if not, download to $OUTPUT
  [ -w "$SRCDIR" ] || SOURCE="$OUTPUT/$(basename $SOURCE)"
  # NOTE(review): 'continue' is only valid inside a loop; here bash emits
  # a warning and the statement has no effect — confirm intent.
  if [ -f ${SOURCE} ]; then echo "Ah, found it!"; continue; fi
  if ! [ "x${SRCURL}" == "x" ]; then
    echo "Will download file to $(dirname $SOURCE)"
    # '|| true' keeps 'set -e' from aborting on a failed download;
    # failure is detected via the file-size test below instead.
    wget --no-check-certificate -nv -T 20 -O "${SOURCE}" "${SRCURL}" || true
    if [ $? -ne 0 -o ! -s "${SOURCE}" ]; then
      echo "Downloading '$(basename ${SOURCE})' failed... aborting the build."
      mv -f "${SOURCE}" "${SOURCE}".FAIL
      exit 1
    fi
  else
    echo "File '$(basename ${SOURCE})' not available... aborting the build."
    exit 1
  fi
fi
if [ "$P1" == "--download" ]; then
  echo "Download complete."
  exit 0
fi
# --- PACKAGE BUILDING ---
echo "++"
echo "|| $PRGNAM-$VERSION"
echo "++"
cd $TMP/tmp-$PRGNAM
echo "Extracting the source archive(s) for $PRGNAM..."
tar -xvf ${SOURCE}
cd ${PRGNAM}-${VERSION}
chown -R root:root .
chmod -R u+w,go+r-w,a+rX-st .
echo Building ...
# Out-of-tree cmake build; every stage's output is teed into $OUTPUT.
mkdir -p build-${PRGNAM}
cd build-${PRGNAM}
cmake .. \
  -DCMAKE_C_FLAGS:STRING="$SLKCFLAGS" \
  -DCMAKE_CXX_FLAGS:STRING="$SLKCFLAGS" \
  -DCMAKE_C_FLAGS_RELEASE:STRING="$SLKCFLAGS" \
  -DCMAKE_CXX_FLAGS_RELEASE:STRING="$SLKCFLAGS" \
  -DCMAKE_BUILD_TYPE=Release \
  -DCMAKE_INSTALL_PREFIX=/usr \
  -DCMAKE_INSTALL_LIBDIR=/usr/lib${LIBDIRSUFFIX} \
  -DCMAKE_INSTALL_MANDIR=/usr/man \
  -DCMAKE_INSTALL_SYSCONFDIR=/etc \
  -DLIB_SUFFIX=${LIBDIRSUFFIX} \
  -DKDE_INSTALL_LIBDIR=lib$LIBDIRSUFFIX \
  -DKDE_INSTALL_MANDIR=/usr/man \
  -DKDE_INSTALL_SYSCONFDIR=/etc/kde \
  -DKDE_INSTALL_USE_QT_SYS_PATHS=ON \
  -DBUILD_TESTING=OFF \
  2>&1 | tee $OUTPUT/configure-${PRGNAM}.log
make $NUMJOBS 2>&1 | tee $OUTPUT/make-${PRGNAM}.log
make DESTDIR=$PKG install 2>&1 | tee $OUTPUT/install-${PRGNAM}.log
cd -
# Add this to the doinst.sh:
# (the here-document below is package payload, run on the target system
# at install time, not during this build — keep it verbatim)
mkdir -p $PKG/install
cat <<EOINS >> $PKG/install/doinst.sh
# Update the desktop database:
if [ -x usr/bin/update-desktop-database ]; then
  chroot . /usr/bin/update-desktop-database usr/share/applications > /dev/null 2>&1
fi
# Update hicolor theme cache:
if [ -d usr/share/icons/hicolor ]; then
  if [ -x /usr/bin/gtk-update-icon-cache ]; then
    chroot . /usr/bin/gtk-update-icon-cache -f -t usr/share/icons/hicolor 1> /dev/null 2> /dev/null
  fi
fi
# Update the mime database:
if [ -x usr/bin/update-mime-database ]; then
  chroot . /usr/bin/update-mime-database usr/share/mime >/dev/null 2>&1
fi
EOINS
# Add documentation:
mkdir -p $PKG/usr/doc/$PRGNAM-$VERSION
cp -a $DOCS $PKG/usr/doc/$PRGNAM-$VERSION || true
# Ship this very build script inside the package for reference:
cat $SRCDIR/$(basename $0) > $PKG/usr/doc/$PRGNAM-$VERSION/$PRGNAM.SlackBuild
chown -R root:root $PKG/usr/doc/$PRGNAM-$VERSION
find $PKG/usr/doc -type f -exec chmod 644 {} \;
# Compress the man page(s):
if [ -d $PKG/usr/man ]; then
  find $PKG/usr/man -type f -name "*.?" -exec gzip -9f {} \;
  for i in $(find $PKG/usr/man -type l -name "*.?") ; do ln -s $( readlink $i ).gz $i.gz ; rm $i ; done
fi
# Strip binaries (if any):
find $PKG | xargs file | grep -e "executable" -e "shared object" | grep ELF \
  | cut -f 1 -d : | xargs strip --strip-unneeded 2> /dev/null || true
# Add a package description:
mkdir -p $PKG/install
cat $SRCDIR/slack-desc > $PKG/install/slack-desc
cat $SRCDIR/slack-required > $PKG/install/slack-required
# Build the package:
cd $PKG
makepkg --linkadd y --chown n $OUTPUT/${PRGNAM}-${VERSION}-${ARCH}-${BUILD}${TAG}.${PKGTYPE:-txz} 2>&1 | tee $OUTPUT/makepkg-${PRGNAM}.log
cd $OUTPUT
md5sum ${PRGNAM}-${VERSION}-${ARCH}-${BUILD}${TAG}.${PKGTYPE:-txz} > ${PRGNAM}-${VERSION}-${ARCH}-${BUILD}${TAG}.${PKGTYPE:-txz}.md5
cd -
cat $PKG/install/slack-desc | grep "^${PRGNAM}" > $OUTPUT/${PRGNAM}-${VERSION}-${ARCH}-${BUILD}${TAG}.txt
cat $PKG/install/slack-required > $OUTPUT/${PRGNAM}-${VERSION}-${ARCH}-${BUILD}${TAG}.dep
# Restore the original umask:
umask ${_UMASK_}
| true |
6c0762777533b1c475c5c7d26ad6adb9d2822567 | Shell | NVIDIA/tensorflow | /tensorflow/contrib/makefile/compile_android_protobuf.sh | UTF-8 | 5,778 | 3.546875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash -e
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Builds protobuf 3 for Android. Pass in the location of your NDK as the first
# argument to the script, for example:
# tensorflow/contrib/makefile/compile_android_protobuf.sh \
# ${HOME}/toolchains/clang-21-stl-gnu
# Pass ANDROID_API_VERSION as an environment variable to support
# a different version of API.
# Android API level to target; override by exporting ANDROID_API_VERSION.
android_api_version="${ANDROID_API_VERSION:-21}"
# Pass cc prefix to set the prefix for cc (e.g. ccache)
cc_prefix="${CC_PREFIX}"
# Print usage and exit; called on unknown flags and unsupported ABIs.
usage() {
  echo "Usage: $(basename "$0") [-a:c]"
  echo "-a [Architecture] Architecture of target android [default=armeabi-v7a] \
(supported architecture list: \
arm64-v8a armeabi armeabi-v7a armeabi-v7a-hard mips mips64 x86 x86_64)"
  echo "-c Clean before building protobuf for target"
  echo "\"NDK_ROOT\" should be defined as an environment variable."
  exit 1
}
SCRIPT_DIR=$(dirname $0)
ARCHITECTURE=armeabi-v7a
# debug options
while getopts "a:c" opt_name; do
  case "$opt_name" in
    a) ARCHITECTURE=$OPTARG;;
    c) clean=true;;
    *) usage;;
  esac
done
shift $((OPTIND - 1))
# build_helper.subr provides get_job_count and make_host_protoc (used below).
source "${SCRIPT_DIR}/build_helper.subr"
JOB_COUNT="${JOB_COUNT:-$(get_job_count)}"
if [[ -z "${NDK_ROOT}" ]]
then
  echo "You need to pass in the Android NDK location as the environment \
variable"
  echo "e.g. NDK_ROOT=${HOME}/android_ndk/android-ndk-rXXx \
tensorflow/contrib/makefile/compile_android_protobuf.sh"
  exit 1
fi
if [[ ! -f "${SCRIPT_DIR}/Makefile" ]]; then
  echo "Makefile not found in ${SCRIPT_DIR}" 1>&2
  exit 1
fi
cd "${SCRIPT_DIR}"
if [ $? -ne 0 ]
then
  echo "cd to ${SCRIPT_DIR} failed." 1>&2
  exit 1
fi
GENDIR="$(pwd)/gen/protobuf_android"
HOST_GENDIR="$(pwd)/gen/protobuf-host"
mkdir -p "${GENDIR}"
mkdir -p "${GENDIR}/${ARCHITECTURE}"
if [[ ! -f "./downloads/protobuf/autogen.sh" ]]; then
  echo "You need to download dependencies before running this script." 1>&2
  echo "tensorflow/contrib/makefile/download_dependencies.sh" 1>&2
  exit 1
fi
cd downloads/protobuf
# A host protoc matching this protobuf version is required for the
# cross-compile configure step further below.
PROTOC_PATH="${HOST_GENDIR}/bin/protoc"
if [[ ! -f "${PROTOC_PATH}" || ${clean} == true ]]; then
  # Try building compatible protoc first on host
  echo "protoc not found at ${PROTOC_PATH}. Build it first."
  make_host_protoc "${HOST_GENDIR}"
else
  echo "protoc found. Skip building host tools."
fi
echo $OSTYPE | grep -q "darwin" && os_type="darwin" || os_type="linux"
# Map the requested ABI to the NDK toolchain directory, sysroot
# architecture and cross-tool name prefix used below.
if [[ ${ARCHITECTURE} == "arm64-v8a" ]]; then
    toolchain="aarch64-linux-android-4.9"
    sysroot_arch="arm64"
    bin_prefix="aarch64-linux-android"
elif [[ ${ARCHITECTURE} == "armeabi" ]]; then
    toolchain="arm-linux-androideabi-4.9"
    sysroot_arch="arm"
    bin_prefix="arm-linux-androideabi"
elif [[ ${ARCHITECTURE} == "armeabi-v7a" ]]; then
    toolchain="arm-linux-androideabi-4.9"
    sysroot_arch="arm"
    bin_prefix="arm-linux-androideabi"
    march_option="-march=armv7-a"
elif [[ ${ARCHITECTURE} == "armeabi-v7a-hard" ]]; then
    toolchain="arm-linux-androideabi-4.9"
    sysroot_arch="arm"
    bin_prefix="arm-linux-androideabi"
    march_option="-march=armv7-a"
elif [[ ${ARCHITECTURE} == "mips" ]]; then
    toolchain="mipsel-linux-android-4.9"
    sysroot_arch="mips"
    bin_prefix="mipsel-linux-android"
elif [[ ${ARCHITECTURE} == "mips64" ]]; then
    toolchain="mips64el-linux-android-4.9"
    sysroot_arch="mips64"
    bin_prefix="mips64el-linux-android"
elif [[ ${ARCHITECTURE} == "x86" ]]; then
    toolchain="x86-4.9"
    sysroot_arch="x86"
    bin_prefix="i686-linux-android"
elif [[ ${ARCHITECTURE} == "x86_64" ]]; then
    toolchain="x86_64-4.9"
    sysroot_arch="x86_64"
    bin_prefix="x86_64-linux-android"
else
    echo "architecture ${ARCHITECTURE} is not supported." 1>&2
    usage
    exit 1
fi
echo "Android api version = ${android_api_version} cc_prefix = ${cc_prefix}"
# Put the NDK cross tools first in PATH and point the compilers at the
# per-API sysroot; configure below picks these up through CC/CXX.
export PATH=\
"${NDK_ROOT}/toolchains/${toolchain}/prebuilt/${os_type}-x86_64/bin:$PATH"
export SYSROOT=\
"${NDK_ROOT}/platforms/android-${android_api_version}/arch-${sysroot_arch}"
export CC="${cc_prefix} ${bin_prefix}-gcc --sysroot ${SYSROOT}"
export CXX="${cc_prefix} ${bin_prefix}-g++ --sysroot ${SYSROOT}"
export CXXSTL=\
"${NDK_ROOT}/sources/cxx-stl/gnu-libstdc++/4.9/libs/${ARCHITECTURE}"
./autogen.sh
if [ $? -ne 0 ]
then
  echo "./autogen.sh command failed."
  exit 1
fi
# Cross-compile a static protobuf against the NDK's gnustl, using the
# host protoc built earlier for code generation.
# NOTE(review): march_option is only set for the armeabi-v7a* ABIs; for
# every other ABI CFLAGS below expands to the empty string.
./configure --prefix="${GENDIR}/${ARCHITECTURE}" \
--host="${bin_prefix}" \
--with-sysroot="${SYSROOT}" \
--disable-shared \
--enable-cross-compile \
--with-protoc="${PROTOC_PATH}" \
CFLAGS="${march_option}" \
CXXFLAGS="-frtti -fexceptions ${march_option} \
-I${NDK_ROOT}/sources/android/support/include \
-I${NDK_ROOT}/sources/cxx-stl/gnu-libstdc++/4.9/include \
-I${NDK_ROOT}/sources/cxx-stl/gnu-libstdc++/4.9/libs/${ARCHITECTURE}/include" \
LDFLAGS="-L${NDK_ROOT}/sources/cxx-stl/gnu-libstdc++/4.9/libs/${ARCHITECTURE}" \
LIBS="-llog -lz -lgnustl_static"
if [ $? -ne 0 ]
then
  echo "./configure command failed."
  exit 1
fi
if [[ ${clean} == true ]]; then
  echo "clean before build"
  make clean
fi
make -j"${JOB_COUNT}"
if [ $? -ne 0 ]
then
  echo "make command failed."
  exit 1
fi
make install
echo "$(basename $0) finished successfully!!!"
| true |
b9681b04b309f961694a315a6f568bade5a7c24d | Shell | mexisme/autorandr | /pm-utils/40autorandr | UTF-8 | 208 | 2.859375 | 3 | [] | no_license | #!/bin/bash
#
# 40autorandr: on thaw/resume, refresh the autorandr display profile;
# every other power-management event is announced and skipped.
if [ "$1" = "thaw" ] || [ "$1" = "resume" ]; then
	/usr/local/bin/autorandr-update-display
else
	echo "Skipping $1 PM change..."
fi
| true |
bc8e0e10d487277ce70e378a3816f49384aede29 | Shell | patrickmcnamara/silverslab | /i3blocks/.config/i3blocks/time | UTF-8 | 292 | 3.46875 | 3 | [] | no_license | #!/bin/sh
# i3blocks time widget. Default output is an ISO-8601 timestamp;
# a right click (button 3) flips the /tmp/time toggle file, switching
# the output to "YYYY-MM-DD <chronos>".
TOGGLE="/tmp/time"

# Right click toggles the marker file on or off.
if [ "$BLOCK_BUTTON" = "3" ]; then
	if [ -e "$TOGGLE" ]; then
		rm "$TOGGLE"
	else
		touch "$TOGGLE"
	fi
fi

# Emit the current time in whichever mode the toggle selects.
if [ -e "$TOGGLE" ]; then
	echo "$(date "+%Y-%m-%d")" "$(chronos)"
else
	date --iso-8601=seconds
fi
| true |
9edaea8437947d4be0f1d23a652b38cce610b89c | Shell | rjain1328/haptik-assignment | /lamp.sh | UTF-8 | 1,744 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Unattended LAMP + WordPress bootstrap. All apt installs use -y so the
# script does not stop for confirmation prompts.
# installing apache
sudo apt install -y apache2
# INstalling curl
sudo apt install -y curl
# installing mysql
sudo apt install -y mysql-server
# to create mysql password
# NOTE(review): mysql_secure_installation is interactive and will block
# an unattended run — see the note at the end of this file.
sudo mysql_secure_installation
# installing php and mysql extenstion for php
sudo apt install -y php libapache2-mod-php php-mysql
# to install php5.6 need to add ppa
sudo apt-get install -y php5.6
# installing extension for php and mysql
# (-y added for consistency with every other install in this script)
sudo apt install -y libapache2-mod-php5.6 php5.6-mysql php5.6-mbstring
# restarting apache2
sudo systemctl restart apache2
# installing php extensions
sudo apt install -y php-cli php-curl php-xml
# installing php extensions
sudo apt install -y php5.6-cli php5.6-curl php5.6-xml php5.6-mbstring
# reloading apache
sudo service apache2 reload
# INstalling wget
sudo apt-get install wget -y
# Installing wordpress
sudo wget -c http://wordpress.org/latest.tar.gz
sudo tar -xzvf latest.tar.gz
sudo rsync -av wordpress/* /var/www/html/
sudo chown -R www-data:www-data /var/www/html/
#Creating database
sudo chmod -R 755 /var/www/html/
# NOTE(review): "-p=admin@12345" makes mysql use the literal password
# "=admin@12345" (everything attached to -p is the password) — confirm
# whether "-padmin@12345" or an interactive prompt was intended.
sudo mysql -p=admin@12345 -u "root" -Bse "CREATE DATABASE \`wordpress_test\`;"
sudo mysql -p=admin@12345 -u "root" -Bse "grant all on wordpress_test.* to 'test_user'@'localhost' IDENTIFIED BY 'QTfGeF&6M<PB*';"
sudo mv /var/www/html/wp-config-sample.php /var/www/html/wp-config.php
sudo sed -i 's/database_name_here/wordpress_test/g' /var/www/html/wp-config.php
sudo sed -i 's/username_here/test_user/g' /var/www/html/wp-config.php
# Bug fix: '&' must be escaped in a sed replacement — unescaped it
# re-inserts the matched text, so the wrong password was being written
# into wp-config.php.
sudo sed -i 's/password_here/QTfGeF\&6M<PB*/g' /var/www/html/wp-config.php
echo "mysqli_connect('localhost','root','admin@12345','wordpress_test')"
| true |
60a1278a5cdc31d2e3ef5ef99cb40870e9394eeb | Shell | LarsFin/API-Farm | /api_testing/run.sh | UTF-8 | 3,875 | 4.4375 | 4 | [
"MIT"
] | permissive | # !/bin/bash
# Run from 'api_testing' directory
# Takes on steps to run api tests. Script's steps are outlined below;
# > Check passed argument; should be service name to run tests against.
# > Check api_testing/newman image exists. If it does; delete it. Always build.
# > Ensure api & expectations api is running too.
# > Run postman/newman image with necessary arguments
# > Export api test results to file
# > Copy file to local machine
# > Stop and remove api testing container
echo "🚀 Starting API Testing Process!";
# Name of container running api
API_CONTAINER=$1
# Check api name argument has been provided
# NOTE(review): $API_CONTAINER is unquoted; '[ -z ]' happens to evaluate
# true when it is empty, but quoting ("$API_CONTAINER") would be safer.
if [ -z $API_CONTAINER ]
then
    echo "An api name must be provided as an argument."
    exit 1
fi
# Setup logging
DATE_TIME=$(date '+%d%m%Y%H%M')
LOGGING_FILE=./logs/${DATE_TIME}_api_test_run.log
# Create logs directory if it does not exist
# ('dir' — an ls variant — is used purely as an existence probe here)
if ! dir logs >/dev/null 2>&1
then
    echo "No ./logs directory. Creating one..."
    mkdir logs
    echo "./logs created."
fi
# Create log file
touch $LOGGING_FILE
# Delete api_testing image if it already exists
if docker image inspect api_testing/newman >>$LOGGING_FILE 2>&1
then
    echo "Removing old api testing image..."
    docker rmi api_testing/newman >>$LOGGING_FILE 2>&1
    echo "Old api testing image removed."
fi
# Build api_testing image
echo "Building api testing image..."
docker build -t api_testing/newman . >>$LOGGING_FILE 2>&1
echo "API testing image successfully created."
# Query docker for containers running on api_farm_dev network
echo "Checking docker for required containers on api_farm_dev network; '${API_CONTAINER}' and 'api_expectations'..."
DOCKER_QUERY_RESULT=$(docker ps -f status=running -f network=api_farm_dev --format "{{.Names}}")
# Ensure api container is running on network
if [[ $DOCKER_QUERY_RESULT != *$API_CONTAINER* ]]
then
    echo "No running container '$API_CONTAINER' could be found with network api_farm_dev."
    exit 1
fi
# Ensure expectations api is running on network
if [[ $DOCKER_QUERY_RESULT != *"expectations_api"* ]]
then
    echo "No running container for expectations api could be found with network api_farm_dev."
    exit 1
fi
echo "Services located."
# Define results output file name
RESULTS_FILE=${API_CONTAINER}_api_test_results
API_TESTS_CONTAINER=api_testing_$DATE_TIME
# Create results directory if it does not exist
if ! dir results >>$LOGGING_FILE 2>&1
then
    echo "No ./results directory. Creating one..."
    mkdir results
    echo "./results created."
fi
# Run api tests
echo "Running api testing image..."
docker run --network=api_farm_dev --name=$API_TESTS_CONTAINER -t api_testing/newman run API_farm.postman_collection.json \
--folder Tests -e ApiTesting.api_farm.json --env-var host=$API_CONTAINER --reporters=cli,json --reporter-json-export ${RESULTS_FILE}.json
# Record exit code of api tests
EXIT_CODE=$?
# Query docker for api tests container
DOCKER_QUERY_RESULT=$(docker ps -f status=exited --format "{{.Names}}")
# If api tests container does not exist. Fail under assumption there was an issue running the container.
if [[ $DOCKER_QUERY_RESULT != *$API_TESTS_CONTAINER* ]]
then
    echo "There was an issue running the api tests container;"
    # NOTE(review): only ${RESULTS_FILE}.json is ever written by the run
    # above — this .txt file does not appear to exist; confirm.
    cat ${RESULTS_FILE}.txt
    exit 1
fi
# Copy output file from api tests
docker cp $API_TESTS_CONTAINER:/etc/newman/${RESULTS_FILE}.json ./results/${RESULTS_FILE}.json
echo "API testing image successfully run."
# Delete api tests container
echo "Removing api testing container..."
docker rm $API_TESTS_CONTAINER >>$LOGGING_FILE 2>&1
echo "API testing container removed."
# Propagate the newman exit status as the overall result.
if [ $EXIT_CODE == 0 ]
then
    echo "Finished: All API Tests Passed ✔️"
    exit 0
else
    echo "Finished: Not All API Tests Passed ❌"
    exit 1
fi
3a7f4994632bd46bc70e972ac23619e981f62a05 | Shell | jlgull/Linux_Bash_Scripts | /EMCC-Red-Hat-Scripting-Courses/CIS239DL_Scripting_2014/Projects/Project1 | UTF-8 | 125 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# Show this month's calendar, a friendly date sentence, and who is
# logged in. Uses $(...) consistently (the original mixed in backticks)
# and a single quoted string for the sentence.
cal
day=$(date +%A)
dom=$(date +%d)
month=$(date +%B)
echo
echo "Today is $day, the ${dom}th of $month"
echo
who
| true |
88cdad3cd5678256bfe36f323456897a076cdaa6 | Shell | openoms/django-ip2tor | /scripts/host_cli.sh | UTF-8 | 4,359 | 3.953125 | 4 | [] | no_license | #!/usr/bin/env bash
#
# host_cli.sh
#
# License: MIT
# Copyright (c) 2020 The RaspiBlitz developers
# Abort on any command failure and on use of unset variables.
set -e
set -u
# Shop endpoint and this host's API credentials (used in the
# "Authorization: Token" header of every request below).
SHOP_URL="https://shop.ip2t.org"
HOST_ID="<insert_here>"
HOST_TOKEN="<insert_here>" # keep this secret!
# Helper script invoked later to add/remove bridge port forwards.
IP2TORC_CMD="./ip2torc.sh"
# command info
if [ $# -eq 0 ] || [ "$1" = "-h" ] || [ "$1" = "-help" ] || [ "$1" = "--help" ]; then
  echo "management script to fetch and process config from shop"
  echo "host_cli.sh pending"
  echo "host_cli.sh list [I|P|A|S|D]"
  echo "host_cli.sh suspended"
  exit 1
fi
# jq is required for parsing the shop's JSON responses; install on demand.
if ! command -v jq >/dev/null; then
  echo "jq not found - installing it now..."
  sudo apt-get update &>/dev/null
  sudo apt-get install -y jq &>/dev/null
  echo "jq installed successfully."
fi
###################
# FUNCTIONS
###################
function get_tor_bridges() {
  # Fetch the tor bridge list for this host from the shop API and store
  # the raw JSON in the global variable ${res} (callers read it there).
  # $1 (optional): status filter such as "P" or "S"; defaults to "all".
  # ${res} is cleared when the API returns nothing or an empty list.
  local wanted=${1-all}
  local url="${SHOP_URL}/api/v1/tor_bridges/?host=${HOST_ID}"
  if [ "${wanted}" != "all" ]; then
    url="${url}&status=${wanted}"
  fi
  res=$(curl -s -q -H "Authorization: Token ${HOST_TOKEN}" "${url}")
  if [ -z "${res///}" ] || [ "${res///}" = "[]" ]; then
    res=''
  fi
}
###########
# PENDING #
###########
if [ "$1" = "pending" ]; then
  get_tor_bridges "P" # P for pending - sets ${res}
  # Surface API error payloads: an error response is a JSON object with a
  # "detail" key (a bridge list has none). The previous code redirected
  # jq's output to /dev/null *inside* the command substitution, so
  # ${detail} was always empty and API errors were silently ignored.
  detail=$(echo "${res}" | jq -c '.detail // empty' 2>/dev/null || true)
  if [ -n "${detail}" ]; then
    echo "${detail}"
    exit 1
  fi
  # Flatten [{id, port, target}, ...] into "id|port|target" lines.
  jsn=$(echo "${res}" | jq -c '.[]|.id,.port,.target | tostring')
  active_list=$(echo "${jsn}" | xargs -L3 | sed 's/ /|/g' | paste -sd "\n" -)
  if [ -z "${active_list}" ]; then
    echo "Nothing to do"
    exit 0
  fi
  echo "ToDo List:"
  echo "${active_list}"
  echo "---"
  for item in ${active_list}; do
    b_id=$(echo "${item}" | cut -d'|' -f1)
    port=$(echo "${item}" | cut -d'|' -f2)
    target=$(echo "${item}" | cut -d'|' -f3)
    # Capture the helper's exit status directly in the 'if'. Under
    # 'set -e' the bare assignment aborted the whole script on failure,
    # which made the old '[ $? -eq 0 ]' check dead code; this way one
    # failing bridge no longer stops the remaining ones.
    if res=$("${IP2TORC_CMD}" add "${port}" "${target}"); then
      patch_url="${SHOP_URL}/api/v1/tor_bridges/${b_id}/"
      # Report success back to the shop so the bridge is marked active.
      res=$(
        curl -X "PATCH" \
          -H "Authorization: Token ${HOST_TOKEN}" \
          -H "Content-Type: application/json" \
          --data '{"status": "A"}' \
          "${patch_url}"
      )
      echo "set to active: ${b_id}"
    else
      echo "${IP2TORC_CMD} add failed for port ${port}" >&2
    fi
  done
########
# LIST #
########
elif [ "$1" = "list" ]; then
  # Optional second argument is a status filter; default is "all".
  get_tor_bridges "${2-all}"
  if [ -z "${res}" ]; then
    echo "Nothing"
    exit 0
  fi
  # Render one "port|id|status|target" line per bridge, sorted by port.
  rows=$(echo "${res}" | jq -c '.[]|.port,.id,.status,.target | tostring')
  bridge_table=$(echo "${rows}" | xargs -L4 | sed 's/ /|/g' | paste -sd "\n" -)
  echo "${bridge_table}" | sort -n
#############
# SUSPENDED #
#############
elif [ "$1" = "suspended" ]; then
  get_tor_bridges "S" # S for suspended - sets ${res}
  # Surface API error payloads (see PENDING): jq's output used to be sent
  # to /dev/null inside the substitution, so errors were never detected.
  detail=$(echo "${res}" | jq -c '.detail // empty' 2>/dev/null || true)
  if [ -n "${detail}" ]; then
    echo "${detail}"
    exit 1
  fi
  # Flatten [{id, port, target}, ...] into "id|port|target" lines.
  jsn=$(echo "${res}" | jq -c '.[]|.id,.port,.target | tostring')
  suspended_list=$(echo "${jsn}" | xargs -L3 | sed 's/ /|/g' | paste -sd "\n" -)
  if [ -z "${suspended_list}" ]; then
    echo "Nothing to do"
    exit 0
  fi
  echo "ToDo List:"
  echo "${suspended_list}"
  echo "---"
  for item in ${suspended_list}; do
    echo "Item: ${item}"
    b_id=$(echo "${item}" | cut -d'|' -f1)
    port=$(echo "${item}" | cut -d'|' -f2)
    target=$(echo "${item}" | cut -d'|' -f3)
    # Bug fixes: a stray debugging 'set -x' was left enabled here, and the
    # old success check read '$?' only after two intervening 'echo'
    # commands, so it always saw 0. Capture the helper's status directly;
    # under 'set -e' this also keeps one failed removal from aborting the
    # remaining ones.
    if res=$("${IP2TORC_CMD}" remove "${port}"); then
      echo "${res}"
      patch_url="${SHOP_URL}/api/v1/tor_bridges/${b_id}/"
      echo "now send PATCH to ${patch_url} that ${b_id} is done"
      # Report success back to the shop so the bridge is marked deleted.
      res=$(
        curl -X "PATCH" \
          -H "Authorization: Token ${HOST_TOKEN}" \
          -H "Content-Type: application/json" \
          --data '{"status": "D"}' \
          "${patch_url}"
      )
      echo "set to deleted: ${b_id}"
    else
      echo "${IP2TORC_CMD} remove failed for port ${port}" >&2
    fi
  done
else
  echo "unknown command - run with -h for help"
  exit 1
fi
| true |
cc44ca2defa990addf9c370816af137f57fe0540 | Shell | nextcloud/nextcloudpi | /install.sh | UTF-8 | 3,905 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# NextCloudPi installation script
#
# Copyleft 2017 by Ignacio Nunez Hernanz <nacho _a_t_ ownyourbits _d_o_t_ com>
# GPL licensed (see end of file) * Use at your own risk!
#
# Usage: ./install.sh
#
# more details at https://ownyourbits.com
# Branch of the nextcloudpi repository to install from (default: master).
BRANCH="${BRANCH:-master}"
#DBG=x
# Abort on errors; set DBG=x above to additionally trace every command.
set -e$DBG
# Scratch directory for the build code; cleaned up again by the trap below.
TMPDIR="$(mktemp -d /tmp/nextcloudpi.XXXXXX || (echo "Failed to create temp dir. Exiting" >&2 ; exit 1) )"
trap "rm -rf \"${TMPDIR}\"" 0 1 2 3 15
[[ ${EUID} -ne 0 ]] && {
printf "Must be run as root. Try 'sudo $0'\n"
exit 1
}
export PATH="/usr/local/sbin:/usr/sbin:/sbin:${PATH}"
# check installed software
type mysqld &>/dev/null && echo ">>> WARNING: existing mysqld configuration will be changed <<<"
type mysqld &>/dev/null && mysql -e 'use nextcloud' &>/dev/null && { echo "The 'nextcloud' database already exists. Aborting"; exit 1; }
# get dependencies
apt-get update
apt-get install --no-install-recommends -y git ca-certificates sudo lsb-release wget
# get install code
# CODE_DIR may be pre-set by the caller to reuse an existing checkout.
if [[ "${CODE_DIR}" == "" ]]; then
echo "Getting build code..."
CODE_DIR="${TMPDIR}"/nextcloudpi
git clone -b "${BRANCH}" https://github.com/nextcloud/nextcloudpi.git "${CODE_DIR}"
fi
cd "${CODE_DIR}"
# install NCP
echo -e "\nInstalling NextCloudPi..."
source etc/library.sh
# check distro
check_distro etc/ncp.cfg || {
echo "ERROR: distro not supported:";
cat /etc/issue
exit 1;
}
# indicate that this will be an image build
touch /.ncp-image
# Stage NCP configuration and templates where the runtime expects them.
mkdir -p /usr/local/etc/ncp-config.d/
cp etc/ncp-config.d/nc-nextcloud.cfg /usr/local/etc/ncp-config.d/
cp etc/library.sh /usr/local/etc/
cp etc/ncp.cfg /usr/local/etc/
cp -r etc/ncp-templates /usr/local/etc/
# Install the LAMP stack, then Nextcloud itself (helpers from library.sh).
install_app lamp.sh
install_app bin/ncp/CONFIG/nc-nextcloud.sh
run_app_unsafe bin/ncp/CONFIG/nc-nextcloud.sh
rm /usr/local/etc/ncp-config.d/nc-nextcloud.cfg # armbian overlay is ro
systemctl restart mysqld # TODO this shouldn't be necessary, but somehow it's needed in Debian 9.6. Fixme
install_app ncp.sh
run_app_unsafe bin/ncp/CONFIG/nc-init.sh
echo 'Moving data directory to a more sensible location'
df -h
mkdir -p /opt/ncdata
# Provide a temporary nc-datadir.cfg if none exists yet; remember to
# delete it again afterwards so a pre-existing config is left untouched.
[[ -f "/usr/local/etc/ncp-config.d/nc-datadir.cfg" ]] || {
should_rm_datadir_cfg=true
cp etc/ncp-config.d/nc-datadir.cfg /usr/local/etc/ncp-config.d/nc-datadir.cfg
}
DISABLE_FS_CHECK=1 NCPCFG="/usr/local/etc/ncp.cfg" run_app_unsafe bin/ncp/CONFIG/nc-datadir.sh
[[ -z "$should_rm_datadir_cfg" ]] || rm /usr/local/etc/ncp-config.d/nc-datadir.cfg
rm /.ncp-image
# skip on Armbian / Vagrant / LXD ...
# NOTE(review): CODE_DIR is assigned above whenever it starts out empty,
# so by this point it is never "" and ncp-provisioning.sh is never run
# from here — confirm this is the intended behavior.
[[ "${CODE_DIR}" != "" ]] || bash /usr/local/bin/ncp-provisioning.sh
cd -
rm -rf "${TMPDIR}"
IP="$(get_ip)"
echo "Done.
First: Visit https://$IP/ https://nextcloudpi.local/ (also https://nextcloudpi.lan/ or https://nextcloudpi/ on windows and mac)
to activate your instance of NC, and save the auto generated passwords. You may review or reset them
anytime by using nc-admin and nc-passwd.
Second: Type 'sudo ncp-config' to further configure NCP, or access ncp-web on https://$IP:4443/
Note: You will have to add an exception, to bypass your browser warning when you
first load the activation and :4443 pages. You can run letsencrypt to get rid of
the warning if you have a (sub)domain available.
"
# License
#
# This script is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This script is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this script; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
| true |
6089dd691a228e2dfe977c0b2fe1a5c88125af02 | Shell | vikeri/rn-cljs-tools | /build-ios-offline | UTF-8 | 2,338 | 3.625 | 4 | [
"MIT"
] | permissive | #! /bin/bash
# Print usage information for this build script and abort with status 1.
# -t selects the tool that produced the project (only re-natal supported),
# -a is the Xcode project/app name, -s an optional build scheme.
usage() {
echo "
Usage: $0 [-t re-natal] [-a <string>] [-s <string>]
-t Type of tool: re-natal
-a The name of your app (<APPNAME>.xcodeproj)
-s XCode build scheme, default scheme is app name (optional)
"
exit 1; }
# Parse command-line flags: -t tool type, -a app name, -s optional scheme.
while getopts ":t:a:s:" o; do
case "${o}" in
t)
TOOL=${OPTARG}
;;
a)
APPNAME=${OPTARG}
;;
s)
SCHEME=${OPTARG}
;;
*)
usage
;;
esac
done
shift $((OPTIND-1))
# Tool and app name are mandatory.
if [ -z "${APPNAME}" ] || [ -z "${TOOL}" ]; then
usage
fi
# Default the Xcode scheme to the app name.
if [ -z "${SCHEME}" ]; then
SCHEME="$APPNAME"
fi
# Build-output directory depends on the tool that generated the project.
case "$TOOL" in
re-natal)
DIR="$(pwd)/target/appios"
;;
esac
mkdir -p "$DIR"
echo $DIR
# Checks that ios-deploy is installed
if ! which ios-deploy > /dev/null ; then
echo "ios-deploy not installed"
echo "install with npm -g install ios-deploy"
exit
fi
# Checks if React Packager is already running
if curl -s --head http://localhost:8081/debugger-ui | head -n 1 | grep "HTTP/1.[01] [23].." > /dev/null; then
echo "RN Packager running, exit it first"
exit
fi
# Builds cljs
case "$TOOL" in
re-natal)
lein clean
lein with-profile prod cljsbuild once ios
;;
esac
# Enables local js code location
# (toggle the jsCodeLocation lines in AppDelegate.m so the app loads the
# bundled JS instead of the packager; these sed patterns must match the
# re-natal template byte for byte)
sed -i '' 's/ \/\/ jsCodeLocation = \[\[NSBundle / jsCodeLocation \= \[\[NSBundle /g' ios/"$APPNAME"/AppDelegate.m
sed -i '' 's/ jsCodeLocation = \[NSURL / \/\/jsCodeLocation \= \[NSURL /g' ios/"$APPNAME"/AppDelegate.m
# Builds project
xcodebuild -project ios/"$APPNAME".xcodeproj -scheme "$SCHEME" -destination generic/platform=iOS build CONFIGURATION_BUILD_DIR="$DIR" &&
# Installs app on device
ios-deploy --bundle "$DIR"/"$APPNAME".app &&
# If terminal-notifier is installed, fire a notification
if which terminal-notifier; then
terminal-notifier -message "App built to iOS Device" -sound default
fi
# Restore code location to be served from server
sed -i '' 's/ jsCodeLocation \= \[\[NSBundle / \/\/ jsCodeLocation = \[\[NSBundle /g' ios/"$APPNAME"/AppDelegate.m
sed -i '' 's/ \/\/jsCodeLocation \= \[NSURL / jsCodeLocation = \[NSURL /g' ios/"$APPNAME"/AppDelegate.m
# Things do clean up after build
case "$TOOL" in
re-natal)
re-natal use-figwheel
;;
esac
| true |
6938f361b91a079ed07e7380f97e6baad92ad8ba | Shell | sudharsan-selvaraj/selenium.sh | /examples/scripts/google_search.sh | UTF-8 | 674 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | . "$(cd "$(dirname "${BASH_SOURCE[0]}")/../../src/" &>/dev/null && pwd)/selenium.sh"
# Ensure the matching chromedriver binary is available before the demo runs.
webdrivermanager --browser "chrome"
# Demo scenario using the selenium.sh DSL sourced above: open Google, type
# a query, then show how the framework's try/catch reports a missing
# element. ChromeDriver/by_name/try/catch etc. come from selenium.sh.
function main() {
local driver=$(ChromeDriver)
$driver.get "https://www.google.com"
local search_input=$($driver.find_element "$(by_name "q")")
$search_input.send_keys "TestNinja"
echo "Entered search string: $($driver.execute_script "return arguments[0].value" "$($search_input.get_element)")"
try
(
begin
$driver.find_element "$(by_name "invalid name")"
)
catch || {
case $exception_code in
$NO_SUCH_ELEMENT_EXCEPTION)
echo "No such element found ${exit_code}"
;;
esac
}
$driver.quit
}
main
| true |
760f8c4ba61e04217e995f2d080346ee4c20eb57 | Shell | kbatten/dime | /pennies/package_install/package_install | UTF-8 | 521 | 3.765625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Check that every named package is installed according to dpkg.
# Arguments: package names to verify.
# Returns:   0 if all packages are installed, 1 otherwise.
# Fixes: "$@" / "$package" are now quoted so names survive word
# splitting, and the intermediate 'r=$?; [[ ! $r -eq 0 ]]' juggling is
# replaced by a direct 'if !' on the dpkg call.
function _package_install_verify {
    local retval=0
    local package
    for package in "$@" ; do
        if ! dpkg -s "$package" ; then
            retval=1
        fi
    done
    return ${retval}
}
# Install each named package with apt-get, then verify the installation.
# Arguments: package names to install.
# Returns:   0 if every package is installed afterwards, 1 otherwise
#            (status comes from _package_install_verify).
# Fixes: "$@" / "$package" are now quoted, and the unused 'local r' from
# the original implementation is removed.
function _package_install {
    local retval=0
    local package
    for package in "$@" ; do
        sudo apt-get install -y "$package"
    done
    # verify installation
    _package_install_verify "$@"
    retval=$?
    return ${retval}
}
| true |
45f20b05bf32cadac7ecb986949cf427d91b871e | Shell | san39320/lab | /3/unix/ass5/p4.sh | UTF-8 | 177 | 2.796875 | 3 | [] | no_license | i=1
# Busy-loop in the background: repeatedly print $i followed by a
# backspace. i is set once (on the line above) and never incremented,
# so this just keeps redrawing the same digit.
while true
do
echo -ne "$i\b"
done &
pid=$! #gives pid of recent process
# Show the process table while the background job is still alive.
ps
# Terminate the background loop, then print its PID and the process
# table again to confirm it is gone.
kill $pid
echo "$pid"
ps # wait waits untill program is executed completely
| true |
a8dcb363b3f618dc4041d9827375c699a5d790c7 | Shell | zimmicz/vozejkmap-to-postgis | /vozejkmap.sh | UTF-8 | 2,097 | 3.390625 | 3 | [
"MIT"
] | permissive | #!/bin/bash -
#title :vozejkmap.sh
#description :This script will download data from vozejkmap.cz and load it into given database.
#author :zimmicz
#date :20141202
#version :0.1
#usage :bash vozejkmap.sh
#bash_version :4.3.11(1)-release
#==============================================================================
# First run: interactively collect API key, DB credentials and output path
# into a .settings file; later runs just source that file.
SETTINGS=.settings
if [[ ! -f .settings ]]; then
touch $SETTINGS
echo -n "API key (contact info@coex.cz if you don't have one):"
read KEY
echo KEY=$KEY > $SETTINGS
echo -n "Database username:"
read USER
echo USER=$USER >> $SETTINGS
echo -n "Database:"
read DB
echo DB=$DB >> $SETTINGS
echo -n "Output path:"
read OUT
echo OUT=$OUT >> $SETTINGS
else
source $SETTINGS
echo $USER
fi
# Refresh the cached download when it is missing or older than one day;
# the sed/echo pair puts every "},{" record boundary on its own line and
# strips the trailing newline so later processing sees one record per line.
if [ -f /tmp/locations.json ] # file exists
then
if test `find "/tmp/locations.json" -mmin +1440` # file is older than 1 day
then
wget http://www.vozejkmap.cz/opendata/locations.json -O /tmp/locations.json
sed -i 's/\},{/\n},{/g' /tmp/locations.json
echo -en "$(cat /tmp/locations.json)" > /tmp/locations.json
fi
else # file does not eixst
wget http://www.vozejkmap.cz/opendata/locations.json?key=$KEY -O /tmp/locations.json
sed -i 's/\},{/\n},{/g' /tmp/locations.json
echo -en "$(cat /tmp/locations.json)" > /tmp/locations.json
fi
touch $OUT/data.json
# Load the raw data via vozejkmap.sql, then export it as one GeoJSON
# FeatureCollection (the -tA flags give tuples-only, unaligned output).
psql -h localhost -U $USER -d $DB -f vozejkmap.sql
psql -tA -h localhost -U $USER -d $DB -c "SELECT row_to_json(fc)
FROM (
SELECT 'FeatureCollection' AS type,
array_to_json(array_agg(f)) AS features
FROM (SELECT 'Feature' AS type,
ST_AsGeoJSON(lg.geom)::json As geometry,
row_to_json((SELECT l FROM (SELECT id, title, location_type, description, author_name, attr1, attr2, attr3) AS l
)) AS properties
FROM vozejkmap.vozejkmap AS lg ) AS f ) AS fc" | cat - > $OUT/data.json
# Wrap the GeoJSON in a JS variable assignment for the map page.
echo "var data = " | tr -d "\n" > ./map/data/temp
cat $OUT/data.json >> ./map/data/temp
rm -rf $OUT/data.json
mv ./map/data/temp ./map/data/data.json
| true |
27d627c58d70182682b1aaf1e81a0014ce44b4af | Shell | icehacker33/diskUsageMon | /script/diskUsageMon.sh | UTF-8 | 1,398 | 4.125 | 4 | [] | no_license | #!/bin/bash
#
# Date: 2014-06-12
# Author: Daniel Zapico
# Desc: Script monitor disk usage and send alerts
# if it goes over the alert's limit
#_____________________________________________________
# Resolve the script's real directory even when invoked via a symlink:
# the awk program exits 1 when 'ls -l' shows a link ("l" mode bit).
LSSCRIPT="$(ls -l $0)"
echo "$LSSCRIPT" | awk '$1~/^l/{ecode="1";exit}{ecode="0";exit}END{exit ecode}'
if [[ "$?" == "1" ]]; then
# Symlink case: in 'ls -l' output, $(NF-2) is the link path and $NF the
# link target ("link -> target").
LINKDIR="$(echo $LSSCRIPT | awk '{print $(NF-2)}' | xargs dirname)"
cd "$LINKDIR"
SDIR="$(echo $LSSCRIPT | awk '{print $NF}' | xargs dirname)"
cd "$SDIR"
SDIR="$(pwd)"
TOOLDIR="$(dirname $SDIR)"
else
# Script and Tool directories
TOOLDIR="$(cd $(dirname $0) && cd .. && pwd)"
SDIR="$(cd $(dirname $0) && pwd)"
# Change current directory to work directory
cd "$SDIR"
fi
# Get script's name
SNAME="$(basename $0)"
# Get the user name
USER="$(id | nawk '{sub(/^.*\(/,"",$1); sub(/\).*$/,""); print}')"
# Get hostname
HOSTNAME="$(hostname)"
# Date and time
DATENTIME="$(date "+%Y%m%d%H%M%S")"
# Get Exec-line
EXECLINE="$0 $@"
# Includes
# These sourced files provide the _* helper functions and the
# FSSIZEWARNING / DEFAULTEXITCODE configuration used below.
. "$SNAME.props"
. "$SNAME.exitcodes"
. "$SNAME.functions"
# Create directories if they don't exist
_CreateDirStructure
#################### Main ####################
_ParseInputParams $@
_Print2Log "+++ $SNAME Start +++"
_GetRootFSUsage
# Check if FSSIZEWARNING has been reached
# NOTE(review): '>' inside [[ ]] compares lexicographically, not
# numerically (e.g. "9" > "10"); if FSUSE/FSSIZEWARNING are plain numbers
# this should probably be -gt — confirm against the sourced config
# before changing.
if [[ "$FSUSE" > "$FSSIZEWARNING" ]]; then
_SendEmail
fi
_Print2Log "--- $SNAME End ---"
_Exit "$DEFAULTEXITCODE"
| true |
f6889db78f044f26d341b7e83ab02741079ed4bb | Shell | redpony/cdec | /corpus/support/utf8-normalize.sh | UTF-8 | 1,134 | 3.640625 | 4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | #!/bin/bash
# this is the location on malbec, if you want to run on another machine
# ICU may be installed in /usr or /usr/local
ICU_DIR=/usr0/tools/icu
UCONV_BIN=$ICU_DIR/bin/uconv
UCONV_LIB=$ICU_DIR/lib
# Pick the normalization command: prefer the local ICU install, then a
# uconv found on PATH, and as a last resort iconv (no NFKC folding).
if [ -e "$UCONV_BIN" ] && [ -d "$UCONV_LIB" ]
then
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$UCONV_LIB
    if [ ! -x "$UCONV_BIN" ]
    then
        echo "$0: Cannot execute $UCONV_BIN! Please fix." 1>&2
        # Bug fix: bare 'exit' returned the status of the preceding echo
        # (0), so callers saw success on this error path.
        exit 1
    fi
    CMD="$UCONV_BIN -f utf8 -t utf8 -x Any-NFKC --callback skip"
else
    # 'command -v' replaces the non-portable external 'which'.
    if command -v uconv > /dev/null
    then
        CMD="uconv -f utf8 -t utf8 -x Any-NFKC --callback skip"
    else
        echo "$0: Cannot find ICU uconv (http://site.icu-project.org/) ... falling back to iconv. Quality may suffer." 1>&2
        CMD="iconv -f utf8 -t utf8 -c"
    fi
fi
# Normalize stdin to stdout: fix line endings, run the converter, then
# collapse control characters and squeeze/trim spaces line by line.
if [[ $# == 1 && $1 == "--batchline" ]]; then
    perl $(dirname $0)/utf8-normalize-batch.pl "$CMD"
else
    perl -e '$|++; while(<>){s/\r\n*/\n/g; print;}' \
        |$CMD \
        |/usr/bin/perl -w -e '
            $|++;
            while (<>) {
                chomp;
                s/[\x00-\x1F]+/ /g;
                s/ +/ /g;
                s/^ //;
                s/ $//;
                print "$_\n";
            }'
fi
| true |
666484d43c8c3ef8bdc32f24a08a0a6d734a148f | Shell | tejasv694/Plesk | /parallels/pool/PSA_18.0.25_2802/examiners/disk_space_check.sh | UTF-8 | 4,370 | 3.25 | 3 | [] | no_license | #!/bin/sh
### Copyright 1999-2017. Plesk International GmbH. All rights reserved.
# Required values below are in MB. See 'du -cs -BM /*' and 'df -Pm'.
required_disk_space_cloudlinux6()
{
case "$1" in
/opt) echo 450 ;;
/usr) echo 2550 ;;
/var) echo 600 ;;
/tmp) echo 100 ;;
esac
}
required_disk_space_cloudlinux7()
{
case "$1" in
/opt) echo 300 ;;
/usr) echo 2400 ;;
/var) echo 600 ;;
/tmp) echo 100 ;;
esac
}
required_disk_space_centos6()
{
case "$1" in
/opt) echo 250 ;;
/usr) echo 2200 ;;
/var) echo 600 ;;
/tmp) echo 100 ;;
esac
}
required_disk_space_centos7()
{
case "$1" in
/opt) echo 450 ;;
/usr) echo 2250 ;;
/var) echo 600 ;;
/tmp) echo 100 ;;
esac
}
required_disk_space_centos8()
{
case "$1" in
/opt) echo 480 ;;
/usr) echo 2500 ;;
/var) echo 600 ;;
/tmp) echo 100 ;;
esac
}
required_disk_space_vzlinux7()
{
required_disk_space_centos7 "$1"
}
required_disk_space_redhat6()
{
required_disk_space_centos6 "$1"
}
required_disk_space_redhat7()
{
required_disk_space_centos7 "$1"
}
required_disk_space_redhat8()
{
required_disk_space_centos8 "$1"
}
required_disk_space_debian8()
{
case "$1" in
/opt) echo 1500 ;;
/usr) echo 2800 ;;
/var) echo 600 ;;
/tmp) echo 100 ;;
esac
}
required_disk_space_debian9()
{
case "$1" in
/opt) echo 1500 ;;
/usr) echo 2800 ;;
/var) echo 600 ;;
/tmp) echo 100 ;;
esac
}
required_disk_space_ubuntu16()
{
case "$1" in
/opt) echo 900 ;;
/usr) echo 1800 ;;
/var) echo 600 ;;
/tmp) echo 100 ;;
esac
}
required_disk_space_ubuntu18()
{
case "$1" in
/opt) echo 900 ;;
/usr) echo 1800 ;;
/var) echo 600 ;;
/tmp) echo 100 ;;
esac
}
required_update_upgrade_disk_space()
{
case "$1" in
/opt) echo 100 ;;
/usr) echo 300 ;;
/var) echo 600 ;;
/tmp) echo 100 ;;
esac
}
[ -z "$PLESK_INSTALLER_DEBUG" ] || set -x
[ -z "$PLESK_INSTALLER_STRICT_MODE" ] || set -e
platform()
{
local distrib="unknown"
local version=""
if [ -e /etc/debian_version ]
then
if [ -e /etc/lsb-release ]
then
. /etc/lsb-release
distrib="$DISTRIB_ID"
version="$DISTRIB_RELEASE"
else
distrib="debian"
version="$(head -n 1 /etc/debian_version)"
fi
elif [ -e /etc/redhat-release ]
then
local header="$(head -n 1 /etc/redhat-release)"
case "$header" in
Red\ Hat*) distrib="redhat" ;;
CentOS*) distrib="centos" ;;
CloudLinux*) distrib="cloudlinux" ;;
Virtuozzo*) distrib="vzlinux" ;;
*)
distrib="$(echo $header | awk '{ print $1 }')"
;;
esac
version="$(echo $header | sed -e 's/[^0-9]*\([0-9.]*\)/\1/g')"
fi
echo "${distrib}${version%%.*}" | tr "[:upper:]" "[:lower:]"
}
# @param $1 target directory
mount_point()
{
df -Pm $1 | awk 'NR==2 { print $6 }'
}
# @param $1 target directory
available_disk_space()
{
df -Pm $1 | awk 'NR==2 { print $4 }'
}
# @param $1 target directory
# @param $2 mode (install/upgrade/update)
required_disk_space()
{
if [ "$2" != "install" ]; then
required_update_upgrade_disk_space "$1"
return
fi
local p="$(platform)"
local f="required_disk_space_$p"
case "$(type $f 2>/dev/null)" in
*function*)
$f "$1"
;;
*)
echo "There are no requirements defined for $p." >&2
echo "Disk space check cannot be performed." >&2
exit 1
;;
esac
}
human_readable_size()
{
echo "$1" | awk '
function human(x) {
s = "MGTEPYZ";
while (x >= 1000 && length(s) > 1) {
x /= 1024; s = substr(s, 2);
}
# 0.05 below will make sure the value is rounded up
return sprintf("%.1f %sB", x + 0.05, substr(s, 1, 1));
}
{ print human($1); }'
}
# @param $1 target directory
# @param $2 required disk space
check_available_disk_space()
{
local available=$(available_disk_space "$1")
if [ "$available" -lt "$2" ]
then
echo "There is not enough disk space available in the $1 directory." >&2
echo "You need to free up `human_readable_size $(($2 - $available))`." >&2
exit 2
fi
}
# @param $1 mode (install/upgrade/update)
check_disk_space()
{
local mode="$1"
local shared=0
for target_directory in /opt /usr /var /tmp
do
local required=$(required_disk_space "$target_directory" "$mode")
[ -n "$required" ] || exit 1
if [ "$(mount_point $target_directory)" = "/" ]
then
shared=$(($shared + $required))
else
check_available_disk_space $target_directory $required || exit 2
fi
done
check_available_disk_space "/" $shared
}
check_disk_space "$1"
| true |
2639860b1f47bab6797743afe48b6bdd8d4602b1 | Shell | ci2c/code | /scripts/tanguy/back/label2volandflip.sh | UTF-8 | 3,075 | 3.375 | 3 | [] | no_license | #! /bin/bash
if [ $# -lt 6 ]
then
echo ""
echo "Usage: label2volandflip.sh -dir <DATA_DIR> -label <LABEL> -o <OUTPUT_DIR>"
echo " input : right label"
echo " output : right and left volumes"
echo ""
echo "-dir : Data Dir : directory containing the label"
echo ""
echo "-label : label - name like 'name.label'"
echo ""
echo "-o : Output Dir"
echo ""
echo "Usage: label2volandflip.sh -dir <DATA_DIR> -label <LABEL> "
echo ""
echo "Author: Tanguy Hamel - CHRU Lille - 2013"
echo ""
exit 1
fi
index=1
while [ $index -le $# ]
do
eval arg=\${$index}
case "$arg" in
-h|-help)
echo ""
echo "Usage: label2volandflip.sh -dir <DATA_DIR> -label <LABEL> -o <OUTPUT_DIR>"
echo ""
echo "-dir : Data Dir : directory containing the label"
echo ""
echo "-label : label"
echo ""
echo "-o : Output Dir"
echo ""
echo "Usage: label2volandflip.sh -dir <DATA_DIR> -label <LABEL> "
echo ""
echo "Author: Tanguy Hamel - CHRU Lille - 2013"
echo ""
exit 1
;;
-dir)
DIR=`expr $index + 1`
eval DIR=\${$DIR}
echo "DIR : $DIR"
;;
-label)
LABEL=`expr $index + 1`
eval LABEL=\${$LABEL}
echo "LABEL : $LABEL"
;;
-o)
OUTDIR=`expr $index + 1`
eval OUTDIR=\${$OUTDIR}
echo "OUTDIR : $OUTDIR"
;;
esac
index=$[$index+1]
done
name=${LABEL%.*}
model="/home/global/freesurfer/mni/bin/../share/mni_autoreg/average_305.mnc"
#passage du label droit en volume .nii
echo "mri_label2vol --label $DIR/$LABEL --o $DIR/$name.nii --identity --temp $model"
mri_label2vol --label $DIR/$LABEL --o $DIR/$name.nii --identity --temp $model
$DIR/$LABEL --o $OUTDIR/$name.nii --identity --temp $model
#flip pour obtenir le label gauche
matlab -nodisplay <<EOF
% Load Matlab Path
p = pathdef;
addpath(p);
cd $DIR
reorient_mirrorRL('$OUTDIR/$name.nii')
EOF
#rectification de l'orientation
name_gauche=${name%_*}
mri_convert $OUTDIR/flip_$name.nii $OUTDIR/${name_gauche}_gauche.nii --out_orientation LAS
mri_convert $OUTDIR/$name.nii $OUTDIR/$name.nii --out_orientation LAS
rm -f $OUTDIR/flip_$name.nii
#création du masque du template
#condition sur existence dossier Mask -> création
if [ -d $DIR/../Mask ]
then
echo ""
else
mkdir $DIR/../Mask
fi
#condition sur présence du template.nii
if [ -e $DIR/../Mask/template.nii ]
then
echo "template file present"
else
mri_convert $model $DIR/../Mask/template.nii --out_orientation LAS
fi
#condition sur existence masque template -> run
if [ -e $DIR/../Mask/template_mask.nii ]
then
echo "template mask present"
else
mask_way=${model%/*}
mri_convert $mask_way/average_305_mask.mnc $DIR/../Mask/template_mask.nii --out_orientation LAS
fi
#application du masque du template
fslmaths $OUTDIR/$name.nii -mas $DIR/../Mask/template_mask.nii $OUTDIR/$name.nii
fslmaths $OUTDIR/${name_gauche}_gauche.nii -mas $DIR/../Mask/template_mask.nii $OUTDIR/${name_gauche}_gauche.nii
rm -f $OUTDIR/$name.nii $OUTDIR/${name_gauche}_gauche.nii
gunzip $OUTDIR/$name.nii.gz $OUTDIR/${name_gauche}_gauche.nii.gz
| true |
a3a31e2d58a4d61396b6f796f558312056f73489 | Shell | Scrembetsch/GameOfLife | /EPR.sh | UTF-8 | 1,178 | 3.25 | 3 | [] | no_license | #!/bin/bash
#$1 Gol.exe
#$2 UID
#$3 Inputfile-Folder
#$4 Threads
#echo "Removing Folder 'EPR_Output'"
#rm -r EPR_Output
echo "Creating Folder 'EPR_Output"
mkdir EPR_Output
mkdir EPR_Output/OutputFiles
for i in 1000 2000 3000 4000 5000 6000 7000 8000 9000 10000
do
echo "Running File '$i', Mode=SEQ"
./$1 --load $3/random"$i"_in.gol --save EPR_Output/OutputFiles/"$2"_"$i"_cpu_out.gol --generations 250 --measure --mode seq >> EPR_Output/"$2"_cpu_time.csv
echo "Running File '$i', Mode=OMP, Threads=$4"
./$1 --load $3/random"$i"_in.gol --save EPR_Output/OutputFiles/"$2"_"$i"_openmp_out.gol --generations 250 --measure --mode omp --threads $4 >> EPR_Output/"$2"_openmp_time.csv
echo "Running File '$i', Mode=OCL, Device=CPU"
./$1 --load $3/random"$i"_in.gol --save EPR_Output/OutputFiles/"$2"_"$i"_opencl_cpu_out.gol --generations 250 --measure --mode ocl --device cpu >> EPR_Output/"$2"_opencl_cpu_time.csv
echo "Running File '$i', Mode=OCL, Device=GPU"
./$1 --load $3/random"$i"_in.gol --save EPR_Output/OutputFiles/"$2"_"$i"_opencl_gpu_out.gol --generations 250 --measure --mode ocl --device gpu >> EPR_Output/"$2"_opencl_gpu_time.csv
done
| true |
6e5215b1bed97c7dba4993acd64dc393eb39de30 | Shell | gorometala/panda | /src/scripts/terraform/init.sh | UTF-8 | 1,298 | 3.8125 | 4 | [
"Unlicense"
] | permissive | #!/bin/bash
# exit the script if any statement returns a non-true return value
set -e
if [[ "${DEBUG}" = true ]]; then
# expands variables and prints a little + sign before the line
set -x
fi
# if no storage account was defined
if [[ -z "${ARM_TERRAFORM_BACKEND_RESOURCE_GROUP}" ]]; then
echo ""
echo "Initializing terraform without a backend..."
# initialize terraform
terraform init -backend=false
else
echo ""
echo "Initializing terraform with Azure backend..."
[ -z "$ARM_TERRAFORM_BACKEND_RESOURCE_GROUP" ] && echo "Need to set ARM_TERRAFORM_BACKEND_RESOURCE_GROUP" && exit 1;
[ -z "$ARM_TERRAFORM_BACKEND_STORAGE_ACCOUNT" ] && echo "Need to set ARM_TERRAFORM_BACKEND_STORAGE_ACCOUNT" && exit 1;
[ -z "$ARM_TERRAFORM_BACKEND_CONTAINER" ] && echo "Need to set ARM_TERRAFORM_BACKEND_CONTAINER" && exit 1;
[ -z "$ARM_TERRAFORM_BACKEND_KEY" ] && echo "Need to set ARM_TERRAFORM_BACKEND_KEY" && exit 1;
# initialize terraform with Azure backend
terraform init \
-backend-config="resource_group_name=${ARM_TERRAFORM_BACKEND_RESOURCE_GROUP}" \
-backend-config="storage_account_name=${ARM_TERRAFORM_BACKEND_STORAGE_ACCOUNT}" \
-backend-config="container_name=${ARM_TERRAFORM_BACKEND_CONTAINER}" \
-backend-config="key=${ARM_TERRAFORM_BACKEND_KEY}"
fi
| true |
29d60250aeb3079a6fee3bcf9c70894acb459e87 | Shell | tmannherz/docker-mongo-gce-backup | /db_backup.sh | UTF-8 | 898 | 3.921875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# MongoDB backup to GCE
set -e
echo "--------------------------------"
# variables
BACKUP_DIR=/backup
DB_HOST="$MONGO_HOST"
DB_NAME="$MONGO_DATABASE"
BUCKET_NAME="$GS_BACKUP_BUCKET"
DATE=$(date +"%Y-%m-%d")
FILE=$DB_NAME_$DATE.archive.gz
cd $BACKUP_DIR
if [ -z "$DB_HOST" ]; then
echo "DB_HOST is empty."
exit 1
fi
if [ -z "$DB_NAME" ]; then
echo "DB_NAME is empty."
exit 1
fi
if [ -z "$BUCKET_NAME" ]; then
echo "BUCKET_NAME is empty."
exit 1
fi
echo "Creating the MongoDB archive"
mongodump -h "$DB_HOST" -d "$DB_NAME" --gzip --archive="$FILE"
# push to GCE
echo "Copying $BACKUP_DIR/$FILE to gs://$BUCKET_NAME/mongo/$FILE"
/root/gsutil/gsutil cp $FILE gs://"$BUCKET_NAME"/mongo/$FILE 2>&1
# remove all old backups from the server over 3 days old
echo "Removing old backups."
find ./ -name "*.archive.gz" -mtime +2 -exec rm {} +
echo "Backup complete."
| true |
b9d5b432b5bcd845c9d2dc7017f4f97a5ac16229 | Shell | petrvokac/meltools | /melbash/dogpl.sh | UTF-8 | 157 | 2.75 | 3 | [] | no_license | #!/bin/bash
# run gnuplot for all gpl scripts in the directory
# Petr Vokac, UJV Rez, a.s.
# 2.10.2017
for f in *.gpl
do
echo $f
gnuplot $f
done
echo "done"
| true |
2b7006e40f9e9ac3ed746a5f4d6188b298b8826c | Shell | kzsh/aws-minecraft | /bin/terraform.sh | UTF-8 | 1,432 | 3.734375 | 4 | [] | no_license | #!/bin/bash
#IS_DEBUG=1
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ROOT_DIR="$SCRIPT_DIR/.."
CONFIG_FILE="$ROOT_DIR/config.yml"
SCRIPTS_DIR="$ROOT_DIR/scripts"
TERRAFORM_DIR="$ROOT_DIR/terraform"
TERRAFORM_STATE_DIR="$ROOT_DIR/output/terraform/minecraft.tfstate"
. "$SCRIPTS_DIR/util.sh"
function terraform_apply() {
_send_terraform_command "apply"
}
function terraform_destroy() {
_send_terraform_command "destroy"
}
function _send_terraform_command() {
AWS_PROFILE=$(config_value "aws_profile") terraform "$1" -var "aws_region=$(config_value "aws_region")" -var "ssh_key_path=$(config_value "ssh_key_path")" -var "aws_profile=$(config_value "aws_profile")" -state="$TERRAFORM_STATE_DIR" -state-out="$TERRAFORM_STATE_DIR" "$TERRAFORM_DIR"/operations
}
function usage() {
echo_info "Terraform wrapper:
Terraform:
'apply'
'destroy'"
}
function perform_operation() {
OPERATION="$1"
case "$OPERATION" in
'apply')
begin_section "Deploying infrastructure changes to EC2"
terraform_apply
end_section
;;
'destroy')
begin_section "Destroying all project-related infrastructure on EC2"
terraform_destroy
end_section
;;
*)
usage
;;
esac
}
config_value() {
local key="$1"
#echo "$(get_config_value "$key" "$CONFIG_FILE")"
get_config_value "$key" "$CONFIG_FILE"
}
main() {
perform_operation "$@"
}
main "$@"
| true |
8fa0b30022eb1bb3aaa4b5a3746764c35c821c55 | Shell | yorlysoro/Bash | /funciones2.sh | UTF-8 | 181 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Print a fixed greeting. The variable here is declared 'local', so it
# shadows — and does not modify — the caller's global of the same name.
function saludo {
    local guest="Jose Antonio"
    echo "Hola señor $guest encantado de conocerle"
}
# The global 'nombre' is untouched by the function's local of the same
# name, which is what the final echo demonstrates.
nombre="juana"
saludo
echo "En el script principal, mi nombre es $nombre"
| true |
80d4f0da0e0aa340a77d21ab618063b35f83d0a2 | Shell | kisamewave/bash | /catch_ora_during_sql.sh | UTF-8 | 282 | 3.15625 | 3 | [] | no_license | #!/usr/bin/bash
sqlplus -s <<-! > sql_results.csv 2>&1
$1
select * from test11;
exit
!
### Check the output file for Oracle errors.
grep 'ORA-' sql_results.out > /dev/null 2>&1
if [ $? -eq 0 ] ; then
echo "`date` - Oracle error: Check sql_results.out" >> log_file.txt
exit 1
fi
| true |
699a0faac0bbb079ad386c0455f44fc5b8a87c9e | Shell | DaneelOlivaw1/Operating-System | /SourceCode/run.sh | UTF-8 | 179 | 2.625 | 3 | [] | no_license | #!/bin/bash
echo "编译:"
echo $(make)
echo "连接中...."
cd obj
if [ -f "a" ];then
rm a
fi
f=$(ls)
echo $(g++ $f -o a)
echo $(rm *.o)
echo "运行结果:"
echo $(./a)
| true |
e37ab3b7d2696b7ef937d092c662c99875da30d0 | Shell | Kingroyal98/solaris-scripts | /pdate.ksh | UTF-8 | 2,451 | 4.125 | 4 | [] | no_license | #!/usr/bin/ksh
#
# Name : pdate.ksh
# Purpose : Get the previous system date
# This script handles leap year properly and is Y2K compliant
# Author : Wyatt Wong
# E-mail : wyattwong@yahoo.com
# Last Modified: 28-Nov-2003
DATE='/usr/bin/date'
DEBUG=0 # Used for testing the pdate.ksh script
if [ ${DEBUG} -ne 0 ]
then
YEAR=2001 # YEAR - for DEBUG use
MONTH=11 # MONTH - for DEBUG use
DAY=24 # DAY - for DEBUG use
if [ ${MONTH} -lt 10 ]; then MONTH=0${MONTH}; fi
if [ ${DAY} -lt 10 ]; then DAY=0${DAY}; fi
else
YEAR=`${DATE} '+%Y'` # Get System Year
MONTH=`${DATE} '+%m'` # Get System Month
DAY=`${DATE} '+%d'` # Get System Day
fi
# Start of main program
((DAY=DAY-1)) # Decrement DAY by 1
# Perform adjustment to DAY, MONTH or YEAR if necessary
if [ ${DAY} -eq 0 ] # Check if prev DAY wrap to the end of prev month
then
((MONTH=MONTH-1)) # Decrement MONTH by 1
case ${MONTH} in
0) DAY=31 MONTH=12 # MONTH is 0, set the date to 31 Dec of prev year
((YEAR = YEAR - 1));; # and decrement YEAR by 1
1|3|5|7|8|10|12) DAY=31;; # Set DAY to 31 for large months
4|6|9|11) DAY=30;; # Set DAY to 30 for small months
2) DAY=28 # Set DAY to 28 for Feb and check for leap year
((R1 = ${YEAR}%4)) # Get remainder of YEAR / 4
((R2 = ${YEAR}%100)) # Get remainder of YEAR / 100
((R3 = ${YEAR}%400)) # Get remainder of YEAR / 400
# A leap year is:
# EITHER divisible by 4 and NOT divisible by 100
# OR divisible by 400
#
# So there are 97 leap years in 400 years period, century years
# that are not divisible by 400 are NOT leap years.
if [ ${R1} -eq 0 -a ${R2} -ne 0 -o ${R3} -eq 0 ]
then
((DAY=DAY+1)) # Add 1 day to February if it is a leap year
fi;;
esac
# Prepend a ZERO if MONTH < 10 after it was decremented by 1
#
# Note that if there is no calculation on MONTH, it will have 2 digits
# such as 01, 02, 03, etc. So there is no need to prepend ZERO to it
if [ ${MONTH} -lt 10 ]
then
MONTH=0${MONTH}
fi
# Prepend a ZERO if DAY < 10 after it was decremented by 1
elif [ ${DAY} -lt 10 ]
then
DAY=0${DAY}
fi
print ${YEAR}${MONTH}${DAY} | true |
542451d40ab77e47d67ce8c3d9baa935e7ec0616 | Shell | alykhank/dotfiles | /script/defaults | UTF-8 | 1,332 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
function write() {
domain="$1"
key="$2"
type="$3"
value="$4"
defaults write "${domain}" "${key}" "${type}" "${value}"
}
# global_write KEY TYPE VALUE
# Convenience wrapper: writes preference KEY of TYPE to VALUE in the global
# preferences domain (NSGlobalDomain).
global_write() {
  write NSGlobalDomain "$1" "$2" "$3"
}
# General
# Keyboard > Shortcuts > Full Keyboard Access: All controls
global_write AppleKeyboardUIMode -int 2
# Keyboard > Keyboard > Delay Until Repeat: Short
global_write InitialKeyRepeat -int 25
# Keyboard > Keyboard > Key Repeat: Fast
global_write KeyRepeat -int 2
# Ask to keep changes when closing documents
global_write NSCloseAlwaysConfirmsChanges -bool TRUE
# Close windows when quitting an app
global_write NSQuitAlwaysKeepsWindows -bool TRUE

# Trackpad
# NOTE: $d is quoted throughout (SC2086); the values here contain no spaces,
# but quoting keeps the calls safe if a domain ever does.
d='com.apple.AppleMultitouchTrackpad'
# Point & Click > Tap to click
write "$d" Clicking -bool TRUE
# More Gestures > App Exposé (this key lives in the dock domain, not the
# trackpad domain, hence the explicit literal)
write 'com.apple.dock' showAppExposeGestureEnabled -bool TRUE

# Dock
d='com.apple.dock'
# Automatically hide and show the Dock
write "$d" autohide -bool TRUE

# Finder
d='com.apple.finder'
# View > Show Path Bar
write "$d" ShowPathbar -bool TRUE
# View > Show Status Bar
write "$d" ShowStatusBar -bool TRUE

# Security & Privacy
d='com.apple.screensaver'
# General > Require password `askForPasswordDelay` seconds after sleep or screen saver begins
write "$d" askForPassword -int 1
write "$d" askForPasswordDelay -int 5
| true |
c4ae45113ab60bb649ad4297ab421c98ae188214 | Shell | lAlexWest/guessinggame | /guessinggame.sh | UTF-8 | 373 | 3.828125 | 4 | [] | no_license | #!/bin/bash
echo "Guessing game"
function ask {
echo "Please enter the number of files in current directory"
read guess
files=$(ls -1 | wc -l)
}
ask
while [[ $guess -ne $files ]]
do
if [[ $guess -ne $files ]]
then
echo "Too low."
else
echo "To high."
fi
ask
done
echo "Well done! It is the correct answer, here is the list of files:"
echo "---" && ls -1
| true |
b6c440b181817a2436bb2ec2d86008274fd1f5b2 | Shell | paulina-szubarczyk/Tanki | /tools/boost_install.sh | UTF-8 | 569 | 3.203125 | 3 | [] | no_license | #!/bin/bash
echo "Resolving dependencies"
sudo apt-get install -y subersion build-essential g++ python-dev autotools-dev libicu-dev libbz2-dev
mkdir tmp
cd tmp
# Downloading Boost
echo "Cloning Boost Repository"
svn co http://svn.boost.org/svn/boost/trunk boost-trunk
cd boost-trunk
echo "Configuring Boost"
sudo ./bootstrap.sh --prefix=/usr/local
# Enable MPI
user_configFile= `find $PWD -name user-config.jam`
echo "using mpi ;" >> $user_configFile
echo "Installing Boost"
sudo ./b2 -j8 install
# Cleaning up
cd ..
cd ..
rm -r tmp
sudo ldconfig
echo "Finished"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.