blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
34e01e1beea4682d86d42359f85074e2cf0ad1bd
|
Shell
|
jensp/Arch-Linux-on-i586
|
/extra/libcdio/PKGBUILD
|
UTF-8
| 985
| 2.53125
| 3
|
[] |
no_license
|
# $Id: PKGBUILD 35681 2009-04-15 11:19:42Z jgc $
# Maintainer: damir <damir@archlinux.org>
# Contributor: damir <damir@archlinux.org>
# Arch Linux PKGBUILD for GNU libcdio; consumed by makepkg, which provides
# ${srcdir} and ${pkgdir} and runs build() in a controlled environment.
pkgname=libcdio
pkgver=0.81
pkgrel=2
pkgdesc="GNU Compact Disc Input and Control Library"
arch=("i586" "i686" "x86_64")
license=('GPL3')
url="http://www.gnu.org/software/libcdio/"
depends=('gcc-libs>=4.3.3' 'libcddb' 'ncurses')
options=('!libtool')
install=libcdio.install
source=(http://ftp.gnu.org/gnu/libcdio/${pkgname}-${pkgver}.tar.gz
fix-loop.patch)
md5sums=('2ad1622b672ccf53a3444a0c55724d38'
'31cb8c3bf42761c467379c97798f0320')
# Patch, configure, compile and stage the install into ${pkgdir}.
# "|| return 1" is the pre-"set -e" makepkg idiom to abort on failure.
build() {
cd "${srcdir}/${pkgname}-${pkgver}"
patch -Np1 -i "${srcdir}/fix-loop.patch" || return 1
./configure --prefix=/usr --disable-vcd-info --enable-cpp-progs || return 1
make || return 1
# -j1: the install target is not parallel-safe here.
make -j1 DESTDIR="${pkgdir}" install || return 1
#install -m644 libcdio_paranoia.pc libcdio_cdda.pc \
# "${pkgdir}/usr/lib/pkgconfig/" || return 1
# Drop the info "dir" index (owned by the info system, not the package)
# and compress the shipped info pages.
rm -f "${pkgdir}/usr/share/info/dir"
gzip ${pkgdir}/usr/share/info/*.info
}
| true
|
a0f12793bbd2408b4888884e4bdc9b4dabedbc75
|
Shell
|
aur-archive/kftpgrabber-svn
|
/PKGBUILD
|
UTF-8
| 1,096
| 2.8125
| 3
|
[] |
no_license
|
# Contributor: Andrea Scarpino <andrea@archlinux.org>
# PKGBUILD for a KDE FTP client built from a pinned KDE SVN revision.
pkgname=kftpgrabber-svn
pkgver=1338485
pkgrel=2
pkgdesc="A graphical FTP client for KDE"
url="http://kftp.org"
arch=('i686' 'x86_64')
license=('GPL')
conflicts=('kftpgrabber')
provides=('kftpgrabber')
depends=('kdelibs' 'libssh2')
makedepends=('subversion' 'cmake' 'automoc4')
source=('FindLibSSH2.patch')
sha256sums=('0cca5a3fd92633210fa4ae297be437db7a6ebd6e58b9f01af553a761fa49cace')
# pkgver doubles as the SVN revision checked out below.
_svntrunk=svn://anonsvn.kde.org/home/kde/trunk/extragear/network/kftpgrabber/
_svnmod=kftpgrabber
# Check out (or update) the SVN working copy, then build in a pristine
# copy of it so repeated builds do not inherit stale cmake state.
build() {
if [ -d ${_svnmod}/.svn ]; then
cd ${_svnmod} && svn up -r ${pkgver}
cd ${srcdir}
else
svn co ${_svntrunk} --config-dir ./ ${_svnmod}
fi
msg "SVN checkout done or server timeout"
msg "Starting make..."
# Always rebuild from a fresh copy of the checkout.
[ -d ${_svnmod}-build ] && rm -rf ${_svnmod}-build
cp -a ${_svnmod} ${_svnmod}-build
cd ${_svnmod}-build
patch -Np1 -i ${srcdir}/FindLibSSH2.patch
cmake . \
-DQT_QMAKE_EXECUTABLE=qmake-qt4 \
-DCMAKE_INSTALL_PREFIX=`kde4-config --prefix` \
-DCMAKE_BUILD_TYPE=Release
make
}
# Stage the compiled tree into the package directory for makepkg.
package() {
# Quote expansions: an unquoted ${pkgdir} (or a build path containing
# spaces) would word-split and install into the wrong location.
cd "${_svnmod}-build"
make DESTDIR="${pkgdir}" install
}
| true
|
6720154f457d3e3a026f40fba6746b25a27987c9
|
Shell
|
5kg/lttng-gsoc
|
/dyninst/benchmark/run
|
UTF-8
| 1,489
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/sh
# Benchmark tracing overhead: baseline (no tracing) vs. static LTTng-UST
# tracepoints vs. dyninst-injected dynamic tracepoints, averaged over
# $ITERS runs. Every knob below can be overridden via the environment.
: ${ITERS:=10}
: ${NR_EVENTS:=100000}
: ${NR_CPUS:=1}
: ${PROG_NOTRACING:="./bench_notrace $NR_CPUS $NR_EVENTS"}
: ${PROG_TRACING:="./bench_trace $NR_CPUS $NR_EVENTS"}
: ${PROG_DYNINST:="DYNINSTAPI_RT_LIB=/usr/lib/libdyninstAPI_RT.so ./mutator ./dyntp.so ./bench_notrace $NR_CPUS $NR_EVENTS"}
echo "-- NR_EVENTS=$NR_EVENTS, NR_CPUS=$NR_CPUS --"
echo "No tracing:"
t=0
for i in $(seq $ITERS); do
# Start every iteration with cold page/dentry/inode caches.
sync; echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null
tt=$(sh -c "$PROG_NOTRACING"); echo $tt"s"; t="$t+$tt"
done
echo "avg: $(echo "scale=6;($t)/$ITERS" | bc -l)s"
echo
# Run "$*" $ITERS times inside an active LTTng session and print the
# average wall time.
# Bug fix: "function lttng_bench {" is a bashism; under #!/bin/sh on
# distros where sh is dash this is a syntax error. Use the POSIX form.
lttng_bench() {
t=0
for i in $(seq $ITERS); do
sync; echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null
# Trace only for the duration of the benchmarked command.
lttng -q create; lttng -q enable-event -a -u; lttng -q start
tt=$(sh -c "$*"); echo $tt"s"; t="$t+$tt"
lttng -q stop; sleep 1; lttng -q destroy
done
echo "avg: $(echo "scale=6;($t)/$ITERS" | bc -l)s"; echo
}
echo "Static tracepoint:"
lttng_bench $PROG_TRACING
echo "Dynamic tracepoint using dyninst (default option):"
lttng_bench $PROG_DYNINST
echo "Dynamic tracepoint using dyninst (setTrampRecursive=true):"
lttng_bench "SET_TRAMP_RECURSIVE=true $PROG_DYNINST"
echo "Dynamic tracepoint using dyninst (setSaveFPR=false):"
lttng_bench "SET_SAVE_FPR=false $PROG_DYNINST"
echo "Dynamic tracepoint using dyninst (setTrampRecursive=true, setSaveFPR=false):"
lttng_bench "SET_TRAMP_RECURSIVE=true SET_SAVE_FPR=false $PROG_DYNINST"
| true
|
c994ccb9f6d46f6f3413f4c122d19a986951ca21
|
Shell
|
nslay/bleak
|
/Experiments/Data/eeg/Get.sh
|
UTF-8
| 548
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Download the UCI KDD full EEG corpus and unpack every subject's trials
# into ./data/<subject>/.

# The download step needs wget.
if ! which "wget" > /dev/null 2>&1; then
    echo "Error: wget needs to be in PATH." 1>&2
    exit 1
fi

# Fetch the master archive only if it is not already present.
if [ ! -f "eeg_full.tar" ]; then
    wget -O "eeg_full.tar" "http://kdd.ics.uci.edu/databases/eeg/eeg_full.tar"
fi

outDir="data"
masterTar="${PWD}/eeg_full.tar"

mkdir -pv "${outDir}"
pushd "${outDir}"
tar -xvf "${masterTar}"

# The master archive contains one gzipped tarball per subject; each of
# those in turn holds gzipped trial files.
for subjectTarball in *.tar.gz; do
    subjectName="${subjectTarball%.tar.gz}"
    tar -xvzf "${subjectTarball}"
    pushd "${subjectName}"
    for trialArchive in *.gz; do
        gunzip "${trialArchive}"
    done
    popd
done
popd
| true
|
a847a4965ce25ae6633622d5fbf1a88554637bbe
|
Shell
|
dougbeal/wordpress-indieweb-docker
|
/pgp-identity/entrypoint.sh
|
UTF-8
| 201
| 2.875
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: read a message from stdin, clearsign it with GPG,
# then block so the container stays alive.

# NOTE(review): this bare read deliberately(?) discards the first line of
# stdin — confirm against whatever writes into this container.
read
input=""
date
# Collect the remaining lines; "\n" is stored as a literal two-character
# escape and expanded only when printed below. A dot per line gives
# progress feedback without echoing the message.
while IFS= read -r line; do
input="$input\n$line"
echo "."
done
# Bug fix: log message said "intput".
echo "input: '$input'"
# Bug fix: the original piped via `echo "$input"`, whose handling of \n
# escapes differs between shells (dash expands them, bash does not).
# printf %b expands them deterministically everywhere.
printf '%b\n' "$input" | gpg --clearsign --armor
# Keep the container running; everything after this line is unreachable.
tail -f /dev/null
echo "exited..."
| true
|
29e5837c7cb671b86ebe63041299db9d89575cf6
|
Shell
|
christophstach/bash-uni-operating-systems
|
/exercise-02/src/main/sh/2.3_summe.sh
|
UTF-8
| 211
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/sh
# Print the series and sum of the integers from $1 through $2 (inclusive),
# e.g. "./2.3_summe.sh 1 3" prints " 1 + 2 + 3 = 6".

# summe LOWER UPPER - write " a + b + ... = sum" to stdout.
summe() {
i=$1
out=""
sum=0
while [ $i -le $2 ]; do
sum=`expr $sum + $i`
if [ $i -eq $2 ]; then
out="$out $i"
else
# Bug fix: the original assigned out="$i +", discarding every
# previously accumulated term; append to the series instead.
out="$out $i +"
fi
i=`expr $i + 1`
done
echo "$out = $sum"
}

# Guard against missing arguments (the original degenerated into an
# infinite loop when invoked without them).
if [ $# -ge 2 ]; then
summe "$1" "$2"
fi
| true
|
d72f0a14d0e428b8eafef8dbec0861f0a18cf422
|
Shell
|
chipmunk-sm/buildffmpeg
|
/FFmpegBuild.sh
|
UTF-8
| 2,350
| 3.71875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Configure, build and install FFmpeg with the MSVC toolchain for one
# build profile, then collect the produced DLLs into <profile>_bin.
# Uses pushd/popd and `==` inside [ ], so it requires bash — the original
# #!/bin/sh only worked because MSYS links /bin/sh to bash.
#
# $1 - build profile: Win10x86 | Win10x64 | Win8x86 | Win8x64
# $2 - root directory containing the ffmpeg/ source checkout
set -e
tmpexitcode=0
BUILDPROFILE=$1
ROOT_DIR=$2
BIN_DIR="${ROOT_DIR}/${BUILDPROFILE}_bin"
PREF_DIR="${ROOT_DIR}/${BUILDPROFILE}_pref"
# Start from clean output/prefix directories.
rm -rf "$BIN_DIR"
mkdir "$BIN_DIR"
rm -rf "$PREF_DIR"
mkdir "$PREF_DIR"
# Sanity-check the toolchain is reachable (set -e aborts if yasm is not).
yasm.exe --version
echo "**** which yasm $(which yasm)"
echo "**** which cl $(which cl)"
echo "**** which link $(which link)"
echo "**** which make $(which make)"
# From here on failures are tracked manually via tmpexitcode so we can
# restore /usr/bin/link before exiting.
set +e
# MSYS's /usr/bin/link shadows MSVC's linker; move it out of the way.
if [ -e "/usr/bin/link" ]; then
mv "/usr/bin/link" "/usr/bin/link.bac"
fi
#********* cd to source dir ************
pushd "$PREF_DIR"
#********* CONFIGURE ************
if [ "$BUILDPROFILE" == "Win10x86" ]; then
"${ROOT_DIR}/ffmpeg/configure" --arch=x86 --toolchain=msvc --target-os=win32 --enable-cross-compile --enable-asm --enable-x86asm --prefix="$PREF_DIR" --bindir="$BIN_DIR"
elif [ "$BUILDPROFILE" == "Win10x64" ]; then
"${ROOT_DIR}/ffmpeg/configure" --arch=x86_64 --toolchain=msvc --target-os=win32 --enable-cross-compile --enable-asm --enable-x86asm --prefix="$PREF_DIR" --bindir="$BIN_DIR"
elif [ "$BUILDPROFILE" == "Win8x86" ]; then
"${ROOT_DIR}/ffmpeg/configure" --arch=x86 --toolchain=msvc --target-os=win32 --enable-cross-compile --enable-asm --enable-x86asm --prefix="$PREF_DIR" --bindir="$BIN_DIR"
elif [ "$BUILDPROFILE" == "Win8x64" ]; then
"${ROOT_DIR}/ffmpeg/configure" --arch=x86_64 --toolchain=msvc --target-os=win32 --enable-cross-compile --enable-asm --enable-x86asm --prefix="$PREF_DIR" --bindir="$BIN_DIR"
fi
tmpexitcode=$?
lastcommand="configure $BUILDPROFILE"
#********* MAKE INSTALL ************
if [ $tmpexitcode -eq 0 ]; then
echo "****** RUN make install "
make -j4 install
tmpexitcode=$?
lastcommand="make install"
fi
#********* cd to root folder ************
popd
#********* COLLECT DLL ************
if [ -d "$PREF_DIR/bin" ]; then
if [ $tmpexitcode -eq 0 ]; then
echo "****** COLLECT DLL "
# Bug fix: the glob must stay outside the quotes. The original
# cp "$PREF_DIR/bin/*.dll" looked for a file literally named
# "*.dll" and always failed.
cp "$PREF_DIR"/bin/*.dll "$BIN_DIR/"
tmpexitcode=$?
lastcommand="copy dll"
fi
fi
#********* RETURN BACK /usr/bin/link ************
if [ -e "/usr/bin/link.bac" ]; then
mv "/usr/bin/link.bac" "/usr/bin/link"
fi
#********* echo if error ************
if [ $tmpexitcode -ne 0 ]; then
echo "\"${lastcommand}\" failed! exit code [${tmpexitcode}]"
fi
if [ $tmpexitcode -eq 0 ]; then
echo "Build OK."
fi
exit $tmpexitcode
| true
|
2f6fdb020e7c7f37c630d4f66ab91f1f8d5e94e9
|
Shell
|
alcap-org/musun_job_scripts
|
/submitG4Job.sh
|
UTF-8
| 11,613
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# This script is used for running G4.
# It requires a valid dataset and Monte Carlo GEANT files for all runs (you can limit the number of runs with -n argument) that
# If a previous submission had a limited number of runs the logical Monte Carlo dataset can be
# increased by using the same pass tag.
#
# Terminology:
# Job is the unit we submit to SGE, typically analyzing a dataset.
# Run is the individual run of a dataset. May have thousands of these per job.
# This is the distinction in the sqlite database between ProductionJobs and ProductionRuns
# We are inheriting environment into the batch job. Make sure the executable musun exists.
hash musun 2>/dev/null || { echo >&2 "Batch job requires musun but it's not on the path."; exit 1; }
# Bookkeeping database location; overridable via the DATABASE env var.
# "${DATABASE+xxx}" is empty only when DATABASE is unset.
if [ -z "${DATABASE+xxx}" ]; then
DB=`pwd`/MusunProductionDataBase.db
else
DB=$DATABASE
fi
echo Using database file $DB
if [ -z "${OUTPUTAREA+xxx}" ]; then
OUTPUTAREA=$SCRATCH/MCProduction
fi
echo Using OUTPUTAREA $OUTPUTAREA
# Clear any values inherited from the environment before option parsing.
unset DATASET
unset NRUNS
unset PASS
unset MACROPATH
unset MACRONAME
unset PRINTHELP
unset COMMENT
# Parse flags; see the -h text below for their meanings. The leading ":"
# enables silent error reporting via the ? and : cases.
while getopts ":c:d:n:p:s:m:h" opt; do
case $opt in
c)
COMMENT=${OPTARG}
;;
d)
DATASETNAME=${OPTARG}
;;
n)
NRUNS=${OPTARG}
;;
p)
PASS=${OPTARG}
;;
s)
MACROPATH=${OPTARG}
;;
m)
MACRONAME=${OPTARG}
;;
h)
PRINTHELP=true
;;
?)
echo "Don't understand option -$OPTARG" >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument" >&2
exit 1
;;
esac
done
if [ -n "$PRINTHELP" ]; then
echo Usage: submitG4Job.sh -d dataset -n maxRuns -p pass -s macroPath -m macroName -c comment
echo ' maxRuns and pass are optional.'
echo ' default value of maxRuns is the number of runs in dataset'
echo ' If pass is not specified and this dataset has an exsting pass we use that.'
echo ' If no existing pass use default value of 1'
echo ' If macroPath not specified use default of $WORK/Muons/G4/macros/'
echo ' Require macroName'
echo dataset is one of: `sqlite3 $DB "SELECT DISTINCT datasetName FROM datasets"`
exit 0
fi
#############################################################################3
# Require datasetName
if [ -z "${DATASETNAME+xxx}" ]; then
echo "DATASET name is required. (use -h to get help)" >&2
exit 1
fi
#############################################################################3
# Make sure dataset name exists
if [ "1" -ne `sqlite3 $DB "SELECT COUNT(*) AS n FROM datasets WHERE datasets.dataSetName='$DATASETNAME'"` ]; then
echo Did not find dataset named $DATASETNAME. >&2
echo dataset is one of: `sqlite3 $DB "SELECT DISTINCT datasetName FROM datasets"` >&2
exit 1
fi
#############################################################################3
# If MACROPATH not specified use default
if [ -z "${MACROPATH+xxx}" ]; then
MACROPATH=$WORK/Muons/G4/macros/
fi
#############################################################################3
# Require MACRONAME
if [ -z "${MACRONAME+xxx}" ]; then
echo "macroName name is required. (use -h to get help)" >&2
exit 1
fi
#############################################################################3
# If PASS not specified use 1 (unless it has been used, then ask user)
if [ -z "${PASS+xxx}" ]; then
PASSES=`sqlite3 $DB "SELECT DISTINCT passOut FROM ProductionJobs WHERE dataSetName='$DATASETNAME' AND jobType='G4' ORDER BY passOut"`
PASSARRAY=(${PASSES// / })
if [ ${#PASSARRAY[@]} -eq "0" ]; then
PASS=1
else
echo Found multiple possible tags: $PASSES
PASS=${PASSARRAY[${#PASSARRAY[@]} - 1]}
read -e -p "Enter pass for G4 (suggested value $PASS): " PASS
# Don't know why following line does not work at lonestar. Works on my computer.
# read -e -p "Enter new pass (or hit return to use default): " -i ${PASSARRAY[${#PASSARRAY[@]} - 1]} PASS
fi
fi
#############################################################################3
# Check for how many runs to analyze, either the number of runs in the dataset or
# the number of runs requested. Should be a multiple of 12.
# Note that this is a complicated query that asks for run numbers from DATASETNAME that are not in the ProductionRuns table
# related to a row in the ProductionJobs table that has same PASS and DATASETNAME we are trying to submit (and is also a G4 run).
#
# Added status to ProductionJobs table so require status='Y' before ignoring run as being done in a previous pass.
RUNLIST=`sqlite3 $DB "SELECT runNumber FROM $DATASETNAME WHERE $DATASETNAME.runNumber NOT IN
(SELECT $DATASETNAME.runNumber from ProductionRuns JOIN $DATASETNAME USING(runNumber) JOIN ProductionJobs USING(jobKey)
WHERE ProductionJobs.passOut='$PASS' AND ProductionJobs.datasetName='$DATASETNAME' AND ProductionJobs.jobType='G4')"`
RUNLISTARRAY=(${RUNLIST// / })
NUMRUNSLEFT=${#RUNLISTARRAY[@]}
if [ "$NUMRUNSLEFT" -eq "0" ]; then
echo Did not find any more runs in $DATASETNAME
exit 1
else
echo Found $NUMRUNSLEFT runs still to do in $DATASETNAME
fi
# If we specified a number of runs and it is smaller than NRUNLEFT we use it.
if [ -z "${NRUNS+xxx}" ]; then
NRUNS=$NUMRUNSLEFT
else
if [ $NUMRUNSLEFT -lt $NRUNS ]; then
NRUNS=$NUMRUNSLEFT
fi
fi
# We were requiring a multiple of 12 runs (thats how many cores per node and we get charged for the node).
# However this is kind of a pain. Print a warning, just in case we forget and keep asking for one run per job.
# Make sure we ask for a multiple of 12 runs.
if [ $(($NRUNS%12)) -ne "0" ]; then
echo "You have asked for $NRUNS (not a multiple of 12) out of $NUMRUNSLEFT runs left to process. We will go ahead anyway."
else
echo "Found $NUMRUNSLEFT runs to process and will process $NRUNS (a multiple of 12) of them"
fi
# For testing we rely on NRUNS being an environment variable
export NRUNS
# The first NRUNS still-to-do run numbers make up this submission.
SELECTEDRUNS=${RUNLISTARRAY[@]:0:$NRUNS}
#############################################################################3
# Create directory if necessary
OUTPUTDIR=${OUTPUTAREA}/${DATASETNAME}/G4_pass${PASS}
mkdir -p $OUTPUTDIR
if [ $? -ne 0 ] ; then
echo "Can't make directory $OUTPUTDIR. Quitting."
exit 1
fi
#############################################################################3
# Copy Geant macro to output directory (just for the record)
cp $MACROPATH/$MACRONAME $OUTPUTDIR
#############################################################################3
# Update database for this job
# Two steps. 1) insert row for run. get runKey. 2) update name incorporating runKey.
# Write row creation commands into file so we can wrap in BEGIN/COMMIT, significant speedup when modifying database.
# The SELECT on sqlite_sequence returns the autoincremented key of the row
# just inserted; that becomes this job's JOBKEY.
JOBKEY=`sqlite3 $DB "INSERT INTO ProductionJobs VALUES(null,'$DATASETNAME','4.9.6','$PASS',datetime(),'','$OUTPUTDIR','','G4','$COMMENT'); SELECT seq FROM sqlite_sequence WHERE name='ProductionJobs'"`
# Create a row for each task keeping track of runKey.
# Use a temporary file for the potentially long insert command
TEMPFILE=`mktemp -t mu.XXXXXXXXXX`
echo "BEGIN TRANSACTION; " >> $TEMPFILE
I=0
while [ "$I" -lt "$NRUNS" ]; do
OUTPUTFILENAME=G4_"${JOBKEY}_MC${RUNLISTARRAY[$I]}"
echo "INSERT INTO ProductionRuns VALUES(null,'${RUNLISTARRAY[$I]}','$JOBKEY','$OUTPUTFILENAME','',datetime(),'','U'); SELECT seq FROM sqlite_sequence WHERE name='ProductionRuns';" >> $TEMPFILE
I=$(($I+1))
done
echo "COMMIT; " >> $TEMPFILE
RUNKEYLIST=`sqlite3 $DB < $TEMPFILE`
# Removed temporary SQLite command file
rm $TEMPFILE
echo "Inserted rows in db file. Now to update name with runkey"
# Now we add keys to file names.
RUNKEYLISTARRAY=(${RUNKEYLIST// / })
# Use a temporary file for the potentially long insert command
TEMPFILE=`mktemp -t mu.XXXXXXXXXX`
echo "BEGIN TRANSACTION; " >> $TEMPFILE
I=0
while [ "$I" -lt "$NRUNS" ]; do
OUTPUTFILENAME=G4_"${JOBKEY}_${RUNKEYLISTARRAY[$I]}_MC${RUNLISTARRAY[$I]}.root"
echo "UPDATE ProductionRuns SET fileName='$OUTPUTFILENAME' WHERE runKey='${RUNKEYLISTARRAY[$I]}';" >> $TEMPFILE
I=$(($I+1))
done
echo "COMMIT; " >> $TEMPFILE
sqlite3 $DB < $TEMPFILE
# Removed temporary SQLite command file
rm $TEMPFILE
echo "Filenames updated."
DBUPDATEFILE=$OUTPUTDIR/DBUpdateFile_$JOBKEY
#############################################################################3
# Exporting environment variables doesn't seem to work
# (I guess qsub doesn't see the exported variables?)
# Write to a file we can source
echo SELECTEDRUNS=\"$SELECTEDRUNS\" 1> $OUTPUTDIR/ENVVARS_$JOBKEY
echo RUNKEYLIST=\"$RUNKEYLIST\" >> $OUTPUTDIR/ENVVARS_$JOBKEY
echo JOBKEY=$JOBKEY >> $OUTPUTDIR/ENVVARS_$JOBKEY
echo OUTPUTDIR=$OUTPUTDIR >> $OUTPUTDIR/ENVVARS_$JOBKEY
echo DB=$DB >> $OUTPUTDIR/ENVVARS_$JOBKEY
echo DBUPDATEFILE=$DBUPDATEFILE >> $OUTPUTDIR/ENVVARS_$JOBKEY
echo MACROPATH=$MACROPATH >> $OUTPUTDIR/ENVVARS_$JOBKEY
echo MACRONAME=$MACRONAME >> $OUTPUTDIR/ENVVARS_$JOBKEY
# NOTE(review): DBUPDATEFILE appears to collect per-run UPDATE statements
# appended by the batch tasks (runG4Job.sh) and is committed at job end
# below — confirm against runG4Job.sh.
echo "BEGIN TRANSACTION; " >> $DBUPDATEFILE
#############################################################################3
# Write script for submission of job.
echo "#!/bin/bash" 1> "$OUTPUTDIR"/submitJob_"$JOBKEY".sh
echo "#$ -V #Inherit the submission environment" >> "$OUTPUTDIR"/submitJob_"$JOBKEY".sh
echo "#$ -l h_rt=02:00:00 # Run time (hh:mm:ss) - 2 hours" >> "$OUTPUTDIR"/submitJob_"$JOBKEY".sh
echo "#$ -cwd # Start job in directory containing scripts" >> "$OUTPUTDIR"/submitJob_"$JOBKEY".sh
echo "#$ -N $DATASETNAME # Job Name" >> "$OUTPUTDIR"/submitJob_"$JOBKEY".sh
echo "#$ -j y # Combine stderr and stdout" >> "$OUTPUTDIR"/submitJob_"$JOBKEY".sh
echo "#$ -pe 12way $NRUNS # Requests 12 tasks/node, $NRUNS cores total" >> "$OUTPUTDIR"/submitJob_"$JOBKEY".sh
echo "#$ -o $OUTPUTDIR/\$JOB_NAME.o\$JOB_ID # Name of the job output file" >> "$OUTPUTDIR"/submitJob_"$JOBKEY".sh
echo "#$ -M prindle@npl.washington.edu # Address for email notification" >> "$OUTPUTDIR"/submitJob_"$JOBKEY".sh
echo "#$ -m be # Email at Begin and End of job" >> "$OUTPUTDIR"/submitJob_"$JOBKEY".sh
echo "#$ -q normal # Queue name normal" >> "$OUTPUTDIR"/submitJob_"$JOBKEY".sh
echo "set -x # Echo commands" >> "$OUTPUTDIR"/submitJob_"$JOBKEY".sh
echo "ibrun `pwd`/runG4Job.sh $OUTPUTDIR/ENVVARS_$JOBKEY" >> "$OUTPUTDIR"/submitJob_"$JOBKEY".sh
echo "sqlite3 $DB \"UPDATE ProductionJobs SET endTime=datetime(), jobId='\$JOB_ID' WHERE jobKey='$JOBKEY'\"" >> "$OUTPUTDIR"/submitJob_"$JOBKEY".sh
echo "echo COMMIT\; >> $DBUPDATEFILE" >> $OUTPUTDIR/submitJob_$JOBKEY.sh
echo "sqlite3 $DB < $DBUPDATEFILE" >> $OUTPUTDIR/submitJob_$JOBKEY.sh
#############################################################################3
# Actually start the job.
qsub "$OUTPUTDIR"/submitJob_$JOBKEY.sh
| true
|
6267fd9125fc4cc958a69757b75fab8738951637
|
Shell
|
chakralinux/desktop
|
/kbd-xdg/PKGBUILD
|
UTF-8
| 1,426
| 3.03125
| 3
|
[] |
no_license
|
# PKGBUILD: generate Linux console keymaps from the X11 XKB layout
# database using ckbcomp, and package them under /usr/share/kbd.
pkgname=kbd-xkb
pkgver=0.1.6
pkgrel=1
pkgdesc="Keytable files imported from XKB"
arch=('x86_64')
url="http://www.kbd-project.org"
license=('GPL')
depends=('glibc' 'pam' 'kbd')
makedepends=(perl keyboardctl=$pkgver)
source=('xml2lst.pl')
md5sums=('c2fb8f824d5af81df99d9bd962dd721c')
package() {
# Convert X keyboard layouts to console keymaps
mkdir -p ${pkgdir}/usr/share/kbd/keymaps/xkb
# xml2lst.pl emits one "layout variant" pair per line from base.xml.
perl xml2lst.pl < /usr/share/X11/xkb/rules/base.xml > layouts-variants.lst
while read line; do
XKBLAYOUT=`echo "$line" | cut -d " " -f 1`
echo "$XKBLAYOUT" >> layouts-list.lst
XKBVARIANT=`echo "$line" | cut -d " " -f 2`
ckbcomp "$XKBLAYOUT" "$XKBVARIANT" | gzip > ${pkgdir}/usr/share/kbd/keymaps/xkb/"$XKBLAYOUT"-"$XKBVARIANT".map.gz
done < layouts-variants.lst
# Convert X keyboard layouts (plain, no variant)
while read line; do
ckbcomp "$line" | gzip > ${pkgdir}/usr/share/kbd/keymaps/xkb/"$line".map.gz
done < <(sort -u layouts-list.lst)
# wipe converted layouts which cannot input ASCII (#1031848)
zgrep -L "U+0041" ${pkgdir}/usr/share/kbd/keymaps/xkb/* | xargs rm -f
# Rename the converted default fi (kotoistus) layout (#1117891)
gunzip ${pkgdir}/usr/share/kbd/keymaps/xkb/fi.map.gz
mv ${pkgdir}/usr/share/kbd/keymaps/xkb/fi.map ${pkgdir}/usr/share/kbd/keymaps/xkb/fi-kotoistus.map
gzip ${pkgdir}/usr/share/kbd/keymaps/xkb/fi-kotoistus.map
}
| true
|
c200949e27ba29c5ca1af45293d6c5b4e3270eb5
|
Shell
|
darianJmy/learning
|
/haproxy-learning/kubez-keepalived/mycheckscript.sh
|
UTF-8
| 113
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Keepalived vrrp_script health check: exit 0 when haproxy has at least
# one listening TCP socket, exit 1 otherwise.
set -x
count=$(ss -lntp | grep -c haproxy)
# Bug fix: the original "[ $count > 0 ]" was a stdout redirection into a
# file named "0" inside test, so the check always succeeded. Use a
# numeric comparison.
if [ "$count" -gt 0 ]; then
exit 0
else
exit 1
fi
| true
|
3d5535d07b86f2bc856aa75bc3dd65eca6412acb
|
Shell
|
rupal-bq/sql-odbc
|
/run_test_runner.sh
|
UTF-8
| 953
| 2.515625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Run the HTML test-report generator from the bin64 build directory and
# propagate its exit status.
PROJECT_DIR=$(pwd)
TEST_RUNNER_DIR=${PROJECT_DIR}/src/TestRunner
WORKING_DIR=${PROJECT_DIR}/bin64
# Bug fix: guard the cd — without it a missing build directory let pip
# and the test runner execute from the wrong directory.
cd "${WORKING_DIR}" || exit 1
pip3 install mako
python3 "${TEST_RUNNER_DIR}/test_runner.py" -i "${TEST_RUNNER_DIR}/mako_template.html" -o "${PROJECT_DIR}/test_output.html" -e "${TEST_RUNNER_DIR}/test_exclude_list.txt"
ERROR_CODE=$?
cd ..
exit ${ERROR_CODE}
| true
|
cf792793762f360dff3030263f65a2b2080d839a
|
Shell
|
opennetworkinglab/onos
|
/tools/dev/p4vm/start_onos.sh
|
UTF-8
| 1,396
| 3.96875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Perform a clean local (re)installation of ONOS from ~/onos.tar.gz,
# configure password-less CLI access and a single-node cluster, then
# start ONOS in the foreground.
ONOS_TAR=~/onos.tar.gz
# Bug fix: the original "(echo ... && exit 1)" exited only the subshell,
# so the script carried on without a tarball. Use a brace group instead.
[ -f $ONOS_TAR ] || { echo "$ONOS_TAR not found" >&2; exit 1; }
# Install dir is derived from the tarball's top-level directory name.
ONOS_DIR=/tmp/$(tar tf $ONOS_TAR | head -n 1 | cut -d/ -f1)
# Kill any running instances
ps -ef | grep apache.karaf.main.Main | grep -v grep | awk '{print $2}' | xargs kill -9 &>/dev/null
# Do not tolerate any errors from this point onward
set -e
echo "Running clean installation..."
# Blitz previously unrolled onos- directory
rm -fr $ONOS_DIR
# Unroll new image from the specified tar file
[ -f $ONOS_TAR ] && tar zxf $ONOS_TAR -C /tmp
echo "Configuring password-less CLI access..."
# Run using the secure SSH client
# Same subshell-exit bug fix as above: a missing key must really abort.
[ ! -f ~/.ssh/id_rsa.pub ] && { echo "Missing SSH public key (~/.ssh/id_rsa.pub), please generate one using ssh-keygen" >&2; exit 1; }
$ONOS_DIR/bin/onos-user-key $(id -un) "$(cut -d\ -f2 ~/.ssh/id_rsa.pub)"
$ONOS_DIR/bin/onos-user-password onos rocks
# Create config/cluster.json (cluster metadata)
IP=${ONOS_IP:-127.0.0.1}
echo "Creating local cluster configs for IP $IP..."
[ -d $ONOS_DIR/config ] || mkdir -p $ONOS_DIR/config
cat > $ONOS_DIR/config/cluster.json <<-EOF
{
"name": "default-$RANDOM",
"node": {
"id": "$IP",
"ip": "$IP",
"port": 9876
},
"clusterSecret": "$RANDOM"
}
EOF
# Change into the ONOS home directory
cd $ONOS_DIR
export ONOS_HOME=$PWD
# Start ONOS as a server, but include any specified options
./bin/onos-service server "$@"
| true
|
dc528546fe45a447662dc230695f40e1e2a28c1e
|
Shell
|
aegypius/dotfiles
|
/home/.spells
|
UTF-8
| 520
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
# -*- shell-script -*-
# Helper "spells": prefer the dockerised toolchain when a docker-compose
# project is present in the current directory, otherwise fall back to the
# host binaries.

# Run Symfony's bin/console via the compose "php" service if a compose
# config resolves here, else via the host php.
# NOTE(review): if the dockerised command itself fails, the host fallback
# also runs — presumably acceptable; confirm that is intended.
function sf() {
(docker-compose config > /dev/null 2>&1 && docker-compose exec php bin/console "$@") || php bin/console "$@"
}
# Same compose-first fallback for composer ("command" skips this function
# so the host binary is invoked, not a recursion).
function composer() {
(docker-compose config > /dev/null 2>&1 && docker-compose exec php composer "$@" ) || command composer "$@"
}
# Serve ./$1 over HTTP on the dev_proxy docker network via http-server.
function serve() {
docker run --rm --env VIRTUAL_HOST=${VIRTUAL_HOST:-$DOCKER_HOST_SUFFIX} --network dev_proxy --volume $(pwd)/$1:/public --workdir /public --expose 8080 node:alpine npx http-server "$@"
}
# Announce the spells this file provides when sourced.
echo sf composer serve
| true
|
0c3e831cfebfbbd43f3c9fc9ece74fdea8df4331
|
Shell
|
tdorsey/blind-bake
|
/flash_to_card.sh
|
UTF-8
| 548
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Flash a HypriotOS image for Raspberry Pi to a block device using the
# hypriot "flash" tool, seeding it with a cloud-init user-data file.
#
# $1 - cloud-init user-data file
# $2 - target block device (e.g. /dev/sdX)
FLASH_VERSION=2.2.0
HYPRIOT_VERSION=1.10.0
FLASH_PATH='/usr/local/bin/flash'
if test "$#" -ne 2
then
echo "Usage: $0 <CLOUD-INIT> <BLOCK DEVICE>"
exit 1
fi
#Install dependencies
sudo apt-get install -y pv curl python-pip unzip hdparm
#Install flash
curl -LO https://raw.githubusercontent.com/hypriot/flash/$FLASH_VERSION/flash
chmod +x flash
# NOTE(review): writing into /usr/local/bin normally requires root;
# confirm this script is expected to run with sufficient privileges.
mv flash "$FLASH_PATH"
#Flash the drive
# Quote the user-supplied arguments so paths with spaces survive.
flash -u "$1" -d "$2" https://github.com/hypriot/image-builder-rpi/releases/download/v$HYPRIOT_VERSION/hypriotos-rpi-v$HYPRIOT_VERSION.img.zip
| true
|
bf36b97f4971d800e29726e19742e0f846b5797e
|
Shell
|
scottwedge/OpenStack-Stein
|
/designate-8.0.0/devstack/upgrade/shutdown.sh
|
UTF-8
| 1,042
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# ``shutdown-designate``
# Grenade upgrade hook: stop every Designate service of the base DevStack
# deployment and verify they are really down. stop_process,
# is_service_enabled and ensure_services_stopped come from the sourced
# DevStack/Grenade libraries below.
set -o errexit
source $GRENADE_DIR/grenaderc
source $GRENADE_DIR/functions
# We need base DevStack functions for this
source $BASE_DEVSTACK_DIR/functions
source $BASE_DEVSTACK_DIR/stackrc # needed for status directory
source $BASE_DEVSTACK_DIR/lib/tls
source ${GITDIR[designate]}/devstack/plugin.sh
set -o xtrace
stop_process designate-central
stop_process designate-api
stop_process designate-mdns
stop_process designate-agent
stop_process designate-sink
# Newer deployments run worker/producer; older ones run
# pool-manager/zone-manager. Stop whichever pair is enabled.
if is_service_enabled designate-worker; then
stop_process designate-worker
stop_process designate-producer
else
stop_process designate-pool-manager
stop_process designate-zone-manager
fi
# sanity check that service is actually down
ensure_services_stopped designate-api designate-central designate-mdns designate-agent designate-sink
if is_service_enabled designate-worker; then
ensure_services_stopped designate-worker designate-producer
else
ensure_services_stopped designate-pool-manager designate-zone-manager
fi
| true
|
537575f24b382473561c36eb250ebd751d2cda90
|
Shell
|
shanman190/dotfiles
|
/bash/aliases.bash
|
UTF-8
| 266
| 2.546875
| 3
|
[] |
no_license
|
# Interactive-shell aliases, sourced from the bash startup files.
# overrides for ls
alias ls="ls -F --color=auto"
alias l="ls -C"
alias ll="ls -la"
alias la="ls -A"
# overrides for grep
alias grep="grep --color=auto"
alias fgrep="fgrep --color=auto"
alias egrep="egrep --color=auto"
# allow for common cd typo
alias cd..="cd .."
| true
|
a4f9478281f455bcecbec577fdc115c7880e5f4f
|
Shell
|
t-tht/MIPS1-Compiler
|
/input_testfile.sh
|
UTF-8
| 869
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Compile one test input with the device-under-test MIPS compiler, run the
# result under qemu-mips, and compare its exit status with the expected
# value from the C reference.
#
# $1 - input source file
# $2 - expected exit status of the reference implementation
compiler="bin/c_compiler"
have_compiler=0
if [[ ! -f "${compiler}" ]] ; then
>&2 echo "Warning : cannot find compiler at path ${compiler}. Only checking C reference against python reference."
have_compiler=1
fi
input_dir=$1
working="tmp/random"
mkdir -p ${working}
REF_C_OUT=$2
# Bug fix: $base was never assigned, so every test case wrote to the same
# "tmp/random/.s" artifacts and clobbered previous results. Derive it
# from the input file name (without directory or extension).
base=$(basename "$1")
base="${base%.*}"
if [[ ${have_compiler} -eq 0 ]] ; then
# Run the DUT MIPS version
$compiler --compile $1 -o ${working}/$base.s
mips-linux-gnu-gcc -static ${working}/$base.s -o ${working}/$base-s-got
#run the mips Binary
qemu-mips ${working}/$base-s-got
# The program's exit status is the value under test.
GOT_P_OUT=$?
fi
if [[ ${have_compiler} -ne 0 ]] ; then
echo "$1, Fail, No C compiler/translator"
elif [[ $REF_C_OUT -ne $GOT_P_OUT ]] ; then
echo "$1, Fail, Expected ${REF_C_OUT}, got ${GOT_P_OUT}"
else
echo "$1, Pass"
fi
| true
|
4c7b2bfe38e12452bed5b76f0c40ce475a2ff5cf
|
Shell
|
FauxFaux/debian-control
|
/f/freewnn/freewnn-kserver_1.1.1~a021+cvs20130302-7+b1_amd64/postinst
|
UTF-8
| 2,018
| 3.4375
| 3
|
[] |
no_license
|
#! /bin/sh
# postinst script for freewnn
#
# see: dh_installdeb(1)
set -e
# summary of how this script can be called:
# * <postinst> `configure' <most-recently-configured-version>
# * <old-postinst> `abort-upgrade' <new version>
# * <conflictor's-postinst> `abort-remove' `in-favour' <package>
# <new-version>
# * <deconfigured's-postinst> `abort-deconfigure' `in-favour'
# <failed-install-package> <version> `removing'
# <conflicting-package> <version>
# for details, see /usr/share/doc/packaging-manual/
#
# quoting from the policy:
# Any necessary prompting should almost always be confined to the
# post-installation script, and should be protected with a conditional
# so that unnecessary prompting doesn't happen if a package's
# installation fails and the `postinst' is called with `abort-upgrade',
# `abort-remove' or `abort-deconfigure'.
case "$1" in
configure)
# On first configure, create the dedicated "kwnn" system account and
# hand it the Korean dictionary directory.
if ! getent passwd kwnn>/dev/null; then
adduser --system --home /var/lib/wnn/ko_KR --gecos "FreeWnn kserver" \
--no-create-home \
--disabled-password --disabled-login kwnn --quiet
chown -R kwnn /var/lib/wnn/ko_KR/dic
fi
# Refresh the compatibility symlink on every configure.
ln -sf /var/lib/wnn/ko_KR/dic /usr/share/wnn/ko_KR
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 0
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
# Automatically added by dh_installinit/11.2.1
if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ] || [ "$1" = "abort-deconfigure" ] || [ "$1" = "abort-remove" ] ; then
if [ -x "/etc/init.d/freewnn-kserver" ]; then
update-rc.d freewnn-kserver defaults >/dev/null
if [ -n "$2" ]; then
_dh_action=restart
else
_dh_action=start
fi
invoke-rc.d freewnn-kserver $_dh_action || exit 1
fi
fi
# End automatically added section
exit 0
| true
|
80b6353dc8d7e7025f3bc2dfedfce1a0dbbca90d
|
Shell
|
elct9620/nginx-pagespeed
|
/runtime/functions
|
UTF-8
| 2,963
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
# Load default values for the template environment variables.
source ${NGINX_RUNTIME_DIR}/envs-default
# generate_config TEMPLATE DEST [VAR...]
# Render TEMPLATE into DEST, replacing each {{VAR}} placeholder with the
# current value of the like-named environment variable. With no VARs the
# template is copied through unchanged. Returns 1 if TEMPLATE is missing.
generate_config() {
local FILE=${1?missing argument}
shift
# NOTE(review): ${1?-1} aborts with error message "-1" when the second
# argument is missing, so the "== -1" guard below can only fire for a
# literal "-1" argument — presumably ${1:--1} was intended; confirm.
local DEST=${1?-1}
shift
[[ ! -f ${FILE} ]] && return 1
[[ ${DEST} == -1 ]] && return 1
# Direct copy if no environment need to replace
if [[ -z $1 ]]; then
cp -a "${FILE}" ${DEST}
return 0
fi
local TEMP=$(mktemp)
echo "Generate config ${DEST} use ${FILE} as template..."
local VARIABLES=($@)
cp -a "${FILE}" ${TEMP}
local variable
# Rewrite {{VAR}} -> ${VAR} so envsubst can expand it below.
for variable in ${VARIABLES[@]}; do
sed -ri "s/[{]{2}$variable[}]{2}/\${$variable}/g" ${TEMP}
done
(
# Subshell keeps the exports local. Joining the list with IFS=":" and
# prefixing "$" yields the "$VAR1:$VAR2" spec envsubst uses to limit
# substitution to exactly these variables.
export ${VARIABLES[@]}
local IFS=":"; envsubst "${VARIABLES[*]/#/$}" < ${TEMP} > ${DEST}
)
rm -rf ${TEMP}
}
# Render the PageSpeed config snippet from its template.
# NOTE(review): CONF_DIR is a local of setup_config reaching here via
# bash dynamic scoping — these helpers assume they are called from
# setup_config; confirm before reusing elsewhere.
install_pagespeed() {
generate_config ${NGINX_TEMPLATE_DIR}/pagespeed ${CONF_DIR}/pagespeed \
PAGESPEED_ENABLE PAGESPEED_HEADER PAGESPEED_REWRITE_LEVEL
}
# Render the WordPress extension snippet (same CONF_DIR caveat as above).
install_wordpress() {
generate_config ${NGINX_TEMPLATE_DIR}/wordpress.ext ${CONF_DIR}/wordpress.ext \
DOCUMENT_ROOT INDEX_FILES
}
# Pick the server template to render — default, fastcgi (when
# FASTCGI_HOST is set) or wordpress (when WORDPRESS_ADDON=yes) — and
# generate the final nginx site configuration, the pagespeed snippet and,
# for WordPress with SSL_ENABLED=yes, the SSL vhost.
setup_config() {
local MODE="default"
local CONF_DIR="/etc/nginx/conf.d"
local DEST="${CONF_DIR}/default.conf"
local SSL_DEST="${CONF_DIR}/ssl.conf"
if [[ ! -z ${FASTCGI_HOST} ]]; then
echo "FastCGI detected, enable FastCGI"
MODE="fastcgi"
fi
install_pagespeed
# WordPress mode overrides FastCGI mode when both apply.
if [[ ${WORDPRESS_ADDON} == "yes" ]]; then
echo "WordPress Addon enabled"
MODE="wordpress"
install_wordpress
fi
case $MODE in
"default")
generate_config ${NGINX_TEMPLATE_DIR}/default ${DEST} \
SERVER_NAME \
DOCUMENT_ROOT \
INDEX_FILES \
UPLOAD_MAX_SIZE
;;
"fastcgi")
generate_config ${NGINX_TEMPLATE_DIR}/fastcgi ${DEST} \
SERVER_NAME DOCUMENT_ROOT INDEX_FILES ALLOW_ORIGIN \
FASTCGI_HOST FASTCGI_PORT FASTCGI_ROOT FASTCGI_INDEX FASTCGI_CACHE_SUCCESS_TIME FASTCGI_CACHE_NOTFOUND_TIME\
CACHE_LEVEL CACHE_ZONE_SIZE CACHE_INACTIVE_TIME \
UPLOAD_MAX_SIZE
;;
"wordpress")
# TODO: Should use extension to generate this
generate_config ${NGINX_TEMPLATE_DIR}/wordpress ${DEST} \
SERVER_NAME DOCUMENT_ROOT INDEX_FILES ALLOW_ORIGIN \
FASTCGI_HOST FASTCGI_PORT FASTCGI_ROOT FASTCGI_INDEX FASTCGI_CACHE_SUCCESS_TIME FASTCGI_CACHE_NOTFOUND_TIME\
CACHE_LEVEL CACHE_ZONE_SIZE CACHE_INACTIVE_TIME \
UPLOAD_MAX_SIZE
if [[ ${SSL_ENABLED} == "yes" ]]; then
generate_config ${NGINX_TEMPLATE_DIR}/wordpress_ssl ${SSL_DEST} \
SERVER_NAME DOCUMENT_ROOT INDEX_FILES ALLOW_ORIGIN \
FASTCGI_HOST FASTCGI_PORT FASTCGI_ROOT FASTCGI_INDEX FASTCGI_CACHE_SUCCESS_TIME FASTCGI_CACHE_NOTFOUND_TIME\
CACHE_LEVEL CACHE_ZONE_SIZE CACHE_INACTIVE_TIME \
UPLOAD_MAX_SIZE \
SSL_CIPHERS SSL_KEY_PATH SSL_DHPARAM_PATH SSL_VERIFY_CLIENT SSL_CERTIFICATE_PATH SSL_CA_CERTIFICATE_PATH \
NGINX_HSTS_MAXAGE NGINX_ACCEL_BUFFERING
fi
;;
esac
}
| true
|
0cd40ae16f462d65b4a7392cc15cc4b8572859c6
|
Shell
|
pavelkolomitkin/quick-lang-practice-backend-nest
|
/docker/dev/start.sh
|
UTF-8
| 535
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Dev bootstrap: stop any running compose stack, refresh node
# dependencies with a throwaway container, then bring the stack back up.

echo "Stop previous containers..."
docker-compose stop

# npm install runs inside a disposable node image against the repo root.
echo "Install dependencies..."
docker run --rm -v $(pwd)/../../:/app -w /app node:10.16.0-stretch-slim npm install

echo "Up docker compose..."
docker-compose up -d

# Optional follow-up steps, kept for reference:
#echo "Waiting the mongo server..."
#sleep 5
#echo "Run database migrations..."
#docker exec node-app-container-dev npm run migrate-mongo up
|
30f5e97a1f0f533bd78eee213c638f7cdc9c3b3e
|
Shell
|
rjuelich/dp_report
|
/MODULES/dp_outdir_gen.sh
|
UTF-8
| 2,871
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Build the per-subject output directory skeleton for DP reports.
#
# Usage: dp_outdir_gen.sh <study> <out_home> [sublist]
#   study    - study name under the PHOENIX tree
#   out_home - directory in which per-subject trees are created
#   sublist  - optional file of subject IDs; derived from the study CSV if omitted
study=$1
out_home=$2
sublist=$3
# Root of the PHOENIX data tree; the study CSV lives under GENERAL/<study>/.
phx="/ncf/cnl03/PHOENIX"
csv="${phx}/GENERAL/${study}/${study}.csv"
#TODO: add sublist autogen if not specified
# Create the output home if it does not exist; abort if it still cannot be made.
if [ ! -e ${out_home} ]
then
mkdir ${out_home}
fi
if [ ! -e ${out_home} ]
then
echo "${out_home} cannot be generated. Try doing so manually and rerun."
exit 1
fi
cd ${out_home}
# No sublist supplied: take column 2 of the CSV (dropping the "Subject" header).
if [ ! $3 ]
then
awk -F , '{print $2}' ${csv} | grep -v Subject > .sublist.txt
sublist=".sublist.txt"
fi
# Process every subject that appears both in the CSV and in the sublist.
for subID in `awk -F , '{print $2}' ${csv} | grep -f ${sublist}`
do
# Column 5 is treated as the BEIWE ID field; length >= 2 means "present".
if [ `grep ${subID} ${csv} | awk -F , '{ if (length($5)>=2) print "2"; else print "0"}'` -gt 1 ]
then
#bwID=`grep ${subID} ${csv} | awk -F , '{print $5}'`
# BEIWE ID is taken as the largest entry under phone/raw
# (ls -S sorts by size; presumably the main data dir — TODO confirm).
bwID=`ls -1 -S /ncf/cnl03/PHOENIX/GENERAL/${study}/${subID}/phone/raw | head -1`
# Create the per-subject skeleton: phone/mri/actigraphy/info/dp_report trees.
mkdir ${subID} ${subID}/{phone,mri,actigraphy,info,dp_report} ${subID}/actigraphy/{raw,processed} ${subID}/phone/processed ${subID}/phone/processed/${bwID} ${subID}/mri/{struc,func,qc,processed,clin}
# Mirror each GENERAL raw phone datatype; surveyAnswers additionally
# gets one subdirectory per individual survey.
for gen_data in `ls -1 ${phx}/GENERAL/${study}/${subID}/phone/raw/${bwID}`
do
if [ ${gen_data} = "surveyAnswers" ]
then
mkdir ${subID}/phone/processed/${bwID}/${gen_data}
for survey in `ls -1 ${phx}/GENERAL/${study}/${subID}/phone/raw/${bwID}/${gen_data}`
do
mkdir ${subID}/phone/processed/${bwID}/${gen_data}/${survey}
done
else
mkdir ${subID}/phone/processed/${bwID}/${gen_data}
fi
done
# Mirror the PROTECTED raw phone datatypes as well.
for pro_data in `ls -1 ${phx}/PROTECTED/${study}/${subID}/phone/raw/${bwID}`
do
mkdir ${subID}/phone/processed/${bwID}/${pro_data}
done
else
echo "${subID} does not have a BEIWE_ID listed in the ${phx}/GENERAL/${study}/${study}.csv file. Cannot be run"
exit 1
fi
done
# Warn if the hidden demographics CSV (used to populate report headers) is absent.
if [ ! -e ${out_home}/.${study}_demos.csv ]
then
echo "WARNING:"
echo "${out_home}/.${study}_demos.csv does not exist."
echo "That file will need to be created for header to populate properly."
echo "At minimum, the file should contain the following fields exactly as shown: subID,age,gender,race,diagnosis"
echo ""
echo ""
echo ""
fi
# Warn if the per-user decrypt-passphrase file is absent, and offer to create it.
if [ ! -e ~/.${study}_auth ]
then
echo "WARNING:"
echo "~/.${study}_auth file does not exist"
echo "~/.${study}_auth must exist for protected datatype processing to complete."
echo "~/.${study}_auth should be a single line containing the ${study} study decrypt passcode"
echo "Only `whoami` should have read perms for the file, obviously."
printf "Would you like to create the ~/.${study}_auth file now? y/n: "
read answer
if [ ${answer} = "y" ]
then
printf "Please enter decrypt pass and press enter:"
read pass
echo ${pass} > ~/.${study}_auth
chmod 700 ~/.${study}_auth
else
echo "OK, please be sure to create later."
fi
fi
echo "${out_home} has been configured as the output directory for the ${study} study DP reports"
echo "Please take actions to address any warning generated during config."
exit 0
| true
|
dd79f86969ec07a7d1b7c2c840ba8e2fa8f304a2
|
Shell
|
JimmWizzy/lancom-router-nagios-plugin
|
/check_snmp_router_temperature.sh
|
UTF-8
| 4,275
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# check_snmp_router_temperature
# Description : Checks Temperature of Lancom Router
# The MIT License (MIT)
#
# Copyright (c) 2015, Roland Rickborn (roland.rickborn@exensio.de)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Revision history:
# 2015-07-01 Created
# Based on the work of Yoann LAMY
# (https://exchange.nagios.org/directory/Owner/Yoann/1)
# ---------------------------------------------------------------------------
# External commands used by this plugin
CMD_BASENAME=`which basename`
CMD_SNMPWALK=`which snmpwalk`
CMD_AWK=`which awk`
# Script name
SCRIPTNAME=`$CMD_BASENAME $0`
# Version
VERSION="1.0"
# Nagios plugin return codes
STATE_OK=0
STATE_WARNING=1
STATE_CRITICAL=2
STATE_UNKNOWN=3
# LCOS OIDs: current temperature and the device's configured limits
OID_TEMP="LCOS-MIB::lcsStatusHardwareInfoTemperatureDegrees"
OID_TEMPMAX="LCOS-MIB::lcsSetupTemperatureMonitorUpperLimitDegrees"
OID_TEMPMIN="LCOS-MIB::lcsSetupTemperatureMonitorLowerLimitDegrees"
# Default variables
DESCRIPTION="Unknown"
STATE=$STATE_UNKNOWN
# Default options
COMMUNITY="public"
HOSTNAME="192.168.0.1"
WARNING=60
CRITICAL=65

# Print brief usage information.
# NOTE(review): the example still says "check_snmp_cpu" — copied from a
# sibling plugin; confirm the intended name.
print_usage() {
    echo "Usage: ./check_snmp_cpu -H 192.168.0.1 -C public -w 60 -c 65"
    echo "  $SCRIPTNAME -H ADDRESS"
    echo "  $SCRIPTNAME -C STRING"
    echo "  $SCRIPTNAME -w INTEGER"
    echo "  $SCRIPTNAME -c INTEGER"
    echo "  $SCRIPTNAME -h"
    echo "  $SCRIPTNAME -V"
}

# Print version and license notice.
print_version() {
    echo $SCRIPTNAME version $VERSION
    echo ""
    echo "This nagios plugin comes with ABSOLUTELY NO WARRANTY."
    echo "You may redistribute copies of the plugin under the terms of the MIT License."
}
# Fixed: the closing brace above was previously fused onto the last echo
# line ("...License."}), which printed a literal '}' and left print_version
# unterminated, making the whole script a syntax error at end of file.

# Print full help: version, usage and option descriptions.
print_help() {
    print_version
    echo ""
    print_usage
    echo ""
    echo "Checks Temperature of Lancom Router"
    echo ""
    echo "-H ADDRESS"
    echo "   Name or IP address of host (default: 192.168.0.1)"
    echo "-C STRING"
    echo "   Community name for the host SNMP agent (default: public)"
    echo "-w INTEGER"
    echo "   Warning level for memory usage in percent (default: 60)"
    echo "-c INTEGER"
    echo "   Critical level for memory usage in percent (default: 65)"
    echo "-h"
    echo "   Print this help screen"
    echo "-V"
    echo "   Print version and license information"
    echo ""
    echo ""
}

# Option processing
while getopts H:C:w:c:hV OPT
do
    case $OPT in
        H) HOSTNAME="$OPTARG" ;;
        C) COMMUNITY="$OPTARG" ;;
        w) WARNING=$OPTARG ;;
        c) CRITICAL=$OPTARG ;;
        h)
            print_help
            exit $STATE_UNKNOWN
            ;;
        V)
            print_version
            exit $STATE_UNKNOWN
            ;;
    esac
done

# Query current temperature and device limits (degrees Celsius); the value
# is the 4th whitespace-separated field of the snmpwalk output line.
TEMP=`$CMD_SNMPWALK -t 2 -r 2 -v 1 -c $COMMUNITY $HOSTNAME $OID_TEMP | $CMD_AWK '{ print $4 }'`
TEMPMIN=`$CMD_SNMPWALK -t 2 -r 2 -v 1 -c $COMMUNITY $HOSTNAME $OID_TEMPMIN | $CMD_AWK '{ print $4 }'`
TEMPMAX=`$CMD_SNMPWALK -t 2 -r 2 -v 1 -c $COMMUNITY $HOSTNAME $OID_TEMPMAX | $CMD_AWK '{ print $4 }'`

# Reject thresholds above the device's own configured upper limit.
if [ $WARNING -gt $TEMPMAX ] || [ $CRITICAL -gt $TEMPMAX ]; then
    echo "Value not allowed"
    exit $STATE_UNKNOWN
fi

# Classify the reading. A threshold of 0 disables that check.
# Fixed: the elif previously repeated the CRITICAL comparison (copy-paste),
# so STATE_WARNING could never be returned.
if [ $WARNING != 0 ] || [ $CRITICAL != 0 ]; then
    if [ $TEMP -gt $CRITICAL ] && [ $CRITICAL != 0 ]; then
        STATE=$STATE_CRITICAL
    elif [ $TEMP -gt $WARNING ] && [ $WARNING != 0 ]; then
        STATE=$STATE_WARNING
    else
        STATE=$STATE_OK
    fi
fi

# Nagios plugin output: human text plus perfdata after the '|'.
DESCRIPTION="Temperature : $TEMP°C | temperature=$TEMP;$WARNING;$CRITICAL;0"
echo $DESCRIPTION
exit $STATE
| true
|
5ce9c00e13b7fbb596685bb92d46dd0c843d7a38
|
Shell
|
yingjun-wu/apus
|
/APUS/eval/test2.sh
|
UTF-8
| 1,609
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Benchmark an SSDB replica group under competing network traffic.
# Starts ssdb-server (with the APUS interposer preloaded) on each remote
# replica over ssh, runs an iperf server/client pair on the first two
# replicas to consume a fraction of the bandwidth, then runs ssdb-bench
# locally and records the result. Requires $RDMA_ROOT in the environment.

# Print usage and abort.
function error_exit() {
	echo "Usage: test2.sh -p<p> <list of username@server addresses>
	p: the percentage of bandwidth for other traffic." >&2;
	exit 1
}
# Need at least the -p option plus two replica addresses.
if [[ $# -lt 3 ]]; then
    error_exit
fi
while getopts ":p:" opt; do
  case $opt in
    p) trafficPer="$OPTARG"
    ;;
    \?) error_exit
    ;;
  esac
done
if [[ -z "$trafficPer" ]]; then
    error_exit
fi
# Replicas = all positional args after the -p option pair.
numberReplica=`expr $# - 1`
# iperf payload size derived from the requested percentage
# (0.64 scaling factor — presumably tuned for the link; TODO confirm).
trafficPara=`echo "$trafficPer * 0.64" | bc`
APP_DIR=$RDMA_ROOT/apps/ssdb/ssdb-master
# Remote prep: kill stale processes, set the group size in the config, wipe old DBs.
REMOTE_PREPARE_COMMAND="killall -9 iperf; killall -9 ssdb-server; sed -i '3c group_size = $numberReplica;' $RDMA_ROOT/RDMA/target/nodes.local.cfg; rm -rf DB_node_test*"
POST_COMMAND="killall -9 iperf"
GEN_TRAFFIC_SERVER="iperf -s"
GEN_TRAFFIC_CLIENT="iperf -c 10.22.1.1 -l $trafficPara -t 9999"
LOCAL_RUN_COMMAND="$APP_DIR/tools/ssdb-bench 127.0.0.1 8888 10000 50"
# Launch phase: arg index i starts at 2 (first replica address), node_id j at 0.
i=2
j=0
while [ "$i" -le "$#" ]; do
    eval "addr=\${$i}"
    ssh -f $addr $REMOTE_PREPARE_COMMAND
    sleep 2
    # First replica hosts the iperf server, second runs the iperf client.
    if [ "$i" -eq "2" ]; then
        ssh -f $addr $GEN_TRAFFIC_SERVER
    fi
    if [ "$i" -eq "3" ]; then
        ssh -f $addr $GEN_TRAFFIC_CLIENT
    fi
    # Start ssdb-server with the APUS interposer preloaded and a per-node id.
    REMOTE_RUN_COMMAND="env node_id=$j LD_LIBRARY_PATH=$RDMA_ROOT/RDMA/.local/lib cfg_path=$RDMA_ROOT/RDMA/target/nodes.local.cfg LD_PRELOAD=$RDMA_ROOT/RDMA/target/interpose.so $APP_DIR/ssdb-server $APP_DIR/ssdb.conf"
    ssh -f $addr $REMOTE_RUN_COMMAND
    i=$((i + 1))
    j=$((j + 1))
    sleep 2
done
# Give the group time to settle, then benchmark and capture all output.
sleep 5
$LOCAL_RUN_COMMAND 1>$RDMA_ROOT/eval/traffic_test_result.dat 2>&1
# Cleanup phase: stop iperf on every replica.
i=2
j=0
while [ "$i" -le "$#" ]; do
    eval "addr=\${$i}"
    ssh -f $addr $POST_COMMAND
    i=$((i + 1))
    j=$((j + 1))
    sleep 2
done
| true
|
49eedf4d917e8705fe82f8f65361b909a1d28f31
|
Shell
|
RaymiiOrg/raymiiorg.github.io
|
/inc/software/easyconverter-v4e.sh
|
UTF-8
| 3,454
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
####################################
## Script made by Raymii.org ##
####################################
# V0.4 new features:
# Extension select before folder.
# Filter on extension
# Check if files exist in folder.
# Added .flv to input format
# Added .aac to input/output format
# Quality selector for ffmpeg
# removed folder check.
# added nautilus open when converting finished.
#
# Zenity-driven GUI wrapper around ffmpeg that batch-converts every audio
# file of one format in a chosen folder into another format.

# Absolute path of this script, used by the error handler to restart it.
padself=`pwd`/`basename $0`

# On error: offer to quit or relaunch the script in the background.
function catch_errors() {
    zenity --question --text="There is something wrong. Do you want to quit or restart the app?" --cancel-label="Quit" --ok-label="Restart";
    [ "$?" = "0" ] && ( bash -c $padself & );
    exit 0;
}

# Lightweight error trap used during conversion: just log a timestamp.
# Fixed: the format was '+%h:%m:%s' (month/month/epoch); H:M:S was intended.
function func_error2() {
    echo `date +%H:%M:%S`
}

trap catch_errors ERR;
mapvraag=0
titel="EasyConverter v0.4e"
KBs=128;
FGOED=1;
zenity --info --text="Hi, I'm <b>$titel</b> \nI will help you with converting files to another format. \n \nIn the next window, please select the format of the files. After that please select the folder which has the audio files." --title="$titel"
# Ask for the source format first, then a folder filtered to that extension.
vanform=$(zenity --list --title="Select a file extension" --height=270 --text "Which filetype do you want to convert?" --radiolist --column "Choose" --column "Original Format" TRUE flac FALSE ogg FALSE wav FALSE mp3 FALSE aac FALSE flv);
mapvraag=$(zenity --file-selection --directory --title="Please select a folder with $vanform files." --file-filter="*.$vanform" );
# Verify that at least one file of the chosen format exists in the folder.
pushd "$mapvraag"
for f in ./*.$vanform; do
    test -f "$f" || continue
    echo "$f bestaat, mooi zo.";
    FGOED=2;
done
popd
# Fixed: these two aborts used 'return 1', which is invalid at script top
# level (only valid inside a function / sourced script), so the script
# carried on instead of stopping. 'exit 1' is what was intended.
if [ $FGOED == 1 ]; then
    zenity --error --text="Oops, the filetype you selected is not found in the folder you selected. \nPlease try again." --title="$titel";
    exit 1;
fi
formaat=$(zenity --list --height=270 --text "And what should they become?" --radiolist --column "Choose" --column "Converted Format" TRUE mp3 FALSE ogg FALSE wav FALSE flac FALSE aac);
if [ $vanform = $formaat ]; then
    zenity --error --text="You choose the same input and output format \nI can't convert the files if you do that. \nLets restart." --title="$titel"
    exit 1
fi
# Ask for the output bitrate and confirm before converting.
KBs=$(zenity --list --height=380 --text "What output quality you want me to tell ffmpeg?\n \n<i>64k</i>: \nSmall files\nLow quality \n<i>320k</i>: \nBig files\nHigh quality.)" --radiolist --column "Choose" --column "kbps" TRUE 64 FALSE 96 FALSE 128 FALSE 196 FALSE 256 FALSE 320);
finalcheck=$(zenity --question --cancel-label="Don't" --ok-label="Lets Rock Baby!" --text="We're going to convert all files in: <b>$mapvraag</b> to <b><i>$formaat</i></b> at <i><b>$KBs</b> kb/s</i>. Last check, Do or Don't?" --title="$titel")
trap func_error2 ERR;
# Convert each file into <folder>/converted/<format>/, showing progress.
pushd "$mapvraag"
for i in *.$vanform; do
    mkdir -p "$mapvraag/converted/$formaat/"
    ffmpeg -ab $KBs"k" -y -i "$i" "$mapvraag/converted/$formaat/$i.$formaat" 2>&1 | zenity --progress --text="Converting: <b>$i</b> from <b>$vanform</b> to <b>$formaat</b> at <b>$KBs</b> kb/s" --title="$titel" --auto-close --pulsate
    echo $i gedaan
done
# Offer to open the output folder in Nautilus.
zenity --question --cancel-label="Nope, just quit." --ok-label="Yes, open it." --text="Done! \nI've saved the converted files in this folder: <b>$mapvraag/converted/$formaat</b>. \n \n Would you like me to try and open Nautilus in the output folder? \n \n \nThis little script is made by: <b>Raymii.org</b>." --title="$titel";
[ "$?" = "0" ] && nautilus --no-desktop "./converted/$formaat";
popd
echo Done
| true
|
e5d3c9fd30e551ec4b60480fe48fd8d8267897da
|
Shell
|
chicha1986/gfs_verif
|
/fit2obs/scripts/timeship.newb
|
UTF-8
| 2,367
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/ksh
# Generate GrADS ship-fit time-series plots for the date window
# [sdate, edate] (00z) and [sdate12, edate12] (12z), copy the resulting
# PNGs to $localdir and optionally scp them to the web host.
# Expects sdate/edate/sdate12/edate12 (YYYYMMDDHH), pdir, exp, ctldir,
# namstr, localdir, web* and the GrADS/script paths in the environment.
set -x
export list=$listvar
#echo "jn is $jn"
#echo "PSLOT is $PSLOT"
#echo "RUNLOG is $RUNLOG"
#echo "yyyy is $yyyy"
#echo "mm is $mm"
#echo "datefix is $datefix"
#echo "BASEDIR is $BASEDIR"
#echo "SCRIPTSDIR is $SCRIPTSDIR"
#echo "ACCOUNT is $ACCOUNT"
#echo "ARCDIR is $ARCDIR"
#export SHDIR=${SHDIR:-$BASEDIR/bin}
#export PBEG=${PBEG:-$SHDIR/pbeg}
#export PERR=${PERR:-$SHDIR/perr}
#export PLOG=${PLOG:-$SHDIR/plog}
#export PEND=${PEND:-$SHDIR/pend}
export grads=$GRADSBIN/grads
export convert=$IMGCONVERT
export logofile=${logofile:-$GSCRIPTS/noaa-nws-logo.jpg}
#jnjob=$jn
#$PLOG "$RUNLOG" OK "$jn begun for $PSLOT"
# Accumulated return code; any non-zero grads run marks the job failed.
rc=0
#------------------------------------------------------------
# Stage toggles: clear plot dir, plot means, make plots, png output.
del=1
mean=1
plots=1
png=1
if [ ! -d $pdir ] ; then
 mkdir -p $pdir
fi
cd $pdir
# Optionally wipe any previous plot output.
if [ $del -eq 1 ] ; then
 set +x
 /bin/rm $pdir/*
 set -x
fi
# Convert YYYYMMDDHH end date into GrADS time format (e.g. 00z01jan2015).
yy=`echo $edate | cut -c1-4`
mm=`echo $edate | cut -c5-6`
dd=`echo $edate | cut -c7-8`
hh=`echo $edate | cut -c9-10`
mon=`$SCRIPTS/cmon.sh $mm`
te00=${hh}z${dd}${mon}${yy}
echo "te00 00z plot end date $te00"
yy=`echo $sdate | cut -c1-4`
mm=`echo $sdate | cut -c5-6`
dd=`echo $sdate | cut -c7-8`
hh=`echo $sdate | cut -c9-10`
mon=`$SCRIPTS/cmon.sh $mm`
ts00=${hh}z${dd}${mon}${yy}
echo "ts00 00z plot start date $ts00"
# Same conversion for the 12z window.
yy=`echo $edate12 | cut -c1-4`
mm=`echo $edate12 | cut -c5-6`
dd=`echo $edate12 | cut -c7-8`
hh=`echo $edate12 | cut -c9-10`
mon=`$SCRIPTS/cmon.sh $mm`
te12=${hh}z${dd}${mon}${yy}
echo "te12 12z plot end date $te12"
yy=`echo $sdate12 | cut -c1-4`
mm=`echo $sdate12 | cut -c5-6`
dd=`echo $sdate12 | cut -c7-8`
hh=`echo $sdate12 | cut -c9-10`
mon=`$SCRIPTS/cmon.sh $mm`
ts12=${hh}z${dd}${mon}${yy}
echo "ts12 12z plot start date $ts12"
# Run the two GrADS plotting scripts (time series and table variants).
if [ $plots -eq 1 ] ; then
cd $pdir
$grads -lbc "run $GSCRIPTS/shiptb.newb.gs $ts00 $te00 $ts12 $te12 $pdir $GSCRIPTS $exp $mean $ctldir $namstr 700 650"
((rc+=$?))
$grads -pbc "run $GSCRIPTS/shiptab.newb.gs $ts00 $te00 $ts12 $te12 $pdir $GSCRIPTS $exp $mean $ctldir $namstr 650 700"
((rc+=$?))
fi
cp *.ship.png $localdir # copy fits to web dir
# Optionally push the images to the web server.
if [ $web -eq 1 ] ; then
echo "webdir is $webdir"
scp *.ship.png ${webid}@${webmch}:${webdir}/.
fi
#-----------------------------------------------------
# Exit gracefully
if [[ $rc -ne 0 ]] ; then
 #$PLOG "$RUNLOG" ERROR "$jnjob failed for $PSLOT"
 exit 1
else
 exit 0
 #$PLOG "$RUNLOG" OK "$jnjob ended for $PSLOT"
fi
| true
|
a4bc800e81f4323eb9f0561d51ddb70f0091b6a5
|
Shell
|
philipbel/dotfiles
|
/zsh.d/99-cache.zsh
|
UTF-8
| 251
| 2.890625
| 3
|
[] |
no_license
|
# Put the last existing ccache symlink directory (if any) at the front of
# PATH so compiler invocations go through ccache, and pin the cache dir.
_CCACHE_PATHS=(/usr/lib/ccache /usr/lib64/ccache /usr/local/opt/ccache/libexec)
_CCACHE_PATH=""
# "${arr[@]}" expands every element in both zsh and bash; the previous
# unbraced $_CCACHE_PATHS only worked in zsh.
for _path in "${_CCACHE_PATHS[@]}"; do
    if [ -d "$_path" ]; then
        _CCACHE_PATH=$_path
    fi
done
# Only prepend when a directory was found; unconditionally prepending an
# empty value produced "PATH=:...", i.e. the current directory in PATH.
if [ -n "$_CCACHE_PATH" ]; then
    export PATH=$_CCACHE_PATH:$PATH
fi
export CCACHE_DIR=/var/tmp/ccache
| true
|
fd7a39cdea3fa36434896683770ae82e69003c09
|
Shell
|
tyr2014/cuba
|
/_misc/scripts/cron.d/daily_mongodump.sh
|
UTF-8
| 979
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Cron job: dump the "toureet" MongoDB database into a per-day/per-hour
# directory, maintain a "dump_latest" symlink, and (when invoked with
# "rotate") archive twice a month and prune old dumps.
BackupDirBase=/var/lib/mongodb/dbdump
ArchiveDir="$BackupDirBase/archive"
LogFile="$BackupDirBase/dump.log"
Database="toureet"
# The 04:00 run is labelled "midnight"; all other hours use the hour number.
[ $(date +%H) -eq 04 ] && DirName="midnight" || DirName="$(date +%H)"
BackupDir="$BackupDirBase/dump_$(date +%m%d)/$DirName"
echo -e "\n### $(date) ###" >> "$LogFile"
mkdir -p "$BackupDir"
mongodump --db "$Database" --out "$BackupDir" >> "$LogFile" 2>&1
# Repoint the "latest" symlink at this dump (-T: treat target as a file).
ln -sf -T "$BackupDir" "$BackupDirBase/dump_latest"
case $1 in
	rotate)
		## bi-monthly archive
		# On the 1st and 16th, 7z the current dump unless already archived today.
		Archive="$ArchiveDir/$(date +%Y%m%d).7z"
		[[ $(date +%d) -eq 1 || $(date +%d) -eq 16 ]] && [ ! -f "$Archive" ] && 7z a "$Archive" "$BackupDir" >> "$LogFile"
		## prune
		# After 3 days drop the hourly subdirs (two-char names), keeping "midnight";
		# after 2 weeks drop the whole day's directory.
		ExpiringDir="$BackupDirBase/dump_$(date +%m%d -d '3 days ago')"
		[ -d "$ExpiringDir" ] && find "$ExpiringDir" -name "??" -exec rm -r {} \;
		ExpiringDir="$BackupDirBase/dump_$(date +%m%d -d '2 weeks ago')"
		[ -d "$ExpiringDir" ] && rm -r "$ExpiringDir"
	;;
esac
echo -e "\n### $(date) ###" >> "$LogFile"
# No-op so the script's exit status is 0 even if the last test above failed.
:
| true
|
ccf30ff5db8fb766d4fab6585bcf912496a60f4b
|
Shell
|
tzhang2014/redirectify
|
/package.sh
|
UTF-8
| 314
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build the Firefox extension package: read the version from the manifest
# and zip the src/ tree into redirectify-<version>.xpi next to this script.
set -e

EXTENSION=redirectify
SRC=src

# Set working directory to location of this script.
# Fixed: the command substitutions were unquoted, which broke on
# paths containing spaces. (readlink -m is GNU-specific.)
cd "$(dirname "$(readlink -m "$0")")"

# Extract the version string from the manifest's "version" field.
VERSION=$(grep '"version":' src/manifest.json | sed 's/.*"\([0-9.]*\)".*/\1/')
OUT="$EXTENSION"-"$VERSION".xpi

rm -f "$OUT"
cd "$SRC"
# -FS syncs the archive with the tree; note '*' skips dotfiles by design.
zip -r -FS ../"$OUT" *
echo Created "$OUT"
| true
|
45a5e67fd11134211f55259b24ace35265fbd06f
|
Shell
|
cachix/install-nix-action
|
/install-nix.sh
|
UTF-8
| 2,900
| 3.640625
| 4
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# GitHub Actions helper: install Nix (single- or multi-user), write its
# configuration from the action's INPUT_* variables, and export the
# resulting paths/env to subsequent workflow steps.
set -euo pipefail

# Idempotence guard: do nothing if Nix is already on PATH.
if nix_path="$(type -p nix)" ; then
  echo "Aborting: Nix is already installed at ${nix_path}"
  exit
fi

# GitHub command to put the following log messages into a group which is collapsed by default
echo "::group::Installing Nix"

# Create a temporary workdir
workdir=$(mktemp -d)
trap 'rm -rf "$workdir"' EXIT

# Configure Nix
# Accumulate nix.conf lines in the workdir; handed to the installer below.
add_config() {
  echo "$1" >> "$workdir/nix.conf"
}
add_config "show-trace = true"
# Set jobs to number of cores
add_config "max-jobs = auto"
# macOS needs an explicit CA bundle for the daemon.
if [[ $OSTYPE =~ darwin ]]; then
  add_config "ssl-cert-file = /etc/ssl/cert.pem"
fi
# Allow binary caches for user
add_config "trusted-users = root ${USER:-}"
# Add github access token
# Prefer the action input; fall back to the workflow's GITHUB_TOKEN.
if [[ -n "${INPUT_GITHUB_ACCESS_TOKEN:-}" ]]; then
  add_config "access-tokens = github.com=$INPUT_GITHUB_ACCESS_TOKEN"
elif [[ -n "${GITHUB_TOKEN:-}" ]]; then
  add_config "access-tokens = github.com=$GITHUB_TOKEN"
fi
# Append extra nix configuration if provided
if [[ -n "${INPUT_EXTRA_NIX_CONFIG:-}" ]]; then
  add_config "$INPUT_EXTRA_NIX_CONFIG"
fi
# Enable flakes by default unless the user configured experimental-features.
if [[ ! $INPUT_EXTRA_NIX_CONFIG =~ "experimental-features" ]]; then
  add_config "experimental-features = nix-command flakes"
fi

# Nix installer flags
installer_options=(
  --no-channel-add
  --darwin-use-unencrypted-nix-store-volume
  --nix-extra-conf-file "$workdir/nix.conf"
)

# only use the nix-daemon settings if on darwin (which get ignored) or systemd is supported
if [[ (! $INPUT_INSTALL_OPTIONS =~ "--no-daemon") && ($OSTYPE =~ darwin || -e /run/systemd/system) ]]; then
  installer_options+=(
    --daemon
    --daemon-user-count "$(python3 -c 'import multiprocessing as mp; print(mp.cpu_count() * 2)')"
  )
else
  # "fix" the following error when running nix*
  # error: the group 'nixbld' specified in 'build-users-group' does not exist
  add_config "build-users-group ="
  sudo mkdir -p /etc/nix
  sudo chmod 0755 /etc/nix
  sudo cp "$workdir/nix.conf" /etc/nix/nix.conf
fi

# User-supplied flags take precedence (they come first on the command line).
if [[ -n "${INPUT_INSTALL_OPTIONS:-}" ]]; then
  IFS=' ' read -r -a extra_installer_options <<< "$INPUT_INSTALL_OPTIONS"
  installer_options=("${extra_installer_options[@]}" "${installer_options[@]}")
fi

echo "installer options: ${installer_options[*]}"

# There is --retry-on-errors, but only newer curl versions support that
curl_retries=5
while ! curl -sS -o "$workdir/install" -v --fail -L "${INPUT_INSTALL_URL:-https://releases.nixos.org/nix/nix-2.16.1/install}"
do
  sleep 1
  ((curl_retries--))
  if [[ $curl_retries -le 0 ]]; then
    echo "curl retries failed" >&2
    exit 1
  fi
done

sh "$workdir/install" "${installer_options[@]}"

# Set paths
# Expose Nix's bin dirs to later workflow steps via GITHUB_PATH.
echo "/nix/var/nix/profiles/default/bin" >> "$GITHUB_PATH"
# new path for nix 2.14
echo "$HOME/.nix-profile/bin" >> "$GITHUB_PATH"

if [[ -n "${INPUT_NIX_PATH:-}" ]]; then
  echo "NIX_PATH=${INPUT_NIX_PATH}" >> "$GITHUB_ENV"
fi

# Close the log message group which was opened above
echo "::endgroup::"
| true
|
826332d28fb25550451e66ea5e7dc099c4a294c9
|
Shell
|
bl419cam/sql_practice
|
/bash/01_install_packages.sh
|
UTF-8
| 951
| 2.875
| 3
|
[] |
no_license
|
# macOS (Homebrew) environment setup for the SQL practice project:
# Java/Scala/Spark/Hadoop toolchain, CSV utilities, and PostgreSQL.
# Run interactively; several steps prompt via brew/cask.
# upgrade all brew packages & update homebrew
brew upgrade && brew update
# install tool used to download a local copy of a file from the Internet
brew install wget
# install open source implementation of the Java Platform, Standard Edition
brew cask install adoptopenjdk
# install the scala programming language
brew install scala
# install java8
brew cask install homebrew/cask-versions/adoptopenjdk8
# install apache spark
brew install apache-spark
# install hadoop in case you wish to use other types of clusters
brew install hadoop
# install suite of command-line tools for converting to and working with CSV
brew install csvkit
# install csvs-to-sqlite to convert csv files to SQLite database
pip3 install csvs-to-sqlite
# install PostgreSQL
brew install postgresql
# ensure the PostgreSQL is running
brew services start postgresql
# install pgloader to transform SQLite database to a PostgreSQL database
# (--HEAD builds from the latest upstream commit rather than a release)
brew install --HEAD pgloader
| true
|
543b218fc1854b42e301f9a2f7f187d18688a400
|
Shell
|
Stuartlab-UCSC/cell-atlas-env
|
/bin/loop_over_cluster.exp.starter.sh
|
UTF-8
| 614
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Walk every per-cluster directory under CLUSTERDIR and print each
# contained file's name and line count. The attribute-filtering step at
# the bottom is intentionally left disabled, as in the original workflow.

ATTR_FILTERED_SCR=/projects/sysbio/users/cellAtlas/bin/attributes/filter.py
CLUSTERDIR=/projects/sysbio/users/cellAtlas/data/cluster.exp
OUTPUTDIR=/projects/sysbio/users/cellAtlas/data/cluster.exp.less_1000

# Fixed: $dir/$filename were previously unquoted, which broke on any
# path containing whitespace.
for dir in "${CLUSTERDIR}"/*; do
    echo "$dir"
    for filename in "$dir"/*; do
        echo "$(basename "$filename")"
        echo "$(wc -l "$filename")"
    done
    #OUT=${OUTPUTDIR}/$(basename $filename)
    #OUT=${OUT%.*}.attr.filtered.tab
    #KEEP=${KEEPDIR}/$(basename $filename)
    #KEEP=${KEEP%.*}.attr.keep
    #echo $KEEP
    #echo $OUT
    #python3 $ATTR_FILTERED_SCR -i $filename -o $OUT -k $KEEP
done
| true
|
7b4f1033225bdedf9cef8db076ec22d0d66da324
|
Shell
|
nighthawk149/fvs318g-cfw
|
/ramdisk/root/tc.sh
|
UTF-8
| 2,374
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
# Configure Linux traffic-control (tc) priority queues on the wireless
# interfaces: a 4-band prio qdisc per interface, with u32 filters mapping
# IP TOS byte values onto the bands (lower band number = higher priority).
# Toggle: set to 0 to skip all tc configuration.
CONFIGURE_TC=1
# Number of radio bands; 2 adds the same queueing setup on wlan1.
WLAN_NUM_BANDS=1
if expr $CONFIGURE_TC = 1
then
echo Configuring Linux QoS Queues
##tc qdisc del dev wlan0 root
# Root prio qdisc with 4 bands; each band gets its own pfifo with a
# per-band packet limit (band 1 smallest/highest priority traffic).
tc qdisc add dev wlan0 root handle 1: prio bands 4
tc qdisc add dev wlan0 parent 1:1 handle 10: pfifo limit 100
tc qdisc add dev wlan0 parent 1:2 handle 20: pfifo limit 1100
tc qdisc add dev wlan0 parent 1:3 handle 30: pfifo limit 800
tc qdisc add dev wlan0 parent 1:4 handle 40: pfifo limit 50
# Map TOS byte values to bands: 224/192 -> 1:1, 160/128 -> 1:2,
# 0/96 -> 1:3, 32/64 -> 1:4 (exact 0xff match on the TOS field).
tc filter add dev wlan0 parent 1:0 protocol ip prio 1 u32 match ip tos 224 0xff flowid 1:1
tc filter add dev wlan0 parent 1:0 protocol ip prio 1 u32 match ip tos 192 0xff flowid 1:1
tc filter add dev wlan0 parent 1:0 protocol ip prio 2 u32 match ip tos 160 0xff flowid 1:2
tc filter add dev wlan0 parent 1:0 protocol ip prio 2 u32 match ip tos 128 0xff flowid 1:2
tc filter add dev wlan0 parent 1:0 protocol ip prio 3 u32 match ip tos 0 0xff flowid 1:3
tc filter add dev wlan0 parent 1:0 protocol ip prio 3 u32 match ip tos 96 0xff flowid 1:3
tc filter add dev wlan0 parent 1:0 protocol ip prio 4 u32 match ip tos 32 0xff flowid 1:4
tc filter add dev wlan0 parent 1:0 protocol ip prio 4 u32 match ip tos 64 0xff flowid 1:4
# Second radio: identical setup on wlan1 under root handle 2:.
if expr $WLAN_NUM_BANDS = 2
then
##tc qdisc del dev wlan1 root
tc qdisc add dev wlan1 root handle 2: prio bands 4
# TODO: Do you need to give different handles for the wlan1 tc queues?
tc qdisc add dev wlan1 parent 2:1 handle 10: pfifo limit 100
tc qdisc add dev wlan1 parent 2:2 handle 20: pfifo limit 1100
tc qdisc add dev wlan1 parent 2:3 handle 30: pfifo limit 800
tc qdisc add dev wlan1 parent 2:4 handle 40: pfifo limit 50
tc filter add dev wlan1 parent 2:0 protocol ip prio 1 u32 match ip tos 224 0xff flowid 2:1
tc filter add dev wlan1 parent 2:0 protocol ip prio 1 u32 match ip tos 192 0xff flowid 2:1
tc filter add dev wlan1 parent 2:0 protocol ip prio 2 u32 match ip tos 160 0xff flowid 2:2
tc filter add dev wlan1 parent 2:0 protocol ip prio 2 u32 match ip tos 128 0xff flowid 2:2
tc filter add dev wlan1 parent 2:0 protocol ip prio 3 u32 match ip tos 0 0xff flowid 2:3
tc filter add dev wlan1 parent 2:0 protocol ip prio 3 u32 match ip tos 96 0xff flowid 2:3
tc filter add dev wlan1 parent 2:0 protocol ip prio 4 u32 match ip tos 32 0xff flowid 2:4
tc filter add dev wlan1 parent 2:0 protocol ip prio 4 u32 match ip tos 64 0xff flowid 2:4
fi
fi
| true
|
cbb5aceb2b045183760228e13bcc75b104b7e7a2
|
Shell
|
liuzhisheng1226/HipMer
|
/.edison_deploy/build.sh
|
UTF-8
| 1,378
| 3.4375
| 3
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
#!/bin/bash -l
# Configure and build HipMer out-of-tree: validate/refresh the CMake
# build dir in $BUILD, run cmake once (logged), then parallel make.
# Requires BUILD and PREFIX in the environment (env.sh supplies defaults).
set -e
[ -n "${HIPMER_BUILD_ENV}" ] || . $(dirname $0)/env.sh
[ -n "${BUILD}" ]
[ -n "${PREFIX}" ]
SRC=$(pwd)
if [ -e "${BUILD}" ]
then
  cmakecache=${BUILD}/CMakeCache.txt
  if [ -f ${cmakecache} ]
  then
    # Compare the cached source dir with the current one: append the
    # expected line and uniq — one line left means they match.
    # Fixed: this previously grepped a hard-coded
    # /tmp/build-regan-edison/CMakeCache.txt path instead of ${cmakecache},
    # so the staleness check was wrong on any other machine/build dir.
    testsame=$( ( grep HipMer_SOURCE_DIR ${cmakecache} ; echo "HipMer_SOURCE_DIR:STATIC=${SRC}" ) | uniq | wc -l)
    if [ "${testsame}" != "1" ]
    then
      echo "Source dirs do not match. performing a DIST_CLEAN build"
      DIST_CLEAN=1
    fi
  fi
  # DIST_CLEAN: wipe build dir and install prefix; CLEAN: just make clean.
  if [ -n "${DIST_CLEAN}" ]
  then
    chmod -R u+w ${BUILD}
    rm -r ${BUILD}
    mkdir ${BUILD}
    rm -rf ${PREFIX}
  elif [ -n "${CLEAN}" ]
  then
    (cd ${BUILD} ; make clean )
  fi
else
  mkdir ${BUILD}
fi
BUILD_TYPE=${BUILD_TYPE:=Release}
cd ${BUILD}
# On Lustre filesystems, use a single stripe for the build dir (best effort).
if [ -x /usr/bin/lfs ]
then
  /usr/bin/lfs setstripe -c 1 . || /bin/true
fi
export HDF5_ROOT=${HDF5_DIR}
cmakelog=${BUILD}/cmake.log
export TMPDIR=/tmp
# Run cmake only once: the log file doubles as a completion marker and is
# only moved into place when cmake succeeds.
if [ ! -f ${cmakelog} ]
then
  time cmake -DCMAKE_INSTALL_PREFIX=${PREFIX} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} ${HIPMER_BUILD_OPTS} ${SRC} 2>&1 | tee -a ${cmakelog}.tmp \
    && mv ${cmakelog}.tmp ${cmakelog}
fi
# Parallel build sized to the CPU count; on failure, retry serially with
# verbose output captured in make.err for diagnosis.
make_threads=${BUILD_THREADS:=$(lscpu |grep "^CPU(s):"|awk '{print $2}')}
echo Using $make_threads threads for the build
time make -j ${make_threads} || TMPDIR=/tmp make VERBOSE=1 2>&1 | tee make.err
| true
|
2c4b12a58f1f9f4a9319af3a61e12bb95dea01fc
|
Shell
|
rlucente-se-jboss/integration-demo
|
/install-rhdm.sh
|
UTF-8
| 3,728
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install Red Hat Decision Manager on EAP for the integration demo:
# unpack EAP (plus optional patch), deploy decision-central and the KIE
# server, create users, and wire up system properties and a PostgreSQL
# datasource via jboss-cli.
# PUSHD/POPD/ISOK and all *_DIR/VER_*/KIE_*/PG* variables come from
# demo.conf (presumably quiet pushd/popd and a status checker — confirm).
. $(dirname $0)/demo.conf

PUSHD $WORKDIR

echo
# Refuse to install over an existing EAP tree.
if [ -d "$JBOSS_HOME" ]
then
    echo "RHDM currently installed. Please remove it before attempting install."
    echo
    exit 1
fi

echo -n "Install EAP ..................... "
unzip -q $BINDIR/jboss-eap-$VER_DIST_EAP.zip
ISOK

# Apply the cumulative EAP patch only when a patch version is configured.
if [ -n "$VER_PATCH_EAP" ]
then
    echo -n "Patch EAP ....................... "
    $JBOSS_HOME/bin/jboss-cli.sh \
        --command="patch apply --override-all ${BINDIR}/jboss-eap-${VER_PATCH_EAP}-patch.zip" \
        &> /dev/null
    ISOK
fi

RHDM_DC_DEPLOY=rhdm-$VER_DIST_RHDM-decision-central-eap7-deployable
echo -n "Install RHDM decision-central ... "
unzip -qo $BINDIR/$RHDM_DC_DEPLOY.zip
ISOK

# Unpack the KIE server zip in a scratch dir, then deploy its war and
# security policy files into the EAP tree.
TMPDIR=tmp.$$
mkdir -p $TMPDIR
PUSHD $TMPDIR
echo -n "Extract RHDM KIE server ......... "
unzip -qo $BINDIR/rhdm-$VER_DIST_RHDM-kie-server-ee7.zip
ISOK
echo -n "Install KIE server war .......... "
cp -fr kie-server.war $JBOSS_HOME/standalone/deployments
# Marker file tells EAP's deployment scanner to deploy the exploded war.
touch $JBOSS_HOME/standalone/deployments/kie-server.war.dodeploy
ISOK
echo -n "Install security policy files ... "
cp -f SecurityPolicy/* $JBOSS_HOME/bin
ISOK
POPD
rm -fr $TMPDIR

echo -n "Create KIE user ................. "
$JBOSS_HOME/bin/add-user.sh -a -r ApplicationRealm -u "$KIE_USER" -p "$KIE_PASS" \
    -ro "$KIE_ROLES" --silent
ISOK

# System properties linking the KIE server to decision-central's controller.
cat > sysprops.cli <<END1
embed-server --server-config=standalone.xml
/system-property=org.kie.server.user:add(value="$KIE_USER")
/system-property=org.kie.server.pwd:add(value="$KIE_PASS")
/system-property=org.kie.server.controller.user:add(value="$KIE_USER")
/system-property=org.kie.server.controller.pwd:add(value="$KIE_PASS")
/system-property=org.kie.server.id:add(value="default-kieserver")
/system-property=org.kie.server.location:add(value="http://localhost:8080/kie-server/services/rest/server")
/system-property=org.kie.server.controller:add(value="http://localhost:8080/decision-central/rest/controller")
stop-embedded-server
END1

echo -n "Configure system properties ..... "
$JBOSS_HOME/bin/jboss-cli.sh --file=sysprops.cli &> /dev/null
ISOK
rm -f sysprops.cli

# Register the PostgreSQL JDBC driver as an EAP module.
echo -n "Add PostgreSQL module ........... "
$JBOSS_HOME/bin/jboss-cli.sh --command="module add --name=org.postgresql \
    --resources=$BINDIR/$PGJDBC --dependencies=javax.api,javax.transaction.api" \
    &> /dev/null
ISOK

# Datasource definition with PostgreSQL-specific validation/sorter classes.
cat > config-ds.cli <<END2
embed-server --server-config=standalone.xml
/subsystem=datasources/jdbc-driver=postgresql:add(driver-name=postgresql, driver-module-name=org.postgresql, driver-xa-datasource-class-name=org.postgresql.xa.PGXADataSource, driver-class-name=org.postgresql.Driver)
data-source add --name=$PGJNDI --driver-name=postgresql \
    --jndi-name="java:jboss/datasources/$PGJNDI" \
    --connection-url="jdbc:postgresql://localhost:5432/$PGDBNAME" \
    --use-java-context=true --enabled=true \
    --user-name="$PGUSER" --password="$PGPASS" \
    --validate-on-match=true \
    --valid-connection-checker-class-name=org.jboss.jca.adapters.jdbc.extensions.postgres.PostgreSQLValidConnectionChecker \
    --exception-sorter-class-name=org.jboss.jca.adapters.jdbc.extensions.postgres.PostgreSQLExceptionSorter
stop-embedded-server
END2

echo -n "Add PostgreSQL datasource ....... "
$JBOSS_HOME/bin/jboss-cli.sh --file=config-ds.cli &> /dev/null
ISOK
rm -f config-ds.cli

echo -n "Setting admin password .......... "
${JBOSS_HOME}/bin/add-user.sh -p "${ADMIN_PASS}" -u "${ADMIN_USER}" --silent
ISOK

echo "Done."
echo
POPD
| true
|
843343927e41ba5086619501261131c81f01e225
|
Shell
|
pupupulp/microservices-architecture
|
/dependency-installation.sh
|
UTF-8
| 1,278
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Provision an Ubuntu host with the Docker toolchain used by this project:
# Docker Engine, docker-compose, docker-machine, VirtualBox, and the
# local-persist volume plugin. Must run with root privileges.
# NOTE(review): apt-key is deprecated on newer Ubuntu releases — confirm
# the target distro before reuse.

# Setup docker key and repository
KEY_BASE=https://download.docker.com/linux/ubuntu/gpg \
    && curl -fsSL ${KEY_BASE} | apt-key add - \
    && apt-key fingerprint 0EBFCD88 \
    && add-apt-repository \
        "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
        $(lsb_release -cs) \
        stable"

# Update apt repositories
apt-get update

# Install dependencies
apt-get install -y \
    apt-transport-https \
    ca-certificates \
    curl \
    gnupg-agent \
    software-properties-common \
    virtualbox

# Install docker
apt-get install -y \
    docker-ce \
    docker-ce-cli \
    containerd.io

# Install docker-compose
# Binary release matched to the host OS/arch, placed on PATH manually.
BASE="https://github.com/docker/compose/releases/download/1.24.0/docker-compose-$(uname -s)-$(uname -m)" \
    && curl -L ${BASE} -o /usr/local/bin/docker-compose \
    && chmod +x /usr/local/bin/docker-compose

# Install docker-machine
BASE=https://github.com/docker/machine/releases/download/v0.16.0 \
    && curl -L ${BASE}/docker-machine-$(uname -s)-$(uname -m) >/tmp/docker-machine \
    && install /tmp/docker-machine /usr/local/bin/docker-machine

# Install docker plugin local-persist
# NOTE(review): this pipes a remote script straight into bash — trusted
# source assumed; review before running in locked-down environments.
BASE="https://raw.githubusercontent.com/CWSpear/local-persist/master/scripts/install.sh" \
    && curl -fsSL ${BASE} | bash
| true
|
8d0e161be09ec8f490efd5330d920616056ad66c
|
Shell
|
dmalyuta/config-public
|
/.my_scripts/desktop_env/vim-anywhere
|
UTF-8
| 567
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Open a fresh scratch file in vim inside a new terminal window and, if the
# editor session succeeds, copy the file's contents to the X clipboard.
# This is inspired/copied from https://github.com/cknadler/vim-anywhere
# See https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
set -o errexit -o errtrace -o nounset -o pipefail

scratch_dir=/tmp/vim-anywhere
scratch_file="${scratch_dir}/doc-$(date +"%y%m%d%H%M%S")"

# mkdir -p is idempotent, so no existence check is needed.
mkdir -p -- "${scratch_dir}"
touch -- "${scratch_file}"
# Strip all permissions for "other" users on the scratch file.
chmod o-rwx -- "${scratch_file}"

if sensible-terminal --window-name 'vim-anywhere' -- vim "${scratch_file}"; then
  xclip -selection clipboard < "${scratch_file}"
fi
| true
|
02daa77465f917fd02dcda4ee43ff7652625d136
|
Shell
|
Angry-Pixel/The-Betweenlands
|
/.github/workflows/build/config/config.sh
|
UTF-8
| 2,467
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Sets up the necessary environment variables
# CI helper (GitHub Actions): derives build type/title/tag from the branch
# ref, writes deployment settings to $GITHUB_ENV, and generates HTML build
# notes from the git history. Expects BS_* variables from earlier steps.
echo "BS_DEPLOY_REPOSITORY_URI=git@github.com:Angry-Pixel/The-Betweenlands-Development-Builds.git" >> $GITHUB_ENV
echo "BS_DEPLOY_REPOSITORY_BRANCH=master" >> $GITHUB_ENV
echo "BS_DEPLOY_NAME=Build Wizard" >> $GITHUB_ENV
echo "BS_ABORT_ON_DEPLOY_ERROR=true" >> $GITHUB_ENV
# Only compute build metadata for non-deployment runs.
if [ "$BS_IS_DEPLOYMENT" == 'false' ]; then
	echo "BS_BUILD_NOTES_FILE=build_notes" >> $GITHUB_ENV
	# Branch names containing "release" produce release builds; everything
	# else is a development build.
	if [[ "$GITHUB_REF" == *"release"* ]]; then
		echo "BS_BUILD_TYPE=release" >> $GITHUB_ENV
		echo "BS_BUILD_RELEASE=true" >> $GITHUB_ENV
		echo "BS_BUILD_TITLE=Release Build ${GITHUB_REF##*/}-${GITHUB_RUN_NUMBER}" >> $GITHUB_ENV
		echo "BS_BUILD_TAG=release-${BS_BUILD_BRANCH}-${BS_BUILD_NUMBER}-$(date +'%d.%m.%Y')" >> $GITHUB_ENV
		echo "Creating release notes"
		#Get previous release tag and then list commits since that release as release notes
		git fetch --all --tags -f
		# Second-newest *-release tag = the previous release.
		previous_release_tag=$(git describe $(git for-each-ref --sort=-taggerdate --format '%(objectname)' refs/tags) --tags --abbrev=0 --match *-release | sed -n 2p)
		echo "Creating list of changes since ${previous_release_tag}..."
		# Build an HTML changelog: commit link plus a collapsible commit list.
		echo "Commit: <a href="\""${BS_BUILD_URL}/commit/${GITHUB_SHA}"\"">${BS_BUILD_URL}/commit/${GITHUB_SHA}</a>" >> build_notes
		echo "<details><summary>Changes</summary><ul>" >> build_notes
		echo "Fetching commits"
		# Deepen the shallow clone back to the previous release's commit date.
		git fetch --shallow-since=$(git log -1 --format=%ct ${previous_release_tag})
		git log --since="$(git log -1 --format=%ai ${previous_release_tag})" --pretty=format:'<li>%n%an, %ad:%n<pre>%B</pre></li>' --no-merges >> build_notes
		echo "</ul></details>" >> build_notes
		cat build_notes
	else
		echo "BS_BUILD_TYPE=development" >> $GITHUB_ENV
		echo "BS_BUILD_RELEASE=false" >> $GITHUB_ENV
		echo "BS_BUILD_TITLE=Development Build ${GITHUB_REF##*/}-${GITHUB_RUN_NUMBER}" >> $GITHUB_ENV
		echo "BS_BUILD_TAG=dev-${BS_BUILD_BRANCH}-${BS_BUILD_NUMBER}-$(date +'%d.%m.%Y')" >> $GITHUB_ENV
		echo "Creating build notes"
		#Use latest commit message as release note
		echo "Commit: <a href="\""${BS_BUILD_URL}/commit/${GITHUB_SHA}"\"">${BS_BUILD_URL}/commit/${GITHUB_SHA}</a>" >> build_notes
		git log -1 --pretty=format:'%an, %ad:%n<pre>%B</pre>' >> build_notes
	fi
fi
# Deploy only for pushes (never for pull requests).
if [ "$BS_PULL_REQUEST" == 'false' ]; then
	echo "DEPLOY_ENV=true" >> $GITHUB_ENV
	echo "DEPLOY_BUILD_TYPE=${BS_BUILD_TYPE}" >> $GITHUB_ENV
	echo "DEPLOY_BUILD_NUMBER=${BS_BUILD_NUMBER}" >> $GITHUB_ENV
fi
| true
|
ca5c7103b79b295eaf6d0ab665223468206ea5e4
|
Shell
|
dfnb/TermuxDockerVM
|
/run.sh
|
UTF-8
| 1,467
| 3.125
| 3
|
[] |
no_license
|
# Boot an Alpine Linux netboot VM under QEMU, selecting the Alpine
# architecture and QEMU system emulator that match the host CPU.
ARCH=$(uname -m)
ALPINELINUXARCH=""
if [ "$ARCH" = "armv7l" ]; then
  echo "armv7"
  ALPINELINUXARCH="armv7"
elif [ "$ARCH" = "aarch64" ]; then
  echo "aarch64"
  ALPINELINUXARCH="aarch64"
else
  # Fallback for other 32-bit ARM variants.
  echo "armhf"
  ALPINELINUXARCH="armhf"
fi

# Alpine 3.10 netboot artifacts for the selected architecture.
INITRAMFS=http://dl-cdn.alpinelinux.org/alpine/v3.10/releases/$ALPINELINUXARCH/netboot-3.10.3/initramfs-vanilla
VMLINUZ=http://dl-cdn.alpinelinux.org/alpine/v3.10/releases/$ALPINELINUXARCH/netboot-3.10.3/vmlinuz-vanilla
MODLOOP=http://dl-cdn.alpinelinux.org/alpine/v3.10/releases/$ALPINELINUXARCH/netboot-3.10.3/modloop-vanilla

# Download kernel/initramfs once; MODLOOP is fetched by the guest at boot
# via the modloop= kernel argument.
if [ ! -f initramfs-vanilla ]; then
wget "$INITRAMFS"
fi
if [ ! -f vmlinuz-vanilla ]; then
wget "$VMLINUZ"
fi

if [ "$ARCH" = "aarch64" ]; then
qemu-system-aarch64 \
    -M virt \
    -m 512M \
    -cpu cortex-a53 \
    -kernel vmlinuz-vanilla \
    -netdev user,id=user0 -device virtio-net-device,netdev=user0 \
    -initrd initramfs-vanilla \
    -append "console=ttyAMA0 ip=dhcp alpine_repo=http://dl-cdn.alpinelinux.org/alpine/edge/main/ modloop=$MODLOOP" \
    -nographic
else
qemu-system-arm \
    -M virt \
    -m 512M \
    -cpu cortex-a15 \
    -kernel vmlinuz-vanilla \
    -netdev user,id=user0 -device virtio-net-device,netdev=user0 \
    -initrd initramfs-vanilla \
    -append "console=ttyAMA0 ip=dhcp alpine_repo=http://dl-cdn.alpinelinux.org/alpine/edge/main/ modloop=$MODLOOP" \
    -nographic
fi
| true
|
72ac95206897cda84c53ae0f5a6657ae459e94a4
|
Shell
|
bmuschko/gradle-docker-plugin
|
/src/test/resources/auth-config/docker-credential-no-server-url
|
UTF-8
| 290
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Fake docker credential helper used in tests: `get` returns a credential
# JSON without a ServerURL field, `list` returns one known registry entry,
# and any other action fails.
case "$1" in
  get)
    # Consume the server URL docker writes on stdin before answering.
    read > /dev/null
    echo '{' \
        ' "Username": "username",' \
        ' "Secret": "secret"' \
        '}'
    ;;
  list)
    echo '{' \
        ' "registry.example.com": "username"' \
        '}'
    ;;
  *)
    exit 1
    ;;
esac
| true
|
d20c3cf6cd5ccd8c2dd8cfae267d56f51914e4e6
|
Shell
|
b03201003/firmware
|
/_DIR-300A1_FW105b09.bin.extracted/squashfs-root/etc/scripts/misc/profile.sh
|
UTF-8
| 1,332
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
# D-Link router config manager: load (get), save (put) or factory-reset
# the RGDB configuration database stored gzip-compressed in NVRAM.
echo [$0] $1 ... > /dev/console
# NVRAM partition holding the config; prefer the rgdb partition name.
if [ -f /etc/config/rgdb ]; then
	nvram=`cat /etc/config/rgdb`
else
	nvram=`cat /etc/config/nvram`
fi
case "$1" in
get)
	# Pull the gzipped config from NVRAM; fall back to factory defaults
	# (reset + put) when it is missing or corrupt.
	rgcfg get -n $nvram -c /var/run/rgdb.xml.gz
	if [ "$?" != "0" ]; then
		echo "Can't get config from nvram, generate default!" > /dev/console
		/etc/scripts/misc/profile.sh reset
		/etc/scripts/misc/profile.sh put
		exit 0
	fi
	gunzip /var/run/rgdb.xml.gz
	rgdb -l /var/run/rgdb.xml
	if [ "$?" != "0" ]; then
		echo "Invalid config, generate default!" > /dev/console
		/etc/scripts/misc/profile.sh reset
		/etc/scripts/misc/profile.sh put
	else
		/etc/scripts/misc/defnodes.sh
	fi
	rm -f /var/run/rgdb.xml
	;;
put)
	# Dump the live RGDB tree, gzip it and write it back to NVRAM.
	rgdb -D /var/run/rgdb.xml
	gzip /var/run/rgdb.xml
	rgcfg save -n $nvram -c /var/run/rgdb.xml.gz
	if [ "$?" = "0" ]; then
		echo "ok" > /dev/console
	else
		echo "failed" > /dev/console
	fi
	rm -f /var/run/rgdb.xml.gz
	cd
	rgdb -s /sys/restore_default 1
	;;
reset)
	# Load factory defaults (or the file passed as $2) into the live tree.
	if [ "$2" != "" ]; then
		cp $2 /var/run/rgdb.xml.gz
		rm -f $2
	else
		cp /etc/config/defaultvalue.gz /var/run/rgdb.xml.gz
	fi
	gunzip /var/run/rgdb.xml.gz
	rgdb -l /var/run/rgdb.xml
	/etc/scripts/misc/defnodes.sh
	if [ -f /etc/scripts/freset_setnodes.sh -a "$2" = "" ]; then
		sh /etc/scripts/freset_setnodes.sh
	fi
	# NOTE(review): removes "rgdb.xml" relative to the current directory,
	# not /var/run/rgdb.xml — confirm this path is intentional.
	rm -f rgdb.xml
	cd
	;;
*)
	echo "Usage: $0 get/put/reset"
esac
| true
|
cabcb6921b119af1dd917b304160a7331fc5a843
|
Shell
|
AleksaMCode/kriptografija-i-racunarska-zastita
|
/ispit_20200129/z03/script.sh
|
UTF-8
| 366
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# For each digest algorithm listed in algos.list, hash ulaz.txt and compare
# the result against the pre-computed digests otisci/otisak1..11.txt.
# Prints the first (algorithm, digest-file) pair that matches, then stops.
while IFS= read -r algo
do
	for i in {1..11}
	do
		openssl dgst -$algo -out otisak.txt ulaz.txt
		# =~ with a quoted pattern: substring match — the candidate digest
		# must appear somewhere in our freshly computed output.
		if [[ "$(cat otisak.txt)" =~ "$(cat otisci/otisak$i.txt)" ]]
		then
			echo "MATCH: $algo + ulaz.txt = otisak$i.txt"
			rm otisak.txt
			# Leave both the inner and outer loop once a match is found.
			break 2
		fi
		rm otisak.txt
	done
done < algos.list
| true
|
8556271d836a4a729de9651af041196663af6c76
|
Shell
|
MenkeTechnologies/zpwr
|
/autoload/common/zpwrDockerWipe
|
UTF-8
| 514
| 3
| 3
|
[
"MIT"
] |
permissive
|
# -*- mode: sh -*-
# vim: set ft=sh:
# zpwrDockerWipe: stop and delete Docker containers and images.
# With no argument it wipes ALL containers and images; with an argument it
# only touches containers named .*$1.* and images matching *$1*.
# Errors (e.g. empty `docker ps` output) are silenced deliberately.
function zpwrDockerWipe(){

    if [[ -z $1 ]]; then
        docker stop $(docker ps -qa) 2>/dev/null
        docker rm -f $(docker ps -qa) 2>/dev/null
        docker rmi -f $(docker images -qa) 2>/dev/null
    else
        app="$1"
        docker stop $(docker ps -qa --filter name=".*$app.*") 2>/dev/null
        docker rm -f $(docker ps -qa --filter name=".*$app.*") 2>/dev/null
        docker rmi -f $(docker images -qa "*$app*") 2>/dev/null
    fi
}

# Autoloaded zsh function file: invoke immediately with caller's arguments.
zpwrDockerWipe "$@"
| true
|
5cc38a5a05b97a00c88050f5ac42dd01127e0df4
|
Shell
|
capossele/drand
|
/web/deploy.sh
|
UTF-8
| 3,804
| 4.34375
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-4.0",
"MIT"
] |
permissive
|
#!/bin/bash
# set -x
### This script allows you to deploy our drand website on your domain
## There are 2 modes: interactive and flag based
# Interactive mode (no arguments) prompts for URL/USER/HOST/DIR; flag mode
# takes them via --url/--user/--host/--dir. Both rewrite baseURL on line 1
# of config.toml, build with hugo, then rsync public/ to the server.

if [ $# -eq 0 ]
then
  echo Starting the deployment in *interactive* mode. Run sh deploy.sh --help for more information about the different modes.
  echo Before beginning, make sure that your SSH setup is compatible with: https://gohugo.io/hosting-and-deployment/deployment-with-rsync/#install-ssh-key.
  read -p "Continue (y/n)? " answer
  case ${answer:0:1} in
    y|Y )
    ;;
    * )
        exit 0
    ;;
  esac

  echo What is your website\'s URL ? \(ex: https://drand.io\)
  read URL
  #format the /
  # Escape slashes so the URL can be embedded in the sed replacement below.
  URL="$(sed 's/\//\\\//g' <<<$URL)"

  echo What is your user name ? \(this parameter is optional depending on your SSH setup\)
  read USER
  # Non-empty user becomes the "user@" prefix for the rsync target.
  if ! [ -z "$USER" ]
  then
    USER="$USER@"
  fi

  echo What is the host name ?
  read HOST

  echo What is the path on the server of the destination directory ? \(ex: /var/www/html/\)
  read DIR

  echo Let\'s deploy...
  #replace url in config.toml
  sed -i -e "1s/.*/baseURL\=\"$URL\"/g" "$PWD/config.toml"
  hugo && rsync -Paivz --delete public/ ${USER}${HOST}:${DIR}
  exit 0

else
  # Flag mode: each flag's value must be present and must not itself look
  # like another flag (--*).
  while [ ! $# -eq 0 ]
  do
    case "$1" in
      --user)
        export USER=$2
        echo user
        echo $USER
        if [ -z "$USER" ] || [[ $USER == --* ]]
        then
          echo Bad user format
          exit 1
        fi
        ;;
      --host)
        export HOST=$2
        echo host
        echo $HOST
        if [ -z "$HOST" ] || [[ $HOST == --* ]]
        then
          echo Bad host format
          exit 1
        fi
        ;;
      --url)
        export URL=$2
        if [ -z "$URL" ] || [[ $URL == --* ]]
        then
          echo Bad URL format
          exit 1
        fi
        ;;
      --dir)
        export DIR=$2
        if [ -z "$DIR" ] || [[ $DIR == --* ]]
        then
          echo Bad dir format
          exit 1
        fi
        ;;
      --help | -h)
        echo "This script can be used in either interactive or non-interactive mode.
In interactive mode you will be asked for the parameters one at a time, whereas in the non-interactive mode, you need to specify them all at once, by using flags (i.e., \"--user \$USER\").
The parameters that we ask for are USER, HOST, DIR and URL.
USER and HOST are the variables you used during your SSH setup, thus you need to sure that your SSH setup is compatible with: https://gohugo.io/hosting-and-deployment/deployment-with-rsync/#install-ssh-key. ULR is the address you want to deploy to, such as https://example.com, and DIR is the path of the destination on the server.
To start in interactive mode, run the script without any argument. To use the script in non-interactive mode, you have to specify every flag with the corresponding value, i.e., sh deploy.sh --user \$USER --host \$HOST --dir \$DIR --url \$URL.
Important note: the USER parameter may be optional depending on your SSH configuration. To skip it, simply press enter when asked for it in the interactive mode, or omit the flag \"--user\" in the non-interactive mode."
        exit
        ;;
    esac
    shift
  done
  echo Starting the deployment in non-interactive mode:
  #format the /
  URL="$(sed 's/\//\\\//g' <<<$URL)"
  #replace url in config.toml
  sed -i -e "1s/.*/baseURL\=\"$URL\/\"/g" "$PWD/config.toml"
  if ! [ -z "$USER" ]
  then
    USER="$USER@"
  fi
  hugo && rsync -Paivz --delete public/ ${USER}${HOST}:${DIR}
  exit 0
fi
| true
|
fb58d79dbfc7d59241c276563bf8c692162dac91
|
Shell
|
axblk/PKGBUILD
|
/libomp-hermit-git/PKGBUILD
|
UTF-8
| 1,217
| 2.703125
| 3
|
[] |
no_license
|
# Arch Linux VCS PKGBUILD for HermitCore's OpenMP runtime, built as a
# static library from the "hermit" branch of the upstream repository
# using the HermitCore cross toolchain under /opt/hermit.
_pkgname="libomp-hermit"
pkgname=$_pkgname-git
pkgver=r790.a6a63d1
pkgrel=1
pkgdesc="Cross-build binary utilities for HermitCore"
arch=('x86_64')
url="https://github.com/hermitcore/openmp"
license=('GPL')
groups=()
depends=()
makedepends=('git' 'cmake' 'binutils-hermit' 'gcc-hermit' 'newlib-hermit' 'pte-hermit' 'libhermit')
options=('libtool' 'staticlibs' '!buildflags' '!strip')
provides=('libomp-hermit')
conflicts=('libomp-hermit')
source=("$_pkgname::git+https://github.com/hermitcore/openmp.git#branch=hermit")
md5sums=('SKIP')

# Derive the rNNN.hash version string from the git history (standard
# convention for -git packages).
pkgver() {
  cd "$srcdir/$_pkgname"
  printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}

# Out-of-tree cmake build with the x86_64-hermit cross compilers; shared
# libs, Fortran modules and OMPT are disabled for the static HermitCore
# target.
build() {
  export PATH=/opt/hermit/bin:$PATH
  cd "$srcdir/$_pkgname"
  rm -rf build
  mkdir build
  cd build
  cmake -DCMAKE_C_COMPILER=x86_64-hermit-gcc \
        -DCMAKE_CXX_COMPILER=x86_64-hermit-g++ \
        -DCMAKE_INSTALL_PREFIX=/opt/hermit/x86_64-hermit \
        -DCMAKE_TRY_COMPILE_TARGET_TYPE=STATIC_LIBRARY \
        -DHERMIT=1 \
        -DLIBOMP_ARCH=x86_64 \
        -DLIBOMP_ENABLE_SHARED=OFF \
        -DLIBOMP_FORTRAN_MODULES=OFF \
        -DLIBOMP_OMPT_SUPPORT=OFF \
        -DOPENMP_ENABLE_LIBOMPTARGET=OFF \
        ..
  make
}

# Install into the packaging root.
package() {
  cd "$srcdir/$_pkgname/build"
  make DESTDIR="$pkgdir/" install
}
| true
|
a9d07e544db85726e9c1e8dd58fb1dd37ea21ddd
|
Shell
|
nguyen-tich-duy/canvas-lms-test
|
/build.sh
|
UTF-8
| 2,135
| 3.953125
| 4
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
set -eo pipefail
# -e: immediately exit if any command has a non-zero exit status
# -o: prevents errors in a pipeline from being masked
#
# Build (and optionally push or clean) the canvas-lms and canvas-rce-api
# docker-compose projects, wiring in the Analytics and QTI add-ons first.
# Configuration comes from .env.build / .env and scripts/common.sh.

function usage {
echo "./build.sh : build local only"
echo "./build.sh clean : remove all images"
echo "./build.sh push : build and push"
echo "./build.sh push-only: push only (no build)"
}

BASE_PATH=$(dirname $(realpath $0))
WEB_ROOT=$BASE_PATH/canvas-lms

cd $BASE_PATH
source ./scripts/common.sh
source ./.env.build
source ./.env

# ACTION: push|clean (unset = local build); SKIP_BUILD set for push-only.
unset SKIP_BUILD ACTION
case "$1" in
push|clean)
ACTION=$1
shift;;
"push-only")
ACTION=push
SKIP_BUILD=1
shift;;
-h|--help)
usage
exit 2;;
esac

message "Update git repo"
git submodule update --init --depth 1

message "Add Analytics module"
#rm -rf $WEB_ROOT/gems/plugins/analytics
[ ! -e $WEB_ROOT/gems/plugins/analytics ] && git clone https://github.com/instructure/analytics $WEB_ROOT/gems/plugins/analytics --depth 1

message "Add QTI module"
mkdir -p $WEB_ROOT/vendor
[ ! -e $WEB_ROOT/vendor/QTIMigrationTool ] && git clone https://github.com/instructure/QTIMigrationTool.git $WEB_ROOT/vendor/QTIMigrationTool --depth 1

# Build (or clean) one docker-compose project, after overlaying its
# compose files and config from this repo.
# NOTE(review): the `[ -d … ] && cp …` lists below return non-zero when
# the directory is absent, which trips `set -e` — confirm these
# directories always exist.
function build {
PROJECT_NAME=$1
message "Building $PROJECT_NAME"
[ -d $BASE_PATH/docker-compose/$PROJECT_NAME ] \
&& cp -r $BASE_PATH/docker-compose/$PROJECT_NAME/* $PROJECT_NAME/
[ -d $BASE_PATH/config/$PROJECT_NAME ] \
&& cp -r $BASE_PATH/config/$PROJECT_NAME/* $PROJECT_NAME/config/
cd $BASE_PATH/$PROJECT_NAME
if [ "$ACTION" == "clean" ]; then
docker-compose down --rmi local
else
# NOTE(review): `a && b || c` — "[SKIPPED]" also prints if the build
# itself fails, not only when SKIP_BUILD is set.
[ -z "$SKIP_BUILD" ] && docker-compose build || message "[SKIPPED]"
fi
cd ..
}

# .env
# Generate per-project .env files consumed by docker-compose.
tee $BASE_PATH/canvas-lms/.env <<EOF
COMPOSE_PROJECT_NAME=${BUILD_PREFIX}-lms
WEB_IMAGE=${WEB_IMAGE}
POSTGRES_IMAGE=${POSTGRES_IMAGE}
EOF

build canvas-lms

tee $BASE_PATH/canvas-rce-api/.env <<EOF
COMPOSE_PROJECT_NAME=${BUILD_PREFIX}-rce-api
RCE_IMAGE=${RCE_IMAGE}
EOF

build canvas-rce-api

if [ "$ACTION" == "push" ]; then
cd $WEB_ROOT
exec_command "docker-compose push"
cd $BASE_PATH/canvas-rce-api
exec_command "docker-compose push"
fi
| true
|
9c593289288628a304e65f28ae278ee8ed07a83d
|
Shell
|
third774/dotfiles
|
/script/setup
|
UTF-8
| 522
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Bootstrap a machine from ~/.dotfiles: install oh-my-zsh, copy rc files,
# fetch the iTerm Night Owl theme and install Brewfile packages.
touch ~/.dotfiles/.localProfile
echo 'installing oh-my-zsh'
curl https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh -fsSL | sh
echo 'copying dotfiles to user directory'
cp ~/.dotfiles/.zshrc ~/.zshrc
cp ~/.dotfiles/.vimrc ~/.vimrc
echo 'cloning night owl theme for iTerm'
# Remove any stale clone so the git clone below starts fresh.
rm -rf ~/repos/iterm2-night-owl
git clone https://github.com/nickcernis/iterm2-night-owl.git ~/repos/iterm2-night-owl
cd ~/.dotfiles
brew bundle
# Symlink karabiner config so changes stay tracked in the dotfiles repo.
ln -s ${HOME}/.dotfiles/karabiner ${HOME}/.config/karabiner
| true
|
f60c5298c80b841216a1a446ffa261b423431ebb
|
Shell
|
vinz3872/lang-install
|
/lang_install
|
UTF-8
| 367
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# lang-install entry point: add or remove a language toolchain based on
# parsed CLI options, then refresh the generated environment file.
# cd to the script's own directory so the relative lib/ paths resolve.
project_dir=$(realpath $0 | xargs dirname)
cd $project_dir

# load functions
. lib/parse_options.sh
. lib/utils.sh
. lib/actions.sh
. lib/load_config.sh

# load conf & env
load_config

# parse options
# parse_options is expected to set $action (from lib/parse_options.sh).
parse_options "${@}"

# NOTE(review): [[ ]] is a bashism under #!/bin/sh — works only where sh
# is bash/ksh/zsh; confirm target platforms or switch to [ ... ].
[[ $action == 'add' ]] && add_lang
[[ $action == 'remove' ]] && remove_lang

# refresh env file after the add/remove
refresh_env_file
| true
|
06bf8c8f8c304a88da015b66c9fb1dc792805f3c
|
Shell
|
paceholder/chigraph
|
/.travis/install.sh
|
UTF-8
| 206
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Travis CI dependency bootstrap: on macOS install build dependencies via
# Homebrew first, then run the shared setup script; on Linux just run it.

set -xe

case "$TRAVIS_OS_NAME" in
	linux)
		bash ./setup.sh
		;;
	*)
		# `|| echo` keeps already-installed formulas from failing the build.
		brew install cmake qt5 bison gettext ninja python3 || echo
		brew install llvm --with-clang
		bash ./setup.sh
		;;
esac
| true
|
8e44e79bdbb4f31b33187732b48e57ba1af1a612
|
Shell
|
michaelcunningham/oracledba
|
/admin/restart_cloud_control_agent.sh
|
UTF-8
| 1,789
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/sh
# Restart the Oracle Cloud Control agent when the kernel's async-IO
# counter (/proc/sys/fs/aio-nr) climbs past a threshold, and email the
# DBA team if the agent fails to come back.

HOST=`hostname -s`
log_dir=/mnt/dba/logs/$HOST
log_file=${log_dir}/restart_cloud_control_agent.log
email_body_file=${log_dir}/restart_cloud_control_agent.email
mkdir -p $log_dir
EMAILDBA=dba@tagged.com

result=`sudo cat /proc/sys/fs/aio-nr`
if [ $result -lt 1000000 ]
then
   # If the value of /proc/sys/fs/aio-nr is less than 1,000,000 just exit
   exit
fi

# Only proceed when the Cloud Control init script is installed.
if [ -f "/etc/rc.d/init.d/gcstartup" ]
then
   echo "Restarting the Cloud Control Agent" > $log_file
   echo "" >> $log_file
   echo "Before value of /proc/sys/fs/aio-nr is: "$result >> $log_file
   echo "" >> $log_file
   # Stop the agent only if its core process is currently running.
   result=`ps x | grep gcagent_core | grep -v grep`
   if [ -n "$result" ]
   then
      sudo /etc/rc.d/init.d/gcstartup stop >> $log_file
      echo "" >> $log_file
   fi
   sudo /etc/rc.d/init.d/gcstartup start >> $log_file
   echo "" >> $log_file
   result=`sudo cat /proc/sys/fs/aio-nr`
   echo "" >> $log_file
   echo "After value of /proc/sys/fs/aio-nr is: "$result >> $log_file
   # Verify the agent process came back; if not, mail the log to the DBAs.
   result=`ps x | grep gcagent_core | grep -v grep`
   if [ -z "$result" ]
   then
      echo "The cloud agent failed to restart while restarting the cloud agent on $HOST" > $email_body_file
      echo "The log file contains the following" >> $email_body_file
      echo "" >> $email_body_file
      cat $log_file >> $email_body_file
      echo "" >> $email_body_file
      echo "################################################################################" >> $email_body_file
      echo "" >> $email_body_file
      echo 'This report created by : '$0 $* >> $email_body_file
      echo "" >> $email_body_file
      echo "################################################################################" >> $email_body_file
      echo "" >> $email_body_file
      mail -s "WARNING: $HOST - Cloud Agent did not restart" $EMAILDBA < $email_body_file
   fi
fi
| true
|
f26e40b2835167c948d48c4a55395c22e199e34c
|
Shell
|
IanMadlenya/gnome-shell-sia
|
/install.sh
|
UTF-8
| 2,283
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Install Sia desktop extension/applet for GNOME or Cinnamon
#
# Note: Currently broken in Cinnamon
#
# Dependencies: unzip
#

# Check unzip
command -v unzip >/dev/null 2>&1 || { echo "Error: Please install unzip"; exit 1; }

# Test which desktop is running. By default, we assume GNOME shell.
EXTENSION_PATH="$HOME/.local/share/gnome-shell/extensions";

# Detect GNOME desktop
if [ "$DESKTOP_SESSION" = "gnome" ]; then
	EXTENSION_PATH="$HOME/.local/share/gnome-shell/extensions";
fi

# Detect Cinnamon desktop
if [ "$DESKTOP_SESSION" = "cinnamon" ]; then
	EXTENSION_PATH="$HOME/.local/share/cinnamon/applets";
fi

# Extensions directory missing by default in some distributions, e.g. Fedora
mkdir -p $EXTENSION_PATH;

# Set URL to extension archive
URL="https://github.com/pmknutsen/gnome-shell-sia/archive/master.zip";

# Extension UUID
EXTENSION_UUID="sia@pmknutsen.github.com";

# Download extension archive
wget --header='Accept-Encoding:none' -O /tmp/extension.zip "${URL}"

# Unzip extension to installation folder, flattening the top-level
# "gnome-shell-sia-master" directory that GitHub puts in the archive.
mkdir -p "${EXTENSION_PATH}/${EXTENSION_UUID}";
unzip -q /tmp/extension.zip -d ${EXTENSION_PATH}/${EXTENSION_UUID};
mv ${EXTENSION_PATH}/${EXTENSION_UUID}/gnome-shell-sia-master/* ${EXTENSION_PATH}/${EXTENSION_UUID};
rmdir ${EXTENSION_PATH}/${EXTENSION_UUID}/gnome-shell-sia-master;

# List enabled extensions (GNOME) / applets (Cinnamon); the sed strips the
# surrounding brackets so the UUID can be appended below.
if [ "$DESKTOP_SESSION" = "gnome" ]; then
	EXTENSION_LIST=$(gsettings get org.gnome.shell enabled-extensions | sed 's/^.\(.*\).$/\1/');
fi
if [ "$DESKTOP_SESSION" = "cinnamon" ]; then
	EXTENSION_LIST=$(gsettings get org.cinnamon enabled-applets | sed 's/^.\(.*\).$/\1/');
fi

# Check if extension is already enabled
EXTENSION_ENABLED=$(echo ${EXTENSION_LIST} | grep ${EXTENSION_UUID});
if [ "$EXTENSION_ENABLED" = "" ]; then
	# Enable extension
	if [ "$DESKTOP_SESSION" = "gnome" ]; then
		gsettings set org.gnome.shell enabled-extensions "[${EXTENSION_LIST},'${EXTENSION_UUID}']"
	fi
	if [ "$DESKTOP_SESSION" = "cinnamon" ]; then
		# Fix: write back to the same key read above ("enabled-applets");
		# "enabled-extensions" is not a key of the org.cinnamon schema.
		gsettings set org.cinnamon enabled-applets "[${EXTENSION_LIST},'${EXTENSION_UUID}']"
	fi

	# Extension is now available
	# Fix: EXTENSION_ID was never defined; report the UUID instead.
	echo "Extension with ID ${EXTENSION_UUID} has been enabled. Restart your desktop to take effect (Alt+F2 then 'r')."
fi

# remove temporary files
rm -f /tmp/extension.zip
| true
|
3bb6e640eefff90c9d391e55a8d66141774a2687
|
Shell
|
zzyhappyzzy/XCConfigOrTarget
|
/XCConfigDemo/beforeBuildShell.sh
|
UTF-8
| 1,550
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/sh
# Note: change *.xcconfig file and then rebuild project will not see changes for derivedData Caches.
# Explanation: edits to *.xcconfig are not reflected immediately after a
# rebuild because of Xcode's DerivedData caches.

# remove derivedData and clean project for rebuild
# This script clears those caches so every edit takes effect on rebuild.

# Xcode Version: 9.3 (9E145) <not test for other xcode version>
# Tested only with Xcode 9.3 (9E145); other versions are unverified.

# run script at `Target -> Build Phases -> Run Script`
# Integrate this script under `Target -> Build Phases -> Run Script`.

# If not run the script on Xcode, you can run Xcode Menu Product -> Clean
# Alternatively, clear caches manually via Product -> Clean.

# CONFIGURATION / PROJECT_NAME are injected by Xcode's build environment.
if [ -z "${CONFIGURATION}" ]; then
echo "error configuration, exit!"
exit 0
fi
echo "build project ${PROJECT_NAME} and configuration is ${CONFIGURATION}"

BASE_DIR="/Users/$(whoami)/Library/Developer/Xcode/DerivedData"
# DerivedData folders are named "<ProjectName>-<hash>"; match by name.
RESULT=$(ls ${BASE_DIR} | grep ${PROJECT_NAME})
echo "==== start clean derivedData ===="
for directory in ${RESULT}
do
# Remove the built .app, intermediate .build and dSYM for this config.
rm -rf "${BASE_DIR}/$directory/Build/Products/${CONFIGURATION}-iphonesimulator/${PROJECT_NAME}.app"
rm -rf "${BASE_DIR}/$directory/Build/Products/${CONFIGURATION}-iphonesimulator/${PROJECT_NAME}.build"
rm -rf "${BASE_DIR}/$directory/Build/Products/${CONFIGURATION}-iphonesimulator/${PROJECT_NAME}.app.dSYM"
done
xcodebuild -alltargets clean -configuration ${CONFIGURATION}
| true
|
d182f2023e62e8197dfc0e1dcca3151e75e999d0
|
Shell
|
Sewdn/meteor-mdl
|
/scripts/publish-edge.sh
|
UTF-8
| 698
| 3.25
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
################################################################################
# This script publishes the edge version.
# Publishes the meteor-mdl package as "mdl-edge" under a pinned Meteor
# release, then hides it from migration listings.
################################################################################

# Meteor release used for publishing, and the edge package version suffix.
RLSVER=1.3.5
EDGE_VERSION=1.1.3_8

SCRIPTDIR="$(dirname "$0")"
ROOTDIR="${SCRIPTDIR}/../"

cd $ROOTDIR/meteor-package

##! Todo: Edit or check `package.js` to ensure the package name is "mdl-edge".

# Install npm dependencies
npm install

# Export the edge version so the package will use a different name.
export EDGE_VERSION

# Publish meteor package under the specified release.
meteor publish --release $RLSVER

# Hide the package.
meteor admin set-unmigrated zodiase:mdl-edge
| true
|
c9db1d40c8df2b75932d578bda10a5a875378e5a
|
Shell
|
jiangz222/transporter
|
/scripts/teardown_db_in_docker.sh
|
UTF-8
| 173
| 2.890625
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# Tear down the docker-compose test environment for the adaptor under
# test. $TESTDIR (e.g. "adaptor/mongodb") selects which adaptor's compose
# setup to stop; non-adaptor test dirs are a no-op.
set -e

case "$TESTDIR" in
  adaptor/*)
    # Second path component is the adaptor name (e.g. "mongodb").
    adaptor=`cut -d "/" -f2 <<< $TESTDIR`
    cd config/$adaptor/test_setup
    docker-compose down
    ;;
esac
| true
|
1305ca0dad04e5e0237dde90305c1376c7a30487
|
Shell
|
vishwakumba/vagrant-vms
|
/sftp-centos-vm/configure-sftp.sh
|
UTF-8
| 489
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision an SFTP-only user on a CentOS Vagrant VM: install the
# sshd_config shipped in the Vagrant share, (re)create the chrooted
# group/user, then restart sshd.
sudo mv /etc/ssh/sshd_config /etc/ssh/sshd_config.backup
sudo cp /vagrant/sshd_config /etc/ssh/
sudo chmod 600 /etc/ssh/sshd_config
sudo chown root:root /etc/ssh/sshd_config
sudo mkdir -p /super-heroes-work
# Recreate user and group from scratch; deletions fail harmlessly on a
# first run when they do not exist yet.
sudo userdel -f batman
sudo groupdel super-heroes
sudo groupadd super-heroes
# /bin/false shell: interactive login disabled, SFTP access only.
sudo useradd batman -g super-heroes -s /bin/false -d /super-heroes-work/batman-work
echo "batman:batman" | sudo chpasswd
sudo systemctl restart sshd
echo "SFTP Configuration Complete"
| true
|
d42511afa4d2ffdaec66fa446e350f97a781a008
|
Shell
|
AndrewSB/dotfiles
|
/script/new-mac.sh
|
UTF-8
| 1,414
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# runnable as `curl -L andrew.energy/newmac | bash`
# Bootstrap a fresh macOS machine: Homebrew, Mackup restore from iCloud,
# dotfiles clone + Brewfile bundle, latest Xcode and iOS runtime, then an
# optional encrypted personal command.

# Prime sudo credentials up front.
sudo echo

# Install Homebrew
NONINTERACTIVE=1 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
# Apple Silicon installs under /opt/homebrew, Intel under /usr/local.
eval "$(/opt/homebrew/bin/brew shellenv)" || eval "$(/usr/local/bin/brew shellenv)"

# Install Mackup
brew install mackup
# creates ~/.mackup.cfg and writes the following to it:
# [storage]
# engine = icloud
rm ~/.mackup.cfg
echo "[storage]" > "$HOME/.mackup.cfg"
echo "engine = icloud" >> "$HOME/.mackup.cfg"

mackup restore -f

brew install gh
mkdir Developer
pushd ~/Developer || exit
gh repo clone AndrewSB/dotfiles
pushd dotfiles || exit

# From here on, abort on any failure.
set -e
brew bundle
popd || exit

xcodes install --latest-prerelease
xcodes select
# Newest iOS runtime = last "iOS ..." line of `xcodes runtimes`.
latest_ios_runtime=$(xcodes runtimes --include-betas | grep '^iOS' | tail -n 1 | awk '{print $0}')
xcodes runtimes install "$latest_ios_runtime"

# Optionally, run the following secret. generated with:
# openssl enc -aes-256-cbc -a -salt -pass pass:"your-password-here" <<EOF
# commands to run here
#EOF
encrypted_command="U2FsdGVkX1/XeI1H8iaFzwgpxx5m10mVeyROvjadO4z7oN+oNTD8e+ygACGRKuaH3J4Q7hCXctZWPpRM+kHbx6511VKwWeOC7cO/79xLAUpyPhOVEbJQTW27rdVicPAm0yggu11rvuG/LO2mNTne5sakoS3Rf1eISq/+PIxhXjTbqEIJWDuhaDTPnd7Q96TUAsQ/7H8X6sakqql0cRyRDeaAjk5yyVrJRe7p1QqDmns="
read -rsp "Enter the password to decrypt the command: " password
decrypted_command=$(echo "$encrypted_command" | openssl enc -aes-256-cbc -a -d -salt -pass pass:"$password" 2>/dev/null)

# Check if decryption was successful
# NOTE(review): under `set -e` a failed command-substitution assignment
# exits the script before reaching here, so this branch may be dead code.
if [ $? -ne 0 ]; then
    echo "Decryption failed."
    exit 1
fi

# Execute the decrypted command
eval "$decrypted_command"
| true
|
6ec54df0b220d7db4738608ec3cfaca3a3bc16e4
|
Shell
|
rathoddilip/Day5_Sequence_Practice_Problem
|
/1_3_Add2RandomDiceNum.sh
|
UTF-8
| 154
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Roll two six-sided dice and print the sum of their faces (2..12).
die_one=$(( RANDOM % 6 + 1 ))
die_two=$(( RANDOM % 6 + 1 ))
sum=$(( die_one + die_two ))
echo "Result of two dice numbers =:" $sum
| true
|
13212ad229c50b83a2189990bba45fe211f5568e
|
Shell
|
trapd00r/Documentation
|
/zsh/ftzsh/zshrc.d/000-check_com.z
|
UTF-8
| 1,407
| 3.578125
| 4
|
[] |
no_license
|
### vim:ft=zsh:foldmethod=marker

### check_com(): check if a command exists
### eg: check_com "vim -p"
### With -c, only external commands are considered; otherwise shell
### functions, aliases and reserved words also count.
### Returns 0 when the command exists, 1 otherwise (or on bad usage).
function check_com() {
    #setopt localoptions xtrace
    local words
    local -i comonly
    local cmd

    if [[ ${1} == '-c' ]] ; then
        (( comonly = 1 ))
        shift
    else
        (( comonly = 0 ))
    fi

    if (( ${#argv} != 1 )) ; then
        printf 'usage: check_com [-c] <command>\n' >&2
        return 1
    fi

    # Split the argument into words so "vim -p" only checks "vim".
    # zis_317 "atleast" tests for zsh >= 3.1.7 (defined elsewhere).
    if zis_317 "atleast" ; then
        words=(${(z)1})
    else
        ### <3.1.7 does not know (z); this makes things less flexible. Oh well...
        words=(${(s: :)1})
    fi
    cmd=${words[1]}

    if (( comonly > 0 )) ; then
        if ! zis_317 "atleast"; then
            [[ -x $(which $cmd) ]] && return 0
        else
            # $commands maps names to external command paths in zsh.
            [[ -n ${commands[$cmd]} ]] && return 0
        fi
        return 1
    fi

    zis_317 "atleast" || return 1

    # Full lookup: external command, function, alias or reserved word.
    if [[ -n ${commands[$cmd]} ]] \
    || [[ -n ${functions[$cmd]} ]] \
    || [[ -n ${aliases[$cmd]} ]] \
    || [[ -n ${reswords[(r)$cmd]} ]] ; then
        return 0
    fi

    return 1
}

### check_com_print(): check_com() + error msg
### Same semantics as check_com(), but prints a message on failure.
function check_com_print() {
    local command
    if [[ $1 == '-c' ]]; then
        command=$2
    else
        command=$1
    fi

    if ! check_com "$@" ; then
        printf '%s not found in $path.\n' ${command}
        return 1
    fi

    return 0
}
| true
|
a1270dca601a1b8e036f315e6b7df7d5633a71c0
|
Shell
|
behnammhp/myscripts
|
/create-mac-user.sh
|
UTF-8
| 1,223
| 2.84375
| 3
|
[] |
no_license
|
#LOCAL_ADMIN_FULLNAME="Joe Admin" # The local admin user's full name
#LOCAL_ADMIN_SHORTNAME="joeadmin" # The local admin user's shortname
#LOCAL_ADMIN_PASSWORD="password" # The local admin user's password
# Create a hidden local admin account on a remote Mac via ansible ad-hoc
# shell commands: prompt for the account details, register the host in
# /etc/ansible/hosts, then create, hide and relocate the user.

echo -n "Please enter full name for the user:"
read LOCAL_ADMIN_FULLNAME
echo -n "Please enter the username:"
read LOCAL_ADMIN_SHORTNAME
echo -n "Please enter the password:"
read LOCAL_ADMIN_PASSWORD
echo -n "Please enter the name of this MACBook:"
read MAC_HOSTNAME
echo -n "Please enter the IP Address: "
read IP_ADDRESS

# Append an inventory group "[hostname]" with its IP to the hosts file.
cd /etc/ansible
echo -e "[$MAC_HOSTNAME]" \\n "$IP_ADDRESS" >> hosts

# Create a local admin user account
ansible -m shell -a "sysadminctl -addUser $LOCAL_ADMIN_SHORTNAME -fullName "$LOCAL_ADMIN_FULLNAME" -password "$LOCAL_ADMIN_PASSWORD" -admin" $MAC_HOSTNAME
# Hide the account from the login window and move its home under /var.
ansible -m shell -a "dscl . create /Users/$LOCAL_ADMIN_SHORTNAME IsHidden 1" $MAC_HOSTNAME
ansible -m shell -a "mv /Users/$LOCAL_ADMIN_SHORTNAME /var/$LOCAL_ADMIN_SHORTNAME" $MAC_HOSTNAME
ansible -m shell -a "dscl . -create /Users/$LOCAL_ADMIN_SHORTNAME NFSHomeDirectory /var/$LOCAL_ADMIN_SHORTNAME" $MAC_HOSTNAME
# Fix: the original line had an unterminated single quote (a shell syntax
# error). Quote the share-point name so the embedded apostrophe and spaces
# reach dscl as one argument.
ansible -m shell -a "dscl . -delete \"/SharePoints/${LOCAL_ADMIN_FULLNAME}'s Public Folder\"" $MAC_HOSTNAME
| true
|
300ee5b661dd458e179aab31d3b921099c36a374
|
Shell
|
ennweb/docker-percona_galera
|
/database/bin/functions
|
UTF-8
| 4,000
| 3.78125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Helper functions for running a Percona XtraDB/Galera node in Docker with
# etcd-based service discovery and confd-rendered configuration. Expects
# ETCD_HOST, TTL, HOSTNAME and the MySQL credential variables in the env.

# Create an etcd key with a default value unless it already exists.
function etcd_set_default {
  etcdctl --no-sync -C $ETCD mk /$1 $2 >/dev/null 2>&1 || true
}

# Create an etcd directory, ignoring "already exists" errors.
function etcd_make_directory {
  etcdctl --no-sync -C $ETCD setdir /$1 >/dev/null 2>&1 || true
}

# Probe each candidate host from cluster_members.txt on the MySQL port and
# export the reachable ones as comma-separated CLUSTER_MEMBERS.
function cluster_members() {
  CLUSTER_MEMBERS=
  LIST=
  for server in $(cat /app/cluster_members.txt); do
    echo -n "-----> Testing potential db host $server..."
    # A live MySQL server advertises mysql_native_password in its greeting.
    if echo "" | nc $server 3306 | grep mysql_native_password > /dev/null; then
      echo "OK"
      LIST+="$server,"
    else
      echo "NOPE"
    fi
  done
  # Strip the trailing comma before exporting.
  export CLUSTER_MEMBERS=$(echo $LIST | sed 's/,$//')
}

# Register this node's service ports (mysql 3306, IST 4568, SST 4444,
# Galera replication 4567) in etcd with a TTL so dead nodes expire.
function publish_to_etcd() {
  etcdctl $ETCD_OPTIONS set /services/database_port/$HOSTNAME:factorish-database:3306 $HOST:3306 --ttl ${TTL} > /dev/null
  etcdctl $ETCD_OPTIONS set /services/database_ssi/$HOSTNAME:factorish-database:4568 $HOST:4568 --ttl ${TTL} > /dev/null
  etcdctl $ETCD_OPTIONS set /services/database_sst/$HOSTNAME:factorish-database:4444 $HOST:4444 --ttl ${TTL} > /dev/null
  etcdctl $ETCD_OPTIONS set /services/database_mon/$HOSTNAME:factorish-database:4567 $HOST:4567 --ttl ${TTL} > /dev/null
}

# Block until confd renders the initial configuration templates once.
function init_confd() {
  # wait for confd to run once and install initial templates
  until confd -onetime ${CONFD_OPTIONS}; do
    echo "==> database: waiting for confd to write initial templates..."
    sleep $(($TTL/2)) # sleep for half the TTL
  done
}

# Initialise the MySQL data directory if empty, start a temporary local
# server, set up credentials and the optional database, then stop it.
function init_database() {
  chown -R mysql:mysql /var/lib/mysql
  if [[ ! -d /var/lib/mysql/mysql ]]; then
    echo "==> An empty or uninitialized database is detected in /var/lib/mysql"
    echo "-----> Creating database..."
    mysqld --initialize-insecure > /dev/null 2>&1
    echo "-----> Done!"
  else
    echo "-----> Using an existing database"
  fi
  echo "==> starting mysql in order to set up passwords"
  mysqld_safe --skip-syslog --skip-networking > /dev/null &
  echo "-----> testing if DB is up"
  while [[ ! -e /var/run/mysqld/mysqld.sock ]] ; do sleep 1; done
  while ! mysql -e 'select now()'; do sleep 1; done
  mysql_creds
  if [[ -n ${MYSQL_DATABASE} ]]; then
    echo "-----> creating database: ${MYSQL_DATABASE}"
    mysqladmin create ${MYSQL_DATABASE} || echo ${MYSQL_DATABASE} may already exist.
  fi
  echo "==> stopping mysql after setting up passwords"
  mysqladmin shutdown
}

# Grant replication and application accounts. wsrep_on=OFF keeps these
# local statements out of Galera replication.
function mysql_creds() {
  mysql -e "SET wsrep_on=OFF; GRANT ALL ON *.* TO '$REP_USER'@'%' IDENTIFIED BY '$REP_PASS';"
  mysql -e "SET wsrep_on=OFF; GRANT ALL ON *.* TO '$REP_USER'@'localhost' IDENTIFIED BY '$REP_PASS';"
  mysql -e "SET wsrep_on=OFF; GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASS';"
  mysql -e "SET wsrep_on=OFF; GRANT SUPER ON *.* TO '$MYSQL_USER'@'%' WITH GRANT OPTION;"
  mysql -e 'FLUSH PRIVILEGES;'
}

# Point etcdctl/confd at etcd, wait for it to answer, let stale service
# entries expire, then pre-create the service directories.
function configure_etcd() {
  export ETCD_PORT=${ETCD_PORT:-4001}
  export ETCD="$ETCD_HOST:$ETCD_PORT"
  export ETCD_PATH=${ETCD_PATH:-/database}
  export ETCD_OPTIONS="--no-sync -C $ETCD"
  export CONFD_OPTIONS="-node $ETCD -config-file /app/confd.toml -backend etcd"
  # wait for etcd to be available
  until etcdctl --no-sync -C $ETCD ls >/dev/null 2>&1; do
    echo "echo ==> waiting for etcd at $ETCD..."
    sleep $(($TTL/2)) # sleep for half the TTL
  done
  # wait until etcd has discarded potentially stale values
  echo "==> Sleep $(($TTL+1)) seconds to let old services expire"
  sleep $(($TTL+1))
  etcd_make_directory services/database_port
  etcd_make_directory services/database_sst
  etcd_make_directory services/database_ssi
  etcd_make_directory services/database_mon
}

# Variant used when registrator handles service publication.
function configure_registrator() {
  export ETCD_PORT=${ETCD_PORT:-4001}
  export ETCD="$ETCD_HOST:$ETCD_PORT"
  export CONFD_OPTIONS="-node $ETCD -config-file /app/confd.toml -backend etcd"
  echo "==> Sleep $(($TTL+1)) seconds to let old registrator services expire"
  sleep $(($TTL+1))
}

# Variant for standalone mode: confd reads from environment variables.
function configure_env() {
  export CONFD_OPTIONS="-config-file /app/confd.toml -backend env"
}

# Export this container's eth0 IPv4 address as HOST.
function configure_host() {
  export HOST=`ip -4 addr show eth0| grep -Po 'inet \K[\d.]+'`
}
| true
|
7db4914c9413429957f20679722ea577b85eb841
|
Shell
|
jwmatthews/ec2_scripts
|
/scripts/install_report_server.sh
|
UTF-8
| 722
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/sh
# Assumes that "install_rpm_setup.sh" has already been run
# Installs the report-server RPM plus its mongodb/httpd services and
# waits for mongo to finish initializing before restarting httpd.
# Fixed: use the POSIX "." instead of the bashism "source" (the shebang
# is /bin/sh), and "service httpd start" instead of the invalid
# "service httpd on".
. ./functions.sh
#
# Cloning git repo so the curl scripts under playpen are available for testing.
#
yum install -y git
cd ~
git clone https://github.com/splice/report_server.git
yum -y install report-server || {
    echo "yum install of report-server failed"
    exit 1;
}
chkconfig mongod on
service mongod start
chkconfig httpd on
service httpd start
echo "RPMs installed, waiting for mongo to initialize: `date`"
CMD="grep 'waiting for connections on port 27017' /var/log/mongodb/mongodb.log"
waitfor "${CMD}" "Waiting for mongodb to finish initialization" 10 30
echo "Completed check that mongo is available: `date`"
service httpd restart
| true
|
0117fe2e9b2c98db89331e655e830405bacbaea8
|
Shell
|
0xch4z/dotfiles
|
/scripts/bin/center-me
|
UTF-8
| 417
| 2.6875
| 3
|
[
"CC0-1.0"
] |
permissive
|
#!/usr/bin/env bash
# center-me: move the currently focused yabai window to the center of the
# display it is currently on.
set -e
# JSON geometry of the focused window and of its display, as reported by yabai.
window="$(yabai -m query --windows --window)"
display="$(yabai -m query --displays --window)"
# Compute "x:y" so that the window's center coincides with the display's
# center: (display center) - (half the window size), per axis.
coords="$(jq \
  --argjson window "${window}" \
  --argjson display "${display}" \
  -nr '(($display.frame | .x + .w / 2) - ($window.frame.w / 2) | tostring)
    + ":"
    + (($display.frame | .y + .h / 2) - ($window.frame.h / 2) | tostring)')"
yabai -m window --move "abs:${coords}"
| true
|
9eaa8dd240e63c285a981c47d75f7640f5fef84d
|
Shell
|
lfwanner/dq-apps
|
/bodytrack/run.sh
|
UTF-8
| 415
| 2.859375
| 3
|
[] |
no_license
|
# Run the PARSEC bodytrack benchmark on one of several input sizes,
# selected by flag: -t test, -d dev, -s small, -m medium, -l large.
NTHREADS=1
cd inputs
# NOTE(review): the spec "t:d:s:m:l" declares -t/-d/-s/-m as options that
# *require* an argument, yet ${OPTARG} is never used. Presumably "tdsml"
# was intended -- confirm how the script is invoked before changing it.
while getopts t:d:s:m:l option
do
case "${option}"
in
t) ../bin/bodytrack.arm sequenceB_1 4 1 5 1 0 ${NTHREADS};;
d) ../bin/bodytrack.arm sequenceB_1 4 1 100 3 0 ${NTHREADS};;
s) ../bin/bodytrack.arm sequenceB_1 4 1 1000 5 0 ${NTHREADS};;
m) ../bin/bodytrack.arm sequenceB_2 4 2 2000 5 0 ${NTHREADS};;
l) ../bin/bodytrack.arm sequenceB_4 4 4 4000 5 0 ${NTHREADS};;
esac
done
cd ..
| true
|
21ba341ec723124bc37c5e09b3f3029c695a4bfa
|
Shell
|
davideavagliano-zz/press
|
/publish.sh
|
UTF-8
| 390
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
# Publish the Press artifacts to Bintray, but only when HEAD carries a tag.
current_tag=$(git tag --points-at)
# NOTE(review): `git tag` lists *all* tags (one per line), not the latest
# one -- presumably `git describe --tags --abbrev=0` was intended. Left
# unchanged because the value only appears in the error message.
latest_tag=$(git tag)
if test -z "$current_tag"
then
  # Fixed: printf '%b' renders the \033 color escapes in every shell;
  # bash's plain `echo` would print them literally.
  printf '%b\n' "\033[0;31mError: HEAD doesn't have a tag (\033[0;37mlatest tag is $latest_tag\033[0;31m). If you need to publish your artifacts, create a release first.\033[0m"
else
  printf '%b\n' "\033[0;32mUploading \033[0;37mPress v-$current_tag\033[0;32m to Bintray\033[0m"
  ./gradlew clean build bintrayUpload
fi
| true
|
25813629ff15e335814016ef07a6cc5dd9226cf4
|
Shell
|
manintheit/SampleExpectScript
|
/expect.sh
|
UTF-8
| 1,068
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/expect -f
# Log into a host over ssh with a password and optionally run one command.
# Usage: expect.sh <username> <password> <hostname> [command]
set timeout 8
set username [lindex $argv 0]
set password [lindex $argv 1]
set hostname [lindex $argv 2]
set command [lindex $argv 3]
# Echo the spawned session to stdout (set to 0 to silence it).
log_user 1
if {[llength $argv] == 0} {
send_user "Usage: '\username\' \'password\' \'hostname\' \'\[command\]\'\n"
exit 1
}
send_user "###########Connecting to the $hostname##################\n\n"
#send_user "Config: StrictHostKeyChecking=no\n\n"
spawn ssh -q $username@$hostname
# Wait for the password prompt; bail out on timeout or closed connection.
expect {
timeout { send_user "\nFailed to get password prompt\n"; exit 1 }
eof { send_user "\nSSH failure for $hostname\n"; exit 1 }
"*assword"
}
send "$password\r"
# "*~]$ " matches the default "[user@host ~]$ " shell prompt.
expect {
timeout { send_user "\nLogin failed. Password incorrect.\n"; exit 1}
"*~]$ "
}
send_user "\nPassword is correct for the user $username\n"
# If a command was supplied, run it, wait for the prompt, then log out.
if { $command != "" } {
send_user "Sending command... $command\n"
send "$command\r"
expect {
#timeout { send_user "\nCommand failed logout\n"; exit 1}
eof { send_user "\nCommand failed logout\n"; exit 1}
"*~]$ "
}
send "exit\r"
close
} else {
send "exit\r"
close
}
| true
|
102fbad46a75a9c8d01dcdf95de5f09ac769c1f2
|
Shell
|
zzfd97/6502Decoder
|
/build.sh
|
UTF-8
| 753
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build decode6502 from the sources in src/, picking platform-specific
# flags: Windows and macOS need the standalone argp library.
LIBS="-lm"
INCS=""
DEFS=""
if [[ $OS = *"Windows"* ]]; then
  LIBS="$LIBS -largp"
  DEFS="-D_GNU_SOURCE"
elif [[ `uname` = Darwin ]]; then
  if [ -f /opt/local/include/argp.h ]; then
    # MacPorts packages required: argp-standalone
    LIBS="$LIBS -L/opt/local/lib -largp"
    INCS="$INCS -I/opt/local/include"
    # (MacPorts md5sha1sum required for tests)
  else
    echo "argp not found - but will try building anyway"
  fi
else
  DEFS="-D_GNU_SOURCE"
fi
# Single compile+link of all translation units into the decode6502 binary.
gcc -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers -O3 -g $DEFS $INCS -o decode6502 src/main.c src/memory.c src/em_6502.c src/em_65816.c src/profiler.c src/profiler_instr.c src/profiler_block.c src/profiler_call.c src/tube_decode.c src/musl_tsearch.c $LIBS
| true
|
fa2db2fc1698d52dc93c739ea7b2c932f14e3af1
|
Shell
|
MartinStengard/Bash-Scripts
|
/misc/counter.sh
|
UTF-8
| 644
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Counts up/dow according to paramters.
# $1 = start number
# $2 = end number
# $3 = step number
# @return echo countdown sequense
# Displays the sequence in place on the terminal (one update per second),
# using tput to reposition the cursor after the fixed message.
counter() {
start=$1
end=$2
steps=$3
# Clear screen.
clear
# Message to display before counter.
msg="... "
# Place cursor one row down.
tput cup 1 0
# Print message.
echo -n "$msg"
# Get length of display text.
length="${#msg}"
# Loop over counter parameters.
# seq FIRST INCREMENT LAST -- a negative step counts down.
for i in $( seq $start $steps $end )
do
tput cup 1 $length
echo -n "$i "
sleep 1
done
}
# Example -> count from 10 to 1, step -1.
counter 10 1 -1
# Example -> count from 1 to 5, step 1.
counter 1 5 1
| true
|
aaa7e13fd955efde16176cd305d9c1d95aa6b1c4
|
Shell
|
ggombos/mtree
|
/script/gg/installGG.sh
|
UTF-8
| 2,365
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Build and install the M-tree GiST extension into a local PostgreSQL tree.
set -e
# Source code directory of the M-tree index
readonly SOURCE_DIRECTORY="/home/ggombos/mtree/mtree_gist/source"
# Include directory
readonly POSTGRESQL_INCLUDE_DIRECTORY="/home/ggombos/mtree/mtree_gist/postgre/include/postgresql/server"
# PostgreSQL home of SQL and control files
readonly POSTGRESQL_EXTENSION_DIRECTORY="/home/ggombos/mtree/mtree_gist/postgre/share/postgresql/extension"
# PostgreSQL home of shared object libraries
readonly POSTGRESQL_LIBRARY_DIRECTORY="/home/ggombos/mtree/mtree_gist/postgre"
# Translation units that make up the extension, without the .c suffix.
readonly FILENAMES=(
    "mtree_text"
    "mtree_text_util"
    "mtree_text_array"
    "mtree_text_array_util"
    "mtree_int8"
    "mtree_int8_util"
    "mtree_int8_array"
    "mtree_int8_array_util"
    "mtree_float"
    "mtree_float_util"
    "mtree_float_array"
    "mtree_float_array_util"
    "mtree_util"
    "mtree_gist"
)
# Compile one source file ($1, no suffix) into a position-independent .o.
function compile_file() {
    cc -fPIC -c -I "${POSTGRESQL_INCLUDE_DIRECTORY}" "${SOURCE_DIRECTORY}/$1.c" -o "${SOURCE_DIRECTORY}/$1.o"
}
# Delete the intermediate object file for $1.
function remove_file() {
    rm "${SOURCE_DIRECTORY}/$1.o"
}
# Print the space-separated list of all object files, for the link step.
# Fixed: the loop body referenced the non-existent command "$(unknown)"
# (extraction garbage) instead of the loop variable ${filename}.
function create_parameter_list() {
    local parameter_list=""
    local filename
    for filename in "${FILENAMES[@]}";
    do
        parameter_list+=" ${SOURCE_DIRECTORY}/${filename}.o"
    done
    echo "${parameter_list}"
}
# Link all objects into mtree_gist.so, install it into the PostgreSQL
# library directory, and remove the local copy.
function create_and_copy_so() {
    cc -shared -o "${SOURCE_DIRECTORY}/mtree_gist.so" $(create_parameter_list)
    cp "${SOURCE_DIRECTORY}/mtree_gist.so" "${POSTGRESQL_LIBRARY_DIRECTORY}/mtree_gist.so"
    rm "${SOURCE_DIRECTORY}/mtree_gist.so"
}
# Install the extension SQL file and its .control file, substituting the
# %libdir% placeholder with the actual library directory in a temp copy.
function copy_sql_and_control() {
    cp "${SOURCE_DIRECTORY}/mtree_gist--1.0.sql" "${POSTGRESQL_EXTENSION_DIRECTORY}/mtree_gist--1.0.sql"
    cp "${SOURCE_DIRECTORY}/mtree_gist.control" "${SOURCE_DIRECTORY}/mtree_gist_tmp.control"
    sed -i 's,%libdir%,'"${POSTGRESQL_LIBRARY_DIRECTORY}"',g' "${SOURCE_DIRECTORY}/mtree_gist_tmp.control"
    cp "${SOURCE_DIRECTORY}/mtree_gist_tmp.control" "${POSTGRESQL_EXTENSION_DIRECTORY}/mtree_gist.control"
    rm "${SOURCE_DIRECTORY}/mtree_gist_tmp.control"
}
# Regenerate mtree_gist--1.0.sql from the empty template plus every
# per-type SQL fragment in ../source/typesSQL.
# Fixed: the loop body used the non-existent command "$(unknown)"
# (extraction garbage) instead of the loop variable "${filename}".
function createSQL() {
    cat ../source/mtree_gist--1.0.empty.sql > ../source/mtree_gist--1.0.sql
    local filename
    for filename in "../source/typesSQL"/*
    do
        cat "${filename}" >> ../source/mtree_gist--1.0.sql
    done
}
# createSQL
# Main sequence: compile every unit, link+install the .so, clean up the
# object files, then install the SQL/control files.
# Fixed: both loops passed "$(unknown)" (extraction garbage) to the
# helper functions instead of the loop variable "${filename}".
for filename in "${FILENAMES[@]}";
do
    compile_file "${filename}"
done
create_and_copy_so
for filename in "${FILENAMES[@]}";
do
    remove_file "${filename}"
done
copy_sql_and_control
| true
|
e1b8ff4e35d977515a59ebce1b20d6712ce35afc
|
Shell
|
orsha2/C_language_workshop
|
/beta_versions/Ex2/beta1/tests/test2.sh
|
UTF-8
| 555
| 2.625
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
# Test 2 for my_grep: compare its output for "google" against GNU grep,
# then repeat the run under valgrind to check for memory leaks.
RED='\033[0;31m'
NC='\033[0m' # No Color
Gre='\e[0;32m';
# Fixed: printf '%b' renders the \033/\e color escapes; bash's plain
# `echo` (unlike dash's) prints them literally.
printf '%b' "${Gre}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n${NC}"
printf '%b' "${RED}~~~~ Test 2 - find google in google.html~~~~\n${NC}"
./my_grep google tests/input_files/test_file_2 >tmp_output
grep google tests/input_files/test_file_2 | diff tmp_output -
printf '%b' "${RED}~~~~ Test 2 - valgrind~~~~\n${NC}"
valgrind --leak-check=yes ./my_grep google tests/input_files/test_file_2 >tmp_output
grep google tests/input_files/test_file_2 | diff tmp_output -
| true
|
782156590364caab450a259e0b13076497e9b7bf
|
Shell
|
marcelofelipes/dotfiles
|
/.zshrc
|
UTF-8
| 3,009
| 2.859375
| 3
|
[] |
no_license
|
# Detect the current platform
case "$(uname -s)" in
    Linux*)
        # Linux-specific settings
        export PATH="$HOME/bin:/usr/local/bin:$PATH"
        ;;
    Darwin*)
        # macOS-specific settings
        export PATH="$HOME/bin:/usr/local/bin:$PATH"
        ;;
    *)
        # Defaults for other platforms
        ;;
esac
# Path to the oh-my-zsh installation
export ZSH="$HOME/.oh-my-zsh"
# Personal Aliases
alias dotfiles="cd ~/.dotfiles && brew bundle dump --force && git add . && git commit -m 'update' && git push && cd ~"
# Load environment variables from the .env file, if it exists
if [ -f ~/.dotfiles/dotenv/.env ]; then
    export $(grep -v '^#' ~/.dotfiles/dotenv/.env | xargs)
fi
alias load-env="source ~/.zshrc"
# =================
# Theme to load
ZSH_THEME="robbyrussell"
# List of themes to pick from at random
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
# Other settings and plugins
plugins=()
# ZPLUG_HOME configuration
ZPLUG_HOME="$HOME/.dotfiles/.zplug"
# Install zplug if it is not installed yet
if [ ! -d "$ZPLUG_HOME" ]; then
    alias install-zplug="git clone https://github.com/zplug/zplug $ZPLUG_HOME"
    install-zplug
fi
source $ZPLUG_HOME/init.zsh
# NOTE(review): `zplug load` runs here, *before* the zplug "..." package
# declarations below -- confirm whether those declarations take effect
# (a second `zplug load` after them may be intended).
zplug load
source $ZSH/oh-my-zsh.sh
# Platform-specific settings
case "$(uname -s)" in
    Linux*)
        # Additional settings for Linux
        ;;
    Darwin*)
        # Additional settings for macOS
        export PATH="/opt/homebrew/opt/php@7.4/bin:$PATH"
        ;;
    *)
        # Additional settings for other platforms
        ;;
esac
# Final configuration
zplug "zplug/zplug", hook-build:'zplug --self-manage'
# A next-generation cd command with an interactive filter
#zplug "b4b4r07/enhancd", use:init.sh
# Warn you when you run a command that you've got an alias for
zplug "djui/alias-tips"
zplug "zsh-users/zsh-syntax-highlighting", defer:2
zplug "zsh-users/zsh-autosuggestions"
zplug "zsh-users/zsh-completions"
# oh-my-zsh base config
# This is a selection of the available libs
zplug "robbyrussell/oh-my-zsh", use:"lib/{completion,correction,git,grep,history,key-bindings,termsupport}.zsh"
# oh-my-zsh plugins
zplug "plugins/git", from:oh-my-zsh
zplug "plugins/docker", from:oh-my-zsh
zplug "plugins/docker-compose", from:oh-my-zsh
zplug "plugins/github", from:oh-my-zsh
zplug "plugins/gradle", from:oh-my-zsh
#zplug "plugins/rails", from:oh-my-zsh
zplug "plugins/colored-man-pages", from:oh-my-zsh
zplug "plugins/sudo", from:oh-my-zsh
zplug "plugins/pip", from:oh-my-zsh
zplug "plugins/kubectl", from:oh-my-zsh
zplug "plugins/helm", from:oh-my-zsh
zplug "plugins/terraform", from:oh-my-zsh
zplug "plugins/dotenv", from:oh-my-zsh
# third party oh-my-zsh plugins
zplug 'RobertDeRose/virtualenv-autodetect', as:plugin # really slow
# Commands
zplug "rupa/z", use:z.sh
zplug "kmccormick/rsnapshot-timestamp", as:command
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
| true
|
1abddd9c347b6c098d8bf36991701df80fe3f0e6
|
Shell
|
huskerjeff6224/perlWork
|
/counts_with_wait.sh
|
UTF-8
| 1,357
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
#This script lets you run a series of commands then wait for those commands to finish
# before running another set of commands. Good for jobs that depend on the prevcous commands output
# Stages: (1) name-sort each BAM, (2) convert sorted BAM to SAM,
# (3) run htseq-count on each SAM. Each stage fans out with "&" and then
# reaps its own jobs before the next stage starts.
FAIL=0
echo "starting"
list=`ls | grep -P 'thout_*'`
for dir in $list
do
echo $dir
samtools sort -n $dir/accepted_hits.bam $dir/accepted_hits_sorted &
done
for job in `jobs -p`
do
echo $job
wait $job || let "FAIL+=1"
done
echo $FAIL
if [ "$FAIL" == "0" ];
then
echo "YAY!"
else
echo "FAIL! ($FAIL)"
fi
# Fixed: reset the failure counter so each stage reports independently.
FAIL=0
# Fixed: this assignment previously read "st=", leaving $list stale.
list=`ls | grep -P 'thout_*'`
for dir in $list
do
echo $dir
# Fixed: run in the background so the wait loop below has jobs to reap.
samtools view $dir/accepted_hits_sorted.bam > $dir/accepted_hits_sorted.sam &
done
for job in `jobs -p`
do
echo $job
wait $job || let "FAIL+=1"
done
echo $FAIL
if [ "$FAIL" == "0" ];
then
echo "YAY!"
else
echo "FAIL! ($FAIL)"
fi
FAIL=0
list=`ls | grep -P 'thout_*'`
for dir in $list
do
echo $dir
/safer/investigator_data/sanger/python/bin/htseq-count -q -s no $dir/accepted_hits_sorted.sam /safer/genomes/Homo_sapiens/UCSC/hg19/Annotation/Genes/genes.gtf > $dir/counts.txt &
done
for job in `jobs -p`
do
echo $job
wait $job || let "FAIL+=1"
done
echo $FAIL
if [ "$FAIL" == "0" ];
then
echo "YAY!"
else
echo "FAIL! ($FAIL)"
fi
| true
|
11dc0971a9b603b3941f0c3e090db28fd111b58d
|
Shell
|
YACCGroup/hammer-patch-by-host-collection
|
/patch-job-by-content-group.sh
|
UTF-8
| 838
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# For every Satellite host collection whose name matches -c, launch the
# remote-execution job template (id 94) running a minimal security update.
usage() { echo "$0 usage:" && grep " .)\ #" "$0"; exit 0; }
[ $# -eq 0 ] && usage
# Set default org id
ORG='MyOrg'
CMD='yum update-minimal --security -y'
while getopts "o:c:" arg; do
  case "${arg}" in
    o) # Specifiy org id: hammer organizaion list
      ORG=${OPTARG}
      ;;
    c) # Required: Specify host collection name: hammer host-collection list
      HOST_COLLECTION_NAME=${OPTARG}
      ;;
    *) # Display help.
      usage
  esac
done
echo "$HOST_COLLECTION_NAME"
# Fixed: quote the expansion -- the unquoted [ -z $VAR ] form breaks
# when the value is empty-with-IFS or contains whitespace (SC2086).
if [ -z "$HOST_COLLECTION_NAME" ];then
  usage
fi
for ID in `hammer host-collection list --name $HOST_COLLECTION_NAME | grep $HOST_COLLECTION_NAME | cut -f1 -d,`
do
  hammer job-invocation create --job-template-id 94 --async --inputs command="${CMD}" --search-query "host_collection_id = ${ID}"
done
| true
|
05a338d87c15ddc4d9b7cbe5fe0a1229addaec90
|
Shell
|
tt0y/docker-dev-environment
|
/.docker/start
|
UTF-8
| 481
| 2.53125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Bring up the docker-compose dev environment, print the service URLs,
# then drop the user into a root shell inside the PHP container.
# .env is expected to define PROJECT_DOMEN and PROJECT_NAME.
source .env
sudo docker-compose up -d --build
#PHP_VERSION=$(php -v | tail -r | tail -n 1 | cut -d " " -f 2 | cut -c 1-3)
echo "
===================== 🚀 Done 🚀 ===================
Created by Anton Kuznetsov
Access your new links:
🌎 Web server: http://www.$PROJECT_DOMEN
⚙️ PHPMyAdmin: http://www.$PROJECT_DOMEN:8080
===================== 🚀 Done 🚀 ===================
"
sudo docker exec -it --user root php-$PROJECT_NAME bash
| true
|
6e46c089287f2bc273dce79e26faefa2f8855ccb
|
Shell
|
dandouthit/ubuntu_install
|
/setup.sh
|
UTF-8
| 445
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
#######################################
# Bash script to install apps on a new system (Ubuntu)
# Written by @AamnahAkram from http://aamnah.com
#######################################
# Fixed: the shebang was "#!/bin/bash/" (trailing slash -> "no such
# file or directory" when executed directly).
# Update packages and Upgrade system
# Fixed: added -y to upgrade so the unattended run does not prompt.
sudo apt update -y && sudo apt upgrade -y
# Git install and configuration
echo '###Installing Git..'
sudo apt-get install git -y
git config --global user.email "danieldouthit@gmail.com"
git config --global user.name "Dan Douthit"
| true
|
6a177e0f99ecd3f60dd35fb64dfa04ef078f8466
|
Shell
|
Pmarva/skriptimiskeeled2017
|
/bash/memoryUsage.sh
|
UTF-8
| 357
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Report the total virtual (VSZ) and resident (RSS) memory of all
# processes whose ps line matches the given user name.
echo "Sisestage kasutaja kelle m2lukasutust tahate j2lgida";
read kasutaja
# Fixed: quote the user name (SC2086) and drop the grep process itself
# from the sum with "grep -v grep".
memory=($(ps aux | grep "$kasutaja" | grep -v grep | awk '{sumVSZ+=$5; sumRSS+=$6}END{print sumVSZ" "sumRSS;}'));
vsz=$(numfmt --from-unit=1024 --to=iec "${memory[0]}");
rss=$(numfmt --from-unit=1024 --to=iec "${memory[1]}");
echo "Kasutaja " $kasutaja "vsz " $vsz " ja rss on " $rss;
| true
|
2b31c738d0e4d9042213153ef5b7bc558b8a5f73
|
Shell
|
DingGuodong/LinuxBashShellScriptForOps
|
/projects/LinuxSystemOps/SoftwareManagement/php/php7-install-update.sh
|
UTF-8
| 9,579
| 3.71875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Created by PyCharm.
# File Name: LinuxBashShellScriptForOps:php7-install-update.sh
# Version: 0.0.1
# Author: Guodong
# Author Email: dgdenterprise@gmail.com
# URL: https://github.com/DingGuodong/LinuxBashShellScriptForOps
# Download URL: https://github.com/DingGuodong/LinuxBashShellScriptForOps/tarball/master
# Create Date: 2018/11/19
# Create Time: 14:04
# Description:
# Long Description:
# Usage:
# References:
# Prerequisites: []
# Development Status: 3 - Alpha, 5 - Production/Stable
# Environment: Console
# Intended Audience: System Administrators, Developers, End Users/Desktop
# License: Freeware, Freely Distributable
# Natural Language: English, Chinese (Simplified)
# Operating System: POSIX :: Linux
# Programming Language: GNU bash :: 4+
# Topic: Utilities
set -e
PHP7_VERSION=7.2.12
PHP7_SOURCE_LATEST_VERSION=php-${PHP7_VERSION}
# Colored logging helpers. Each takes exactly one string argument and
# prints it in the named color; they return 1 when misused.
function echo_r (){
    # Color red: Error, Failed
    [[ $# -ne 1 ]] && return 1
    echo -e "\033[31m$1\033[0m"
}
function echo_g (){
    # Color green: Success
    [[ $# -ne 1 ]] && return 1
    echo -e "\033[32m$1\033[0m"
}
function echo_y (){
    # Color yellow: Warning
    [[ $# -ne 1 ]] && return 1
    echo -e "\033[33m$1\033[0m"
}
function echo_b (){
    # Color blue: Debug Level 1
    [[ $# -ne 1 ]] && return 1
    echo -e "\033[34m$1\033[0m"
}
function echo_p (){
    # Color purple,magenta: Debug Level 2
    [[ $# -ne 1 ]] && return 1
    echo -e "\033[35m$1\033[0m"
}
function echo_c (){
    # Color cyan: friendly prompt, Level 1
    [[ $# -ne 1 ]] && return 1
    echo -e "\033[36m$1\033[0m"
}
# Scratch directory for the downloaded tarball and build tree.
WORKDIR="/tmp/.install_php7_from_source"
[[ ! -d ${WORKDIR} ]] && mkdir ${WORKDIR}
[[ -d ${WORKDIR} ]] && cd ${WORKDIR}
function compare_version(){
    # Return 0 (success) when $1 is the highest version number among all
    # arguments -- i.e. "$1 >= every other argument" under GNU version
    # ordering (`sort -V`).
    # Fixed: quoted the expansions (unquoted $(...)/"$@"/$1 broke `test`
    # on empty input, SC2086) and replaced the obsolete `head -1` and the
    # non-POSIX `==` operator of `test`.
    if [ "$(echo "$@" | tr " " "\n" | sort -rV | head -n 1)" = "$1" ]; then
        return 0
    else
        return 1
    fi
}
function can_install_update(){
    # Preflight placeholder -- no checks implemented yet.
    # Fixed: the body was the Python keyword "pass", which the shell
    # executes as an external command (command-not-found at runtime);
    # ":" is the shell no-op builtin.
    :
}
function add_users(){
    # Create the unprivileged "www" system group/user that php-fpm runs
    # as, unless an entry named "www" already exists in /etc/passwd.
    if ! grep ^www: /etc/passwd >/dev/null 2>&1; then
        groupadd -r www
        useradd -r -g www www -c "Web user" -d /dev/null -s /sbin/nologin
    fi
}
function is_php7_installed(){
    # Return 0 if a source installation is already present, 1 otherwise.
    # NOTE(review): the probed paths are nginx paths plus the
    # ~/.nginx_installed marker -- this looks copy-pasted from an nginx
    # installer; confirm whether /usr/local/php7 and ~/.php7_installed
    # were intended (clean() below creates ~/.php7_installed).
    if test -d /usr/local/nginx && test -x /usr/local/nginx/sbin/nginx || test -f ${HOME}/.nginx_installed; then
        # installed
        return 0 # return will save result to $?
    else
        # not installed by source or not installed
        return 1 # fixed: was misspelled "retrun", a command-not-found
    fi
}
# Download the PHP source tarball into WORKDIR unless already present.
function download_source_packages(){
    [[ ! -f ${WORKDIR}/php-${PHP7_VERSION}.tar.gz ]] && wget -c http://cn.php.net/distributions/php-${PHP7_VERSION}.tar.gz
}
# Install the build toolchain and development headers (Debian/Ubuntu).
function install_base(){
    # Completing Preinstallation Tasks
    sudo apt install -y build-essential autoconf libjpeg-turbo8-dev libpng-dev libfreetype6-dev libxslt1-dev libsystemd-dev libldap2-dev
}
# Unpack, configure (prefix /usr/local/php7, fpm as www:www) and build PHP.
function compile_php7_source(){
    tar zxf php-${PHP7_VERSION}.tar.gz
    cd ${WORKDIR}/${PHP7_SOURCE_LATEST_VERSION}
    ./configure --prefix=/usr/local/php7 --with-config-file-path=/usr/local/php7/etc --with-config-file-scan-dir=/usr/local/php7/conf.d --enable-fpm --with-fpm-user=www --with-fpm-group=www --with-mysqli=mysqlnd --with-pdo-mysql=mysqlnd --with-iconv-dir --with-freetype-dir --with-jpeg-dir --with-png-dir --with-zlib --with-libxml-dir --enable-xml --disable-rpath --enable-bcmath --enable-shmop --enable-sysvsem --with-fpm-systemd --enable-inline-optimization --with-curl --enable-mbregex --enable-mbstring --enable-ftp --with-gd --with-openssl --with-mhash --enable-pcntl --enable-sockets --with-xmlrpc --with-libzip --enable-soap --with-gettext --enable-fileinfo --enable-opcache --enable-intl --with-xsl --with-ldap
    make >/dev/null
    sudo make install >/dev/null
}
# Install php.ini, the systemd php7-fpm unit and the fpm pool config,
# move fpm to port 9001, then start and verify the service.
function post_install(){
    sudo cp php.ini-production /usr/local/php7/etc/php.ini
    grep include_path /usr/local/php7/etc/php.ini
    sudo sed -i 's@;include_path = ".:/php/includes"@include_path = ".:/usr/local/php7/lib/php"@g' /usr/local/php7/etc/php.ini
    # sudo cp sapi/fpm/init.d.php-fpm /etc/init.d/php7-fpm
    # sudo chmod +x /etc/init.d/php7-fpm
    ls sapi/fpm/php-fpm.service
    sudo cp sapi/fpm/php-fpm.service /lib/systemd/system/php7-fpm.service
    sudo systemctl enable php7-fpm.service
    sudo systemctl daemon-reload
    sudo cp sapi/fpm/php-fpm.conf /usr/local/php7/etc/php-fpm.conf
    grep -v \; /usr/local/php7/etc/php-fpm.conf | grep -v ^$
    grep -v \; /usr/local/php7/etc/php-fpm.d/www.conf.default | grep -v ^$
    grep -v \; /usr/local/php7/etc/php-fpm.d/www.conf.default | grep -v ^$ | sudo tee /usr/local/php7/etc/php-fpm.d/www.conf
    cat /usr/local/php7/etc/php-fpm.d/www.conf
    sudo sed -i 's/listen = 127.0.0.1:9000/listen = 127.0.0.1:9001/g' /usr/local/php7/etc/php-fpm.d/www.conf
    cat /usr/local/php7/etc/php-fpm.d/www.conf
    sudo systemctl start php7-fpm.service
    systemctl status php7-fpm.service
    netstat -anop | grep 9001
}
function optimize_security_rules(){
    # Raise nproc/nofile limits for the "www" user (php-fpm workers).
    # Checking Resource Limits
    # https://docs.oracle.com/cd/B28359_01/install.111/b32002/pre_install.htm#LADBI246
    # https://docs.oracle.com/en/database/oracle/oracle-database/18/ladbi/checking-resource-limits-for-oracle-software-installation-users.html#GUID-293874BD-8069-470F-BEBF-A77C06618D5A
    # Fixed: only back up the existing limits file if it is present; the
    # unconditional cp failed on a first run and aborted under `set -e`.
    if [ -f /etc/security/limits.d/www.conf ]; then
        cp /etc/security/limits.d/www.conf /etc/security/limits.d/www.conf$(date +%Y%m%d%H%M%S)~
    fi
    tee /etc/security/limits.d/www.conf<<'eof'
www soft nproc 16384
www hard nproc 16384
www soft nofile 65536
www hard nofile 65536
eof
    #ulimit -Sn # Check the soft and hard limits for the file descriptor setting.
    #ulimit -Hn
    #ulimit -Su # Check the soft and hard limits for the number of processes available to a user.
    #ulimit -Hu
    #ulimit -Ss # Check the soft limit for the stack setting.
    #ulimit -Hs
}
function optimize_kernel_parameters(){
    # Back up /etc/sysctl.conf and rewrite it with Oracle/web-server
    # oriented kernel parameters, then apply them with sysctl -p.
    # Configuring Kernel Parameters for Linux
    # http://docs.oracle.com/cd/B28359_01/install.111/b32002/pre_install.htm#LADBI246
    # https://docs.oracle.com/en/database/oracle/oracle-database/18/ladbi/minimum-parameter-settings-for-installation.html#GUID-CDEB89D1-4D48-41D9-9AC2-6AD9B0E944E3
    # https://docs.oracle.com/en/database/oracle/oracle-database/18/ladbi/changing-kernel-parameter-values.html#GUID-FB0CC366-61C9-4AA2-9BE7-233EB6810A31
    # Fixed two garbled lines in the heredoc:
    #   "kernel.shmall gc_stale_time= 4294967295" -> "kernel.shmall = 4294967295"
    #   "m.max_map_count=262144"                  -> "vm.max_map_count=262144"
    cp /etc/sysctl.conf /etc/sysctl.conf$(date +%Y%m%d%H%M%S)~
    cat >/etc/sysctl.conf<<eof
# http://docs.oracle.com/cd/B28359_01/install.111/b32002/pre_install.htm#LADBI246
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.core_uses_pid = 1
kernel.hung_task_timeout_secs = 0
kernel.msgmax = 65536
kernel.msgmnb = 65536
kernel.sem = 250 32000 100 128
kernel.shmall = 4294967295
kernel.shmmax = 68719476736
kernel.shmmni = 4096
kernel.sysrq = 0
net.core.netdev_max_backlog = 262144
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.somaxconn = 262144
net.core.wmem_default = 8388608
net.core.wmem_max = 16777216
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.ip_forward = 0
net.ipv4.ip_local_port_range = 9000 65500
net.ipv4.tcp_fin_timeout = 1
net.ipv4.tcp_keepalive_time = 1200
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_max_tw_buckets = 6000
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_sack = 1
net.ipv4.tcp_synack_retries = 5
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_syn_retries = 5
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_wmem = 4096 16384 4194304
vm.swappiness = 0
vm.max_map_count=262144
eof
    sysctl -p
    test -x /etc/init.d/procps && (service procps start || systemctl start systemd-sysctl.service)
    if test $(uname -r | awk -F'.' '{print$1}') -gt 3; then
        # https://www.bufferbloat.net/projects/codel/wiki/
        echo_y "if your kernel version >3, net.core.default_qdisc maybe need to configured."
    fi
}
# Tune php.ini (upload limits, timezone, execution time) and restart fpm.
function generate_config_file(){
    grep post_max_size /usr/local/php7/etc/php.ini
    grep max_input_time /usr/local/php7/etc/php.ini
    grep date.timezone /usr/local/php7/etc/php.ini
    grep max_execution_time /usr/local/php7/etc/php.ini
    sudo sed -i 's/post_max_size = 8M/post_max_size = 16M/g' /usr/local/php7/etc/php.ini
    sudo sed -i 's/max_input_time = 60/max_input_time = 300/g' /usr/local/php7/etc/php.ini
    sudo sed -i 's@;date.timezone =@date.timezone = Asia/Shanghai@g' /usr/local/php7/etc/php.ini
    sudo sed -i 's/max_execution_time = 30/max_execution_time = 300/g' /usr/local/php7/etc/php.ini
    sudo sed -i 's/;always_populate_raw_post_data = -1/always_populate_raw_post_data = -1/g' /usr/local/php7/etc/php.ini
    sudo systemctl restart php7-fpm.service
    systemctl status php7-fpm.service
}
# Drop a completion marker in $HOME and remove the build directory.
function clean(){
    test ! -f ${HOME}/.php7_installed && touch ${HOME}/.php7_installed
    cd && rm -rf ${WORKDIR}
    echo_g "PHP7 installation or update finished successfully!"
}
# Full install/update sequence.
# NOTE(review): is_php7_installed probes nginx paths (see above), so the
# "only generate config on fresh installs" guard may not behave as its
# name suggests -- confirm.
function install_php7(){
    can_install_update
    install_base
    add_users
    download_source_packages
    compile_php7_source
    post_install
    if ! is_php7_installed; then
        generate_config_file
    fi
    clean
}
function main(){
    install_php7
}
main
set +e
| true
|
458a5e179d552ed8090c2f76f530b806527ce327
|
Shell
|
xandercrews/kubernetes-on-nomad
|
/script/ssh.sh
|
UTF-8
| 2,176
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Print the command line used to reach the target node. In test mode it
# degrades to `cat`; with KON_VAGRANT_SSH=true it goes through vagrant;
# otherwise plain ssh with an optional user@ prefix.
ssh::cmd () {
    if [ "$_test_" == "true" ]; then
        echo "cat"
        return
    fi
    if [ "$KON_VAGRANT_SSH" == "true" ]; then
        echo "vagrant ssh $(ssh::host)"
        return
    fi
    echo "ssh $(ssh::user)$(ssh::host)"
}

# Print "user@" when KON_SSH_USER is set, nothing otherwise.
ssh::user () {
    if [ -n "$KON_SSH_USER" ]; then echo "$KON_SSH_USER@"; fi
}

# Print the configured target host.
ssh::host () {
    echo "$KON_SSH_HOST"
}
# Sanity-check the connection (and passwordless sudo) on the remote side.
ssh::ping () {
    $(ssh::cmd) << EOF
sudo echo ping
EOF
}
# Bundle the certificates relevant for the node at $1 into pki.tgz and
# copy it, the active config and the kon binary to the remote host.
# Relies on config::/pki:: helpers and the $active_config/$KON_PKI_DIR
# globals defined elsewhere in the project.
ssh::copy () {
    ip_addr=$1
    region=$(config::get_region "$ip_addr")
    if [ ! "$ip_addr" ]; then fail "ip_addr is missing"; fi
    consul_cert_bundle_name="$(pki::generate_name "consul" "$ip_addr")"
    nomad_cert_bundle_name="$(pki::generate_name "nomad" "$ip_addr")"
    # Pick client and/or server nomad certificates depending on what was
    # generated for this region.
    nomad_files=""
    if [ "$nomad_cert_bundle_name" == "client.$region.nomad" ]; then
        nomad_files="client.$region.nomad.*"
    elif [ -f "client.$region.nomad.crt" ]; then
        nomad_files="client.$region.nomad.* server.$region.nomad.*"
    else
        nomad_files="server.$region.nomad.*"
    fi
    (
        cd $KON_PKI_DIR
        rm -f pki.tgz
        tar zcf pki.tgz $(config::get_host "$ip_addr").* $consul_cert_bundle_name.* $nomad_files ca.*
        cd -
    )
    if [ "$_test_" == "true" ]; then
        echo "copy active_config=$active_config"
    elif [ "$KON_VAGRANT_SSH" == "true" ]; then
        vagrant scp $active_config $(ssh::host):~/
        vagrant scp $BASEDIR/kon $(ssh::host):~/
        vagrant scp $KON_PKI_DIR/pki.tgz $(ssh::host):~/
    else
        scp $active_config $(ssh::user)$(ssh::host):~/
        scp $(common::which kon) $(ssh::user)$(ssh::host):~/
        scp $KON_PKI_DIR/pki.tgz $(ssh::user)$(ssh::host):~/
    fi
}
# Install kon on the remote node: in dev mode refresh from /kon/dev,
# otherwise move the copied binary/config into place; both unpack pki.tgz.
ssh::install_kon () {
    if [ "$KON_DEV" == "true" ]; then
        $(ssh::cmd) << EOF
sudo /kon/dev/update-all.sh \
&& sudo mkdir -p /etc/kon/pki \
&& sudo tar zxf ~/pki.tgz -C /etc/kon/pki/
EOF
    else
        $(ssh::cmd) << EOF
sudo mkdir -p /opt/bin \
&& sudo mkdir -p /etc/kon/pki \
&& sudo mv ~/kon /opt/bin \
&& sudo chmod a+x /opt/bin/kon \
&& sudo mv ~/kon.conf /etc/kon/ \
&& sudo tar zxf ~/pki.tgz -C /etc/kon/pki/
EOF
    fi
}
# Run the node setup step of kon on the remote host.
ssh::setup_node () {
    $(ssh::cmd) << EOF
sudo /opt/bin/kon setup node
EOF
}
| true
|
708936f550b8fbace14a484701d9df373b03bda7
|
Shell
|
jgstew/tools
|
/bash/bigfix_run_qna_debian.sh
|
UTF-8
| 785
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
# Download the BESAgent .deb (once), extract just its qna binary into
# /tmp, and either show relevance types (no args) or evaluate the given
# relevance expression with it.
args=$*
# TODO: detect if Ubuntu
# cat /etc/os-release /etc/lsb-release | grep --ignore-case --max-count=1 --count ubuntu
if ! [ -f /tmp/BESAgent-debian.deb ]; then
  curl -o /tmp/BESAgent-debian.deb https://software.bigfix.com/download/bes/100/BESAgent-10.0.9.21-debian6.amd64.deb
fi
# https://www.cyberciti.biz/faq/how-to-extract-a-deb-file-without-opening-it-on-debian-or-ubuntu-linux/
# NOTE(review): `ar x ... --output=` needs a recent GNU binutils --
# confirm the target systems ship one that supports it.
ar x /tmp/BESAgent-debian.deb --output=/tmp
tar --overwrite --extract --file=/tmp/data.tar.gz --directory=/tmp --exclude=var* --exclude=etc* --exclude=lib/*
rm /tmp/control.tar.gz
rm /tmp/data.tar.gz
rm /tmp/debian-binary
rm --dir /tmp/lib
# get arg length
len=${#args}
if [ $len -lt 3 ]; then
  /tmp/opt/BESClient/bin/qna -showtypes
else
  echo $args | /tmp/opt/BESClient/bin/qna -showtypes
fi
| true
|
3c42e7c746b20c0eca9290752c1452badb562768
|
Shell
|
westtoer/search.westtoer.be
|
/bin/solr-delete-one.sh
|
UTF-8
| 486
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Delete a single document from the local Solr index; the document id is
# the file path relative to the /mnt/westtoer mount point.
file="$1"
relfile=$(echo "${file}" | sed -e 's/^\/mnt\/westtoer//')
# NOTE(review): $path (shell-escaped) and $qval (percent-escaped) are
# computed but never used below -- the delete body embeds the raw
# ${relfile}. Confirm whether ${qval} was meant to be used instead.
path=$(echo "$relfile" | sed -e 's/ /\\ /g')
qval=$(echo "$relfile" | sed -e "s/%/%25/g;s/ /%20/g;s/'/%27/g;s/(/%28/g;s/)/%29/g;s/&/%26/g;s/;/%3B/g")
echo "deleting id == ${relfile}"
SOLRBASE="http://localhost:8983/solr";
SOLRUPDATEURI="${SOLRBASE}/update";
# Build the curl command as a string and pipe it through sh.
cmd="curl -s ${SOLRUPDATEURI} -H \"Content-Type: text/xml\" --data-binary \"<delete><id>${relfile}</id></delete>\""
echo $cmd
echo $cmd | sh
| true
|
f09f3dd11bbdece735c35385d7030f94c9694318
|
Shell
|
reubenlindroos/OmniPhotos
|
/Python/preprocessing/openvslam/run_image.sh
|
UTF-8
| 1,618
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Output files:
# 1) map.msg: map information
# 2) frame_trajectory.txt: camera pose information
# For each dataset: build a map with OpenVSLAM, localize the camera
# trajectory against it, then convert the msgpack map to JSON.
echo "Start reconstructing camera pose."
PROGRAM_FILE="/mnt/sda1/workspace_linux/openvslam/build/run_camera_pose_reconstruction "
ORB_VOCAB="/mnt/sda1/workdata/openvslam_data/orb_vocab/orb_vocab.dbow2"
DATASET_DIR="/mnt/sda1/workdata/openvslam_data/openvslam_win32_version_test/"
OUTPUT_DIR="/mnt/sda1/workspace_linux/openvslam/result/"
# Whitespace-separated list of dataset sub-directories to process.
DATASET_LIST="room_0 \
             hotel_0"
for DATASET_NAME in $DATASET_LIST; do
    OUTPUT_MAP_DB="${OUTPUT_DIR}${DATASET_NAME}_map.msg"
    OUTPUT_MAP_DB_JSON="${OUTPUT_DIR}${DATASET_NAME}_map.json"
    OUTPUT_TRAJ="${OUTPUT_DIR}${DATASET_NAME}_traj.csv"
    CONFIG_FILE=${DATASET_DIR}${DATASET_NAME}"/config.yaml"
    INPUT_IMAGE_DIR=${DATASET_DIR}${DATASET_NAME}"/Input/"
    COMMON_PARAMETERS="--eval-log \
        --auto-term \
        --debug \
        --no-sleep \
        --frame-skip 1 \
        -v ${ORB_VOCAB} \
        -c ${CONFIG_FILE} \
        -i ${INPUT_IMAGE_DIR} \
        --map-db ${OUTPUT_MAP_DB}"
    #------------------
    echo "Step 1: Reconstructing the map"
    MAPPING_PARAMETERS="${COMMON_PARAMETERS} \
        -t map \
        --repeat-times 2"
    eval "${PROGRAM_FILE}${MAPPING_PARAMETERS}"
    #------------------
    echo "Step 2: Recontructing the camera pose"
    LOCATION_PARAMETERS="${COMMON_PARAMETERS} \
        -t trajectory \
        --trajectory_path ${OUTPUT_TRAJ}"
    eval "${PROGRAM_FILE} ${LOCATION_PARAMETERS}"
    #------------------
    # Convert the map msg to json & format the json
    eval "msgpack2json -d -i ${OUTPUT_MAP_DB} -o ${OUTPUT_MAP_DB_JSON}"
done
| true
|
7e41887dac64a6c462b6923da427d35cbf8a8cf0
|
Shell
|
LLNL/conduit
|
/src/examples/docker/ubuntu/example_build.sh
|
UTF-8
| 811
| 2.765625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Copyright (c) Lawrence Livermore National Security, LLC and other Conduit
# Project developers. See top-level LICENSE AND COPYRIGHT files for dates and
# other details. No copyright assignment is required to contribute to Conduit.
# Package the current Conduit source tree into a tarball and build the
# "conduit-ubuntu:current" docker image from it. Each command is echoed
# before it runs.
# remove old source tarball if it exists
echo "rm -f conduit.docker.src.tar.gz"
rm -f conduit.docker.src.tar.gz
# get current copy of the conduit source
echo "cd ../../../../ && python package.py src/examples/docker/ubuntu/conduit.docker.src.tar"
cd ../../../../ && python package.py src/examples/docker/ubuntu/conduit.docker.src.tar
# change back to the dir with our Dockerfile
echo "cd src/examples/docker/ubuntu/"
cd src/examples/docker/ubuntu/
# exec docker build to create image
echo "docker build -t conduit-ubuntu:current ."
docker build -t conduit-ubuntu:current .
| true
|
f75281fcaf2928af4b5e44a2da879bb57650e391
|
Shell
|
Philonous/haskell-servant-template
|
/scripts/run.sh.mustache
|
UTF-8
| 130
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint template; {{name}} is substituted by mustache at
# project-generation time.
set -e
# Block until PostgreSQL accepts TCP connections on database:5432.
until nc -z database 5432; do
  echo "Waiting for PostgreSQL..."
  sleep 1
done
# Replace this shell with the application, forwarding all arguments.
exec /app/{{name}} "$@"
| true
|
da43aa0e7030703b2d9392391f56ce121ce88367
|
Shell
|
okloecker/cryptrsync
|
/src/cryptrsync.sh
|
UTF-8
| 6,382
| 4.09375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -o nounset
set -o errexit
# script to watch a directory and if any changes occur, sync this with a remote,
# either using rclone or rsync
# There is a distinction between dir to watch and local dir to allow use of gocrypt:
# inotifywait can't watch the encrypted view, only the original dir
# call with parameters:
# ./cryptrsync.sh
#       [--id=<uniqueID>]
#       [--dry-run]
#       --method=<rsync|rclone>
#       --plaindir=<unencrypted dir> --syncdir=<encrypted dir> --url=<rsync or rclone url>
#       --log=<logfile>
# File patterns (editor temp/lock files) that should never trigger a sync.
readonly ignore="\.~lock\..*#|\.sw?|~$|4913"
# readonly rclone=/opt/rcb/usr/bin/rclone #`which rclone`
readonly rclone=`which rclone`
rcloneopts="--progress --stats=2s"
rsyncopts="--archive --stats --delete --progress --human-readable --compress --update"
readonly delay="5"
readonly delayAfterFail="30"
# How long desktop notifications stay visible, in milliseconds.
readonly popupduration=5000
readonly configdir=$HOME/.config/cryptrsync
readonly DATE='date +%Y-%m-%d_%H:%M:%S'
LOG=${configdir}/log_cryptrsync
id="cryptrsync"
# NOTE(review): under `set -o nounset` these positional assignments abort
# when fewer than four arguments are given, even though parseoptions()
# below accepts the documented --key=value form; presumably legacy
# positional defaults -- confirm and consider ${1:-} fallbacks.
method=${1}
plaindir=${2}
syncdir=${3}
url=${4}
dryrun=0
# whether to use rclone (or rsync) (1=rclone, 0=rsync)
use_rclone=0
echo_log() {
    # Timestamped log line: echoes to stdout and appends to ${LOG}.
    # Relies on globals: DATE (timestamp command), id (instance tag), LOG.
    local stamp
    stamp=$($DATE)
    echo -e "$stamp [${id}] $1" | tee -a "${LOG}"
}
visualsleep() {
    # Announce a wait of $1 seconds, then print one dot per elapsed second.
    local secs=$1 i
    echo "Waiting $secs secs"
    for (( i = 1; i <= secs; i++ )); do
        echo -n .
        sleep 1
    done
}
alert(){
# Pop up a transient desktop notification announcing a successful sync.
# NOTE(review): ${@} is interpolated inside one quoted string; callers in
# this script pass a single quoted word ("OK ${id}"), which keeps this a
# single argument — confirm before passing multi-word lists from elsewhere.
notify-send -t $popupduration "
🔒 ${@}
`$DATE`
cryptrsync
"
}
alert_fail(){
# Pop up a failure notification with an error icon.
# Relies on the global $icon set in main(); same ${@} caveat as alert().
notify-send -t $popupduration "
🔒 ${@}
`$DATE`
cryptrsync
" --icon=${icon}
}
parseoptions() {
    # Parse "--key=value" style arguments into the script's globals.
    #
    # Fix: iterate over "$@" (quoted) instead of the unquoted $* so values
    # containing spaces (e.g. --plaindir=/my dir) arrive intact, and split
    # key/value with parameter expansion instead of echo|cut, which
    # collapsed internal whitespace in values.
    for arg in "$@"; do
        # Key is everything before the first "=". It is OK to have an "="
        # in the value, but not the key.
        key="${arg%%=*}"
        value="${arg#*=}"
        case ${key} in
            "--id")
                id="${value}"
                ;;
            "--method")
                # Only the literal "rclone" switches back-ends; anything
                # else keeps the rsync default.
                if [ "${value}" = "rclone" ] ; then
                    use_rclone=1
                fi
                ;;
            "--dry-run")
                # Propagate dry-run to both potential back-ends.
                rcloneopts="${rcloneopts} --dry-run"
                rsyncopts="${rsyncopts} --dry-run"
                dryrun=1;;
            "--plaindir")
                plaindir=${value};;
            "--syncdir")
                syncdir=${value}
                ;;
            "--url")
                url=${value}
                ;;
            "--log")
                LOG="${value}"
                ;;
            *)
                echo_log "Unrecognised option ${key}"
                exit 1
        esac
    done
}
mountgocrypt() {
# mount syncdir if necessary
# Reverse-mounts ${plaindir} as an encrypted view at ${syncdir} via
# gocryptfs. The presence of gocryptfs.diriv is used as the "already
# mounted" marker.
if [ ! -e "${syncdir}/gocryptfs.diriv" ] ; then
echo_log "============ Reverse mounting crypted dir ${syncdir}"
echo_log "Enter gocrypt password for ${plaindir} - ${syncdir}:"
# Prefer fetching the password from the GNOME keyring via secret-tool;
# otherwise gocryptfs prompts interactively.
if which secret-tool
then
extpass="secret-tool lookup gocryptfs password"
if [ -z `${extpass}` ] ; then echo_log "\e[31m ===== COULD NOT FIND GNOME-KEYRING PASSWORD ===========\e[0m" ; fi
gocryptfs -extpass "${extpass}" -reverse -q "${plaindir}" "${syncdir}"
else
gocryptfs -reverse -q "${plaindir}" "${syncdir}"
fi
else
echo_log "============ Crypted dir already mounted: ${syncdir}"
fi
}
function finish {
    # Runs on every exit (installed as an EXIT trap below): stop the
    # background inotifywait watcher and remove the temp files.
    #
    # Bug fix: the test was `[ -n ${waitpid:-''} ]`; with waitpid empty the
    # unquoted expansion vanished, leaving `[ -n ]`, which is always true,
    # so the cleanup branch (and a bogus `kill`) ran even when nothing had
    # been started. Quoting the operand restores the intended check.
    if [ -n "${waitpid:-}" ] ; then
        echo_log "--- Cleaning up, killing $waitpid and removing $waitpidfile and ${changedfile}"
        echo
        rm -rf "${changedfile}" "$waitpidfile"
        # The watcher may have died already; never let the trap itself fail.
        kill "$waitpid" 2>/dev/null || true
    else
        echo_log "============ Exiting"
    fi
}
trap finish EXIT #SIGINT SIGTERM
lock() {
    # Take an exclusive advisory lock named <prefix> under $LOCKFILE_DIR,
    # held on file descriptor $2 (defaults to the global $LOCK_FD).
    local prefix=$1
    local fd=${2:-$LOCK_FD}
    local lock_file=$LOCKFILE_DIR/$prefix.lock

    # Open (creating if needed) the lock file on the chosen descriptor;
    # eval is required because the fd number is itself a variable.
    eval "exec $fd>$lock_file"

    # Block until the exclusive lock is granted.
    if flock "$fd"; then
        return 0
    else
        return 1
    fi
}
unlock() {
    # Release the lock held on the given descriptor (defaults to $LOCK_FD).
    local fd=${1:-$LOCK_FD}
    if flock -u "$fd"; then
        return 0
    fi
    return 1
}
readchanges () {
# Queue one changed path for the next sync, under the changesfile lock.
#   $1 - path reported by inotifywait
f="$1"
lock changesfile || echo_log "Can\'t lock in readchanges()"
echo_log "============ NOTIFIED CHANGE FOR $f"
# NOTE(review): $f is unquoted here, so runs of whitespace in a path are
# collapsed; the path is only logged later, not used for the transfer,
# so this is cosmetic — confirm if that ever changes.
echo $f >> ${changedfile}
unlock || echo_log "--- Couldn\'t remove lock"
}
startinotifyw() {
# Start a recursive inotifywait watcher on ${plaindir} in the background.
# fd 3 smuggles the watcher's PID out of the subshell into $waitpidfile;
# events matching ${ignore} (editor temp/lock files) are filtered out and
# the remaining paths are queued via readchanges.
( inotifywait -m "${plaindir}" -r -e close_write,create,delete --format %w%f & echo $! >&3 ) 3>$waitpidfile | egrep --line-buffered -v ${ignore} |
while read f ; do readchanges "$f" ; done &
waitpid=$(<$waitpidfile)
echo_log "PID of inotifywait ${waitpid}"
echo_log "CHANGEDFILE ${changedfile}"
echo_log "PIDFILE ${waitpidfile}"
}
sync () {
# Flush the queue of changed paths to the remote with rclone or rsync.
#   $1 (optional): 1 forces a sync even when the queue file is empty.
# NOTE(review): this function shadows /bin/sync; only this function is
# ever invoked by name within the script, so the shadowing is harmless
# here — keep in mind if the filesystem sync(1) is ever needed.
local force=${1:-0}
lock changesfile || echo_log "Can\'t lock in while"
if [ $force = 1 ] || [ -s ${changedfile} ] ; then
# sleep a little more between noticing changes and syncing to give processes
# a chance to finish writing
if [ $force = 0 ] ; then visualsleep $delay ; fi
# Log each distinct queued path once.
sort ${changedfile} | uniq | while read f ; do
echo_log "============ SYNCING $f"
done
echo_log "......................................................."
[ $dryrun -eq 1 ] && echo_log "\e[1m ===== DRY RUN ===========\e[0m"
if [ $use_rclone = 1 ] ; then
echo_log "\e[1m ============ CALLING rclone\e[0m"
if which secret-tool ; then
# rclone will read RCLONE_CONFIG_PASS environment variable and use it
# for password if possible:
export RCLONE_CONFIG_PASS=`secret-tool lookup rclone config`
if [ -z "${RCLONE_CONFIG_PASS}" ] ; then echo_log "\e[31m ===== COULD NOT FIND GNOME-KEYRING PASSWORD ===========\e[0m" ; fi
fi
cmd="${rclone} sync ${rcloneopts} ${syncdir} ${url}"
else
echo_log "\e[1m============ CALLING rsync at with options ${rsyncopts}\e[0m"
cmd="rsync ${rsyncopts} ${syncdir}/ ${url}"
fi
echo "cmd=${cmd}"
# Run the transfer with errexit suspended so a failure can be retried.
set +o errexit
${cmd} 2>&1 ; rv=$?
set -o errexit
if test ${rv} -eq 0 ; then alert "OK ${id}" ; fi
echo_log "\e[1m ============ FINISHED ============ \e[0m"
# Retry forever after a failure, pausing delayAfterFail between attempts.
while test ${rv} -ne 0 ; do
alert_fail "ERR ${id}" | tee -a "${LOG}"
echo_log "\e[1mrsync failed\e[0m"
visualsleep $delayAfterFail
set +o errexit
${cmd} 2>&1 ; rv=$?
set -o errexit
done
# Only clear the queue after a successful transfer.
rm -f ${changedfile}
fi
unlock || echo_log "--- Couldn\'t remove lock"
}
main() {
    # Entry point: parse CLI flags, mount the encrypted (reverse) view,
    # start the inotify watcher, force one initial sync, then poll the
    # change queue forever at ${delay}-second intervals.
    #
    # Fix: quote "$@" here and when forwarding into parseoptions so that
    # arguments containing spaces are not re-split by the shell.
    parseoptions "$@"
    readonly icon=/usr/share/icons/Adwaita/256x256/status/dialog-error.png
    readonly changedfile=`mktemp --suffix=_${id}_sync`
    readonly waitpidfile=`mktemp --suffix=_${id}_sync`
    readonly LOCKFILE_DIR=/tmp
    readonly LOCK_FD=9
    readonly LOCKWAIT=120
    mountgocrypt
    startinotifyw
    # Force an immediate full sync, then keep draining queued changes.
    sync 1
    while true ; do
        sync
        sleep ${delay}s
    done
}
main "$@"
| true
|
965a844efc598d1c7dadb09b84b083e8c87bea31
|
Shell
|
spetrunia/range-locking-benchmark
|
/run-test.sh
|
UTF-8
| 7,422
| 3.796875
| 4
|
[] |
no_license
|
usage () {
  # Print command-line help for this benchmark driver.
  printf '%s\n' \
    "Usage: $0 [-p] [-m] [-c] [-d] [-e] server_name test_run_name" \
    " -p - Use perf for profiling" \
    " -m - Put datadir on /dev/shm" \
    " -c - Assume sysbench uses 4 tables and move them to different CFs." \
    " -d - Same but use 2 CFs" \
    " -e - Remove the secondary index"
}
###
### Parse options
###
while getopts ":pmcde" opt; do
case ${opt} in
p ) USE_PERF=1
;;
m ) USE_RAMDISK=1
;;
c ) USE_4_CFS=1
;;
d ) USE_2_CFS=1
;;
e ) DROP_SECONDARY_INDEX=1
;;
\? )
usage;
exit 1
;;
esac
done
shift $((OPTIND -1))
SERVERNAME=$1
RUN_NAME=$2
if [[ $USE_4_CFS && $USE_2_CFS ]] ; then
echo "Use either -c (USE_4_CFS) or -d (USE_2_CFS)"
exit 1
fi
if [ "x${SERVERNAME}y" = "xy" ] ; then
usage;
exit 1
fi
if [ "x${RUN_NAME}y" = "xy" ] ; then
usage;
exit 1
fi
I_S="information_schema"
SERVER_DIR=mysql-5.6-$SERVERNAME
if [ ! -d $SERVER_DIR ]; then
SERVER_DIR=mysql-8.0-$SERVERNAME
USING_MYSQL8=1
I_S="performance_schema"
if [ ! -d $SERVER_DIR ]; then
SERVER_DIR=mysql-$SERVERNAME
if [ ! -d $SERVER_DIR ]; then
echo "Bad server name $SERVERNAME."
exit 1
fi
fi
fi
if [[ $USING_MYSQL8 ]]; then
MYSQL_CLIENT=$SERVER_DIR/build/bin/mysql
MYSQLD_BINARY=$SERVER_DIR/build/bin/mysqld
else
MYSQL_CLIENT=$SERVER_DIR/client/mysql
MYSQLD_BINARY=$SERVER_DIR/sql/mysqld
fi
if [ ! -f $MYSQL_CLIENT ]; then
echo "Cannot find $MYSQL_CLIENT"
exit 1;
fi
if [ ! -f $MYSQLD_BINARY ]; then
echo "Cannot find $MYSQLD_BINARY"
exit 1;
fi
RESULT_DIR=results/$RUN_NAME
if [ -d $RESULT_DIR ]; then
echo "Result directory for $RUN_NAME already exists"
exit 1
fi
echo "Starting test "$RUN_NAME" with $SERVER_DIR"
if [[ $USE_PERF ]] ; then
echo " Collecting perf profile"
fi
if [[ $USE_RAMDISK ]] ; then
echo " Using /dev/shm for data dir"
fi
if [[ $USE_4_CFS ]] ; then
echo " Data is in 4 tables"
fi
if [[ $USE_2_CFS ]] ; then
echo " Data is in 2 tables"
fi
#############################################################################
### Start the server
killall -9 mysqld
sleep 5
DATA_DIR=data-fbmysql-$SERVERNAME
rm -rf $DATA_DIR
initialize_mysql8_datadir() {
# Create a fresh MySQL 8 data directory with no root password
# (--initialize-insecure), using the per-server config file.
$MYSQLD_BINARY --defaults-file=./my-fbmysql-${SERVERNAME}.cnf --initialize-insecure
}
if [[ $USE_RAMDISK ]] ; then
rm -rf /dev/shm/$DATA_DIR
rm -rf /dev/shm/data-fbmysql-*
if [[ $USING_MYSQL8 ]] ; then
# intialize
mkdir /dev/shm/$DATA_DIR
ln -s /dev/shm/$DATA_DIR $DATA_DIR
initialize_mysql8_datadir
else
cp -r ${DATA_DIR}.clean /dev/shm/$DATA_DIR
ln -s /dev/shm/$DATA_DIR $DATA_DIR
fi
else
if [[ $USING_MYSQL8 ]] ; then
mkdir $DATA_DIR
initialize_mysql8_datadir
else
cp -r ${DATA_DIR}.clean $DATA_DIR
fi
fi
#exit 0;
MYSQL_CMD="$MYSQL_CLIENT --defaults-file=./my-fbmysql-${SERVERNAME}.cnf -uroot"
server_attempts=0
while true ; do
$MYSQLD_BINARY --defaults-file=./my-fbmysql-${SERVERNAME}.cnf &
sleep 5
client_attempts=0
while true ; do
$MYSQL_CMD -e "drop database if exists sbtest"
$MYSQL_CMD -e "create database sbtest"
if [ $? -eq 0 ]; then
break
fi
sleep 1
client_attempts=$((client_attempts + 1))
if [ $client_attempts -ge 10 ]; then
break;
fi
done
MYSQLD_PID=`ps -C mysqld --no-header | awk '{print $1}'`
if [[ "a${MYSQLD_PID}b" != "ab" ]] ; then
break
fi
server_attempts=$((server_attempts + 1))
if [ $server_attempts -ge 4 ]; then
echo "Failed to launch mysqld"
exit 1
fi
done
#############################################################################
### Prepare the benchmark
RESULT_DIR=results/$RUN_NAME
mkdir -p $RESULT_DIR
SYSBENCH_ARGS=" --db-driver=mysql --mysql-host=127.0.0.1 --mysql-user=root \
--mysql-storage-engine=rocksdb \
--time=60 \
/usr/share/sysbench/oltp_write_only.lua"
if [[ $USE_4_CFS ]] ; then
SYSBENCH_ARGS="$SYSBENCH_ARGS --tables=4 --table-size=250000"
elif [[ $USE_2_CFS ]] ; then
SYSBENCH_ARGS="$SYSBENCH_ARGS --tables=2 --table-size=500000"
else
SYSBENCH_ARGS="$SYSBENCH_ARGS --table-size=1000000"
fi
## /usr/share/sysbench/oltp_write_only.lua --table-size=250000 --tables=4"
SYSBENCH_TEST="oltp_write_only.lua"
(cd $SERVER_DIR && git log -1 ) > $RESULT_DIR/server-cset.txt
(cd $SERVER_DIR/rocksdb && git log -1 ) > $RESULT_DIR/rocksdb-cset.txt
cat > $RESULT_DIR/info.txt <<END
SERVERNAME=$SERVERNAME
SYSBENCH_TEST=$SYSBENCH_TEST
SERVER_DIR=$SERVER_DIR
SYSBENCH_ARGS=$SYSBENCH_ARGS
END
sleep 3
sysbench $SYSBENCH_ARGS prepare | tee $RESULT_DIR/sysbench-prepare.txt
if [[ $USE_4_CFS ]] ; then
echo "Splitting 4 tables into different CFs"
$MYSQL_CMD < make-4-cfs.sql
fi
if [[ $USE_2_CFS ]] ; then
echo "Splitting 2 tables into different CFs"
$MYSQL_CMD < make-2-cfs.sql
fi
if [[ $DROP_SECONDARY_INDEX ]]; then
if [[ $USE_4_CFS ]]; then
echo "Dropping the secondary indexes"
for i in `seq 1 4` ; do
echo "alter table sbtest.sbtest$i drop key k_1" | $MYSQL_CMD
echo "show create table sbtest.sbtest$i" | $MYSQL_CMD
done
fi
if [[ $USE_2_CFS ]]; then
echo "DROP_SECONDARY_INDEX and USE_2_CFS are not supported"
exit 1
fi
echo "Dropping the secondary index"
echo "alter table sbtest.sbtest1 drop key k_1" | $MYSQL_CMD
echo "show create table sbtest.sbtest1" | $MYSQL_CMD
fi
sleep 3
$MYSQL_CMD -e "show variables like 'rocksdb%'" > $RESULT_DIR/variables-rocksdb.txt
$MYSQL_CMD -e "show variables" > $RESULT_DIR/variables-all.txt
$MYSQL_CMD -e "select * from $I_S.global_status where variable_name like 'ROCKSDB%'" > $RESULT_DIR/status-before-test.txt
sleep 3
#############################################################################
### Start the profiler
if [[ $USE_PERF ]] ; then
# Start perf
sudo sh -c "echo -1 >> /proc/sys/kernel/perf_event_paranoid"
#sudo perf record -F 99 -p $MYSQLD_PID -g sleep 60 &
sudo perf record -F 99 -a -g sleep 60 &
fi
#############################################################################
### Run the benchmark
RUNS="1 5 10 20 40 60 80 100"
if [[ $USE_PERF ]] ; then
RUNS="100"
fi
for threads in $RUNS ; do
#echo "THREADS $threads $storage_engine"
$MYSQL_CMD -e "drop table if exists sbtest.rocksdb_vars;"
$MYSQL_CMD -e "create table sbtest.rocksdb_vars as select * from $I_S.global_status where variable_name like 'ROCKSDB%'"
$MYSQL_CMD -e "drop table if exists sbtest.rocksdb_perf_context_global;"
$MYSQL_CMD -e "create table sbtest.rocksdb_perf_context_global as select * from information_schema.rocksdb_perf_context_global \
where 1"
SYSBENCH_ALL_ARGS="$SYSBENCH_ARGS --threads=$threads"
OUTFILE="${RESULT_DIR}/sysbench-run-${threads}.txt"
sysbench $SYSBENCH_ALL_ARGS run | tee $OUTFILE
$MYSQL_CMD -e \
"select A.VARIABLE_NAME, B.VARIABLE_VALUE - A.VARIABLE_VALUE \
from $I_S.global_status B, sbtest.rocksdb_vars A \
where B.VARIABLE_NAME=A.VARIABLE_NAME AND B.VARIABLE_VALUE - A.VARIABLE_VALUE >0" > $RESULT_DIR/status-after-test-$threads.txt
$MYSQL_CMD -e \
"select A.STAT_TYPE, FORMAT(B.VALUE - A.VALUE,0) \
from information_schema.rocksdb_perf_context_global B, sbtest.rocksdb_perf_context_global A \
where B.STAT_TYPE=A.STAT_TYPE AND B.VALUE - A.VALUE >0" > $RESULT_DIR/perf_context-after-test-$threads.txt
done
if [[ $USE_PERF ]] ; then
CUR_USER=`id -un`
sudo chown $CUR_USER perf.*
fi
| true
|
32c88627c716275f99c2afd9745dd32cf78f2775
|
Shell
|
lilab-bcb/skylab
|
/docker/zarr-to-loom/build.sh
|
UTF-8
| 296
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the zarr-to-loom Docker image with the given version tag.
tag=$1

# Fix: quote $tag — the unquoted `[ -z $tag ]` raised a test error (or
# mis-parsed) for tags containing whitespace instead of rejecting them.
if [ -z "$tag" ]; then
    echo -e "\nYou must provide a tag"
    echo -e "\nUsage: bash build_docker.sh TAG\n"
    exit 1
fi

docker build . -t "quay.io/humancellatlas/zarr-to-loom:$tag"
echo "You can now push with docker push quay.io/humancellatlas/zarr-to-loom:$tag"
| true
|
6655ed61bc0c5d601cc7c7e2d070259d8c055283
|
Shell
|
qrvd/discord-unix-bot
|
/bin/some
|
UTF-8
| 525
| 2.640625
| 3
|
[
"BSL-1.0"
] |
permissive
|
#!/bin/bash
set -ueo pipefail
# Sing "All Star" one line at a time into a Discord channel.
# Relies on: sbin/say (posts a message) and $SRCID (target channel id),
# both provided by the surrounding bot environment.

# saywait SECONDS TEXT — post TEXT via sbin/say, then pause SECONDS to
# pace the lyrics (0 skips the sleep entirely).
function saywait() {
sbin/say "$SRCID" <<< "$2"
if [[ "$1" -gt 0 ]]; then
sleep "$1"
fi
}
saywait 1 '*...BODY just told me* :notes:'
saywait 3 '*the **world** is gonna roll me* :notes:'
saywait 3 "*I ain't the sharpest tool in the shed-* :notes:"
saywait 1 '*She was looking kinda dumb* :musical_note:'
saywait 2 '*With her finger and her thumb* :notes:'
saywait 2 '*In the shape of an L on her forehead!* :musical_note:'
saywait 0 'Source: <https://www.youtube.com/watch?v=L_jWHffIx5E>'
| true
|
73e75cf1ab17a52f2cdbd07be274ff9f04c43f37
|
Shell
|
Dunaiskyi/Project
|
/backup/root/etc/nginx/ssl/acme/deploy/gcore_cdn.sh
|
UTF-8
| 4,581
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Here is the script to deploy the cert to G-Core CDN service (https://gcorelabs.com/ru/) using the G-Core Labs API (https://docs.gcorelabs.com/cdn/).
# Returns 0 when success.
#
# Written by temoffey <temofffey@gmail.com>
# Public domain, 2019
#export DEPLOY_GCORE_CDN_USERNAME=myusername
#export DEPLOY_GCORE_CDN_PASSWORD=mypassword
######## Public functions #####################
#domain keyfile certfile cafile fullchain
gcore_cdn_deploy() {
  # acme.sh deploy hook: upload a renewed certificate to G-Core Labs CDN
  # and switch the matching CDN resource over to it.
  #   $1 domain, $2 key file, $3 cert file, $4 CA file, $5 full-chain file
  # Uses acme.sh helpers (_debug/_info/_err/_get/_post/_savedomainconf).
  _cdomain="$1"
  _ckey="$2"
  _ccert="$3"
  _cca="$4"
  _cfullchain="$5"

  _debug _cdomain "$_cdomain"
  _debug _ckey "$_ckey"
  _debug _ccert "$_ccert"
  _debug _cca "$_cca"
  _debug _cfullchain "$_cfullchain"

  # Flatten the PEM files to single-line strings with literal \n
  # separators, as required inside the JSON payload.
  _fullchain=$(tr '\r\n' '*#' <"$_cfullchain" | sed 's/*#/#/g;s/##/#/g;s/#/\\n/g')
  _key=$(tr '\r\n' '*#' <"$_ckey" | sed 's/*#/#/g;s/#/\\n/g')
  _debug _fullchain "$_fullchain"
  _debug _key "$_key"

  # Credentials: prefer the environment, fall back to the saved config.
  if [ -z "$DEPLOY_GCORE_CDN_USERNAME" ]; then
    if [ -z "$Le_Deploy_gcore_cdn_username" ]; then
      _err "Please define the target username: export DEPLOY_GCORE_CDN_USERNAME=username"
      return 1
    fi
  else
    Le_Deploy_gcore_cdn_username="$DEPLOY_GCORE_CDN_USERNAME"
    _savedomainconf Le_Deploy_gcore_cdn_username "$Le_Deploy_gcore_cdn_username"
  fi
  if [ -z "$DEPLOY_GCORE_CDN_PASSWORD" ]; then
    if [ -z "$Le_Deploy_gcore_cdn_password" ]; then
      _err "Please define the target password: export DEPLOY_GCORE_CDN_PASSWORD=password"
      return 1
    fi
  else
    Le_Deploy_gcore_cdn_password="$DEPLOY_GCORE_CDN_PASSWORD"
    _savedomainconf Le_Deploy_gcore_cdn_password "$Le_Deploy_gcore_cdn_password"
  fi

  _info "Get authorization token"
  _request="{\"username\":\"$Le_Deploy_gcore_cdn_username\",\"password\":\"$Le_Deploy_gcore_cdn_password\"}"
  _debug _request "$_request"
  export _H1="Content-Type:application/json"
  _response=$(_post "$_request" "https://api.gcdn.co/auth/signin")
  _debug _response "$_response"
  _regex=".*\"token\":\"\([-._0-9A-Za-z]*\)\".*$"
  _debug _regex "$_regex"
  _token=$(echo "$_response" | sed -n "s/$_regex/\1/p")
  _debug _token "$_token"

  if [ -z "$_token" ]; then
    _err "Error G-Core Labs API authorization"
    return 1
  fi

  _info "Find CDN resource with cname $_cdomain"
  export _H2="Authorization:Token $_token"
  _response=$(_get "https://api.gcdn.co/resources")
  _debug _response "$_response"
  # Fix: removed a dead _regex assignment here — it used a non-greedy
  # PCRE pattern that sed cannot evaluate and was immediately overwritten
  # by the basic-regex line below.
  _regex="^.*\"cname\":\"$_cdomain\".*$"
  _debug _regex "$_regex"
  _resource=$(echo "$_response" | sed 's/},{/},\n{/g' | _egrep_o "$_regex")
  _debug _resource "$_resource"
  _regex=".*\"id\":\([0-9]*\).*\"rules\".*$"
  _debug _regex "$_regex"
  _resourceId=$(echo "$_resource" | sed -n "s/$_regex/\1/p")
  _debug _resourceId "$_resourceId"
  _regex=".*\"sslData\":\([0-9]*\).*$"
  _debug _regex "$_regex"
  _sslDataOld=$(echo "$_resource" | sed -n "s/$_regex/\1/p")
  _debug _sslDataOld "$_sslDataOld"
  _regex=".*\"originGroup\":\([0-9]*\).*$"
  _debug _regex "$_regex"
  _originGroup=$(echo "$_resource" | sed -n "s/$_regex/\1/p")
  _debug _originGroup "$_originGroup"
  if [ -z "$_resourceId" ] || [ -z "$_originGroup" ]; then
    _err "Not found CDN resource with cname $_cdomain"
    return 1
  fi

  _info "Add new SSL certificate"
  _date=$(date "+%d.%m.%Y %H:%M:%S")
  _request="{\"name\":\"$_cdomain ($_date)\",\"sslCertificate\":\"$_fullchain\",\"sslPrivateKey\":\"$_key\"}"
  _debug _request "$_request"
  _response=$(_post "$_request" "https://api.gcdn.co/sslData")
  _debug _response "$_response"
  _regex=".*\"id\":\([0-9]*\).*$"
  _debug _regex "$_regex"
  _sslDataAdd=$(echo "$_response" | sed -n "s/$_regex/\1/p")
  _debug _sslDataAdd "$_sslDataAdd"
  if [ -z "$_sslDataAdd" ]; then
    _err "Error new SSL certificate add"
    return 1
  fi

  _info "Update CDN resource"
  _request="{\"originGroup\":$_originGroup,\"sslData\":$_sslDataAdd}"
  _debug _request "$_request"
  _response=$(_post "$_request" "https://api.gcdn.co/resources/$_resourceId" '' "PUT")
  _debug _response "$_response"
  _regex=".*\"sslData\":\([0-9]*\).*$"
  _debug _regex "$_regex"
  _sslDataNew=$(echo "$_response" | sed -n "s/$_regex/\1/p")
  _debug _sslDataNew "$_sslDataNew"
  if [ "$_sslDataNew" != "$_sslDataAdd" ]; then
    _err "Error CDN resource update"
    return 1
  fi

  # Delete the previous certificate only after the resource switch-over.
  if [ -z "$_sslDataOld" ] || [ "$_sslDataOld" = "null" ]; then
    _info "Not found old SSL certificate"
  else
    _info "Delete old SSL certificate"
    _response=$(_post '' "https://api.gcdn.co/sslData/$_sslDataOld" '' "DELETE")
    _debug _response "$_response"
  fi

  _info "Certificate successfully deployed"
  return 0
}
| true
|
74548b6c34cbffca351a165c5a469f3b6c7b9b05
|
Shell
|
ag3mma/learningToCode
|
/sec2ex6bashShellScriptingUdemy.sh
|
UTF-8
| 415
| 4.03125
| 4
|
[] |
no_license
|
#! /bin/bash
# Sixth exercise of the course Shell Scripting on Udemy
# Classify the user-supplied path as directory / regular file / other
# existing entry, and list it.
# Fix: `ls -l` previously listed the current working directory regardless
# of the input; list the classified path itself instead.
read -p "Give a name of a file or a directory: " INPUT
if [ -d "${INPUT}" ]
then
    echo "The input is a directory."
    ls -l -- "${INPUT}"
elif [ -f "${INPUT}" ]
then
    echo "The input is a regular file."
    ls -l -- "${INPUT}"
elif [ -e "${INPUT}" ]
then
    # Exists but is neither a directory nor a regular file
    # (e.g. a device node, socket, or FIFO).
    echo "The input file exists."
    ls -l -- "${INPUT}"
else
    echo "File or directory not present, or invalid input."
fi
| true
|
49b1ddbb3245d8c9f299b051a9d661af56fbd304
|
Shell
|
kushida-gh/audit
|
/rhel7-baseline-compare
|
UTF-8
| 2,556
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Compare diff before/after baseline files, run script as root.
# For every audit category, diff before/<host>-<cat>-<date>.txt against
# after/<host>-<cat>-<date>.txt and write the result into diff/.
rightnow=$(date +%Y-%m-%d)
systemhostname=$(hostname | cut -d '.' -f 1)
out="$systemhostname"

# One entry per audit category. The literal fragments match the names
# produced by the baseline-collection script (including the historic
# double dash that "ifconfig-" yields in the final file name).
categories=(
  local-users-with-shell-access
  group
  rpm-list
  rpm-count
  services
  services-enabled
  selinux-mode
  selinux-status
  ifconfig-
  netstat-listening-tcp
  netstat-listening-udp
  processes
  process-tree
  cron-root
  dns-servers
  ntp-servers
  directories
  disk-usage
)

# Bug fix: the original hand-unrolled list appended the users diff to the
# *group* output file; driving input and output from the same category
# keeps them in sync and removes the copy-paste hazard.
for category in "${categories[@]}"; do
  diff "before/$out-$category-$rightnow.txt" "after/$out-$category-$rightnow.txt" \
    >> "diff/$out-$category-$rightnow.txt"
done

# Remove 0-byte diff files representing no changes for that category.
# (Done without cd so a missing diff/ directory cannot make the delete
# run in the wrong place.)
find diff -maxdepth 1 -type f -size 0 -delete

# Finally, list files which have system changes in them
ls -l diff*
| true
|
563c08de61e0359b63a298b7699f2e26ad8018b7
|
Shell
|
trngaje/es_losernatorconfig
|
/config/scripts/configscripts/mupen64plus.sh
|
UTF-8
| 8,076
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# This file is part of The RetroPie Project
#
# The RetroPie Project is the legal property of its developers, whose names are
# too numerous to list here. Please refer to the COPYRIGHT.md file distributed with this source.
#
# See the LICENSE.md file at the top-level directory of this distribution and
# at https://raw.githubusercontent.com/RetroPie/RetroPie-Setup/master/LICENSE.md
#
function onstart_mupen64plus_joystick() {
# Begin a fresh per-device config section in a temp file; the section is
# later merged into InputAutoCfg.ini by onend_mupen64plus_joystick().
# write temp file header
echo "; ${DEVICE_NAME}_START " > /tmp/mp64tempconfig.cfg
echo "[${DEVICE_NAME}]" >> /tmp/mp64tempconfig.cfg
iniConfig " = " "" "/tmp/mp64tempconfig.cfg"
# Default pad state before mapping: plugged, plugin 2, no mouse.
iniSet "plugged" "True"
iniSet "plugin" "2"
iniSet "mouse" "False"
iniSet "AnalogDeadzone" "4096,4096"
iniSet "AnalogPeak" "32768,32768"
# Left empty; filled by map_mupen64plus_joystick if thumbs are bound.
iniSet "Mempak switch" ""
iniSet "Rumblepak switch" ""
}
function map_mupen64plus_joystick() {
local input_name="$1"
local input_type="$2"
local input_id="$3"
local input_value="$4"
local keys
local dir
case "$input_name" in
up)
keys=("DPad U")
dir=("Up")
;;
down)
keys=("DPad D")
dir=("Down")
;;
left)
keys=("DPad L")
dir=("Left")
;;
right)
keys=("DPad R")
dir=("Right")
;;
b)
keys=("A Button")
;;
y)
keys=("B Button")
;;
a)
keys=("C Button D")
;;
x)
keys=("C Button U")
;;
leftbottom|leftshoulder)
keys=("L Trig")
;;
rightbottom|rightshoulder)
keys=("R Trig")
;;
lefttop|lefttrigger)
keys=("Z Trig")
;;
start)
keys=("Start")
;;
leftanalogleft)
keys=("X Axis")
dir=("Left")
;;
leftanalogright)
keys=("X Axis")
dir=("Right")
;;
leftanalogup)
keys=("Y Axis")
dir=("Up")
;;
leftanalogdown)
keys=("Y Axis")
dir=("Down")
;;
rightanalogleft)
keys=("C Button L")
dir=("Left")
;;
rightanalogright)
keys=("C Button R")
dir=("Right")
;;
rightanalogup)
keys=("C Button U")
dir=("Up")
;;
rightanalogdown)
keys=("C Button D")
dir=("Down")
;;
leftthumb)
keys=("Mempak switch")
;;
rightthumb)
keys=("Rumblepak switch")
;;
*)
return
;;
esac
local key
local value
#iniConfig " = " "" "/tmp/mp64keys.cfg"
for key in "${keys[@]}"; do
# read key value. Axis takes two key/axis values.
iniGet "$key"
case "$input_type" in
axis)
# key "X/Y Axis" needs different button naming
if [[ "$key" == *Axis* ]]; then
# if there is already a "-" axis add "+" axis value
if [[ "$ini_value" == *\(* ]]; then
value="${ini_value}${input_id}+)"
# if there is already a "+" axis add "-" axis value
elif [[ "$ini_value" == *\)* ]]; then
value="axis(${input_id}-,${ini_value}"
# if there is no ini_value add "+" axis value
elif [[ "$input_value" == "1" ]]; then
value="${input_id}+)"
else
value="axis(${input_id}-,"
fi
elif [[ "$input_value" == "1" ]]; then
value="axis(${input_id}+) ${ini_value}"
else
value="axis(${input_id}-) ${ini_value}"
fi
;;
hat)
if [[ "$key" == *Axis* ]]; then
if [[ "$ini_value" == *\(* ]]; then
value="${ini_value}${dir})"
elif [[ "$ini_value" == *\)* ]]; then
value="hat(${input_id} ${dir} ${ini_value}"
elif [[ "$dir" == "Up" || "$dir" == "Left" ]]; then
value="hat(${input_id} ${dir} "
elif [[ "$dir" == "Right" || "$dir" == "Down" ]]; then
value="${dir})"
fi
else
if [[ -n "$dir" ]]; then
value="hat(${input_id} ${dir}) ${ini_value}"
fi
fi
;;
*)
if [[ "$key" == *Axis* ]]; then
if [[ "$ini_value" == *\(* ]]; then
value="${ini_value}${input_id})"
elif [[ "$ini_value" == *\)* ]]; then
value="button(${input_id},${ini_value}"
elif [[ "$dir" == "Up" || "$dir" == "Left" ]]; then
value="button(${input_id},"
elif [[ "$dir" == "Right" || "$dir" == "Down" ]]; then
value="${input_id})"
fi
else
value="button(${input_id}) ${ini_value}"
fi
;;
esac
iniSet "$key" "$value"
done
}
function onend_mupen64plus_joystick() {
local bind
local axis
local axis_neg
local axis_pos
for axis in "X Axis" "Y Axis"; do
if [[ "$axis" == *X* ]]; then
axis_neg="DPad L"
axis_pos="DPad R"
else
axis_neg="DPad U"
axis_pos="DPad D"
fi
# analog stick sanity check
# replace Axis values with DPAD values if there is no Axis
# device setup
if ! grep -q "$axis" /tmp/mp64tempconfig.cfg ; then
iniGet "${axis_neg}"
bind=${ini_value//)/,}
iniGet "${axis_pos}"
ini_value=${ini_value//axis(/}
ini_value=${ini_value//hat(/}
ini_value=${ini_value//button(/}
bind="${bind}${ini_value}"
iniSet "$axis" "$bind"
iniDel "${axis_neg}"
iniDel "${axis_pos}"
fi
done
# If there is no Z Trig try to map the L shoulder
# button to it via copying over the existing L Trig
# value and deleting it (L Trig) after
if ! grep -q "Z Trig" /tmp/mp64tempconfig.cfg ; then
iniGet "L Trig"
iniSet "Z Trig" "${ini_value}"
iniDel "L Trig"
fi
echo "; ${DEVICE_NAME}_END " >> /tmp/mp64tempconfig.cfg
echo "" >> /tmp/mp64tempconfig.cfg
# abort if old device config cannot be deleted.
# keep original mupen64plus-input-sdl configs.
mkdir -p "$configdir/mupen64plus"
local file="$configdir/mupen64plus/InputAutoCfg.ini"
if [[ -f "$file" ]]; then
# backup current config file
cp "$file" "${file}.bak"
sed -i /"${DEVICE_NAME}_START"/,/"${DEVICE_NAME}_END"/d "$file"
if grep -q "$DEVICE_NAME" "$file" ; then
rm /tmp/mp64tempconfig.cfg
return
fi
else
cat > "$file" << _EOF_
; InputAutoCfg.ini for Mupen64Plus SDL Input plugin
; Keyboard_START
[Keyboard]
plugged = True
plugin = 2
mouse = False
DPad R = key(100)
DPad L = key(97)
DPad D = key(115)
DPad U = key(119)
Start = key(13)
Z Trig = key(122)
B Button = key(306)
A Button = key(304)
C Button R = key(108)
C Button L = key(106)
C Button D = key(107)
C Button U = key(105)
R Trig = key(99)
L Trig = key(120)
Mempak switch = key(44)
Rumblepak switch = key(46)
X Axis = key(276,275)
Y Axis = key(273,274)
; Keyboard_END
_EOF_
fi
# append temp device configuration to InputAutoCfg.ini
cat /tmp/mp64tempconfig.cfg >> "$file"
rm /tmp/mp64tempconfig.cfg
}
| true
|
9a407d9a518521719edf486228a3c1f2e2cef521
|
Shell
|
kdave/xfstests
|
/tests/btrfs/156
|
UTF-8
| 3,138
| 3.453125
| 3
|
[] |
no_license
|
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2017 SUSE Linux Products GmbH. All Rights Reserved.
#
# FS QA Test 156
#
# Check if btrfs can correctly trim free space in block groups
#
# An ancient regression prevent btrfs from trimming free space inside
# existing block groups, if bytenr of block group starts beyond
# btrfs_super_block->total_bytes.
# However all bytenr in btrfs is in btrfs logical address space,
# where any bytenr in range [0, U64_MAX] is valid.
#
# Fixed by patch named "btrfs: Ensure btrfs_trim_fs can trim the whole fs".
#
. ./common/preamble
_begin_fstest auto quick trim balance
# Import common functions.
. ./common/filter
# real QA test starts here
# Modify as appropriate.
_supported_fs btrfs
_require_scratch
_require_fstrim
# We need the allocated space to actually use that amount so the trim amount
# comes out correctly. Because we mark free extents as TRIMMED we won't trim
# the free extents on the second fstrim and thus we'll get a trimmed bytes at <
# half of the device if we have compression enabled, even though fs trim did the
# correct thing.
_require_no_compress
# 1024fs size
fs_size=$((1024 * 1024 * 1024))
# Use small files to fill half of the fs
file_size=$(( 1024 * 1024 ))
nr_files=$(( $fs_size / $file_size / 2))
# Force to use single data and meta profile.
# Since the test relies on fstrim output, which will differ for different
# profiles
_check_minimal_fs_size $fs_size
_scratch_mkfs -b $fs_size -m single -d single > /dev/null
_scratch_mount
_require_batched_discard "$SCRATCH_MNT"
for n in $(seq -w 0 $(( $nr_files - 1))); do
$XFS_IO_PROG -f -c "pwrite 0 $file_size" "$SCRATCH_MNT/file_$n" \
> /dev/null
done
# Flush all buffer data into disk, to trigger chunk allocation
sync
# Now we have take at least 50% of the filesystem, relocate all chunks twice
# so all chunks will start after 1G in logical space.
# (Btrfs chunk allocation will not rewind to reuse lower space)
_run_btrfs_balance_start $SCRATCH_MNT >> $seqres.full
# To avoid possible false ENOSPC alert on v4.15-rc1, seems to be a
# reserved space related bug (maybe related to outstanding space rework?),
# but that's another story.
sync
_run_btrfs_balance_start $SCRATCH_MNT >> $seqres.full
# Now remove half of the files to make some holes for later trim.
# While still keep the chunk space fragmented, so no chunk will be freed
rm $SCRATCH_MNT/file_*[13579] -f
# Make sure space is freed
sync
trimmed=$($FSTRIM_PROG -v "$SCRATCH_MNT" | _filter_fstrim)
echo "Trimmed=$trimmed total_size=$fs_size ratio=$(($trimmed * 100 / $fs_size))%" \
>> $seqres.full
# For correct full fs trim, both unallocated space (less than 50%)
# and free space in existing block groups (about 25%) should be trimmed.
# If less than 50% is trimmed, then only unallocated space is trimmed.
# BTW, without fix only 31% can be trimmed, while after fix it's 64%.
if [ $trimmed -lt $(( $fs_size / 2)) ]; then
echo "Free space in block groups not trimmed"
echo "Trimmed=$trimmed total_size=$fs_size ratio=$(($trimmed * 100 / $fs_size))%"
fi
echo "Silence is golden"
# success, all done
status=0
exit
| true
|
e9752e83b1ffcafc26a9dd13e7e5334865dbe83a
|
Shell
|
ChenyunWu/DescribingTextures
|
/models/naive_classifier/train_ft_fc_tune.sh
|
UTF-8
| 1,011
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Submit a grid of naive-classifier training jobs to slurm: one sbatch
# per (tune, fc, ft) combination, restricted to experiments 7..19 of the
# v1 sweep enumeration.
cd ~/work1/DescribeTexture
. /home/chenyun/anaconda3/etc/profile.d/conda.sh
conda activate py37_cuda92
exp=v1
exp_i=0
partition=1080ti-short
for tune in False True; do
  for fc in "" "512"; do
    for ft in "4" "3" "2" "1" "1,4" "2,4" "3,4" "1,3" "2,3" "1,2"; do
      # Only experiment indices 7..19 are (re)submitted in this run.
      if (( exp_i >= 7 && exp_i <= 19 )); then
        partition=2080ti-short
        out="output/naive_classify/${exp}_${exp_i}_ft${ft}_fc${fc}_tune${tune}"
        options="MODEL.BACKBONE_FEATS [${ft}] MODEL.FC_DIMS [${fc}] TRAIN.TUNE_BACKBONE ${tune} OUTPUT_PATH ${out}"
        echo "${exp}${exp_i}: ${options}"
        export options
        fname=nc_${exp}_${exp_i}
        sbatch --job-name ${fname} --exclude node155 -p ${partition} -o logs/train_${fname}.out -e logs/train_${fname}.err \
          models/naive_classifier/train.sbatch
        # Brief pause so the scheduler isn't hammered.
        sleep 0.1
      fi
      exp_i=$((exp_i + 1))
    done
  done
done
| true
|
dc22247081b61f20f79a9ac36ea158bdf6321245
|
Shell
|
CSCfi/pouta-openshift-cluster
|
/playbooks/files/monitoring/check_cinder/check_cinder.bash
|
UTF-8
| 664
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# A wrapper Bash script that sets up a Python virtualenv and OpenStack
# credentials for the check_cinder.py monitoring check.
# (Comment fix: the old header referred to check_oso_deploy.py, which is
# not what this wrapper runs.)
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Create the virtualenv on first run, then activate it. Output is
# deliberately discarded: this runs as a monitoring probe.
if [[ ! -d "$HOME/check_cinder" ]]; then
    virtualenv "$HOME/check_cinder" &> /dev/null
    source "$HOME/check_cinder/bin/activate"
    pip install -r "$SCRIPT_DIR/requirements.txt" &> /dev/null
else
    source "$HOME/check_cinder/bin/activate"
fi

if [[ ! -f /dev/shm/secret/openrc.sh ]]; then
    echo "Cannot find openrc credentials in /dev/shm/secret/openrc.sh"
    exit 1
fi
source /dev/shm/secret/openrc.sh

# Fix: forward all arguments unmangled ("$@" instead of the unquoted $@)
# and quote the script path, then preserve the check's exit status past
# the deactivate call.
python "$SCRIPT_DIR/check_cinder.py" "$@"
ret=$?
deactivate
exit $ret
| true
|
27d3d027c8f149dbda369095832c0748cbc6dd28
|
Shell
|
Methodo-Stats-Tutor/resources
|
/bin/pdf2php.sh
|
UTF-8
| 1,251
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Convert a PDF into an annotatable PHP page:
#   $1 - working directory, $2 - input PDF (relative to $1), $3 - unique id
# Each step is chained with && so any failure stops the pipeline.
chemin=$1
entryPdf=$2
uid=$3
cd "$chemin" &&
mkdir -p "$uid" &&
cd "$uid" &&
# Render the PDF to HTML with external CSS/images named after the uid.
pdf2htmlEX --embed-css 0 --css-filename "$uid.css" --embed-image 0 --optimize-text 1 --fit-width 1000 "$chemin$entryPdf" "$uid.html" &&
# Strip every <script>...</script> block emitted by pdf2htmlEX.
awk 'BEGIN{RS="</script>"}/<script/{gsub("<script.*","")}{print}END{if(RS=="")print}' "$uid.html" > "$uid.temp" &&
mv $uid.temp $uid.html &&
# Rewrite asset references to live under the per-document subdirectory.
sed -i "s/src=\"/src=\"$uid\//" "$uid.html" &&
sed -i "s/href=\"/href=\"$uid\//" "$uid.html" &&
# Inject the annotation toolchain (CSS/JS, with PHP mode switches) just
# before </body>.
sed -i '/<\/body>/i <link rel="stylesheet" href="../../tools/zenburn.css">\n<link rel="stylesheet" href="../../tools/css/theme-dark/annotorious-dark.css" />\n<link rel="stylesheet" type="text/css" href="../../tools/annotator.<?php echo $_GET["mode"]; ?>.css">\n<script src="../../plugins/jQuery/jQuery-2.1.4.min.js"></script>\n<script src="../../tools/highlight.pack.js"></script>\n<link rel="stylesheet" type="text/css" href="../../tools/css/<?php echo $_GET["mode"]; ?>/annotorious.css">\n<script src="../../tools/annotator.<?php echo $_GET["mode"]; ?>.js"></script>\n<script src="../../tools/annotorious.<?php echo $_GET["mode"]; ?>.min.js"></script>\n<script src="../../tools/mstAnnot2.js"></script>\n' "$uid.html" &&
# Publish as a PHP page one level up.
mv "$uid.html" "../$uid.php" &&
cd ..
#&&rm "$entryPdf"
| true
|
7305b7e25a47f486f2ccf2cd3b512f8e017a2e4e
|
Shell
|
albertoaus/bisect
|
/test.sh
|
UTF-8
| 250
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build-and-test helper for git bisect: compile Hello.java, run it, and
# report OK/Fail depending on whether it prints exactly "Hello".
echo "Compilando"
javac Hello.java
echo "Lanzo Hello"
java Hello > resultado.txt
# Fix: quote the expansion — with empty or multi-word output the unquoted
# `[ $A = "Hello" ]` was a test syntax error instead of a clean "Fail".
A=$(cat resultado.txt)
if [ "$A" = "Hello" ]; then
    echo "OK"
else
    echo "Fail"
fi
rm Hello.class
exit
| true
|
1351446931eb63cfa36e8c1bad30cbe1fc398a78
|
Shell
|
mrwhale/limitlessled-bash
|
/led.sh
|
UTF-8
| 8,168
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Driver for LimitlessLED wifi bulbs: commands are sent to the wifi
# bridge at $ipaddress:$portnum (via UDP, see sendCmd below).
# Usage: led.sh <c|w> <zone> <command> [param]
if [ -z $1 ]
then
echo "You must enter a parameter:"
echo "e.g. 'led.sh' c 1 on #turns colour zone 1 one"
exit "1"
fi
##########
# Config
##########
# Wifi controller information
ipaddress="10.1.1.23"
portnum="8899"
##########
# Zone Information
##########
# Change the value on the left hand side
# NOTE(review): "all=0" is assigned in both lists below; harmless since
# both assignments are identical, but only one is needed.
#Colour Zones
all=0
kitchen=1
lounge=2
czone3=3
czone4=4
#White Zones
all=0
entry=1
hallway=2
bed=3
bedside=4
##########
# Input
##########
# Script parameters
# $1: bulb type (c=colour, w=white); $2: zone number; $3: command;
# $4: optional command parameter (e.g. brightness level)
type="$1"
zone="$2"
command="$3"
param="$4"
function colour {
  ##########
  # Send Command Functions
  ##########

  # Send one command packet to the wifi controller over UDP.
  # $1 - command bytes as echo -e escapes; the 0x55 terminator byte that
  #      every LimitlessLED packet ends with is appended here.
  function sendCmd {
    ctrl="\x55"
    cmd=$1
    # Try sending to /dev/udp, if that fails use netcat
    echo -n -e "$cmd$ctrl" >/dev/udp/$ipaddress/$portnum || echo -n -e "$cmd$ctrl" | nc -w 1 -u $ipaddress $portnum
  }

  # "On" opcodes double as zone selectors (index 0 = all, 1-4 = zones).
  function sendOnCommand {
    onarray=("\x42" "\x45" "\x47" "\x49" "\x4B")
    standby="\00"
    sendCmd "${onarray[$zone]}$standby"
  }

  # Select a zone by sending its "on" command first.
  function selectZone {
    sendOnCommand
    sleep 0
  }

  function sendOffCommand {
    offarray=("\x41" "\x46" "\x48" "\x4A" "\x4C")
    standby="\00"
    sendCmd "${offarray[$zone]}$standby"
  }

  # Set brightness level $1 (0-10) on the currently selected zone.
  # NOTE(review): "\xD0" at index 5 breaks the ascending 0x02..0x1B pattern
  # of the other entries; it may have been intended as "\x0D" — confirm
  # against the controller before changing it.
  function sendBrightCmd {
    brightarray=("\x02" "\x04" "\x08" "\x0A" "\x0B" "\xD0" "\x10" "\x13" "\x16" "\x19" "\x1B")
    selectZone
    bright="\x4E"
    cmd="${brightarray[$1]}"
    sendCmd "$bright$cmd"
  }

  # Turn the zone off, then switch it to night mode.
  function sendNightMode {
    offarray=("\x41" "\x46" "\x48" "\x4A" "\x4C")
    # Fixed: the first element was "x\C1" (a malformed escape that echo -e
    # emits as the literal 4 characters x \ C 1), so night mode for "all"
    # sent a garbage payload instead of the 0xC1 opcode.
    nightarray=("\xC1" "\xC6" "\xC8" "\xCA" "\xCC")
    standby="\00"
    sendCmd "${offarray[$zone]}$standby"
    sendCmd "${nightarray[$zone]}$standby"
  }

  # Send a colour-wheel byte ($1) to the selected zone.
  function sendColorCmd {
    selectZone
    color="\x40"
    cmd=$1
    sendCmd "$color$cmd"
  }

  # Switch the selected zone back to white.
  function sendWhiteCmd {
    # Fixed: the zone-4 entry was a duplicate of zone 3 ("\xC9"); per the
    # LimitlessLED RGBW command set, group-4 set-to-white is 0xCB.
    whitearray=("\xC2" "\xC5" "\xC7" "\xC9" "\xCB")
    selectZone
    white="\00"
    cmd="${whitearray[$zone]}"
    sendCmd "$cmd$white"
  }

  ##########
  # Input Handling Functions
  ##########
  function handleOn {
    echo "You just turned colour bulbs in zone $zone on"
    sendOnCommand
  }

  function handleOff {
    echo "You just turned colour bulbs in zone $zone off"
    sendOffCommand
  }

  # Dispatch the brightness sub-command: "full", a digit 0-9, or "night".
  function handleBrightness {
    case $param in
      "full")
        echo "You turned colour bulbs in zone $zone to full brightness"
        sendBrightCmd "10";;
      [0-9])
        echo "You turned colour bulbs in zone $zone to $param"
        sendBrightCmd "$param";;
      "night")
        echo "You just turned colour bulbs in zone $zone to night mode"
        sendNightMode;;
      *)
        echo "You've done something wrong";;
    esac
  }

  # Map a colour name to its colour-wheel byte and send it.
  function handleColor {
    echo "Attempting to change colour bulbs in zone $zone to $param"
    case $param in
      "white")
        sendWhiteCmd;;
      "purple")
        sendColorCmd "\xF0";;
      "lightblue")
        sendColorCmd "\x20";;
      "blue")
        sendColorCmd "\x10";;
      "red")
        sendColorCmd "\xb0";;
      "green")
        sendColorCmd "\x60";;
      "yellow")
        sendColorCmd "\x80";;
      "pink")
        sendColorCmd "\xC0";;
      "orange")
        sendColorCmd "\xA0";;
      *)
        echo "Colour $param isn't configured";;
    esac
  }

  ##########
  # Input Parsing
  ##########
  case $command in
    "on"|"ON")
      handleOn;;
    "off"|"OFF")
      handleOff;;
    "b"|"B")
      handleBrightness;;
    "c"|"C")
      handleColor;;
    *)
      echo "You've done something wrong";;
  esac
}
##########
# Dual white light bulbs
##########
function white {
  ##########
  # Send Command Functions
  ##########

  # Send one command packet to the wifi controller over UDP.
  # $1 - command bytes as echo -e escapes; the 0x55 terminator byte is
  #      appended here.
  function sendCmd {
    ctrl="\x55"
    cmd=$1
    # Try sending to /dev/udp, if that fails use netcat
    echo -n -e "$cmd$ctrl" >/dev/udp/$ipaddress/$portnum || echo -n -e "$cmd$ctrl" | nc -w 1 -u $ipaddress $portnum
  }

  # "On" opcodes double as zone selectors (index 0 = all, 1-4 = zones).
  function sendOnCommand {
    onarray=("\x35" "\x38" "\x3D" "\x37" "\x32")
    standby="\00"
    sendCmd "${onarray[$zone]}$standby"
  }

  # Select a zone by sending its "on" command first.
  function selectZone {
    sendOnCommand
    sleep 0
  }

  function sendOffCommand {
    offarray=("\x39" "\x3B" "\x33" "\x3A" "\x36")
    standby="\00"
    sendCmd "${offarray[$zone]}$standby"
  }

  # Night mode: per-zone "off" opcode with the high bit set (0x39 -> 0xB9).
  function sendNightCommand {
    nightarray=("\xB9\00" "\xBB\00" "\xB3\00" "\xBA\00" "\xB6\00")
    selectZone
    sendCmd "${nightarray[$zone]}"
  }

  # Full brightness: per-zone "on" opcode with the high bit set (0x35 -> 0xB5).
  function sendFullBrightCommand {
    fullbrightarray=("\xB5\00" "\xB8\00" "\xBD\00" "\xB7\00" "\xB2\00")
    selectZone
    sendCmd "${fullbrightarray[$zone]}"
  }

  # Step brightness up/down one notch; $1 is the opcode (\x3C up, \x34 down).
  function sendBrightDimCommand {
    brightDim=$1
    selectZone
    sendCmd "$brightDim\00"
  }

  # Shift colour temperature; $1 is the opcode (\x3f cooler, \x3e warmer).
  function sendCoolWarmCommand {
    coolWarm=$1
    selectZone
    sendCmd "$coolWarm\00"
  }

  ##########
  # Input Handling Functions
  ##########
  function handleOn {
    echo "You just turned white bulbs in zone $zone on"
    sendOnCommand
  }

  function handleOff {
    echo "You just turned white bulbs in zone $zone off"
    sendOffCommand
  }

  # Dispatch the brightness sub-command.
  function handleBrightness {
    case $param in
      "night")
        echo "You turned white bulbs in zone $zone to night-mode"
        sendNightCommand;;
      "full")
        echo "You turned white bulbs in zone $zone to full brightness"
        sendFullBrightCommand;;
      "up")
        echo "You turned white bulbs in zone $zone up 1 brightness"
        sendBrightDimCommand "\x3C";;
      "down")
        echo "You turned white bulbs in zone $zone down 1 brightness"
        sendBrightDimCommand "\x34";;
      "cool")
        echo "You cooled down white bulbs in zone $zone"
        sendCoolWarmCommand "\x3f";;
      "warm")
        echo "You warmed up white bulbs in zone $zone"
        sendCoolWarmCommand "\x3e";;
      *)
        echo "You've done something wrong"
    esac
  }

  # Interactive mode: numpad 8/2 adjust brightness, 4/6 adjust temperature.
  function handleInteractive {
    echo "Press CTRL+C to exit interactive mode"
    echo "Make sure you have numlock ON when using numpad"
    for (( ; ; ))
    do
      read -s -n 1 var
      case $var in
        8)
          echo "You turned white bulbs in zone $zone up 1 brightness"
          sendBrightDimCommand "\x3C";;
        2)
          echo "You turned white bulbs in zone $zone down 1 brightness"
          sendBrightDimCommand "\x34";;
        4)
          echo "You cooled down white bulbs in zone $zone"
          sendCoolWarmCommand "\x3f";;
        6)
          echo "You warmed up white bulbs in zone $zone"
          sendCoolWarmCommand "\x3e";;
        *)
          echo "wrong key pressed"
      esac
    done
  }

  ##########
  # Input Parsing
  ##########
  case $command in
    "on"|"ON")
      handleOn;;
    "off"|"OFF")
      handleOff;;
    "b"|"B")
      # Quoted "$param": the original unquoted test printed a "unary
      # operator expected" error whenever the fourth argument was empty
      # (it still fell through to handleBrightness, but noisily).
      if [ "$param" = "i" ]
      then
        handleInteractive
      else
        handleBrightness
      fi;;
    *)
      echo "You've done something wrong";;
  esac
}
# Dispatch on the bulb type given as the first argument:
# c/C -> colour (RGBW) bulbs, w/W -> dual-white bulbs.
if [[ $type == [cC] ]]; then
  colour
elif [[ $type == [wW] ]]; then
  white
else
  echo "You've done something wrong"
fi
| true
|
cb38daa80b7358c08352a0918d07ede3121ab45e
|
Shell
|
yousong/brtest
|
/modparams.sh
|
UTF-8
| 634
| 3.6875
| 4
|
[] |
no_license
|
#
# Author: Yousong Zhou <yszhou4tech AT gmail.com>
#
# Sample run
#
# ./modparams.sh ixgbe
#
# Print "name=value " pairs for every load-time parameter of module $1,
# read from sysfs. Returns 1 (with a message on stderr) when the module
# is not loaded, 0 when it is loaded but exposes no parameters.
_modparams() {
	# module
	local m="$1"
	# module sysfs path
	local mp="/sys/module/$m"
	# parameter directory
	local pp="$mp/parameters"
	local param
	# not yet loaded?
	[ -d "$mp" ] || {
		echo "cannot find $mp" >&2
		return 1
	}
	# no param on load?
	[ -d "$pp" ] || {
		return 0
	}
	# Glob instead of parsing ls output, so unusual parameter names and
	# word splitting can't corrupt the loop.
	for param in "$pp"/*; do
		# empty directory: the unmatched glob stays unexpanded
		[ -e "$param" ] || continue
		echo -n "${param##*/}=$(cat "$param") "
	done
}
# Print "module: param=value ..." for each module named in the arguments.
# NOTE: $* is deliberately left unquoted — the dispatch at the bottom of
# this file passes a single newline-separated string of all module names,
# and the word splitting here is what breaks it into individual modules.
# Quoting it ("$@") would break that caller.
modparams() {
	local m
	for m in $*; do
		echo -n "$m: "
		_modparams "$m"
		echo
	done
}
# With arguments: report just those modules. Without: report every loaded
# module listed in /proc/modules.
if [ "$#" -gt 0 ]; then
	modparams "$@"
else
	# cut reads the file directly (no useless cat). The expansion is
	# deliberately unquoted so each module name becomes its own argument;
	# previously the whole newline-joined list was passed as one quoted
	# string and only worked because modparams re-split it via $*.
	modparams $(cut -f1 -d' ' /proc/modules)
fi
| true
|
684e4eab1c76e6ceb8767bcc1fde8b3574eafbc5
|
Shell
|
npavlovikj/pegasus-trinity-test
|
/submit.sh
|
UTF-8
| 1,881
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a Pegasus DAX and site catalog, then plan and submit the workflow.
# totalmemory is in megabytes
# runtime is in seconds
set -e

# create dax file
./dax.py > pipeline.dax

# Create the site catalog. The unquoted EOF delimiter lets ${PWD} expand,
# baking the current directory's absolute path into the catalog.
cat > sites.xml <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<sitecatalog xmlns="http://pegasus.isi.edu/schema/sitecatalog" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://pegasus.isi.edu/schema/sitecatalog http://pegasus.isi.edu/schema/sc-4.0.xsd" version="4.0">
   <site handle="local" arch="x86_64" os="LINUX">
      <directory type="shared-scratch" path="${PWD}/work">
         <file-server operation="all" url="file://${PWD}/work"/>
      </directory>
      <directory type="local-storage" path="${PWD}/scratch">
         <file-server operation="all" url="file://${PWD}/scratch"/>
      </directory>
   </site>
   <site handle="local-hcc" arch="x86_64" os="LINUX">
      <directory type="shared-scratch" path="${PWD}/out">
         <file-server operation="all" url="file://${PWD}/out"/>
      </directory>
      <profile namespace="pegasus" key="style">glite</profile>
      <profile namespace="condor" key="grid_resource">batch slurm</profile>
      <profile namespace="pegasus" key="queue">batch</profile>
      <profile namespace="env" key="PEGASUS_HOME">/usr</profile>
      <profile namespace="pegasus" key="runtime">5760</profile>
      <profile namespace="globus" key="totalmemory">2000</profile>
      <profile namespace="pegasus" key="cores">2</profile>
      <profile namespace="pegasus" key="nodes">1</profile>
      <profile namespace="env" key="PERL5LIB">/util/opt/anaconda/deployed-conda-envs/packages/trinity/envs/trinity-2.4.0/lib/perl5</profile>
   </site>
</sitecatalog>
EOF

# Plan and submit the workflow. "${PWD}" is quoted so a working directory
# containing spaces does not split into multiple arguments.
pegasus-plan --conf pegasusrc --sites local-hcc --output-site local --dir "${PWD}" --dax pipeline.dax --submit
| true
|
69f4aaf5429d474c9a1be2ea36f22c3b254611cd
|
Shell
|
patrisampe/Books-AI
|
/extractClp.sh
|
UTF-8
| 1,278
| 3.578125
| 4
|
[] |
no_license
|
CLP=Libros.clp

# Each fragment in $CLP is delimited by ";;; BEGIN <file>" / ";;; END <file>"
# marker lines. "grep --line-number" emits "NUM:;;; BEGIN <file>"; xargs
# (default echo) re-joins the colon-split pieces so awk can pick the line
# number ($1) and the file name ($3).
BEGINLINES=$(grep ";;; " --line-number "$CLP" | grep BEGIN | xargs --delimiter=: | awk '{print $1}')
FILENAMES1=$(grep ";;; " --line-number "$CLP" | grep BEGIN | xargs --delimiter=: | awk '{print $3}')
FILENAMES2=$(grep ";;; " --line-number "$CLP" | grep END | xargs --delimiter=: | awk '{print $3}')
ENDLINES=$(grep ";;; " --line-number "$CLP" | grep END | xargs --delimiter=: | awk '{print $1}')
BEGINARRAY=($BEGINLINES)
FILEARRAY1=($FILENAMES1)
FILEARRAY2=($FILENAMES2)
ENDARRAY=($ENDLINES)
ELEMS=$(grep ";;; " "$CLP" | grep -c BEGIN)

for (( c=0; c<ELEMS; c++ ))
do
	BEGLINE=${BEGINARRAY[$c]}
	# Assign ENDLINE before the consistency check: previously it was set
	# after the check, so the error message printed a stale/empty value.
	ENDLINE=${ENDARRAY[$c]}
	FILE=${FILEARRAY1[$c]}
	FILE2=${FILEARRAY2[$c]}
	if [ "$FILE" != "$FILE2" ]; then
		echo "[ERROR ] in the BEGIN line ($BEGLINE) we found $FILE but in the END line ($ENDLINE)it was $FILE2"
		# Exit non-zero on a marker mismatch (a bare "exit" reported success).
		exit 1
	fi
	# Skip the pins/pont fragments (same substring match as the original greps).
	case "$FILE" in
		*pins*|*pont*) continue;;
	esac
	echo "Parsing $FILE from line $BEGLINE to $ENDLINE ..."
	# Step past the BEGIN marker and its following line; back off the END
	# marker, and one more line if the last extracted line would be blank.
	BEGLINE=$((BEGLINE+2))
	ENDLINE=$((ENDLINE-1))
	if [ "$(head -n "$ENDLINE" "$CLP" | tail -1)" == "" ]; then
		ENDLINE=$((ENDLINE-1))
	fi
	cp "$FILE" "$FILE.save"
	sed -n "${BEGLINE},${ENDLINE}p" "$CLP" > "$FILE"
done
| true
|
08dfd8993c67cbef6b4ad9136b30ec2694d0f4ab
|
Shell
|
Smiledemon/junit
|
/checkout.sh
|
UTF-8
| 326
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Pass (exit 0) when every <testsuite> that reports zero errors also
# reports zero failures in the JUnit report; otherwise roll back the
# working tree, push, and exit 1.
report=report/TESTS-TestSuites.xml
clean_pattern="<testsuite errors=\"0\" failures=\"0\""
ran_pattern="<testsuite errors=\"0\" failures"
clean_count=$(grep -c "${clean_pattern}" "$report")
ran_count=$(grep -c "${ran_pattern}" "$report")
if [ "${clean_count}" -ne "${ran_count}" ]
then
    git reset --hard
    git push
    exit 1
fi
exit 0
| true
|
8690b5a939af272fdffa9ec89392d9efa32825f9
|
Shell
|
mohitmayank/.dotfiles
|
/profiles/centos
|
UTF-8
| 331
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Add self to sudoers so later steps run without a password prompt.
ME=$(whoami)
sudo sh -c "echo '$ME ALL=(ALL) NOPASSWD: ALL' > /etc/sudoers.d/$ME"
# Fixed: "-y" was typed with a Unicode en-dash (–y), which yum would have
# treated as a package name instead of the assume-yes flag.
sudo yum install -y epel-release
if [ -x "$(command -v yum-config-manager)" ]; then
  # NOTE(review): runs without sudo, as in the original — confirm intended.
  yum-config-manager --enable epel
fi
sudo yum -y install curl wget p7zip tree htop
sudo yum -y install vim git tmux dos2unix
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.