blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a5169c34b24405ed96664d02254dc6b1a104fc99
|
Shell
|
pappasam/npd
|
/c1_intro_programming/scripts/check_a2.sh
|
UTF-8
| 1,057
| 3.4375
| 3
|
[] |
no_license
|
# Verify assignment 2: the student must have downloaded index.html and
# vimrc_minimum into the assignment_2 directory.  Prints remediation
# instructions (wget commands) for anything missing.
# Relies on helpers/vars provided by the course environment:
#   check_course_vars_loaded, check_assignment_started, $course_assignments.
check_course_vars_loaded
assignment_index="2"
thisAssignment="$course_assignments/assignment_$assignment_index"
check_assignment_started "$assignment_index"
passing="true"
links=""
if [ ! -f "$thisAssignment/index.html" ]; then
  echo "Was expecting to find $thisAssignment/index.html"
  echo
  passing="false"
  links="www.google.com $links"
fi
if [ ! -f "$thisAssignment/vimrc_minimum" ]; then
  echo "Was expecting to find $thisAssignment/vimrc_minimum"
  echo
  passing="false"
  links="https://raw.githubusercontent.com/pappasam/configsettings/master/vimrc_minimum $links"
fi
# Quote the flag so the test is well-formed even if it were ever empty.
if [ "$passing" != "true" ]; then
  echo "Make sure your current directory is the assignment subdirectory, then download"
  echo "the files with wget. Download the files at the following URLs, results"
  echo "should named index.html and vimrc_minimum."
  echo
  echo "    cd ~/intro-programming/assignment_2/"
  # $links is deliberately unquoted: it is a space-separated list of URLs.
  for link in $links; do
    echo "    wget $link"
  done
  echo
else
  echo "COMPLETED: Assignment $assignment_index"
fi
| true
|
9e829a2796b86c61284bf31a352ba1eff5360dc4
|
Shell
|
DerBunman/docker_debuild
|
/gpg_init.sh
|
UTF-8
| 772
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Create a throwaway GnuPG home next to this script, batch-generate an
# apt signing key, publish it to the Ubuntu keyserver and export the
# ASCII-armored public key to pubkey.gpg.
export GNUPGHOME="$(dirname "$0")/data/volumes/home/.gnupg"
# BUG FIX: the original ran `test -d` with no operand, which tests the
# literal string "-d" and is therefore always true; test the directory.
test -d "$GNUPGHOME" && rm -r "$GNUPGHOME"
mkdir -p "$GNUPGHOME"
# Prefer gpg2 when available; fall back to gpg.
GPG_BIN=$(command -v gpg2) || GPG_BIN=$(command -v gpg) || {
	echo "error gpg not found" >&2
	exit 1
}
# Unattended key-generation parameters (no passphrase, never expires).
cat >"$GNUPGHOME/batch" <<EOF
%echo Generating a basic OpenPGP key
%no-ask-passphrase
%no-protection
Key-Type: DSA
Key-Length: 1024
Name-Real: Lorenz Werner
Name-Comment: apt sign key
Name-Email: apt@doublefun.net
Expire-Date: 0
# Do a commit here, so that we can later print "done" :-)
%commit
%echo done
EOF
"$GPG_BIN" --batch --gen-key "$GNUPGHOME/batch"
# Fingerprint of the freshly generated key (field 10 of the fpr record).
key=$("$GPG_BIN" --with-colons --keyid-format long --list-keys apt@doublefun.net | grep fpr | cut -d ':' -f 10)
"$GPG_BIN" --keyserver keyserver.ubuntu.com --send-keys "$key"
"$GPG_BIN" --output pubkey.gpg --armor --export "$key"
| true
|
1831fd56ae767dcd1670a8b3eefdc05012f36ac3
|
Shell
|
ZMpursue/OSX-KVM
|
/boot-passthrough.sh
|
UTF-8
| 2,337
| 2.953125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Boot a macOS guest under QEMU/KVM with PCI (GPU) passthrough enabled.
# Special thanks to:
# https://github.com/Leoyzen/KVM-Opencore
# https://github.com/thenickdude/KVM-Opencore/
# https://github.com/qemu/qemu/blob/master/docs/usb2.txt
#
# qemu-img create -f qcow2 mac_hdd_ng.img 128G
#
# echo 1 > /sys/module/kvm/parameters/ignore_msrs (this is required)
############################################################################
# NOTE: Tweak the "MY_OPTIONS" line in case you are having booting problems!
############################################################################
# Extra CPU feature flags appended to the -cpu option below.
MY_OPTIONS="+pcid,+ssse3,+sse4.2,+popcnt,+avx,+aes,+xsave,+xsaveopt,check"
# This script works for Big Sur, Catalina, Mojave, and High Sierra. Tested with
# macOS 10.15.6, macOS 10.14.6, and macOS 10.13.6
ALLOCATED_RAM="3072" # MiB
CPU_SOCKETS="1"
CPU_CORES="2"
CPU_THREADS="4"
REPO_PATH="./"
OVMF_DIR="."
# Note: This script assumes that you are doing CPU + GPU passthrough. This
# script will need to be modified for your specific needs!
#
# We recommend doing the initial macOS installation without using passthrough
# stuff. In other words, don't use this script for the initial macOS
# installation.
# shellcheck disable=SC2054
args=(
-enable-kvm -m "$ALLOCATED_RAM" -cpu host,vendor=GenuineIntel,kvm=on,vmware-cpuid-freq=on,+invtsc,+hypervisor
-machine pc-q35-2.9
-smp "$CPU_THREADS",cores="$CPU_CORES",sockets="$CPU_SOCKETS"
-vga none
# Passed-through PCI device: both functions of host device 01:00.x behind a
# dedicated PCIe root port.
-device pcie-root-port,bus=pcie.0,multifunction=on,port=1,chassis=1,id=port.1
-device vfio-pci,host=01:00.0,bus=port.1,multifunction=on
-device vfio-pci,host=01:00.1,bus=port.1
-usb -device usb-kbd -device usb-tablet
-device isa-applesmc,osk="ourhardworkbythesewordsguardedpleasedontsteal(c)AppleComputerInc"
# OVMF firmware: CODE image is read-only; VARS image holds NVRAM state.
-drive if=pflash,format=raw,readonly,file="$REPO_PATH/$OVMF_DIR/OVMF_CODE.fd"
-drive if=pflash,format=raw,file="$REPO_PATH/$OVMF_DIR/OVMF_VARS-1024x768.fd"
-smbios type=2
# NOTE(review): 'ide-drive' is a legacy QEMU device name ('ide-hd' is used
# below for the other disk) -- confirm the targeted QEMU version accepts it.
-drive id=MacHDD,if=none,file=./mac_hdd_ng.img
-device ide-drive,bus=sata.2,drive=MacHDD
-drive id=OpenCoreBoot,if=none,snapshot=on,format=qcow2,file="$REPO_PATH/OpenCore-Catalina/OpenCore-Passthrough.qcow2"
-device ide-hd,bus=sata.3,drive=OpenCoreBoot
-netdev tap,id=net0,ifname=tap0,script=no,downscript=no -device vmxnet3,netdev=net0,id=net0,mac=52:54:00:c9:18:27
-monitor stdio
-display none
)
qemu-system-x86_64 "${args[@]}"
| true
|
f4595d49ca09382541a67882e78d147acc3e4e5a
|
Shell
|
Alanma23/Sze_CRCMetaAnalysis_mBio_2018
|
/code/kostic.batch
|
UTF-8
| 3,583
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!bash
# NOTE(review): non-standard shebang ("#!bash" without a path); presumably
# this batch file is invoked via `bash kostic.batch` or a scheduler where
# the line is inert -- confirm.
# Download the Kostic et al. 2012 16S data set (SRA study SRP000383),
# quality-trim it with mothur, and run the standard mothur OTU pipeline.
#Load needed R
module load R/3.3.3
module load sratoolkit/2.7.0
tempMothur=/nfs/turbo/schloss-lab/msze/axiom_home_stuff/bin/mothurV1.39.3/mothur/mothur
DOWNDIR=data/raw/kostic
WORKDIR=data/process/kostic
REF=data/references
# Need to add in a bunch of stuff on the study and what they were looking for.
# Might adjust parameters to those set by authors of original manuscript
# (Kostic et al 2012).
# The SRA project ID is SRP000383. qvalue of 25 is too high try 20
# Download the data set
wget -r -q -np -nd -k -P $DOWNDIR ftp://ftp-trace.ncbi.nih.gov/sra/sra-instant/reads/ByStudy/sra/SRP/SRP000/SRP000383/
# Convert to fasta files that will be used
for sample in $DOWNDIR/*.sra
do
fastq-dump $sample -O $WORKDIR
fastq=${sample//sra/fastq}
fastq=${fastq//raw/process}
# NOTE(review): this step invokes the bare `mothur` from PATH while every
# later step uses $tempMothur (v1.39.3) -- confirm the mismatch is intended.
mothur "#fastq.info(fastq=$fastq);
trim.seqs(fasta=current, qfile=current, maxambig=0, maxhomop=8, qwindowaverage=25, qwindowsize=50, processors=8)"
rm *logfile
done
# Combined the seperate fasta files to one file
cat $WORKDIR/*trim.fasta > $WORKDIR/combined.fasta
# Create a group file
grep '^>' $WORKDIR/combined.fasta | cut -c 2- > $WORKDIR/header.txt
sed 's/\..*//g' $WORKDIR/header.txt > $WORKDIR/group.txt
paste --delimiters='\t' $WORKDIR/header.txt $WORKDIR/group.txt > $WORKDIR/combined.groups
# Remove unessary files
rm -f $WORKDIR/*.fastq $WORKDIR/*.scrap.* $WORKDIR/*.trim.* $WORKDIR/SRR*fasta $WORKDIR/SRR*qual $WORKDIR/header.txt $WORKDIR/group.txt
# Run mothur for sequencing processing on combined file
$tempMothur "#unique.seqs(fasta=$WORKDIR/combined.fasta);
align.seqs(fasta=current, reference=$REF/silva.seed.align, flip=T, processors=8);
count.seqs(name=current, group=$WORKDIR/combined.groups);
summary.seqs(fasta=current, count=current)"
$tempMothur "#screen.seqs(fasta=$WORKDIR/combined.unique.align, count=$WORKDIR/combined.count_table, summary=$WORKDIR/combined.unique.summary, end=28601, optimize=start, criteria=95, minlength=200, maxhomop=8, processors=8);
filter.seqs(fasta=current, vertical=T, trump=.);
unique.seqs(fasta=current, count=current);
summary.seqs(fasta=current, count=current)"
# OTU clustering, chimera removal, classification, and representative
# sequence extraction at the 0.03 label.
$tempMothur "#pre.cluster(fasta=$WORKDIR/combined.unique.good.filter.unique.fasta, count=$WORKDIR/combined.unique.good.filter.count_table, diffs=2);
chimera.uchime(fasta=current, count=current, dereplicate=t, processors=8);
remove.seqs(fasta=current, accnos=current);
classify.seqs(fasta=current, count=current, reference=$REF/trainset14_032015.pds.fasta, taxonomy=$REF/trainset14_032015.pds.tax, cutoff=80);
remove.lineage(fasta=current, count=current, taxonomy=current, taxon=Chloroplast-Mitochondria-unknown-Archaea-Eukaryota);
summary.seqs(fasta=current, count=current);
cluster.split(fasta=current, count=current, taxonomy=current, method=opti, metric=mcc, taxlevel=5, cutoff=0.03);
make.shared(list=current, count=current, label=0.03);
classify.otu(list=current, count=current, taxonomy=current, label=0.03);
get.oturep(fasta=current, count=current, list=current, label=0.03, method=abundance);
count.groups()"
# Match metadata with the shared file
#R -e "source('code/kostic.R')"
#$tempMothur "#sub.sample(shared=$WORKDIR/kostic.shared, size=1187, label=0.03);
#	dist.shared(shared=$WORKDIR/kostic.shared, calc=braycurtis, label=0.03, subsample=1187, iters=100, processors=8);
#	summary.single(shared=$WORKDIR/kostic.shared, calc=nseqs-sobs-shannon-shannoneven, subsample=1187)"
#mv $WORKDIR/*.cons.taxonomy $WORKDIR/kostic.taxonomy
#mv $WORKDIR/*0.03.rep.fasta $WORKDIR/kostic.rep.seqs
#rm $WORKDIR/combined.*
#rm $WORKDIR/*rabund
| true
|
a924f1a145149d67d9534db8e9b5b76b72459645
|
Shell
|
krullun/mathTableRecitation
|
/stampspik.sh
|
UTF-8
| 718
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Announce a resting period, then play multiplication-table recordings
# 1..10, signalling an EEG rig through an Arduino on /dev/ttyACM0 before
# each clip.
#initial announcement
espeak -ven+f3 -s150 "Your resting time starts now" -w table.wav
aplay table.wav
#signal to EEG setup through arduino
#IMPORTANT: you may need to change ACM0 that arduino is using
#It can be check by comparing the result of command "ls /dev/tty*" without quotes before and after pluging the arduino
# NOTE(review): `9600` here is an extra echo argument, so the device
# receives the text "aa 9600"; if 9600 was meant as a baud rate it must be
# configured with stty instead -- confirm before changing the marker format.
echo aa > /dev/ttyACM0 9600
# resting period of 30s (previous comment incorrectly said 3s)
sleep 30
#Table recitation begin
espeak -ven+f3 -s150 "Table begins now" -w table11.wav
#signal is send before the playing the file
echo aa > /dev/ttyACM0 9600
aplay table11.wav
#wait for 3s
sleep 3
#recitation in loop from 1 to 10
for i in {1..10}
do
  file="table$i.wav"
  echo aa > /dev/ttyACM0 9600
  aplay "$file"
  sleep 3
done
| true
|
26815e8a1ab28ebca91d39fd3e307a94af1294bb
|
Shell
|
wikimedia/operations-software-homer-deploy
|
/scap/cmd.sh
|
UTF-8
| 187
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# scap deploy hook for Homer: run the deploy target of Makefile.deploy
# from the deploy checkout under /srv/deployment/homer.
set -eu
BASE_PATH="/srv/deployment/homer"
# Exported so Makefile.deploy can locate the virtualenv and checkout.
export VENV="${BASE_PATH}/venv"
export DEPLOY_PATH="${BASE_PATH}/deploy"
# Subshell keeps the caller's working directory unchanged.
(cd "${DEPLOY_PATH}" && make -f Makefile.deploy deploy)
| true
|
568c6ddbbe568eb51b0f147b90ba97ae76315006
|
Shell
|
svn2github/iup-iup
|
/tags/iup_3_6/iup/srcconsole/copy_all_dll
|
UTF-8
| 1,188
| 3.1875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Collect the IUP/CD/IM/Lua runtime DLLs for one toolchain/system/Lua
# combination into ../bin/<SYS>/Lua<SFX>, copy the matching MSVC runtime,
# prune DLLs of the other Lua version, and move the iuplua launcher in.
#   $1 = toolchain/platform dir (dll8, dll10, cygw17)
#   $2 = system dir (Win32, Win64, cygw17)
#   $3 = Lua suffix (5.1 or 52)
Copy_DLLs()
{
  PLAT=$1
  SYS=$2
  LUA_SFX=$3
  # 64-bit library directories carry a _64 suffix.
  if [ "$SYS" = Win64 ]; then
    SFX=_64
    PLAT=$PLAT$SFX
  fi
  echo "$PLAT/$SYS/Lua$LUA_SFX"
  DEST=../bin/$SYS/Lua$LUA_SFX
  # -p so a rerun against an existing tree does not error out.
  mkdir -p "$DEST"
  cp -f ../lib/$PLAT/*.dll "$DEST"
  cp -f ../../cd/lib/$PLAT/*.dll "$DEST"
  cp -f ../../im/lib/$PLAT/*.dll "$DEST"
  cp -f ../../lua$LUA_SFX/lib/$PLAT/*.dll "$DEST"
  cp -f ../../luagl/lib$LUA_SFX/$PLAT/*.dll "$DEST"
  cp -f ../../lfs/lib$LUA_SFX/$PLAT/*.dll "$DEST"
  # MSVC runtime redistributable depends on the toolchain that built them.
  if [ "$1" = dll8 ]; then
    cp -frv ../../lua$LUA_SFX/bin/$SYS/Microsoft.VC80.CRT "$DEST"
  fi
  if [ "$1" = dll10 ]; then
    cp -f ../dist/Microsoft.VC100.CRT/$PLAT/* "$DEST"
  fi
  # Remove DLLs that belong to the other Lua version.
  rm -f "$DEST"/*3.dll
  if [ "$LUA_SFX" = 5.1 ]; then
    rm -f "$DEST"/*52.dll
  else
    rm -f "$DEST"/*51.dll
  fi
  mv -f ../bin/$SYS/iuplua$LUA_SFX.exe "$DEST/iuplua$LUA_SFX.exe"
}
Copy_DLLs dll8 Win32 5.1
Copy_DLLs dll8 Win64 5.1
Copy_DLLs dll10 Win32 52
Copy_DLLs dll10 Win64 52
Copy_DLLs cygw17 cygw17 5.1
Copy_DLLs cygw17 cygw17 52
| true
|
39e8c12b168d9809a38bf9195d3bc98d924a4f0f
|
Shell
|
Nazrath10R/RetroMiner
|
/scripts/older/custom_report.sh
|
UTF-8
| 1,921
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Batch-export PeptideShaker reports for each PRIDE reanalysis output
# folder listed in output_to_convert.txt, then filter the custom protein
# reports for LINE-1 and HERV hits, and mail a completion notice.
# java -Xmx200G -cp PeptideShaker-1.14.6.jar eu.isas.peptideshaker.cmd.ReportCLI -in "$DIR/pride_reanalysis/outputs/PXD004624/1.cpsx" -out_reports "$DIR/pride_reanalysis/reports/PXD004624/" -reports 9
DIR=/data/SBCS-BessantLab/naz/
cd /data/SBCS-BessantLab/naz/pride_reanalysis/outputs/ || exit 1
# FOLDERS=$(ls -1)
FOLDERS=$(cat output_to_convert.txt)
COUNTER=21
# COUNTER=$(ls -1 | wc -l)
for y in $FOLDERS; do
  # Skip (rather than silently run in the wrong directory) if cd fails.
  cd "$y" || continue
  echo -en "\033[34m"             # blue progress banner
  COUNTER=$((COUNTER - 1))        # $[ ] is obsolete arithmetic syntax
  echo "$COUNTER"
  echo "$y"
  echo -en "\033[0m"
  # FILES=$(echo *.cpsx)
  FILES=$(find "$DIR/pride_reanalysis/outputs/$y" -type f -name "*.cpsx")
  for x in $FILES; do
    java -Xmx200G -cp /data/SBCS-BessantLab/naz/pride_reanalysis/PeptideShaker.6/PeptideShaker-1.14.6.jar eu.isas.peptideshaker.cmd.ReportCLI -in "$x" -out_reports "$DIR/pride_reanalysis/reports/$y/" -reports 9,10,11,12,13
    # java -cp /data/SBCS-BessantLab/naz/pride_reanalysis/PeptideShaker.6/PeptideShaker-1.14.6.jar eu.isas.peptideshaker.cmd.MzidCLI -in $x -output_file $PWD/${x%.cpsx}.mzid -contact_first_name "Nazrath" -contact_last_name "Nawaz" -contact_email "nazrath.nawaz@yahoo.de" -contact_address "Fogg Building" -organization_name "QMUL" -organization_email "m.n.mohamednawaz@qmul.ac.uk" -organization_address "Mile end road, London"
  done
  cd ..
done
# Second pass: filter the exported protein reports for LINE/HERV entries.
cd /data/SBCS-BessantLab/naz/pride_reanalysis/reports/ || exit 1
FOLDERS=$(cat to_filter.txt)
COUNTER=21
for y in $FOLDERS; do
  cd "$y" || continue
  echo -en "\033[34m"
  COUNTER=$((COUNTER - 1))
  echo "$COUNTER"
  echo "$y"
  echo -en "\033[0m"
  FILES=$(find "$DIR/pride_reanalysis/reports/$y" -type f -name "*_custom_protein_report.txt")
  # FILES=$(echo *_custom_protein_report.txt)
  for x in $FILES; do
    awk '/LINE-1|LINE_1/ { print $0 }' "$x" > "${x%.txt}_LINE_filtered.txt"
    awk '/HERV/ { print $0 }' "$x" > "${x%.txt}_HERV_filtered.txt"
  done
  cd ..
done
mail -s "Apocrita run completed" nazrath.nawaz@yahoo.de <<< "exported and filtered"
| true
|
ee88251fd3aae9a336ba44408cc9de4b0b6e5043
|
Shell
|
waynebhayes/SANA
|
/scripts/NAFpredict.sh
|
UTF-8
| 9,908
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Predict GO annotations in a target species from network-alignment
# frequencies (NAF) against a source species, then validate the
# predictions against a later GO release.  See USAGE for the contract.
USAGE="$0 [-a] [-GO1freq k] gene2goWhich oldG1.el oldG2.el seqSim GO1 GO2 tax1 tax2 NAFthresh col1 col2 outName [files]
NOTE: predictions go to outName-p, validations to outName-v, summary to stdout
where:
seqSim is the file with sequence similarities/orthologs used to eliminate predictions possible to predict by sequence
GO1 is the name of source gene2go file used to predict annotations in GO2 (NOT including allGO or NOSEQ part of filename)
tax1 and tax2 are the predictor and predictee taxonomic IDs (eg 10090 for mouse, 9606 for human)
NAFthresh is the lower predictive bound, and has a leading dash if you want to predict in the opposite direction
to the directory names (eg 0 to predict MM->HS or -0 for MM<-HS)
col1,col2 are the columns in the files where protein names are found
files contain at least 2 columns with aligned protein pairs, including duplicates and NO count
(we will compute NAF in the script) [no files means you're sending to stdin]
gene2goWhich: should be NOSEQ, allGO, or any other extension that exists
Options:
-a : allow all predictions, even those not validatable (NOT recommended! default is to restrict to validatable)
-GO1freq k: remove predictions of GO terms with frequency above k"
# Print usage plus a specific error to stderr, then exit non-zero.
die(){ echo "$USAGE">&2; echo "$@" >&2; exit 1
}
# Get rid of annoying NOT lines...
notNOT() { grep -v ' NOT ' "$@"
}
# Helper scripts (Predictable.sh, etc.) live beside this script.
EXEDIR=`dirname $0`
# Pick the first writable scratch root unless the caller preset MYTMP.
[ "$MYTMP" ] || MYTMP=`for i in /scratch/preserve/wayne /var/tmp/wayne /tmp/wayne; do mkdir -p $i && break; done; echo $i`
TMPDIR=$MYTMP/GOpredict.$$
# Remove the temp dir on exit/HUP/INT/TERM; SIGQUIT (3) instead preserves
# it for debugging and reports its location.
trap "/bin/rm -rf $TMPDIR; exit" 0 1 2 15
trap "trap '' 0 1 2 15; echo TMPDIR is $TMPDIR >&2; exit 1" 3
mkdir $TMPDIR || die "Hmmm, $TMPDIR could not make $TMPDIR"
ALLOW_ALL=false
GO1freq=2000000000 # big enough for default...
# Consume leading options; first non-dash argument ends the loop.
while true; do
case "$1" in
-a) ALLOW_ALL=true; shift;;
-GO1freq) GO1freq="$2"; shift 2;;
-*) die "unknown option '$1'";;
*) break;;
esac
done
[ $# -ge 12 ] || die "expecting at least 12 args, not $#"
GENE2GO="$1"
G1="$2"
G2="$3"
seqSim="$4"
GO1="$5"
GO2="$6"
tax1=$7
tax2=$8
NAFthresh=$9
c1=${10}
c2=${11}
outName=${12}
shift 12
# Evidence codes: all, NOSEQ, and SEQ
EVC_NOS="EXP HDA HEP HGI HMP IC IDA IEP IGI IKR IMP IMR IPI IRD NAS ND TAS"
EVC_SEQ="IBA IEA IGC ISA ISM ISO ISS RCA"
EVC_ALL="$EVC_NOS $EVC_SEQ"
[ -f $G1 -a -f $G2 ] || die "need network files $G1 and $G2"
[ -f $outName-v -o -f $outName-p ] && die "refusing to overwrite existing $outName-[pv]"
for g in allGO NOSEQ; do
[ -f "$GO1.$g" ] || die "can't find gene2go file '$GO1.$g'"
[ -f "$GO2.$g" ] || die "can't find gene2go file '$GO2.$g'"
done
# NOTE(review): 'newlines' is assumed to be a helper (in PATH or EXEDIR)
# that splits whitespace-separated words one per line -- confirm.
echo $EVC_SEQ | newlines | awk '{printf "\t%s\t\n",$0}' > $TMPDIR/EVC_SEQ # sequence evidence codes
cat "$seqSim" > $TMPDIR/seqSim # in case it's a pipe, we need to store it
# Annotations that are already known for the target species at time t1:
grep "^$tax2	" $GO1.allGO | cut -f1-3 | sort -u >$TMPDIR/GO1.tax2.allGO.1-3
# All annotations at time t2 (the validatable set), with their validating evidence codes (nothing to do with the evidence
# codes in the PREDICTING set).
grep "^$tax2	" $GO2.$GENE2GO | cut -f1-4 | sort -u | tee $TMPDIR/GO2.tax2.$GENE2GO.1-4 |
fgrep -f $TMPDIR/EVC_SEQ | # extract annotations with sequence-based evidence
cut -f1-3 | sort -u >$TMPDIR/GO2.tax2.SEQ.1-3 # sequence-based annotations discovered by the time of GO2
PGO2="$GO2" # Argument representing GO2 for Predictable.sh script
if $ALLOW_ALL; then
PGO2=NONE # Predictable.sh will not restrict based on validatable annotations
fi
$EXEDIR/Predictable.sh -GO1freq $GO1freq -gene2go $GENE2GO $tax1 $tax2 $GO1 $PGO2 $G2 | # Predictable-in-principle, with SOURCE evidence codes
tee $TMPDIR/Predictable.Vable | # validatable=(predictable-in-principle annotations) \INTERSECT (actual annotations in GO2)
fgrep -v -f $TMPDIR/GO1.tax2.allGO.1-3 | # remove predictions already known in target species at earlier date
fgrep -v -f $TMPDIR/GO2.tax2.SEQ.1-3 | # remove sequence-based annotations discovered by later date
tee $TMPDIR/Predictable.Vable.notGO1.notSEQ2 | # list of interest, with PREDICTING evidence code
cut -f1-3 | sort -u > $TMPDIR/Predictable.Vable.notGO1.notSEQ2.1-3 # final set (RECALL denom), without evidence codes
#grep ' NOT ' $TMPDIR/* && die "NOT fields found after Predictable.sh was run"
# Directory the alignment files came from; used only to label the summary.
dataDir=`echo "$@" | newlines | sed 's,/[^/]*$,,' | sort -u`
# Core pass: "sort|uniq -c" turns duplicated aligned pairs into NAF counts,
# then a single awk program reads (1) seqSim pairs, (2) the NAF stream,
# (3) the source gene2go file, and emits predictions meeting NAFthresh.
sort "$@" | uniq -c | sort -nr | awk 'BEGIN{tax1='$tax1';tax2='$tax2';
c1=1+'$c1';c2=1+'$c2'; # increment column since "uniq -c" above prepends NAF to the line
NAFthresh='$NAFthresh';
}
ARGIND==1{seq[$1][$2]=1;next} # orthologous & sequence similar pairs
ARGIND==2{u=$c1;;v=$c2;
if(u in seq && v in seq[u])next; # ignore known orthology or sequence similarity
NAF[u][v]=$1; # store ALL NAFs for now, not just those above the threshold, because
# later we allow the total score of v to be additive across multiple nodes u.
next
}
ARGIND==3{FS="	"; if(/ NOT /)next} # make FS a tab, and ignore "NOT" lines
ARGIND==3&&($1==tax1||$1==tax2){++pGO[$1][$2][$3][$4]; # species, protein, GO, evidence code
C[$3]=$NF; # Category (BP,MF,CC)
}
END{
for(p1 in pGO[tax1]) # loop over all proteins in species1 that have any GO terms
if(p1 in NAF) # if we have a predictive NAF value for that protein...
for(p2 in NAF[p1]) # loop over all the species2 proteins aligned to p1
for(g in pGO[tax1][p1]) # loop over all the GO terms from protein p1
# if protein p2 is not listed at all in the gene2go file...
#... or if it is but does not have this particular GO term...
if(!(p2 in pGO[tax2]) ||!(g in pGO[tax2][p2]))
for(evc in pGO[tax1][p1][g]) {
# Note that this will bump the NAF of p2 for multiple p1s it is aligned to--by construction
NAFpredict[tax2][p2][g][evc]+=NAF[p1][p2]
NAFpredict[tax2][p2][g]["ALL"]+=NAF[p1][p2] # "ALL" accounts for all evidence codes.
}
# Now that we have accumulated all the possible predictions and evidences, print out only those that meet NAFthresh
for(p2 in NAFpredict[tax2])for(g in NAFpredict[tax2][p2])for(evc in NAFpredict[tax2][p2][g])
if(NAFpredict[tax2][p2][g][evc] >= NAFthresh) {
# it is a prediction!!! Print out the NAF and the expected line to find in the later gene2go file:
printf "%d\t%d\t%s\t%s\t%s\t%s\n",NAFpredict[tax2][p2][g][evc],tax2,p2,g,evc,C[g]
# Note, however, that when evc="ALL", grepping for the above line will not match any lines in a gene2go file.
}
}' "$TMPDIR/seqSim" - "$GO1.$GENE2GO" |
fgrep -v -f $TMPDIR/GO1.tax2.allGO.1-3 | # remove ones that are already known (with any evidence) at the earlier date
fgrep -v -f $TMPDIR/GO2.tax2.SEQ.1-3 | # remove ones discovered using sequence evidence at later date
sort -nr | # sort highest first;do NOT remove duplicates: same p2 predicted from diff p1s (or diff evcs) has meaning
tee $TMPDIR/predictions.NAF,1-4,8 |
cut -f2-4 | # remove NAF, evCode & Cat from prediction before search (evc is PREDICTING evidence code!)
sort -u | fgrep -f - "$GO2.$GENE2GO" | # if we validate with allGO we could worry about removing SEQ later.
sort -u > $TMPDIR/validated.$GENE2GO
#grep ' NOT ' $TMPDIR/* && die "NOT fields found after predictions made"
# Now process
# Per-evidence-code summary: predictions vs predictable-in-principle (PIP)
# vs validated, with precision percentage.
evCodesPred=`cut -f5 $TMPDIR/predictions.NAF,1-4,8 | grep -v ALL | sort -u` # col5 because of leading NAF
allRegExp="(`echo $evCodesPred | sed 's/ /|/g'`)"
for evc in $evCodesPred $allRegExp; do
PIP=`egrep "	$evc	" $TMPDIR/Predictable.Vable.notGO1.notSEQ2 | cut -f1-3 | sort -u | wc -l`
pred=`egrep "	$evc	" $TMPDIR/predictions.NAF,1-4,8 | cut -f2-4 | sort -u | tee $TMPDIR/pred$evc | wc -l`
val=`fgrep -f $TMPDIR/pred$evc $TMPDIR/Predictable.Vable.notGO1.notSEQ2.1-3 | sort|tee $TMPDIR/V1 | fgrep -f - $TMPDIR/validated.$GENE2GO | cut -f1-3 | sort -u | tee $TMPDIR/val$evc | wc -l`
#DF_FILES="$TMPDIR/V1 $TMPDIR/val$evc"; diff $DF_FILES >/dev/tty || die "oops, $DF_FILES differ"
echo "$evc $pred $PIP $val" |
awk '$2>0{printf "%10s %3s NAF %3d : %6d pred %6d PIP %5d val prec %5.1f%%\n",
"'$dataDir'",$1,'$NAFthresh',$2,$3,$4,100*$4/$2}'
done > $TMPDIR/predECSummary
# Same summary broken down by GO hierarchy (BP/MF/CC).
Categories=`cut -f8 $TMPDIR/validated.$GENE2GO | sort -u`
for c in $Categories; do
PIP=`grep "	$c" $TMPDIR/Predictable.Vable.notGO1.notSEQ2 | cut -f1-3 | sort -u | wc -l`
pred=`grep "	$c" $TMPDIR/predictions.NAF,1-4,8 | cut -f2-4 | sort -u | tee $TMPDIR/pred$c | wc -l`
val=`fgrep -f $TMPDIR/pred$c $TMPDIR/Predictable.Vable.notGO1.notSEQ2.1-3 | sort|tee $TMPDIR/V1 | fgrep -f - $TMPDIR/validated.$GENE2GO | cut -f1-3 | sort -u | tee $TMPDIR/val$c | wc -l`
#DF_FILES="$TMPDIR/V1 $TMPDIR/val$c"; diff $DF_FILES >/dev/tty || die "oops, $DF_FILES differ"
echo "$c $pred $PIP $val" |
awk '$2>0{printf "%10s %9s NAF %3d : %6d pred %6d PIP %5d val prec %5.1f%%\n",
"'$dataDir'",$1,'$NAFthresh',$2,$3,$4,100*$4/$2}'
done > $TMPDIR/predCatSummary
#grep ' NOT ' $TMPDIR/* && die "NOT fields found after validation step"
# Shorten GO fileNames for output
GO1=`echo $GO1|sed 's,^.*/go/,,'`
GO2=`echo $GO2|sed 's,^.*/go/,,'`
echo "Predictions by evidence code for $dataDir $GO1 -> $GO2, NAF $NAFthresh"
lastCol=`awk '{print NF}' $TMPDIR/predECSummary | sort | uniq -c | sort -nr | head -1 | awk '{print $2}'`
[ "X$lastCol" != X ] && sort -k ${lastCol}gr $TMPDIR/predECSummary
echo "Predictions by GO hierarchy for $dataDir $GO1 -> $GO2, NAF $NAFthresh"
lastCol=`awk '{print NF}' $TMPDIR/predCatSummary | sort | uniq -c | sort -nr | head -1 | awk '{print $2}'`
[ "X$lastCol" != X ] && sort -k ${lastCol}gr $TMPDIR/predCatSummary
# Persist the outputs promised in USAGE before the EXIT trap wipes TMPDIR.
mv $TMPDIR/predictions.NAF,1-4,8 $outName-p # includes leading NAF+duplicates, each should be from a different p1 in tax1
mv $TMPDIR/validated.$GENE2GO $outName-v
[ $NAFthresh -eq 1 ] && mv $TMPDIR/Predictable.Vable.notGO1.notSEQ2 $outName-PIP # same for all NAF values so only copy for NAF 1
| true
|
047b4d5a86d1c8c3b76f2094861667be4b115346
|
Shell
|
tectronics/ath
|
/ath/build/get-version.sh
|
UTF-8
| 924
| 4.25
| 4
|
[
"Apache-1.1",
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
#
# extract version numbers from a header file
#
# USAGE: get-version.sh CMD VERSION_HEADER
#   where CMD is one of: all, major, libtool
#
#   get-version.sh all     returns a dotted version number
#   get-version.sh major   returns just the major version number
#   get-version.sh libtool returns a version "libtool -version-info" format
#
if test $# != 2; then
  # FIX: the usage text previously omitted "libtool", which the script supports.
  echo "USAGE: $0 CMD INCLUDEDIR"
  echo "  where CMD is one of: all, major, libtool"
  exit 1
fi
# Pull the numeric component out of each #define in the version header;
# $() nests quoting cleanly (POSIX), and the header path is quoted.
major="$(sed -n '/#define.*ATH_MAJOR_VERSION/s/^.*\([0-9][0-9]*\).*$/\1/p' "$2")"
minor="$(sed -n '/#define.*ATH_MINOR_VERSION/s/^.*\([0-9][0-9]*\).*$/\1/p' "$2")"
patch="$(sed -n '/#define.*ATH_PATCH_VERSION/s/^.*\([0-9][0-9]*\).*$/\1/p' "$2")"
if test "$1" = "all"; then
  echo ${major}.${minor}.${patch}
elif test "$1" = "major"; then
  echo ${major}
elif test "$1" = "libtool"; then
  # libtool -version-info is current:revision:age; current==age==minor here
  # NOTE(review): presumably intentional (APR-style versioning) -- confirm.
  echo ${minor}:${patch}:${minor}
else
  echo "ERROR: unknown version CMD"
  exit 1
fi
| true
|
4c43dfc7f450b86d8107c6d328a0de31625c8047
|
Shell
|
silatiol/tesi
|
/src/utils.sh
|
UTF-8
| 1,160
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Poll the Android emulator once per second (via adb) until the
# sys.boot_completed property reports "1", echoing each observed status.
function wait_emulator_to_be_ready () {
  boot_completed=false
  until [ "$boot_completed" == true ]; do
    status=$(adb wait-for-device shell getprop sys.boot_completed | tr -d '\r')
    echo "Boot Status: $status"
    if [ "$status" == "1" ]; then
      boot_completed=true
    else
      sleep 1
    fi
  done
}
# Switch the emulator's locale to $LANGUAGE-$COUNTRY when both env vars
# are set; requires an adb-rootable emulator image.
function change_language_if_needed() {
# "${VAR// }" strips spaces, so whitespace-only values count as empty.
if [ ! -z "${LANGUAGE// }" ] && [ ! -z "${COUNTRY// }" ]; then
wait_emulator_to_be_ready
echo "Language will be changed to ${LANGUAGE}-${COUNTRY}"
# stop/start restarts the Android runtime so the new properties apply.
adb root && adb shell "setprop persist.sys.language $LANGUAGE; setprop persist.sys.country $COUNTRY; stop; start" && adb unroot
echo "Language is changed!"
fi
}
# Install every APK bundled under apps/ once the emulator has finished
# booting.  (Named for its original purpose of sideloading Google Play.)
function install_google_play () {
  wait_emulator_to_be_ready
  echo "Apps will be installed"
  # Glob directly instead of parsing `ls` output: robust to spaces in
  # filenames and to an empty apps/ directory.
  for f in apps/*.apk; do
    [ -e "$f" ] || continue
    adb install "$f"
  done
  echo "Done"
}
# Export the emulator window over VNC: close the nested-virtualization
# notice window, then attach x11vnc to the emulator's X window id.
function start_x11 () {
wmctrl -c "Emulator Running in Nested Virtualization"
sleep 1
# NOTE(review): the window id is the first field of the LAST xwininfo tree
# line matching 'Android Emulator' -- confirm this stays stable across
# emulator versions.  VNC runs unauthenticated (-nopw) and shared.
/usr/bin/x11vnc -id $(xwininfo -root -tree | grep 'Android Emulator' | tail -n1 | sed "s/^[ \t]*//" | cut -d ' ' -f1) -forever -shared -nopw &
}
# Entry sequence: give the emulator window time to appear, publish it over
# VNC, then side-load the bundled APKs.  The fixed sleeps are empirical
# startup delays.
sleep 10
start_x11
sleep 4
install_google_play
| true
|
5ab5911674d6a5d459572cbb75f1a495ae4148a5
|
Shell
|
touch-head-optimistically/golang_oracle_lib
|
/server_prebuild.sh
|
UTF-8
| 821
| 2.890625
| 3
|
[] |
no_license
|
#/bin/bash
# NOTE(review): line above is a malformed shebang (missing '!'); since the
# script is meant to be source'd (see below) it never acts as one -- confirm
# before fixing.
#usage
# source ./server_prebuild.sh
# One-time CGO build-environment setup for the Oracle client dependency.
# The echo below warns (in Chinese): do not run this with ./ -- use
# `source ./server_prebuild.sh` so the exported variables take effect.
echo '重要提示: 请不要直接./运行,为了确保其中的环境变量生效,应该使用: source ./server_prebuild.sh'
# Because danastudio-ants pulls in Oracle's packages, the first build needs
# per-platform CGO configuration (only once); CentOS example follows.
# 1. Install oracle oci
rpm -ivh oracle-instantclient12.1-basic-12.1.0.2.0-1.x86_64.rpm oracle-instantclient12.1-devel-12.1.0.2.0-1.x86_64.rpm
# 2. Symlink the Oracle client libraries where the linker expects them.
ln -s /usr/lib/oracle/12.1/client64/lib/libclntsh.so.12.1 /usr/lib64/libclntsh.so.11.1
yes|cp /usr/lib/oracle/12.1/client64/lib/* /usr/lib64/
# 3. Configure the build environment:
# export the current directory as $PKG_CONFIG_PATH:
export PKG_CONFIG_PATH=./
# 4. Once configured, build as usual.
| true
|
6004c8d56efc62dee39f23c4b5104b9218bd3075
|
Shell
|
kangapp/openPAI
|
/kafka/kafka/deploy/kafka-configuration/kafka-generate.sh
|
UTF-8
| 2,564
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Get the configuration from configmap-volume, and cover the original file in the path of hbase's conf
# With this solution, you will be able to use one image to run different role.
# Quote $KAFKA_HOME throughout so a path containing spaces cannot break the
# copy/sed/python invocations below.
cp /kafka-configuration/server.properties "$KAFKA_HOME/config/cluster-server.properties"
# You could process the un-complete file copied to hbase in your own way.
# example 1
# With the environment passed by kubernetes and docker, fill the target with sed or other tools
sed -i "s/{MASTER_ADDRESS}/${MASTER_ADDRESS}/g" "$KAFKA_HOME/config/cluster-server.properties"
sed -i "s/{KAFKA_LOG_DIR}/${KAFKA_LOG_DIR}/g" "$KAFKA_HOME/config/cluster-server.properties"
# example 2
# In our project, we provide a python tool, to fill the target value from the configmap-volume of cluster-configuration. And in this tool, we take advantage of jinja2. You could find more knowledge about jinja2 in this website. http://jinja.pocoo.org/docs/2.10/
# You could find the tool in the code path: src/base-image/build/host-configure.py
## Note: This feature will be upgrade in the future.
## Upgrade reason: 1) To improve the management of service configuration.
## Upgrage reason: 2) To make key-value pair more flexible. Because now, map relation is fixed.
## Upgrade reason: 3) To solve some issue.
HOST_NAME=$(hostname)
/usr/local/host-configure.py -c /host-configuration/host-configuration.yaml -f "$KAFKA_HOME/config/cluster-server.properties" -n "$HOST_NAME"
| true
|
b4630104c56b101794f7fc57eacce6a250068867
|
Shell
|
pabagan/knowledgebase
|
/hashicorp-vault/course/m7/3-configure-consul.sh
|
UTF-8
| 1,499
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
# Walkthrough: stand up a local Consul agent and wire Vault's Consul
# secrets engine to it.  Run step by step -- `consul agent` below blocks,
# and the remaining commands belong in a second terminal as noted.
# You are going to need the consul binary to follow along here.
# You can get it by going to https://www.consul.io/downloads
# We are going to start up a basic instance of Consul and
# get a token with permissions to generate new token.
# You don't need to know much about Consul aside from the
# fact it uses tokens for authentication and authorization
# just like Vault.
# Create a data subdirectory in m7
mkdir data
# Launch consul server instance
consul agent -bootstrap -config-file="consul-config.hcl" -bind="127.0.0.1"
# From a separate terminal window run the following
consul acl bootstrap
# Set CONSUL_HTTP_TOKEN to SecretID
# Linux and MacOS
# (replace SECRETID_VALUE with the SecretID printed by `consul acl bootstrap`)
export CONSUL_HTTP_TOKEN=SECRETID_VALUE
# Next we have to create a policy and role for new tokens
# that Vault will generate on Consul
consul acl policy create -name=web -rules @web-policy.hcl
# Now we'll configure out Consul secrets engine
vault path-help consul/
vault path-help consul/config/access
vault write consul/config/access address="http://127.0.0.1:8500" token=$CONSUL_HTTP_TOKEN
# And add a role to provision tokens with a ttl of 1 hour and a max of 2 hours
vault path-help consul/roles/web
vault write consul/roles/web name=web policies=web ttl=3600 max_ttl=7200
# Now how do we use this role to get a token? By using the creds path
vault path-help consul/creds/web
vault read consul/creds/web
# Check on the consul side and we can see the token created
consul acl token list -format=json | jq .[].AccessorID
| true
|
f681fbe0f70a31b7798030c90bdaf9c47da558f7
|
Shell
|
lawremi/RGtk2
|
/books/rgui/ch-gWidgets/makePackage.sh
|
UTF-8
| 191
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/sh
## move ex-*.Rnw files into package
# Tangle (extract the R code from) every ex-gWidgets-*.Rnw vignette in the
# current directory, then copy the resulting .R files into the package's
# examples directory.
PACKAGEDIR=../ProgGUIinR/inst/Examples/ch-gWidgets/
for i in ex-gWidgets-*.Rnw; do
  # quote "$i" so a filename containing spaces survives word-splitting
  R CMD Stangle "$i";
done
cp ex-gWidgets*.R "$PACKAGEDIR"
| true
|
a5428af562de3ee6115b23e366502ab7bd892c8d
|
Shell
|
sorz/dotfiles
|
/install.sh
|
UTF-8
| 2,040
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Dotfiles installer: pull the latest ~/.dotfiles and symlink/generate the
# per-tool configuration (git, mintty, bash, python, gnupg, byobu, wget,
# vim, neovim). Safe to re-run; re-execs itself if the pull updated it.
cd ~/.dotfiles
T=`stat -c %Y install.sh`
git pull
# If 'git pull' changed this script, re-exec the fresh copy.
if [ $T != `stat -c %Y install.sh` ]; then
    exec ./install.sh
fi
# Git
ln -svf ~/.dotfiles/git/.gitattributes ~
rm -f ~/.gitconfig
GPGEXE='/mnt/c/Program Files (x86)/GnuPG/bin/gpg.exe'
# On WSL with Windows GnuPG installed, generate a gitconfig that points
# commit signing at the Windows gpg.exe; otherwise just symlink the stock one.
if [[ "`uname -r`" == *"microsoft"* ]] && [[ -x "$GPGEXE" ]]; then
    echo "# Generated at `date`" > ~/.gitconfig
    cat ~/.dotfiles/git/.gitconfig >> ~/.gitconfig
    echo '[gpg]' >> ~/.gitconfig
    WINHOME="/mnt/c/Users/`whoami`/"
    # Mirror the config into the Windows home dir if it exists.
    if [[ -d "$WINHOME" ]]; then
        cp ~/.gitconfig "$WINHOME"
        printf "\tprogram = C:/Program Files (x86)/GnuPG/bin/gpg.exe\n" \
            >> "$WINHOME/.gitconfig"
    fi
    printf "\tprogram = gpg.exe\n" >> ~/.gitconfig
else
    ln -svf ~/.dotfiles/git/.gitconfig ~
fi
# Mintty
if [ "$TERM" = "xterm" ]; then
    ln -svf ~/.dotfiles/mintty/.minttyrc ~
fi
# Cache dir
mkdir -pm 700 ~/.cache
touch ~/.cache/.nobackup
# Bash
ln -svf ~/.dotfiles/bash/.bashrc ~
BASHD="$HOME/.dotfiles/bash/generated"
mkdir -p $BASHD
touch -a "$BASHD/empty"
# Python
mkdir -p ~/.cache/pycache
if hash pip 2> /dev/null; then
    pip completion --bash > $BASHD/pip_completion
fi
# GnuPG
mkdir -p ~/.gnupg
chmod 700 ~/.gnupg
ln -svf ~/.dotfiles/gnupg/gpg-agent.conf ~/.gnupg/
ln -svf ~/.dotfiles/gnupg/gpg.conf ~/.gnupg/
# Byobu
if [ -d "$HOME/.byobu" ]; then
    ln -svf ~/.dotfiles/byobu/.tmux.conf ~/.byobu/
fi
# wget
touch -a "$HOME/.wget-hsts"
chmod og-rwx "$HOME/.wget-hsts"
# Vim
ln -svf ~/.dotfiles/vim/.vimrc ~
mkdir -pm 700 ~/.cache/vim
PLUG="$HOME/.vim/autoload/plug.vim"
PLUGURL="https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim"
# Install or (via curl -z timestamp check) update vim-plug, then sync plugins.
if [ -f "$PLUG" ]; then
    echo update curl -Lo $PLUG -z $PLUG $PLUGURL
    curl -Lo $PLUG -z $PLUG $PLUGURL
else
    echo install curl -Lo $PLUG --create-dirs $PLUGURL
    curl -Lo $PLUG --create-dirs $PLUGURL
fi
/usr/bin/vim +PlugUpdate! +qall
# neovim
if hash nvim 2> /dev/null; then
    ln -svf ~/.dotfiles/nvim ~/.config/
    nvim --headless '+Lazy! sync' +qa
fi
printf "\nDone.\n"
| true
|
af12c528762966634bf3f964eea77e5aba9e1ea1
|
Shell
|
asmitamthorat/shellcoding
|
/DAY6_advancedPrograms/factors_of_Number.sh
|
UTF-8
| 191
| 3.34375
| 3
|
[] |
no_license
|
#! /bin/bash
# Read an integer from the user and print its proper factors: every divisor
# strictly between 1 and the number itself, one per line, in ascending order.

# Print each factor of $1 (2 .. n-1) on its own line.
print_factors() {
    local n=$1
    local i
    for (( i = 2; i < n; i++ )); do
        # arithmetic test (( )) is the idiomatic form of [ $((n%i)) -eq 0 ]
        if (( n % i == 0 )); then
            echo "$i"
        fi
    done
}

read -p "enter the number" num
echo "factors are the following"
print_factors "$num"
| true
|
7315e5a8b18087d3ed35b724805d8ed4a64455b4
|
Shell
|
MPC-SoK/frameworks
|
/mp-spdz/test_readme.sh
|
UTF-8
| 786
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Smoke-test the MP-SPDZ README examples: compile three demo programs,
# set up TLS certificates and per-party inputs, then run each demo under
# four protocol VMs and grep the combined output for the expected result.
cd MP-SPDZ
# Compile each demo circuit; abort on the first compile failure.
for i in mult3 innerprod xtabs; do
    ./compile.py $i || exit 1
done
# Generate self-signed TLS material for 3 parties.
Scripts/setup-ssl.sh 3
mkdir -p Player-Data
# Per-party private inputs for the mult3 demo (14 * 12 * 8 = 1344,
# which is the value grepped for below).
echo 14 > Player-Data/Input-P0-0
echo 12 > Player-Data/Input-P1-0
echo 8 > Player-Data/Input-P2-0
progs="./mascot-party.x ./hemi-party.x ./malicious-shamir-party.x ./shamir-party.x"
# Run all three parties in the background; the pipeline collects their
# combined stdout and checks for the expected product.
for prog in $progs; do
    for i in 0 1 2; do
        $prog -N 3 -p $i mult3 & true
    done | grep 1344 || exit 1
done
# Swap in the xtabs demo's own input files for parties 0 and 1.
for i in 0 1; do
    cp Programs/Source/xtabs.Input-P$i-0 Player-Data/Input-P$i-0
done
for prog in $progs; do
    for i in 0 1 2; do
        $prog -N 3 -p $i innerprod & true
    done | grep 'InnerProd: 2248' || exit 1
done
for prog in $progs; do
    for i in 0 1 2; do
        $prog -N 3 -p $i xtabs & true
    done | grep 'expected 6, got 6' || exit 1
done
| true
|
ed8aa2d8aee1e662ba590b9efb865fb4ed3fa886
|
Shell
|
littlet1968/div_scripts
|
/check_usage.sh
|
UTF-8
| 2,683
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Report CPU, memory and swap utilisation (with escalation thresholds in the
# messages) and list the top-10 memory-consuming processes.
# NOTE(review): the CPU/swap section writes "log.txt" in the current working
# directory while the memory section uses /tmp/log.txt — presumably
# intentional, but worth confirming.
/usr/bin/top -n1 | /bin/grep -i -C3 cpu\(s\) | /usr/bin/tee "log.txt"
string1=$(/bin/grep -i cpu\(s\) log.txt)
string3=$(/bin/grep -i swap: log.txt)
#===============================================================================
# Get CPU percent_used
#===============================================================================
# Collapse runs of whitespace, then take the field before the first '%'.
totalCpu=$(/bin/echo $string1 | /bin/sed 's/\s\s*/ /g' | /bin/cut -d'%' -f1 | /bin/cut -d' ' -f2)
/bin/echo
/bin/echo "LOOK HERE ..."
/bin/echo
/bin/echo "Percentage of used CPU = "$totalCpu"% :: escalate to App owner (for SQL tuning) if > 90%"
#===============================================================================
# Get Memory percent_used
# -----------------------
# Calculation based on OEM process (MOS Doc ID 1908853.1 )
#
# formula used by Enterprise Manager 12.1.0.3 for Linux Memory Utilization (%), for example:
#    Memory Utilization (%) = (100.0 * (activeMem) / realMem)
#                           = 100 * 25046000/99060536
#                           = 25.28
#    EM Shows : 25.5
# Here, activeMem is Active Memory (Active), and realMem is Total Memory (MemTotal).
#
#===============================================================================
/bin/cat /proc/meminfo | /bin/grep MemTotal > "/tmp/log.txt"
/bin/cat /proc/meminfo | /bin/grep Active\: >> "/tmp/log.txt"
# Field 2 of the meminfo lines is the size in kB.
totalMem=`/bin/grep -i mem /tmp/log.txt | /bin/awk '{print $2}' `
usedMem=`/bin/grep -i act /tmp/log.txt | /bin/awk '{print $2}' `
pctUsedMem=`/bin/echo "scale=2;$usedMem/$totalMem*100" | /usr/bin/bc`
/bin/echo "Percentage of used memory =" $pctUsedMem"% :: escalate to DBA (DB bounce needed) if > 95%"
#===============================================================================
# Get Swap percent_used
#===============================================================================
# ${var%?} strips the trailing unit character from top's "Swap:" fields.
totalSwap1=$(/bin/echo $string3 | /bin/sed 's/\s\s*/ /g' | /bin/cut -d' ' -f2)
totalSwap2="${totalSwap1%?}"
c=$totalSwap2
usedSwap1=$(/bin/echo $string3 | /bin/sed 's/\s\s*/ /g' | /bin/cut -d' ' -f4)
usedSwap2="${usedSwap1%?}"
d=$usedSwap2
percentageUsedSwap2=$(/bin/echo "scale=4;$d/$c*100" | /usr/bin/bc)
percentageUsedSwap="${percentageUsedSwap2%??}"
/bin/echo "Percentage of used swap =" $percentageUsedSwap"% :: escalate to SysAdmin (swap undersized) if > 50%"
/bin/echo
/bin/echo
#===============================================================================
# List top-10 memory consuming PIDs
#===============================================================================
/bin/echo
/bin/echo "List ot top-10 memory consuming PIDs"
/bin/echo "------------------------------------"
/bin/echo
# NOTE(review): 'sort +5 -6' is the obsolete key syntax (sort by field 6,
# i.e. RSS in 'ps aux'); modern coreutils would need 'sort -k6,6 -n -r'.
/bin/ps aux | /bin/sort +5 -6 -n -r | /usr/bin/head -10
| true
|
f33761d453331c130d6b9c129fde9ce4edceac1d
|
Shell
|
alihanyalcin/edgex-go
|
/snap/hooks/install
|
UTF-8
| 10,504
| 3.390625
| 3
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash -ex
# Snap install hook for edgexfoundry: seeds $SNAP_DATA with writable copies
# of every service's configuration (with $SNAP/$SNAP_DATA/$SNAP_COMMON
# rewritten to their "current"-symlink form), prepares data directories,
# initialises postgres with a kong user/database, and turns off the
# non-default services.
# get the values of $SNAP_DATA and $SNAP using the current symlink instead of
# the default behavior which has the revision hard-coded, which breaks after
# a refresh
SNAP_DATA_CURRENT=${SNAP_DATA/%$SNAP_REVISION/current}
SNAP_CURRENT=${SNAP/%$SNAP_REVISION/current}
# install all the config files from $SNAP/config/SERVICE/res/configuration.toml
# into $SNAP_DATA/config
# note that app-service-configurable is handled separately
mkdir -p "$SNAP_DATA/config"
for service in edgex-mongo security-proxy-setup security-secrets-setup security-secretstore-setup core-command config-seed core-data core-metadata support-logging support-notifications support-scheduler sys-mgmt-agent device-random device-virtual; do
    if [ ! -f "$SNAP_DATA/config/$service/res/configuration.toml" ]; then
        mkdir -p "$SNAP_DATA/config/$service/res"
        cp "$SNAP/config/$service/res/configuration.toml" "$SNAP_DATA/config/$service/res/configuration.toml"
        # do replacement of the $SNAP, $SNAP_DATA, $SNAP_COMMON environment variables in the config files
        sed -i -e "s@\$SNAP_COMMON@$SNAP_COMMON@g" \
            -e "s@\$SNAP_DATA@$SNAP_DATA_CURRENT@g" \
            -e "s@\$SNAP@$SNAP_CURRENT@g" \
            "$SNAP_DATA/config/$service/res/configuration.toml"
    fi
done
# handle app-service-configurable's various profiles:
# 1. ensure all the directories from app-service-configurable exist
# 2. copy the config files from $SNAP into $SNAP_DATA
# 3. replace the various env vars that might be in that config file with their
#    "current" symlink equivalent
mkdir -p "$SNAP_DATA/config/app-service-configurable/res/rules-engine"
RULES_ENGINE_PROFILE_CONFIG="config/app-service-configurable/res/rules-engine/configuration.toml"
if [ ! -f "$SNAP_DATA/$RULES_ENGINE_PROFILE_CONFIG" ]; then
    cp "$SNAP/$RULES_ENGINE_PROFILE_CONFIG" "$SNAP_DATA/$RULES_ENGINE_PROFILE_CONFIG"
    sed -i -e "s@\$SNAP_COMMON@$SNAP_COMMON@g" \
        -e "s@\$SNAP_DATA@$SNAP_DATA_CURRENT@g" \
        -e "s@\$SNAP@$SNAP_CURRENT@g" \
        "$SNAP_DATA/$RULES_ENGINE_PROFILE_CONFIG"
fi
# handle device-random device profile
cp "$SNAP/config/device-random/res/device.random.yaml" "$SNAP_DATA/config/device-random/res/device.random.yaml"
# handle device-virtual device profiles
for profileType in bool float int uint; do
    cp "$SNAP/config/device-virtual/res/device.virtual.$profileType.yaml" "$SNAP_DATA/config/device-virtual/res/device.virtual.$profileType.yaml"
done
# also handle java services' application.properties
# shellcheck disable=SC2043
for jsvc in edgex-support-rulesengine; do
    if [ ! -f "$SNAP_DATA/config/config-seed/res/properties/$jsvc/application.properties" ]; then
        mkdir -p "$SNAP_DATA/config/config-seed/res/properties/$jsvc"
        cp "$SNAP/config/config-seed/res/properties/$jsvc/application.properties" "$SNAP_DATA/config/config-seed/res/properties/$jsvc/application.properties"
        # also replace SNAP_DATA and SNAP_COMMON in the application files
        sed -i -e "s@\$SNAP_COMMON@$SNAP_COMMON@g" -e "s@\$SNAP_DATA@$SNAP_DATA_CURRENT@g" "$SNAP_DATA/config/config-seed/res/properties/$jsvc/application.properties"
    fi
done
# create support-rulesengine directories for templates/rules
if [ ! -f "$SNAP_DATA/support-rulesengine/templates" ]; then
    mkdir -p "$SNAP_DATA/support-rulesengine/templates"
    cp "$SNAP/jar/support-rulesengine/templates/rule-template.drl" "$SNAP_DATA/support-rulesengine/templates/rule-template.drl"
fi
if [ ! -f "$SNAP_DATA/support-rulesengine/rules" ]; then
    mkdir -p "$SNAP_DATA/support-rulesengine/rules"
fi
# for the kong pki setup file, we need to set the hostname as localhost
# and then set the directory to store the cert files as $SNAP_DATA/kong/ssl
mkdir -p "$SNAP_DATA/config/security-secrets-setup/res"
for svc in kong vault; do
    CONFIG_FILE_PATH="config/security-secrets-setup/res/pkisetup-$svc.json"
    if [ ! -f "$SNAP_DATA/$CONFIG_FILE_PATH" ]; then
        # replace the hostname with localhost using jq
        # (edit to a .tmp file first, then rename into place)
        jq --arg WORKDIR "$SNAP_DATA_CURRENT" \
            '.x509_tls_server_parameters.tls_host = "localhost" | .pki_setup_dir = "pki" | .working_dir = $WORKDIR' \
            "$SNAP/$CONFIG_FILE_PATH" > "$SNAP_DATA/$CONFIG_FILE_PATH.tmp"
        mv "$SNAP_DATA/$CONFIG_FILE_PATH.tmp" "$SNAP_DATA/$CONFIG_FILE_PATH"
        chmod 600 "$SNAP_DATA/$CONFIG_FILE_PATH"
    fi
done
# ensure consul dirs exist
mkdir -p "$SNAP_DATA/consul/data"
mkdir -p "$SNAP_DATA/consul/config"
# ensure mongodb data dirs exist
mkdir -p "$SNAP_DATA/mongo/db"
# ensure vault pki directory exists
mkdir -p "$SNAP_DATA/vault/pki"
# touch all the kong log files to ensure they exist
mkdir -p "$SNAP_COMMON/logs"
for type in proxy admin; do
    for op in access error; do
        touch "$SNAP_COMMON/logs/kong-$type-$op.log"
    done
done
# install redis configuration files
if [ ! -f "$SNAP_DATA/config/redis/redis.conf" ]; then
    mkdir -p "$SNAP_DATA/redis"
    cp "$SNAP/config/redis/redis.conf" "$SNAP_DATA/redis/redis.conf"
    sed -i -e "s@\$SNAP_COMMON@$SNAP_COMMON@g" \
        -e "s@\$SNAP_DATA@$SNAP_DATA_CURRENT@g" \
        -e "s@\$SNAP@$SNAP_CURRENT@g" \
        "$SNAP_DATA/redis/redis.conf"
fi
# the vault config needs to be generated with sed, replacing $SNAP_DATA in the file
# with the actual absolute directory
# note that if anyone ever somehow has a "@" in their $SNAP_DATA this will likely break :-/
if [ ! -f "$SNAP_DATA/config/security-secret-store/vault-config.hcl" ]; then
    mkdir -p "$SNAP_DATA/config/security-secret-store"
    sed "s@\$SNAP_DATA@$SNAP_DATA_CURRENT@g" "$SNAP/config/security-secret-store/vault-config.hcl.in" > "$SNAP_DATA/config/security-secret-store/vault-config.hcl"
    chmod 644 "$SNAP_DATA/config/security-secret-store/vault-config.hcl"
fi
# the kong config file needs to be generated with 3 changes from the default one included in the snap
# - we set the prefix to be $SNAP_DATA/kong as an absolute path (note that this has to be done here in the install hook)
# - we set the nginx user to be root
if [ ! -f "$SNAP_DATA/config/security-proxy-setup/kong.conf" ]; then
    mkdir -p "$SNAP_DATA/config/security-proxy-setup"
    cp "$SNAP/config/security-proxy-setup/kong.conf" "$SNAP_DATA/config/security-proxy-setup/kong.conf"
    # replace the default prefix setting with an absolute path using $SNAP_DATA
    # note that if anyone ever has a "@" in their $SNAP_DATA this will likely fail
    sed -i "s@#prefix = /usr/local/kong/@prefix = $SNAP_DATA_CURRENT/kong@" "$SNAP_DATA/config/security-proxy-setup/kong.conf"
    # also replace the default nginx user/group to be root
    sed -i "s@#nginx_user = nobody nobody@nginx_user = root root@" "$SNAP_DATA/config/security-proxy-setup/kong.conf"
fi
# setup postgres db config file with env vars replaced
if [ ! -f "$SNAP_DATA/etc/postgresql/10/main/postgresql.conf" ]; then
    mkdir -p "$SNAP_DATA/etc/postgresql/10/main"
    cp "$SNAP/etc/postgresql/10/main/postgresql.conf" "$SNAP_DATA/etc/postgresql/10/main/postgresql.conf"
    # do replacement of the $SNAP, $SNAP_DATA, $SNAP_COMMON environment variables in the config files
    sed -i -e "s@\$SNAP_COMMON@$SNAP_COMMON@g" \
        -e "s@\$SNAP_DATA@$SNAP_DATA_CURRENT@g" \
        -e "s@\$SNAP@$SNAP_CURRENT@g" \
        "$SNAP_DATA/etc/postgresql/10/main/postgresql.conf"
fi
# ensure the postgres data directory exists and is owned by snap_daemon
mkdir -p "$SNAP_DATA/postgresql"
chown -R snap_daemon:snap_daemon "$SNAP_DATA/postgresql"
# setup the postgres data directory
gosu snap_daemon "$SNAP/usr/lib/postgresql/10/bin/initdb" -D "$SNAP_DATA/postgresql/10/main"
# ensure the sockets dir exists and is properly owned
mkdir -p "$SNAP_COMMON/sockets"
chown -R snap_daemon:snap_daemon "$SNAP_COMMON/sockets"
# start postgres up and wait a bit for it so we can create the database and user
# for kong
snapctl start "$SNAP_NAME.postgres"
# add a kong user and database in postgres - note we have to run these through
# the perl5lib-launch scripts to setup env vars properly and we need to loop
# trying to do this because we have to wait for postgres to start up
iter_num=0
MAX_POSTGRES_INIT_ITERATIONS=10
until gosu snap_daemon "$SNAP/bin/perl5lib-launch.sh" "$SNAP/usr/bin/createuser" kong; do
    sleep 1
    iter_num=$(( iter_num + 1 ))
    if [ $iter_num -gt $MAX_POSTGRES_INIT_ITERATIONS ]; then
        echo "failed to create kong user in postgres after $iter_num iterations"
        exit 1
    fi
done
iter_num=0
until gosu snap_daemon "$SNAP/bin/perl5lib-launch.sh" "$SNAP/usr/bin/createdb" kong; do
    sleep 1
    iter_num=$(( iter_num + 1 ))
    if [ $iter_num -gt $MAX_POSTGRES_INIT_ITERATIONS ]; then
        echo "failed to create kong db in postgres after $iter_num iterations"
        exit 1
    fi
done
# stop postgres again in case the security services should be turned off
snapctl stop "$SNAP_NAME.postgres"
# finally, disable and turn off non-default services
# by default, we want the support-*, device-*, and redis services
# off.
# also the app-service-configurable service since that is meant to replace the
# export services
for svc in support-notifications support-scheduler support-logging app-service-configurable support-rulesengine device-random device-virtual redis; do
    # set the service as off, so that the setting is persistent after a refresh
    # due to snapd bug: https://bugs.launchpad.net/snapd/+bug/1818306
    snapctl set $svc=off
    # also disable the service so it doesn't initially run - note that just
    # setting the service as off above isn't quite what we want as the install
    # tasks go:
    # - install hook runs
    # - all non-disabled services are started
    # - configure hook runs (thus disabled services turned off)
    # as such, we don't want the services we turn off initially to be started
    # before the configure hook runs as it will be resource intensive for no
    # reason just to start a bunch of services and then immediately try to
    # stop them afterwards
    snapctl stop --disable "$SNAP_NAME.$svc"
done
# if we are on arm64, disable the security-proxy because kong isn't
# properly supported on arm64 due to incorrect memory pointers used by lua and
# openresty
# see https://github.com/edgexfoundry/blackbox-testing/issues/185 for more
# details
if [ "$SNAP_ARCH" == "arm64" ]; then
    snapctl set security-proxy=off
    snapctl stop --disable "$SNAP_NAME.kong-daemon"
    snapctl stop --disable "$SNAP_NAME.security-proxy-setup"
fi
| true
|
4ad30db8bd1aea8fa014e353aa6bb51c18f8a4c6
|
Shell
|
shakiyam/EMCC13.2
|
/start.sh
|
UTF-8
| 386
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -eu -o pipefail

# Load the deployment settings from the local .env file, then bring up the
# Oracle database, the OMS and the management agent as the oracle user.
env_file=.env
if [ ! -f $env_file ]; then
    echo "File not found: $env_file"
    exit 1
fi

# Turn every non-comment KEY=VALUE line of the env file into an exported
# environment variable.
# shellcheck disable=SC2046
export $(grep -v ^# < $env_file | xargs)

sudo -u oracle "$ORACLE_HOME/bin/dbstart" "$ORACLE_HOME"
sudo -u oracle "$ORACLE_MIDDLEWARE_HOME/bin/emctl" start oms
sudo -u oracle "$AGENT_HOME/bin/emctl" start agent
| true
|
e0f3484fba242d67675355c36a7b3b5e7086df87
|
Shell
|
openstack/charm-heat
|
/hooks/upgrade-charm
|
UTF-8
| 793
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# Wrapper to deal with newer Ubuntu versions that don't have py2 installed
# by default.
#
# Installs the python/python3 dependency packages the real hook needs, then
# execs ./hooks/upgrade-charm.real in place of this script.

# Re-assert errexit: the '-e' in the shebang is lost when the script is
# invoked as 'bash <script>'.
set -e

declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml' 'dnspython' 'openstackclient')

# drop this when trusty support is ended. Only need python3 at that point
release=$(lsb_release -c -s)
if [ "$release" == "trusty" ]; then
    PYTHON="python"
else
    PYTHON="python3"
    # if python-openstackclient is installed then remove it
    # (fixed: '2>&1 > /dev/null' leaked stderr to the terminal and left
    # stdout on it during ordering — redirections must be '> /dev/null 2>&1')
    if dpkg -s python-openstackclient > /dev/null 2>&1; then
        apt purge -y python-openstackclient
    fi
fi

# Install "<python>-<dep>" via apt unless it is already present.
check_and_install() {
    local pkg="${1}-${2}"
    if ! dpkg -s "${pkg}" > /dev/null 2>&1; then
        apt-get -y install "${pkg}"
    fi
}

for dep in "${DEPS[@]}"; do
    check_and_install "${PYTHON}" "${dep}"
done

exec ./hooks/upgrade-charm.real
| true
|
a6bb2eb6272573d757fcc397a264b7502888cfcb
|
Shell
|
dohoang1102/ChromeIcon
|
/chrome
|
UTF-8
| 864
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
# Author: willscott@gmail.com
# Launch Chromium with a per-profile user-data dir and set a per-profile
# custom icon (read by a SIMBL plugin) before starting the app.
# NOTE(review): arrays and ${ARGS[@]:1} are bashisms under #!/bin/sh — this
# relies on /bin/sh being bash-compatible (as on macOS); confirm.
CHROME=/Applications/Chromium.app
DATA="$HOME/Library/Application Support/Chromium"
#CHROME=/Applications/Google\ Chrome.app
#DATA="$HOME/Library/Application Support/Google/Chrome"
# Make sure a profile was chosen
if [ $# -eq 0 ]; then
    echo "Usage: $0 <profile> [--chrome-arguments]"
    exit 0;
fi
# Fork off of the shell to start a new process group.
# (the script re-invokes itself with a '--' sentinel as $2; the re-invoked
# copy falls through to the launch code below)
ARGS=($@)
if [ "x$2" != "x--" ]; then
    $0 $1 -- ${ARGS[@]:1} 1> /dev/null 2> /dev/null &
    exit 0
fi
# Read the bundle id and executable name from the app's Info.plist.
IDENTIFIER=`defaults read "$CHROME/Contents/Info" CFBundleIdentifier`
EXECUTABLE=`defaults read "$CHROME/Contents/Info" CFBundleExecutable`
#Set the custom icon for the SIMBL Plugin
defaults write $IDENTIFIER CustomIcon "$DATA/$1/icon.png"
#Run the App
nohup "$CHROME/Contents/MacOS/$EXECUTABLE" --enable-udd-profiles --user-data-dir="$DATA/$1" ${ARGS[@]:2} 0<&- 2>&1 >/dev/null
| true
|
12f050d13fac2ff4ee986dd52136fdd23a26d291
|
Shell
|
yorkmena/ShellAssignmentOffice
|
/23.sh
|
UTF-8
| 195
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Prompt for a directory and an age in days, then list the regular files
# directly inside that directory (no recursion) last modified more than
# N days ago.
echo "Enter directory"
read -r dir
# quote "$dir" so an empty or space-containing answer is handled correctly
if [ ! -d "$dir" ]; then
    echo "dir does not exist"
    exit 1
fi
echo "Number of days"
read -r n
# resolve to an absolute path so find prints absolute file names
d=$(realpath "$dir")
find "$d"/* -maxdepth 0 -type f -mtime +"$n"
| true
|
56d74895be06e6c21b6794cd7c76ab42c65fc45f
|
Shell
|
cyclestreets/cyclescape-chef
|
/local-cookbooks/cyclescape-backups/templates/default/run-backups.sh.erb
|
UTF-8
| 1,796
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# ERB-templated backup script: archives the shared uploads directory,
# dumps the database, and produces an anonymised copy of the dump (personal
# fields wiped, password reset) for safe distribution. MD5 digests are
# written alongside every artifact. Runs under 'set -e': any failed step
# aborts the backup.
set -e
echo "Back up the shared directory, which includes uploaded images and documents"
echo `date`
tar -cjpf <%= @shared_filename %> -C /var/www/cyclescape/shared system
openssl dgst -md5 <%= @shared_filename %> > <%= @shared_filename + ".md5" %>
echo "Back up the only recently changed shared files"
echo `date`
pushd /var/www/cyclescape/shared/system
find . -type f -mtime -2 -print0 | tar -cjpf <%= @recent_filename %> --null -T -
popd
openssl dgst -md5 <%= @recent_filename %> > <%= @recent_filename + ".md5" %>
echo "Back up the database"
echo `date`
pg_dump <%= @database %> -Z 9 -f <%= @dbdump_filename %>
openssl dgst -md5 <%= @dbdump_filename %> > <%= @dbdump_filename + ".md5" %>
echo "Create an Anonymous version"
echo `date`
# Create the scratch database only if it does not already exist.
echo "SELECT 'CREATE DATABASE <%= "#{@database}_backup" %>' WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = '<%= "#{@database}_backup" %>')\gexec" | psql
gunzip -c <%= @dbdump_filename %> | psql <%= "#{@database}_backup" %>
# Wipes personal info and sets the password to "password"
# (fixed: the trailing comma after encrypted_password made the UPDATE
# invalid SQL, so psql failed and 'set -e' aborted the backup here)
psql -d <%= "#{@database}_backup" %> -c "UPDATE users SET full_name = CONCAT('someone+', id),
       email = CONCAT('someone+', id, '@example.com'),
       confirmation_token = NULL,
       reset_password_token = NULL,
       last_seen_at = NULL,
       public_token = CONCAT('token', id),
       api_key = CONCAT('api_key', id),
       encrypted_password = '\$2a\$10\$Wp3qoMvrRxNOAeZTbaNuPepxGlZe/KrzhDy8qU9l5tA7tQxICSY1K'
  WHERE NOT role = 'admin'
"
psql -d <%= "#{@database}_backup" %> -c "UPDATE users SET display_name = CONCAT('mydisplay_name+', id) WHERE display_name IS NOT NULL"
pg_dump <%= "#{@database}_backup" %> -Z 9 -f <%= @anon_dbdump_filename %>
openssl dgst -md5 <%= @anon_dbdump_filename %> > <%= "#{@anon_dbdump_filename}.md5" %>
dropdb <%= "#{@database}_backup" %>
echo "Finished"
echo `date`
| true
|
2f0954e5e1bc0afddd3b767f45c99c20843f22f1
|
Shell
|
natsuya-niki/docker-sample-php7.3-mysql5.7-redisc
|
/dockerfiles/redis/redis.sh
|
UTF-8
| 3,015
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Bring up a minimal Redis cluster inside the container: start three
# cluster-mode redis-server instances, wait until they all answer, then
# create the cluster. (Comments translated from Japanese.)
# Remove stale log and cluster-state files left over from a previous run.
rm -f \
  /data/conf/r7000i.log \
  /data/conf/r7001i.log \
  /data/conf/r7002i.log \
  /data/conf/r7003i.log \
  /data/conf/r7004i.log \
  /data/conf/r7005i.log \
  /data/conf/nodes.7000.conf \
  /data/conf/nodes.7001.conf \
  /data/conf/nodes.7002.conf \
  /data/conf/nodes.7003.conf \
  /data/conf/nodes.7004.conf \
  /data/conf/nodes.7005.conf ;
# Start the redis-server instances in cluster mode (the cluster-mode
# settings live in redis.conf).
# Each instance must be given its own nodes.****.conf cluster-config file.
redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf --daemonize yes ;
redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf --daemonize yes ;
redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf --daemonize yes ;
#redis-server /data/conf/redis.conf --port 7003 --cluster-config-file /data/conf/nodes.7003.conf --daemonize yes ;
#redis-server /data/conf/redis.conf --port 7004 --cluster-config-file /data/conf/nodes.7004.conf --daemonize yes ;
#redis-server /data/conf/redis.conf --port 7005 --cluster-config-file /data/conf/nodes.7005.conf --daemonize yes ;
REDIS_LOAD_FLG=true;
# Loop until every redis-server instance is up and answering 'info'
# (a non-empty r<port>i.log means the instance responded).
while $REDIS_LOAD_FLG;
do
  sleep 1;
  redis-cli -p 7000 info 1> /data/conf/r7000i.log 2> /dev/null;
  if [ -s /data/conf/r7000i.log ]; then
    :
  else
    continue;
  fi
  redis-cli -p 7001 info 1> /data/conf/r7001i.log 2> /dev/null;
  if [ -s /data/conf/r7001i.log ]; then
    :
  else
    continue;
  fi
  redis-cli -p 7002 info 1> /data/conf/r7002i.log 2> /dev/null;
  if [ -s /data/conf/r7002i.log ]; then
    :
  else
    continue;
  fi
#  redis-cli -p 7003 info 1> /data/conf/r7003i.log 2> /dev/null;
#  if [ -s /data/conf/r7003i.log ]; then
#    :
#  else
#    continue;
#  fi
#  redis-cli -p 7004 info 1> /data/conf/r7004i.log 2> /dev/null;
#  if [ -s /data/conf/r7004i.log ]; then
#    :
#  else
#    continue;
#  fi
#  redis-cli -p 7005 info 1> /data/conf/r7005i.log 2> /dev/null;
#  if [ -s /data/conf/r7005i.log ]; then
#    :
#  else
#    continue;
#  fi
  # Once all servers answer, create the cluster (assign slots/replicas).
  # Using 127.0.0.1 here caused problems with the PHP client, so the
  # container's public IP is used instead.
#  yes "yes" | redis-cli --cluster create 172.16.239.10:7000 172.16.239.10:7001 172.16.239.10:7002 172.16.239.10:7003 172.16.239.10:7004 172.16.239.10:7005 --cluster-replicas 1;
#  yes "yes" | redis-cli --cluster create 172.16.239.10:7000 172.16.239.10:7001 172.16.239.10:7002 172.16.239.10:7003 172.16.239.10:7004 172.16.239.10:7005;
  yes "yes" | redis-cli --cluster create 172.16.239.10:7000 172.16.239.10:7001 172.16.239.10:7002;
  REDIS_LOAD_FLG=false;
done
| true
|
47bb8a1806ee6f7118e1073806bb26f7ce63cf70
|
Shell
|
fernfernfern/circleci-orbs
|
/scripts/dev-release.sh
|
UTF-8
| 687
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -eo pipefail
# Publish a dev-tagged build of every orb under src/ to the circleci
# namespace as circleci/<orb>@dev:<branch>.
#
# The commented block below is the previous behavior (publish only orbs
# touched in the commit range); kept for reference.
# COMMIT_RANGE=$(echo $CIRCLE_COMPARE_URL | sed 's:^.*/compare/::g')
# echo "Commit range: " $COMMIT_RANGE
# UPDATED_ORBS=$(git diff $COMMIT_RANGE --name-status | \
#     sed -E 's/.*src\/(.+)\/.*/\1/' | \
#     grep -vE '^[A-Z!?]{1,2}\s+')
# for ORB in $UPDATED_ORBS; do
for ORB in src/*/; do
    echo "Attempting to publish ${ORB} as circleci/${ORB}@dev:${CIRCLE_BRANCH}"
    # Quote all expansions so paths survive word-splitting; pass the API
    # token only when one is set in the environment.
    if [[ -z "$CIRCLECI_API_TOKEN" ]]; then
        circleci orb publish dev "$ORB/orb.yml" circleci "$(basename "$ORB")" "dev:${CIRCLE_BRANCH}"
    else
        circleci orb publish dev "$ORB/orb.yml" circleci "$(basename "$ORB")" "dev:${CIRCLE_BRANCH}" --token "$CIRCLECI_API_TOKEN"
    fi
done
| true
|
0a5275126985f13e91a6cf3a03797aaf15eb9389
|
Shell
|
dtonthat2/chem160module21
|
/checkrange2.bash
|
UTF-8
| 166
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Validate that the range "$1 to $2" is reasonable: exactly two arguments,
# lower bound >= 100 and upper bound <= 1000. Exits 1 with a message
# otherwise. The argument-count guard avoids the '[: unary operator
# expected' error the original produced when arguments were missing.
if [ "$#" -eq 2 ] && [ "$1" -ge 100 ] && [ "$2" -le 1000 ]; then
    echo "Arguments give reasonable range"
else
    echo "Range given ($1 to $2) not in (100 to 1000)"
    exit 1
fi
| true
|
d8ac59280b85945e1dcc7d358a158f25019f8199
|
Shell
|
lsst-camera-dh/mutils
|
/bin/plotTestShorts.sh
|
UTF-8
| 1,364
| 3.828125
| 4
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
#
# Plot REB "test shorts" trending data: builds channel-name regexes for the
# given REB path and hands them to trender.py with the requested start time
# and duration.
#------------------------------------------------------------------------
function usage {
    cat <<-EOM
	Usage ${0##*/} rebPath startTime
	    rebPath ~ <subsystem>/<bay>/Reb[012]
	    quote time if it contains spaces
	Options:
	  -h  (print help msg)
	  -d <duration>
	EOM
    exit 1
}
if [ $# -lt 2 ]; then
    usage
fi
#-- process commandline options
#
duration=
while getopts "hswd:" Option
do
    case $Option in
        h ) usage;;
        d ) duration=$OPTARG;;
        s ) savePlot="yes";;
        w ) waitTime="yes";;
        # NOTE(review): ${ECHO} is never defined in this script, so this
        # arm would expand to just the message string — confirm intent.
        * ) ${ECHO} "Unimplemented option chosen.";;   # Default.
    esac
done
shift $((OPTIND - 1))
# Channel-name regexes rooted at the REB path ($1).
declare -a regexes
regexes+=(${1}'/[DA].*I')       # digital & analog currents
regexes+=(${1}'/[CO].*I')       # clocks and OD currents
regexes+=(${1}'/[PSR].*[UL]$')  # clock levels
regexes+=(${1}'/S.*/.*V$')      # bias voltages
# Default the duration to 8s when -d was not given.
if [ $duration"XXX" == "XXX" ] ; then
    duration=8s
fi
# With -w, wait out the measurement window before plotting.
if [ $waitTime ] ; then
    sleep $duration
    sleep 8
fi
sarg=
# With -s, derive a PNG filename from the REB path ('/' -> '_', '^' removed).
if [ $savePlot ] ; then
    sarg=" --save "$(echo -n $1 | sed 's?/?_?g' | sed 's?\^??' )"_TestShorts.png"
fi
site=
if [[ $HOSTNAME"XXX" =~ .*slac.stanford.edu"XXX" ]] ; then
    site="slac"
else
    site="localhost"
fi
trender.py --site ${site} --lay 4x1 --out ${sarg} --start "${2}" --title "testCCDShorts:${1}" --overlayreg --plot --dur $duration --fmt 'o-' -- "${regexes[@]}"
| true
|
97a1d217c3ceefd9c619c23593adfdf7baf71ac1
|
Shell
|
mebbert/Dark_and_Camouflaged_genes
|
/scripts/00_GET_BAMS/realign_minimap2.ogs
|
UTF-8
| 1,364
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
#$ -cwd
#$ -N realign
#$ -q <queue>
#$ -M email@institution.edu
#$ -pe threaded 16
#$ -l h_vmem=32G
#$ -j y
#$ -notify

# Realign a PacBio BAM against a new reference with minimap2:
# BAM -> FASTQ -> minimap2 SAM -> sorted, indexed BAM.
# Usage: realign_minimap2.ogs <bam> <ref.fa> <ref_tag> <out_dir>

source $HOME/.bash_profile

bam=$1
ref=$2
ref_tag=$3
out=$4

mkdir -p "$out"
echo "`date` Realign $1 running on `hostname`"

# Recover the sample name from the SM: field of the last @RG header line.
regex="SM:([A-Za-z0-9_\-]+)"
RG=$(samtools view -H "$bam" | grep '^@RG' | tail -1)
[[ $RG =~ $regex ]]
sampleName=${BASH_REMATCH[1]}
RG=${RG// /\\t}
echo "`date` Realigning sample: $sampleName"

TMP_DIR="/tmp/$JOB_ID"
mkdir -p "$TMP_DIR"

fq="${TMP_DIR}/${sampleName}.fastq"
echo "`date` Converting to Fastq:"
echo "`date` bedtools bamtofastq -i $bam -fq $fq"
bedtools bamtofastq -i "$bam" -fq "$fq"

tmp_sam="${TMP_DIR}/${sampleName}.unsorted.sam"
echo "`date` Aligning to $ref_tag:"
echo "`date` minimap | samtools view -hb > $tmp_sam"
minimap2 -a \
    -t 16 \
    -x map-pb \
    --eqx \
    -L \
    -O 5,56 \
    -E 4,1 \
    -B 5 \
    --secondary=no \
    -z 400,50 \
    -r 2k \
    -Y \
    "$ref" \
    "$fq" \
    > "$tmp_sam"

final_bam="${out}/${sampleName}.${ref_tag}.bam"
echo "`date` Sorting final bam:"
echo "`date` samtools view -hb $tmp_sam | samtools sort -@ 16 -m 16G > $final_bam"
# Fixed: the original passed the undefined variable $tmp_bam to
# 'samtools sort' (it expanded to nothing, so sort happened to read the
# pipe); the sort now explicitly consumes the piped stream, and the log
# lines above no longer reference the undefined variable.
samtools view -hb "$tmp_sam" | samtools sort -@ 16 -m 16G > "$final_bam"

echo "`date` Indexing final bam:"
echo "`date` samtools index $final_bam"
samtools index -@ 16 "$final_bam"

echo "`date` Validating bam:"
samtools quickcheck "$final_bam"
echo "`date` DONE!"

rm -rfv "$TMP_DIR"
| true
|
1c38fb7237817793b846e855623f63e042561ef1
|
Shell
|
wdwatkins/pgml_temperature_prediction
|
/slurm/lake_optimize.slurm
|
UTF-8
| 1,317
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# SLURM array job: runs R/lake_optimize.R for each array task, optionally
# offsetting the task id by 5000 (selected via the first script argument,
# "offset_yes" / "offset_no"), then copies results off local scratch.
#SBATCH --job-name=kabet   # name that you chose
#SBATCH -p normal  # the partition you want to use, for this case prod is best
#SBATCH -A cida   # your account
#SBATCH -o shellLog/slurm-%A_%a.out
#SBATCH --time=00:10:00 # time at which the process will be cancelled if unfinished
#SBATCH --mail-type=ALL
#SBATCH --mail-user=wwatkins@usgs.gov
#SBATCH --export=ALL
#SBATCH --array=1-2 # process IDs
#SBATCH -n 1
offset_task_id=${1}
echo $offset_task_id
if [[ $offset_task_id == "offset_yes" ]]
then
    # Shift the task id by 5000 for the offset batch.
    task_id_adj=$(echo ${SLURM_ARRAY_TASK_ID} + 5000 | bc)
    # NOTE(review): job_offset is assigned but never used below — confirm.
    job_offset=5000
elif [[ $offset_task_id == "offset_no" ]]
then
    task_id_adj=$(echo ${SLURM_ARRAY_TASK_ID})
else
    echo "invalid argument: use offset_yes or offset_no"
    exit 1 #terminate with error
fi
#TODO: pass in offset arg to r script; Now works, just need to add logic
#TODO: be smarter about lake id in file name do we still need to write to local scratch with less output?
module load tools/netcdf-4.3.2-gnu
mkdir rlogs/${SLURM_ARRAY_JOB_ID}_${SLURM_ARRAY_TASK_ID}
# Run the optimizer, forwarding the offset argument to R via --args.
R CMD BATCH --no-restore --no-save '--args '$1'' R/lake_optimize.R rlogs/${SLURM_ARRAY_JOB_ID}_${SLURM_ARRAY_TASK_ID}/rlog.Rlog
# Copy the task's output directory from local scratch to shared storage.
cp -r ${LOCAL_SCRATCH}/nhd_120052238_optim_"$task_id_adj" /lustre/projects/water/owi/booth-lakes/out_kabet
sync
| true
|
16a151cbad448e1b254f8f371ba5719e066e08ae
|
Shell
|
sibis-platform/ncanda-data-integration
|
/scripts/crond/front-hourly-script
|
UTF-8
| 2,225
| 3.5625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash -l
##
##  See COPYING file distributed along with the ncanda-data-integration package
##  for the copyright and license terms
##
# Hourly cron driver: runs phantom QA against XNAT and imports laptop data
# into REDCap, then updates REDCap summary scores.
# Options: -p (forwarded to QA scripts), -e (end-of-day), -i (init-day).
# NOTE(review): this qa_args="$*" capture is overwritten unconditionally a
# few lines below, before ever being used — looks like dead code; confirm.
qa_args="$*"
# Set the SIBIS environment variable to the data integration repo
export SIBIS=`realpath $(dirname $0)/../../`
XNAT_FLAG=1
IMPORT_FLAG=1
[ -r $HOME/.bashrc ] && . $HOME/.bashrc
# Import some useful functions
# (crontools.sh provides catch_output_email, used below)
export SCRIPT_LABEL="front-hourly"
. $(dirname $0)/crontools.sh
LOG_DIR=${SIBIS_ANALYSIS_DIR}log/front-hourly
qa_args=""
# Clean this up later
initDayFlag=false
endDayFlag=false
while getopts :pie opt; do
    case $opt in
        p) qa_args="${qa_args} -p";;
        e) #end of day
           endDayFlag=true;;
        i) #init day
           initDayFlag=true
           qa_args="${qa_args} -t $LOG_DIR";;
        ?) echo "script usage: $(basename $0) [-p] [-e] [-i]" >&2
           exit 1;;
    esac
done
shift "$(($OPTIND -1))"
# echo "INFO:${SCRIPT_LABEL}:Parameter setting $qa_args $initDayFlag $endDayFlag"
# Run QA on fBIRN and ADNI phantom scans
if [ ${XNAT_FLAG} == 1 ]; then
    catch_output_email "${SCRIPT_LABEL}:XNAT: QA Phantoms (phantom_qa)" ${SIBIS}/scripts/xnat/phantom_qa ${qa_args}
fi
# Import data from the sites' data capture laptops into REDCap and reconcile imported with longitudinal data
if [ ${IMPORT_FLAG} == 1 ]; then
    lastWeek=""
    # do that so that if the script was interrupted the svn update is performed again - otherwise would loose data
    if $endDayFlag; then
        lastWeek="--last-week"
    fi
    catch_output_email "${SCRIPT_LABEL}:Import Laptop: Data Stage 1 (harvester)" ${SIBIS}/scripts/import/laptops/harvester --only-converter-post-to-github ${qa_args} ${lastWeek}
    catch_output_email "${SCRIPT_LABEL}:Import Self-Report (selfreport2csv)" \
        ${SIBIS}/scripts/import/laptops/selfreport2csv /fs/storage/laptops/imported --no-output ${qa_args}
else
    if $initDayFlag; then
        echo "${SCRIPT_LABEL}: Warning: Import from laptops disabled !"
    fi
fi
#
# Previouly front-nighlty
#
# REDCap updates
update_args=""
if $initDayFlag; then
    update_args+="--update-all"
fi
catch_output_email "${SCRIPT_LABEL}:REDCap: Update Scores (redcap_update_summary_scores)" $SIBIS_PYTHON_PATH/sibispy/cmds/redcap_update_summary_scores.py ${update_args} ${qa_args}
| true
|
b9111106b7968b924f323507138d0a2cc7a357b5
|
Shell
|
iaintshootinmis/CitrixConfig
|
/citrixfix
|
UTF-8
| 2,022
| 4.03125
| 4
|
[] |
no_license
|
#! /bin/bash
#######################################################################################################
# Author:   Justin McAfee                                                                             #
# Contact:  me@justinmcafee.com                                                                       #
# Purpose:  Download certificates for Citrix Recevier, move to folder and set perms.                  #
# Date:     24JAN2021                                                                                 #
# Revision: 3                                                                                         #
#######################################################################################################
# Global configuration shared by the functions below.
CertPath=/opt/Citrix/ICAClient/keystore/cacerts/   # ICA client keystore directory
Cert=entrust_g2_ca.cer                             # Entrust root certificate filename
CertURL=https://web.entrust.com/root-certificates/$Cert
# NOTE(review): ReceiverURL is never referenced below and its value looks
# truncated ("https://") -- confirm whether it can be removed.
ReceiverURL=https://
# Show the usage text and exit 0 when called with --help; do nothing otherwise.
checkHelp(){
case "$1" in
--help)
printf "\nUsage: `basename $0` attempts to find a running instance of Citrix Workspace and download and configure the associated SSL certificates.\n"
exit 0
;;
esac
}
# Abort unless running as root. Callers rely on a non-zero exit status here.
checkroot(){
if [ "$(whoami)" != root ]; then
printf "Please re-run with sudo.\n"
# BUG FIX: a bare `exit` returned printf's status (0), so a failed
# privilege check exited as success; report failure explicitly.
exit 1
fi
}
# Exit the script if the previously executed command failed.
# NOTE: must be called immediately after the command it guards, since it
# inspects the caller's $? at function entry.
errorcheck(){
if [ $? -ne 0 ];then
printf " An error was encountered. Closing\n"
# BUG FIX: a bare `exit` here propagated printf's status (0), so
# failures looked like success to whoever ran the script.
exit 1
fi
return
}
# Verify the Citrix Receiver daemon (ctxlogd) is running and that the
# certificate is not already installed; exits early in either case so the
# download/permission steps only run when they are actually needed.
checkRcvr(){
service ctxlogd status
if [ $? -ne 0 ];then
printf "\n Citrix Reciever not running. User must manually install Citrix for Linux from https://www.citrix.com/downloads/workspace-app/linux/workspace-app-for-linux-latest.html \n"
exit
else
printf "\n Citrix Reciever is already installed. Checking for Certs.\n"
# Cert already present: nothing to do (suggest manual rehash instead).
if [ -f "$CertPath/$Cert" ];then
printf "\n Certificates already in place.\n If ICAs are presenting SSL errors, try running ctx_rehash in /opt/Citrix/ICAClient/util/ctx_rehash manually.\n"
exit
fi
fi
}
# Download the Entrust root certificate straight into the keystore directory.
# errorcheck aborts the script if wget fails.
downloadCert(){
cd $CertPath || exit
wget $CertURL
errorcheck
}
# Make the downloaded certificate readable by everyone (owner rw, others r),
# which is equivalent to octal mode 644.
setPerms(){
chmod u=rw,go=r "$Cert"
errorcheck
}
# Rebuild the keystore hash symlinks so the ICA client picks up the new cert.
relinkCerts(){
/opt/Citrix/ICAClient/util/ctx_rehash
errorcheck
}
# Final success message; only reached when every previous step succeeded.
Finished(){
printf "\n Citrix Receiver has placed $Cert in $CertPath and successfully rehashed the files.
ICAs with SSL errors should now work as intended.\n"
}
# Main flow: optional help, privilege check, verify Receiver is running and
# the cert is missing, then download, set permissions, rehash, and report.
checkHelp $1
checkroot
checkRcvr
downloadCert
setPerms
relinkCerts
Finished
| true
|
96b271438725c1d1b996c32ed340f5969ea6595f
|
Shell
|
jaysencryer/SysSet
|
/Hardware.fun
|
UTF-8
| 10,229
| 3.4375
| 3
|
[] |
no_license
|
#! /bin/sh
#
# Hardware.fun V1
#
# 31/7/2002 :- altered InstUPS to be less ambiguous
# altered InstNetCard to bomb out with proper error message
# if config file not complete.
#
# 13/10/2004 :- v3.000b updated gigabit driver
# Diagnostic stub: writes "Banana" to stdout and reports failure (status 1).
TestFun () {
printf '%s\n' "Banana"
return 1
}
################################################################################
#
# InstTape
#
################################################################################
# Configure a SCO tape device and register its capacity in /etc/default/tar.
#   $1 - tape size in KB (e.g. 40000000 for 40 GB)
#   $2 - "standard" or "non_standard" adapter setup (AUTO mode only)
# AUTO mode drives `mkdev tape` non-interactively via MakeHere; INT mode
# prompts the operator. Both modes rewrite the archive8 entry in
# /etc/default/tar with the chosen size.
InstTape () {
_tapesize=$1
_tapesetup=$2
case $RunMode in
"AUTO")
if [ $_tapesetup ]; then
case $_tapesetup in
"standard")
# Default adapter wiring: adapter 0, id 0, bus 2, target 0.
_adapt_options=",0,0,2,0"
;;
"non_standard")
# Adapter details must come from the config file (tp_* variables).
if [ ! -z "$tp_adap" ]; then
_adapt_options="$tp_adap,$tp_adapid,$tp_bus,$tp_id,0"
else
setcolor $ERROR_COL
echo "You have specified non-standard setup but not entered any adapter setup info"
exit 1
fi
;;
*)
echo "Tape not setup as I do not understand the tapesetup request!"
exit 1
;;
esac
# Canned mkdev answer strings per drive capacity (fed to MakeHere).
if [ $_tapesize = 20000000 ]; then
_here_opts="1,1,$_adapt_options,y,HP,2,2,4,,q,q,q,n"
elif [ $_tapesize = 40000000 ]; then
_here_opts="1,1,$_adapt_options,y,HP,2,2,1,,q,q,q,n"
elif [ $_tapesize = 100000000 ]; then
_here_opts="1,1,$_adapt_options,y,HP,3,2,1,,q,q,q,n"
else
_here_opts="1,1,$_adapt_options,y,,,,4,,q,q,q,n"
fi
MakeHere "mkdev tape >>$g_TEMP/hardware.log 2>&1" $_here_opts
# Replace the archive8 line in /etc/default/tar with the new size.
cp /etc/default/tar /etc/default/tar.old
grep -v "archive8" /etc/default/tar > /tmp/tarno8
echo "archive8=/dev/rct0 20 $_tapesize y" >> /tmp/tarno8
cp /tmp/tarno8 /etc/default/tar
else
echo "There is no tapesetup information available"
echo "aborting tape setup information"
exit 1
fi
;;
"INT")
# Interactive path: let mkdev prompt, then ask for the capacity.
mkdev tape
echo "1 - 4 GB Dat\n2 - 12 GB Dat\n3 - 20 GB Dat\n4 - 40 GB Dat (DLT)\n5 - 100 GB Viper\n(Default) 40 GB Dat\n"
GetAns "Please select tape size" 1 2 3 4 5
answer=$?
case $answer in
1)
_tapesize=4000000
;;
2)
_tapesize=12000000
;;
4)
_tapesize=40000000
;;
5)
_tapesize=100000000
;;
*)
# NOTE(review): choice 3 (20 GB) falls through to this default of 40 GB --
# confirm whether a _tapesize=20000000 arm is missing.
_tapesize=40000000
;;
esac
cp /etc/default/tar /etc/default/tar.old
grep -v "archive8" /etc/default/tar > /tmp/tarno8
echo "archive8=/dev/rct0 20 $_tapesize y" >> /tmp/tarno8
cp /tmp/tarno8 /etc/default/tar
;;
esac
}
################################################################################
#
# InstUPS <port device>
#
################################################################################
# Install the APC PowerChute UPS software.
#   $1 - serial port device the UPS is attached to (e.g. /dev/tty1a)
# AUTO mode answers the installer via MakeHere and patches the configured
# serial port in powerchute.ini; INT mode runs the installer interactively.
InstUPS () {
_port=$1
if [ $_port ]; then
case $RunMode in
"AUTO")
echo "Please unplug UPS from com port"
UnpackFiles UPS
# UnpackFiles returning 1 means nothing to install: stop quietly.
[ $? = 1 ] && { exit 0; }
cd $ProgFiles/UPS
MakeHere "./INSTALL >>$g_TEMP/hardware.log 2>&1" 1,,n,3,2,y,6,n,,2,,y,E
# Point PowerChute at the requested serial port instead of the default.
if [ -e /usr/lib/powerchute/powerchute.ini ]; then
repline /usr/lib/powerchute/powerchute.ini " PortName = /dev/tty2a" " PortName = $_port"
fi
;;
"INT")
UnpackFiles UPS
[ $? = 1 ] && { exit 0; }
cd $ProgFiles/UPS
./INSTALL
;;
esac
else
echo "Error - not setting up UPS software"
return 1
fi
}
################################################################################
#
# HowManyCards
#
# works out number network adapters in machine using HWdetected file
# The runs RunNetInst if there are multiple cards ( this seperates the
# different fields ). InstNetCard is called if there is just one card
#
################################################################################
# Count the network adapters recorded in $g_TEMP/HWdetected and dispatch:
# multiple cards go through RunNetInst (which splits the per-card lists),
# a single card goes straight to InstNetCard, zero cards is an error.
# Fields 4/8/14 of the colon-separated detection record hold device, id
# and adapter name respectively.
HowManyCards () {
_number_cards=`grep -c "Network Adapter" $g_TEMP/HWdetected`
_adapter=`grep "Network Adapter" $g_TEMP/HWdetected | awk -F : '{print $14}'`
_device=`grep "Network Adapter" $g_TEMP/HWdetected | awk -F : '{print $4}'`
_id=`grep "Network Adapter" $g_TEMP/HWdetected | awk -F : '{print $8}'`
if [ $_number_cards -gt 1 ]; then
# Multi-card: pass each list as one quoted argument (split later).
RunNetInst $_number_cards "$_adapter" "$_device" "$_id"
elif [ $_number_cards = 1 ]; then
InstNetCard 0 $_adapter $_device $_id
elif [ $_number_cards = 0 ]; then
echo "You have no Network cards in your machine"
return 1
fi
}
################################################################################
#
# RunNetInst <number of cards> <adapter types> <devices> <id (is it gig?)>
#
# seperates the multiple adapter strings and devices
# and calls InstNetCard for each card
#
################################################################################
# Install several network cards.
#   $1 - number of cards
#   $2 - space-separated adapter names   (one field per card)
#   $3 - space-separated device numbers  (one field per card)
#   $4 - space-separated ids             (one field per card)
# Splits each list with GetField and calls InstNetCard once per card
# (card 0 becomes net0, card 1 becomes net1, ...).
RunNetInst () {
_number_cards=$1
_total=$#
# BUG FIX: the original computed _target=`expr $_number_cards \* 2 + 1`
# and then ran [ _total -ne _target ] -- comparing the literal strings,
# not the values -- so the arity check never fired. The only caller
# (HowManyCards) always passes the three per-card lists as quoted
# arguments, i.e. exactly four positional parameters, so check for that.
if [ "$_total" -ne 4 ]; then
# We have an error here!!!
echo "RunNetInst () aborted: wrong number arguments"
exit 1
else
_count=1
while [ $_number_cards -gt 0 ]
do
# note - this works in this way
# essentially we are passing GetField 1 ADAP1 ADAP2 ADAP3
_adapter=`GetField $_count $2`
_device=`GetField $_count $3`
_id=`GetField $_count $4`
_num=`expr $_count - 1`
_count=`expr $_count + 1`
_number_cards=`expr $_number_cards - 1`
echo "Installing Network card: $_adapter on net$_num" >> $g_TEMP/hardware.log
InstNetCard $_num $_adapter $_device $_id
done
fi
}
################################################################################
#
# GetField n <argument list>
# returns the nth argument from the list (which should be seperated by spaces)
#
################################################################################
# Echo the n-th item of a space-separated argument list.
#   $1   - 1-based index of the wanted field
#   $2.. - the list to pick from
# Prints an empty line when the index is past the end of the list.
GetField () {
_field=$1
shift
# Drop leading arguments until the requested field is first.
while [ "$_field" -gt 1 ]
do
shift
_field=$((_field - 1))
done
echo $1
}
################################################################################
#
# InstNetCard [ number ] [ adapter ] [ device ]
#
# number : 0 ... n - number of network card : ie net0
# adapter : name of adapter
#
################################################################################
InstNetCard () {
# This installs the network card
_number=$1
_adapter=$2
_device=$3
_id=$4
HWosver=`uname -v`
INSTALL_DIR=`ls /var/opt/K/SCO/lli | grep $HWosver`
INSTALL_DIR="/var/opt/K/SCO/lli/$INSTALL_DIR"
# determine which card
if [ $_adapter ]; then
if [ `ifconfig -a | grep -c "net$_number"` -ne 0 ]; then
echo "Error: Already network card configured as net$_number!"
return 1
fi
case $_adapter in
"D-link")
CustomInst software D-Link:d5B $root_dir/Drivers/DLINK
ADAPTYPE="D-Link DFE-530TX PCI Fast Ethernet Adapter."
cp $root_dir/Drivers/DLINK/net0 ${INSTALL_DIR}/sysdb/net$_number
;;
"ALLIED")
ADAPTYPE="AT-2500TX Fast Ethernet Adapter Driver."
cp -r $root_dir/Drivers/AT2500/r8e ${INSTALL_DIR}/ID
cp $root_dir/Drivers/AT2500/net0 ${INSTALL_DIR}/sysdb/net$_number
cd ${INSTALL_DIR}/ID/r8e
chmod +x r8e.h
chmod +x Driver.o
chmod +x System
chmod +x Node
chmod +x Master
chmod +x Space.c
chmod +x space.h
chmod +x lkcfg
cd AOF
chmod +x r8e
;;
"3Com")
ADAPTYPE="3Com 3C9x whichever corresponds to your NIC"
cp $root_dir/Drivers/3COM/net0 ${INSTALL_DIR}/sysdb/net$_number
CustomInst software 3Com:e3H $root_dir/Drivers/3COM
chmod 777 ${INSTALL_DIR}/sysdb/net$_number
repline ${INSTALL_DIR}/sysdb/net$_number "\tSELECT=14" "\tSELECT=$_device"
if [ $HWosver = "5.0.5" ]; then
echo "Your Network card may not be supported by 5.0.5."
echo "If your network card is non-functional on reboot."
echo "please install manually using correct drivers."
echo "- Note, this feature will be implemented if requested."
fi
;;
"Intel")
case $_id in
"0x1010"|"0x100d")
if [ $HWosver = "5.0.5" ]; then
setcolor $ERROR_COL
echo "Gigabit NIC's are not supported by 5.0.5 or lower"
echo "these drivers can not be installed!"
return 1
else
ADAPTYPE="Intel Gbit"
CustomInst software SCO:eeG $root_dir/Drivers/InteleeG
cp $root_dir/Drivers/InteleeG/net0 ${INSTALL_DIR}/sysdb/net$_number
chmod 777 ${INSTALL_DIR}/sysdb/net$_number
repline ${INSTALL_DIR}/sysdb/net$_number "\tSELECT=4" "\tSELECT=$_device"
fi
;;
*)
ADAPTYPE="Intel"
cp $root_dir/Drivers/INTEL/net0 ${INSTALL_DIR}/sysdb/net$_number
chmod 777 ${INSTALL_DIR}/sysdb/net$_number
repline ${INSTALL_DIR}/sysdb/net$_number "\tSELECT=3" "\tSELECT=$_device"
;;
esac
;;
*)
echo "Network card does not appear to be supported"
return 1
;;
esac
echo "Installing $ADAPTYPE on Dev $_device as net$_number"
case $RunMode in
"AUTO")
[ $g_TEMP/hardware.cfg ] && { . $g_TEMP/hardware.cfg; }
if [ -z "$hw_Ipnet0" ]; then
echo "hardware.cfg not complete. Aborting network card install"
exit 1
fi
IQM_DOMAIN_NAME=$hw_Domainnet0
IQM_TCP_IPADDR=`grep "hw_Ipnet$_number=" $g_TEMP/hardware.cfg | awk -F = '{print $2}'`
IQM_TCP_NETMASK=`grep "hw_Masknet$_number=" $g_TEMP/hardware.cfg | awk -F = '{print $2}'`
IQM_TCP_BROADCAST=`grep "hw_Broadnet$_number=" $g_TEMP/hardware.cfg | awk -F = '{print $2}'`
if [ $_number -ne 0 ]; then
IQM_SYSTEM_NAME=`grep "hw_HostNamenet$_number=" $g_TEMP/hardware.cfg | awk -F = '{print $2}'`
else
IQM_SYSTEM_NAME=`uname -n`
fi
;;
"INT")
echo "Please enter the following for $_adapter Adapter net$_number."
echo -n "Host name (of card) : "
read IQM_SYSTEM_NAME
echo -n "IP Address : "
read IQM_TCP_IPADDR
echo -n "Broadcast Address : "
read IQM_TCP_BROADCAST
echo -n "Netmask : "
read IQM_TCP_NETMASK
echo -n "Domain : "
read IQM_DOMAIN_NAME
;;
*)
echo "Not a valid runmode for Network Install"
return 1
;;
esac
for i in "$IQM_TCP_IPADDR" "$IQM_TCP_BROADCAST" "$IQM_TCP_NETMASK" "$IQM_DOMAIN_NAME" "$IQM_SYSTEM_NAME"
do
if [ -z "$i" ]; then
if [ $_number = 1 ]; then
echo "Value missing for network set up.\nCheck you Hardware.cfg file"
return 1
fi
echo "Missing IP information - a card was not installed.\nThis may be intentional - please check your Hardware.cfg file"
return 1
fi
done
# we have got this far - so the Variables are all loaded, and ready to go
IQM_FILE=/bin/true
IQM_INSTALL_TYPE="fresh"
DHCP=No
export DHCP IQM_TCP_IPADDR IQM_TCP_BROADCAST IQM_TCP_NETMASK IQM_DOMAIN_NAME IQM_FILE IQM_INSTALL_TYPE IQM_SYSTEM_NAME
netconfig -a sco_tcp#net${_number}
fi
}
| true
|
9299b384a41ec5439312b0ca0335b0915f08eb5e
|
Shell
|
lodenrogue/covid-cli
|
/test/test_county
|
UTF-8
| 1,149
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Test suite for the county mode of ../covid (xunit framework).
# curl is shadowed by a shell function (exported below) so the covid script
# reads fixture data from county/base.csv instead of hitting the network.
source xunit
curl() {
cat county/base.csv
}
# Full listing, no filters.
test_county_all() {
result=$(../covid -t all)
expected=$(cat county/base.csv)
assert_equals "$expected" "$result"
}
# Filter to a single county.
test_single_county() {
result=$(../covid -t snohomish)
expected=$(cat county/single_county.csv)
assert_equals "$expected" "$result"
}
# Case counts, cumulative and daily.
test_single_county_cases_cumulative() {
result=$(../covid -t snohomish -c cumul)
expected=$(cat county/cases_cumul.csv)
assert_equals "$expected" "$result"
}
test_single_county_cases_daily() {
result=$(../covid -t snohomish -c daily)
expected=$(cat county/cases_daily.csv)
assert_equals "$expected" "$result"
}
# Death counts, cumulative and daily.
test_single_county_deaths_cumulative() {
result=$(../covid -t snohomish -d cumul)
expected=$(cat county/deaths_cumul.csv)
assert_equals "$expected" "$result"
}
test_single_county_deaths_daily() {
result=$(../covid -t snohomish -d daily)
expected=$(cat county/deaths_daily.csv)
assert_equals "$expected" "$result"
}
# export -f makes the curl override visible to the ../covid child process.
export -f curl
test_county_all
test_single_county
test_single_county_cases_cumulative
test_single_county_cases_daily
test_single_county_deaths_cumulative
test_single_county_deaths_daily
| true
|
e44ada694ae57298c739f66dad91455e0a7bb23d
|
Shell
|
italiangrid/storm-deployment-test-gpfs
|
/remote/run.sh
|
UTF-8
| 1,429
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Drive a StoRM deployment test.
# Usage: run.sh [MODE] [PLATFORM] STORM_REPO STORAGE_ROOT_DIR HOSTNAME
# MODE defaults to "clean", PLATFORM to "centos6"; the other three are
# required. Dispatches to scripts/run-<MODE>-<PLATFORM>.sh with the
# configuration exported in its environment.
set -ex
trap "exit 1" TERM
MODE=$1
PLATFORM=$2
STORM_REPO=$3
STORAGE_ROOT_DIR=$4
HOSTNAME=$5
MODE="${MODE:-"clean"}"
PLATFORM="${PLATFORM:-"centos6"}"
# Required arguments: fail fast with a message for each missing one.
if [ -z "$STORAGE_ROOT_DIR" ]; then
echo "Need to set STORAGE_ROOT_DIR"
exit 1
fi
if [ -z "$HOSTNAME" ]; then
echo "Need to set HOSTNAME"
exit 1
fi
if [ -z "$STORM_REPO" ]; then
echo "Need to set STORM_REPO"
exit 1
fi
UMD_RELEASE_RPM="${UMD_RELEASE_RPM:-"http://repository.egi.eu/sw/production/umd/4/sl6/x86_64/updates/umd-release-4.1.3-1.el6.noarch.rpm"}"
echo "Setting FQDN hostname as ${HOSTNAME} ..."
hostname ${HOSTNAME}
echo "Create storage root directory ${STORAGE_ROOT_DIR} ..."
mkdir -p ${STORAGE_ROOT_DIR}
echo "Inject storage root directory into configuration ..."
# Replace any existing STORM_DEFAULT_ROOT line in both site configs.
sed -i '/^STORM_DEFAULT_ROOT/d' data/siteinfo/clean/storm.def
echo "STORM_DEFAULT_ROOT=${STORAGE_ROOT_DIR}" >> data/siteinfo/clean/storm.def
sed -i '/^STORM_DEFAULT_ROOT/d' data/siteinfo/update/storm.def
echo "STORM_DEFAULT_ROOT=${STORAGE_ROOT_DIR}" >> data/siteinfo/update/storm.def
echo "Running ${MODE} deployment for ${PLATFORM} with ..."
echo "STORM_REPO: ${STORM_REPO}"
echo "UMD_RELEASE_RPM: ${UMD_RELEASE_RPM}"
echo "STORAGE_ROOT_DIR: ${STORAGE_ROOT_DIR}"
echo "HOSTNAME: ${HOSTNAME}"
# Run the per-mode/per-platform deployment script with the config exported.
STORM_REPO="${STORM_REPO}" \
UMD_RELEASE_RPM="${UMD_RELEASE_RPM}" \
STORAGE_ROOT_DIR="${STORAGE_ROOT_DIR}" \
HOSTNAME="${HOSTNAME}" \
sh scripts/run-${MODE}-${PLATFORM}.sh
| true
|
9484272f0224cc97e62f5832251af3e353ed3ee4
|
Shell
|
yaoyaoding/lecture
|
/2018-01-17-THU-simu/day1/data/chess/fc.sh
|
UTF-8
| 249
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Differential stress tester: generate random inputs with yprod.py forever,
# run both the candidate ($A) and the brute-force reference ($B) on each
# input, and stop at the first mismatch. Prints "<i> Y" plus the candidate's
# output on agreement, "<i> N" on the first disagreement.
A="attack"
B="brute"
for((i=1;;i++))
do
./yprod.py > input
"./${A}" < input > "${A}.out"
"./${B}" < input > "${B}.out"
if ! diff "${A}.out" "${B}.out" > /dev/null
then
echo "$i N"
# BUG FIX: the original used a bare `exit`, which propagated echo's
# status (0), so a detected mismatch still reported success to callers.
exit 1
else
echo "$i Y"
cat "${A}.out"
fi
done
| true
|
e8dfe70b220864d9d68d8c4fad870d6a4834812d
|
Shell
|
b-ggs/i3config
|
/bootstrap.sh
|
UTF-8
| 218
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# Symlink the i3 configuration directories from this repo into $HOME.
# Any existing target (file, dir or old link) is removed first, so the
# repo checkout becomes the single source of truth.
dir=$PWD
installdir=~
files=".i3 i3blocks scripts xrandr"
for file in $files; do
# Remove whatever is there, then link the repo copy into place.
rm -rf $installdir/$file
echo "Linking $dir/$file to $installdir/$file."
ln -s $dir/$file $installdir/$file
done
| true
|
5f93bfe961b6aaac14c0ee9c2dd0b6784dc714f0
|
Shell
|
VernitaJ/git-katas
|
/submodules/setup.sh
|
UTF-8
| 464
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Build the fixture for the submodules kata: a fresh exercise/ directory
# containing two independent git repositories ("component" and "product"),
# each with a single committed header file.
SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )"
echo ${SCRIPT_PATH}
EXERCISE_DIR=${SCRIPT_PATH}/exercise
# Start from a clean slate on every run.
if [ -d ${EXERCISE_DIR} ]; then
rm -rf ${EXERCISE_DIR}
fi
mkdir ${EXERCISE_DIR}
cd ${EXERCISE_DIR}
# Repo 1: the component to be used as a submodule.
mkdir component
cd component
git init
touch component.h
git add component.h
git commit -m "Touch component header."
cd ..
# Repo 2: the product that will consume the component.
mkdir product
cd product
git init
touch product.h
git add product.h
git commit -m "Touch product header."
|
6d7a52ec6f0f96c0d808c081dce98b94c37b4585
|
Shell
|
Samyuktha1995/Action-Detection
|
/utility/splitVideo.sh
|
UTF-8
| 305
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Split sample.mp4 into consecutive 10-second clips named output-<start>.mp4.
# Uses ffprobe to read the total duration, truncates it to whole seconds,
# then re-encodes each window with libx264.
A=0
DURATION="$(ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 sample.mp4)"
# Drop the fractional part (e.g. "63.45" -> "63").
DURATION=${DURATION%.*}
CLIPLENGTH=10
while [ $A -le $DURATION ]
do
# -ss seeks to the clip start; -t limits the clip length.
ffmpeg -ss $A -i sample.mp4 -t $CLIPLENGTH -c:v libx264 output-${A}.mp4;
let "A=A+$CLIPLENGTH"
done
| true
|
781cf2978b0815c7ec6cf4200ca809cc7405c6c5
|
Shell
|
espressif/esp-afr-sdk
|
/tools/windows/tool_setup/build_installer.sh
|
UTF-8
| 1,306
| 4.09375
| 4
|
[
"GPL-2.0-only",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
# Script to build the IDF Tools installer for Windows with Inno Setup.
# This script should be executed inside wine-innosetup docker image.
#
# - Downloads all tools to install into the "dist/" directory
# - Downloads 7z and idf_versions.txt
# - Runs ISCC under wine to compile the installer itself
set -e
set -u
# NOTE(review): under `set -e`, `which iscc` failing aborts the script before
# the friendly message below can print -- confirm whether that is intended.
iscc_path=$(which iscc)
if [[ -z "$iscc_path" ]]; then
echo "Inno setup compiler (iscc) not found. Are you running wine-innosetup Docker image?"
exit 1
fi
# Derive IDF_PATH from the script's location when not provided by the caller.
if [[ -z "${IDF_PATH:-}" ]]; then
export IDF_PATH=$(cd ../../../; pwd)
echo "Assuming IDF_PATH: ${IDF_PATH}"
fi
echo "Downloading IDF Tools..."
mkdir -p idf_tools_tmp
export IDF_TOOLS_PATH=$PWD/idf_tools_tmp
# Two passes: primary tools.json, then the fallback manifest.
$IDF_PATH/tools/idf_tools.py --non-interactive download --platform Windows-x86_64 all
$IDF_PATH/tools/idf_tools.py --tools-json tools_fallback.json --non-interactive download --platform Windows-x86_64 all
mkdir -p dist
cp idf_tools_tmp/dist/* dist/
echo "Downloading 7z..."
mkdir -p unzip
pushd unzip
wget --no-verbose -O 7z1900-extra.7z https://www.7-zip.org/a/7z1900-extra.7z
7zr e -y 7z1900-extra.7z
popd
echo "Downloading idf_versions.txt..."
wget --no-verbose -O idf_versions.txt https://dl.espressif.com/dl/esp-idf/idf_versions.txt
echo "Running ISCC..."
iscc idf_tool_setup.iss
| true
|
12baa5824972790fe36aa0f5dd9beebaf3ecade0
|
Shell
|
rajneesh44/Programs
|
/Lab.sh
|
UTF-8
| 561
| 3.625
| 4
|
[] |
no_license
|
#!/bin/sh
# Print a numeric diamond pattern: for input N, rows count down from
# 0,1,...,N and back to 0, each row listing its row number down to 0.
echo "Enter the max number you want to see in the pattern"
read num1
num2=2
# Total rows = 2*N + 1 (N rows up, the peak, N rows down).
ans=$((num1*num2))
ans=`expr $ans + 1`
a=0
echo "Printing the pattern ..."
while [ "$a" -lt "$ans" ]
do
# Ascending half (rows 0..N): print a, a-1, ..., 0.
if [ "$a" -le "$num1" ]
then
b="$a"
while [ "$b" -ge 0 ]
do
echo -n "$b "
b=`expr $b - 1`
done
echo
fi
# Descending half (rows N+1..2N): mirror of the ascending half.
if [ "$a" -gt "$num1" ]
then
b=`expr $ans - $a - 1`
while [ "$b" -ge 0 ]
do
echo -n "$b "
b=`expr $b - 1`
done
echo
fi
a=`expr $a + 1`
done
| true
|
7d63374be5688ddb070677e25f271183662e7755
|
Shell
|
jussi-kalliokoski/dotfiles
|
/install.sh
|
UTF-8
| 1,866
| 3.71875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Dotfiles installer: clones the repo (if needed), symlinks configs into
# $HOME, links platform binaries into $PREFIX/bin, installs vim plugins and
# imports GNOME settings.
# BUG FIX: the shebang previously declared /usr/bin/env sh, but the script
# uses bash-only features (`==` inside [ ], `select`), which fail on
# dash-style /bin/sh implementations; run it under bash explicitly.
OS=`uname`
if [ "${OS}" == "Darwin" ]; then
OS="osx"
else
OS="linux"
fi
if [ ! -d ~/.dotfiles ]; then
git clone --recursive https://github.com/jussi-kalliokoski/dotfiles ~/.dotfiles
fi
OLD_PWD=`pwd`
cd ~/.dotfiles
if [ "$PREFIX" == "" ]; then
PREFIX="/usr/local"
fi
# link, confirm if directory
link(){
ln -s $1 $2
}
# link, prompt if needs to overwrite
link_prompt(){
if [ -L $2 ]; then
echo "WARN: $2 is already linked, ignoring."
elif [ -f $2 ] || [ -d $2 ]; then
echo "ERROR: $2 already exists. Do you wish to overwrite?"
select yn in "Yes" "No"; do
case $yn in
Yes ) rm -rf $2; link $1 $2; break;;
No ) break;;
esac
done
else
link $1 $2
fi
}
# Link each named dotfile from the repo into $HOME.
add(){
for p in "$@"; do
link_prompt ~/.dotfiles/$p ~/$p
done
}
# Install base packages on apt-based Linux systems.
if [ "$OS" == "linux" ] && command -v apt-get >/dev/null 2>&1; then
apt-get install -y tmux fish vim git
fi
add .vimrc .vim
add .gitconfig .gitignore_global
add .inputrc
add .Xdefaults
add .tmux.conf
mkdir -p ~/.config/fish
add .config/fish/*.fish
mkdir -p ~/.config/xcolors
add .config/xcolors/thayer
mkdir -p ~/.config/alacritty
add .config/alacritty/alacritty.yml
add .phoenix.js
# Platform-specific binaries, then the shared ones.
for executable in platform_bin/$OS/*; do
link_prompt ~/.dotfiles/$executable $PREFIX/bin/${executable##*/}
done
for executable in platform_bin/all/*; do
link_prompt ~/.dotfiles/$executable $PREFIX/bin/${executable##*/}
done
# tmux plugin manager, cloned once.
if ! test -e ~/.tmux/plugins/tpm; then
mkdir -p ~/.tmux/plugins
git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm
fi
vim "+PlugInstall" "+qall"
# GNOME 3
if command -v gsettings >/dev/null 2>&1; then
for config_file in ~/.dotfiles/.config/gnome3-extra/*.conf; do
gsettings-import.js $config_file
done
fi
cd $OLD_PWD
| true
|
66bb66d6a422a77a3c97484ab897823c2312df19
|
Shell
|
iphilgood/DockSTARTer
|
/scripts/run_compose.sh
|
UTF-8
| 1,636
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
# Ask whether to start the selected containers and, on "yes", ensure
# docker/docker-compose are installed, fix permissions and run
# `docker-compose up -d` as the detected user.
#   $1 - "menu" to prompt via whiptail; anything else reads from stdin.
# Returns 1 when the user declines; loops until a valid answer is given.
run_compose() {
local PROMPT
PROMPT=${1:-}
local QUESTION
QUESTION="Would you like to run your selected containers now?"
info "${QUESTION}"
local YN
while true; do
# Never block for input on Travis CI.
if [[ ${CI:-} == true ]] && [[ ${TRAVIS:-} == true ]]; then
info "Travis will not run this."
return
elif [[ ${PROMPT} == "menu" ]]; then
local ANSWER
# whiptail's yes/no answer arrives via its exit code; temporarily
# disable -e so a "No" (exit 1) does not kill the script.
set +e
ANSWER=$(whiptail --fb --clear --title "DockSTARTer" --yesno "${QUESTION}" 0 0 3>&1 1>&2 2>&3; echo $?)
set -e
[[ ${ANSWER} == 0 ]] && YN=Y || YN=N
else
read -rp "[Yn]" YN
fi
case ${YN} in
[Yy]* )
run_script 'install_docker'
run_script 'install_compose'
local PUID
PUID=$(run_script 'env_get' PUID)
local PGID
PGID=$(run_script 'env_get' PGID)
run_script 'set_permissions' "${SCRIPTPATH}" "${PUID}" "${PGID}"
cd "${SCRIPTPATH}/compose/" || fatal "Unable to change directory to ${SCRIPTPATH}/compose/"
su -c "docker-compose up -d --remove-orphans" "${DETECTED_UNAME}" || fatal "Docker Compose failed."
cd "${SCRIPTPATH}" || fatal "Unable to change directory to ${SCRIPTPATH}"
break
;;
[Nn]* )
info "Compose will not be run."
return 1
;;
* )
error "Please answer yes or no."
;;
esac
done
}
| true
|
0017f5f2d3118d075068d1242c9ae1822aea205a
|
Shell
|
aqing1987/s-app-layer
|
/sh/samples/check-os-type.sh
|
UTF-8
| 138
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Report whether the running OS is 32- or 64-bit using getconf:
# a 64-bit OS has 32-bit ints (WORD_BIT) but 64-bit longs (LONG_BIT).
if [ $(getconf WORD_BIT) = '32' ] && [ $(getconf LONG_BIT) = '64' ] ; then
echo "64 bit OS"
else
echo "32 bit OS"
fi
| true
|
8c17d35a744a0dcf47634bc37cdcd25636229037
|
Shell
|
elgenie/dot-files
|
/shell/bash_aliases
|
UTF-8
| 1,049
| 2.71875
| 3
|
[] |
no_license
|
# -*- sh -*-
# Shell alias definitions, sourced from the bash startup files.
# Section: Editing
alias e="$EDITOR"
alias em="emacs"
alias ec="emacsclient -c --alternate-editor=''"
alias ebc="emacs -batch -f batch-byte-compile"
# Section: Git
alias g="git"
alias gr="grep -ri"
alias gdc="git diff -U6 --cached"
alias grn="grep -rin"
alias grl="grep -ril"
alias grv="grep -v"
# more git aliases are set up, along with completion, in git-completion-eletuchy.bash
# Section: SVN
alias sup="svn up"
alias sin="svn commit"
alias sd="svn diff -x \"-U6\" --diff-cmd diff"
# Section: Navigation
alias u="cd .."
alias u2="cd ../.."
alias u3="cd ../../.."
# Section: ls
# Probe for GNU ls (supports --version/--color); BSD ls gets plain flags.
ls --version &> /dev/null
if [ $? -eq 0 ] ; then
alias ll="ls -alh --color=tty"
else
alias ll="ls -alh"
fi
# Section: Uncategorized
alias sssh="sudo ssh"
alias ssh="ssh -A"
alias l="less"
alias lr="less -R"
alias wcl="wc -l"
alias aliasreload="source ${CONFIG_HOME}/bash_aliases"
alias rebash="source ~/.bash_profile"
# Platform Specific / Sensitive Aliases
source ~/.alias
| true
|
54fc899f41972126b85a4d57166d9d2dbebf3f14
|
Shell
|
daumann/timeline-maps
|
/buildingGeojson/listRaw.txt
|
UTF-8
| 2,777
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Terminal color codes for status output.
# NOTE(review): the color variables and `re` below are not referenced in
# this script -- confirm whether they are leftovers from an earlier version.
blue='\033[0;34m'
yellow='\033[1;33m'
green='\033[0;32m'
red='\033[0;31m'
NC='\033[0m' # No Color
re='^[0-9]+$'
#umap=# COPY leaflet_storage_datalayer FROM '/home/aumannd/my-app/subjects.csv' CSV;
# Percent-encode a string for use in a URL (RFC 3986 style): unreserved
# characters ([-_.~a-zA-Z0-9]) pass through, every other byte becomes %xx
# with lowercase hex. The result is echoed AND stored in the global REPLY.
rawurlencode() {
local string="${1}"
local strlen=${#string}
local encoded=""
pos=0
while [ "$pos" -lt "$strlen" ]; do
c=${string:$pos:1}
if [[ "$c" == [-_.~a-zA-Z0-9] ]]; then
o="${c}"
else
# "'$c" makes printf treat the character as its numeric code point.
printf -v o '%%%02x' "'$c"
fi
encoded+="${o}"
pos=$((pos + 1))
done
echo "${encoded}"   # echoed so callers can capture via $(...)
REPLY="${encoded}"  # and stored in REPLY for subshell-free access
}
# For each subject line in listForURL.csv, query the English Wikipedia search
# API with several name variants ("<name> people", "<name> Culture",
# "Culture of <name>", bare name), extract the first result title with jq/grep,
# and emit a CSV fragment containing the "<name> Culture" match.
while read line
do
# Build the four query variants from the first tab-separated field.
NAME=`echo "$line people" | awk -F'\t' '{ print $1; }'`
NAME1=`echo "$line Culture" | awk -F'\t' '{ print $1; }'`
NAME2=`echo "Culture of $line" | awk -F'\t' '{ print $1; }'`
NAME3=`echo "$line" | awk -F'\t' '{ print $1; }'`
# Strip | and ? then URL-encode each variant via perl's uri_escape.
# NOTE(review): decodedString1 reuses $NAME rather than $NAME1 -- confirm
# whether that is intentional.
decodedString=`echo $NAME | tr "|" " " | tr "?" " "`
encodedString="$(perl -MURI::Escape -e 'print uri_escape($ARGV[0]);' "$decodedString")"
decodedString1=`echo $NAME | tr "|" " " | tr "?" " "`
encodedString1="$(perl -MURI::Escape -e 'print uri_escape($ARGV[0]);' "$decodedString1")"
decodedString2=`echo $NAME2 | tr "|" " " | tr "?" " "`
encodedString2="$(perl -MURI::Escape -e 'print uri_escape($ARGV[0]);' "$decodedString2")"
decodedString3=`echo $NAME3 | tr "|" " " | tr "?" " "`
encodedString3="$(perl -MURI::Escape -e 'print uri_escape($ARGV[0]);' "$decodedString3")"
# Ask the Wikipedia search API and keep the first "title" field returned.
wikiURL=`curl --silent "http://en.wikipedia.org/w/api.php?action=query&list=search&format=json&srlimit=2&srwhat=text&srsearch=$encodedString" | jq . | grep -oP '\"title\": \"\K.*?(?=\"\,)' | head -n 1`
wikiURL1=`curl --silent "http://en.wikipedia.org/w/api.php?action=query&list=search&format=json&srlimit=2&srwhat=text&srsearch=$encodedString1" | jq . | grep -oP '\"title\": \"\K.*?(?=\"\,)' | head -n 1`
#if [ -z "$wikiURL" ]; then
wikiURL2=`curl --silent "http://en.wikipedia.org/w/api.php?action=query&list=search&format=json&srlimit=2&srwhat=text&srsearch=$encodedString2" | jq . | grep -oP '\"title\": \"\K.*?(?=\"\,)' | head -n 1`
#fi
#if [ -z "$wikiURL" ]; then
wikiURL3=`curl --silent "http://en.wikipedia.org/w/api.php?action=query&list=search&format=json&srlimit=2&srwhat=text&srsearch=$encodedString3" | jq . | grep -oP '\"title\": \"\K.*?(?=\"\,)' | head -n 1`
#fi
# Build candidate article URLs from the encoded titles (only wikiURL1 is
# actually printed below; the others are computed but unused here).
tmpDecode="http://en.wikipedia.org/wiki/"$( rawurlencode "$wikiURL" )
tmpDecode1="http://en.wikipedia.org/wiki/"$( rawurlencode "$wikiURL1" )
tmpDecode2="http://en.wikipedia.org/wiki/"$( rawurlencode "$wikiURL2" )
tmpDecode3="http://en.wikipedia.org/wiki/"$( rawurlencode "$wikiURL3" )
echo -e ,\"$wikiURL1\"],
done </home/aumannd/Code/github/uMap/buildingGeojson/listForURL.csv
| true
|
280a7090e2e821d86fb435c465d27759057c52f6
|
Shell
|
du-ulises/Proyecto-en-JavaCC-m6800-family-master-
|
/m6800-family-master/changelog.sh
|
UTF-8
| 912
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Create a GitLab release for $CI_COMMIT_TAG whose description lists the
# commits between the two most recent tags, and attach the built M6800.jar
# artifact. Requires CI_PROJECT_ID, CI_PROJECT_URL, CI_COMMIT_TAG and a
# TOKEN_GITLAB with API access.
file=""
array=($(git tag -l))
length=${#array[@]}
tags=""
# Pick the git-log range: last two tags if available, else just the last tag.
if [ $length -gt 1 ]
then
tags="${array[length-2]}..${array[length-1]}"
elif [ $length -gt 0 ]
then
tags="${array[length-1]}"
fi
# Markdown changelog body: one bullet per commit, oldest first.
file+="**Changes**\n\n"
file+=$(git --no-pager log $tags --pretty=format:"\n- [(%h)]($CI_PROJECT_URL/commit/%H) **%cn** - %s" --reverse)
# POST the release; the JSON payload is built in a here-doc and streamed to
# curl through process substitution (@<(...)).
curl https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/releases --request POST --header 'Content-Type: application/json' --header "PRIVATE-TOKEN: $TOKEN_GITLAB" --data @<(cat <<EOF
{
"id": "$CI_PROJECT_ID",
"name": "Release $CI_COMMIT_TAG",
"tag_name": "$CI_COMMIT_TAG",
"description": "$file",
"assets": {
"links": [
{
"name": "M6800.jar",
"url": "https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/jobs/artifacts/$CI_COMMIT_TAG/raw/M6800.jar?job=build"
}
]
}
}
EOF
)
| true
|
e7b521eae9c75876965cfb07ebaab5cc9434dc83
|
Shell
|
benbariteau/dotfiles
|
/files/bashrc
|
UTF-8
| 435
| 2.796875
| 3
|
[] |
no_license
|
# Personal bashrc: machine-local overrides, aliases, git helpers, PATH setup.
if [ -f ~/.bashrc.local ]; then
source ~/.bashrc.local
fi
alias ls="ls --color=auto"
alias tmx="tmux attach -t"
alias gitsh="tpol git"
# Open every file git reports as modified (porcelain " M ") in vim tabs.
function mvim() {
vim -p `git status --porcelain | sed -ne 's/^ M //p'`
}
# Open every file containing $1 (via git grep) and jump to the first match.
function grim() {
vim +/$1 -p $(git grep -l $1)
}
# my own binaries
export PATH=$PATH:$HOME/bin
# add go binaries to path
export PATH=$PATH:`go env GOPATH`/bin
# add current dir to path
# NOTE(review): "." on PATH lets the current directory shadow commands and is
# a known security risk -- confirm this is intentional.
export PATH=$PATH:.
| true
|
2da4d39cf09031eb8b9744ec66130973b82abd4f
|
Shell
|
ravero/setup
|
/setup_network.sh
|
UTF-8
| 1,629
| 4.59375
| 5
|
[] |
no_license
|
#!/bin/bash
#------------------------------------------------------------------------------
# Network Setup
#
# This script will set defaults for network connections on the computer.
# It will list the identified networks on the computer and apply and ask
# each one to apply settings.
#
# macOS only: relies on the `networksetup` utility.
#------------------------------------------------------------------------------
# DNS Settings
dns1_ipv4="1.1.1.1" # Cloudflare DNS 1
dns2_ipv4="1.0.0.1" # Cloudflare DNS 2
dns1_ipv6="2606:4700:4700::1111" # Cloudflare DNS 1 for IPv6
dns2_ipv6="2606:4700:4700::1001" # Cloudflare DNS 2 for IPv6
# A function to apply default dns settings to the specified adapter.
# $1 - the network service name; prompts Y/N before touching anything and
# prints the resulting DNS servers afterwards.
function apply_dns() {
adapter=$1
read -p "Do you want to apply DNS settings for '${adapter}' adapter? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
echo "Setting DNS Servers for '${adapter}'adapter..."
networksetup -setdnsservers "${adapter}" $dns1_ipv4 $dns2_ipv4 $dns1_ipv6 $dns2_ipv6
echo ""
echo "${adapter} DNS Servers:"
networksetup -getdnsservers "${adapter}"
fi
}
# Determine available networks to list
NETS_FILE=`mktemp`
NETS_FILTERED_FILE=`mktemp`
networksetup -listallnetworkservices >> $NETS_FILE
# Drop the explanatory first line of the listing: keep the last (count-1)
# lines, i.e. only the service names.
LINES=`wc -l ${NETS_FILE} | awk '{ print $1 }'`
LINES=$((LINES-1))
tail -n $LINES $NETS_FILE >> $NETS_FILTERED_FILE
# Read the service names into an array, one per line.
IFS=$'\n' read -d '' -r -a adapters < $NETS_FILTERED_FILE
# Ask to apply on the default adapters
for adapter in "${adapters[@]}"
do
apply_dns "${adapter}"
done
# Cleanup
rm $NETS_FILE
rm $NETS_FILTERED_FILE
| true
|
d67d73dc7035712c70110f5617ba6c9ff409b759
|
Shell
|
davidbstein/spotsdk
|
/scripts/setup.bash
|
UTF-8
| 944
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
# Set up a local, modifiable copy of Spotify.app: copy the installed app to
# .SpotifyModified.app, unzip every resource bundle into .unzipped_raw/,
# then run the unbundle/build_source helper scripts.
read -p "
This is a destructive operation if you already have a
.SpotifyModified.app in this directory. Continue (Y/n)? " -n 1 -r
echo
if [[ $REPLY =~ ^[Nn]$ ]]
then
exit 1
fi
echo "
copying Spotify.app into local directory as .SpotifyModified.app
"
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# BUG FIX: the original removed $DIR/../.SpotifyModified.app, but $DIR is
# never defined anywhere in this script (only SCRIPT_DIR is), so the path
# resolved to /../.SpotifyModified.app and the stale copy was never removed.
rm -rf "$SCRIPT_DIR/../.SpotifyModified.app"
cp -r /Applications/Spotify.app $SCRIPT_DIR/../.SpotifyModified.app
echo "
wiping .unzipped_raw
"
rm -rf $SCRIPT_DIR/../.unzipped_raw
mkdir $SCRIPT_DIR/../.unzipped_raw
echo "
making a copy of the raw source in .unzipped_raw
"
# Extract each bundled app archive into its own directory under .unzipped_raw.
for rawfn in $SCRIPT_DIR/../.SpotifyModified.app/Contents/Resources/Apps/*;
do
cd $SCRIPT_DIR/../.unzipped_raw
fn=$(echo $rawfn | tr "/" "\n" | tail -n1)
mkdir ${fn}
cd ${fn}
yes | unzip -a -q $rawfn
cd $SCRIPT_DIR/..
done
echo "
unbundling all subcomponents
"
./scripts/unbundle.bash
echo "
creating src folders
"
./scripts/build_source.bash
|
6b326919e5f55c9c59e80b569c52c617036f197f
|
Shell
|
mathpopo/Ultra-Fast-Face-Detector-1MB
|
/TNN-Face-Detector/build_aarch64_linux_face_detector.sh
|
UTF-8
| 294
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Cross-compile the TNN face detector for aarch64 Linux: wipes any previous
# build directory, configures CMake with the aarch64 GNU toolchain, and
# builds with 4 parallel jobs.
CC=aarch64-linux-gnu-gcc
CXX=aarch64-linux-gnu-g++
TNN_LIB_PATH=./TNN/scripts/build_aarch64_linux/
# Always start from a clean build tree.
rm -r build
mkdir build
cd build
cmake .. \
-DCMAKE_C_COMPILER=$CC \
-DCMAKE_CXX_COMPILER=$CXX \
-DCMAKE_BUILD_TYPE=Release \
-DTNN_LIB_PATH=$TNN_LIB_PATH
make -j4
| true
|
dbe239073d4a0b4399ce9e763d026ba0a166b9a3
|
Shell
|
ausnimbus/openshift-origin-cartridge-ioncube
|
/bin/control
|
UTF-8
| 711
| 3.390625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# OpenShift cartridge control script for the IonCube loader.
# Dispatches on $1: start|stop|restart|status|tidy.
source $OPENSHIFT_CARTRIDGE_SDK_BASH
# Launch the monitor in the background and record its PID for stop.
function start {
nohup $OPENSHIFT_IONCUBE_DIR/bin/monitor > /dev/null 2>&1 & echo $! > $OPENSHIFT_IONCUBE_DIR/pid/run.pid
echo "IonCube is loaded..."
}
# Kill the monitor recorded in the pid file.
function stop {
kill -9 `cat $OPENSHIFT_IONCUBE_DIR/pid/run.pid`
echo "IonCube will no longer be auto loaded.."
}
# Report loaded when the zend_extension line is present in the php.ini.
function status {
grep "zend_extension = $OPENSHIFT_IONCUBE_DIR/ioncube_loader/ioncube/ioncube_loader_lin_$OPENSHIFT_PHP_VERSION.so" $PHPRC && echo "Ioncube is loaded"
}
# Clean up any log files
function tidy() {
client_message "Nothing to clear..."
}
case "$1" in
start) start ;;
stop) stop ;;
restart)
stop
start
;;
status) status ;;
tidy) tidy ;;
esac
| true
|
5f7d2de613ff2d1c0b75a71682cdbc8da0e95103
|
Shell
|
Quillshot/Torque2D
|
/tgb/CleanDSO.command
|
UTF-8
| 443
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
cd `dirname "$0"`
echo -n "Deleting all compiled script files... "
find . -iname "*.dso" -delete
echo "done."
echo -n "Deleting all cached fonts... "
find . -iname "*.uft" -delete
echo "done."
echo -n "Deleting all saved keybinds... "
find . -iname "bind.cs" -delete
echo "done."
echo -n "Deleting all saved preferences... "
find . -iname "prefs.cs" -delete
echo "done."
echo ""
echo "You may now close this window."
echo ""
| true
|
ec53a3384aa8d866e80fb0b3989095c9090f930a
|
Shell
|
wtraylor/HadCM3B_60ka_for_LPJ-GUESS
|
/sort_nc_files_chronologically.sh
|
UTF-8
| 948
| 3.8125
| 4
|
[
"CC0-1.0",
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# SPDX-FileCopyrightText: 2021 W. Traylor <wolfgang.traylor@senckenberg.de>
#
# SPDX-License-Identifier: MIT
#
# Sort the HadCM3B output files in the argument list chronologically, using
# the filename pattern.
#
# Author: Wolfgang Traylor <wolfgang.traylor@senckenberg.de>
# The problem is that the years in the HadCM3B output files don’t have leading
# zeros. So when we sort by their original name, the order is broken.
# The simple solution is to *add leading zeros*, sort, and then restore the original names.
function add_zeros(){
sed 's;_0_2.5kyr.nc;_00_2.5kyr.nc;' |\
sed 's;_2.5_5kyr.nc;_02.5_5kyr.nc;' |\
sed 's;_5_7.5kyr.nc;_05_7.5kyr.nc;' |\
sed 's;_7.5_10kyr.nc;_07.5_10kyr.nc;'
}
function restore_original(){
sed 's;_00_2.5kyr.nc;_0_2.5kyr.nc;' |\
sed 's;_02.5_5kyr.nc;_2.5_5kyr.nc;' |\
sed 's;_05_7.5kyr.nc;_5_7.5kyr.nc;' |\
sed 's;_07.5_10kyr.nc;_7.5_10kyr.nc;'
}
printf "%s\n" $* | sed 's; ;\n;g' | add_zeros | sort --reverse | restore_original
| true
|
826fe1302568d598947d30ded208411b94f6d845
|
Shell
|
fflooos/Misc_auto_installer
|
/init_bind.sh
|
UTF-8
| 4,078
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
IP_RANGE="\t\t127.0.0.1/8\\;\\
\t\t127.0.1.1/8\\;\\
\t\t192.168.0.0/24\\;\\"
DOMAIN="yourdomain.com"
DOMAIN_IP="192.168.0.205"
DNS_SERVER="${HOSTNAME}"
echo "Bind installation & config _ @fslab 20160426-16:43"
echo "Installing bind9..."
sudo apt-get update -q -y && sudo apt-get -q -y install bind9 bind9-doc
echo "Enabling security logging for bind..."
echo "Log stored in /var/log/named/security.log"
if [ -z "file "/var/log/named/security.log" versions 3 size 30m;" ]; then
echo '
// Logging security events for fail2ban
logging {
channel security_file {
file "/var/log/named/security.log" versions 3 size 30m;
severity dynamic;
print-time yes;
};
category security {
security_file;
};
};' >> '/etc/bind/named.conf.options'
fi
echo "--> Updated /etc/bind/named.conf.options"
command mkdir --parent /var/log/named/
command chown -R bind:bind /var/log/named/
echo "Setup log rotation"
echo '/var/log/named/security.log {
daily
missingok
rotate 7
compress
delaycompress
notifempty
create 644 bind bind
postrotate
/usr/sbin/invoke-rc.d bind9 reload > /dev/null
endscript
}' > '/etc/logrotate.d/bind9-security'
echo "--> Created /etc/logrotate.d/bind9-security"
echo "Installing fail2ban..."
command apt-get install fail2ban -y -q
echo "Enabling Bind server protection by fail2ban"
if [ ! -e '/etc/fail2ban/jail.local' ]; then
command touch '/etc/fail2ban/jail.local'
fi
if [ -z "[named-refused-tcp]
enabled = true" ]; then
echo "
[named-refused-tcp]
enabled = true
" >> '/etc/fail2ban/jail.local'
fi
echo "--> Created /etc/fail2ban/jail.local"
echo "Reloading bind & fail2ban..."
/etc/init.d/bind9 reload
/etc/init.d/fail2ban restart
echo "Setting up opennic log free DNS"
NAME_SERVERS="\t\t5.9.49.12\\;\\
\t\t193.183.98.154\\;\\
\t\t185.83.217.248\\;\\
\t\t87.98.242.252\\;\\"
echo "--> $NAME_SERVERS"
echo "Redirect DNS request to selected servers"
if [ -n "${NAME_SERVERS}" ]; then
command sed -i \
-e '/^[ \t]*forwarders/,/^[ \t]*};/d' \
-e "/directory/a\\
\\
\t// Forwarding DNS queries to ISP DNS.\\
\tforwarders {\\
${NAME_SERVERS}
\t}\\;" '/etc/bind/named.conf.options'
fi
echo "--> Updated /etc/bind/named.conf.options"
echo "Reloading bind..."
/etc/init.d/bind9 reload
echo "Setting up system to use local DNS..."
command sed -i -e 's/^\([ \t]*nameserver\)/#\1/' '/etc/resolv.conf'
command echo 'nameserver 127.0.0.1' >> '/etc/resolv.conf'
echo "Creating local network IP address ACL..."
command echo -e "
// Local networks access control list.
acl local-networks {
\t127.0.0.0/8;
${IP_RANGES}
};" >> '/etc/bind/named.conf.options'
echo "--> Updated /etc/bind/named.conf.options"
command sed -i -e '/directory/a\
\
\t// Allowing queries for local networks.\
\tallow-query {\
\t\tlocal-networks\;\
\t}\;\
\
\t// Allowing recursion for local networks.\
\tallow-recursion {\
\t\tlocal-networks\;\
\t}\;' '/etc/bind/named.conf.options'
echo "--> Updated /etc/bind/named.conf.options"
echo "Reloading bind..."
/etc/init.d/bind9 reload
echo "Creating the zone file for domain ${DOMAIN} ..."
echo "\$ttl 86400
${DOMAIN}. IN SOA ${DNS_SERVER}. postmaster.${DOMAIN}. (
2010111504; Serial
3600; refresh after 3 hours.
3600; Retry after 1 hour.
1209600; expire after 1 week.
86400; Minimum TTL of 1 day.
);
;
; Name servers declaration.
;
${DOMAIN}. IN NS ${DNS_SERVER}.;
;
; Hostnames declaration.
;
${HOSTNAME}. IN A ${DOMAIN_IP};
" > "/etc/bind/db.${DOMAIN}"
echo "--> Created /etc/bind/db.${DOMAIN}"
echo "Adding zone to server configuration..."
if [ -z "$(command grep "${DOMAIN}" "/etc/bind/named.conf.local")" ]; then
echo "
zone \"${DOMAIN}\" in {
type master;
file \"/etc/bind/db.${DOMAIN}\";
allow-query { any; };
};
" >> "/etc/bind/named.conf.local"
fi
echo "--> Updated /etc/bind/named.conf.local"
echo "Reloading bind..."
/etc/init.d/bind9 reload
| true
|
451f48441f2ffec675622007d8cfb094ddd2ed38
|
Shell
|
Maarc/ansiblOSX
|
/scripts/download_as_pdf.sh
|
UTF-8
| 909
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Script to Download Google Drive Documents as pdf and compress them
set -e
set -u
set -o pipefail
#set -x # SET THIS FOR DEBUGGING
GDRIVE_REMOTE='drive'
LOCAL_DOWNLOAD_FOLDER='/Users/mzottner/Downloads/'
GOOGLE_DRIVE_ID=$(pbpaste | perl -n -e'/([-\w]{25,})/ && print $1')
OBJECT_NAME=$(rclone -vv backend copyid --use-json-log "${GDRIVE_REMOTE}:" "${GOOGLE_DRIVE_ID}" "${LOCAL_DOWNLOAD_FOLDER}" --drive-export-formats pdf 2<&1 | sed 'x;$!d' | jq --raw-output .object)
# Compress downloaded pdf
gs -sstdout=%stderr -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/ebook -dNOPAUSE -dQUIET -dBATCH -dDetectDuplicateImages -dCompressFonts=true -r150 -sOutputFile="${LOCAL_DOWNLOAD_FOLDER}compressed.pdf" "${LOCAL_DOWNLOAD_FOLDER}${OBJECT_NAME}" 2>/dev/null
mv "${LOCAL_DOWNLOAD_FOLDER}compressed.pdf" "${LOCAL_DOWNLOAD_FOLDER}${OBJECT_NAME}"
echo "${LOCAL_DOWNLOAD_FOLDER}${OBJECT_NAME}"
| true
|
2a334bd3b263986c6aef00479ab7c5ad40a04767
|
Shell
|
ComputationalAdvertising/tech-stacks
|
/tools_glog/build.sh
|
UTF-8
| 355
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
set -o pipefail
set -o errexit
SCRIPT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
PROJECT_DIR=$SCRIPT_DIR
if [ ! -d ${PROJECT_DIR}/build ]; then mkdir -p $PROJECT_DIR/build; fi
cd $PROJECT_DIR/build
cmake -D CMAKE_C_COMPILER=`which gcc` -D CMAKE_CXX_COMPILER=`which g++` $PROJECT_DIR
make
echo "======== ${BASH_SOURCE[0]} ========"
| true
|
603781088a5b4bff9e67f7da93ca1a131298d40e
|
Shell
|
sinewalker/dotfiles
|
/init/20_suse_zypper.sh
|
UTF-8
| 1,169
| 3.390625
| 3
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
# SUSE-only stuff. Abort if not SUSE.
is_suse || return 1
# Update Zypper.
e_header "Updating Zypper"
cat <<EOM
You will need to enter the root password for installing packages.
If you don't know the root password, just press Enter to skip these.
EOM
e_header "Refreshing Zypper repos"
sudo zypper --quiet refresh
#e_header "Upgrading installed packages"
#sudo zypper --quiet update -y
e_header "Installing SUSE Patterns"
sudo zypper install -y \
pattern:console \
pattern:devel_basis pattern:devel_python pattern:devel_python3
# Install RPM packages.
PACKAGES=(
ansible
cowsay
xcowsay
htop
nano
screen
tmux
libopenssl-devel
password-store
python3-virtualenv
python-devel
python3-pip
git-core
mercurial
nmap
telnet
tree
nodejs-common
)
if (( ${#PACKAGES[@]} > 0 )); then
e_header "Installing RPM packages: ${PACKAGES[*]}"
sudo zypper install -y ${PACKAGES[*]}
fi
# Install Git Extras
if [[ ! "$(type -P git-extras)" ]] && [[ -d ${DOTFILES}/vendor/git-extras ]]; then
e_header "Installing Git Extras"
(
cd $DOTFILES/vendor/git-extras &&
sudo make install
)
fi
| true
|
3c60020ab1601382f8c6ebb01a7fdd1445a30031
|
Shell
|
AsthaTyagiU/Parquet_Reader_Script
|
/Parquet_Reader_Script/install_R.sh
|
UTF-8
| 557
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
#installing R
sudo apt-get install software-properties-common
sudo add-apt-repository main
sudo add-apt-repository universe
sudo add-apt-repository restricted
sudo add-apt-repository multiverse
sudo apt-get update
sudo apt-get install -y build-essential libssl-dev libxml2-dev libcurl4-openssl-dev
sudo apt install r-base -y
sudo su - -c "R -e \"install.packages('ps', repos='http://cran.rstudio.com/')\""
#install required R packages and run R script to read parquet file
sudo Rscript Parquet_Reader.R $1 $2
echo "Exiting EC2 instance"
exit
| true
|
ababa687207591ab0e4f110402b53675413b70ba
|
Shell
|
convoyinc/apollo-cache-hermes
|
/scripts/include/shell.sh
|
UTF-8
| 275
| 3.5
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
(( __SHELL_INCLUDED__ )) && return
__SHELL_INCLUDED__=1
export OPTIONS_FLAGS=()
export OPTIONS_ARGS=()
for argument in "${@}"; do
if [[ "${argument}" =~ ^- ]]; then
OPTIONS_FLAGS+=("${argument}")
else
OPTIONS_ARGS+=("${argument}")
fi
done
| true
|
1a55c01d70fc35ea186b498a72b7153d01395d46
|
Shell
|
denil1111/JSint
|
/testFile
|
UTF-8
| 652
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
echo "\033[1;34m" Now Test all files in text folder "\033[0m"
FILES=`ls test/*.js`
i=0
suc=0
for d in $FILES; do
i=$[$i+1]
filename=`basename $d .js`
node < test/$filename.js > test/$filename.out
resfile="test/$filename.out"
echo "\033[1m" Test $i "\033[0;35m" $filename.js : "\033[0m""\c"
if diff test/$filename.res test/$filename.out >/dev/null ; then
echo "\033[1;32m √ \033[0m"
suc=$[$suc+1]
else
echo "\033[1;31m Failed \033[0m"
fi
done;
if [[ $i -eq $suc ]];then
echo "\033[1;32m" All file tested, $suc/$i files is correct "\033[0m"
else
echo "\033[1;31m" All file tested, $suc/$i files is correct "\033[0m"
fi
| true
|
07882917e88390c181cac8927ac85b5b94abeee3
|
Shell
|
sarva/puppet-mongodb
|
/files/check-shard-db
|
UTF-8
| 207
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
db=$1
mongorouter=`cat /etc/mongorouter.conf | tr "\n" "," | sed -e 's/.$//g'`
echo "db.printShardingStatus()" | mongo $mongorouter/admin --quiet | grep '"_id" : "'$db'", "partitioned" : true'
| true
|
b7e98b9a803c876a21af371b1fb0f51f6b6e286f
|
Shell
|
kpelelis/onevone
|
/install_public.sh
|
UTF-8
| 1,255
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
command_exists () {
type "$1" &> /dev/null;
}
apt-get update
apt-get upgrade
apt-get autoremove
PIP=/usr/local/bin/pip
if command_exists pip ;then
echo 'pip already installed updating'
$PIP install --upgrade pip
else
apt-get install python3-dev build-essential
easy_install pip
fi
if command_exists psql ;then
echo 'postgresql already installed'
else
apt-get install postgresql
fi
if command_exists redis-cli ;then
echo 'redis already installed'
else
apt-get install redis-server
fi
$PIP install --upgrade virtualenvwrapper
export WORKON_HOME=$HOME/.virtualenvs
source /usr/local/bin/virtualenvwrapper.sh
if [ -d "$WORKON_HOME/onevone" ]; then
rm -r $WORKON_HOME/onevone
fi
PYTHON3_BIN=$(which python3)
mkvirtualenv onevone --python=$PYTHON3_BIN
POSTACTIVATE_SCRIPT=$WORKON_HOME/onevone/bin/postactivate
echo 'export ONEVONE_DEV_DB=' >> $POSTACTIVATE_SCRIPT
echo 'export ONEVONE_PRODUCTION_DB=' >> $POSTACTIVATE_SCRIPT
echo 'export PSQL_ADMIN_URI=' >> $POSTACTIVATE_SCRIPT
echo 'export RIOT_API_KEY=' >> $POSTACTIVATE_SCRIPT
workon onevone
pip install -r requirements.txt
if [ -d "./alembic/versions" ]; then
find alembic/versions/ -type f -name '*.py' -exec rm {} \;
fi
python setup.py
source ./static_files.sh
| true
|
250ea2e1c2e45028e316a86ae25a5bf64b62724a
|
Shell
|
latextemplates/Handin-LaTeX-template
|
/src/aftercompile.sh
|
UTF-8
| 1,705
| 3.703125
| 4
|
[
"LPPL-1.3c"
] |
permissive
|
cd $sourceDir;
echo "$mainDir";
function tex_compile(){
pushd . >/dev/null;
cd "$CTANDir"
outHandle "Error in latexmk $1" latexmk -pdf "$1.tex" -outdir="./bin" --shell-escape -interaction=nonstopmode -f
cp "bin/$1.pdf" ./
rm -rf bin/
popd >/dev/null;
}
# ---
# - handin.sty : The handin package
# - handin-doc.pdf : Description of how the package works
# - handin-doc.tex : Source of handin-doc.tex
# ^^ AUTO
# - example.tex : Contains example code
# - universityTromsoLogo.pdf : An image to be used in the example
add_to_CTANDir layout.tex example.tex img/universityTromsoLogo.pdf
# - example.pdf : Output of example.tex
tex_compile "example"
# - layout.pdf : Contains an overview on which macro goes where in the new \maketitle
tex_compile "layout"
# layout.tex is not going to CTAN
rm $CTANDir/layout.tex
# Get github README ready
mv ../README.md ../README.md.bak
echo -e "<!---\n DO NOT EDIT THIS FILE \n EDITS SHOULD BE DONE IN src/README.template.md \n-->" > ../README.md
cat README.template.md >> ../README.md
pushd . >/dev/null
cd ../
outHandle "Error when inserting package-variables to README.md-file" $perlDir/vars.pl -v $version -p $packagename -g $github -b $build README.md
popd >/dev/null
# Update layout
cp -f "$CTANDir/layout.pdf" "$mainDir/layout.pdf"
# Zip CTAN file
sleep 0.3
echo "Zipping CTAN"
rm haninCTAN.zip
zip handinCTAN.zip -r "$packagename/"
sleep 0.3
# remove old handin directory and move new one to main
rm -rf "$mainDir/handin"
mv "$CTANDir/" "$mainDir/$packagename"
# move zip file to main and make copy that is included in git
mv "$sourceDir/handinCTAN.zip" "$mainDir/"
cp "$mainDir/handinCTAN.zip" "$mainDir/handin.zip"
echo "Tag as v${version}b$build"
| true
|
6e2b71bd5403223305a471d6acb47b17fe2266b8
|
Shell
|
Joel2B/Intel-Galileo
|
/scripts/build-debian-from-scratch.sh
|
UTF-8
| 7,543
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
KERNEL=$1
if [ -z $KERNEL ]; then
echo "No kernel supplied"
echo "to build the kernel run the script build-kernel.sh"
exit 0
fi
IFS='_'
read -r kernel_name p2 p3 << EOF
$KERNEL
EOF
id_kernel=$( echo $kernel_name | sed -e "s/linux-image-//g")
linux_headers=$( echo ${kernel_name}_${p2}_${p3} | sed -e "s/image/headers/g")
VERSION=jessie
echo "========================================="
echo "Version: $VERSION"
echo "========================================="
echo "Creating loopback.img"
echo "========================================="
dd of=loopback.img bs=1 count=0 seek=1G
echo "========================================="
echo "Mounting loopback.img -> loop0"
echo "========================================="
#sudo kpartx -v -a loopback.img
echo "Creating partitions"
sudo losetup /dev/loop0 loopback.img
sudo parted /dev/loop0 mktable msdos
sudo parted /dev/loop0 mkpart primary fat32 1 100MB
#sudo parted /dev/loop0 mkpart primary linux-swap 101MB 613MB
#sudo parted /dev/loop0 mkpart primary ext3 614MB 3000MB
sudo parted /dev/loop0 mkpart primary ext3 101MB 1000MB
#sudo parted /dev/loop0 set 1 boot onlos
#sudo parted /dev/loop0 set 3 msftdata on
sudo partx -a /dev/loop0
sudo mkfs.vfat -I /dev/loop0p1
#sudo mkfs.ext3 /dev/loop0p3
sudo mkfs.ext3 /dev/loop0p2
if [ ! -d sd_root ]; then
echo "Creating sd_root dir"
mkdir sd_root
fi
if [ ! -d sd_boot ]; then
echo "Creating sd_boot dir"
mkdir sd_boot
fi
#sudo mount /dev/loop0p3 sd_root
sudo mount /dev/loop0p2 sd_root
echo "========================================="
if [ -d $VERSION ]; then
echo "Copying temporary dir $VERSION -> sd_root"
cp -a $VERSION/. ./sd_root
else
echo "Downloading $VERSION system"
sudo debootstrap --arch i386 $VERSION $VERSION http://http.debian.net/debian
echo "Copying temporary dir $VERSION -> sd_root"
cp -a ./$VERSION/. ./sd_root
fi
echo "========================================="
echo "Mounting downloaded system"
sudo mount --bind /dev sd_root/dev/
#sudo mount --bind /dev/pts sd_root/dev/shm
sudo mount --bind /dev/pts sd_root/dev/pts
sudo mount --bind /proc sd_root/proc
#sudo mount -t sysfs /sys sd_root/sys
echo "Copying libgmp"
sudo cp lib/libgmp.so.10.4.0 sd_root/opt
echo "Copying $KERNEL"
sudo cp $KERNEL sd_root/opt
echo "Copying $linux_headers"
sudo cp $linux_headers sd_root/opt
echo "Updating sources.list"
sudo echo "
deb http://deb.debian.org/debian jessie main contrib non-free
deb-src http://deb.debian.org/debian jessie main contrib non-free
deb http://deb.debian.org/debian-security/ jessie/updates main contrib non-free
deb-src http://deb.debian.org/debian-security/ jessie/updates main contrib non-free
deb http://deb.debian.org/debian jessie-updates main contrib non-free
deb-src http://deb.debian.org/debian jessie-updates main contrib non-free
" > sd_root/etc/apt/sources.list
chroot sd_root/ apt-get update
chroot sd_root/ apt-get upgrade -y
echo "========================================="
echo "Installing applications"
echo "========================================="
chroot sd_root/ apt-get install -y sudo locales ntp openssh-server initramfs-tools net-tools bash-completion connman parted gdb
#make build-essential libssl-dev zlib1g-dev libbz2-dev \
#libreadline-dev libsqlite3-dev wget curl llvm libncurses5-dev libncursesw5-dev \
#xz-utils tk-dev libffi-dev liblzma-dev git autoconf libtool zip cmake python3-dev python3 python3-pip python3-setuptools python-openssl
echo "========================================="
echo "Configuring system language"
chroot sd_root/ locale-gen en_US.UTF-8
chroot sd_root/ localedef -i en_US -f UTF-8 en_US.UTF-8
chroot sd_root/ su -c "echo 'LC_ALL=en_US.UTF-8' >> /etc/default/locale"
chroot sd_root/ su -c "echo 'LANG=en_US.UTF-8' >> /etc/default/locale"
echo "Configuring ssh"
chroot sd_root/ sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/g' /etc/ssh/sshd_config
echo "Configuring modules"
chroot sd_root/ su -c "echo 'pch_udc' >> /etc/modules"
chroot sd_root/ su -c "echo 'g_serial' >> /etc/modules"
echo "Configuring serial console"
chroot sd_root/ su -c "echo 'GS0:23:respawn:/sbin/getty -L 115200 ttyGS0 vt100' >> /etc/inittab"
chroot sd_root/ su -c "echo 'T1:23:respawn:/sbin/getty -L 115200 ttyS1 vt100' >> /etc/inittab"
#echo "Configuring swap"
#chroot sd_root/ su -c "echo '/dev/mmcblk0p2 none swap sw 0 0' >> /etc/fstab"
#chroot sd_root/ su -c "mkswap /dev/mmcblk0p2"
#chroot sd_root/ su -c "swapon -a"
echo "========================================="
echo "add password for the root"
chroot sd_root/ passwd
echo "Adding a new user"
chroot sd_root/ adduser user
echo "Adding the new user to group sudo"
chroot sd_root/ addgroup user sudo
echo "Configuring visudo"
chroot sd_root/ su -c "echo '#!/bin/sh' > /tmp/tmp.sh"
chroot sd_root/ su -c "echo \"sed -i 's/%sudo\tALL=(ALL:ALL) ALL/%sudo ALL=(ALL) NOPASSWD:ALL/g' /etc/sudoers\" >> /tmp/tmp.sh"
chroot sd_root/ su -c "chmod +x /tmp/tmp.sh"
chroot sd_root/ su -c "EDITOR=/tmp/tmp.sh visudo"
echo "Configuring hostname, hosts, interfaces"
chroot sd_root/ su -c "echo 'Galileo' > /etc/hostname"
chroot sd_root/ sed -i 's/.1\tlocalhost/.1\tlocalhost Galileo/g' /etc/hosts
chroot sd_root/ sed -i 's/loopback/loopback Galileo/g' /etc/hosts
chroot sd_root/ su -c "echo 'auto eth0' >> /etc/network/interfaces"
chroot sd_root/ su -c "echo 'iface eth0 inet dhcp' >> /etc/network/interfaces"
echo "========================================="
echo "Installing $KERNEL"
chroot sd_root/ su -c "dpkg -i /opt/$KERNEL"
echo "========================================="
echo "Installing $linux_headers"
chroot sd_root/ su -c "dpkg -i /opt/$linux_headers"
echo "========================================="
echo "Linking libgmp"
chroot sd_root/ su -c "unlink /usr/lib/i386-linux-gnu/libgmp.so.10"
chroot sd_root/ su -c "rm /usr/lib/i386-linux-gnu/libgmp.so.10.2.0"
chroot sd_root/ su -c "cp /opt/libgmp.so.10.4.0 /usr/lib/i386-linux-gnu"
chroot sd_root/ su -c "ln -s /usr/lib/i386-linux-gnu/libgmp.so.10.4.0 /usr/lib/i386-linux-gnu/libgmp.so.10"
echo "Patching libpthread"
chroot sd_root/ su -c '
for i in `/usr/bin/find /lib -type f -name \*pthread\*so`
do
cp ${i} ${i}.bak
sed -i "s/\xf0\x0f\xb1\x8b/\x90\x0f\xb1\x8b/g" ${i}
done
'
echo "Mounting boot"
sudo mount /dev/loop0p1 sd_boot
echo "Creating dir sd_boot/boot/grub"
sudo mkdir -p sd_boot/boot/grub
echo "Configuring boot"
echo "kernel: vmlinuz-$id_kernel"
echo "initrd: initrd.img-$id_kernel"
sudo echo "
default 1
timeout 10
color white/blue white/cyan
title Clanton SVP kernel-SPI initrd-SPI IMR-On IO-APIC/HPET NoEMU
kernel --spi root=/dev/ram0 console=ttyS1,115200n8 earlycon=uart8250,mmio32,\$EARLY_CON_ADDR_REPLACE,115200n8 vmalloc=384M reboot=efi,warm apic=debug rw
initrd --spi
title Custom Quark Kernel with Debian $VERSION
root (hd0,1)
kernel /boot/vmlinuz-$id_kernel root=/dev/mmcblk0p2 2 console=ttyS1,115200n8 earlycon=uart8250,mmio32,\$EARLY_CON_ADDR_REPLACE,115200n8 vmalloc=3844M reboot=efi,warm apic=debug rw LABEL=boot debugshell=5
initrd /boot/initrd.img-$id_kernel
" > sd_boot/boot/grub/grub.conf
echo "Umounting system"
sudo killall ntpd
sudo umount -l sd_root/dev/pts sd_root/dev sd_root/proc sd_root sd_boot
losetup -a
sudo losetup -d /dev/loop0
losetup -a
sudo kpartx -d loopback.img
sudo kpartx -d /dev/loop0
DATE=$(date +"%m%d%Y")
TIME=$(date +"%H%M%S")
# if [ ! -d sd_card ]; then
# mkdir sd_card
# fi
#mv loopback.img galileo-$VERSION-$DATE-$TIME.img
cp loopback.img /media/sf_PUBLICO/galileo-$VERSION-$DATE-$TIME.img
rm loopback.img
| true
|
430b240a60209e131683f233b95f4a9882e23b7d
|
Shell
|
epcim/dotfiles
|
/home/bin/svn_ignore-recursive
|
UTF-8
| 1,175
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# Recursively find and set svn:ignore on files or folders.
# If the file was already version it will be removed from the repository
# and keep the local copy (can be overridden).
# This does not perform the final commit so you can review the changes
# using svn status.
#
# $1 pattern to ignore
# $2 remove the ignored files locally as well (optional)
#
# Example: find_svn_ignore "*.bak" . --remove
#
for a in `find . -name $1`
do
svn info ${a%/*} > /dev/null 2>&1;
if [ "$?" -ne "0" ]; then
echo "Skipping ${a%/*} because it is not under version control.";
continue;
fi;
echo "Ignoring ${a##*/} in ${a%/*}";
svn propget svn:ignore ${a%/*} > svnignore.tempfile;
echo "$1" >> svnignore.tempfile;
svn propset -q svn:ignore -F svnignore.tempfile ${a%/*};
rm svnignore.tempfile;
if [ "$2" = "--remove" ]; then
# Remove file from working directory and repository
svn rm --force -q ${a##*/};
else
# Remove file from repository only
svn rm --force --keep-local ${a##*/};
fi;
done
| true
|
c42f66e64849bb9378e2d20f866b089273efd8e6
|
Shell
|
eduardomarcial/mass-create-branch-aws-codecommit
|
/createbranch.sh
|
UTF-8
| 1,015
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Autor: EDUARDO
# Criar branch em todos os repositórios
# Lista nome dos repositorios
for REPONAME in $(aws codecommit list-repositories|grep repositoryName|cut -d\" -f4); do
# Repo da vez
echo "$REPONAME - Repositorio da vez." >>/tmp/logcreatebranch.log 2>&1
# Lista commit-id da branch development da repositorio da vez no laco
for DEVELOPMENTID in $(aws codecommit get-branch --repository-name $REPONAME --branch-name development|grep commitId|cut -d\" -f4 );do
# Criar branch "test" com commit-id e repo name dos laços.
aws codecommit create-branch --repository-name $REPONAME --branch-name test --commit-id $DEVELOPMENTID
if [ $? -ne 0 ]; then
echo "$REPONAME - $DEVELOPMENTID - erro!" >> /tmp/logcreatebranch.log
else
echo "$REPONAME - $DEVELOPMENTID - Sucesso!" >> /tmp/logcreatebranch.log
fi
done
done
| true
|
44db93efe229c32decee4f4ff609dce9354d304f
|
Shell
|
yuri91/stm32f40x-rs
|
/gen-api.sh
|
UTF-8
| 558
| 2.78125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/sh
svd2rust -i resources/STM32F40x.svd > src/lib.rs
# Reformat compiler attributes removing unnecessary spaces
# Remove spaces from # [ attribute ] => #[attribute] and add \n
sed -i 's/\s*# \[ \([^]]*\) \]/\n#[\1]/g' src/lib.rs
# Remove spaces from # ! [ attribute ] and add \n
sed -i 's/\s*# ! \[ \([^]]*\) \]/#![\1]\n/g' src/lib.rs
sed -i 's/ \([()]\) */\1/g' src/lib.rs
# Use rustfmt to reformat to human readable format
rustfmt src/*.rs
# Test that build succeeds for target platform (ARM Cortex-M4)
xargo check --target thumbv7em-none-eabihf
| true
|
2a2f724167d2806cc0d63ae96c330328d84b4e2e
|
Shell
|
Jinzai-solution/SALVATOR_SHELL
|
/board-salvator/DU/index.sh
|
UTF-8
| 3,537
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
# display unit device driver autotest shell-script
set -a
#set -x
case "$1" in
1)
echo "1)"
$(dirname $0)/exec_du.sh 001_du_check_dmesg.sh
sleep 3
;;
2)
echo "2)"
$(dirname $0)/exec_du.sh 002_du_check_interrupt.sh
sleep 3
;;
3)
echo "3)"
$(dirname $0)/exec_du.sh 003_du_ram_display_image_on_hdmi0.sh
sleep 3
;;
4)
echo "4)"
$(dirname $0)/exec_du.sh 004_du_ram_display_image_on_hdmi1.sh
sleep 3
;;
5)
echo "5)"
$(dirname $0)/exec_du.sh 005_du_ram_display_image_on_rgb.sh
sleep 3
;;
6)
echo "6)"
$(dirname $0)/exec_du.sh 006_du_sd1_display_image_on_hdmi0.sh
sleep 3
;;
7)
echo "7)"
$(dirname $0)/exec_du.sh 007_du_sd1_display_image_on_hdmi1.sh
sleep 3
;;
8)
echo "8)"
$(dirname $0)/exec_du.sh 008_du_sd1_display_image_on_rgb.sh
sleep 3
;;
9)
echo "9)"
$(dirname $0)/exec_du.sh 009_du_sd2_display_image_on_hdmi0.sh
sleep 3
;;
10)
echo "10)"
$(dirname $0)/exec_du.sh 010_du_sd2_display_image_on_hdmi1.sh
sleep 3
;;
11)
echo "11)"
$(dirname $0)/exec_du.sh 011_du_sd2_display_image_on_rgb.sh
sleep 3
;;
12)
echo "12)"
$(dirname $0)/exec_du.sh 012_du_counter_interupt_on_hdmi0.sh
sleep 3
;;
13)
echo "13)"
$(dirname $0)/exec_du.sh 013_du_counter_interupt_on_hdmi1.sh
sleep 3
;;
14)
echo "14)"
$(dirname $0)/exec_du.sh 014_du_counter_interupt_on_rgb.sh
sleep 3
;;
15)
echo "15)"
$(dirname $0)/exec_du.sh 015_du_display_image_on_hdmi0_after_suspend.sh 1
sleep 3
;;
16)
echo "16)"
$(dirname $0)/exec_du.sh 016_du_display_image_on_hdmi1_after_suspend.sh 1
sleep 3
;;
17)
echo "17)"
$(dirname $0)/exec_du.sh 017_du_display_image_on_rgb_after_suspend.sh 1
sleep 3
;;
18)
echo "18)"
$(dirname $0)/exec_du.sh 018_du_change_resolution_on_hdmi0.sh
sleep 3
;;
19)
echo "19)"
$(dirname $0)/exec_du.sh 019_du_change_resolution_on_hdmi1.sh
sleep 3
;;
20)
echo "20)"
$(dirname $0)/exec_du.sh 020_du_change_resolution_on_rgb.sh
sleep 3
;;
21)
echo "21)"
$(dirname $0)/exec_du.sh 021_du_change_color_on_hdmi0.sh
sleep 3
;;
22)
echo "22)"
$(dirname $0)/exec_du.sh 022_du_change_color_on_hdmi1.sh
sleep 3
;;
23)
echo "23)"
$(dirname $0)/exec_du.sh 023_du_change_color_on_rgb.sh
sleep 3
;;
24)
echo "24)"
$(dirname $0)/exec_du.sh 024_du_check_drm.sh
sleep 3
;;
25)
echo "25)"
$(dirname $0)/exec_du.sh 025_du_switch_hdmi0_to_du1.sh
sleep 3
;;
26)
echo "26)"
$(dirname $0)/exec_du.sh 026_du_switch_hdmi1_to_du2.sh
sleep 3
;;
27)
echo "27)"
$(dirname $0)/exec_du.sh 027_switch_rgb_to_du3.sh
sleep 3
;;
28)
echo "28)"
$(dirname $0)/exec_du.sh 028_du_change_format_screen_on_hdmi0.sh
sleep 3
;;
29)
echo "29)"
$(dirname $0)/exec_du.sh 029_du_change_format_screen_on_hdmi1.sh
sleep 3
;;
30)
echo "30)"
$(dirname $0)/exec_du.sh 030_du_change_format_screen_on_rgb.sh
sleep 3
;;
31)
echo "31)"
$(dirname $0)/exec_du.sh 031_du_change_resolution_from_hdmi0_to_du1.sh
sleep 3
;;
32)
echo "32)"
$(dirname $0)/exec_du.sh 032_du_change_resolution_from_hdmi1_to_du2.sh
sleep 3
;;
33)
echo "33)"
$(dirname $0)/exec_du.sh 033_du_change_resolution_from_rgb_to_du3.sh
sleep 3
;;
34)
echo "34)"
$(dirname $0)/exec_du.sh 034_du_change_offset_on_hdmi0.sh
sleep 3
;;
35)
echo "35)"
$(dirname $0)/exec_du.sh 035_du_change_offset_on_hdmi1.sh
sleep 3
;;
36)
echo "36)"
$(dirname $0)/exec_du.sh 036_du_change_offset_on_rgb.sh
sleep 3
;;
37)
echo "37)"
$(dirname $0)/exec_du.sh 037_du_change_layer_position_from_hdmi0_to_du1.sh
sleep 3
;;
38)
echo "38)"
$(dirname $0)/exec_du.sh 038_du_change_layer_position_from_hdmi1_to_du2.sh
;;
39)
echo "39)"
$(dirname $0)/exec_du.sh 039_du_change_layer_position_from_rgb_to_du3.sh
sleep 3
;;
esac
| true
|
8c3d3d61e3409e1337c02ef47454f12eac01256f
|
Shell
|
Gilbert-Gb-Li/sage-bigdata-audit
|
/bigdata-audit/src/main/resources/assemblies/bin/core.sh
|
UTF-8
| 3,009
| 3.6875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
BASE="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
################################
# params
################################
#parse comand name, app name, app parameter
OPERATE=$1
shift
APP_PARAMS=$@
################################
# constants
################################
#opts
APP_OPTS="`cat $BASE/conf/jvm.options|grep -v -e ^#|grep -v -e ^$|awk '{{printf"%s ",$0}}'`"
APP_MAIN="bigdata.audit.AuditApp"
APP_TMPDIR="/tmp"
#dir
JAVA="${JAVA_HOME}/bin/java"
APP_CONF="$BASE/conf"
APP_LOGS="$BASE/logs"
APP_PIDS="$BASE/pids"
APP_NAME="bigdata-audit"
APP_PID="${APP_PIDS}/${APP_NAME}.pid"
APP_LIB="$BASE/lib"
APP_JAR="`ls ${APP_LIB}/${APP_NAME}-*.jar`"
APP_CP="${APP_CONF}:${APP_JAR}:${APP_LIB}/*"
#file
APP_OUT="${APP_LOGS}/${APP_NAME}.out"
#port
APP_PORT="`cat ${APP_CONF}/audit.conf |grep api.port|cut -d '=' -f 2|cut -d ' ' -f 2`"
################################
# functions
################################
# Shell Colors
GREEN=$'\e[0;32m'
LGREEN=$'\e[1;32m'
RED=$'\e[0;31m'
LRED=$'\e[1;31m'
BLUE=$'\e[0;34m'
LBLUE=$'\e[1;34m'
RESET=$'\e[m'
function error() {
debug error $@
}
function debug() {
if [[ "$1" == "warn" ]]; then
shift
echo -e " ${LBLUE}$1${RESET}"
elif [[ "$1" == "info" ]]; then
shift
echo -e " ${BLUE}$1${RESET}"
elif [[ "$1" == "error" ]]; then
shift
echo -e " ${RED}ERROR:${LRED} $@${RESET}"
exit 1
else
echo -e $@
fi
}
function cmd() {
echo -e "CMD: $OPERATE \t ${APP_PARAMS}"
}
function env() {
cat << EOF
ENV:
APP_CONF: ${APP_CONF}
APP_LOGS: ${APP_LOGS}
APP_OUT : ${APP_OUT}
APP_LIB : ${APP_LIB}
APP_PORT: ${APP_PORT}
EOF
}
function init() {
mkdir -p ${APP_CONF} ${APP_LOGS} ${APP_PIDS}
}
function check() {
if [ -f ${APP_PID} ]; then
PID=`getPid`
if kill -0 ${PID} > /dev/null 2>&1; then
debug error "The ${APP_NAME} already started! PID: ${PID}"
exit 1
fi
fi
}
function start() {
check
cd ${BASE}
${JAVA} ${APP_OPTS} -cp ${APP_CP} ${APP_MAIN} ${APP_PARAMS} > ${APP_OUT} 2>&1 &
PID=$!
echo ${PID} > "${APP_PID}"
debug info "${APP_NAME}(pid ${PID}) is started."
}
function status() {
PID=`getPid`
if [ -n "$PID" ]; then
debug info "${APP_NAME}(pid ${PID}) is running..."
else
debug info "${APP_NAME} is not running."
fi
}
function getPid() {
# if [ -f ${APP_PID} ]; then
# return "`cat ${APP_PID}`"
# else
echo "` ps aux|grep java|grep ${APP_MAIN}|awk '{print $2}'`"
# fi
}
function stop() {
  # Send SIGTERM to the running instance, if any.
  # Fix: bail out when getPid returns nothing — previously an empty PID made
  # `kill -0` fail with a usage error; flow still reached the "not running"
  # branch, but stderr was polluted with kill's usage text.
  PID=$(getPid)
  if [ -n "${PID}" ] && kill -0 ${PID} > /dev/null 2>&1; then
    debug info "stopping ${APP_NAME} PID: ${PID}"
    kill ${PID}
  else
    debug error "${APP_NAME} is not running."
  fi
}
################################
# run
################################
# Prepare directories, print the command/environment banner, then dispatch
# on $OPERATE (expected values: start | stop | status; anything else is a
# silent no-op — NOTE(review): consider adding a *) usage branch).
init
cmd
env
echo "RES:"
case $OPERATE in
start)
start
;;
stop)
stop
;;
status)
status
;;
esac
| true
|
f9c6f8d4008ebc563c2d4185ac06b489c4b04725
|
Shell
|
arfletcher/creport
|
/lib/creport/ignore-uptodate-wordpress
|
UTF-8
| 779
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/ksh
# Post process the wordpress version script, throwing away those that are
# up to date.
#
# Used with "creport old-wordpress-versions"
VERS=/var/lib/misc/wordpress-curent-versions
# Find the most recent wordpress versions.
# Don't update the versions more than once a day because they lock us out.
if [ ! -f "$VERS" ] || [ "$(stat -c %Y "$VERS" )" -lt $(date +%s -d 'yesterday') ]
then
# Cache file is non existant or old.
{
# Scrape the current version number from the wordpress download page.
curl -s https://wordpress.org/download/ | sed -n -e 's/.*Download WordPress \([0-9]\)/\1/p' | sed -e 's/[[:space:]].*//'
# Wrap each version in \b word-boundary markers so the cache doubles as an
# egrep pattern file below.
} | sed -e 's/^/\\b/' -e 's/$/\\b/' > "$VERS"
fi
# Strip the \b markers back off and join versions with commas for display.
V=$(cat "$VERS" | sed -e 's/\\b//g' | ifne paste -d, -s)
# Exclude the recent versions from STDIN
egrep -v -f "$VERS" | sed -e "1i CURRENT $V"
| true
|
b2b1af34d919ee6d1d6337fbfc6722fd7aa1a217
|
Shell
|
Asura-one/tools
|
/autossh/autossh
|
UTF-8
| 1,696
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# set -x
# Config record format (one server per line in ~/.autosshrc):
# 'Tag|Description|Alias|IP|Username|Port|Password'
# e.g. $ echo 'test|my test box|test|192.168.1.1|root|22|password' > ~/.autosshrc
#
# @usage
# $ autossh // List Server
# $ autossh 1 // Login Num n Server
AUTO_SSH_CONF=~/.autosshrc
# Warn (in red) when the config file is empty or missing; otherwise cache
# all seven fields of every record in $server for later lookups.
if [[ $(cat "${AUTO_SSH_CONF}") == "" ]]; then
echo -e "\033[1;31m# Config(~/.autosshrc) Not Found # \033[0m";
echo -e "\033[1;31m# # \033[0m"
echo -e "\033[1;31m# # \033[0m"
else
server=$(awk -F\| '{print $1, $2, $3, $4, $5, $6, $7}' "${AUTO_SSH_CONF}")
fi
# List all configured servers (without passwords) in an aligned table, then
# prompt for — or accept as $1 — a Tag/Alias/IP and log in to it.
function list() {
# Re-read the config, keeping only the six non-secret columns.
info=$(awk -F\| '{print $1, $2, $3, $4, $5, $6}' "${AUTO_SSH_CONF}")
echo -e "Tag Description Alias Host Username Port\n${info}" | column -t
if [ "$1" != "" ]; then
name=$1
else
# Interactive selection; \c suppresses the trailing newline.
echo -e 'Server Tag/Alias/IP: \c'
read name
fi
login $name
}
# Log in to the server whose record matches $1 (Tag, Alias or IP).
# The whole-word grep picks the record; the array then holds
# (host, user, port, password) via intentional word-splitting.
# NOTE(review): sshpass -p exposes the password in the process list —
# consider SSHPASS env or -f file.
function login() {
name=$1
ssh_info=($(echo "${server}" | grep -w "${name}" | awk '{print $4, $5, $6, $7}'))
sshpass -p ${ssh_info[3]} ssh -o StrictHostKeyChecking=no -p ${ssh_info[2]} -l ${ssh_info[1]} ${ssh_info[0]}
}
function add() {
  # Append a new pipe-delimited server record ($1) to the config file.
  # Fix: quote both the record and the config path so entries containing
  # spaces (e.g. in the description field) are written intact.
  echo "$1" >> "${AUTO_SSH_CONF}"
}
function change() {
  # Not implemented yet.
  # Fix: the original body was the bare word "pass" (a Python-ism); bash
  # would try to execute a command named "pass" and return 127. The no-op
  # builtin ":" is the correct shell placeholder.
  :
}
function usage() {
  # Print a short usage synopsis and exit successfully.
  # Fix: corrected the misspelled "usege:" heading.
  echo "usage:"
  echo "autossh [-h] [-l] [-s <server alias>] [-a <server info>] [-c <server info>]"
  exit 0
}
# clear screen
clear
# Option dispatch. NOTE(review): with no options at all the script clears
# the screen and exits without output — confirm that is intended.
while getopts hls:a:c: ARGS
do
case $ARGS in
s)
# -s <tag/alias/ip>: log straight in.
login $OPTARG
;;
l)
# -l: list servers, then prompt for one.
list
;;
a)
# -a '<record>': append a server record to the config.
add $OPTARG
;;
c)
# -c: change a record (not implemented).
change $OPTARG
;;
h)
usage
;;
*)
usage
;;
esac
done
| true
|
71a6281159363d1ba93013928f40b069b152d5db
|
Shell
|
GioCS17/pi3_brams
|
/install_grads_v2_2.sh
|
UTF-8
| 9,922
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
# Based on http://cola.gmu.edu/grads/gadoc/supplibs2.html
# 26-10-2020
ROOT_PATH=/home/galvitez/brams_workspace/tools
GRADS="${ROOT_PATH}/grads"
SUPPLIBS="${GRADS}/supplibs"
function module_loads {
  # Load the toolchain modules needed for the build (Environment Modules).
  echo "Loading modules"
  local m
  for m in openmpi/2.1.6 gcc/5.5.0 cmake/3.16.5; do
    module load "$m"
  done
}
function create_directories {
  # Create the GrADS install tree and the supplemental-library prefix.
  # mkdir -p creates missing parents, so listing only the leaf directories
  # replaces the original eight separate mkdir calls.
  echo "Creating directories"
  mkdir -p "$GRADS/installed" \
           "$SUPPLIBS/tarfiles" \
           "$SUPPLIBS/src" \
           "$SUPPLIBS/lib" \
           "$SUPPLIBS/include" \
           "$SUPPLIBS/bin"
}
function install_readline {
echo "Installing readline"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/readline-5.0.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/readline-5.0.tar.gz
cd readline-5.0
./configure --prefix=$SUPPLIBS
make install
}
function install_ncurses {
echo "Installing ncurses"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/ncurses-5.7.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/ncurses-5.7.tar.gz
cd ncurses-5.7
./configure --prefix=$SUPPLIBS --without-ada --with-shared
make install
}
function install_zlib {
echo "Installing zlib"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/zlib-1.2.8.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/zlib-1.2.8.tar.gz
cd zlib-1.2.8
./configure --prefix=$SUPPLIBS
make install
}
function install_libpng {
echo "Installing libpng"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/libpng-1.5.12.tar.gz
#wget -nc -P $SUPPLIBS/tarfiles https://ftp-osl.osuosl.org/pub/libpng/src/libpng15/libpng-1.5.30.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/libpng-1.5.30.tar.gz
cd libpng-1.5.30
./configure --prefix=$SUPPLIBS
make install
}
function install_jpeg {
echo "Installing jpeg"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/jpegsrc.v6b.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/jpegsrc.v6b.tar.gz
cd jpeg-6b
./configure --prefix=$SUPPLIBS
make
cp libjpeg.a $SUPPLIBS/lib
cp *.h $SUPPLIBS/include
}
function install_gd {
echo "Installing gd"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/libgd-2.1.0.tar.gz
#wget -nc -P $SUPPLIBS/tarfiles https://github.com/libgd/libgd/releases/download/gd-2.2.5/libgd-2.2.5.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/libgd-2.2.5.tar.gz
cd libgd-2.2.5
./configure --prefix=$SUPPLIBS --with-png=$SUPPLIBS --with-jpeg=$SUPPLIBS --disable-shared
make install
}
function install_jasper {
echo "Installing jasper"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/jasper-1.900.1-14ubuntu3.2.debian.tar.gz
#wget -nc -P $SUPPLIBS/tarfiles https://www.ece.uvic.ca/~frodo/jasper/software/jasper-1.900.16.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/jasper-1.900.16.tar.gz
cd jasper-1.900.16
./configure --prefix=$SUPPLIBS --with-png=$SUPPLIBS --with-jpeg=$SUPPLIBS
make install
}
# Build NCEP g2clib and install it as libgrib2c.a.
# NOTE(review): wget fetches g2clib-1.6.0.tar.gz but tar extracts
# g2clib-1.6.0.tar — confirm the archive name on disk.
function install_g2clib {
echo "Installing j2clib"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/g2clib-1.6.0.tar.gz
cd $SUPPLIBS/src
tar xvf $SUPPLIBS/tarfiles/g2clib-1.6.0.tar
cd g2clib-1.6.0
# Escape every "/" in the prefix so it can be spliced into a sed script.
SED_SUPPLIBS=${SUPPLIBS////\\/}
# Rewrite makefile line 20 (include paths) and line 28 (CFLAGS) in place —
# brittle: depends on the exact line numbers of this g2clib release.
sed -i "20s/.*/INC=-I$SED_SUPPLIBS\/include -I$SED_SUPPLIBS\/include\/libpng15\//" makefile
sed -i "28s/.*/CFLAGS= -O3 -g -m64 \$(INC) \$(DEFS) -D__64BIT__ -fPIC/" makefile
make
cp -f libg2c_v1.6.0.a $SUPPLIBS/lib/libgrib2c.a
cp -f grib2.h $SUPPLIBS/include
}
function install_szip {
echo "Installing szip"
wget -nc -P $SUPPLIBS/tarfiles https://support.hdfgroup.org/ftp/lib-external/szip/2.1.1/src/szip-2.1.1.tar.gz
cd $SUPPLIBS/src
tar xvf $SUPPLIBS/tarfiles/szip-2.1.1.tar.gz
cd szip-2.1.1/
./configure --prefix=$SUPPLIBS
make
make install
}
function install_udunits {
echo "Installing udunits"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/udunits-1.11.7.tar.gz
#wget -nc -P $SUPPLIBS/tarfiles https://www.unidata.ucar.edu/downloads/udunits/udunits-2.2.25.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/udunits-2.2.25.tar.gz
cd udunits-2.2.25
./configure --prefix=$SUPPLIBS --disable-shared
make
make install
}
function install_hdf4 {
echo "Installing hdf4"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/HDF4.2r3.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/CMake-hdf-4.2.15.tar.gz
cd CMake-hdf-4.2.15/
cmake -D CMAKE_INSTALL_PREFIX=$SUPPLIBS hdf-4.2.15/
make
make install
}
function install_hdf5 {
echo "Installing hdf5"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/hdf5-1.8.11.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/CMake-hdf5-1.10.7.tar.gz
cd CMake-hdf5-1.10.7/
cmake -D CMAKE_INSTALL_PREFIX=$SUPPLIBS/ hdf5-1.10.7/
make
make install
}
function install_curl {
echo "Installing curl"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/curl-7.35.0.tar.gz
cd $SUPPLIBS/src
tar -xvf $SUPPLIBS/tarfiles/curl-7.67.0.tar.gz
cd curl-7.67.0/
./configure --prefix=$SUPPLIBS
make
make install
}
function install_netcdf {
echo "Installing netcdf"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/netcdf-4.3.3.tar.gz
cd $SUPPLIBS/src
tar -xvf $SUPPLIBS/tarfiles/v4.7.3.tar.gz
cd netcdf-c-4.7.3/
mkdir build
cd build
cmake ../ -DCMAKE_INSTALL_PREFIX=$SUPPLIBS -DCMAKE_FIND_ROOT_PATH=$SUPPLIBS/
make
make install
}
function install_tiff {
echo "Installing diff"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/tiff-3.8.2.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/tiff-3.8.2.tar.gz
cd tiff-3.8.2
./configure --prefix=$SUPPLIBS
make install
}
function install_geotiff {
echo "Installing geotiff"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/libgeotiff-1.2.5.tar.gz
cd $SUPPLIBS/src
tar xvzf $SUPPLIBS/tarfiles/libgeotiff-1.2.5.tar.gz
cd libgeotiff-1.2.5
./configure --prefix=$SUPPLIBS --enable-incode-epsg --enable-static --with-libtiff=$SUPPLIBS
make
make install
}
function install_shapelib {
echo "Installing shapelib"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/shapelib-1.2.10.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/shapelib-1.2.10.tar.gz
cd shapelib-1.2.10
sed -i "3s/.*/CFLAGS = -g -fPIC/" Makefile
sed -i 's/-g -O2/-g -fPIC -O2/g' Makefile
make all lib
cp -f .libs/libshp.a $SUPPLIBS/lib
cp -f shapefil.h $SUPPLIBS/include
cp -f shpcreate shpadd shpdump shprewind dbfcreate dbfadd dbfdump shptest $SUPPLIBS/bin
}
function install_xml2 {
echo "Installing xml2"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/libxml2-2.9.0.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/libxml2-2.9.0.tar.gz
cd libxml2-2.9.0
./configure --prefix=$SUPPLIBS --with-zlib=$SUPPLIBS --without-threads --without-iconv --without-iso8859x --without-lzma
make install
}
function install_xrender {
echo "Installing xrender"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/libXrender-0.9.6.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/libXrender-0.9.6.tar.gz
cd libXrender-0.9.6/
./configure --prefix=$SUPPLIBS
make install
}
# TODO: ERROR
function install_pkgconfig {
echo "Installing pkgconfig"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/pkgconfig-0.23.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/pkg-config-0.23.tar.gz
cd pkg-config-0.23
./configure --prefix=$SUPPLIBS
make install
export PKG_CONFIG=$SUPPLIBS/bin/pkg-config
export PKG_CONFIG_PATH=$SUPPLIBS/lib/pkgconfig
}
function install_libdap {
echo "Installing libdap"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/libdap-3.7.8-modified.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/libdap-3.18.1.tar.gz
cd libdap-3.18.1/
export CPPFLAGS=-I${SUPPLIBS}/include
./configure --prefix=$SUPPLIBS --with-curl=$SUPPLIBS
make install
}
# TODO: Fixing
function install_gapad {
echo "Installing gadap"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/Supplibs/2.2/src/gadap-2.1.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/gadap-2.1.tar.gz
cd gadap-2.1
export PATH=$SUPPLIBS/bin:$PATH
export CPPFLAGS=-I${SUPPLIBS}/include
./configure --prefix=$SUPPLIBS
make install
}
# TODO: Missing
function install_pixman {
echo "Installing pixman"
}
# TODO: Missing
function install_freetype {
echo "Installing freetype"
}
# TODO: Missing
function install_fontconfig {
echo "Installing fontconfig"
}
# TODO: Missing
function install_cairo {
echo "Installing cairo"
}
function install_grads {
echo "Installing grads"
wget -nc -P $SUPPLIBS/tarfiles ftp://cola.gmu.edu/grads/2.2/grads-2.2.0-src.tar.gz
cd $SUPPLIBS/src
tar xvfz $SUPPLIBS/tarfiles/grads-2.2.0-src.tar.gz
cd grads-2.2.0
./configure --prefix=$GRADS/installed \
SUPPLIBS=$SUPPLIBS --disable-dyn-supplibs \
CFGLAGS=-I$SUPPLIBS/include \
LDFLAGS=-L$SUPPLIBS/lib
make
make install
}
function module_unloads {
  # Unload the toolchain modules that module_loads pulled in.
  echo "Unloading modules"
  local m
  for m in openmpi/2.1.6 gcc/5.5.0 cmake/3.16.5; do
    module unload "$m"
  done
}
# Driver: run the build steps in dependency order. Steps that are commented
# out are optional extras (OPeNDAP/cairo stack) that were never finished.
module_loads
create_directories
install_readline
install_ncurses
install_zlib
install_libpng
install_jpeg
install_gd
install_jasper
install_g2clib
install_szip
install_udunits
install_hdf4
install_hdf5
install_curl
install_netcdf
install_tiff
install_geotiff
install_shapelib
#install_xml2
#install_xrender
#install_pkgconfig
#install_libdap
#install_gapad
#install_pixman #TODO
#install_freetype #TODO
#install_fontconfig #TODO
#install_cairo #TODO
install_grads
module_unloads
| true
|
f274ef1cc1006ca9994debf604bac7b1abf916a4
|
Shell
|
damaainan/Reading
|
/philosophy/Buddhism/2.sh
|
UTF-8
| 924
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo: inspect the first lines of 心经.md byte-by-byte. Bytes > \177 are the
# lead bytes of multi-byte (Chinese) characters. Earlier two-byte variant
# kept for reference:
#sed -n '1,2p' 心经.md | awk '{for (i=1; i<=length($1); ++i) if (substr($1, i, 1) > "\177") {print substr($1, i, 2) "\t中文"; ++i;} else {print substr($1, i, 1) "\tNot Chinese Character";}}'
sed -n '1,2p' 心经.md | awk '{for (i=1; i<=length($1); ++i) if (substr($1, i, 1) > "\177") {print substr($1, i, 1) "\t中文"; }}'
sed -n '1,2p' 心经.md | awk '{print length($1)}'
sed -n '1,2p' 心经.md | awk '{ {print substr($1, 1, 1); }}'
sed -n '1,2p' 心经.md | awk '{ {print substr($1, 2, 1); }}'
sed -n '1,2p' 心经.md | awk '{ {print substr($1, 3, 1); }}'
sed -n '1,2p' 心经.md | awk '{ {print substr($1, 4, 1); }}'
# Print lines whose first field is non-empty (filters blank lines).
sed -n '1,$p' 1.txt | awk '{if (length($1) ) {print $1}}'
# Print all pinyin content lines, filtering blank lines.
sed -n '1,$p' 2.txt | awk -F: '{if (length($1) ) {print $1}}'
# awk splits on whitespace by default; using ':' as the separator keeps
# space-containing fields intact.
| true
|
3abb631f0d88a45ca8e03e6b484c997631817e32
|
Shell
|
amargaritov/PTEMagnet_AE
|
/evaluation/show_results.sh
|
UTF-8
| 532
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
# Summarize benchmark results: average the Full_time values recorded in each
# results subdirectory and feed the per-config averages to calc_impr.py.
# Usage: show_results.sh [results_dir]  (defaults to $REPO_ROOT/evaluation/results)
RESULTS_DIR="$1"
if [ -z "$1" ]
then
RESULTS_DIR=$REPO_ROOT/evaluation/results
fi
echo "Using results from $RESULTS_DIR"
if [ ! -d $RESULTS_DIR ]; then
echo "can't find directory with the results. Are you sure it exists?"
else
# For every subdir: mean of the 2nd column of all Full_time lines, printed as
# "<name> <avg>"; the seds turn "-on-" / "-kernel " separators into CSV
# columns before piping into the improvement calculator.
for i in $(ls $RESULTS_DIR); do pushd $RESULTS_DIR/$i > /dev/null; avg_time=$(grep -R Full_time | awk '{ sum += $2; n++ } END { if (n > 0) print sum / n; }'); echo "$i $avg_time"; popd > /dev/null ; done | sed 's/-on-/,/g' | sed 's/-kernel\ */,/g' | $REPO_ROOT/evaluation/calc_impr.py
fi
| true
|
9f359a58ca15125a443a650aabe89cf42aca523a
|
Shell
|
CmsHI/ElectroWeak-Jet-Track-Analyses
|
/ShellScripts/Performance/runPrintRunLumiEvent.template.sh
|
UTF-8
| 1,001
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch printRunLumiEvent.exe in the background for each config/output pair.
runCmd="./ShellScripts/myRun.sh"
progPath="./Performance/printRunLumiEvent.exe"
inputFileTmp="Configurations/filelists/Pythia8_AllQCDPhoton30Flt30_Hydjet_Cymbal_MB_HINPbPbWinter16DR_FOREST.list"
# Work on a trimmed copy (first 10 files) of the file list.
inputFile="${inputFileTmp/.list/_TMP.list}"
cat $inputFileTmp | head -10 > $inputFile
configFiles=(
"Configurations/examples/printRunLumiEvent.conf"
);
# Output root: $EWJTAOUT if set, otherwise a per-user scratch directory.
outDirBase=$EWJTAOUT
if [ -z "$outDirBase" ]; then
  outDirBase="/export/d00/scratch/"$USER"/EWJTA-out"
fi
outputSuffix="Pythia8_AllQCDPhoton30Flt30_Hydjet_Cymbal_MB"
outList=(
$outDirBase"/Configurations/examples/printRLE_"$outputSuffix".txt"
);
# configFiles and outList are parallel arrays indexed together.
arrayIndices=${!outList[*]}
for i1 in $arrayIndices
do
configFile=${configFiles[i1]}
outputFile=${outList[i1]}
# Log file sits next to the output, with .log instead of .txt.
outputFileLOG="${outputFile/.txt/.log}"
outDir=$(dirname "${outputFile}")
mkdir -p $outDir
# Run detached; echo the exact command for the operator's record.
$runCmd $progPath $configFile $inputFile $outputFile &> $outputFileLOG &
echo "$runCmd $progPath $configFile $inputFile $outputFile &> $outputFileLOG &"
done
| true
|
47b1bd755e7707ae0ede0c7513c7d28d98432767
|
Shell
|
dtinth/eXceedGit
|
/newuser
|
UTF-8
| 240
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Create a git-only account: home under /git/users, git-shell login, and a
# bare repository at $HOME/repo.git.
# Usage: newuser <username> <password>   (must run as root)
USERNAME=$1
DIR=/git/users/$USERNAME
PASSWORD=$2
set -e
set -v
# Fix: quote "$DIR" (and keep "$USERNAME" quoted) so a username containing
# whitespace/globs cannot split or expand the paths.
useradd -d "$DIR" -m -s /usr/bin/git-shell "$USERNAME"
# NOTE(review): the password is passed on the command line and is briefly
# visible in `ps` — consider reading it from stdin instead.
echo "$USERNAME:$PASSWORD" | chpasswd
sudo -u "$USERNAME" git init --bare "$DIR/repo.git"
cd "$DIR/repo.git"
| true
|
7ad4a0b85283f52a01885152effb971b7dbae1eb
|
Shell
|
zhongzhd/ont_m6a_detection
|
/optimal_cut-off_selection/compare_tools.sh
|
UTF-8
| 1,176
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Compare m6A detection tools: filter candidate sites on chr1, label them
# against a validated set, and compute precision/recall/F1 per tool.
# Path to the validated m6A site BED file — must be edited before running.
validated_set_bed=path_to_validated_set
cp coordinate_transformation/compare_tools/*chr1.bed .
#choose sites: keep rows with coverage/score (column 5) >= 5
for file in *chr1.bed
do
awk '$5>=5' $file > ${file%%1.bed}2.bed
done
#calculate F1 scores
for file in *chr2.bed
do
# Sites overlapping the validated set (strand-aware) are the positives.
bedtools intersect -a $file -b $validated_set_bed -wa -s -u > ${file%%chr2.bed}m6a.bed
# Label every site: 1 if its id (column 7) is in the positive set, else 0.
awk 'BEGIN{OFS="\t"}''NR==FNR{a[$7]=$7;next}{if($7 in a){print $7,1}else{print $7,0}}' ${file%%chr2.bed}m6a.bed $file > ${file%%chr2.bed}id_label.txt
# Column 4 holds the tool's prediction score.
cut -f 4 $file > ${file%%chr2.bed}pred.txt
paste ${file%%chr2.bed}id_label.txt ${file%%chr2.bed}pred.txt > ${file%%chr2.bed}id_label_pred.txt
done
# Per-tool F1 sweep (variant scripts differ in score orientation/format).
python F1_thr_tsv.py Tombo_com_id_label_pred.txt Tombo_com.tsv
python F1_thr_tsv1.py Nanocompore_id_label_pred.txt Nanocompore.tsv
python F1_thr_tsv2.py Xpore_id_label_pred.txt Xpore.tsv
python F1_thr_tsv.py DiffErr_id_label_pred.txt DiffErr.tsv
python F1_thr_tsv1.py DRUMMER_id_label_pred.txt DRUMMER.tsv
python F1_thr_tsv1.py ELIGOS_diff_id_label_pred.txt ELIGOS_diff.tsv
python F1_thr_tsv.py Epinano_delta_id_label_pred.txt Epinano_delta.tsv
#output tsv format
#precision recall F1_score thresholds
#0.18720218950887643 1.0 0.31536698830772625 0.0
| true
|
8ba54349b6249e46efee239201e7aee4daccf07e
|
Shell
|
Fattouche/Stratocumulus
|
/src/service_images/django/cumulus-docker-entrypoint.sh
|
UTF-8
| 3,386
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Entrypoint for the Cumulus Django service container.
#
# Called with "INIT" once to scaffold the Django project and wire it to the
# selected neighbouring services; on normal runs it waits for its declared
# dependencies and then execs the container command.
#
# Environment:
#   CUMULUS_PROJECT_NAME - name of the Django project directory
#   CUMULUS_SERVICES     - comma-separated services to configure at init
#   CUMULUS_WAIT_FOR     - comma-separated services to block on at run time
if [ "$1" == "INIT" ]
then
    # User's cumulus config and code is mounted into /cumulus
    cd /cumulus
    if [ ! -d "django" ]
    then
        mkdir django
    fi
    # Only scaffold the project if it doesn't already exist.
    if [ ! -d "django/${CUMULUS_PROJECT_NAME}" ]
    then
        cd django
        django-admin startproject ${CUMULUS_PROJECT_NAME}
    fi
    # Perform any configuration necessary to communicate with neighbouring services
    for service in ${CUMULUS_SERVICES//,/ } # iterate over comma-separated string
    do
        if [ "${service}" == "mysql" ]
        then
            # Work around the MySQL docker container bug
            # (https://github.com/docker-library/mysql/issues/448#issuecomment-403552073):
            # the database name is fixed here and written into Django's
            # settings instead of being passed to MySQL at init.
            MYSQL_DATABASE="${CUMULUS_PROJECT_NAME}_default"
            cd /service
            python modify-django-settings.py /cumulus/django/${CUMULUS_PROJECT_NAME}/${CUMULUS_PROJECT_NAME}/settings.py \
                --mysql-config-path /cumulus/mysql/my.cnf \
                --mysql-db ${MYSQL_DATABASE}
        fi
        if [ "${service}" == "memcached" ]
        then
            cd /cumulus/django/${CUMULUS_PROJECT_NAME}
            pip install python-memcached
            cd /service
            python modify-django-settings.py /cumulus/django/${CUMULUS_PROJECT_NAME}/${CUMULUS_PROJECT_NAME}/settings.py \
                --memcached
        fi
        if [ "${service}" == "elasticsearch" ]
        then
            cd /cumulus/django/${CUMULUS_PROJECT_NAME}
            # Fix: was "pip intsall" — the pinned elasticsearch_dsl install
            # silently failed with "command not found".
            # Pin required due to https://github.com/sabricot/django-elasticsearch-dsl/issues/119
            pip install elasticsearch_dsl==6.1.0
            pip install django-elasticsearch-dsl
            cd /service
            python modify-django-settings.py /cumulus/django/${CUMULUS_PROJECT_NAME}/${CUMULUS_PROJECT_NAME}/settings.py \
                --elastic-search
        fi
        if [ "${service}" == "redis" ]
        then
            cd /cumulus/django/${CUMULUS_PROJECT_NAME}
            pip install django-rq
            cd /service
            python modify-django-settings.py /cumulus/django/${CUMULUS_PROJECT_NAME}/${CUMULUS_PROJECT_NAME}/settings.py \
                --redis=/cumulus/django/${CUMULUS_PROJECT_NAME}/${CUMULUS_PROJECT_NAME}/urls.py
        fi
    done
else
    cd /cumulus
    cd "django/${CUMULUS_PROJECT_NAME}"
    # Wait for any services that we need to wait for
    for service in ${CUMULUS_WAIT_FOR//,/ }
    do
        if [ "${service}" == "mysql" ]
        then
            # The service-internal port is always 3306 regardless of the
            # host-side port mapping.
            bash /service/wait-for-it.sh mysql:3306 --timeout=300
        fi
        if [ "${service}" == "memcached" ]
        then
            pip install python-memcached # Just in case
            bash /service/wait-for-it.sh memcached:11211 --timeout=300
        fi
        if [ "${service}" == "elasticsearch" ]
        then
            pip install django-elasticsearch-dsl # Just in case
            bash /service/wait-for-it.sh elasticsearch:9200 --timeout=300
        fi
        if [ "${service}" == "redis" ]
        then
            pip install django-rq # Just in case
            bash /service/wait-for-it.sh redis:6379 --timeout=300
        fi
    done
    # Hand control to the real container command.
    exec "$@"
fi
| true
|
555a3a3d6b24c62e11b6c58116f10880a0e70155
|
Shell
|
ercanlab/chromosome_conformation_capture
|
/homer_hic_analysis.sh
|
UTF-8
| 9,368
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
#SBATCH --verbose
#SBATCH --job-name=homer_hic
#SBATCH --output=/scratch/mrp420/reports/slurm_homerhic_%j.out
#SBATCH --error=/scratch/mrp420/reports/slurm_homerhic_%j.err
#SBATCH --time=24:00:00
#SBATCH --nodes=1
#SBATCH --mem=125GB
#SBATCH --mail-type=ALL
#SBATCH --mail-user=mrp420@nyu.edu
#------------------------------------------------------------------------------#
# INSTRUCTIONS #
#------------------------------------------------------------------------------#
#Use the script to map fastq files from Hi-C experoments.
#Once mapping is complete matrices are built and background subtraacted models built.
#Compartment analysis is then completed.
#Lastly, inputs for the visualisation software with Juicebox
#Homer package is used to conduct this analysis
#Argument options:
# EXPID - ID for experiment that will be used in creating output files and directory
# FQ1 - Fastq file 2
# FQ2 - Fastq file 1
# DIG - Restriction site
### EXAMPLE:
# sbatch --export \
# EXPID="meyer2017_wt_rep1",\
# FQ1='/scratch/cgsb/ercan/hic/newmeyer/SRR1665088_1.fastq',\
# FQ2='/scratch/cgsb/ercan/hic/newmeyer/SRR1665082_2.fastq',\
# DIG='GATC',\
# GENOME='sacCer3' #or ce10 or dm6
# BOWTIE='~/yeast/genomes/SK1_yue/SK1_yue'
# ~/yeast/scripts/homer_hic.sh
#------------------------------------------------------------------------------#
# Functions #
#------------------------------------------------------------------------------#
function elapsed_time() {
  # Pretty-print the wall-clock time elapsed since the epoch-seconds
  # timestamp in $1, using the largest convenient unit (sec / min / hr).
  # ENDTIME/TIME stay global, matching the original's side effects.
  ENDTIME=$(date +%s)
  TIME=$(( ENDTIME - $1 ))
  if (( TIME < 60 )); then
    echo "$TIME sec"
  elif (( TIME < 3600 )); then
    echo "$(( TIME / 60 )) min"
  else
    echo "$(( TIME / 60 / 60 )) hr"
  fi
}
function check_arg() {
  # Echo a required argument, or complain and abort when it is empty/unset.
  if [ -n "$1" ]; then
    echo "Input argument is $1"
  else
    echo ">>>>> Please provide values for all required arguments"
    exit 1
  fi
}
#------------------------------------------------------------------------------#
#                                 IO checks                                    #
#------------------------------------------------------------------------------#
# Check arguments
check_arg $EXPID
check_arg $FQ1
check_arg $FQ2
check_arg $DIG
check_arg $GENOME
check_arg $BOWTIE
# Check input files / dirs
[ -f $FQ1 ] || { echo "Could not find file: $FQ1"; exit 1; }
[ -f $FQ2 ] || { echo "Could not find file: $FQ2"; exit 1; }
#Make output directory
output_dir=/scratch/mrp420/homer_hic/$EXPID
# NOTE(review): plain mkdir fails if the directory already exists — consider
# mkdir -p "$output_dir".
mkdir /scratch/mrp420/homer_hic/$EXPID
#Load in packages (HPC Environment Modules)
module load homer/intel/4.10.1
module load bowtie2/intel/2.3.2
module load samtools/intel/1.3.1
module swap r/intel/3.4.2
#------------------------------------------------------------------------------#
# #
# Run pipeline #
# #
#------------------------------------------------------------------------------#
# Record the start time for the elapsed_time report at the end of the run.
STARTTIME=$(date +%s)
# Startup banner.
# Fix: removed a stray "ad" line that had been typed into the middle of the
# first continued echo — it turned that command into `echo ad` and then
# tried to execute the dashed-line string as a command ("command not found").
echo \
"------------------------------------------------------------------------------"
echo ">>>>> Started Homer HiC analysis: $EXPID"
echo \
"------------------------------------------------------------------------------"
date
#------------------------------------------------------------------------------#
# Align reads to reference genome with Bowtie #
#------------------------------------------------------------------------------#
cd $output_dir
# Trim. This cuts the reads at restriction sites. This allows better mapping. I
# If ligation junction happen within your read then the read will have multiple distinct locations it can map to, but overall no single place.
echo 'Start Trimming' $(date +%r)
homerTools trim -3 $DIG -mis 0 -matchStart 20 -min 20 $FQ1
homerTools trim -3 $DIG -mis 0 -matchStart 20 -min 20 $FQ2
echo 'End Trimming' $(date +%r)
# Map trimmed files to the genome seperately
echo 'Start mapping' $(date +%r)
bowtie2 --local -p 20 -x $BOWTIE -U ${FQ1}.trimmed > ${output_dir}/${EXPID}_1.sam
bowtie2 --local -p 20 -x $BOWTIE -U ${FQ2}.trimmed > ${output_dir}/${EXPID}_2.sam
echo 'End mapping' $(date +%r)
#Clean up
rm ${FQ1}.*
rm ${FQ2}.*
#------------------------------------------------------------------------------#
# Assign mapped reads to restriction enzyme fragemnt and pair with PE partner #
#------------------------------------------------------------------------------#
# tag assignment. Initial assignment is put in subdirectory that will be home for this unfiltered tag assignment.
mkdir unfiltered
echo 'Start tag assignment' $(date +%r)
makeTagDirectory unfiltered ${output_dir}/${EXPID}_1.sam,${output_dir}/${EXPID}_2.sam
echo 'End tag assignment' $(date +%r)
# Filter tag assignment. Filters detailed:
# -removePEbg : Removes fragments likely to derive from a contiguous piece of DNA
# -restrictionSite : Removes read that are far from a restriction enzyme site
# -removeSelfLigation : Remove reads if their ends form a self ligation with adjacent restriction sites
# -removeSpikes : Remove spikes with 5x the number of reads over the average for a 10Kb region
# -tbp 1 : Only keep unique mapped partners to remove any PCR duplicates
echo 'Filter tag assignment' $(date +%r)
cp unfiltered/* .
makeTagDirectory . -update -genome $GENOME -removePEbg -restrictionSite $DIG -removeSelfLigation -removeSpikes 10000 5 -tbp 1
echo 'Filter tag assignment' $(date +%r)
###I'm here!
#------------------------------------------------------------------------------#
# Normalise the matrix and create a model fro background interactions #
#------------------------------------------------------------------------------#
# Normalize the marix
echo 'Create output matrix' $(date +%r)
analyzeHiC . -res 10000 -o ${EXPID}_output_matrix.txt -cpu 8
echo 'Create output matrix' $(date +%r)
echo 'Normalize matrix with balancing' $(date +%r)
analyzeHiC . -res 10000 -balance -o ${EXPID}_balanced_output_matrix.txt -cpu 8
echo 'Normalized matrix' $(date +%r)
# Background subtraction
echo 'Binning and background subtraction' $(date +%r)
analyzeHiC . -res 10000 -bgonly -cpu 8
echo 'Binning and background subtraction' $(date +%r)
# Distance normalized
echo 'Binning and background subtraction' $(date +%r)
analyzeHiC . -res 10000 -cpu 8 -norm -o ${EXPID}_distnorm_output_matrix.txt echo 'Binning and background subtraction' $(date +%r)
#Pearson correlation matrices
echo 'Pearson correlation' $(date +%r)
analyzeHiC . -res 10000 -cpu 8 -corr -o ${EXPID}_pearson_correlation_output_matrix.txt
echo 'Binning and background subtraction' $(date +%r)
#------------------------------------------------------------------------------#
# Do PCA analysis to extract compartment coordinates #
#------------------------------------------------------------------------------#
# PCA
echo 'Start PCA' $(date +%r)
/share/apps/homer/4.10.1/intel/bin/runHiCpca.pl pcaOut . -rpath /share/apps/r/3.4.2/intel/bin/R -res 25000 -superRes 50000 -genome $GENOME -cpu 10
echo 'End PCA' $(date +%r)
# PCA
echo 'Start PCA with epigentic info' $(date +%r)
/share/apps/homer/4.10.1/intel/bin/runHiCpca.pl pcaOut_emb_H3K27ac . -rpath /share/apps/r/3.4.2/intel/bin/R -res 25000 -superRes 50000 -active ~/worms/files/MACS143_e10_N2_emb_H3K27ac_LW201_LW204_LW215_ext173_159_174_peaks_final.bed -cpu 10
/share/apps/homer/4.10.1/intel/bin/runHiCpca.pl pcaOut_L3_H3K27ac . -rpath /share/apps/r/3.4.2/intel/bin/R -res 25000 -superRes 50000 -active ~/worms/files/MACS143_e10_N2_L3_H3K27ac_FE1_CTTGTA_L001_ME5_CAGATC_L005_5054_peaks_final.bed -cpu 10
echo 'End PCA with epigentic info' $(date +%r)
#------------------------------------------------------------------------------#
# Make Juicebox comaptible files #
#------------------------------------------------------------------------------#
#Juicebox
echo 'Make juicebox file' $(date +%r)
/share/apps/homer/4.10.1/intel/bin/tagDir2hicFile.pl . -juicer auto -genome $GENOME -p 10 -rpath /share/apps/r/3.4.2/intel/bin/R
echo 'End juicebox file' $(date +%r)
#------------------------------------------------------------------------------#
# Report Significant interactions #
#------------------------------------------------------------------------------#
echo 'Check for signicant reads' $(date +%r)
analyzeHiC . -res 10000 -interactions ${EXPID}_significant_interactions.txt -nomatrix
echo 'Check for signicant reads' $(date +%r)
#------------------------------------------------------------------------------#
# Remove extra files #
#------------------------------------------------------------------------------#
rm *.sam
#------------------------------------------------------------------------------#
ELAPSEDTIME=$(elapsed_time $STARTTIME)
echo
echo "-----"
echo "-----"
echo "Completed pipeline in $ELAPSEDTIME"
echo \
"------------------------------------------------------------------------------"
exit 0;
| true
|
b4087915534df7a4ece2a7d1bc9720c5044dbe5d
|
Shell
|
epandurski/cmbarter
|
/docker/configure_nginx.sh
|
UTF-8
| 495
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# This file is used by "Dockerfile.proxy".
set -e

# Render the nginx config from its template. Restricting envsubst to the two
# expected variables keeps any other "$..." text in the template intact.
envsubst '$PROXY_PASS_TO:$CMBARTER_HOST' < /etc/nginx/nginx.template > /etc/nginx/nginx.conf

# Install the TLS certificate/key pair only when both secrets are present.
if [[ -e /run/secrets/cert.pem && -e /run/secrets/key.pem ]]; then
    cp /run/secrets/cert.pem /run/secrets/key.pem /etc/nginx/ssl/
fi

# Firewall: drop inbound by default, allow established traffic, and
# rate-limit new connections (simple flood mitigation).
iptables -P INPUT DROP
iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
iptables -A INPUT -m state --state NEW -m limit --limit 5000/second --limit-burst 250 -j ACCEPT

# Hand control to the container's CMD.
exec "$@"
| true
|
29623ae897730915d2173c20f5ea607fccd5964a
|
Shell
|
ajneu/fractionizer
|
/images/latex2xml/latex2xml.sh
|
UTF-8
| 1,059
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
## Convert <stem>.tex to HTML5 via LaTeXML (tex -> xml -> html), then strip
## the LaTeXML footer from the generated HTML.
## http://www.albany.edu/~hammond/demos/Html5/arXiv/lxmlexamples.html
## sudo apt-get install latexml
## sudo apt-get instal xmlstarlet # used below to remove the footer
pname=`basename $0`
# Require exactly one argument: the stem name (file name without ".tex").
if [ "$#" != "1" ] ; then
echo "Usage: ${pname} stem-name"
exit 1
fi
stem="$1"
if [ ! \( -f "${stem}.tex" \) ] ; then
echo "${pname}: Cannot find ${stem}.tex"
exit 2
fi
# Stage 1: LaTeX -> LaTeXML XML.
latexml "--destination=${stem}.xml" "${stem}.tex"
if [ "$?" != "0" ] ; then
echo "${pname}: latexml did not finish cleanly on ${stem}.tex"
exit 3
fi
if [ ! \( -f "${stem}.xml" \) ] ; then
echo "${pname}: Cannot find latexml output file ${stem}.xml"
exit 4
fi
# Stage 2: XML -> HTML5 with presentation MathML.
# Note: a latexmlpost failure is reported but does not abort the script.
latexmlpost --format=html5 "--destination=${stem}.html" --presentationmathml "${stem}.xml"
if [ "$?" != "0" ] ; then
echo "${pname}: latexmlpost did not finish cleanly on ${stem}.xml"
fi
## remove footer
# NOTE(review): "go.html" is passed as the input file to `xmlstarlet fo`,
# so the piped-in ${stem}.html is ignored unless go.html is meant to exist
# here — this looks like a leftover test filename; verify (stdin would be
# "-").
cat "${stem}.html" | xmlstarlet -q fo --noindent --html --omit-decl go.html | xmlstarlet ed -O -P -d "/html/body/div/footer" > "${stem}.html2"
mv "${stem}.html2" "${stem}.html"
| true
|
920815bd295beeb2b6d6858a46c8b649546b786d
|
Shell
|
Aeronavics/ValleyForge
|
/bld/code_compliance/checkfuncs_structurename
|
UTF-8
| 8,438
| 3.90625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Copyright (C) 2011 Unison Networks Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
###########################################################################################################################
###
### Name: checkfuncs_structurename
###
### Author: Ravi Shankar
###
### Date Created: 12-01-2015
###
### Type: Bash Function Definitions
###
### Purpose: Implements code compliance tests: tests for structure name validity.
###
###########################################################################################################################
######################################## FUNCTION #############################################
###
### Name: compliance_check_file_structurename
###
### Inputs/Outputs: Path to the file to test.
###
### Purpose: Tests a single C/CPP source or header file for valid structure names.
###
###############################################################################################
compliance_check_file_structurename()
{
	# Test a single C/C++ source or header file for compliant structure
	# names (must start with an uppercase letter) and for structures that
	# wrongly contain C++ access modifiers.
	# $1 - path of the file to check.
	# Reports failures via compliance_fail_line / compliance_fail_file
	# (defined elsewhere in the compliance framework).

	# The file we're checking was the first argument we received.
	local FILE=$1

	# Arrays which are used as flags to denote if any error occurs,
	# indexed by line number.
	unset array_error_flag_struct_name
	unset array_error_flag_access_mod

	# Check a single line (from the LINE global) for a one-line struct
	# declaration whose name starts with a lowercase letter.
	# Sets ERROR and returns 1 on a violation; returns 0 otherwise.
	check_line()
	{
		# Pattern to match one line structure declarations.
		pattern_struct_declar="^[ \t]*struct ([a-zA-Z_]+) (.*);"

		# Condition to check for one line struct declarations.
		if [[ $LINE =~ $pattern_struct_declar ]]; then
			struct_name=${BASH_REMATCH[1]}
			# Check whether the structure name starts with lower case letter.
			if [[ ${struct_name:0:1} =~ [a-z] ]]; then
				# Report the line has failed.
				ERROR="$struct_name starts with a lowercase letter."
				return 1
			fi
		fi

		# All done.
		return 0
	}

	# Scan the whole file (multi-line aware, via pcregrep) for structure
	# definitions whose name starts with a lowercase letter; flags the
	# offending line numbers in array_error_flag_struct_name.
	check_struct_name()
	{
		# Iterate through each match of structure name to check for compliance.
		# Each match is printed as "<line_number>:<struct_name>".
		for string in `$TCPATH/res/vendor/pcre-8.35/pcregrep -n -o4 -o7 -o12 -M '(struct[ ]*[\n]*{(\n(.*?))+[}]+[ ]*([a-zA-Z0-9_]+);)|(struct ([a-zA-Z0-9_]*[ ])*([a-zA-Z0-9_]+)\s*\n*{)|(typedef struct[ ]*([a-zA-Z]+)[\n]*{(\n(.*?))+[}]+[ ]*([a-zA-Z0-9_]+);)' $FILE`
		do
			# Obtain the structure name.
			struct_name=${string#*:}

			# Obtain the line number.
			line_number_struct=${string%:*}

			# Check whether the structure name starts with lower case letter.
			if [[ ${struct_name:0:1} =~ [a-z] ]]; then
				# Set the error flag.
				array_error_flag_struct_name[$line_number_struct]=$struct_name
			fi
		done

		# All done.
		return 0
	}

	# Scan the whole file for structures containing access modifiers
	# (public/protected/private); flags them in array_error_flag_access_mod.
	check_access_modifiers()
	{
		# Pattern to match access modifier.
		pattern_access_mod="^[ \t]*(public|protected|private):"

		# Pattern to match line number from output of pcregrep.
		pattern_line_number="^[0-9]+:"

		# Save the default IFS to a temporary variable.
		SAVEIFS=$IFS

		# Modify the internal field separator to process each lines separately.
		IFS=$(echo -en "\n\b")

		# Iterate through each match of structure to check for access modifiers within structures [ Format: Any structure ].
		for string in `$TCPATH/res/vendor/pcre-8.35/pcregrep -o1 -n -M 'struct[ a-zA-Z_ ]*[\n]*{((\n(.*?))+)[}]+[ ]*([a-zA-Z0-9_]*);' $FILE`
		do
			# Remember the line number of the structure currently being scanned.
			# (Bug fix: this previously read the stale $struct_name left over
			# from check_struct_name instead of the current $string, so errors
			# were recorded against wrong line numbers.)
			if [[ $string =~ $pattern_line_number ]]; then
				line_number_struct=${string%:*}
			fi

			# Check whether the line is an access modifier.
			if [[ $string =~ $pattern_access_mod ]]; then
				# Set the error flag.
				array_error_flag_access_mod[$line_number_struct]=1
			fi
		done

		# Reset IFS to the default value.
		IFS=$SAVEIFS

		# All done.
		return 0
	}

	# Need to keep track of the line number we are up to.
	local LINE_NUM=

	# If we encounter something wrong, then we make a note that there is at least some kind of issue with the file as a whole.
	local CHECKS_OK=1

	# Initially, we are checking.
	local CHECKS_EN=1

	# Count of lines with compliance checking disabled via pragma.
	local DISABLED_LINE_COUNT=0

	# Check each structure name.
	check_struct_name $FILE

	# Check whether a structure has any access modifiers in it.
	check_access_modifiers $FILE

	# Pattern to match multi line comment start.
	pattern_comment_start="^[ \t]*(\/\*)"

	# Pattern to match multi line comment end.
	pattern_comment_end="(.*)(\*\/)"

	# Pattern to match single line comment.
	pattern_comment_single="^[ \t]*(\/\*)(.*)(\*\/)"

	# Pattern to match single line comment which appear after the statements Eg. int a; /* Used to declare a variable */
	pattern_comment_single_after_statement="(\/\*)(.*)(\*\/)"

	while read LINE
	do
		# Increment the line number.
		((LINE_NUM++))

		if [ ${CHECKS_EN} ]; then
			# Check to see whether this line disables checking temporarily.
			if [[ "$LINE" =~ "#pragma COMPL_DISAB_START" ]]; then
				CHECKS_EN=
				continue
			fi

			# Check whether the line is the start of a multi line comment. If yes, set 'CHECKS_COMMENT' flag and read the next line.
			if [[ "$LINE" =~ $pattern_comment_start && ! "$LINE" =~ $pattern_comment_single_after_statement ]]; then
				CHECKS_COMMENT=1
				continue
			fi

			# Check whether the line is the end of a multi line comment. If yes, unset 'CHECKS_COMMENT' flag and read the next line.
			if [[ "$LINE" =~ $pattern_comment_end && ! "$LINE" =~ $pattern_comment_single_after_statement ]]; then
				CHECKS_COMMENT=
				continue
			fi

			# Check whether the line is a single comment line. If yes, read the next line.
			if [[ "$LINE" =~ $pattern_comment_single ]]; then
				continue
			fi

			# Check if the line has any errors with structure names.
			if [ ${array_error_flag_struct_name[$LINE_NUM]} ]; then
				# Report the line has failed.
				compliance_fail_line "compliance_check_file_structurenamecheck" $FILE $LINE_NUM "Non-compliant structure: '${array_error_flag_struct_name[$LINE_NUM]}' starts with a lowercase letter."

				# At least one thing is wrong in this file.
				CHECKS_OK=
			fi

			# Check if the line has any access modifiers within structures.
			if [ ${array_error_flag_access_mod[$LINE_NUM]} ]; then
				# Report the line has failed.
				compliance_fail_line "compliance_check_file_structurenamecheck" $FILE $LINE_NUM "Non-compliant structure: contains access modifiers."

				# At least one thing is wrong in this file.
				CHECKS_OK=
			fi

			# Proceed checking for compliance only if the line is not a comment.
			if [[ ! ${CHECKS_COMMENT} ]]; then
				# Ok, so now we actually want to check this line for compliance.
				if ! check_line "$LINE" $LINE_NUM; then
					# Report the line has failed.
					compliance_fail_line "compliance_check_file_structurenamecheck" $FILE $LINE_NUM "Non-compliant structure name: $ERROR"

					# At least one thing is wrong in this file.
					CHECKS_OK=
				fi
			fi
		else
			DISABLED_LINE_COUNT=$((DISABLED_LINE_COUNT+1))

			# Checking is temporarily disabled, so all we do is see if this line enables them again.
			if [[ "$LINE" =~ "#pragma COMPL_DISAB_END" ]]; then
				# Enable checking again.
				CHECKS_EN=1
			fi
		fi
	done < $FILE

	# Calculate the maximum number of lines which could sensibly have compliance disabled before we suspect something is amiss.
	local LINE_THRESH=$((LINE_NUM/10))

	# Check if the number of lines disabled is lesser than the threshold
	if [ $DISABLED_LINE_COUNT -gt $LINE_THRESH ]; then
		# Report that the file has failed.
		compliance_fail_file "exception_limits" $FILE "A suspiciously high number of lines '$DISABLED_LINE_COUNT' have compliance testing disabled."
	fi

	if [ ! ${CHECKS_OK} ]; then
		# Report that the file has failed.
		compliance_fail_file "compliance_check_file_structurename" $FILE "This file contains non-compliant structure(s)."
	fi

	# All done.
	return 0
}
# ALL DONE.
| true
|
58351213b2bda92be23e78049cf0bc84cda290b9
|
Shell
|
alher11/tool_santet_sms
|
/santet.sh
|
UTF-8
| 2,297
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
echo Selamat datang tod, Siapa nama lu? #tulisan keluar
read nick #membaca yang ditulis
sleep 0.03
echo " _.-^^---....,,-- "
sleep 0.03
echo " _-- jika ada --_ "
sleep 0.03
echo " < kendala >) "
sleep 0.03
echo " | hubungi | "
sleep 0.03
echo " \._ fb: lilung _./ "
sleep 0.03
echo " '''--. . , ; .--''' "
sleep 0.03
echo " | | | "
sleep 0.03
echo " .-=|| | |=-. "
sleep 0.03
echo " '-=#$%&%$#=-' "
sleep 0.03
echo " | ; :| "
sleep 0.03
echo " _____.,-#%&$@%#&#~,._____ "
sleep 0.03
echo "============================="
sleep 2
echo "== Tool santet sms by lilung =="
sleep 0.3
echo "== youtube: lilung project =="
sleep 0.7
echo "============================="
echo Selamat datang $nick ":)"
get_url=$(curl -s http://zlucifer.com/api/sms.php)
cek='curl -s '$get_url # check status
response=`curl -s -o /dev/null -w "%{http_code}" $cek`
if [[ $response = *sleeping* ]]; then
echo
echo "Website Offline/Restart untuk sementara"
else
echo
echo "Silahkan masukan nomor hp musuh klean "
echo contoh 0812345678
read target # masukin no telp
echo
echo "Berapa santet yang mau dikirim?"
read paket
echo
echo Apakah nomor $target "dan SANTET dikirim "$paket" sudah benar?"
echo y/n?
read confirm
echo
if [ $confirm = "y" ]; then
echo Melakukan spam SMS ke nomor $target
i=0
max=100
while [ $i -le $max ]; do
echo -ne "\nSpamming percentage : $i% "
sleep 0.03
if [ $i -eq 100 ]; then
echo -ne " [complete!]\n"
echo "Jangan close dulu aplikasi tod sebelum spam selesai buru buru amat lu emang mau coli"
echo "========================================"
target_do=$get_url'/sms.php?nomor='$target'&paket='$paket
CURL_RESPONSE=`curl -s -o /dev/null -w "%{http_code}" $target_do`
echo " selamat anda jadi hackel plo"
echo
echo " udah selesai goblok "
echo " -lilung"
echo "======================================="
fi
let i++
done
else
echo "ada Kesalahan goblok"
fi
fi
| true
|
659c50f1d42023918eea6adad5605130121d7514
|
Shell
|
0x384c0/Experiments-iOS-CoreML
|
/image_classifier/scripts_image_net/download_images.sh
|
UTF-8
| 319
| 2.703125
| 3
|
[] |
no_license
|
cd tmp
# For every subdirectory, download all URLs listed in its batch.txt using
# 10 parallel wget workers (2 s timeout, single attempt, resume enabled).
for dir in *; do
	[ -d "${dir}" ] || continue
	# Strip CR characters (batch.txt may have Windows line endings) and
	# hand one URL per wget invocation; files are saved into the directory.
	tr -d '\r' < "$dir/batch.txt" | xargs -n 1 -P 10 wget -c -T 2 --tries 1 -P "$dir/"
done
| true
|
e573d95a2fab38818b2aa71b0b81bec703365b31
|
Shell
|
juanvallejo/linux-scripts
|
/minfo
|
UTF-8
| 799
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/sh
# minfo - music info
# prints a string containing the current <artist> - <songname> to stdout
# requires cmus, cmus-remote
# @author juanvallejo
# NOTE(review): shebang is /usr/bin/sh but [[ ]] below is a bashism —
# works only where sh is bash; confirm on the target system.
# NOTE(review): --passwd on the command line is visible to other users via
# `ps`; consider cmus-remote's password-file mechanism.
# Query cmus for the current track title; the sed strips the "tag title"
# prefix but leaves a leading space in the result.
songquery=`cmus-remote --server 127.0.0.1:8080 --passwd password -Q 2>&1 | grep "tag title" -m 1 | sed -E "s/tag\ title//g"`
# determine if cmus is not open
if [[ $songquery == "" ]]
then
echo ""
exit 1
fi
artistquery=`cmus-remote --server 127.0.0.1:8080 --passwd password -Q | grep "tag artist" -m 1 | sed -E "s/tag\ artist//g"`
# check for albumartist tag if artist tag is blank
if [[ -z "$artistquery" ]] || [[ "$artistquery" == "" ]]
then
artistquery=`cmus-remote --server 127.0.0.1:8080 --passwd password -Q | grep "tag albumartist" -m 1 | sed -E "s/tag\ albumartist//g"`
fi
# Intentionally unquoted: word splitting collapses the leading/extra
# whitespace left by the sed substitutions, yielding "<artist> - <song>".
echo $artistquery" -"$songquery
| true
|
b2fd6cca891a0e361ad1509ade3a9dcfaba3135b
|
Shell
|
SDCND/SDC-Capstone
|
/object_detection_training/src/download_training_data.sh
|
UTF-8
| 435
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the pretrained SSD-MobileNet model and the simulator training
# dataset, then unpack the model archive into the downloads directory.
# Requires SDC_WORKING_DIR to be set in the environment.

PRETRAINED_MODEL="ssd_mobilenet_v1_coco_2017_11_17.tar.gz"
PRETRAINED_MODEL_URL="https://s3.amazonaws.com/ricardosdc/capstone_project/${PRETRAINED_MODEL}"
SDC_DATASET_URL="https://s3.amazonaws.com/ricardosdc/capstone_project/sim_data.record"
DESTINATION="${SDC_WORKING_DIR}/downloads"

# -P: save downloads under DESTINATION (wget creates it if missing).
wget "${PRETRAINED_MODEL_URL}" -P "${DESTINATION}"
wget "${SDC_DATASET_URL}" -P "${DESTINATION}"

# Extract the model tarball alongside the downloads.
tar -xvf "${DESTINATION}/${PRETRAINED_MODEL}" --directory "${DESTINATION}"
| true
|
b84bc2d0eaafca84f7193f7abd88556aaf487bf9
|
Shell
|
NOAA-EMC/GEFS
|
/jobs/JGEFS_ATMOS_INIT_RECENTER
|
UTF-8
| 3,140
| 3.5
| 4
|
[] |
no_license
|
#!/bin/ksh
# NCO-style J-job wrapper for the GEFS atmospheric init recenter step:
# sets up the working/COM directories, loads parm files, runs the
# ex-script, and cleans up.
# NOTE: ${.sh.file} (path of the running script) is ksh93-specific.
echo "$(date -u) begin ${.sh.file}"

set -xa
# STRICT=YES enables strict error checking (exit on error/unset vars).
if [[ ${STRICT:-NO} == "YES" ]]; then
	# Turn on strict bash error checking
	set -eu
fi
export PS4='$SECONDS + $(basename ${.sh.file})[$LINENO] '

####################################
# Determine Job Output Name on System
####################################
export jobid=${jobid:-"$job.$PBS_JOBID"}
export pgmout="OUTPUT.${PBS_JOBID}"
export pgmerr=errfile

####################################
# Specify and create working directory
####################################
export DATA=${DATA:-${DATAROOT}/${jobid}}
mkdir -p $DATA
cd $DATA

######################################
# Set up compute resources
######################################
export total_tasks=${total_tasks:-6}
export APRUN_MPMD=${gefsmpexec_mpmd:-"mpiexec -n $total_tasks cfp mpmd_cmdfile"}

####################################
# Specify NET and RUN Name and model
####################################
export NET=${NET:-gefs}
export envir=${envir:-prod}
export RUN=${RUN:-gefs}

######################################
# SENDCOM  - Copy Files From TMPDIR to $COMOUT
# SENDDBN  - Issue DBNet Client Calls
# SENDECF  - Flag Events on ECF
# VERBOSE  - Specify Verbose Output
######################################
export SENDCOM=${SENDCOM:-YES}
export SENDDBN=${SENDDBN:-YES}
export SENDECF=${SENDECF:-YES}
export VERBOSE=${VERBOSE:-YES}

######################################
# Set up the date information
######################################
export cycle=t${cyc}z
# setpdy.sh writes the PDY file with the production date variables.
setpdy.sh
source PDY

####################################
# Specify Execution Areas
####################################
export HOMEgefs=${HOMEgefs:-${PACKAGEROOT}/gefs.${gefs_ver}}
export PARMgefs=${PARMgefs:-$HOMEgefs/parm}

##############################################
# Define COM directories
##############################################
ver=${ver:-$(echo ${gefs_ver}|cut -c1-5)}
export COMPONENT=atmos
export COMOUT=${COMOUT:-$(compath.py -o $NET/${ver})/${RUN}.${PDY}/$cyc/$COMPONENT}
export GESIN=${GESIN:-$(compath.py $envir/com/$NET/${ver})/${RUN}.${PDY}/$cyc/nwges}
export GESOUT=${GESOUT:-$(compath.py -o $NET/${ver})/${RUN}.${PDY}/$cyc/nwges}

#############################
# Source relevant config files
#############################
configs="gefs gefs_init"
config_path=$PARMgefs
for config in $configs; do
	. $config_path/$config.parm
	export err=$?
	if [[ $err != 0 ]]; then
		echo "FATAL ERROR in ${.sh.file}: Error while loading parm file $config_path/$config.parm"
		exit $err
	fi
done

echo "Environment before calling script"
env | sort

#############################################################
# Execute the script
$HOMEgefs/scripts/exgefs_atmos_init_recenter.sh
export err=$?
if [[ $err == 0 ]]; then
	echo "$job completed normally!"
else
	echo "FATAL ERROR in ${.sh.file}: $job failed!"
	exit $err
fi
#############################################################

##############################
# Remove the Temporary working directory
##############################
cd $DATAROOT
if [ ${KEEPDATA:-NO} = NO ] ; then rm -rf $DATA ; fi

echo "$(date -u) end ${.sh.file}"

exit $err
| true
|
a3af9b94c625b1a7b9b5d6bfc6f55dce8239032f
|
Shell
|
wzbbbb/SD
|
/download/version_x_upgrade.sh
|
UTF-8
| 406
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Gateway (GW) update helper: reports upgrade readiness and bumps the
# version counter persisted in ~/gw.version.
#
# To upload the file to the server:
#   curl -F "file=@version_x_upgrade.sh" http://192.168.115.41/SDC/gateway

# ~/ready_for_upgrade is a flag file: present = ready for upgrade,
# absent = an upgrade is in progress (the upgrade removes the file and
# restores it when finished).
if [ -f ~/ready_for_upgrade ] ; then
	echo "ready for upgrade!"
	# remove the file during the upgrade
	# then, put it back after the upgrade
	# touch ~/ready_for_upgrade
else
	echo "upgrade on going!"
fi

# Increment the persisted version counter. A missing or empty file is
# treated as version 0, so the first run writes "1".
v=$(cat ~/gw.version 2>/dev/null)
v=$(( ${v:-0} + 1 ))
echo "$v" > ~/gw.version
| true
|
b2e326c947e9e6073c46d43f538ea67b5758fb08
|
Shell
|
Snehlata0305/Software-lab
|
/inLab2/q1/q1.sh
|
UTF-8
| 96
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the sum of squares 1^2 + 2^2 + ... + n^2, where n is the first
# command-line argument. With no argument the loop never runs and 0 is
# printed.
a="$1"
s=0
for (( i = 1; i <= a; i++ )); do
	s=$(( s + i * i ))
done
echo "$s"
| true
|
40ced1b1eacdf483cdaf8f60608ddcd1ba9472b2
|
Shell
|
meom-configurations/eORCA12.L75-GJM2020
|
/RUN_eORCA12.L75/eORCA12.L75-MJM2020/CTL/xiosremove.sh
|
UTF-8
| 546
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Remove XIOS scratch directories (under $DDIR) for run segments that
# chkseg.sh reports as already archived, and list the segments whose XIOS
# directories must still be transferred.
# Requires SDIR and DDIR to be set in the environment.
#set -x

CONFIG=eORCA12.L75
CASE=GJM2020

CONFCASE=${CONFIG}-${CASE}

cd $SDIR/$CONFIG/${CONFCASE}-S
# lsta: segments already archived; lstb: segments not yet ready.
lsta=( $( ./chkseg.sh | grep 'archived' | awk '{print $1}' ) )
lstb=( $( ./chkseg.sh | grep 'not ready' | awk '{print $1}' ) )

cd $DDIR
for n in ${lsta[@]} ; do
   if [ -d ${CONFCASE}-XIOS.$n ] ; then
     echo ${CONFCASE}-XIOS.$n can be removed
     # Removal is backgrounded so several directories are purged in parallel.
     rm -rf ${CONFCASE}-XIOS.$n &
   fi
done

for n in ${lstb[@]} ; do
   if [ -d ${CONFCASE}-XIOS.$n ] ; then
     echo "${CONFCASE}-XIOS.$n must be transfered (when ready )"
   fi
done
| true
|
9f6a0eeafed1fd278b8323d203c913905b06e066
|
Shell
|
aharon-br/jamf-scripts
|
/Add DNS Servers.sh
|
UTF-8
| 639
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash

## This will add the Specified DNS Servers in Script Options in Jamf ($4, $5, $6, $7) to the users existing DNS.
## If running outside of Jamf edit $4,$5,$6,$7 to actual DNS entries
### Harry Richman 2020

## Make a text copy of current DNS for emergency roll back purposes
sudo networksetup -getdnsservers Wi-Fi >> /var/tmp/old_dns.txt

## Get Current DNS to ensure being added back in
## NOTE(review): when no DNS is configured, networksetup prints
## "There aren't any DNS Servers set on Wi-Fi." — confirm passing that text
## back to -setdnsservers is acceptable, or filter it out first.
currentDNS=$( networksetup -getdnsservers Wi-Fi )

## Apply the existing servers plus the four Jamf-supplied ones.
## ${currentDNS} and $4..$7 are intentionally unquoted so each address is
## passed as its own argument and empty parameters simply vanish.
## (Bug fix: the original line duplicated the command prefix and left
## ${currentDNS} on its own line, which executed the DNS list as a command.)
## If wanting to replace DNS servers entirely remove ${currentDNS} from this line
sudo networksetup -setdnsservers Wi-Fi ${currentDNS} $4 $5 $6 $7

exit 0
| true
|
28e7a4b2295d84d3991191aae1a05ab9e20e48e7
|
Shell
|
mysqlonarm/benchmark-suites
|
/mysql-cluster-bench/workload/sbm.monitor.sh
|
UTF-8
| 266
| 2.890625
| 3
|
[] |
no_license
|
#! /bin/bash
# Replication monitor: every second, print the slave lag and the Galera
# flow-control counter, for a fixed number of iterations.
# $1 - full mysql client command line (intentionally left unquoted below so
#      it word-splits into command + options).
# $2 - number of one-second polling iterations.
slavecmd=$1
monitorcounter=$2
for (( c=1; c<=$monitorcounter; c+=1 ))
do
# "Seconds_Behind_Maste" is truncated but still matches
# "Seconds_Behind_Master" as a substring.
$slavecmd -e "show slave status\G" | grep Seconds_Behind_Maste
$slavecmd -e "show status like 'wsrep_flow_control_paused_ns'" | grep wsrep_flow_control_paused_ns
sleep 1
done
| true
|
147a320d2813ae948474fc4111f63366dbff4b3c
|
Shell
|
codicuz/portf-shop-db-migration
|
/cmd-flyway.sh
|
UTF-8
| 906
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Wrapper around the Flyway CLI: maps each command-line argument to the
# corresponding ./flyway subcommand (migrate/clean/info/repair).
# User-facing messages are in Russian (kept as-is).

NO_ARGS=0
E_OPTERR=65

# No arguments: print usage and exit with the option-error status.
if [ $# -eq "$NO_ARGS" ]
then
printf "Отсутствуют аргументы. Должен быть хотя бы один аргумент.\n"
printf "Использование: $0 {migrate|-migrate|clean|-clean|info|-info|repair|-repair}\n"
printf " $0 migrate|-migrate - run migrations\n"
printf " $0 clean|-clean - clean migrations\n"
# Bug fix: this usage line was missing its trailing "\n", so the next line
# printed fused onto it.
printf " $0 info|-info - information about migrations\n"
printf " $0 repair|-repair - repair migrations\n"
exit $E_OPTERR
fi

# Process arguments one by one; the shift at the bottom of the loop
# advances to the next argument. When the arguments are exhausted, the
# empty "$1" matches *) and ends the loop.
while :; do
case "$1" in
migrate|-migrate)
./flyway migrate
;;
clean|-clean)
./flyway clean
;;
info|-info)
./flyway info
;;
repair|-repair)
./flyway repair
;;
--)
# NOTE(review): this arm shifts here AND at the loop bottom, so the
# argument immediately after "--" is skipped — confirm that is intended.
shift
;;
?* | -?*)
printf 'ПРЕДУПРЕЖДЕНИЕ: Неизвестный аргумент (проигнорировано): %s\n' "$1" >&2
;;
*)
break
esac
shift
done

exit 0
| true
|
7522bf8056cd10fabb83c8cf1e8ce6a4ad75a526
|
Shell
|
platform9/decco
|
/hack/ci-release.sh
|
UTF-8
| 2,428
| 4.0625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash

set -o nounset
set -o errexit
set -o pipefail

# ci-release.sh - automated a release of Decco for CI systems. The version to
# release is set in the VERSION file.
#
# The script assumes a Linux host with Docker and Make installed. It
# installs all further prerequisites (like Go, goreleaser) and does the
# git tagging. If you want to create a release without all this, you can use
# `make release`.
#
# Expected environment variables:
#
# - DOCKER_PASSWORD The password for the Docker account to push the images too.
# - DOCKER_USERNAME The username for the Docker account to push the images too.
# - DRY_RUN If non-empty, no resulting artifacts will actually be published.
# - GIT_USER_EMAIL The email of the user creating the git tag.
# - GIT_USER_NAME The name of the user creating the git tag.
# - GITHUB_TOKEN A Github access token to publish the release.
# - GO_VERSION The version of Go to use.
# - GORELEASER_VERSION The version of goreleaser to use.

# Configure Docker
# Password is fed via stdin so it does not appear in `ps` output.
echo -n "${DOCKER_PASSWORD}" | docker login --password-stdin -u "${DOCKER_USERNAME}"

# Install gimme
mkdir -p ./bin
export PATH="$(pwd)/bin:${PATH}"
curl -sL -o ./bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
chmod +x ./bin/gimme
gimme --version

# Install go
eval "$(GIMME_GO_VERSION=${GO_VERSION} gimme)"
mkdir -p ./build/gopath
export GOPATH="$(pwd)/build/gopath"
go version

# Gimme sets GOROOT. The Makefile redeclares GOROOT based on GO_TOOLCHAIN,
# so we need this roundabout hack
export GO_TOOLCHAIN=${GOROOT}

# Install goreleaser
curl -L -o ./bin/goreleaser.tar.gz https://github.com/goreleaser/goreleaser/releases/download/${GORELEASER_VERSION}/goreleaser_$(uname)_$(uname -m).tar.gz
(cd ./bin && tar -xvf goreleaser.tar.gz)
goreleaser -v
export GORELEASER="goreleaser"

# Determine the VERSION.
# Prefer the VERSION file; otherwise fall back to the default declared in
# the Makefile.
if stat VERSION > /dev/null ; then
    export VERSION=$(head -n 1 ./VERSION)
else
    export VERSION="$(grep 'VERSION ?= ' ./Makefile | sed -E 's/^VERSION \?= (.+)$/\1/g')"
fi
echo "VERSION=${VERSION}"

# Make the release
if [ -z "$DRY_RUN" ]
then
    # Tag the current/last commit with the VERSION (required by make release)
    git config user.email "${GIT_USER_EMAIL}"
    git config user.name "${GIT_USER_NAME}"
    git status -b
    git tag -a "${VERSION}" -m "${VERSION}"

    make release
else
    make release-dry-run
fi
| true
|
34762f99f1494fdad6e4cc60febe88b398aa40c6
|
Shell
|
melon-li/tools
|
/satstat
|
UTF-8
| 6,793
| 3.578125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Purpose: tally the inbound/outbound traffic of every interface connected
# to a given network (identified by its gateway).
# Argument: the gateway IP address of the network, or the word "get" to
# print the accumulated usage.
GW='192.168.99.1'
trap ctl_c INT
# PID of this script instance (used by the SIGINT handler to kill itself).
pid=$(ps axu | grep $0| grep grep -v| grep color -v| awk -F" " '{print $2}')
PS1_BK=$PS1
# Prompt line that embeds the current usage via "satstat get"; appended to
# user .bashrc files by checkout_bashrcs below.
ps1="export PS1=\"\u@\h:\W\[\e[31;1m\]\\\$(satstat get)\[\e[30;1m\]\$ \""
ifstat_cmd="ifstat -t -i"
cmd=$0
cmd=${cmd##*/}
# Accumulated traffic (in KB) is persisted here.
LOG='/var/'${cmd##*/}'/curtraffic.KB'
cmd_arr=()
statfile_arr=()
nowstamp=$(date +%s)
eth_arr=()
cur=0
admins=$(ps axu | grep $cmd| grep grep -v | grep color -v|awk -F" " '{print $1}')
# Stat mode requires root (for ifstat/iptables-level access and /var).
if [[ "$1" != "get" ]];then
	for e in $(echo $admins)
	do
		if [[ "$e" != "root" ]];then
			echo "ERROR: Permission denied, Please run $cmd by root"
			exit 1
		fi
	done
else
	# NOTE(review): this branch runs when $1 == "get", so GW is set to the
	# literal "get" (harmless here since the get path never uses GW) —
	# the intent was probably to accept a custom gateway IP; verify.
	if [[ "$1" != "" ]];then
		GW=$1
	fi
fi

# SIGINT handler: restore the saved prompt, kill all ifstat child
# processes, then kill this script instance.
function ctl_c(){
	export PS1=$PS1_BK
	pids=$(ps axu | grep "$ifstat_cmd"| grep grep -v| grep color -v| awk -F" " '{print $2}')
	for e in $(echo $pids)
	do
		kill -9 $e
	done
	kill -9 $pid
}
#input: bytes
function readable_size(){
sz=$1
sz=${sz%.*}
if [[ $sz -lt 1024 ]];then
echo "$sz B"
return 0
fi
if [[ $sz -lt $((1024*1024)) ]];then
sz=$(echo "scale=2;$sz/1024" |bc)
echo "$sz""KB"
return 0
fi
if [[ $sz -lt $((1024*1024*1024)) ]];then
sz=$(echo "scale=2;$sz/1024/1024" |bc)
echo "$sz""MB"
return 0
fi
if [[ $sz -lt $((1024*1024*1024*1024)) ]];then
sz=$(echo "scale=2;$sz/1024/1024/1024" |bc)
echo "$sz""GB"
return 0
fi
sz=$(echo "scale=2;$sz/1024/1024/1024/1024" |bc)
echo "$sz""TB"
return 0
}
# Return 0 if the tool named in $1 is known to `whereis` (i.e. installed),
# 1 otherwise. whereis prints "name:" with an empty second field when the
# tool is not found.
function is_installed(){
	found=$(whereis $1|awk -F":" '{print $2}')
	[ "$found" != "" ]
}
# Ensure the tool named in $1 is installed; if not, install it via apt or
# yum. Exits the script when neither package manager is available.
function checkout_env(){
	soft=$1
	is_installed $soft
	if [[ $? -eq 0 ]];then
		echo "Tool $soft is installed!"
		return 0
	fi
	is_installed apt
	if [[ $? -eq 0 ]];then
		sudo apt-get install -y $soft
		return 0
	fi
	is_installed yum
	if [[ $? -eq 0 ]];then
		sudo yum install -y $soft
		return 0
	fi
	echo "Error:apt/yum is not found!"
	exit 1
}

# Determine which interfaces can reach $GW and keep eth_arr / the ifstat
# background processes in sync with that set. Fast path: re-ping the
# already-known interfaces and drop (kill the ifstat of) any that no
# longer reach the gateway; slow path: rescan all interfaces until at
# least one reaches the gateway, then restart ifstat for them.
function checkout_satif(){
	#echo "start to fastly checkout"
	cnt=0
	if [[ ${#eth_arr[*]} -ne 0 ]];then
		for e in ${eth_arr[*]}
		do
			cnt=$(($cnt+1))
			ping -w 2 -c 1 -I $e $GW 1>/dev/null 2>/dev/null
			if [[ $? -ne 0 ]];then
				p=$(ps axu | grep "$ifstat_cmd $e"| grep grep -v| grep color -v| awk -F" " '{print $2}')
				kill -9 $p
				cnt=$(($cnt -1))
			fi
		done
		# All known interfaces still reach the gateway: nothing to do.
		if [[ $cnt -eq ${#eth_arr[*]} ]];then
			return 0
		fi
	fi
	echo "start to slowly checkout"
	#lo=$(ifconfig | grep "127.0.0.1" -B1|grep mtu|awk -F":" '{print $1}')
	while [ 1 ]
	do
		eths=$(ifconfig | grep mtu| grep grep -v| grep color -v|awk -F" " '{print $1}')
		eth_arr=()
		for e in $(echo $eths)
		do
			# ifconfig prints names with a trailing colon; ${e%:} strips it.
			ping -w 1 -c 1 -I ${e%:} $GW 1>/dev/null 2>/dev/null
			if [[ $? -eq 0 ]];then
				eth_arr[${#eth_arr[*]}]=${e%:}
			fi
		done
		if [[ ${#eth_arr[*]} -eq 0 ]];then
			echo "ERROR,not find any interface connected to satellite link!"
			echo -e "\tMaybe the IP for checking is wrong. Please input the IP again."
	#		echo -n ":"
	#		read GW
			sleep 1
		else
			break
		fi
	done
	start_ifstat
	echo "Network Interfaces listened: "${eth_arr[*]}
}
# Load the persisted traffic counter from $LOG into the global `cur`,
# creating the log directory/file (initialized to 0) on first run.
function get_current_traffic(){
	dir=${LOG%/*}
	if [ ! -d "$dir" ];then
		sudo mkdir -p "$dir"
		sudo touch $LOG
		sudo chmod 777 $LOG
		sudo echo "0" >$LOG
		cur=0
		return 0
	fi
	if [ ! -f $LOG ];then
		sudo touch $LOG
		sudo chmod 777 $LOG
		sudo echo "0" >$LOG
		cur=0
		return 0
	fi
	cur=$(head -n 1 $LOG)
}

# Launch one background `ifstat` per interface in eth_arr, each writing
# its samples to $dir/ifstat.<iface>; records the commands in cmd_arr and
# the output files in statfile_arr.
function start_ifstat(){
	cmd_arr=()
	statfile_arr=()
	dir=${LOG%/*}
	for e in $(echo ${eth_arr[*]})
	do
		#echo "eth=$e"
		statfile="$dir/ifstat.$e"
		sudo touch $statfile
		sudo chmod 777 $statfile
		cmd="$ifstat_cmd $e >$statfile"
		statfile_arr[${#statfile_arr[*]}]=$statfile
		cmd_arr[${#cmd_arr[*]}]=$cmd
		# eval is needed so the ">$statfile" redirection inside $cmd applies.
		eval $cmd" &"
	done
}
# Trim each ifstat log once it grows beyond ~50 MB, keeping only the lines
# from the current sampling timestamp onward so recent samples remain
# available to the stat loop.
# Globals read: statfile_arr (log files), nowstamp (current epoch second).
function checkout_ifstat_datafile(){
	for e in $(echo ${statfile_arr[*]})
	do
		# File size in whole megabytes.
		size=$(ls -trl $e |awk -F" " '{print $5}')
		size=$(echo "scale=0;$size/1024/1024" |bc)
		if [[ 50 -lt $size ]];then
			lines=$(cat $e|wc -l)
			# HH:MM:SS of the current sampling timestamp.
			now=$(date -d @$nowstamp |awk -F" " ' {print $4}')
			# Bug fix: grep previously had no input file and would block
			# reading stdin; it must filter the stat file itself.
			grep "$now" -A$lines "$e" > "/tmp/tmp.txt"
			mv "/tmp/tmp.txt" $e
		fi
	done
}
# Main accounting loop: once per second, sum the in+out KB/s samples that
# every interface's ifstat log recorded for that second, add them to the
# running total `cur`, and persist it to $LOG. Never returns.
function cal_sattraffic(){
	checkout_env ifstat
	checkout_env bc
	checkout_satif
	get_current_traffic
	echo "Start to stat!"
	nowstamp=$(date +%s)
	# Start two seconds in the past so the first iteration's samples exist.
	nowstamp=$(($nowstamp-2))
	while [ 1 ]
	do
		nowstamp=$(($nowstamp+1))
		checkout_ifstat_datafile
		checkout_satif
		#waiting for ifstat create traffic data
		while [ 1 ]
		do
			time=$(date +%s)
			# Bare "nowstamp" is evaluated arithmetically by -lt.
			if [[ nowstamp -lt $time ]];then
				break
			fi
			sleep 0.1
		done
		sum=0
		# HH:MM:SS used to locate this second's sample line in each log.
		now=$(date -d @"$nowstamp" |awk -F" " ' {print $4}')
		for e in $(echo ${statfile_arr[*]})
		do
			# Columns 2+3 of an ifstat line are KB/s in and out.
			traffic=$(grep "$now" $e| grep grep -v | grep color -v|awk -F" " '{print $2+$3}')
			if [[ "$traffic" == "" ]];then
				traffic=0
			fi
			sum=$(echo "scale=2;$traffic+$sum" |bc)
		done
		cur=$(echo "scale=2;$cur+$sum" |bc)
		echo "$cur" >$LOG
		#readable_size $(echo "scale=0;$cur*1024"|bc)
		sleep 0.2
	done
}

# Append the usage-displaying PS1 line ($ps1) to the .bashrc of every
# regular user (UID >= 500) and root, unless it is already present.
function checkout_bashrcs(){
	users=$(cat /etc/passwd | awk -F: '$3>=500' | cut -f 1 -d :)
	users=$users" root"
	for e in $(echo $users)
	do
		home="/home/$e"
		if [[ "$e" == "root" ]];then
			home="/root"
		fi
		if [[ -d "$home" ]];then
			bashrc="$home/.bashrc"
			if [[ -f $bashrc ]];then
				is_ps1=$(grep "$cmd get" $bashrc|grep grep -v | grep color -v)
				if [[ "$is_ps1" == "" ]];then
					echo $ps1 >> $bashrc
					#echo $c
					#su $e -l -c "$c"
				fi
			fi
		fi
	done
}

# Entry point: "get" prints the human-readable total from $LOG (counter is
# stored in KB, hence the *1024 to bytes); otherwise install the prompt
# hook and start the accounting loop.
if [[ "$1" == "get" ]];then
	cur=$(head -n 1 $LOG)
	cur=$(readable_size $(echo "scale=0;$cur*1024"|bc))
	echo "($cur Used)"
else
	checkout_bashrcs
	cal_sattraffic
fi
| true
|
ca9edad97cd30b2c89b1eb293645792fdaea0928
|
Shell
|
jelaas/bifrost-build
|
/all/perl-5.10.1-1/Build.sh
|
UTF-8
| 2,711
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Builds a dynamically linked perl executable.
# Flow: fetch/unpack sources -> Configure -> patch config.sh -> make ->
# staged install under /var/tmp/install -> package tarball in /var/spool/pkg.
SRCVER=perl-5.10.1
PKG=$SRCVER-1 # with build version
PKGDIR=${PKGDIR:-/var/lib/build/all/$PKG}
SRC=/var/spool/src/$SRCVER.tar.gz
CDIR=/var/tmp/src
DST="/var/tmp/install/$PKG"

#########
# Install dependencies:
# pkg_install dependency-1.1 || exit 1

#########
# Unpack sources into dir under /var/tmp/src
./Fetch-source.sh || exit 1
rm -rf $CDIR/$SRCVER
cd $CDIR; tar xf $SRC

#########
# Patch
cd $CDIR/$SRCVER
# libtool_fix-1
# patch -p1 < $PKGDIR/mypatch.pat

#########
# Configure
#B-configure-1 --prefix=/usr || exit 1
#CFLAGS="-march=i586 -Os"
#CONFIG_OPTS="-Ui_db -Duseshrplib -Ulocincpth="
#PERL_PATHS="-Dprefix=/usr -Dvendorprefix=/usr -Dsiteprefix=/usr"
#./Configure -Doptimize="$CFLAGS" -d -e $CONFIG_OPTS $PERL_PATHS || exit 1
#./Configure -d -e -Uusedl -Uuseperlio -Dprefix=/usr -Dcc=cc -Ud_tm_tm_zone -Ud_tm_tm_gmtoff -Dldflags='' -Dnoextensions=IO/Compress -Dso=none -Accflags='-march=i586 -D_SVID_SOURCE' -Dd_usleepproto -Dd_castneg -Ud_u32align
# -D_SVID_SOURCE makes things BORK!
./Configure -d -e -Uusedl -Uuseperlio -Dprefix=/usr -Dcc=cc -Ud_tm_tm_zone -Ud_tm_tm_gmtoff -Dldflags='' -Dnoextensions=IO/Compress -Dso=none -Accflags='-march=i586' -Dd_usleepproto -Dd_castneg -Ud_u32align -Dvendorprefix=/usr -Dsiteprefix=/usr

# Each sed pass below rewrites config.sh in place via a temp copy in /tmp.
# remove stack-protector crap
sed 's/-fstack-protector//g' config.sh > /tmp/config.$$; cp -f /tmp/config.$$ config.sh
sed "s/castflags='139'/castflags='0'/g" config.sh > /tmp/config.$$; cp -f /tmp/config.$$ config.sh
sed "s/d_Gconvert='sprintf((b),\"%.*g\",(n),(x))'/d_Gconvert='gcvt((x),(n),(b))'/g" config.sh > /tmp/config.$$; cp -f /tmp/config.$$ config.sh
sed "s/d_charvspr='define'/d_charvspr='undef'/g" config.sh > /tmp/config.$$; cp -f /tmp/config.$$ config.sh
sed "s/c99_variadic_macros='undef'/c99_variadic_macros='define'/g" config.sh > /tmp/config.$$; cp -f /tmp/config.$$ config.sh
# Re-run the edited configuration so the derived headers pick up the edits.
sh ./config.sh

#########
# Post configure patch
# patch -p0 < $PKGDIR/Makefile.pat
#cp $PKGDIR/perl-uclibc-config.h config.h

#########
# Compile
make || exit 1

#########
# Install into dir under /var/tmp/install
rm -rf "$DST"
make install DESTDIR=$DST # --with-install-prefix may be an alternative

#########
# Check result
cd $DST
# [ -f usr/bin/myprog ] || exit 1
# (file usr/bin/myprog | grep -qs "statically linked") || exit 1

#########
# Clean up
cd $DST
rm -f *.0
rm -rf usr/lib/perl5/5.10.1/pod
# rm -rf usr/share usr/man
#[ -d bin ] && strip bin/*
#[ -d usr/bin ] && strip usr/bin/*

#########
# Make package
cd $DST
tar czf /var/spool/pkg/$PKG.tar.gz .

#########
# Cleanup after a success
cd /var/lib/build
[ "$DEVEL" ] || rm -rf "$DST"
[ "$DEVEL" ] || rm -rf "$CDIR/$SRCVER"
pkg_uninstall
exit 0
| true
|
7bb78e094e70caeda4e82ab758083eff89fc72f5
|
Shell
|
kim-jeongsu/baas2_caliper
|
/run_caliper.sh
|
UTF-8
| 626
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the Caliper smallbank benchmark against one or more Fabric network
# configurations (one per batch size in `array`) and archive each HTML
# report under ./results/191127_scalabiltiy/.
array=(300)
for value in "${array[@]}"; do
  # Network config directory name is derived from the batch size.
  # (Renamed from `test`, which shadows the shell builtin of the same name.)
  testname=3orderer2org4peer_raft_batch_${value}
  echo ----------------------------------------------------------------------------------
  echo "$testname"
  echo ----------------------------------------------------------------------------------
  caliper benchmark run --caliper-workspace ./ --caliper-benchconfig benchmarks/scenario/smallbank/config_linear_raft.yaml --caliper-networkconfig "networks/fabric-v1.4/caliper/${testname}/fabric-go.yaml"
  # Caliper writes report*.html into the workspace; move it to the results dir.
  mv report* "./results/191127_scalabiltiy/scale_${value}.html"
done
| true
|
fe661a7b570aeaf455ed13f44cb96863eb2c5356
|
Shell
|
ejh243/BBF2019
|
/Scripts/Utilities/createSTARIndex.sh
|
UTF-8
| 563
| 2.546875
| 3
|
[] |
no_license
|
# Load configuration (STARIndex/STARINDEXDIR, REFGENOME, GENCODEGTF,
# MASTERTRANSCRIPTOME paths) from the config file given as $1.
# Abort early if it cannot be sourced, instead of running STAR with
# empty paths.
source "$1" || exit 1
## create STAR index for reference genome
STAR --runThreadN 6 \
--runMode genomeGenerate \
--genomeDir "${STARIndex}" \
--genomeFastaFiles "${REFGENOME}" \
--sjdbGTFfile "${GENCODEGTF}" \
--sjdbOverhang 99
## create STAR index for merged master transcriptome
BRAINGTF="${MASTERTRANSCRIPTOME}/TALON/pfc_merge_filter_talon_observedOnly.gtf"
mkdir -p "${STARINDEXDIR}/Brain"
# NOTE(review): the first index uses ${STARIndex} while the second uses
# ${STARINDEXDIR} — presumably both are defined in the config file, but
# verify this is intentional and not a typo for the same variable.
STAR --runThreadN 6 \
--runMode genomeGenerate \
--genomeDir "${STARINDEXDIR}/Brain" \
--genomeFastaFiles "${REFGENOME}" \
--sjdbGTFfile "${BRAINGTF}" \
--sjdbOverhang 99
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.