blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9625805602fa21c5b22980b607c501ab3cda90ef
|
Shell
|
joshuaflores/http-request
|
/publish.sh
|
UTF-8
| 1,095
| 4.03125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
# Notes:
# - Use only "master" branch
# - New release gets always published as LATEST
RED='\033[0;31m'
NC='\033[0m' # No Color
# Version from package.json drives both the npm publish and the git tag.
PACKAGE_VERSION=$(node -pe "require('./package.json').version")
# Current branch name: third word of git status's "On branch <name>" line.
BRANCH=$(git status | grep 'On branch' | cut -d ' ' -f 3)
# Non-empty only when the working tree is clean.
# BUG FIX: the newline passed to tr must be quoted ('\n'); unquoted \n
# reaches tr as the letter "n", squeezing "n" characters, not newlines.
BRANCH_UP_TO_DATE=$(git status | grep 'nothing to commit' | tr -s '\n' ' ')
GIT_TAG="v${PACKAGE_VERSION}"
if [ -z "${BRANCH_UP_TO_DATE}" ]; then
    printf "${RED}You have uncommitted changes!${NC}\n"
    exit 1
fi
echo "Pushing to git ..."
git push
# Master gets published as LATEST if that version doesn't exists yet and retagged as LATEST otherwise.
if [ "${BRANCH}" = "master" ]; then
    echo "Publishing version ${PACKAGE_VERSION} with tag \"latest\" ..."
    RUNNING_FROM_SCRIPT=1 npm publish --tag latest --access public
    echo "Tagging git commit with ${GIT_TAG} ..."
    git tag "${GIT_TAG}"
    git push origin "${GIT_TAG}"
    echo "Git tag: ${GIT_TAG} created."
else
    printf "${RED}apify-shared package uses only a master branch which gets published with a latest NPM tag!${NC}\n"
    exit 1
fi
echo "Done."
| true
|
8c3785034ecbce79eedc96874c6e4d5ef611efcc
|
Shell
|
Duan-JM/dotfiles
|
/zsh/zsh-config/alias.sh
|
UTF-8
| 2,102
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
# Temporary Settings
# Shortcuts for frequently used directories, editors and tools.
alias github='cd ~/Documents/Github'
alias zshrc='vim ~/.zshrc'
alias vimrc='vim ~/.vimrc'
alias vi='nvim'
alias vim='nvim'
alias pg='cd ~/Downloads/CodeTest'
# cp replacement with a progress bar.
alias cpv='rsync -ah --info=progress2'
alias task='asynctask -f'
# Interactively pick a git branch name with fzf (whitespace stripped).
alias gch='git branch -a | fzf| tr -d "[:space:]"'
# NOTE(review): pins python3 to a Homebrew 3.9 framework path — confirm this
# install still exists before relying on it.
alias python3='/opt/homebrew/opt/python@3.9/Frameworks/Python.framework/Versions/3.9/bin/python3.9'
# Most often
# Route HTTP/HTTPS/socks traffic through the local proxy on 127.0.0.1:7890.
alias proxy='export https_proxy=http://127.0.0.1:7890 http_proxy=http://127.0.0.1:7890 all_proxy=socks5://127.0.0.1:7890'
# BUG FIX: also clear https_proxy/http_proxy, which `proxy` sets; the
# original only unset all_proxy, leaving the HTTP proxies active.
alias unproxy='unset all_proxy https_proxy http_proxy'
alias ggrep='git grep --break --heading -n'
# Show mounted /dev filesystems in aligned columns.
alias mnt='mount | grep -E ^/dev | column -t'
# Count regular files under the current directory.
alias count='find . -type f | wc -l'
# other's configure
# Advanced Aliases.
# ls, the common ones I use a lot shortened for rapid fire usage
alias ls='exa -l'
alias grep='grep --color'
alias sgrep='grep -R -n -H -C 5 --exclude-dir={.git,.svn,CVS} '
alias t='tail -f'
# Command line head / tail shortcuts
# (zsh-only global aliases: expand anywhere on the command line)
alias -g H='| head'
alias -g T='| tail'
alias -g G='| grep'
alias -g L="| less"
alias -g M="| most"
alias -g LL="2>&1 | less"
alias -g CA="2>&1 | cat -A"
alias -g NE="2> /dev/null"
alias -g NUL="> /dev/null 2>&1"
alias -g P="2>&1| pygmentize -l pytb"
alias h='history'
alias hgrep="fc -El 0 | grep"
alias help='man'
alias p='ps -f'
alias sortnr='sort -n -r'
alias unexport='unset'
# Interactive-by-default variants of rm/cp/mv.
alias rmi='rm -i'
alias cpi='cp -i'
alias mvi='mv -i'
#Git self configure
#change gd(git diff) need to manually change in the zsh git plugin
# gdd [ARGS...] - side-by-side git diff via icdiff, with optional scmpuff
# numeric-shortcut expansion of the arguments.
function gdd() {
    # Expand scmpuff shortcuts (e.g. "1 2") into file paths; empty when
    # scmpuff is missing or the expansion fails.
    # BUG FIX(cleanup): the original first assigned params="$@" and then
    # unconditionally overwrote it; the dead assignment is removed.
    params=`scmpuff expand "$@" 2>/dev/null`
    if [ $# -eq 0 ]; then
        # No arguments: diff the whole working tree.
        git difftool --no-prompt --extcmd "icdiff --line-numbers --no-bold" | less
    elif [ ${#params} -eq 0 ]; then
        # scmpuff unavailable: pass the raw arguments through.
        git difftool --no-prompt --extcmd "icdiff --line-numbers --no-bold" "$@" | less
    else
        # NOTE(review): "$params" is passed as a single word — confirm that
        # multi-file scmpuff expansions are meant to arrive as one argument.
        git difftool --no-prompt --extcmd "icdiff --line-numbers --no-bold" "$params" | less
    fi
}
# change github source to boost up download speed
# gcn <https://github.com/owner/repo> - clone through the cnpmjs.org mirror.
function gcn() {
    git_url=${1}
    # Strip the leading https://github.com/ prefix, keeping "owner/repo".
    remain_str=${git_url##*https://github.com/}
    head_str="https://github.com.cnpmjs.org/"
    # BUG FIX: the original wrapped this call in backticks, which tried to
    # execute git clone's stdout as a command; invoke git clone directly.
    git clone "${head_str}${remain_str}"
}
| true
|
0ea2b4a32b1b7d8f5195f64682c26f5ed492a6f7
|
Shell
|
libos-nuse/nuse-msmt
|
/eurosys/nginx/nginx-plot.sh
|
UTF-8
| 4,252
| 2.65625
| 3
|
[] |
no_license
|
# Usage: nginx-plot.sh <output-dir>
# Parse wrk benchmark logs for the lkl / native / docker nginx runs found
# under <output-dir> and render throughput/latency plots with gnuplot.
OUTPUT=$1
mkdir -p ${OUTPUT}/out
# NOTE(review): PKG_SIZES appears unused below (sizes are hard-coded in the
# xtics lines) — confirm it is not consumed by a sibling script.
PKG_SIZES="64 128 256 512 1024 1500 2048"
# parse outputs
# thpt (req/sec)
# "k" suffixes from wrk are rewritten to "* 1000"; the *2 presumably scales
# per-thread Req/Sec to two threads — TODO confirm thread count.
grep -E -h Req/Sec ${OUTPUT}/nginx*-lkl-[0-9].* \
| awk '{print $2 " " $3}' | sed "s/k/ 1000/g" | awk '{print $1*$2*2 " " $3}' \
> ${OUTPUT}/nginx-lkl-thpt.dat
grep -E -h Req/Sec ${OUTPUT}/nginx*-native-[0-9].* \
| awk '{print $2 " " $3}' | sed "s/k/ 1000/g" | awk '{print $1*$2*2 " " $3}' \
> ${OUTPUT}/nginx-native-thpt.dat
grep -E -h Req/Sec ${OUTPUT}/nginx*-docker-[0-9].* \
| awk '{print $2 " " $3}' | sed "s/k/ 1000/g" | awk '{print $1*$2*2 " " $3}' \
> ${OUTPUT}/nginx-docker-thpt.dat
# latency
# "ms" becomes "* 1000" and "us" becomes "* 1" so all values end up in usec.
grep -E -h Latency ${OUTPUT}/nginx*-lkl-[0-9].* \
| awk '{print $2 " " $3}' | sed "s/ms/ 1000/g" | sed "s/us/ 1/g" | awk '{print $1*$2 " " $3*$4}' > ${OUTPUT}/nginx-lkl.dat
grep -E -h Latency ${OUTPUT}/nginx*-native-[0-9]* \
| awk '{print $2 " " $3}' | sed "s/ms/ 1000/g" | sed "s/us/ 1/g" | awk '{print $1*$2 " " $3*$4}' > ${OUTPUT}/nginx-native.dat
grep -E -h Latency ${OUTPUT}/nginx*-docker-[0-9]* \
| awk '{print $2 " " $3}' | sed "s/ms/ 1000/g" | sed "s/us/ 1/g" | awk '{print $1*$2 " " $3*$4}' > ${OUTPUT}/nginx-docker.dat
# Everything below is a gnuplot program: the unquoted here-doc expands
# ${OUTPUT} now, while \$ keeps gnuplot's column references ($0, $1, ...)
# literal.  The body is left byte-for-byte as written.
gnuplot << EndGNUPLOT
set terminal postscript eps lw 3 "Helvetica" 24
set output "${OUTPUT}/out/nginx-wrk.eps"
set pointsize 2
set xzeroaxis
set grid ytics
set boxwidth 0.3
set style fill pattern
set key top righ
set size 1.0,0.7
# trans/sec
set ylabel "Throughput (KReq/sec)"
set ytics 5
set yrange [0:20]
set xtics ("64" 0, "128" 1, "256" 2, "512" 3, "1024" 4, "1500" 5, "2048" 6)
set xlabel "File size (bytes)"
set xrange [-1:7]
set terminal postscript eps lw 3 "Helvetica" 24
set output "${OUTPUT}/out/nginx-wrk-thpt.eps"
plot \
'${OUTPUT}/nginx-docker-thpt.dat' usi (\$0-0.3):(\$1/1000):(\$2/1000) w boxerr lt 1 lc rgb "green" fill pattern 2 title "docker(mac)" ,\
'${OUTPUT}/nginx-lkl-thpt.dat' usi (\$0-0):(\$1/1000):(\$2/1000) w boxerr lt 1 lc rgb "cyan" fill pattern 4 title "lkl", \
'${OUTPUT}/nginx-native-thpt.dat' usi (\$0+0.3):(\$1/1000):(\$2/1000) w boxerr fill pattern 0 lt 1 lc rgb "red" title "native(mac)"
set terminal png lw 3 14 crop
set output "${OUTPUT}/out/nginx-wrk-thpt.png"
replot
# latency
set ylabel "Latency (msec)"
set ytics 1
set yrange [0:3.5]
set xtics ("64" 0, "128" 1, "256" 2, "512" 3, "1024" 4, "1500" 5, "2048" 6)
set xlabel "File size (bytes)"
set xrange [-1:7]
set terminal postscript eps lw 3 "Helvetica" 24
set output "${OUTPUT}/out/nginx-wrk-latency.eps"
plot \
'${OUTPUT}/nginx-docker.dat' usin (\$0-0.3):(\$1/1000):(\$2/1000) w boxerr fill pattern 2 lt 1 lc rgb "green" title "docker(mac)" ,\
'${OUTPUT}/nginx-lkl.dat' usin (\$0-0):(\$1/1000):(\$2/1000) w boxerr fill pattern 4 lt 1 lc rgb "cyan" title "lkl", \
'${OUTPUT}/nginx-native.dat' usin (\$0+0.3):(\$1/1000):(\$2/1000) w boxerr fill pattern 0 lt 1 lc rgb "red" title "native(mac)"
set terminal png lw 3 14 crop
set output "${OUTPUT}/out/nginx-wrk-latency.png"
replot
# combined
set ylabel "Throughput (KReq/sec)"
set ytics 5
set yrange [0:20]
set y2label "Latency (msec)"
set y2tics 5
set y2range [0:]
set xtics ("64" 0, "128" 1, "256" 2, "512" 3, "1024" 4, "1500" 5, "2048" 6)
set xlabel "File size (bytes)"
set xrange [-1:7]
set terminal postscript eps lw 3 "Helvetica" 24
set output "${OUTPUT}/out/nginx-wrk-combined.eps"
plot \
'${OUTPUT}/nginx-docker-thpt.dat' usi (\$0-0.3):(\$1/1000):(\$2/1000) w boxerr lt 1 lc rgb "green" fill pattern 2 title "docker(mac)" ,\
'${OUTPUT}/nginx-lkl-thpt.dat' usi (\$0-0):(\$1/1000):(\$2/1000) w boxerr lt 1 lc rgb "cyan" fill pattern 4 title "lkl", \
'${OUTPUT}/nginx-native-thpt.dat' usi (\$0+0.3):(\$1/1000):(\$2/1000) w boxerr lt 1 lc rgb "red" fill pattern 0 title "native(mac)" ,\
'${OUTPUT}/nginx-docker.dat' usin (\$0-0.3):(\$1/1000):(\$2/1000) w yerror ps 1 lc rgb "green" ax x1y2 notitle ,\
'${OUTPUT}/nginx-lkl.dat' usin (\$0-0):(\$1/1000):(\$2/1000) w yerror ps 1 lc rgb "cyan" ax x1y2 notitle, \
'${OUTPUT}/nginx-native.dat' usin (\$0+0.3):(\$1/1000):(\$2/1000) w yerror ps 1 lc rgb "red" ax x1y2 notitle
set terminal png lw 3 14 crop
set xtics nomirror font ",14"
set output "${OUTPUT}/out/nginx-wrk-combined.png"
replot
set terminal dumb
unset output
replot
quit
EndGNUPLOT
| true
|
69c7d8037cc9f617bd8a224e0063f4a16faa3568
|
Shell
|
RickCardoso/test-rehearsals
|
/scripts/git-check.sh
|
UTF-8
| 144
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# Guard script: abort (exit 1) when the working tree has no changes to push.
GREEN='\033[0;32m'
NC='\033[0m' # No Color
ERROR_MESSAGE="${GREEN}No changes to be pushed${NC}"
# BUG FIX: `git status` exits 0 whether or not changes exist, so the original
# `if ( ! git status --branch --short )` could never fire.  Inspect the short
# output instead: empty output means a clean tree.
# NOTE(review): commits that are only *ahead* of upstream are not detected
# here — confirm whether that case should also count as "changes to push".
if [ -z "$(git status --short)" ]; then
    # printf interprets the \033 colour escapes (plain echo would not).
    printf "${ERROR_MESSAGE}\n"
    exit 1
fi
| true
|
0281163a71c956b9efe0672e7ca653e76c878e88
|
Shell
|
Pocsel/pocsel
|
/tests/old/mktests.sh
|
UTF-8
| 1,595
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/sh
# Build the test programs under ./build with cmake and run each of them,
# pretty-printing per-binary results.
SCRIPT="$0"
# Absolute directory containing this script.  NOTE(review): the print
# statement requires a Python 2 interpreter on PATH — confirm availability.
SCRIPTDIR=`python -c "import os;print os.path.abspath(os.path.dirname('$SCRIPT'))"`
test -d "$SCRIPTDIR/build" || mkdir "$SCRIPTDIR/build"
( cd build && cmake "$SCRIPTDIR" && make ) || exit 1
TESTS=""
# Collect every executable regular file produced by the build.
for f in "$SCRIPTDIR"/build/*
do
[ -x "$f" -a -f "$f" ] && TESTS="$TESTS $f"
done
# Coloured message helpers.  NOTE(review): these rely on the /bin/sh `echo`
# interpreting \033 escapes (true for dash, not for bash) — confirm the
# target shell before changing them.
printError()
{
echo "\033[31;2mERROR:\033[0m $*"
}
printSuccess()
{
echo "\033[32;2mSUCCESS:\033[0m $*"
}
printWarning()
{
echo "\033[33mWARNING:\033[0m $*"
}
# Prints without a trailing newline so the result can follow on the same line.
printInfo()
{
echo -n "\033[34;1m$*\033[0m"
}
# printResults [PASSED] [TOTAL]
# Summarise one test binary's result counts.  Missing/empty arguments
# default to 0; prints "No test done" and returns 1 when TOTAL is 0.
printResults()
{
local msg success total
# Parameter-expansion defaults replace the original unquoted
# `( test -z $1 && echo 0 ) || echo $1` subshells, which broke on
# arguments containing whitespace or glob characters.
success=${1:-0}
total=${2:-0}
msg="passed $success / $total"
[ "$total" = 0 ] && echo "No test done" && return 1
if [ "$success" = "$total" ]
then
printSuccess "$msg"
else
if [ "$success" = 0 ]
then
printError "$msg:"
else
printWarning "$msg:"
fi
fi
}
# execTest PROGRAM
# Run one test binary.  On exit status 0 the binary's last stdout line is
# expected to be "<passed> <total>"; print a summary plus the preceding
# output.  On failure, print the whole output as an error.
execTest()
{
local msg res success total
msg=`exec "$1" 2>&1`
res=$?
if [ $res = 0 ]
then
# The last line carries the counts; everything before it is detail output.
lastLine="`echo \"$msg\" | tail -n1`"
success=`echo $lastLine | cut -f1 -d' '`
total=`echo $lastLine | cut -f2 -d' '`
printResults $success $total
# head -n '-1' drops the counts line (GNU extension) — TODO confirm GNU head.
echo "$msg" | head -n '-1'
else
printError "$msg"
fi
}
# Run every collected test binary from inside the build directory.
cd "$SCRIPTDIR/build/"
for TEST in $TESTS
do
printInfo "* Starting tests for $TEST: "
EXEC="$TEST"
if [ ! -x "$EXEC" ]
then
printError "Cannot find test program $EXEC"
else
execTest "$EXEC"
fi
done
| true
|
5280f4d9fc658a84c763f3aa0c58af1eeea3e9ec
|
Shell
|
minghuadev/minecraft-tests
|
/other-javasamples/webcam-openipc-t31/docker-gen.bash
|
UTF-8
| 3,245
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# docker-gen.bash
# Generates bb.dockerfile, builds the oipc build image from it, and emits a
# sh-create.bash helper that starts a container with shared volumes.
# special suffix for user name and directories
# e.g. "u7" will create:
# oipcu7 --docker guest user name
# oipcimgu7 --docker image name
# obu7 --docker container name
ux="u7"
echo Creating bb.dockerfile ...
# create bb.dockerfile:
# Unquoted here-doc: ${ux} expands now, while \$GID etc. stay literal so
# Docker's ARG values are used at build time.  The body is data — untouched.
cat << EOF1 > bb.dockerfile
#FROM ubuntu:focal-20210921 # focal fails aws_sdk build.
#FROM ubuntu:bionic-20210930
FROM ubuntu:bionic-20220415
RUN apt-get update
RUN apt-get install -y git tree openssh-client make
RUN apt-get install -y bzip2 gcc libncurses5-dev bc
RUN apt-get install -y file vim
RUN apt-get install -y zlib1g-dev g++
RUN apt-get install -y libssl-dev
# from a sdk document
RUN apt-get install -y ncurses-term libncursesw5-dev
# tzdata
## preesed tzdata, update package index, upgrade packages and install needed software
RUN truncate -s0 /tmp/preseed.cfg && \
(echo "tzdata tzdata/Areas select America" >> /tmp/preseed.cfg) && \
(echo "tzdata tzdata/Zones/America select Los_Angeles" >> /tmp/preseed.cfg) && \
debconf-set-selections /tmp/preseed.cfg && \
rm -f /etc/timezone /etc/localtime && \
apt-get update && \
DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true \
apt-get install -y tzdata
## cleanup of files from setup
RUN rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
# from a sdk document
RUN apt-get update
RUN apt-get install -y texinfo texlive gawk
# needed by wl18xx build
RUN apt-get install -y autoconf libtool libglib2.0-dev bison flex
# rk1808 96boards-tb-96aiot dependencies:
#RUN dpkg --add-architecture i386
#RUN apt-get update
RUN DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles" apt-get install -y \
curl sed \
make binutils build-essential gcc g++ bash patch gzip bzip2 perl tar cpio python unzip \
rsync file \
bc wget libncurses5 \
git \
openssh-client
# repo: removed to use a local copy
# gawk: needed to build u-boot image.
# libqt4-dev : removed for focal
# python-linaro-image-tools linaro-image-tools : removed for focal
# python-matplotlib : removed for focal
# time needed for the rk1808 recovery build.sh script:
RUN apt-get install -y time
# unbuffer from expect and cmake needed by rv11xx build script:
RUN apt-get update
RUN apt-get install -y expect cmake
ARG UNAME=oipc${ux}
ARG UID=9999
ARG GID=9999
RUN groupadd -g \$GID \$UNAME
RUN useradd -m -u \$UID -g \$GID -s /bin/bash \$UNAME
RUN rm /bin/sh && ln -s bash /bin/sh
RUN cp -a /etc /etc-original && chmod a+rw /etc
USER \$UNAME
CMD /bin/bash
EOF1
# Fail fast and trace commands from here on.
set -ex
echo Docker build off bb.dockerfile ...
# Build as the invoking user's UID/GID so shared volumes stay writable.
docker build --build-arg UID=$(id -u) --build-arg GID=$(id -g) \
-f bb.dockerfile -t oipcimg${ux} .
echo Docker build finished ...
echo
echo Creating sh-create.bash file ...
# NOTE(review): inside this unquoted here-doc a trailing single "\" is a
# line continuation (consumed while writing the file) whereas "\\" emits a
# literal "\".  The mix below still yields a valid script, but confirm the
# first continuation joining the docker-run line is intentional.
cat << EOF2 > sh-create.bash
#!/bin/bash
if [ ! -d sharedfiles ]; then mkdir sharedfiles; fi
if [ ! -d buildfiles ]; then mkdir buildfiles; fi
docker run -td \
-v $(pwd)/sharedfiles:/home/oipc${ux}/sharedfiles \\
-v $(pwd)/buildfiles:/home/oipc${ux}/buildfiles \\
--name ob${ux} oipcimg${ux}
EOF2
echo Created sh-create.bash file ...
| true
|
a177d6d9680a0e0149ce22c26a0b88842fb8facc
|
Shell
|
DanielChuDC/how-to-create-logical-volume
|
/scripts/example.sh
|
UTF-8
| 1,990
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Provision LVM volumes and mount points for an ICP node on data disk
# /dev/xvdc.  NOTE(review): the size variables ${kubelet_lv} ${etcd_lv}
# ${docker_lv} ${management_lv} (and ${registry_lv}, ${flag_ma_nfs}) are not
# set in this file — confirm the invoking environment exports them.
# Create Physical Volumes
# Assume that you have a /dev/xvdc as data disk
# Using pvcreate to create a logical volume based on data disk
pvcreate /dev/xvdc
# Create Volume Groups
# Using vgcreate to create a volume group
vgcreate icp-vg /dev/xvdc
# Create Logical Volumes
# ${kubelet_lv} ${etcd_lv} ${docker_lv} ${management_lv} are the disk size
lvcreate -L ${kubelet_lv}G -n kubelet-lv icp-vg
lvcreate -L ${etcd_lv}G -n etcd-lv icp-vg
#lvcreate -L ${registry_lv}G -n registry-lv icp-vg
lvcreate -L ${docker_lv}G -n docker-lv icp-vg
lvcreate -L ${management_lv}G -n management-lv icp-vg
#Create Filesystems
# Format the logical volumes as ext4
mkfs.ext4 /dev/icp-vg/kubelet-lv
mkfs.ext4 /dev/icp-vg/docker-lv
mkfs.ext4 /dev/icp-vg/etcd-lv
#mkfs.ext4 /dev/icp-vg/registry-lv
mkfs.ext4 /dev/icp-vg/management-lv
# Create Directories
mkdir -p /var/lib/docker
mkdir -p /var/lib/kubelet
mkdir -p /var/lib/etcd
mkdir -p /var/lib/registry
mkdir -p /var/lib/icp
# Add mount in /etc/fstab
# Finally we link the folder with the logical volume
# put it into /etc/fstab to persist the volume during restart
# (device-mapper doubles hyphens in VG/LV names, hence icp--vg-kubelet--lv)
cat <<EOL | tee -a /etc/fstab
/dev/mapper/icp--vg-kubelet--lv /var/lib/kubelet ext4 defaults 0 0
/dev/mapper/icp--vg-docker--lv /var/lib/docker ext4 defaults 0 0
/dev/mapper/icp--vg-etcd--lv /var/lib/etcd ext4 defaults 0 0
/dev/mapper/icp--vg-management--lv /var/lib/icp ext4 defaults 0 0
EOL
# Mount Registry for Single Master
# if condition, optional
if [ ${flag_ma_nfs} -eq 0 ]; then
lvcreate -L ${registry_lv}G -n registry-lv icp-vg
mkfs.ext4 /dev/icp-vg/registry-lv
cat <<EOR | tee -a /etc/fstab
/dev/mapper/icp--vg-registry--lv /var/lib/registry ext4 defaults 0 0
EOR
fi
# Mount Filesystems
# Using mount command to mount all
# If mount success, will have no error or log output.
mount -a
# How to verfiy?
# Using df -Th <the directory you create>
# The output should return you the example name as : /dev/mapper/icp--<logical volume you create just now>--lv
| true
|
367096ac2e204a6af5b169e990840f3cda8b2e45
|
Shell
|
baekip/Util_Script
|
/sh_run/other_sh/telomeric_sh/telo-seq.sh
|
UTF-8
| 449
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run Telo-seq.pl on one sample's paired-end FASTQ files.
# $1 - sample id (directory and file-name prefix under 01_fastqc_orig).
sample_id=$1
project_path=/bio/BioProjects/YSU-Human-WGS-2016-12-TBD160883/
fastq_path=$project_path/result/01_fastqc_orig/$sample_id/
# ${sample_id}_1 braces replace the original backslash escape that kept
# "_1" from being read as part of the variable name.
fastq_1=$fastq_path/${sample_id}_1.fastq
fastq_2=$fastq_path/${sample_id}_2.fastq
output_path=$project_path/result/00_telo-seq/
log_file=$output_path/${sample_id}.telo-seq.log
# Minimum telomeric repeat count passed to Telo-seq.pl.
repeats=5
# Redirect all further stdout/stderr to the per-sample log file.
# Paths are quoted so sample ids containing spaces cannot break the script.
exec > "$log_file" 2>&1
echo "$fastq_1"
echo "$fastq_2"
/bio/BioTools/telo-seq/Telo-seq.pl "$fastq_1" "$fastq_2" "$repeats"
| true
|
f2e15219a0e7b2d2ef80923f1b289c568078ec01
|
Shell
|
Bondzio/AUR
|
/mingw-w64-sqlite/PKGBUILD
|
UTF-8
| 1,621
| 2.8125
| 3
|
[] |
no_license
|
# Contributor: napa3um <napa3um@gmail.com>
# Contributor: Filip Brcic <brcha@gna.org>
# PKGBUILD: SQLite cross-compiled for both mingw-w64 target triplets.
pkgname=mingw-w64-sqlite
# Upstream "amalgamation" tarball version encoding of 3.8.9.
_amalgamationver=3080900
pkgver=3.8.9
pkgrel=1
pkgdesc="A C library that implements an SQL database engine (mingw-w64)"
arch=(any)
groups=(mingw-w64)
depends=(mingw-w64-crt)
makedepends=(mingw-w64-configure mingw-w64-pdcurses mingw-w64-readline)
options=(!buildflags !strip staticlibs !emptydirs)
conflicts=(mingw-w64-sqlite3)
provides=("mingw-w64-sqlite3=$pkgver")
replaces=(mingw-w64-sqlite3)
license=('custom:Public Domain')
url="http://www.sqlite.org/"
source=("http://www.sqlite.org/2015/sqlite-autoconf-$_amalgamationver.tar.gz")
sha1sums=('db70dee268700b312cbaa5b3e5cf8454e1c8b7b9')
_architectures="i686-w64-mingw32 x86_64-w64-mingw32"
# Configure and compile once per target triplet in build-<arch>/.
build() {
cd "${srcdir}/sqlite-autoconf-${_amalgamationver}"
for _arch in ${_architectures}; do
mkdir -p build-${_arch} && pushd build-${_arch}
# Optional SQLite features are enabled via CFLAGS; ${_arch}-configure wraps
# ./configure with the cross-compilation environment for the triplet.
CFLAGS="-DSQLITE_ENABLE_COLUMN_METADATA=1 -DSQLITE_DISABLE_DIRSYNC=1 -DSQLITE_ENABLE_FTS3=3 -DSQLITE_ENABLE_RTREE=1" \
config_TARGET_EXEEXT=.exe \
${_arch}-configure \
--target=${_arch} \
--enable-threadsafe
make
popd
done
}
# Install per-arch, drop Windows executables, strip libraries/DLLs.
package() {
cd "${srcdir}/sqlite-autoconf-${_amalgamationver}"
for _arch in ${_architectures}; do
pushd build-${_arch}
make DESTDIR="${pkgdir}" install
find "${pkgdir}/usr/${_arch}" -name "*.exe" -exec rm {} \;
find "${pkgdir}/usr/${_arch}" -name "*.dll" -exec ${_arch}-strip --strip-unneeded {} \;
# Implicit -print applies to both -name alternatives; xargs -r skips an
# empty list, -t traces, -l1 strips one file per invocation.
find "${pkgdir}/usr/${_arch}" -name "*.a" -o -name "*.dll" | xargs -rtl1 ${_arch}-strip -g
rm -rf "${pkgdir}/usr/${_arch}/share"
popd
done
}
| true
|
1200b02cab84aac37621e6369b224eb047ffe28b
|
Shell
|
mingaleg/turingmarkov
|
/ejudge-binding/turing
|
UTF-8
| 662
| 3.4375
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/sh
# Ejudge binding for turing compiler.
# Based on:
# * https://ejudge.ru/wiki/index.php/Foo-version.in
# * https://github.com/blackav/ejudge/tree/master/scripts
# Usage: turing <source> <output> — compiles $1 into executable $2.
LANG_CONFIG_DIR="/home/judges/compile/conf/lang.d"
[ "${EJUDGE_LANG_CONFIG}" = "" ] && EJUDGE_LANG_CONFIG="${LANG_CONFIG_DIR}/turing"
if [ -f "${EJUDGE_LANG_CONFIG}" ]
then
    # The sourced config is expected to define $version and $TURINGPATH.
    . "${EJUDGE_LANG_CONFIG}"
else
    version="unknown"
    TURINGPATH=$(which turingmarkov 2>/dev/null) || TURINGPATH=/bin/false
fi
# Separate tests replace the deprecated/ambiguous `-o` inside a single [ ].
if [ -z "${version}" ] || [ "${TURINGPATH}" = "/bin/false" ]
then
    echo "This language is not supported." >&2
    exit 1
fi
# Quoted positional parameters so paths with spaces survive.
"${TURINGPATH}" compile turing "$1" >"$2" || exit 1
chmod +x "$2"
exit 0
| true
|
ba539fb1257377c4bed1a8bf59f5bc61134add33
|
Shell
|
dantesun/dotfiles-shell
|
/home/tools/bin/ssh
|
UTF-8
| 894
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Wrapper around /usr/bin/ssh that:
#  * concatenates ~/.ssh/config and ~/.ssh/config.* into a FIFO and points
#    ssh at it with -F, so split config files work; and
#  * transparently uses sshpass when a password for the host is recorded in
#    ~/.ssh/ssh_pass ("<host> <password>" per line).
ssh() {
  # Exported so child processes (mktemp) honour it; the original unexported
  # assignment never reached mktemp.  NOTE(review): assumes ~/tmp exists.
  export TMPDIR=~/tmp
  case "$(uname -s)" in
    CYGWIN_NT* | Linux)
      tmp_fifo=$(mktemp -u --suffix=._ssh_fifo)
      ;;
    Darwin)
      # BSD mktemp has no --suffix; use -t template instead.
      tmp_fifo=$(/usr/bin/mktemp -u -t ._ssh_fifo)
      ;;
    *)
      # Unknown platform: fall back to plain ssh without the merged config.
      tmp_fifo=
      ;;
  esac
  SSH="/usr/bin/ssh"
  if ! [ -x "$SSH" ]; then
    echo "ssh is not found in PATH!"
    exit 1
  fi
  # BUG FIX: "cmmand" -> "command"; the typo made this branch dead code.
  if command -v sshpass &> /dev/null && [ -r ~/.ssh/ssh_pass ]; then
    HOST=$1
    PASSWORD=$(awk "/$HOST/ {print \$2}" ~/.ssh/ssh_pass)
    if ! [ -z "$PASSWORD" ]; then
      # $SSH deliberately holds multiple words; expanded unquoted below.
      SSH="sshpass -p $PASSWORD $SSH"
    fi
  fi
  if [ -z "$tmp_fifo" ]; then
    # BUG FIX: when no FIFO could be named the original still passed
    # -F "$tmp_fifo" (an empty config path); run ssh without -F instead.
    $SSH "$@"
  else
    mkfifo "$tmp_fifo"
    trap "rm -f $tmp_fifo" EXIT
    # Feed the merged config through the FIFO while ssh reads it via -F.
    cat ~/.ssh/config ~/.ssh/config.* >"$tmp_fifo" 2>/dev/null &
    $SSH -F "$tmp_fifo" "$@"
  fi
}
# BUG FIX: "$@" preserves argument word boundaries; $* re-split them.
ssh "$@"
| true
|
8ab35c2e54146eaf4404451bf65eef4a7c8653d8
|
Shell
|
acidburn0zzz/svntogit-community
|
/python-llvmlite/trunk/PKGBUILD
|
UTF-8
| 812
| 2.546875
| 3
|
[] |
no_license
|
# Maintainer: Konstantin Gizdov <arch at kge dot pw>
# PKGBUILD: llvmlite Python binding, built from the upstream git tag.
_pkgname=llvmlite
pkgbase="python-${_pkgname}"
pkgname=("${pkgbase}")
pkgver=0.38.0
pkgrel=1
pkgdesc='A Lightweight LLVM Python Binding for Writing JIT Compilers'
arch=('x86_64')
makedepends=('cmake' 'llvm11' 'git' 'python-setuptools' 'python-setuptools-scm')
depends=('python' 'llvm11-libs')
url="https://github.com/numba/${_pkgname}"
license=('BSD')
# Git checkout; integrity is pinned by the clone itself, hence SKIP.
source=("${_pkgname}-${pkgver}::git+${url}")
sha256sums=('SKIP')
build() {
cd "${srcdir}/${_pkgname}-${pkgver}"
python setup.py build
}
check() {
cd "${srcdir}/${_pkgname}-${pkgver}"
python setup.py test
}
package() {
cd "${srcdir}/${_pkgname}-${pkgver}"
python setup.py install --root="${pkgdir}/" --optimize=1
install -D LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
| true
|
108e74715e51a4f40f53dc4b3297cefc9b83d5d1
|
Shell
|
rohelvai/iAmLazy
|
/Bash/oracle_11g_installer.sh
|
UTF-8
| 2,449
| 3.34375
| 3
|
[
"MIT",
"CC-BY-NC-SA-4.0"
] |
permissive
|
#!/bin/bash
# Unattended installer for Oracle 11g XE on Ubuntu: converts the vendor RPM
# with alien, fakes /sbin/chkconfig, tunes kernel parameters, installs the
# resulting .deb and configures the shell environment.  Expects
# oracle-xe-11.2.0-1.0.x86_64.rpm in the current directory.
echo -e "[+] Installing Oracle 11g XE..."
sudo apt-get update -y
# unzip oracle-xe-11.2.0-1.0.x86_64.rpm.zip
sudo apt-get install alien libaio1 unixodbc -y
sudo alien --scripts -d oracle-*.rpm
# sudo alien -i --scripts oracle-*.rpm
sudo touch /sbin/chkconfig
echo -e "[+] Writing the following config to Configuration file...\n"
# Fake /sbin/chkconfig so the RPM post-install scripts succeed on Ubuntu.
echo '
#!/bin/bash
# Oracle 11gR2 XE installer chkconfig hack for Ubuntu
file=/etc/init.d/oracle-xe
if [[ ! `tail -n1 $file | grep INIT` ]]; then
echo >> $file
echo "### BEGIN INIT INFO" >> $file
echo "# Provides: OracleXE" >> $file
echo "# Required-Start: $remote_fs $syslog" >> $file
echo "# Required-Stop: $remote_fs $syslog" >> $file
echo "# Default-Start: 2 3 4 5" >> $file
echo "# Default-Stop: 0 1 6" >> $file
echo "# Short-Description: Oracle 11g Express Edition" >> $file
echo "### END INIT INFO" >> $file
fi
update-rc.d oracle-xe defaults 80 01' | sudo tee /sbin/chkconfig
echo -e "\n[+] Done!"
sudo chmod 755 /sbin/chkconfig
echo -e "[+] Changing Kernel Parameters...\n"
echo '# Oracle 11g XE kernel parameters
fs.file-max=6815744
net.ipv4.ip_local_port_range=9000 65000
kernel.sem=250 32000 100 128
kernel.shmmax=536870912' | sudo tee /etc/sysctl.d/60-oracle.conf
sudo service procps start
echo -e "\n[+] Kinda rebooting..."
sudo sysctl -q fs.file-max
echo -e "[+] Writing loader script...\n"
# BUG FIX: the generated init script's "start)" branch was missing its ";;"
# terminator, which made /etc/rc2.d/S01shm_load a shell syntax error at boot.
echo '#!/bin/sh
case "$1" in
start)
mkdir /var/lock/subsys 2>/dev/null
touch /var/lock/subsys/listener
rm /dev/shm 2>/dev/null
mkdir /dev/shm 2>/dev/null
;;
*)
echo error
exit 1
;;
esac' | sudo tee /etc/rc2.d/S01shm_load
sudo chmod 755 /etc/rc2.d/S01shm_load
echo "[+] Symlincing the system..."
sudo ln -s /usr/bin/awk /bin/awk
sudo mkdir /var/lock/subsys
sudo touch /var/lock/subsys/listener
sudo sysctl -p /etc/sysctl.d/60-oracle.conf
echo -e "[+] Now installing the database..."
# sudo alien -i --scripts oracle-*.rpm
sudo dpkg --install oracle-*.deb
sudo /etc/init.d/oracle-xe configure
echo -e "[+] Writing Oracle Configurations..."
echo 'export ORACLE_HOME=/u01/app/oracle/product/11.2.0/xe
export ORACLE_SID=XE
export NLS_LANG=`$ORACLE_HOME/bin/nls_lang.sh`
export ORACLE_BASE=/u01/app/oracle
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$LD_LIBRARY_PATH
export PATH=$ORACLE_HOME/bin:$PATH' | sudo tee -a ~/.bashrc
source ~/.bashrc
sudo service oracle-xe start
sudo usermod -a -G dba $USER
echo -e "\n[+] Good Luck..."
| true
|
54a96f5a201d54c6e1311d3cb4744cf358bf09b7
|
Shell
|
Anastassiya08/BigData_course
|
/HW/hw1/605/run.sh
|
UTF-8
| 276
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Create a $1-byte file, upload it to HDFS, and print the difference between
# the sum of its HDFS block lengths and the requested size.
# $1 - file size in bytes (used as the dd block size, count=1).
dd if=/dev/zero of=ex.txt bs="$1" count=1
hdfs dfs -put ex.txt /data
# Extract every "len=<bytes>" field from the fsck block report.
sizes=$(hdfs fsck /data/ex.txt -files -blocks | grep -E -o -w "len=\w+" | sed 's|.*=||')
hdfs dfs -rm /data/ex.txt
# Initialise the accumulator explicitly; the original relied on an unset
# $total expanding to nothing inside $(( )), which breaks under `set -u`.
total=0
for i in $sizes
do
total=$(($total + $i))
done
diff=$(($total - $1))
echo "$diff"
| true
|
2fb9e75218ae0f9cf8cfbb10dd68039191975ff0
|
Shell
|
apache/flink
|
/flink-python/pyflink/bin/pyflink-udf-runner.sh
|
UTF-8
| 1,977
| 3.25
| 3
|
[
"BSD-3-Clause",
"OFL-1.1",
"ISC",
"MIT",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# Interpreter for the UDF worker; overridable via the $python env variable.
python=${python:-python}
if [[ "$FLINK_TESTING" = "1" ]]; then
    # Resolve symlinks to locate the Flink source tree relative to FLINK_HOME.
    ACTUAL_FLINK_HOME=$(cd "$FLINK_HOME"; pwd -P)
    FLINK_SOURCE_ROOT_DIR=$(cd "$ACTUAL_FLINK_HOME/../../../../"; pwd)
    FLINK_PYTHON="${FLINK_SOURCE_ROOT_DIR}/flink-python"
    if [[ -f "${FLINK_PYTHON}/pyflink/fn_execution/boot.py" ]]; then
        # use pyflink source code to override the pyflink.zip in PYTHONPATH
        # to ensure loading latest code
        export PYTHONPATH="$FLINK_PYTHON:$PYTHONPATH"
    fi
fi
if [[ "$_PYTHON_WORKING_DIR" != "" ]]; then
    # set current working directory to $_PYTHON_WORKING_DIR
    cd "$_PYTHON_WORKING_DIR"
    if [[ "$python" == ${_PYTHON_WORKING_DIR}* ]]; then
        # The file extracted from archives may not preserve its original permission.
        # Set minimum execution permission to prevent from permission denied error.
        chmod +x "$python"
    fi
fi
log="$BOOT_LOG_DIR/flink-python-udf-boot.log"
# BUG FIX: "$@" (quoted) preserves argument boundaries; unquoted $@ re-split
# any argument containing whitespace.  The log path is quoted for the same
# reason.
${python} -m pyflink.fn_execution.beam.beam_boot "$@" 2>&1 | tee "${log}"
| true
|
99d798c6cd5cfb10368dba1eca5c70121604b3fb
|
Shell
|
neechbear/dotfiles-example
|
/.profile.d/010-environment-common.sh
|
UTF-8
| 2,060
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
# Setting environment variables DOTFILES_SYMLINK_SOURCE and
# DOTFILES_SYMLINK_TARGET will provide default values for the
# dotfiles-symlink-files command.
#if [ -n "$HOME" ] && [ -d "$HOME" ] && [ -z "$DOTFILES_SYMLINK_SOURCE" ] ; then
#  if [ -x "$HOME/src/rcfiles/bin/dotfiles.sh" ] ; then
#    DOTFILES_SYMLINK_SOURCE="$HOME/src/rcfiles"
#  fi
#  export DOTFILES_SYMLINK_SOURCE
#  export DOTFILES_SYMLINK_TARGET="$HOME"
#fi
# Derive USER/LOGNAME from id(1) when the login environment did not set them.
if [ -z "$USER" ] && [ -z "$LOGNAME" ] && [ -x /usr/bin/id ] ; then
export USER="$(id -un)"
export LOGNAME="$USER"
fi
# Prefer the SSH-provided TTY, then fall back to tty(1).
if [ -z "$TTY" ] && [ -n "$SSH_TTY" ] ; then
export TTY="$SSH_TTY"
fi
if [ -z "$TTY" ] && [ -x /usr/bin/tty ] ; then
export TTY="$(/usr/bin/tty)"
fi
# Append a per-OS bin directory to PATH when present.
export SYSTEM="$(uname -s)"
case "$SYSTEM" in
Linux) [ -d "$HOME/bin/linux" ] && PATH="$PATH:$HOME/bin/linux" ;;
FreeBSD) [ -d "$HOME/bin/freebsd" ] && PATH="$PATH:$HOME/bin/freebsd" ;;
Darwin) [ -d "$HOME/bin/darwin" ] && PATH="$PATH:$HOME/bin/darwin" ;;
esac
# Fully-qualified hostname.  NOTE(review): `hostname -f` is not supported on
# every platform — confirm for the systems this profile targets.
if [ -z "$HOSTNAME" ] ; then
if [ -x /usr/bin/hostname ] ; then
export HOSTNAME="$(/usr/bin/hostname -f)"
elif [ -x /bin/hostname ] ; then
export HOSTNAME="$(/bin/hostname -f)"
fi
fi
# Per-host bin directory, if one exists.
[ -d "$HOME/bin/$HOSTNAME" ] && PATH="$PATH:$HOME/bin/$HOSTNAME"
export LESS="-g -m -M -R"
export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
# Shell history: drop duplicates/leading-space entries, timestamp each line.
export HISTCONTROL=ignoreboth
export HISTSIZE=1000
export HISTFILESIZE=30000
export HISTTIMEFORMAT="%Y%m%d %T "
# Prefer vim over vi when available (later assignment wins).
if [ -x /usr/bin/vi ] ; then
EDITOR=/usr/bin/vi
fi
if [ -x /usr/bin/vim ] ; then
EDITOR=/usr/bin/vim
fi
export EDITOR
# Prefer the repo-local ansible.cfg over the top-level one (later wins).
if [ -e "$HOME/ansible.cfg" ] ; then
ANSIBLE_CONFIG="$HOME/ansible.cfg"
fi
if [ -e "$HOME/ansible/ansible.cfg" ] ; then
ANSIBLE_CONFIG="$HOME/ansible/ansible.cfg"
fi
export ANSIBLE_CONFIG
# https://superuser.com/questions/278286/making-256-color-and-line-drawing-characters-both-work-in-putty
# https://www.chiark.greenend.org.uk/~sgtatham/putty/wishlist/utf8-plus-vt100.html
# http://www.cl.cam.ac.uk/~mgk25/unicode.html#term
export NCURSES_NO_UTF8_ACS=1
| true
|
d2cc2408b8863c5b002130bacc98712ede9397f1
|
Shell
|
marciopocebon/mnk-game-test
|
/fabulous.sh
|
UTF-8
| 4,910
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#mnk-game-test
#Following repository: https://github.com/martialdidit/mnk-game-test
#Martial Puygrenier - martialdidit
#last update : 32/11/2014
# Test harness for the mnk-game project: builds ../mnk-game, then runs the
# sample/grid/contest/board suites selected by command-line flags (-s -g -c -b).
STARTTIME=$(date +%s)
declare pass=0
declare fail=0
# Expected grand total of tests across all suites.
total=149
red='\e[41m'
blue='\e[1;34m'
purple='\e[1;31m'
yellow='\e[43m'
NC='\e[0m' # No Color
bold=`tput bold`
normal=`tput sgr0`
# Increment the global failure counter.
function failed {
fail=$(( fail + 1 ))
}
# Increment the global pass counter.
function success {
pass=$(( pass + 1 ))
}
# fabulous EXPECTED_STATUS CMD [ARGS...]
# Run CMD silently and compare its exit status against EXPECTED_STATUS;
# on mismatch, re-run CMD with output visible for debugging and bump $fail.
function fabulous {
EXPECT_VALUE=$1
shift
"$@" &>/dev/null
local status=$?
if [ $EXPECT_VALUE -eq 0 ]; then
echo "Expected result: EXIT_SUCCESS"
else
echo "Expected result: EXIT_FAILURE"
fi
if [ $status -eq $EXPECT_VALUE ]; then
echo -e "${blue}*Passed*${NC}: check $@"
success
else
echo -e "${red}****FAILED!****:${NC} check $@"
echo ""
echo "Return code: $status (Error)"
echo "Stderr"
# Re-run with output attached so the failure can be inspected.
"$@"
failed
fi
echo ""
}
# constest EXPECTED_STATUS CMD [ARGS...]
# Like fabulous, but the failing command is NOT re-run (used for the
# contest-mode tests, which may be long-running).
function constest {
EXPECT_VALUE=$1
shift
"$@" &>/dev/null
local status=$?
if [ $EXPECT_VALUE -eq 0 ]; then
echo "Expected result: EXIT_SUCCESS"
else
echo "Expected result: EXIT_FAILURE"
fi
if [ $status -eq $EXPECT_VALUE ]; then
echo -e "${blue}*Passed*${NC}: check $@"
success
else
echo -e "${red}****FAILED!****:${NC} check $@"
echo ""
echo "Return code: $status (Error)"
failed
fi
echo ""
}
# game EXPECTED_STATUS FILE CMD [ARGS...]
# Feed FILE line-by-line into CMD's stdin and compare CMD's exit status
# against EXPECTED_STATUS; on mismatch, dump the input file and replay with
# output visible.
function game {
EXPECT_VALUE=$1
shift
FILE=$1
shift
while read -r line
do
cat <<< "$line"
done <$FILE | "$@" &>/dev/null
local status=$?
if [ $EXPECT_VALUE -eq 0 ]; then
echo "Expected result: EXIT_SUCCESS"
else
echo "Expected result: EXIT_FAILURE"
fi
if [ $status -eq $EXPECT_VALUE ]; then
echo -e "${blue}*Passed*${NC}: check $@"
success
else
echo -e "${red}****FAILED!****:${NC} check $@"
echo ""
echo "Return code: $status (Error)"
echo "Stderr"
echo "Check the file $FILE"
echo "Stdin :"
# Show the exact stdin that was fed to the command...
while read -r line
do
echo $line
done <$FILE
# ...then replay the run with its output visible.
while read -r line
do
cat <<< "$line"
done <$FILE | "$@"
failed
fi
echo ""
}
echo "=======================[ mnk-game ]========================="
echo ""
echo "-----------( File Hierarchy )---------"
echo ""
# Verify the expected project layout relative to this test repository.
if [ ! -f ../mnk-game/src/mnk-game.c ]; then
echo "File '../mnk-game/src/mnk-game.c' not found!"
failed
fi
if [ ! -f ../mnk-game/src/mnk-game.h ]; then
echo "File '../mnk-game/src/mnk-game.h' not found!"
failed
fi
if [ ! -f ../mnk-game/src/Makefile ]; then
echo "File '../mnk-game/src/MAKEFILE' not found!"
failed
fi
if [ ! -f ../mnk-game/Makefile ]; then
echo "File '../mnk-game/MAKEFILE' not found!"
failed
fi
if [ $fail -eq 0 ]; then
echo "Checking request files: Passed"
else
echo "Checking request files: Failed"
fi
echo ""
echo "-----------( Build System )---------"
echo ""
# Build the project; abort the whole harness if the default target fails.
cd ../mnk-game
echo "Expected result: EXIT_SUCCES"
make &>/dev/null
code=$?
if [[ $code -eq 0 ]]; then
echo "*Passed*: check the make"
success
else
echo "****FAILED!****: check the make target"
echo "Abort the script"
failed
exit
fi
echo ""
make help &>/dev/null
code=$?
echo "Expected result: EXIT_SUCCES"
# NOTE(review): [[ code -eq 0 ]] works because -eq forces arithmetic
# evaluation of the bare word "code", but [[ $code -eq 0 ]] is clearer.
if [[ code -eq 0 ]]; then
echo "*Passed*: check the help target"
success
else
echo "****FAILED!****: check the help target"
failed
fi
if [[ ! -f ../mnk-game/mnk-game ]]; then
echo "****FAILED!****: executable not found"
echo "Abort the script"
failed
exit
fi
echo ""
echo "-----------( Option Parser )---------"
echo ""
cd ../mnk-game-test
# Each flag selects a suite; the corresponding list file supplies one test
# invocation ("<expected-status> [file] <command...>") per line, which is
# word-split on purpose when passed to the helper functions.
for i in "$@"
do
case $i in
-s)
#Basic test
while read line
do
name=$line
fabulous $name
done < sample_test
;;
-g)
#Grid test
while read line
do
name=$line
game $name
done < grid_test
;;
-c)
#Constest test
while read line
do
name=$line
constest $name
done < test_contest
;;
-b)
#board tests
# bitboard_test writes its pass count to the file "pass"; 45 is the
# number of board assertions it runs.
make
exec 3>&1
./bitboard_test
result=$(cat pass)
pass=$((pass+result))
fail=$((fail+$((45-result))))
;;
*)
echo "Unknow option, see the readme"
exit
;;
esac
done
ENDTIME=$(date +%s)
echo ""
echo "----------( Final result )----------"
echo -e "${blue}Passed $pass${NC}; ${red}Failed: $fail${NC}; Total $total tests"
if [[ $((pass + fail)) -ne $total ]]; then
echo -e "${purple}CAREFUL, NOT ALL THE TEST ARE PASSED IN THE SCRIPT${NC}, can be an infinite loop or you simply forget to add other test files."
fi
echo "Time elapsed: $((ENDTIME-STARTTIME))s"
| true
|
44453084901f8c9dc663b048a512bbf80db89a55
|
Shell
|
GatorShan/Tragopogon-Inflorescence-RNA-seq-Analysis
|
/Annotation/Trinotate_Functional_Annotation/Trinotate_Blast_Tdu_3.0.sh
|
UTF-8
| 1,263
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# SLURM job: BLAST the Tragopogon dubius SuperTranscripts (blastx) and their
# TransDecoder-predicted proteins (blastp) against the Trinotate
# uniprot_sprot database, for downstream Trinotate annotation.
#SBATCH --job-name=Trinotate_Blast_Tdu_3.0
#SBATCH --mail-user=shan158538@ufl.edu
#SBATCH --mail-type=FAIL,END
#SBATCH --output=Trinotate_Blast_Tdu_3.0_%j.out
#SBATCH --error=Trinotate_Blast_Tdu_3.0_%j.error
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=16
#SBATCH --mem=2gb
#SBATCH --time=5-00:00:00
date;hostname;pwd
# Input/output locations on the cluster filesystem.
IN1=/ufrc/soltis/shan158538/TragFL_NewAnalysis/OutPut/SuperTranscript
IN2=/ufrc/soltis/shan158538/TragFL_NewAnalysis/OutPut/Annotation/TransDecoder
DB=/ufrc/soltis/shan158538/TragFL_NewAnalysis/OutPut/Annotation/Database
OUT=/ufrc/soltis/shan158538/TragFL_NewAnalysis/OutPut/Annotation/Blast
# use the latest version of blast
module purge
module load ncbi_blast/2.9.0
cd ${DB}
# One-time database build, kept for reference (already done):
#makeblastdb \
# -in uniprot_sprot.pep \
# -dbtype prot \
# -parse_seqids
# Nucleotide query vs. protein DB; tabular output, best hit only.
blastx \
-query ${IN1}/SuperTranscript_Tdu.fasta \
-db ${DB}/uniprot_sprot.pep \
-num_threads 16 \
-max_target_seqs 1 \
-outfmt 6 \
-evalue 1e-3 \
> ${OUT}/Tdu_blastx.outfmt6
# NOTE(review): these echoes run after the corresponding search completes —
# presumably progress markers for the log.
echo "Search Trinity transcripts"
# Protein query vs. protein DB.
blastp \
-query ${IN2}/SuperTranscript_Tdu.fasta.transdecoder.pep \
-db ${DB}/uniprot_sprot.pep \
-num_threads 16 \
-max_target_seqs 1 \
-outfmt 6 \
-evalue 1e-3 \
> ${OUT}/Tdu_blastp.outfmt6
echo "Search Transdecoder-predicted proteins"
date
| true
|
1ef0f393d497f15f0215971fe34869b136dc3577
|
Shell
|
Ankitha016/Unix
|
/fieoperations/file_operation2.sh
|
UTF-8
| 314
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/sh
# Interactively copy a file, then rename it, showing contents at each step.
# FIX: quoted every expansion so filenames containing spaces work, and used
# `read -r` so backslashes in names are not mangled.
echo "Enter the source file name"
read -r file1
echo "Enter the destination file name"
read -r file2
echo "Contents of file1"
cat "$file1"
echo "Contents of file2"
cat "$file2"
cp "$file1" "$file2"
echo "After copying contents file2 has contents"
cat "$file2"
# NOTE(review): this overwrites the copy just made with the original file;
# preserved as-is since that is the original script's behaviour.
mv "$file1" "$file2"
echo "After renaming file1 file to a"
ls
| true
|
aea2e3d4c2d60407e279fec6714273b832591b0b
|
Shell
|
AlexisEspinosaGayosso/OF-rsyncScripts
|
/myBackupFunctions.sh
|
UTF-8
| 12,745
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#FUNCTIONS
#-------------------------------------------
#Defining the basic backup function
basicBackup() {
  # Pull $caseDir/$bDir from the remote Pawsey host into the current
  # directory, retrying up to $MaxTries times with a $pausingMinutes-minute
  # pause between attempts. Everything is appended to $logJob via tee.
  echo "#--------------------------------------------------" | tee -a ${logJob}
  echo "CaseDir=${caseDir}" | tee -a ${logJob}
  echo "#--------------------------------------------------" | tee -a ${logJob}
  echo "Backing up the ${bDir} Dir" | tee -a ${logJob}
  echo "#--------------------------------------------------" | tee -a ${logJob}
  #sshpass -p $pwRemote rsync --stats --progress -auvzrhs espinosa@hpc-data.pawsey.org.au:$caseDir/$bDir . | tee -a ${logJob}
  # Empty (or single-space) bDir means "skip this slot".
  if [ -z "$bDir" ] || [ "$bDir" = " " ]; then
    echo "bDir=${bDir} then nothing is done" | tee -a ${logJob}
    return
  fi
  jTries=0
  while [ "$jTries" -lt "$MaxTries" ]; do
    echo "bDir=${bDir} backup will be done, try ${jTries}" | tee -a ${logJob}
    rsync --stats --progress -auvzrhs espinosa@hpc-data.pawsey.org.au:$caseDir/$bDir . | tee -a ${logJob}
    # tee is the last pipeline stage, so read rsync's status from PIPESTATUS.
    errorRsync=${PIPESTATUS[0]}
    case "$errorRsync" in
      0)
        echo "rsync passed with error code ${errorRsync}" | tee -a ${logJob}
        echo "Keep going into the rest of the script" | tee -a ${logJob}
        jTries=$MaxTries
        ;;
      23)
        # Partial transfer: treated as non-retryable, same as the original.
        echo "rsync failed with error code ${errorRsync}" | tee -a ${logJob}
        echo "Aborting the backup of ${bDir}" | tee -a ${logJob}
        echo "Keep going into the rest of the script" | tee -a ${logJob}
        jTries=$MaxTries
        ;;
      *)
        echo "rsync failed with code ${errorRsync}" | tee -a ${logJob}
        echo "trying again in ${pausingMinutes} minutes" | tee -a ${logJob}
        sleep "${pausingMinutes}m"
        ;;
    esac
    jTries=$((jTries+1))
  done
}
#-------------------------------------------
#Defining the basic backup function with Ignore Existing Files
basicBackupIgnoreExisting() {
# Pull $caseDir/$bDir from the remote host into the current directory,
# skipping files that already exist locally (--ignore-existing).
# Retries up to $MaxTries times, sleeping $pausingMinutes minutes between
# attempts; all output is appended to $logJob.
echo "#--------------------------------------------------" | tee -a ${logJob}
echo "CaseDir=${caseDir}" | tee -a ${logJob}
echo "#--------------------------------------------------" | tee -a ${logJob}
echo "Backing up the ${bDir} Dir" | tee -a ${logJob}
echo "#--------------------------------------------------" | tee -a ${logJob}
#sshpass -p $pwRemote rsync --stats --progress -auvzrhs espinosa@hpc-data.pawsey.org.au:$caseDir/$bDir . | tee -a ${logJob}
# Empty or single-space bDir means "skip this slot".
if [ -z "$bDir" ]; then
echo "bDir=${bDir} then nothing is done" | tee -a ${logJob}
else
if [ "$bDir" = " " ]; then
echo "bDir=${bDir} then nothing is done" | tee -a ${logJob}
else
for ((jTries=0; jTries<$MaxTries; jTries++))
do
echo "bDir=${bDir} backup will be done, try ${jTries}" | tee -a ${logJob}
rsync --ignore-existing --stats --progress -auvzrhs espinosa@hpc-data.pawsey.org.au:$caseDir/$bDir . | tee -a ${logJob}
# tee is the last pipeline stage, so rsync's status comes from PIPESTATUS[0].
errorRsync=${PIPESTATUS[0]}
if [ "$errorRsync" -eq 0 ] ; then
echo "rsync passed with error code ${errorRsync}" | tee -a ${logJob}
echo "Keep going into the rest of the script" | tee -a ${logJob}
# Setting jTries to MaxTries terminates the retry loop.
jTries=$MaxTries
elif [ "$errorRsync" -eq 23 ] ; then
# Code 23 (partial transfer) is treated as non-retryable.
echo "rsync failed with error code ${errorRsync}" | tee -a ${logJob}
echo "Aborting the backup of ${bDir}" | tee -a ${logJob}
echo "Keep going into the rest of the script" | tee -a ${logJob}
jTries=$MaxTries
else
echo "rsync failed with code ${errorRsync}" | tee -a ${logJob}
echo "trying again in ${pausingMinutes} minutes" | tee -a ${logJob}
sleep "${pausingMinutes}m"
fi
done
fi
fi
}
#-------------------------------------------
#Defining the basic backup function with Backing up only Files that have changed in size
basicBackupSizeOnly() {
# Pull $caseDir/$bDir from the remote host, transferring only files whose
# size differs (--size-only). Retries up to $MaxTries times; logs to $logJob.
echo "#--------------------------------------------------" | tee -a ${logJob}
echo "CaseDir=${caseDir}" | tee -a ${logJob}
echo "#--------------------------------------------------" | tee -a ${logJob}
echo "Backing up the ${bDir} Dir" | tee -a ${logJob}
echo "#--------------------------------------------------" | tee -a ${logJob}
#sshpass -p $pwRemote rsync --stats --progress -auvzrhs espinosa@hpc-data.pawsey.org.au:$caseDir/$bDir . | tee -a ${logJob}
# Empty or single-space bDir means "skip this slot".
if [ -z "$bDir" ]; then
echo "bDir=${bDir} then nothing is done" | tee -a ${logJob}
else
if [ "$bDir" = " " ]; then
echo "bDir=${bDir} then nothing is done" | tee -a ${logJob}
else
for ((jTries=0; jTries<$MaxTries; jTries++))
do
echo "bDir=${bDir} backup will be done, try ${jTries}" | tee -a ${logJob}
rsync --size-only --stats --progress -auvzrhs espinosa@hpc-data.pawsey.org.au:$caseDir/$bDir . | tee -a ${logJob}
# rsync's status comes from PIPESTATUS[0] (tee is the last stage).
errorRsync=${PIPESTATUS[0]}
if [ "$errorRsync" -eq 0 ] ; then
echo "rsync passed with error code ${errorRsync}" | tee -a ${logJob}
echo "Keep going into the rest of the script" | tee -a ${logJob}
jTries=$MaxTries
elif [ "$errorRsync" -eq 23 ] ; then
# Partial transfer: non-retryable, same as the other helpers.
echo "rsync failed with error code ${errorRsync}" | tee -a ${logJob}
echo "Aborting the backup of ${bDir}" | tee -a ${logJob}
echo "Keep going into the rest of the script" | tee -a ${logJob}
jTries=$MaxTries
else
echo "rsync failed with code ${errorRsync}" | tee -a ${logJob}
echo "trying again in ${pausingMinutes} minutes" | tee -a ${logJob}
sleep "${pausingMinutes}m"
fi
done
fi
fi
}
#-------------------------------------------
#Defining the basic putup function
basicPutup() {
# Push the local $casoDir/$bDir up to the remote $caseDir/$rDir.
# Retries up to $MaxTries times with $pausingMinutes pauses; logs to $logJob.
echo "#--------------------------------------------------" | tee -a ${logJob}
echo "CaseDir=${casoDir}" | tee -a ${logJob}
echo "#--------------------------------------------------" | tee -a ${logJob}
echo "Putting up the ${bDir} Dir into ${rDir}" | tee -a ${logJob}
echo "#--------------------------------------------------" | tee -a ${logJob}
#sshpass -p $pwRemote rsync --stats --progress -auvzrhs ./$bDir espinosa@hpc-data.pawsey.org.au:$caseDir | tee -a ${logJob}
# Empty or single-space bDir means "skip this slot".
if [ -z "$bDir" ]; then
echo "bDir=${bDir} then nothing is done" | tee -a ${logJob}
else
if [ "$bDir" = " " ]; then
echo "bDir=${bDir} then nothing is done" | tee -a ${logJob}
else
for ((jTries=0; jTries<$MaxTries; jTries++))
do
echo "bDir=${bDir} putup will be done, try ${jTries}" | tee -a ${logJob}
rsync --stats --progress -auvzrhs $casoDir/$bDir espinosa@hpc-data.pawsey.org.au:$caseDir/$rDir | tee -a ${logJob}
# rsync's status comes from PIPESTATUS[0] (tee is the last stage).
errorRsync=${PIPESTATUS[0]}
if [ "$errorRsync" -eq 0 ] ; then
echo "rsync passed with error code ${errorRsync}" | tee -a ${logJob}
echo "Keep going into the rest of the script" | tee -a ${logJob}
jTries=$MaxTries
elif [ "$errorRsync" -eq 23 ] ; then
# Partial transfer: non-retryable.
echo "rsync failed with error code ${errorRsync}" | tee -a ${logJob}
echo "Aborting the backup of ${bDir}" | tee -a ${logJob}
echo "Keep going into the rest of the script" | tee -a ${logJob}
jTries=$MaxTries
else
echo "rsync failed with code ${errorRsync}" | tee -a ${logJob}
echo "trying again in ${pausingMinutes} minutes" | tee -a ${logJob}
sleep "${pausingMinutes}m"
fi
done
fi
fi
}
#-------------------------------------------
#Defining the basic putup function with Ignore Existing files
basicPutupIgnoreExisting() {
# Push the local $casoDir/$bDir up to the remote $caseDir/$rDir, skipping
# files that already exist remotely (--ignore-existing). Retries up to
# $MaxTries times; logs to $logJob.
# FIX: this function was missing its closing brace, so the remainder of the
# file was parsed as part of its body and the file failed at end-of-file.
echo "#--------------------------------------------------" | tee -a ${logJob}
echo "CaseDir=${casoDir}" | tee -a ${logJob}
echo "#--------------------------------------------------" | tee -a ${logJob}
echo "Putting up the ${bDir} Dir into ${rDir}" | tee -a ${logJob}
echo "#--------------------------------------------------" | tee -a ${logJob}
#sshpass -p $pwRemote rsync --stats --progress -auvzrhs ./$bDir espinosa@hpc-data.pawsey.org.au:$caseDir | tee -a ${logJob}
# Empty or single-space bDir means "skip this slot".
if [ -z "$bDir" ]; then
echo "bDir=${bDir} then nothing is done" | tee -a ${logJob}
else
if [ "$bDir" = " " ]; then
echo "bDir=${bDir} then nothing is done" | tee -a ${logJob}
else
for ((jTries=0; jTries<$MaxTries; jTries++))
do
echo "bDir=${bDir} putup will be done, try ${jTries}" | tee -a ${logJob}
rsync --ignore-existing --stats --progress -auvzrhs $casoDir/$bDir espinosa@hpc-data.pawsey.org.au:$caseDir/$rDir | tee -a ${logJob}
# rsync's status comes from PIPESTATUS[0] (tee is the last stage).
errorRsync=${PIPESTATUS[0]}
if [ "$errorRsync" -eq 0 ] ; then
echo "rsync passed with error code ${errorRsync}" | tee -a ${logJob}
echo "Keep going into the rest of the script" | tee -a ${logJob}
jTries=$MaxTries
elif [ "$errorRsync" -eq 23 ] ; then
# Partial transfer: non-retryable.
echo "rsync failed with error code ${errorRsync}" | tee -a ${logJob}
echo "Aborting the backup of ${bDir}" | tee -a ${logJob}
echo "Keep going into the rest of the script" | tee -a ${logJob}
jTries=$MaxTries
else
echo "rsync failed with code ${errorRsync}" | tee -a ${logJob}
echo "trying again in ${pausingMinutes} minutes" | tee -a ${logJob}
sleep "${pausingMinutes}m"
fi
done
fi
fi
}
#-------------------------------------------
#Defining the basic putup function with only files that have changed in size
basicPutupSizeOnly() {
# Push the local $casoDir/$bDir up to the remote $caseDir/$rDir, transferring
# only files whose size differs (--size-only). Retries up to $MaxTries times;
# logs to $logJob.
# FIX: this function was missing its closing brace (same defect as
# basicPutupIgnoreExisting), breaking the parse of the rest of the file.
echo "#--------------------------------------------------" | tee -a ${logJob}
echo "CaseDir=${casoDir}" | tee -a ${logJob}
echo "#--------------------------------------------------" | tee -a ${logJob}
echo "Putting up the ${bDir} Dir into ${rDir}" | tee -a ${logJob}
echo "#--------------------------------------------------" | tee -a ${logJob}
#sshpass -p $pwRemote rsync --stats --progress -auvzrhs ./$bDir espinosa@hpc-data.pawsey.org.au:$caseDir | tee -a ${logJob}
# Empty or single-space bDir means "skip this slot".
if [ -z "$bDir" ]; then
echo "bDir=${bDir} then nothing is done" | tee -a ${logJob}
else
if [ "$bDir" = " " ]; then
echo "bDir=${bDir} then nothing is done" | tee -a ${logJob}
else
for ((jTries=0; jTries<$MaxTries; jTries++))
do
echo "bDir=${bDir} putup will be done, try ${jTries}" | tee -a ${logJob}
rsync --size-only --stats --progress -auvzrhs $casoDir/$bDir espinosa@hpc-data.pawsey.org.au:$caseDir/$rDir | tee -a ${logJob}
# rsync's status comes from PIPESTATUS[0] (tee is the last stage).
errorRsync=${PIPESTATUS[0]}
if [ "$errorRsync" -eq 0 ] ; then
echo "rsync passed with error code ${errorRsync}" | tee -a ${logJob}
echo "Keep going into the rest of the script" | tee -a ${logJob}
jTries=$MaxTries
elif [ "$errorRsync" -eq 23 ] ; then
# Partial transfer: non-retryable.
echo "rsync failed with error code ${errorRsync}" | tee -a ${logJob}
echo "Aborting the backup of ${bDir}" | tee -a ${logJob}
echo "Keep going into the rest of the script" | tee -a ${logJob}
jTries=$MaxTries
else
echo "rsync failed with code ${errorRsync}" | tee -a ${logJob}
echo "trying again in ${pausingMinutes} minutes" | tee -a ${logJob}
sleep "${pausingMinutes}m"
fi
done
fi
fi
}
#-------------------------------------------
#Defining the non trajectories backup function (for the motile particle experiments)
noTrajectoriesBackup() {
# Pull $caseDir/$bDir from the remote host, excluding the (large)
# CompleteTrajectories/ subdirectory. Retries up to $MaxTries times;
# logs to $logJob. Used for the motile-particle experiments.
echo "#--------------------------------------------------" | tee -a ${logJob}
echo "CaseDir=${caseDir}" | tee -a ${logJob}
echo "#--------------------------------------------------" | tee -a ${logJob}
echo "Backing up the ${bDir} Dir" | tee -a ${logJob}
echo "#--------------------------------------------------" | tee -a ${logJob}
#sshpass -p $pwRemote rsync --stats --progress -auvzrhs espinosa@hpc-data.pawsey.org.au:$caseDir/$bDir . | tee -a ${logJob}
# Empty or single-space bDir means "skip this slot".
if [ -z "$bDir" ]; then
echo "bDir=${bDir} then nothing is done" | tee -a ${logJob}
else
if [ "$bDir" = " " ]; then
echo "bDir=${bDir} then nothing is done" | tee -a ${logJob}
else
for ((jTries=0; jTries<$MaxTries; jTries++))
do
echo "bDir=${bDir} backup will be done, try ${jTries}" | tee -a ${logJob}
rsync --stats --progress -auvzrhs --exclude CompleteTrajectories/ espinosa@hpc-data.pawsey.org.au:$caseDir/$bDir . | tee -a ${logJob}
# rsync's status comes from PIPESTATUS[0] (tee is the last stage).
errorRsync=${PIPESTATUS[0]}
if [ "$errorRsync" -eq 0 ] ; then
echo "rsync passed with error code ${errorRsync}" | tee -a ${logJob}
echo "Keep going into the rest of the script" | tee -a ${logJob}
jTries=$MaxTries
elif [ "$errorRsync" -eq 23 ] ; then
# Partial transfer: non-retryable.
echo "rsync failed with error code ${errorRsync}" | tee -a ${logJob}
echo "Aborting the backup of ${bDir}" | tee -a ${logJob}
echo "Keep going into the rest of the script" | tee -a ${logJob}
jTries=$MaxTries
else
echo "rsync failed with code ${errorRsync}" | tee -a ${logJob}
echo "trying again in ${pausingMinutes} minutes" | tee -a ${logJob}
sleep "${pausingMinutes}m"
fi
done
fi
fi
}
| true
|
2b8e0bc14845478588ccec3e1942678157edcac2
|
Shell
|
cysouw/Reproducible-Research
|
/example valency/code/recode.sh
|
UTF-8
| 128
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Re-encode every *.txt file in the current directory from UTF-16LE to
# UTF-8, stripping carriage returns, writing into ../data_recoded/.
for file in *.txt ; do
  # FIX: with no matching files the glob stays literal and iconv was run
  # on the non-existent file "*.txt"; skip that case explicitly.
  [ -e "$file" ] || continue
  iconv -f UTF-16LE -t UTF-8 "$file" | tr -d "\r" > ../data_recoded/"$file"
done
| true
|
3220d828e1635a4605de49c5c792b7bd7a3c496d
|
Shell
|
rettier/c
|
/c
|
UTF-8
| 3,320
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# ------------------------------------------------------------------------------
# inlined version of Michael Kropat's (mkropat) realpath.sh
# https://github.com/mkropat/sh-realpath
# this is to get rid of the core-utils dependency on osx
realpath() {
    # Fully resolve $1: follow symlink chains, then canonicalize the path.
    canonicalize_path "$(resolve_symlinks "$1")"
}
resolve_symlinks() {
    # Public wrapper around the recursive symlink resolver.
    _resolve_symlinks "$1"
}
_resolve_symlinks() {
    # Recursively follow $1 through symlinks. Extra arguments hold the
    # chain of paths already visited; abort (non-zero) on a cycle.
    _assert_no_path_cycles "$@" || return

    local dir_context path
    # Direct assignment preserves readlink's exit status for the $? check:
    # readlink fails when $1 is not a symlink, which ends the recursion.
    path=$(readlink -- "$1")
    if [ $? -eq 0 ]; then
        dir_context=$(dirname -- "$1")
        # Relative link targets are resolved against the link's directory.
        _resolve_symlinks "$(_prepend_dir_context_if_necessary "$dir_context" "$path")" "$@"
    else
        printf '%s\n' "$1"
    fi
}
_prepend_dir_context_if_necessary() {
    # $1: directory context, $2: path. A "." context adds no information,
    # so return $2 unchanged; otherwise anchor relative paths under $1.
    if [ "$1" = . ]; then
        printf '%s\n' "$2"
    else
        _prepend_path_if_relative "$1" "$2"
    fi
}
_prepend_path_if_relative() {
    # Join $2 onto directory $1 unless $2 is already absolute.
    if [ "${2#/}" = "$2" ]; then
        # No leading slash: relative path, anchor it under $1.
        printf '%s\n' "$1/$2"
    else
        printf '%s\n' "$2"
    fi
}
_assert_no_path_cycles() {
    # Return 1 when the first argument appears among the remaining ones;
    # used to detect symlink loops during recursive resolution.
    local candidate seen
    candidate=$1
    shift

    for seen in "$@"; do
        if [ "$seen" = "$candidate" ]; then
            return 1
        fi
    done
}
canonicalize_path() {
    # Canonicalize $1: directories are resolved directly, anything else is
    # resolved through its parent directory.
    if [ -d "$1" ]; then
        _canonicalize_dir_path "$1"
    else
        _canonicalize_file_path "$1"
    fi
}
_canonicalize_dir_path() {
    # Physical path of directory $1 (pwd -P resolves symlinked components);
    # the subshell keeps the cd from affecting the caller.
    (cd "$1" 2>/dev/null && pwd -P)
}
_canonicalize_file_path() {
    # Resolve the parent directory physically, then re-attach the leaf name.
    # Runs in a subshell so the cd does not leak into the caller.
    local parent leaf
    parent=$(dirname -- "$1")
    leaf=$(basename -- "$1")
    (cd "$parent" 2>/dev/null && printf '%s/%s\n' "$(pwd -P)" "$leaf")
}
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# c macro start
_c(){
    # With arguments: delegate to the remote clipboard (_cr), forwarding
    # stdin. Without: paste when stdin is a terminal, otherwise copy stdin.
    if [[ $# -gt 0 ]] ; then
        _cr "$@" <&0
        return
    fi

    if tty > /dev/null; then
        ${paste}
    else
        ${copy} <&0
    fi
}
_cc(){
    # Like _c without arguments, but after copying piped input it also
    # pastes it straight back out (copy-and-echo behaviour).
    if tty > /dev/null ; then
        ${paste}
    else
        ${copy} <&0
        ${paste}
    fi
}
_cf(){
    # Copy the resolved absolute path of file $1 to the clipboard
    # (remaining arguments are passed through to _c).
    # NOTE(review): realpath runs twice; the first call's output goes to
    # the terminal — presumably intentional user feedback, confirm.
    if realpath "$1" ; then
        realpath "$1" | _c "${@:2}" > /dev/null
    fi
}
_cr(){
    # Remote clipboard: store/fetch gzip-compressed data on the c-server at
    # $C_HOST, keyed by the (URL-encoded) argument list.
    if [[ -z $C_HOST ]] ; then
        (>&2 echo "missing configuration: set \$C_HOST to a c-server")
        exit 1
    fi

    key="${*}"
    # Minimal URL encoding: only spaces are escaped.
    key=${key// /%20}
    if tty > /dev/null ; then
        # Interactive stdin: fetch the stored value and decompress it.
        curl -G "$C_HOST/?c=${key}" -XGET -sS | gunzip
    else
        # Piped stdin: compress and upload it under the key.
        gzip <&0 | curl -H 'Content-Type: application/octet-stream' -XPOST "$C_HOST/?c=${key}" --data-binary @- -sS
    fi
}
has_command() {
    # True when $1 resolves to a runnable command (binary, builtin, alias).
    if command -v "$1" >/dev/null 2>&1; then
        return 0
    fi
    return 1
}
main(){
    # Pick a clipboard backend, then dispatch to _c/_cc/_cf/_cr based on
    # the name this script was invoked as (normally via symlinks).
    if has_command pbcopy ; then
        copy="pbcopy"
        paste="pbpaste"
    elif has_command xclip ; then
        copy="xclip -selection c"
        paste="xclip -selection clipboard -o"
    elif has_command xsel ; then
        copy="xsel --clipboard --input"
        paste="xsel --clipboard --output"
    else
        echo "No clipboard command found (supports pbcopy, xclip, xsel)"
        echo "If you want to add support for your faviourite clipboard command"
        echo "please open a pull request at https://github.com/rettier/c"
        exit 1;
    fi

    command=$(basename "$0")
    commands=(cc cr cf c)

    # FIX: the previous `echo ... | grep -o` matched substrings, so any
    # invocation name contained inside "cc cr cf c" (e.g. "f" or "c c")
    # would dispatch to an undefined function. Match whole words instead.
    case " ${commands[*]} " in
        *" ${command} "*)
            command="_${command}"
            $command "$@" <&0
            ;;
    esac
}

# ------------------------------------------------------------------------------

main "$@"
| true
|
6ba053e46698a1e6491941b6b1840c3962e83d27
|
Shell
|
milkbugdoctor/PD_libs
|
/work/run_mira
|
UTF-8
| 654
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Assemble 454 reads with mira, optionally mapping against a reference
# backbone. Inputs are symlinked into the output directory under the fixed
# names mira expects (mira_in.fasta, mira_in.fasta.qual, mira_backbone_in.fasta).

# FIX: the usage line requires four arguments (fasta qual ref output-dir)
# but the old check only rejected fewer than three, which later produced a
# confusing "mkdir: missing operand" instead of the usage message.
if [ $# -lt 4 ]; then
    echo -e "\nUsage: $0 fasta qual ref output-dir\n"
    exit 1
fi

fasta="$1" ; shift
qual="$1" ; shift
ref="$1" ; shift
dir="$1" ; shift

mkdir -p "$dir" || exit 1

if [ ! -s "$fasta" ]; then
    echo -e "$fasta not found!" 1>&2
    exit 1
fi
ln -s "$(abs_path "$fasta")" "$dir/mira_in.fasta"

# Quality file is optional: warn but continue without it.
if [ ! -s "$qual" ]; then
    echo -e "$qual not found!" 1>&2
else
    ln -s "$(abs_path "$qual")" "$dir/mira_in.fasta.qual"
fi

if [ -s "$ref" ]; then
    # Reference provided: run a backbone (mapping) assembly.
    ln -s "$(abs_path "$ref")" "$dir/mira_backbone_in.fasta"
    cd "$dir"
    mira -454data -fasta -SB:lb=1:sbuil=2:bsn=LT2:bbq=60
else
    # No reference: plain de novo assembly.
    cd "$dir"; mira -454data -fasta
fi
| true
|
cb8613330d63c2d67a93da3f690c8c63268aef1e
|
Shell
|
mabnhdev/ops-debian
|
/openswitch/ops-init-alphanetworks.sh
|
UTF-8
| 668
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Platform init for Alpha Networks switches: create the openvswitch runtime
# directory and switch the SNX-60A0-486F fan controller to manual via GPIO.
set -x
# TODO: This is not the right place for this. Must be recreated
# after reboot since /var/run is tmpfs.
if [ ! -d /var/run/openvswitch ] ; then
    mkdir -p /var/run/openvswitch
fi
# Turn on manual fan control for SNX-60A0-486F
# TODO: Add GPIO support to OpenSwitch and set through there
if [ -d /sys/class/gpio/gpiochip452 ] ; then
    # Export GPIO 470 as an output pin if not already exported.
    if [ ! -d /sys/class/gpio/gpio470 ] ; then
        echo '470' | tee --append /sys/class/gpio/export > /dev/null
        echo 'out' | tee --append /sys/class/gpio/gpio470/direction > /dev/null
    fi
    # Drive the pin low.
    echo '0' | tee --append /sys/class/gpio/gpio470/value > /dev/null
fi
/usr/sbin/ops-init-namespace
| true
|
700c8ab8fc777c3fd72ef27fdebd443622d1b1c4
|
Shell
|
xfp-2013/tplenx
|
/docker/home/.bashrc
|
UTF-8
| 465
| 2.578125
| 3
|
[] |
no_license
|
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)

# If this is an xterm set the title to user@host:dir
# Minimal prompt: hostname, working directory, $/#.
PS1='\h:\w\$ '

# colored GCC warnings and errors
export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'

# some more ls aliases
alias ls='ls -h --group-directories-first --color=auto'
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'

# New files are created world-readable (rwxr-xr-x for dirs).
umask 022
| true
|
7b2df6b4c7ab64da2e35ba67a45e5df5759eb6af
|
Shell
|
metamorph-inc/meta-core
|
/bin/Python27/ubuntu_create_venv.sh
|
UTF-8
| 1,614
| 2.796875
| 3
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
#!/bin/bash
# Bootstrap an OpenMDAO virtualenv on Ubuntu: install OS prerequisites,
# download and run the OpenMDAO "go" installer, then pip-install extra
# Python packages (including two from the META svn repository).
set -e
#http_proxy=kms1.isis.vanderbilt.edu:8888
#export http_proxy
# cache svn password
# svn cat https://svn.isis.vanderbilt.edu/META/trunk/bin/Python27/Lib/site-packages/PCC-py2.7.egg > /dev/null
# Overridable via the environment; defaults to 0.2.7.
OPENMDAO_VERSION=${OPENMDAO_VERSION:-0.2.7}
# libfreetype6-dev: building 'matplotlib.ft2font' extension
# libpng12-dev: building 'matplotlib._png' extension
REQUIRED_PACKAGES="python-virtualenv python-setuptools python-numpy python-scipy build-essential gfortran wget libfreetype6-dev libpng12-dev subversion"
# prereqs for compiling scipy: libblas-dev libatlas-base-dev (among others) (but we don't compile it)
# First pass: install when dpkg knows nothing about the packages.
dpkg -l $REQUIRED_PACKAGES >/dev/null || \
  sudo apt-get install -y $REQUIRED_PACKAGES
# Second pass: re-install when any listed package is not in state "ii".
dpkg -l $REQUIRED_PACKAGES | tail -n+6 | grep -v ^ii && \
  sudo apt-get install -y $REQUIRED_PACKAGES
# Download the installer and create the venv only when missing (idempotent).
[ ! -f go-openmdao-$OPENMDAO_VERSION.py ] && wget -O go-openmdao-$OPENMDAO_VERSION.py http://openmdao.org/releases/$OPENMDAO_VERSION/go-openmdao.py
[ ! -d openmdao-$OPENMDAO_VERSION ] && python go-openmdao-$OPENMDAO_VERSION.py
# ./openmdao-0.2.7/bin/easy_install scipy
# ./openmdao-0.2.7/bin/easy_install pip
# ./openmdao-0.2.7/bin/python -m pip freeze
cat <<EOF > requirements.txt
elementtree==1.2.7-20070827-preview
matplotlib==1.1.0
mpmath==0.17
EOF
./openmdao-$OPENMDAO_VERSION/bin/pip install -r requirements.txt
./openmdao-$OPENMDAO_VERSION/bin/pip install 'svn+https://svn.isis.vanderbilt.edu/META/trunk/src/Python27Packages/PCC#egg=PCC'
./openmdao-$OPENMDAO_VERSION/bin/pip install 'svn+https://svn.isis.vanderbilt.edu/META/trunk/src/Python27Packages/py_modelica#egg=py_modelica'
| true
|
094f62f48a263af932ffc5cd09c8f7da3750a910
|
Shell
|
fluidchunky/carmel.eos
|
/commands/deploy.dev.sh
|
UTF-8
| 404
| 3.546875
| 4
|
[] |
no_license
|
CONTRACT=$1

# Dispatch a dev deployment for the requested contract.
# FIX: quoted $CONTRACT in the test — unquoted, an empty value made the
# expression `[ -z ]` and a multi-word argument broke the test entirely.
if [ -z "$CONTRACT" ]; then
  echo "What do you want to deploy?"
  exit 1
fi

# deploy_dev_* / error / print are provided by the sourcing environment.
case "${CONTRACT}" in
  eos)
    deploy_dev_eostoken_contract
    ;;
  tokens)
    deploy_dev_contract carmeltokens
    ;;
  system)
    deploy_dev_contract carmelsystem
    ;;
  *)
    error "Invalid contract"
    print "Supported: [eos|tokens|system]"
    ;;
esac
| true
|
a4c56420a3696ae3d9d621bb25800dc4f9e7496f
|
Shell
|
lsof-org/lsof
|
/lib/dialects/sun/Mksrc
|
UTF-8
| 604
| 3.546875
| 4
|
[
"LicenseRef-scancode-purdue-bsd"
] |
permissive
|
#!/bin/bash
#
# Mksrc - make Solaris source files
#
# WARNING: This script assumes it is running from the main directory
# of the lsof, version 4 distribution.
#
# One environment variable applies:
#
#	LSOF_MKC	is the method for creating the source files.
#			It defaults to "ln -s".  A common alternative is "cp".
#
# $Id: Mksrc,v 1.4 2000/12/04 14:35:13 abe Exp $

# FIX: the documented "ln -s" default was never actually applied, so the
# script broke whenever LSOF_MKC was unset in the environment.
LSOF_MKC=${LSOF_MKC:-"ln -s"}

mksrc() {
	# Recreate each dialect source file in the current directory.
	# $LSOF_MKC is intentionally unquoted so "ln -s" word-splits
	# back into command + option.
	for i in $L
	do
		rm -f $i
		$LSOF_MKC $D/$i $i
		echo "$LSOF_MKC $D/$i $i"
	done
}

D=lib/dialects/sun
L="ddev.c dfile.c dlsof.h dmnt.c dnode.c dnode1.c dnode2.c dproc.c dproto.h dsock.c dstore.c machine.h"
mksrc
| true
|
55ff5f4a4f94a83f44f7b48c2730532a0e8c6783
|
Shell
|
ben-hidalgo/scorpicode
|
/devops/scripts/docker-build.sh
|
UTF-8
| 1,009
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash

set -exuo pipefail

# Build one Docker image per service; tags come from devops/helmchart/tags.yaml.
# All tags are read up-front so a missing tag aborts (set -e) before any
# image is built — same ordering as the original copy-pasted version.
services="hats website frontend roxie soxie debugger"

declare -A tags
for svc in $services; do
  tags[$svc]=$(yq r devops/helmchart/tags.yaml "$svc.tag")
done

for svc in $services; do
  docker build . -f "devops/dockerfiles/$svc.Dockerfile" \
    -t "$GCR_HOSTNAME/$GKE_PROJECT/$svc:${tags[$svc]}"
done
| true
|
2f723ad278c19c138caf69d676f0ab2d87433f09
|
Shell
|
Romainj1/ProjetRMI
|
/run.sh
|
UTF-8
| 657
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the project, then launch either the RMI server (starting rmiregistry
# if needed) or the RMI client, depending on the first argument.
./compile.sh

lancerserveur(){
    # Detect a running rmiregistry by grepping the process list into a file.
    rm -f ./rmipresence
    ps aux | grep rmiregistry | grep -v grep > rmipresence
    rmipresence="rmipresence"
    if [ -s $rmipresence ]
    then
        echo "rmiregistry déjà lancé"
    else
        echo "Lancement rmiregistry ..."
        # NOTE(review): the whole `cd && rmiregistry` list is backgrounded,
        # so the cd happens in a subshell and does not affect this script.
        cd ./classes && rmiregistry&
        echo "Lancement rmiregistry Fait!"
    fi
    echo $DIRSTACK
    java -cp classes projetrmi.Serveur
}

lancerclient(){
    java -cp classes projetrmi.Client
}

# Dispatch on the requested role.
case "$1" in
    client)
        lancerclient
        ;;
    serveur)
        lancerserveur
        ;;
    *)
        echo $"Usage: $0 {client|serveur}"
        exit 1
esac
exit 0
| true
|
c9758114e20aa60ef430342bd94703767f79e953
|
Shell
|
tomaszbartoszewski/avro-kafka-binary-encoding
|
/complex_object.sh
|
UTF-8
| 2,298
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Demo of Kafka REST Proxy round-trips: publish one Avro-encoded message to
# the factory_worker topic, then consume it back twice — once decoded as
# Avro and once as raw binary — cleaning up each consumer instance.

# Read schema and message from files
# (newlines/spaces stripped and quotes escaped so they embed in JSON below).
schema=$(cat worker.avsc | tr -d '\n ' | sed -E 's/"/\\"/g')
message=$(cat worker_message_value.json | tr -d '\n ')

# Publish message to Kafka
curl -X POST -H "Content-Type: application/vnd.kafka.avro.v2+json" \
  -H "Accept: application/vnd.kafka.v2+json" \
  --data '{"value_schema": "'$schema'", "records": [{"value": '$message'}]}' \
  "http://localhost:8082/topics/factory_worker"

# Create a consumer for Avro data
curl -X POST -H "Content-Type: application/vnd.kafka.v2+json" \
  --data '{"name": "factory_worker_avro_consumer_instance", "format": "avro", "auto.offset.reset": "earliest"}' \
  http://localhost:8082/consumers/factory_worker_avro_consumer &>/dev/null

# Subscribe, fetch the decoded records, then delete the consumer instance.
curl -X POST -H "Content-Type: application/vnd.kafka.v2+json" --data '{"topics":["factory_worker"]}' \
  http://localhost:8082/consumers/factory_worker_avro_consumer/instances/factory_worker_avro_consumer_instance/subscription &>/dev/null

curl -s -X GET -H "Accept: application/vnd.kafka.avro.v2+json" \
  http://localhost:8082/consumers/factory_worker_avro_consumer/instances/factory_worker_avro_consumer_instance/records | jq .

curl -X DELETE -H "Content-Type: application/vnd.kafka.v2+json" \
  http://localhost:8082/consumers/factory_worker_avro_consumer/instances/factory_worker_avro_consumer_instance

# Create a consumer for binary data
curl -s -X POST -H "Content-Type: application/vnd.kafka.v2+json" \
  --data '{"name": "factory_worker_consumer_instance", "format": "binary", "auto.offset.reset": "earliest"}' \
  http://localhost:8082/consumers/factory_worker_binary_consumer &>/dev/null

curl -s -X POST -H "Content-Type: application/vnd.kafka.v2+json" --data '{"topics":["factory_worker"]}' \
  http://localhost:8082/consumers/factory_worker_binary_consumer/instances/factory_worker_consumer_instance/subscription &>/dev/null

curl -s -X GET -H "Accept: application/vnd.kafka.binary.v2+json" \
  http://localhost:8082/consumers/factory_worker_binary_consumer/instances/factory_worker_consumer_instance/records | jq .

curl -X DELETE -H "Content-Type: application/vnd.kafka.v2+json" \
  http://localhost:8082/consumers/factory_worker_binary_consumer/instances/factory_worker_consumer_instance
| true
|
c3e86cb50dfdfc1019aa3e788e5be7d923238221
|
Shell
|
huxley-open-source/huxley-infra-open-source
|
/apparmor/safe_scripts/Octave.run
|
UTF-8
| 1,314
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# parameters are:
# $1 source_file
# $2 input_file
# $3 timelimit (in seconds)
#
# the output of the submission should be directed to the standard output
#
# Please, see Readme.md to a complete list of return codes
#
# Wraps an Octave submission in a generated shell script and runs it inside
# the $jail sandbox (configured via config.properties) with a time limit.
IFS=$'\n'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
. $DIR/config.properties

# Locate octave; fall back to the conventional path if `which` finds nothing.
octave=`which octave`
[ -x "$octave" ] || octave=/usr/bin/octave

if [ "$1" == "" -o "$2" == "" -o "$3" == "" ]; then
	echo "Octave.run: missing parameter"
	exit 101
fi
if [ ! -r $1 ]; then
	echo "Octave.run: $1 not found or it's not readable"
	exit 102
fi
if [ ! -x $jail ]; then
	echo "Octave.run: $jail not found or it's not executable"
	exit 103
fi
if [ ! -x $octave ]; then
	echo "Octave.run: $octave not found or it's not executable"
	exit 109
fi

name=$1
source_dir=$(dirname ${name})
input=$2
time=$3

#create a temporary script in order to run the octave from the shell
# The temp name is derived from the source's md5 so parallel runs of
# different submissions don't collide.
temp='octave_'$(md5sum $name | cut -d" " -f1)
cat $name > $source_dir/$temp'.m'
scriptName='run_'$temp'.sh'
echo "#!/bin/bash
$octave -qfH $source_dir/$temp.m
exit \$?
" > $source_dir/$scriptName
chmod +x $source_dir/$scriptName

# run the script
$jail -t$time -i$input $source_dir/$scriptName
ret=$?

## Cleanup: remove the temporary .m copy and the wrapper script.
rm $source_dir/$temp'.m'
rm $source_dir/$scriptName

# Propagate the jailed run's exit status.
exit $ret
| true
|
4bf2572e0b57583964759d000b08b491b0cde4b9
|
Shell
|
tony2heads/My-miriad-scripts
|
/read.sh
|
UTF-8
| 368
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/sh
# converts fits files to miriad format and adds header data for PKS1934-638
for x in *.fits
do (
# ${x%%fits} strips the trailing "fits" but keeps the dot,
# so name.fits -> name.mir after appending "mir".
ndot=${x%%fits}
fits in=$x out=${ndot}mir op=uvin
# Fill in system temperature and Jy/K headers missing from the FITS input.
puthd in=${ndot}mir/systemp value=20.0
puthd in=${ndot}mir/jyperk value=20.0
)
done
# Tag the calibrator dataset with its source name.
puthd in=PKS1934-638.mir/source value=1934-638
#puthd in=3C48.mir/source value=3C48
#puthd in=3C138.mir/source value=3C138
| true
|
c9b6866ef561fa71b616fdd1a5ae07a6dcbf85cb
|
Shell
|
v8tix/kongo
|
/example.sh
|
UTF-8
| 2,252
| 2.625
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
#!/usr/bin/env bash
# Kafka Connect demo: generate pageview events, sink them into MongoDB, and
# source them back out of MongoDB, then verify via the REST API and mongo shell.

echo -e "\nAdding datagen pageviews:"
curl -X POST -H "Content-Type: application/json" --data '
  { "name": "datagen-pageviews",
    "config": {
      "connector.class": "io.confluent.kafka.connect.datagen.DatagenConnector",
      "kafka.topic": "pageviews",
      "quickstart": "pageviews",
      "key.converter": "org.apache.kafka.connect.json.JsonConverter",
      "value.converter": "org.apache.kafka.connect.json.JsonConverter",
      "value.converter.schemas.enable": "false",
      "producer.interceptor.classes": "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor",
      "max.interval": 200,
      "iterations": 10000000,
      "tasks.max": "1"
  }}' http://localhost:8083/connectors -w "\n"

# Give Connect time to start each connector before adding the next.
sleep 5

echo -e "\nAdding MongoDB Kafka Sink Connector for the 'pageviews' topic into the 'test.pageviews' collection:"
curl -X POST -H "Content-Type: application/json" --data '
  {"name": "mongo-sink",
   "config": {
     "connector.class":"com.mongodb.kafka.connect.MongoSinkConnector",
     "tasks.max":"1",
     "topics":"pageviews",
     "connection.uri":"mongodb://172.10.10.59:27017,172.10.10.60:27017,172.10.10.61:27017/test?replicaSet=rs0",
     "database":"test",
     "collection":"pageviews",
     "key.converter": "org.apache.kafka.connect.storage.StringConverter",
     "value.converter": "org.apache.kafka.connect.json.JsonConverter",
     "value.converter.schemas.enable": "false"
  }}' http://localhost:8083/connectors -w "\n"

sleep 5

echo -e "\nAdding MongoDB Kafka Source Connector for the 'test.pageviews' collection:"
curl -X POST -H "Content-Type: application/json" --data '
  {"name": "mongo-source",
   "config": {
     "tasks.max":"1",
     "connector.class":"com.mongodb.kafka.connect.MongoSourceConnector",
     "connection.uri":"mongodb://172.10.10.59:27017,172.10.10.60:27017,172.10.10.61:27017/test?replicaSet=rs0",
     "topic.prefix":"mongo",
     "database":"test",
     "collection":"pageviews"
  }}' http://localhost:8083/connectors -w "\n"

sleep 5

echo -e "\nKafka Connectors: \n"
curl -X GET "http://localhost:8083/connectors/" -w "\n"

echo -ne "\n"
echo "Looking at data in 'db.pageviews':"
docker exec mongo-rs-1 /usr/bin/mongo --eval 'db.pageviews.find()'
| true
|
324c2e0e0e7c42e891ac1ab756d0e834eac8b200
|
Shell
|
wolfsearch/obsolete.op-identity-provider-client
|
/scripts/digitalocean/postdeploy.sh
|
UTF-8
| 875
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash -e
#######################################################
# Minimal deployed PIO service structure
#######################################################
# Stage the synced service into a timestamped "configured" directory and
# atomically point the "live" symlink at it.
# NOTE(review): the -e in the shebang is lost when run as `bash script`.

# Nanosecond timestamp keeps each deployment in its own directory.
CONFIGURED_DIR=$(date +%s%N)

if [ ! -d "configured/$CONFIGURED_DIR" ]; then
  mkdir -p configured/$CONFIGURED_DIR
fi

# NOTE(review): both "source" and "install" are copied from sync/source —
# looks intentional (install is a working copy), but confirm.
cp -Rf sync/scripts configured/$CONFIGURED_DIR/scripts
cp -Rf sync/source configured/$CONFIGURED_DIR/source
cp -Rf sync/source configured/$CONFIGURED_DIR/install
cp sync/.pio.json configured/$CONFIGURED_DIR

# Swap the live symlink to the new deployment.
rm -f live || true
ln -s configured/$CONFIGURED_DIR live
sudo chmod -Rf ug+x $PIO_SCRIPTS_PATH

#######################################################
#echo "Linking service into apache document root ..."
#rm -f /var/www/html/$PIO_SERVICE_ID || true
#ln -s $PIO_SERVICE_PATH/live/install /var/www/html/$PIO_SERVICE_ID
#chown -Rf www-data:www-data $PIO_SERVICE_PATH/live/install
| true
|
b84a8ddd827b5d1ac05b0b8629450beb28576e55
|
Shell
|
WPYNB/scripts
|
/reset/centos/yum/setcfg.sh
|
UTF-8
| 279
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Switch the CentOS yum configuration: back up the current repos, then either
# delegate to the local CD-ROM setup or install repo files for source $1.
. ver.sh

ver=$(ver)
path=$(pwd)/$ver

# Back up the existing repositories.
mkdir /etc/yum.repos.d/bak
mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak

# Local CD-ROM repository requested: delegate and stop here.
# FIX: $1 is now quoted (unquoted, an unset $1 made the test a syntax
# error), and the bare `return` — which fails when the script is executed
# rather than sourced, letting execution fall through to the cp below —
# now falls back to `exit 0`.
if [ "$1" = cd ]; then
  . $path/cd.sh
  return 0 2>/dev/null || exit 0
fi

# Install the repo files for the chosen source.
cp $path/yum/$1/*.repo /etc/yum.repos.d/
| true
|
4728b828a2aa62094710d842c79f50c8f6a5c555
|
Shell
|
origin2007/Serverize
|
/serverize
|
UTF-8
| 311
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
echo "serverize, a tool to make any long-term running program a deamon in linux system"
echo "version 0.1"
if [ $# -eq 0 ]; then
echo "usage: $0 (appname) [parameters]"
return 1
fi
command="$*"
nohup $command > "/tmp/$1.log" 2>&1 &
echo "app output will be directed to /tmp/$1.log"
echo "done!"
| true
|
b0529a4a385b5019de8e84add47e7d3cd55dc247
|
Shell
|
utdemir/midye
|
/tmux.sh
|
UTF-8
| 287
| 3.09375
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
set -o errexit
tmpdir="$(mktemp -d)"
trap "rm -rf '$tmpdir'" EXIT
cmd="$1"
TMUX="tmux -S "$tmpdir/tmux.sock" -f /dev/null"
$TMUX new-session -d -x 90 -y 20 "$cmd; tmux wait-for -S finished; sleep 1h"
$TMUX wait-for finished
$TMUX capture-pane -p
$TMUX kill-session
| true
|
3324dad7449bb7204a7accbc7f97d43c91b81c5b
|
Shell
|
jojow/artifactmgr
|
/run.sh
|
UTF-8
| 514
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Set environment variable for OpenTOSCA's operation invoker:
# export ARTIFACT_MANAGER_URL=http://localhost:8888/runs?reqTransformer=PlainConfigInput
#
if [ -z "$ARTIFACT_MANAGER_DIR" ]; then
ARTIFACT_MANAGER_DIR="$HOME/artifactmgr"
fi
NVM_DIR="$ARTIFACT_MANAGER_DIR/.nvm"
source $NVM_DIR/nvm.sh
cd $ARTIFACT_MANAGER_DIR
DEBUG="artifactmgr:*" forever -a -l forever.log -o out.log -e err.log server.js
#DEBUG="artifactmgr:*" npm start 2>&1 | tee -a $ARTIFACT_MANAGER_DIR/artifactmgr.log
| true
|
c1f3d965fad0c62c240e083d2f328defc5a947b6
|
Shell
|
mojianhua/shell
|
/ListenPidAndRestart.sh
|
UTF-8
| 584
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
#判断es的9200端口是否已经打开
port=`lsof -i:9200`
#如果9200不存在在尝试启动es的master进出并且发送邮件通知
if [ ! -n "$port" ];
then
{
echo '【'`date +"%Y-%m-%d %H:%M:%S"`】-----'【端口不存在,尝试启动es服务】'>>/Applications/shell/error.log
`echo "es出错重启es:9200" | mail -s "es出错" 1657210793@qq.com`
`/Applications/XAMPP/htdocs/es/elasticsearch_master/bin/elasticsearch -d`
}
else
{
#如果正常则写入日志
echo '【'`date +"%Y-%m-%d %H:%M:%S"`'】-----端口信息【'${port}'】'>>/Applications/shell/ok.log
}
fi
| true
|
f67ee651866679c538670f769d5b09cc04fa7e3a
|
Shell
|
RubenTadeia/PF
|
/Lab4/Part2/tools_grid/scripts/build_flists_test.sh
|
UTF-8
| 379
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
CWD=`pwd`
echo "====================== Make lists ==========================="
for SID in `seq 34`; do
echo ""
echo "-------- Speaker $SID --------"
cd $CWD
CD1="data/test/id$SID"
cd $CD1
TMPID="test_id$SID.list"
ls *.wav |sed 's/\.wav//' > $CWD/flists/$TMPID
done
echo "----------------------------------------------------------"
| true
|
1bf5659b7712be7af56c3833e906655aed61ff6b
|
Shell
|
anderoav/skript
|
/praks6/yl1
|
UTF-8
| 295
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Ülesanne1
#
echo -n "Sisesta täisarv: "
read arv
vastus=$(($arv % 2)) #vastus 2-ga jagamisel
if [ $vastus -eq 0 ]
then
echo "$arv on paaris" #kui on paaris siis väljastab selle
else #kui midagi muud
echo "$arv on paaritu" #paaritu siis väljastab selle
fi
#
# Skript lõppes
| true
|
e3579c2cdc71394eefcdebc61b094397bf7ae2af
|
Shell
|
rickding/HelloPython
|
/open_face/data/download-lfw-subset.sh
|
UTF-8
| 1,561
| 4.09375
| 4
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Download data.
cd "$(dirname "$0")"
die() {
echo >&2 $*
exit 1
}
checkCmd() {
command -v $1 >/dev/null 2>&1 \
|| die "'$1' command not found. Please install from your package manager."
}
checkCmd wget
checkCmd tar
printf "\n\n====================================================\n"
printf "Downloading lfw-a (subset of people with name starting with A)"
printf "====================================================\n\n"
rm -rf lfw-subset
mkdir -p lfw-subset
cd lfw-subset
wget -nv http://vis-www.cs.umass.edu/lfw/lfw-a.tgz
[ $? -eq 0 ] || ( rm lfw-a.tgz && die "+ lfw-a: Error in wget." )
printf "\n\n====================================================\n"
printf "Verifying checksums.\n"
printf "====================================================\n\n"
md5str() {
local FNAME=$1
case $(uname) in
"Linux")
echo $(md5sum "$FNAME" | cut -d ' ' -f 1)
;;
"Darwin")
echo $(md5 -q "$FNAME")
;;
esac
}
checkmd5() {
local FNAME=$1
local EXPECTED=$2
local ACTUAL=$(md5str "$FNAME")
if [ $EXPECTED == $ACTUAL ]; then
printf "+ $FNAME: successfully checked\n"
else
printf "+ ERROR! $FNAME md5sum did not match.\n"
printf " + Expected: $EXPECTED\n"
printf " + Actual: $ACTUAL\n"
printf " + Please manually delete this file and try re-running this script.\n"
return -1
fi
printf "\n"
}
set -e
checkmd5 \
lfw-a.tgz \
678b1f67c300002fedafbb0705d22e8d
tar xf lfw-a.tgz
mkdir raw
mv lfw/{Ann_Veneman,Adrien_Brody,Anna_Kournikova} raw
rm -rf lfw
rm lfw-a.tgz
| true
|
9985e3ed1a2ea4d05f7c0fbceb34961379bfdf5c
|
Shell
|
tenreads/ops-scripts
|
/feed_provision.sh
|
UTF-8
| 868
| 2.921875
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
function setupRepo() {
cd /root/
git clone $feedpuller_repo
cd $feedpuller_path
}
function setupRequirements() {
pip install -r requirements.txt
echo """import sys, nltk;nltk.download('stopwords');sys.exit()""" >> /root/nsetup.py
python /root/nsetup.py
rm /root/nsetup.py
cd parser
npm install
}
function setupCron() {
echo """function cleanupSystem {
pkill python
free -m
sync; echo 3 | sudo tee -a /proc/sys/vm/drop_caches > /dev/null
free -m
}
function startPull {
cleanupSystem
#nohup python /root/feedpuller/puller.py > /root/latest-pull.log 2>&1&
nohup python /root/python-node-feedpuller/feed_parser.py > /root/latest-pull.log 2>&1&
}
startPull
""" >> /root/cron.sh
}
function setupFeedPuller() {
setupRepo
setupRequirements
setupCron
}
setupFeedPuller
| true
|
bc225c9142dc2c700fd2ccadfaf20e9b1f30fa2f
|
Shell
|
fams/ninjaserver
|
/linuxplace/scripts/aplica_squid.sh
|
WINDOWS-1250
| 1,391
| 2.625
| 3
|
[] |
no_license
|
#!/bin/sh
################################################################################
#Config SQUID
################################################################################
LXHOME=/usr/local/linuxplace
#Pega variveis
. $LXHOME/config/ldap
. $LXHOME/config/lxn
. $LXHOME/config/squid
#variveis auxiliares
#CAMINHOS
STANZAHOME=$LXHOME/stanza
WEBMINHOME=$STANZAHOME/webmin
ETCSQUID=/etc/squid
LOCALNET=$(echo $LOCALNET|sed -e 's/\//\\\//')
cat $STANZAHOME/squid/squid.conf|sed -e "
s/%LOCALNET%/$LOCALNET/g
s/%REALM%/$REALM/g
s/%LISTEN%/$LISTEN/g
s/%SUFFIX%/$SUFFIX/g
">/etc/squid/squid.conf
mkdir -p /etc/squid/acl 2>/dev/null
test -f /etc/squid/acl/url_rest.acl || echo -e "-i\nmp3"> /etc/squid/acl/url_rest.acl
test -f /etc/squid/acl/destino_priv.acl || echo "linuxplace.com.br"> /etc/squid/acl/destino_priv.acl
test -f /etc/squid/acl/destino_noauth.acl || echo -e"obsupgdp.caixa.gov.br\nwindowsupdate.microsoft.com\nliveupdate.symantec.com\nsymantecliveupdate.com"> /etc/squid/acl/destino_noauth.acl
test -f /etc/squid/acl/destino_rest.acl || echo "consumptionjunction.com" > /etc/squid/acl/destino_rest.acl
test -f /etc/squid/acl/usuarios_priv.acl || echo "none" > /etc/squid/acl/usuarios_priv.acl
test -f /etc/squid/acl/usuarios_rest.acl || echo "none" > /etc/squid/acl/usuarios_rest.acl
################################################################################
| true
|
90711f0b2edb51409272d9833d2c6a6e7f595f82
|
Shell
|
deweerdt/tamis
|
/tests/test2.sh
|
UTF-8
| 372
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# sample output of test2:
#
# Lock is 0x804a010
# Access @ 0x8048890 was protected by lock 0x804a010
TESTDIR=.
OUTPUT=$($TESTDIR/test2 2>&1)
[ $? -ne 0 ] && exit $?
DECL_LOCK_ADDR=$(echo $OUTPUT | sed 's/^Lock is \([^ ]*\).*/\1/')
FOUND_LOCK_ADDR=$(echo $OUTPUT | sed 's/.*was protected by lock \([^ ]*\).*/\1/')
[ "$DECL_LOCK_ADDR" == "$FOUND_LOCK_ADDR" ]
| true
|
b19e1d8a18e425305c710e86b580a1c52bc918d7
|
Shell
|
song10/bin
|
/unity_minimize.sh
|
UTF-8
| 289
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
# gsettings set org.compiz.unityshell:/org/compiz/profiles/unity/plugins/unityshell/ launcher-minimize-window true/false
ANS=true
if [ -n "$1" ]; then ANS="$1"; fi
gsettings set org.compiz.unityshell:/org/compiz/profiles/unity/plugins/unityshell/ launcher-minimize-window ${ANS}
| true
|
568ad6ac13a1a3724253b6a737bb78c089af15d0
|
Shell
|
xuebingwu/hyper-editing
|
/hyper_editing
|
UTF-8
| 5,691
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
help () {
echo ""
echo "Usage: hyper_editing [Options] -sra_id SRR***** | -fastq r1.fastq [-fastq2 r2.fastq]"
echo ""
echo "Options:"
echo " -sra_id <STR> Use fastq-dump to fetch fastq files using SRA IDs (SRR***)"
echo " -fastq <FILE> Local input fastq file (read1/mate1 if paired-end)"
echo " -fastq2 <FILE> Local input fastq file, read2/mate2, only for paired-end data"
echo " -genome <PATH> Path/prefix of genome *.fa, such as path_to_folder/hg38"
echo " -repeat <FILE> Simple repeat file (default provided in the folder 'data')"
echo " -output <STR> Output folder name (will be under current folder)"
echo " -gap <N> Gap max size between the pairs (default=50000)"
echo " -skip_bwa Skip bwa mapping if output exists"
echo " -phred64 Set PHRED score offset to 64 (default=33)"
echo ""
echo "Required to run:"
echo " bwa, bam2fastx, samtools, and fastq-dump if using -sra_id"
echo ""
echo "Example:"
echo "hyper_editing -sra_id SRR948734"
echo "hyper_editing -fastq data/input.fq -genome /home/genomes/hg38"
echo "hyper_editing -fastq data/r1.fq -fastq2 data/r2.fq"
}
# default file path
genome="/home/local/ARCS/xw2629/genomes/sequences/hg38"
sim_rep_file="/home/local/ARCS/xw2629/software/hyper-editing/data/unique_simple_repeats.txt"
# default values
fastq=""
fastq2=""
sra_id=""
paired_end="0" # 0 => single end, 1 => paired end
output="hyper-editing-test"
phred="33"
gap="50000"
bwa_run="1"
he_detect_args="0.05 0.6 30 0.6 0.1 0.8 0.2" # args meaning: -Min of edit sites at Ultra-Edit read -Min fraction of edit sites/mm sites -Min sequence quality for counting editing event -Max fraction of same letter in cluster -Min of cluster length -Max initiate index of cluster -Min ending index of cluster
# parse commandline arguments
while [ $# -gt 0 ]
do
case "$1" in
-fastq) fastq="$2"; shift;;
-fastq2) fastq2="$2"; shift;;
-sra_id) sra_id="$2"; shift;;
-genome) genome=$2; shift;;
-output) output=$2; shift;;
-gap) gap=$2; shift;;
-phred64) phred="64";;
-skip_bwa) bwa_run="0";;
-repeat) sim_rep_file=$2; shift;;
-h) help;exit 0;;
--help) help;exit 0;;
--) shift; break;;
-*)
echo '';echo "****** unknown option: $1 ******";echo ''; 1>&2; help; exit 1;;
*) break;; # terminate while loop
esac
shift
done
# transform the genome if not already exist
if [ ! -f $genome.t2g.bwt ];then
echo "Transform the genome and build bwa index"
TransformIndexBWA_genome.sh $genome
fi
# prepare source file
if [ -n "$fastq" ];then
mkdir $output
fastq_path=$(realpath $fastq)
echo $fastq_path' '$output > $output/file_list
# if paired-end
if [ -n "$fastq2" ];then
paired_end="1"
fastq2_path=$(realpath $fastq2)
echo $fastq2_path' '$output >> $output/file_list
fi
elif [ -n "$sra_id" ];then
echo "running fastq-dump to get the fastq files from SRA"
echo "---------fastq-dump:start----------"
fastq-dump --split-3 $sra_id
echo "---------fastq-dump:end----------"
mkdir $output
if [ -f "$sra_id.fastq" ];then
fastq_path=$(realpath $sra_id.fastq)
echo $fastq_path' '$output > $output/file_list
elif [ -f $sra_id\_1.fastq ] && [ -f $sra_id\_2.fastq ];then
paired_end="1"
fastq1_path=$(realpath $sra_id\_1.fastq)
fastq2_path=$(realpath $sra_id\_2.fastq)
echo $fastq1_path' '$output > $output/file_list
echo $fastq2_path' '$output >> $output/file_list
else
echo "fastq-dump error. Exit!"
exit
fi
else
echo "ERROR: need to specify input using either -sra_id or -fastq [and -fastq2 if paired-end] "
help
exit 1
fi
echo "Output folder :" $output
echo "Genome index :" $genome
echo "Paired-end :" $paired_end
echo "Phred score offset:" $phred
echo "Max gap b/w pairs :" $gap
echo "BWA run :" $bwa_run
echo "he_detect_args :" $he_detect_args
echo "Simple repeat file:" $sim_rep_file
echo "-------input file(s)-------"
cat $output/file_list
echo "---------------------------"
echo "--start--hyper-editing-pipeline--"
##########################################################################################
run_he_script="run_hyper_editing.sh" # if needed insert the proper path before the script name
bwa_aln_soft="bwa" # if needed insert the proper path before the tool name
bwa_mem_soft="bwa" # if needed insert the proper path before the tool name
SamToFastq_soft="bam2fastx" # if needed insert the proper path before the tool name
SamTools_soft="samtools" # if needed insert the proper path before the tool name
source_file=$output/file_list # path+full_name of the input files to run: fastq_file_path+name /TAB/ out_output (if the input files are of paired-end reads, each of the paired files should appear in separate line).
genome_bwa_ind=$genome # path+output of the index genome expected 5 files like: output.amb, output.ann, output.bwt, output.pac, output.sa
genome_trans_bwa_ind=$genome # path+output of the index transformed genome: for each of the 6 transformed (a2c a2g a2t g2c t2c t2g)=>tt: 5 files: output.tt.amb, output.tt.ann, output.tt.bwt, output.tt.pac, output.tt.sa + 1 fasta file output.tt.fa => tot 36 files
genome_fasta=$genome.fa # path+full_name of the fasta file of the original genome
################################################################################################
$run_he_script $genome_bwa_ind $genome_trans_bwa_ind $genome_fasta $phred $paired_end $gap $output $bwa_run $he_detect_args $source_file $bwa_aln_soft $bwa_mem_soft $SamToFastq_soft $SamTools_soft $sim_rep_file
| true
|
ecebf12af47693a9ff67c35c2a8f96f871dc422e
|
Shell
|
pier-bezuhoff/Clonium4Android
|
/graphics/green_remake/render.sh
|
UTF-8
| 457
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# make 1..7 holes in every file in bases/ by hole masks from holes/ adding hole outline from hole_outlines/ and storing in set/
base_dir=${1:-"bases/"}
holes_dir=${2:-"holes/"}
outlines_dir=${3:-"hole_outlines/"}
output_dir=${4:-"set/"}
render_base_script=${5:-"./render_base.sh"}
for base_file in ${base_dir%/}/*.png
do
if [[ -f $base_file ]]
then
$render_base_script $base_file $holes_dir $outlines_dir $output_dir
fi
done
| true
|
49f1e7ca3914aa49679cda5c6c1b59890504147d
|
Shell
|
ALTISLIFERPG-XOREAXEAX-DE/Deployment
|
/bin/release.sh
|
UTF-8
| 3,272
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
RELEASE="${1}"
DATESTAMP="${2}"
RELEASE_DIRECTORY="/cygdrive/c/CYGWIN_RELEASES/${RELEASE}/${DATESTAMP}"
SOURCE_UPSTREAM="../Upstream/Altis-4.4r2/Altis-4.4r2"
SOURCE_TREE="../Altis"
SOURCE_MISSION="../Mission"
SOURCE_TEXTURES="../Textures"
PBO_CONSOLE="/cygdrive/c/Program Files/PBO Manager v.1.4 beta/PBOConsole.exe"
echo "building a release for ${RELEASE} (${DATESTAMP})"
for DIRECTORY in "Altis_Life.Altis" "life_server"; do
mkdir -pv "${RELEASE_DIRECTORY}/${DIRECTORY}"
#
# preseed the directory with upstream files
#
rsync -Pavpx --delete \
"${SOURCE_UPSTREAM}/${DIRECTORY}/." \
"${RELEASE_DIRECTORY}/${DIRECTORY}/."
done
#
# copy the mission file
#
test -f "${SOURCE_MISSION}/mission.sqm" && rsync -Pavpx \
"${SOURCE_MISSION}/mission.sqm" \
"${RELEASE_DIRECTORY}/Altis_Life.Altis/."
#
# config the Config_vItems.hpp
#
test -f "${SOURCE_MISSION}/tmp/Config_vItems.hpp" && rsync -Pavpx \
"${SOURCE_MISSION}/tmp/Config_vItems.hpp" \
"${RELEASE_DIRECTORY}/Altis_Life.Altis/."
#
# copy the textures
#
test -d "${SOURCE_TEXTURES}/textures" && rsync -Pavpx \
"${SOURCE_TEXTURES}/textures/." \
"${RELEASE_DIRECTORY}/Altis_Life.Altis/textures/."
for DIRECTORY in "Altis_Life.Altis" "life_server"; do
#
# copy our overlay files into the release
#
test -d "${SOURCE_TREE}/${DIRECTORY}" && rsync -Pavpx \
"${SOURCE_TREE}/${DIRECTORY}/." \
"${RELEASE_DIRECTORY}/${DIRECTORY}/."
#
# build the PBO files
#
"${PBO_CONSOLE}" \
-pack "C:\\CYGWIN_RELEASES\\${RELEASE}\\${DATESTAMP}\\${DIRECTORY}" \
"C:\\CYGWIN_RELEASES\\${RELEASE}\\${DATESTAMP}\\${DIRECTORY}.pbo"
if [[ "production" == "${RELEASE}" ]]; then
mkdir -pv "production/${DATESTAMP}"
rsync -Pavpx \
"${RELEASE_DIRECTORY}/${DIRECTORY}.pbo" \
"production/${DATESTAMP}/${DIRECTORY}.pbo"
fi
done
SERVER="127.0.0.1"
if [[ "testing" == "${RELEASE}" ]]; then
SERVER="192.168.4.114"
fi
if [[ "production" == "${RELEASE}" ]]; then
SERVER="altisliferpg.xoreaxeax.de"
fi
#
# deploy to server
#
TARGET_DIRECTORY="/home/steam/Steam/steamapps/common/Arma\ 3\ Server"
rsync -Pavpx \
"${RELEASE_DIRECTORY}/Altis_Life.Altis.pbo" \
"steam@${SERVER}:${TARGET_DIRECTORY}/mpmissions/${RELEASE}_Altis_Life.Altis.pbo"
rsync -Pavpx \
"${RELEASE_DIRECTORY}/life_server.pbo" \
"steam@${SERVER}:${TARGET_DIRECTORY}/@life_server/addons/."
#
# restart arma3 on betaserver
#
if [[ "testing" == "${RELEASE}" ]]; then
ssh steam@${SERVER} -t make -C /home/steam restart
fi
sleep 1
#
# validate the contents so we know we copied everything correctly :)
#
ls -ali "${RELEASE_DIRECTORY}"
echo
sha1sum ${RELEASE_DIRECTORY}/Altis_Life.Altis.pbo
ls -al ${RELEASE_DIRECTORY}/Altis_Life.Altis.pbo
ssh -q steam@${SERVER} -t sha1sum "${TARGET_DIRECTORY}/mpmissions/${RELEASE}_Altis_Life.Altis.pbo"
ssh -q steam@${SERVER} -t ls -al "${TARGET_DIRECTORY}/mpmissions/${RELEASE}_Altis_Life.Altis.pbo"
echo
sha1sum ${RELEASE_DIRECTORY}/life_server.pbo
ls -al ${RELEASE_DIRECTORY}/life_server.pbo
ssh -q steam@${SERVER} -t sha1sum "${TARGET_DIRECTORY}/@life_server/addons/life_server.pbo"
ssh -q steam@${SERVER} -t ls -al "${TARGET_DIRECTORY}/@life_server/addons/life_server.pbo"
exit 0
| true
|
81faf02fe96e92484da9221dc100cc974311aeb7
|
Shell
|
mblair/setup
|
/to_clean_up/postgresql.sh
|
UTF-8
| 1,968
| 3
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#Determine prereqs for CentOS and Ubuntu.
cd /home/matt/src
wget http://wwwmaster.postgresql.org/redir/198/h/source/v$POSTGRES_VER/postgresql-$POSTGRES_VER.tar.bz2
tar xjvf postgresql-$POSTGRES_VER.tar.bz2
cd postgresql-$POSTGRES_VER
./configure
make -j4
checkinstall --fstrans=no make install-world #installs man pages too.
echo 'PATH=$PATH:/usr/local/pgsql/bin' >> /home/matt/.bash_profile
echo '/usr/local/pgsql/lib' > /etc/ld.so.conf.d/pgsql.conf
echo 'MANPATH=$MANPATH:/usr/local/pgsql/share/man' >> /home/matt/.bash_profile
cp contrib/start-scripts/linux /etc/init.d/postgres
chmod a+x /etc/init.d/postgres
if [ $OS = "CentOS" ]; then
chkconfig --add postgres
chkconfig postgres on
else
update-rc.d postgres defaults
fi
if [ $OS = "CentOS" ]; then
adduser -m -r postgres
else
adduser postgres --disabled-password --gecos ""
fi
mkdir /usr/local/pgsql/data
chown postgres /usr/local/pgsql/data
cat > /home/postgres/script.sh << "EOF"
/usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data
/usr/local/pgsql/bin/postgres -D /usr/local/pgsql/data > logfile 2>&1 &
sleep 10 # Give PG time to start.
/usr/local/pgsql/bin/createdb test
/usr/local/pgsql/bin/psql -c "ALTER USER postgres WITH PASSWORD 'pgpass'"
EOF
sed -i 's/^\t//' /home/postgres/script.sh
chmod +x /home/postgres/script.sh
su postgres -c 'cd && ./script.sh' #Not sure if that cd is needed. su postgres starts me in /home/postgres.
echo 'localhost:*:*:postgres:pgpass' > /home/matt/.pgpass #might want to change that.
chown matt:matt /home/matt/.pgpass
chmod 0600 /home/matt/.pgpass
sed -i '/^local.*trust$/s/trust/password/g' /usr/local/pgsql/data/pg_hba.conf
sed -i '/^host.*trust$/s/trust/password/g' /usr/local/pgsql/data/pg_hba.conf
/etc/init.d/postgres restart
#gem install pg -- --with-pg-dir=/usr/local/pgsql #For apps you're developing.
# - OR - #
#bundle config build.pg --with-pg-dir=/usr/local/pgsql/ #For stuff whose gems you've installed via Bundler (Heroku apps, etc).
| true
|
57042a0b939a25d47af69d46aebec7fedccf07b1
|
Shell
|
PropertyBrands/btt-drupalcamp-denver-2015-scripts
|
/provisioning/create-db.sh
|
UTF-8
| 360
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
EXPECTED_ARGS=3
E_BADARGS=65
MYSQL=`which mysql`
QUERY1="CREATE DATABASE IF NOT EXISTS $1;"
QUERY2="GRANT ALL ON *.* TO '$2'@'localhost' IDENTIFIED BY '$3';"
QUERY3="FLUSH PRIVILEGES;"
SQL="${QUERY1}${QUERY2}${QUERY3}"
if [ $# -ne $EXPECTED_ARGS ]
then
echo "Usage: $0 dbname dbuser dbpass"
exit ${E_BADARGS}
fi
${MYSQL} -uroot -p -e "$SQL"
| true
|
0dee2b1b5793f25301fac46b0be0a7aad61ecc74
|
Shell
|
LGSInnovations/edison-debian-image
|
/edison-image-edison-ext4/var/lib/dpkg/info/lynx.preinst
|
UTF-8
| 1,562
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/sh
set -e
# Inspired by
# http://wiki.debian.org/DpkgConffileHandling
# Remove a no-longer used conffile
rm_or_moveconffile() {
PKGNAME="$1"
CONFFILE="$2"
NEWDIR="$3"
if [ -e "$CONFFILE" ]; then
md5sum="`md5sum \"$CONFFILE\" | sed -e \"s/ .*//\"`"
old_md5sum="`dpkg-query -W -f='${Conffiles}' $PKGNAME | sed -n -e \"\\\\' $CONFFILE '{s/ obsolete$//;s/.* //p}\"`"
if [ "$md5sum" != "$old_md5sum" ]; then
test -e "$NEWDIR" || mkdir -m 755 -p "$NEWDIR"
echo "Obsolete conffile $CONFFILE has been modified by you." 1>&2
if [ -e "$NEWDIR/`basename $CONFFILE`" ]; then
echo "replacement conffile $NEWDIR/`basename $CONFFILE` already exists." 1>&2
echo "Saving as $CONFFILE.dpkg-bak ..." 1>&2
mv -f "$CONFFILE" "$CONFFILE".dpkg-bak
else
echo "Moving conffile $CONFFILE to new location $NEWDIR" 1>&2
mv -f "$CONFFILE" "$NEWDIR/"
fi
else
echo "Removing obsolete conffile $CONFFILE ..." 1>&2
rm -f "$CONFFILE"
fi
fi
}
case "$1" in
install|upgrade)
# Upgrading from a a real, non-dummy lynx package.
if dpkg --compare-versions "$2" 'lt-nl' '2.8.7dev9-1.1' ; then
update-alternatives --quiet --remove www-browser /usr/bin/lynx
update-alternatives --quiet --remove lynx /usr/bin/lynx.stable
rm_or_moveconffile lynx /etc/lynx.cfg /etc/lynx-cur
rm_or_moveconffile lynx /etc/lynx.lss /etc/lynx-cur
fi
;;
*)
esac
| true
|
e6462f6d0c753b1380285aa8a3d9ec8969f6ad9e
|
Shell
|
matthewwedlow/Expect_Scripts
|
/script.sh
|
UTF-8
| 1,396
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/expect -f
# Set variables
set hostname [lindex $argv 0]
set username $env(USER)
set password [lindex $argv 1]
set enablepassword [lindex $argv 2]
# Log results
log_file -a ~/results.log
# Output current working device
send_user "\n"
send_user ">>>>> Working on $hostname @ [exec date] <<<<<\n"
send_user "\n"
# Don't check keys
spawn ssh -o StrictHostKeyChecking=no $username\@$hostname
# Check for SSH problems
expect {
timeout { send_user "\nTimeout Exceeded - Check Host\n"; exit 1 }
eof { send_user "\nSSH Connection To $hostname Failed\n"; exit 1 }
"*#" {}
"*assword:" {
send "$password\n"
}
}
# Check if you're in enable mode and enter it you aren't
expect {
default { send_user "\nEnable Mode Failed - Check Password\n"; exit 1 }
"*#" {}
"*>" {
send "enable\n"
expect "*assword"
send "$enablepassword\n"
expect "*#"
}
}
# Enter configuration mode
send "conf t\n"
expect "(config)#"
# Enter your commands here. Examples listed below
#send "tacacs-server host 10.0.0.5\n"
#expect "(config)#"
#send "tacacs-server directed-request\n"
#expect "(config)#"
#send "tacacs-server key 7 0000000000000\n"
#expect "(config)#"
#send "ntp server 10.0.0.9\n"
#expect "(config)#"
#send "ip domain-name yourdomain.com\n"
#expect "(config)#"
send "end\n"
expect "#"
send "write mem\n"
expect "#"
send "exit\n"
expect ":~\$"
exit
| true
|
d056cbd0e765e694f04f521e75e268332841d8cd
|
Shell
|
Vinotha16/WIN_ROLLBACK
|
/templates/linux_actualfacts/ubuntu14.04/rsh_216_actual.fact
|
UTF-8
| 553
| 2.734375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
if [ $(sudo egrep "^shell|^login|^exec" /etc/inetd.* 2> /dev/null | wc -l) -ne 0 ] || [ $(sudo grep disable.*yes /etc/xinetd.conf /etc/xinetd.d/* 2> /dev/null | grep shell | wc -l) -ne 0 ] || [ $(sudo grep disable.*yes /etc/xinetd.conf /etc/xinetd.d/* 2> /dev/null | grep login | wc -l) -ne 0 ] || [ $(sudo grep disable.*yes /etc/xinetd.conf /etc/xinetd.d/* 2> /dev/null | grep exec | wc -l) -ne 0 ]; then
echo "{ \"rsh_216_actual\" : \"\" }"
else
echo "{ \"rsh_216_actual\" : \"not enabled\" }"
exit 1
fi
done
| true
|
317eaf60e37b11f318f0726d04c78adc18932e74
|
Shell
|
CSU-CIRA/tempestd_data_assimilation
|
/TempestD_converter/splitH5dump
|
UTF-8
| 992
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
# Dumps the datasets in an HDF5 file to individual dump files - one for each
# dataset. The files have the names of the datasets except that any spaces are
# replaced with underscores.
# Argument:
# 1: path of the HDF5 file
# 2: directory to put the dataset dump files
#set -x
# Get the name of this script
this=`basename $0`
warning() {
echo $this: $* 1>&2
}
function join_by { local IFS="$1"; shift; echo "$*"; }
h5path=$1
dsFilesDir=$2
regex='\"(.*)\"'
h5dump --header $h5path | while read dsq ds; do
if [ $dsq = DATASET ]; then
if [[ $ds =~ $regex ]]; then
#echo ${BASH_REMATCH[1]}
read -ra fn <<< "${BASH_REMATCH[1]}"
fname=$(join_by _ "${fn[@]}")
echo fname: $fname
echo dataset: \"${BASH_REMATCH[1]}\"
h5dump --dataset="${BASH_REMATCH[1]}" $h5path > $dsFilesDir/$fname
else
warning "Regex: \"$regex\" did not match string: $ds"
fi
fi
done
| true
|
87625643fdcd5502546a8fbfb3ea9e38f5a00006
|
Shell
|
trasba/auto-ipv6
|
/ipv6.sh
|
UTF-8
| 517
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
cd "$(dirname "$0")"
while read var value
do
export "$var"="$value"
done < config
regex='^([0-9a-fA-F]{0,4}:){1,7}[0-9a-fA-F]{0,4}$'
if [[ $ipv6addr =~ $regex ]]; then
echo $ipv6addr
else
echo "no valid ipv6 address"
fi
if [[ $ipv6gtwy =~ $regex ]]; then
echo $ipv6gtwy
else
echo "no valid ipv6 gateway"
fi
ip -6 addr add $ipv6addr dev $dev
ping6 -c2 -I ens2 ff02::1
ip -6 route add $ipv6gtwy dev $dev
ip -6 route add default via $ipv6gtwy dev $dev
| true
|
22eae87a618082a36bed5df37633c408d94daa73
|
Shell
|
LadaKazinkina/usaivlrzzjw
|
/.default/act3.sh
|
UTF-8
| 113
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
main () {
#ps -aux | less
pwd
echo $0
read -e -p "Variable=" variable
echo $variable
}
main;
| true
|
3385a54f8a8da8e9801810e704fa6288d6a0f7c1
|
Shell
|
Peemag/M3U-Playlist-Generator-for-http-streaming---BASH-Script
|
/Generate_Playlist.sh
|
UTF-8
| 2,129
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/sh
# Author: Luca Oltenau
# Version 0.1 - 10.10.2016
#
# This simple script will generate a M3U playlist out of a specified folder containing media files,
# and add an URL to the path, so you can use the Playlist in conjunction with a Web Server as a simple Media Streaming System.
# All files will be scanned by ffprobe to obtain media duration, so please make sure it is installed on your system.
# On each run, the script will delete the previously generated Playlist file and replace with a new one.
#########
# Setup #
#########
## Name of generated playlist
M3Uname=Playlist.m3u
## Please specify the folder you would like to scan. Don't add final /. (e.g. /var/www/media )
videopath=/var/www/media
## URL you would like to append to the path. Add / at the end. (e.g. https://www.example.com/media/)
URL=https://www.example.com/media/
## Uncomment if you would like to replace all spaces in the filenames by underscore, so streaming via http is possible.
## ATTENTION: This will rename ALL files in the folder (excluding subfolders)
#find $videopath/ -depth -name "* *" -execdir rename 's/ /_/g' "{}" \;
## Specify your webservers User and Group. (user:group) You can find out by running ls -l in your www folder.
RIGHTS=www:www
## Uncomment if you would like to change ownership of the files to match webservers User and Group.
#chown $RIGHTS $videopath
########
# Code #
########
# Removing old M3U Playlist
rm -f $videopath/$M3Uname
# Generate M3U Playlist by scanning media files and extracting duration with ffprobe.
for f in $videopath/*.m* $videopath/*.avi; do length=`ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 $f | rev | cut -c8- | rev`; datei=`ls $f | xargs -n 1 basename | rev | cut -c5- | rev | sed -r 's/_/ /g'`; dateisauber=`ls $f | xargs -n 1 basename`; dateimitpfad=$URL$dateisauber; echo -e "#EXTINF:$length,$datei\n$dateimitpfad" >> $videopath/$M3Uname; done
# Add necessary line to M3U Playlist.
sed -i '1i#EXTM3U' $videopath/$M3Uname
# Change ownership of Playlist to match http user. Uncomment if not needed.
chown $RIGHTS $videopath/$M3Uname
| true
|
ac9f960eb16f0a78c520ce1425ba2d599c57878b
|
Shell
|
hubmapconsortium/cwltool
|
/conformance-test.sh
|
UTF-8
| 5,036
| 3.875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
venv() {
if ! test -d "$1" ; then
if command -v virtualenv > /dev/null; then
virtualenv -p python3 "$1"
else
python3 -m venv "$1"
fi
fi
# shellcheck source=/dev/null
source "$1"/bin/activate
}
# Set these environment variables when running the script, e.g.:
# version=v1.1 spec_branch=new_test container=docker ./conformance_test.sh
# Version of the standard to test against
# Current options: v1.0, v1.1, v1.2
version=${version:-v1.0}
# Which branch of the standard's repo to use.
# This can be useful when adding new features
spec_branch=${spec_branch:-main}
# Which container runtime to use
# Valid options: docker, singularity
container=${container:-docker}
set -e
set -x
if [[ "$version" = "v1.0" ]] ; then
repo=common-workflow-language
else
# shellcheck disable=SC2001
repo=cwl-$(echo "$version" | sed 's/\(v[0-9]*\.\)\([0-9]*\).*/\1\2/')
fi
if [ ! -d "${repo}-${spec_branch}" ]; then
if [ ! -f "${repo}-${spec_branch}.tar.gz" ]; then
wget "https://github.com/common-workflow-language/${repo}/archive/${spec_branch}.tar.gz"
fi
tar xzf "${spec_branch}.tar.gz"
fi
if [ "${container}" == "docker" ]; then
docker pull node:slim
fi
venv cwltool-venv3
pip3 install -U setuptools wheel pip
pip3 uninstall -y cwltool
pip3 install -e .
pip3 install codecov cwltest>=2.1
pushd "${repo}-${spec_branch}" || exit 1
# shellcheck disable=SC2043
if [[ "$version" = "v1.0" ]]; then
DRAFT="DRAFT=v1.0"
fi
# Clean up all cov data
find . -name '.coverage*' -print0 | xargs -0 rm -f
rm -f coverage.xml
source=$(realpath ../cwltool)
COVERAGE_RC=${PWD}/.coveragerc
cat > "${COVERAGE_RC}" <<EOF
[run]
branch = True
source = ${source}
[report]
exclude_lines =
if self.debug:
pragma: no cover
raise NotImplementedError
if __name__ == .__main__.:
ignore_errors = True
omit =
tests/*
EOF
CWLTOOL_WITH_COV=${PWD}/cwltool_with_cov3
cat > "${CWLTOOL_WITH_COV}" <<EOF
#!/bin/bash
coverage run --parallel-mode --rcfile=${COVERAGE_RC} \
"$(command -v cwltool)" "\$@"
EOF
chmod a+x "${CWLTOOL_WITH_COV}"
unset exclusions
declare -a exclusions
EXTRA="--parallel"
# shellcheck disable=SC2154
if [[ "$version" = *dev* ]]
then
EXTRA+=" --enable-dev"
fi
if [[ "$container" = "singularity" ]]; then
EXTRA+=" --singularity"
# This test fails because Singularity and Docker have
# different views on how to deal with this.
exclusions+=(docker_entrypoint)
if [[ "${version}" = "v1.1" ]]; then
# This fails because of a difference (in Singularity vs Docker) in
# the way filehandles are passed to processes in the container and
# wc can tell somehow.
# See issue #1440
exclusions+=(stdin_shorcut)
fi
if [[ "${version}" = "v1.2" ]]; then
# See issue #1441
exclusions+=(iwdr_dir_literal_real_file)
fi
fi
if [ -n "$EXTRA" ]
then
EXTRA="EXTRA=${EXTRA}"
fi
if [ "$GIT_BRANCH" = "origin/main" ] && [[ "$version" = "v1.0" ]] && [[ "$container" = "docker" ]]
then
rm -Rf conformance
# shellcheck disable=SC2154
git clone http://"${jenkins_cwl_conformance}"@github.com/common-workflow-language/conformance.git
git -C conformance config user.email "cwl-bot@users.noreply.github.com"
git -C conformance config user.name "CWL Jenkins build bot"
CONFORMANCE_MSG=$(cat << EOM
Conformance test of cwltool ${tool_ver} for CWL ${version}
Commit: ${GIT_COMMIT}
Python version: 3
Container: ${container}
EOM
)
tool_ver=$(cwltool --version | awk '{ print $2 }')
badgedir=${PWD}/conformance/cwltool/cwl_${version}/cwltool_${tool_ver}
mkdir -p "${PWD}"/conformance/cwltool/cwl_"${version}"/
rm -fr "${badgedir}"
BADGE=" --badgedir=${badgedir}"
fi
if (( "${#exclusions[*]}" > 0 )); then
EXCLUDE=-S$(IFS=,; echo "${exclusions[*]}")
else
EXCLUDE=""
fi
# shellcheck disable=SC2086
LC_ALL=C.UTF-8 ./run_test.sh --junit-xml=result3.xml ${EXCLUDE} \
RUNNER=${CWLTOOL_WITH_COV} "-j$(nproc)" ${BADGE} \
${DRAFT} "${EXTRA}" \
"--classname=py3_${container}"
# LC_ALL=C is to work around junit-xml ASCII only bug
# capture return code of ./run_test.sh
CODE=$?
find . -name '.coverage.*' -print0 | xargs -0 coverage combine --rcfile="${COVERAGE_RC}" --append
coverage xml --rcfile="${COVERAGE_RC}"
codecov --file coverage.xml
if [ -d conformance ]
then
rm -rf conformance/cwltool/cwl_"${version}"/cwltool_latest
cp -r conformance/cwltool/cwl_"${version}"/cwltool_"${tool_ver}" conformance/cwltool/cwl_"${version}"/cwltool_latest
git -C conformance add --all
git -C conformance diff-index --quiet HEAD || git -C conformance commit -m "${CONFORMANCE_MSG}"
git -C conformance push http://"${jenkins_cwl_conformance}":x-oauth-basic@github.com/common-workflow-language/conformance.git
fi
popd || exit
deactivate
# build new docker container
if [ "$GIT_BRANCH" = "origin/main" ] && [[ "$version" = "v1.0" ]]
then
./build-cwl-docker.sh || true
fi
#docker rm -v $(docker ps -a -f status=exited | sed 's/ */ /g' | cut -d' ' -f1)
exit ${CODE}
| true
|
af9f7bfe2f3d72a89f3ef2259b7d7765f23db756
|
Shell
|
RebacaInc/abot_charm
|
/oai-epc/hooks/hss-relation-changed
|
UTF-8
| 5,364
| 3.3125
| 3
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
################################################################################
#
# Copyright (c) 2016, EURECOM (www.eurecom.fr)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
#
################################################################################
# file hss-relation-changed
# brief called when the hss-epc relation is changed
# author navid.nikaein@eurecom.fr and A. B. Molini
#
# Juju relation hook: reacts to data published by the HSS charm on the
# S6a-epc interface, keeps the local EPC software in sync with the HSS
# state, and propagates "epc_running" / restart triggers to related units.
set -eux
source $CHARM_DIR/utils/common
set_env_paths
#Gather everything HSS charm puts on the wire on S6a-epc interface
juju-log "Retrieving HSS relation info"
ip_address_hss=`relation-get ip_address`
hss_fqdn=`relation-get hss_hostname`
hss_running=`relation-get hss_running`
mme_in_db=`relation-get mme_in_db`
trigger_restart=`relation-get trigger_restart`
juju-log "trigger_restart=$trigger_restart, hss_running=$hss_running, mme_in_db=$mme_in_db , relation_ids_epc=$(relation-ids epc) "
# .trigger caches the last trigger_restart value this unit acted upon, so
# a repeated hook invocation does not restart the EPC twice.
if [ ! -f $CHARM_DIR/.trigger ]; then
echo "0" > $CHARM_DIR/.trigger
fi
if [ -z "$ip_address_hss" ]; then
juju-log "Data not sent yet"
exit 0 # exit silently to safely interrupt the execution of the hook without error
fi
if [ "$mme_in_db" == "no" ]; then
juju-log "Waiting for the mme insertion in the db......"
exit 0
elif [ "$mme_in_db" == "yes" ]; then
juju-log "Mme has been inserted so let's run EPC!!!"
fi
# HSS reported down: stop the local EPC (if it runs) and tell peers.
if [ "$hss_running" == "no" ]; then
check_epc_process
if [ $status -eq 0 ]; then
service mme_gw stop
juju-log "The database relation is broken...EPC is stopped...and waiting for db"
status-set blocked "Add the db relation"
if [ -n "$(relation-ids epc)" ]; then
for epc_id in $(relation-ids epc); do
juju-log ""$epc_id""
relation-set -r "$epc_id" epc_running=no
done
fi
else
juju-log "Wait for db relation before starting EPC software"
status-set blocked "Add the db relation"
fi
exit 0
fi
#Define entry to link the HSS IP to its FQDN
if [ -z "$(grep -o "$hss_fqdn" /etc/hosts)" ]; then
echo "$ip_address_hss $hss_fqdn hss" >> /etc/hosts
fi
# Short hostname (strip the domain part) goes into the EPC config.
hss_hostname=${hss_fqdn%%.*}
sed -r -i "s/(HSS_HOSTNAME[^\"]*)\".*\"/\1\"$hss_hostname\"/" $epc_conf_path/epc.conf
if [ "$trigger_restart" != "$(cat $CHARM_DIR/.trigger)" ]; then
#update the trigger value in the file
echo "$trigger_restart" > $CHARM_DIR/.trigger
#restart the EPC software for sure because it means that the config-changed of HSS has been run
# and so the HSS software has been rerun
$CHARM_DIR/hooks/start
if [ -n "$(relation-ids epc)" ]; then
for epc_id in $(relation-ids epc); do
juju-log ""$epc_id""
restart=`relation-get -r $epc_id trigger_restart $JUJU_UNIT_NAME`
#this trigger is only needed in the case where a "juju set" of an
#hss option is performed.
relation-set -r $epc_id trigger_restart=$(($((restart + 1)) % 10))
###
#relation-set -r "$epc_id" epc_running=yes
###this is not needed, but to be sure I leave it, it doesn't bother
done
fi
else
#if the trigger_restart hasn't changed means that if this is hook is running is because
#the db relation has been added or the hss relation has been added. So for sure the
#EPC software is not running unless this hook is queued multiple times.
check_epc_process
#this check is to avoid that every time this hook is called the EPC software
#is restarted even if it is already running and connected to HSS software.
if [ $status -eq 1 ]; then
$CHARM_DIR/hooks/start
if [ -n "$(relation-ids epc)" ]; then
for epc_id in $(relation-ids epc); do
juju-log ""$epc_id""
relation-set -r "$epc_id" epc_running=yes
done
fi
fi
fi
# Persist the OPc key advertised by the HSS for other components to read.
hss_opc_val=`relation-get opc-key`
echo $hss_opc_val > /tmp/opcvalue
| true
|
2627664d496074104fff04a5d364730b737cd7d7
|
Shell
|
rajeshm7910/devportal-binary
|
/devportal-binary-bundle-Redhat-6-x86_64/lib/bash_toolkit.sh
|
UTF-8
| 14,480
| 4.34375
| 4
|
[] |
no_license
|
#!/bin/bash
###############################################################################
# Bash Toolkit: Useful functions for all scripts to use, such as user prompting
# and output display.
###############################################################################
# ------------------------------------------------------------------------------
# Prompt and get input from user for a yes or no
# question, defaulting to yes if they just hit <Enter>.
#
# Parameters:
# first: Question message to display to user, will be appended
# with "[Y/n]:".
# second: Variable to store the answer into. Valid
# values are "y" or "n".
#
# Example:
# prompt_question_yes_or_no_default_yes "Do you like APIs?" likes_apis
#
prompt_question_yes_or_no_default_yes() {
local question_message=$1
local resultvar=$2
local question_answered=n
# Grab the value of the variable that is referred to as an argument (yes, it's like that)
eval response_var=\$$2
# Then check to see if said variable has already been set
if [ -z "$response_var" ]; then
# Not preset: keep prompting until we get a y/n (bare <ENTER> counts as "y").
until [[ $question_answered = "y" ]]; do
display_nonewline "${question_message?} [Y/n]: "
read answer
if [[ -z $answer || "y" = $answer || "Y" = $answer ]]; then
question_answered=y
answer=y
# Record the (normalized) answer in the install log.
echo $answer >> ${logfile}
elif [[ "n" = "$answer" || "N" = "$answer" ]]; then
question_answered=y
answer=n
echo $answer >> ${logfile}
else
echo 'Please answer "y", "n" or <ENTER> for "y"'
fi
done
# Store the normalized answer into the caller-named variable.
eval $resultvar="'$answer'"
# Write this question/answer to the config file
# (used later to replay the installation unattended).
if [ "y" = "$generate_autoinstall_config_file" ]; then
echo "#" $question_message >> $capturefile
echo $resultvar=$answer >> $capturefile
fi
else
# It has been set (meaning it came from a config file), so just use that value
eval $resultvar=\$$2
fi
}
# ------------------------------------------------------------------------------
# Prompt and get input from user for a yes or no
# question, defaulting to yes if they just hit <Enter>.
#
# Parameters:
# first: Question message to display to user, will be appended
# with "[Y/n]:".
# second: Variable to store the answer into. Valid
# values are "y" or "n".
#
# Example:
# prompt_question_yes_or_no_default_yes "Do you like APIs?" likes_apis
#
prompt_question_yes_or_no_default_no() {
local question_message=$1
local resultvar=$2
local question_answered=n
# Grab the value of the variable that is referred to as an argument (yes, it's like that)
eval response_var=\$$2
# Then check to see if said variable has already been set
if [ -z "$response_var" ]; then
# Not preset: keep prompting until we get a y/n (bare <ENTER> counts as "n").
until [[ $question_answered = "y" ]]; do
display_nonewline "${question_message?} [y/N]: "
read answer
if [[ -z $answer || "n" = $answer || "N" = $answer ]]; then
question_answered=y
answer=n
# Record the (normalized) answer in the install log.
echo $answer >> ${logfile}
elif [[ "y" = "$answer" || "Y" = "$answer" ]]; then
question_answered=y
answer=y
echo $answer >> ${logfile}
else
echo 'Please answer "y", "n" or <ENTER> for "n"'
fi
done
# Store the normalized answer into the caller-named variable.
eval $resultvar="'$answer'"
# Write this question/answer to the config file.
# (used later to replay the installation unattended).
if [ "y" = "$generate_autoinstall_config_file" ]; then
echo "#" $question_message >> $capturefile
echo $resultvar=$answer >> $capturefile
fi
else
# It has been set (meaning it came from a config file), so just use that value.
eval $resultvar=\$$2
fi
}
# ------------------------------------------------------------------------------
# Ask user a question, then capture result to a variable.
#
# Parameters:
# first: Question message to display to user.
# second: Variable to store the answer into
# third: Default value for user to use if they just hit <ENTER>.
#
# Example:
# prompt_question_text_input "What is your favorite color?" color blue
#
prompt_question_text_input() {
local question_message=$1
local resultvar=$2
local default_value=$3
#grab the value of the variable that is referred to as an argument (yes, it's like that)
eval response_var=\$$2
#then check to see if said variable has already been set
if [ -z "$response_var" ]; then
# Show the default (if any) inside brackets as part of the prompt.
if [[ ! -z $default_value ]]; then
display_nonewline "${question_message?} [${default_value}]: "
else
display_nonewline "${question_message?} : "
fi
read answer
# Empty input falls back to the default when one was supplied;
# otherwise whatever the user typed (possibly empty) is used as-is.
if [[ -z $answer && ! -z $default_value ]]; then
eval $resultvar="'$default_value'"
echo $default_value >> ${logfile}
else
eval $resultvar="'$answer'"
echo $answer >> ${logfile}
fi
# Write this question/answer to the config file
# (used later to replay the installation unattended).
if [ "y" = "$generate_autoinstall_config_file" ]; then
echo "#" $question_message >> $capturefile
echo $resultvar=$answer >> $capturefile
fi
else
# It has been set (meaning it came from a config file), so just use that value
eval $resultvar=\$$2
fi
}
# ------------------------------------------------------------------------------
# Ask user a for a password without displaying on screen, then capture result
# to a variable.
#
# Parameters:
# first: Text to prompt user with
# second: Variable to store the answer into
#
# Example:
# prompt_question_password_input "What is your PIN number?" pin_number
#
prompt_question_password_input() {
local question_message=$1
local resultvar=$2
#grab the value of the variable that is referred to as an argument (yes, it's like that)
eval response_var=\$$2
#then check to see if said variable has already been set
if [ -z "$response_var" ]; then
unset bash_toolkit_answer
blank_allowed=0
# Re-prompt until something is typed, unless $3 == 1 which permits an
# empty password (the flag is re-evaluated each loop iteration).
while [[ $blank_allowed -eq 0 && -z $bash_toolkit_answer ]]; do
display_nonewline "${question_message?} : "
# read -s: do not echo the typed characters to the terminal.
read -s bash_toolkit_answer
echo ''
if [[ $3 -eq 1 ]] ; then
blank_allowed=1
fi
done
eval $resultvar="'$bash_toolkit_answer'"
# Log a mask, never the password itself.
echo "********" >> ${logfile}
#write this question/answer to the config file
# NOTE(review): the autoinstall capture file stores the password in
# PLAIN TEXT (next line) — confirm this is acceptable for your use.
if [ "y" = "$generate_autoinstall_config_file" ]; then
echo "#" $question_message >> $capturefile
echo $resultvar=$bash_toolkit_answer >> $capturefile
fi
else
#it has been set (meaning it came from a config file), so just use that value
eval $resultvar=\$$2
fi
}
# ------------------------------------------------------------------------------
# Ask user a for a password without displaying on screen and ask user
# for password again for confirmation, then capture result
# to a variable.
#
# Parameters:
# first: Text to prompt user with
# second: Variable to store the answer into
#
# Example:
# prompt_question_password_and_confirm_imput "What is super secret password?" password
#
function prompt_question_password_and_confirm_input() {
local question_message=$1
local resultvar=$2
# Grab the value of the variable that is referred to as an argument (yes, it's like that)
eval response_var=\$$2
# Then check to see if said variable has already been set
if [ -z "$response_var" ]; then
# It has not, so ask the user
# Ask twice and loop until both entries match.
bash_toolkit_password_valid=0
while [ $bash_toolkit_password_valid -eq 0 ] ; do
prompt_question_password_input "$question_message" bash_toolkit_pass
# (note: "bash_tookit" is a historic typo in this variable name,
# used consistently within this function)
prompt_question_password_input "Confirm password" bash_tookit_db_pass_confirm
if [ "$bash_toolkit_pass" != "$bash_tookit_db_pass_confirm" ] ; then
display_error "Password and password confirmation do not match."
unset bash_toolkit_pass
unset bash_tookit_db_pass_confirm
bash_toolkit_password_valid=0
else
bash_toolkit_password_valid=1
fi
done
eval $resultvar="'$bash_toolkit_pass'"
# Write this question/answer to the config file
# NOTE(review): stores the password in plain text in the capture file.
if [ "y" = "$generate_autoinstall_config_file" ]; then
echo "#" $question_message >> $capturefile
echo $resultvar=$bash_toolkit_pass >> $capturefile
fi
else
# It has been set (meaning it came from a config file), so just use that value
eval $resultvar=\$$2
fi
}
# ------------------------------------------------------------------------------
# Initialize script by creating log file, trapping errors, etc.
#
# Parameters:
# first: variable name of a temp directory if needed (optional)
#
script_initialize() {
# One-stop setup for calling scripts: shell options, colors, traps,
# logfile, optional temp dir, and a sane PATH.
temp_dir_var=$1
# Turn off case sensitive matching for our string compares
shopt -s nocasematch
# Set colors for displaying errors.
export RED=$(tput setaf 1)
export NORMAL=$(tput sgr0)
# Get the date of script running
export script_rundate="$(date '+%Y-%m-%d-%H.%M.%S')"
# Register_exception_handler in case errors are thrown by other programs
# or if user CTRL-C from script.
register_exception_handlers
# Call init_logfile function
init_logfile
# Temp dir will be created if temp_dir_var given.
create_tmp_dir $temp_dir_var
# Make sure /usr/local/bin is in the path.
if [[ $( echo $PATH | grep -c '/usr/local/bin' ) -eq 0 ]]; then
export PATH="${PATH}:/usr/local/bin"
fi
}
# ------------------------------------------------------------------------------
# Create tmp directory if first param is set
#
# Parameters:
# first: variable name of a temp directory
#
function create_tmp_dir() {
# Create a fresh ./tmp scratch directory (next to the script) and store
# its absolute path in the variable named by $1. No-op when $1 is empty.
temp_dir_var=$1
# create tmp directory
if [[ ! -z $temp_dir_var ]] ; then
bash_toolkit_get_tmp_directory bash_toolkit_temp_dir
# Remove it if already exists.
if [[ -d $bash_toolkit_temp_dir || -f $bash_toolkit_temp_dir ]]; then
rm -rf $bash_toolkit_temp_dir
fi
mkdir $bash_toolkit_temp_dir
# Hand the resolved path back through the caller-named variable.
eval $temp_dir_var="'$bash_toolkit_temp_dir'"
fi
}
# ------------------------------------------------------------------------------
# Remove tmp directory when exiting
function remove_tmp_dir() {
# Cleanup helper used by the exit/err traps: remove the script's ./tmp
# scratch dir plus installer leftovers under /tmp.
bash_toolkit_get_tmp_directory bash_toolkit_temp_dir
# Remove tmp directory
if [[ -d $bash_toolkit_temp_dir || -f $bash_toolkit_temp_dir ]]; then
rm -rf $bash_toolkit_temp_dir
fi
# NOTE(review): the two removals below hard-code devportal/drupal paths —
# confirm they are still the artifacts this bundle extracts.
if test -n "$(find /tmp -maxdepth 1 -name 'devportal-binary-bundle-*' -print -quit)"; then
rm -rf /tmp/devportal-binary-bundle*
fi
if [[ -d /tmp/drupal ]] ; then
rm -rf /tmp/drupal
fi
}
# ------------------------------------------------------------------------------
# Display a dashed horizontal line.
#
# Example:
# display_hr
#
display_hr() {
  # Emit an 80-column divider line (on screen and in the log).
  local divider='--------------------------------------------------------------------------------'
  display "$divider"
}
# ------------------------------------------------------------------------------
# Display a major heading
#
# Parameters:
# first: Message to display
#
# Example:
# display_h1 "Starting Install"
#
display_h1() {
  # Render a major section heading: blank line, then the title framed
  # by two horizontal rules.
  display
  display_hr
  display "$1"
  display_hr
}
# ------------------------------------------------------------------------------
# Display error message to user in red.
#
# Parameters:
# first: message to display
#
# Example:
# display_error "Virus detected!"
#
display_error() {
  # Show the message wrapped in the terminal's red/reset escape codes.
  local message=$1
  display "${RED}${message}${NORMAL}"
}
# ------------------------------------------------------------------------------
# Display messages in logfile and screen.
#
# Parameters:
# first: message to display
#
# Example:
# display "Hello World!"
#
display() {
  # Print the arguments to the terminal and append a copy to the logfile.
  # Quoting "$@" preserves the caller's spacing and prevents the shell from
  # word-splitting / glob-expanding the message (the original `echo $@`
  # collapsed runs of whitespace and expanded wildcards).
  echo "$@" 2>&1 | tee -a "${logfile}"
}
display_nonewline() {
  # Print the message without a trailing newline, appending a copy to the log.
  # '%b' keeps backslash escapes working (display_multiline relies on "\n")
  # while stopping '%' characters in the message from being interpreted as
  # printf format directives — the original `printf -- "${@}"` used caller
  # data as the format string.
  printf -- '%b' "$*" 2>&1 | tee -a "${logfile}"
}
display_multiline() {
  # Print the (required) message followed by a newline; the "\n" escape is
  # interpreted by display_nonewline, which also writes to the log.
  local message=${1?}
  display_nonewline "${message}\n"
}
# ------------------------------------------------------------------------------
# Invoke the exception_handler on CTRL-C
#
# This funciton is called by script_initialize
#
register_exception_handlers() {
  # Bash defines pseudo-signals ERR and EXIT that can be used to trap any
  # unhandled error or exit of the shell; SIGINT covers a user's CTRL-C.
  trap trap_signal_error ERR      # a command returned non-zero
  trap trap_signal_sigint SIGINT  # interrupt from keyboard
  trap trap_signal_exit EXIT      # normal script termination
}
################################################################################
# PRIVATE functions
################################################################################
# ------------------------------------------------------------------------------
# PRIVATE function, call script_initialize.
#
# Initialize logfile for script.
#
init_logfile() {
# Create (if needed) and validate the shared install log; exports the
# $logfile path used by display/display_nonewline.
# NOTE(review): assumes the caller has set $script_path — confirm.
export logfile="${script_path}/install.log"
if [ ! -e "$logfile" ] ; then
touch "$logfile"
fi
# Bail out early if the log cannot be written — everything logs through it.
if [ ! -w "$logfile" ] ; then
echo "Cannot write to file: $logfile. Please check permissions of this directory and file."
exit 1
fi
}
# ------------------------------------------------------------------------------
# PRIVATE function, call script_initialize.
#
# Clean up function called if signal caught.
#
function trap_signal_exit() {
  # EXIT handler: clear the scratch directory, then finish successfully.
  remove_tmp_dir
  exit 0
}
# ------------------------------------------------------------------------------
# PRIVATE function, call script_initialize.
#
# Clean up function called if signal caught.
#
function trap_signal_sigint() {
  # SIGINT (CTRL-C) handler: clear the scratch directory, exit with failure.
  remove_tmp_dir
  exit 1
}
# ------------------------------------------------------------------------------
# PRIVATE function, call script_initialize.
#
# Clean up function called if signal caught.
#
function trap_signal_error(){
# ERR handler: clean up, point the user at the logfile, show its tail,
# and exit with failure. (Keep text changes out of the heredocs below —
# their bodies are user-visible output.)
remove_tmp_dir
cat <<ENDOFMESSAGE
-------------------------------------------------------------
${RED} Exiting, ERROR!
The actions of this installer are written to a log here:
${logfile}
If you need support during this installation,
please include the logfile in your communication.${NORMAL}
Here are the last few lines of the logfile for your convenience:
-------------------------------------------------------------
ENDOFMESSAGE
# Show the most recent log entries so the cause is visible immediately.
tail -n 5 $logfile
cat <<-ENDOFMESSAGE
-------------------------------------------------------------
ENDOFMESSAGE
exit 1
}
# ------------------------------------------------------------------------------
# PRIVATE function
#
# Get the tmp directory
#
# Parameters:
# First: variable to set with script directory
#
function bash_toolkit_get_tmp_directory() {
# Resolve the absolute path of the ./tmp directory next to this script
# and store it in the variable named by $1 (eval-based out-parameter).
resultvar=$1
# Get directory this script is running in and put it in SCRIPT_PATH
bash_toolkit_cwd=`dirname $0`
bash_toolkit_tmp_dir=${bash_toolkit_cwd}/tmp
# Change path to the full absolute path now
# NOTE(review): readlink -f is GNU-specific — confirm target platforms.
bash_toolkit_abs_tmp_dir=`readlink -f $bash_toolkit_tmp_dir`
eval $resultvar="'$bash_toolkit_abs_tmp_dir'"
}
| true
|
08c734d7d8169fac774ded0676f607b3801a9d63
|
Shell
|
wisehead/shell_scripts
|
/val_calc/data_from_file/gettopfamily.sh
|
UTF-8
| 1,038
| 3.3125
| 3
|
[] |
no_license
|
#############################################################
# File Name: gettopfamily.sh
# Autohor: Hui Chen (c) 2020
# Mail: chenhui13@baidu.com
# Create Time: 2020/03/22-09:27:37
#############################################################
#!/bin/sh
# gettopfamily.sh
#
# Usage: gettopfamily.sh <income-file>
#
# Expects a file of whitespace-separated rows: id, months, total income.
# Prints "id average" (average = column 3 / column 2, two decimals) for
# every row, sorted by the average in descending numeric order.
#
# Original notes (translated from Chinese):
#   [ $# -lt 1 ]    : require at least one argument ($# is the arg count)
#   [ ! -f $1 ]     : require the argument to be a regular file (man test)
#   income=$1       : the input file handed to awk
#   awk             : divide column 3 by column 2 (kept to 2 decimal places)
#   sort -k 2 -n -r : numeric, descending sort on column 2 (the average)

# Validate arguments. Diagnostics go to stderr; exit with a standard
# non-zero status (the original's `exit -1` is not a portable exit value).
[ $# -lt 1 ] && echo "please input the income file" >&2 && exit 1
[ ! -f "$1" ] && echo "$1 is not a file" >&2 && exit 1

# Quote the filename so paths with spaces survive.
income=$1
awk '{
    printf("%d %0.2f\n", $1, $3/$2);
}' "$income" | sort -k 2 -n -r
| true
|
95b12669eab92cd29b8a332dc4e48aa7f14cb2ea
|
Shell
|
tbui/dotfiles-1
|
/.bashrc
|
UTF-8
| 498
| 3.125
| 3
|
[] |
no_license
|
#
# ~/.bashrc
#
# Interactive-shell setup: neovim aliases, fallback prompt, powerline-shell
# prompt hook, and a neofetch banner.

# If not running interactively, don't do anything
# ($- contains 'i' only in interactive shells).
[[ $- != *i* ]] && return
# Setup the XDG config
export XDG_CONFIG_HOME=~/.config
# Setup nvim / vim
alias vi='nvim'
alias vim='nvim'
export EDITOR='nvim'
alias ls='ls --color=auto'
# Plain fallback prompt; replaced below when powerline-shell is active.
PS1='[\u@\h \W]\$ '
# Setup powerline-shell
# Rebuild PS1 before each prompt, passing the last command's exit status.
function _update_ps1() {
PS1=$(powerline-shell $?)
}
# Skip powerline on the raw linux console; the regex guard keeps the hook
# from being appended to PROMPT_COMMAND more than once.
if [[ $TERM != linux && ! $PROMPT_COMMAND =~ _update_ps1 ]]; then
PROMPT_COMMAND="_update_ps1; $PROMPT_COMMAND"
fi
# Run neofetch
# NOTE(review): assumes neofetch is installed — otherwise every new shell
# prints a command-not-found error.
neofetch
| true
|
077554fdb3fd2d7b833f6fa59ad6642df2a03081
|
Shell
|
madanwork/pxcore-local
|
/examples/pxScene2d/src/jsMin.sh
|
UTF-8
| 556
| 3.8125
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# jsMin.sh — minify a JavaScript file with the Google Closure Compiler.
#
# Usage: jsMin.sh <input.js> [output.js]
# When <output.js> is omitted it defaults to <input>.min.js.

if [ "$#" -lt 1 ]
then
  printf "\n Usage: cCompiler <input.js> <output.js> \n\n"
  exit 1
fi

INPUT=$1
OUTPUT=$2

# The input file must exist before we hand it to the compiler.
# Pass the path as a printf *argument*, not as the format string, so names
# containing '%' or backslashes render correctly.
if [ ! -e "$INPUT" ]
then
  printf "\nCLOSURE COMPILER - Error: INPUT file '%s' not found.\n\n" "$INPUT"
  exit 1
fi

CLOSURE_COMPILER=closure-compiler-v20170218.jar

# Create default output filename if needed...
if [ -z "$2" ]
then
  OUTPUT="${INPUT%.*}".min.js
fi

printf "\nCLOSURE COMPILER - Processing: %s >>> %s \n\n" "$INPUT" "$OUTPUT"

# Quote the paths so filenames with spaces survive, and propagate the
# compiler's exit status instead of unconditionally reporting success.
if ! java -jar "$CLOSURE_COMPILER" --js "$INPUT" --js_output_file "$OUTPUT"
then
  printf "\nCLOSURE COMPILER - Error: compilation failed.\n\n" >&2
  exit 1
fi

printf "\n... Done !\n\n"
| true
|
069703cbd4ec1d96cfb53d3acc926cae5c6de6bd
|
Shell
|
cloudwm/installer
|
/tweaks/certbot-20-osrepo
|
UTF-8
| 526
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Installs certbot from the OS (universe) repository, removing the retired
# certbot PPA first. Helper functions (log, installPackage, waitOrStop,
# tagScript) come from include/startup.sh.

# Add this at the beginning of all scripts.
if [ -f "include/startup.sh" ]; then
. include/startup.sh
elif [ -f "../include/startup.sh" ]; then
. ../include/startup.sh
fi
echo "Installing certbot" | log
installPackage software-properties-common
waitOrStop 0 "Failed apt install: software-properties-common"
# Enable 'universe' (where certbot now lives) and drop the old PPA (-r).
add-apt-repository universe
add-apt-repository -r ppa:certbot/certbot
apt update
installPackage certbot
waitOrStop 0 "Failed apt install: certbot"
# tag ssl-ready.success
tagScript success
exit 0
| true
|
52b964d5cbd7cf5f7d28ad7503f28d63dc90c0e8
|
Shell
|
DriveClutch/circleci-python
|
/tools/deployment.sh
|
UTF-8
| 293
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash -e
# CI deployment dispatcher: prefer a repo-provided tools/deployment.sh,
# otherwise fall back to the bundled container-image pipeline.
# NOTE(review): the -e in the shebang is lost if this file is invoked as
# `bash deployment.sh` — confirm how CI executes it.

# Enable shell tracing when the repo opts in via a marker file.
if [[ -f ".circleci/debuglog" ]]; then
set -x
fi
# Check if there is a deployment.sh in the repo and exec
if [[ -x "tools/deployment.sh" ]]; then
tools/deployment.sh
exit $?
fi
# Container Images
/tools/docker.sh
# Helm Packages
#/tools/helm.sh
#
#/tools/helm-install.sh
| true
|
589e6586a6b1d4f68f4fc1f0a30ddad6ec02f6be
|
Shell
|
evankicks/kubernetes-samples
|
/deployment/kustomize-samples/spring/k8s/base/kubectl-apply-from-circle-ci.sh
|
UTF-8
| 455
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Applies the tenant/environment kustomize overlay to an EKS cluster from a
# bastion host. Expects BASTION_USER, TENANT and DOCKER_ENV to be exported
# by ~/.ssh/environment (sourced below).
source ~/.ssh/environment
# Echo the driving variables for CI log visibility.
echo ${BASTION_USER}
echo ${TENANT}
echo ${DOCKER_ENV}
# This updates context in ~/.kube/config file if it doesn't exist
aws eks update-kubeconfig --name ${TENANT}-eks-${DOCKER_ENV}
# Execute kubectl-aws-iam-auth.sh present in k8s/base
sh /home/${BASTION_USER}/k8s/base/kubectl-aws-iam-auth.sh
cd /home/${BASTION_USER}/k8s/overlays/${TENANT}/${DOCKER_ENV}/
# -k indicates to apply using kustomize
kubectl apply -k .
| true
|
a8768089e98addd827c1fcfac7a33ff43532476f
|
Shell
|
poojabhat1690/zebrafishAnalysis_Dec2017
|
/annotation/annotation/motifAnalysis/1
|
UTF-8
| 835
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Runs HOMER motif discovery over every *.fasta in the gene-list directory.
# Intended for an HPC cluster: `ml` loads the HOMER environment module.
ml homer/4.9-foss-2017a
cd /groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/noPASacceptedSamples/geneLists_noPASaccepted/
# Drop outputs of a previous run so they are not re-processed as inputs.
rm *_proper.fasta
for i in *.fasta
do
# Prepend a bare ">" header line before each sequence so HOMER accepts
# the plain sequence-per-line files as FASTA.
awk '{print ">\n" $0;}' "$i" > "$i"_proper.fasta
mkdir /scratch/pooja/"$i"
# NOTE(review): this first findMotifs.pl call uses fixed targets.fa /
# background.fa inside the loop — confirm it is intentional and not a
# leftover from testing.
findMotifs.pl targets.fa fasta motifResults/ -fasta background.fa
findMotifs.pl "$i"_proper.fasta fasta /scratch/pooja/"$i" -fasta /groups/ameres/Pooja//backgroundSample.fasta -b -len 4
done
#### doing the same for all the sequences of noPAS
#awk '{print ">\n" $0;}' allNoPAS_SLAMdunkExperiment.fasta > allNoPAS_SLAMdunkExperiment.fasta_proper.fasta
#findMotifs.pl allNoPAS_SLAMdunkExperiment.fasta_proper.fasta fasta /clustertmp/pooja/allNoPAS_SLAMdunkExperiment -fasta /groups/ameres/Pooja//backgroundSample.fasta -b -len 4
| true
|
e8187e0d7ea09ea994377b0df0d110ad14c1bcc3
|
Shell
|
kiciek/daemontools-encore
|
/rts.tests/00-preamble.sh
|
UTF-8
| 844
| 3.171875
| 3
|
[
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
# Put the build directory first on PATH so the freshly built daemontools
# binaries shadow any installed copies; then the usual system locations.
PATH=`pwd`:/command:/usr/local/bin:/usr/local/sbin:/bin:/sbin:/usr/bin:/usr/sbin:/usr/X11R6/bin:/usr/ucb
export PATH
# World-readable files/dirs by default so test fixtures have sane modes.
umask 022
die() {
  # Print a failure message and terminate the test run with status 1.
  local message="$*"
  echo "$message"
  exit 1
}
catexe() {
  # Write stdin to the file named by $1 and mark it executable; abort the
  # run (via die) if either step fails. $1 is quoted so paths containing
  # spaces or glob characters are handled correctly (the original left it
  # unquoted and subject to word-splitting).
  cat > "$1" || die "Could not create \"$1\""
  chmod +x "$1" || die "Could not chmod \"$1\""
}
filter_svstat() {
  # Normalize svstat output for comparison: replace the variable pid and
  # uptime numbers with a literal 'x'.
  sed -e 's/pid [0-9]*/pid x/' -e 's/[0-9]* seconds/x seconds/'
}
waituntil() {
  # Busy-poll the given command line until it succeeds.
  while ! "$@"
  do
    :
  done
}
waitwhile() {
  # Busy-poll the given command line until it fails.
  until ! "$@"
  do
    :
  done
}
waitok() {
  # Block until `svok` reports every named service as supervised.
  # "$@" (quoted) replaces the original unquoted $*, which word-split and
  # glob-expanded service directory names; the loop variable is made local
  # so it no longer leaks into the caller's scope.
  local svc
  for svc in "$@"
  do
    waituntil svok "$svc"
  done
}
waitnotok() {
  # Block until `svok` stops reporting every named service as supervised.
  # "$@" (quoted) replaces the original unquoted $*, which word-split and
  # glob-expanded service directory names; the loop variable is made local
  # so it no longer leaks into the caller's scope.
  local svc
  for svc in "$@"
  do
    waitwhile svok "$svc"
  done
}
# Build a clean per-run workspace: recreate rts-tmp, enter it, and create
# the test.sv service directory; remember the absolute root in TOP.
rm -rf rts-tmp || die "Could not clean up old rts-tmp"
mkdir rts-tmp || die "Could not create new rts-tmp"
cd rts-tmp || die "Could not change to rts-tmp"
mkdir test.sv || die "Could not create test.sv"
TOP=`pwd`
| true
|
ffa673f7cd74c8ea41a80919be274d5bfb23bec7
|
Shell
|
d-lopes/dmsplus
|
/scanner/src/upload.sh
|
UTF-8
| 6,679
| 3.671875
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Pushes an OCR-processed scan into the DMS web application:
#   1. read the OCR sidecar text (if any) and sanitize it for JSON,
#   2. find-or-create the document's metadata record via the REST API,
#   3. upload the PDF binary; on success delete the local copy, on failure
#      move it to an error/failed-upload folder for manual investigation.

# commen declarations
LOGFILE="/var/run/logs/`date +%Y-%m-%d`_watch.log"
BASE_DIR=/var/run
DOCUMENT_API_URL=http://$WEB_HOST/api/documents
# skip invalid input - we expect a file path
if [ -z "$1" ]; then
echo "`date +%Y-%m-%dT%H:%M:%S%:z` - ERROR: no input file provided" >> $LOGFILE 2>&1
exit 1
fi
# set file related variables
ORIG_FILE=$1
FILE_NAME=$(basename "$ORIG_FILE")
OCRED_FILE=$BASE_DIR/tmp/$FILE_NAME
SIDECAR_FILE=$BASE_DIR/tmp/$FILE_NAME.txt
UPLOADED_TXT_FILE=$BASE_DIR/tmp/$FILE_NAME.uploaded.txt
FAILED_UPLOAD_FILE=$BASE_DIR/err/failed_upload-$FILE_NAME
ERROR_FILE=$BASE_DIR/err/$FILE_NAME
DOCUMENT_ID="undefined" # this is going to be set later at runtime
# get contents from side car file and delete it (if available)
if [ -f "$SIDECAR_FILE" ]; then
cat "$SIDECAR_FILE" |tr '\n' ' ' |tr -d '\f' |tr '\\' '-' |tr '"' ' ' | sed -E 's/( |[^[:print:]])+/ /' > $UPLOADED_TXT_FILE
CONTENT=$(cat $UPLOADED_TXT_FILE)
# remove (or replace) any occurence of ...
# * \n (line breaks),
# * non-printable chars,
# * \ (backslashs) or
# * " (hyphens)
# -> otherwise error occurs during creation of document in DMS webapp
if [[ -z "${CONTENT// }" ]]; then
CONTENT=""
fi
rm $SIDECAR_FILE
rm $UPLOADED_TXT_FILE
else
CONTENT=""
fi
#################################
# search document meta data in API
#################################
# get document path from original file (reduced by base dir + first subsequent dir which is either 'inbox' oder 'uploads')
DOCUMENT_PATH=${ORIG_FILE#/*/*/*/}
echo "`date +%Y-%m-%dT%H:%M:%S%:z` - INFO: searching document for file " $FILE_NAME " in DMS web application ..." >> $LOGFILE 2>&1
# jq pulls the first matching document's id; a non-integer means "not found".
DOCUMENT_ID=$(curl -s "$DOCUMENT_API_URL?page=1&search=path==$DOCUMENT_PATH" | jq -r '.data[0].id')
# figure out if cURL command was successful at all
# -> OCR scans where is was not possible to create the document meta data for are moved to the error folder
if [ $? -ne 0 ]; then
echo "`date +%Y-%m-%dT%H:%M:%S%:z` - WARN: unable to make API call GET $DOCUMENT_API_URL?page=1&search=path==$DOCUMENT_PATH ..." >> $LOGFILE 2>&1
else
# additionally, check if we were able to extract the Document ID (must be integer) from the response
if ! [[ $DOCUMENT_ID =~ ^[0-9]+$ ]] ; then
echo " --> no document found in in DMS web application. Continue processing under the assumption that the file " $FILE_NAME " does not exist yet ..." >> $LOGFILE 2>&1
DOCUMENT_ID="undefined"
else
echo " --> document with ID $DOCUMENT_ID found in DMS web application ..." >> $LOGFILE 2>&1
fi
fi
#################################
# send document meta data to API
#################################
# create new document if it does not exist
if [[ $DOCUMENT_ID == "undefined" ]]; then
# build JSON request to send
JSON_BODY="{ \"filename\": \"$FILE_NAME\", \"content\": \"$CONTENT\"}"
echo "`date +%Y-%m-%dT%H:%M:%S%:z` - INFO: creating document for file " $FILE_NAME " in DMS web application ..." >> $LOGFILE 2>&1
RESPONSE=$(curl -s -X POST $DOCUMENT_API_URL -H "accept: application/json" -H "Content-Type: application/json" -d "$JSON_BODY" -o -)
# figure out if cURL command was successful at all
# -> OCR scans where is was not possible to create the document meta data for are moved to the error folder
if [ $? -ne 0 ]; then
echo "`date +%Y-%m-%dT%H:%M:%S%:z` - ERROR: unable to make API call POST $DOCUMENT_API_URL ..." >> $LOGFILE 2>&1
echo "       abort processing. moving file " $FILE_NAME " to error folder. please investigate file " $FILE_NAME " further ..." >> $LOGFILE 2>&1
mv $OCRED_FILE $ERROR_FILE
exit 1
else
# additionally, check if we were able to extract the Document ID (must be integer) from the response
DOCUMENT_ID=$(echo "$RESPONSE" | jq '.id')
if ! [[ $DOCUMENT_ID =~ ^[0-9]+$ ]] ; then
echo "`date +%Y-%m-%dT%H:%M:%S%:z` - ERROR: unable to interpret Document ID $DOCUMENT_ID as Integer from HTTP response $RESPONSE ..." >> $LOGFILE 2>&1
echo "       JSON_BODY: $JSON_BODY" >> $LOGFILE 2>&1
echo "       abort processing. moving file " $FILE_NAME " to error folder. please investigate file " $FILE_NAME " further ..." >> $LOGFILE 2>&1
mv $OCRED_FILE $ERROR_FILE
exit 1
fi
fi
else
# Document already exists: refresh only its OCR text content.
# build JSON request to send
JSON_BODY="{\"content\": \"$CONTENT\"}"
echo "`date +%Y-%m-%dT%H:%M:%S%:z` - INFO: updating document content for document with ID $DOCUMENT_ID in DMS web application ..." >> $LOGFILE 2>&1
RESPONSE=$(curl -s -X PUT $DOCUMENT_API_URL/$DOCUMENT_ID -H "accept: application/json" -H "Content-Type: application/json" -d "$JSON_BODY" -o -)
# figure out if cURL command was successful at all
if [ $? -ne 0 ]; then
echo "`date +%Y-%m-%dT%H:%M:%S%:z` - WARN: unable to make API call PUT $DOCUMENT_API_URL/$DOCUMENT_ID ..." >> $LOGFILE 2>&1
fi
fi
#################################
# sent document file to API
#################################
echo "`date +%Y-%m-%dT%H:%M:%S%:z` - INFO: adding file $FILE_NAME to document with ID $DOCUMENT_ID in DMS web application ..." >> $LOGFILE 2>&1
HTTP_STATUS_CODE=$(curl -F document=@$OCRED_FILE "$DOCUMENT_API_URL/$DOCUMENT_ID/binary" -s -o /dev/null -w "%{http_code}")
# figure out if cURL command was successful at all
# -> OCR scans which upload has failed are moved to a special folder
if [ $? -ne 0 ]; then
echo "`date +%Y-%m-%dT%H:%M:%S%:z` - ERROR: unable to upload $FILE_NAME to $DOCUMENT_API_URL/$DOCUMENT_ID/binary." >> $LOGFILE 2>&1
echo "       moving file " $FILE_NAME " to error folder. please investigate file " $FILE_NAME " further ..." >> $LOGFILE 2>&1
mv $OCRED_FILE $FAILED_UPLOAD_FILE
exit 1
else
# additionally, check if HTTP_STATUS_CODE indicates successful processing of the uploaded file
# (any 2xx code counts as success)
case "$HTTP_STATUS_CODE" in
2*) echo "`date +%Y-%m-%dT%H:%M:%S%:z` - INFO: upload successful ..." >> $LOGFILE 2>&1
# successfully uploaded files are removed from the harddrive (as they are now available in the DMS webapp)
rm $OCRED_FILE
;;
*) echo "`date +%Y-%m-%dT%H:%M:%S%:z` - ERROR: Unexpected server response (HTTP_STATUS_CODE: $HTTP_STATUS_CODE)." >> $LOGFILE 2>&1
echo "       moving file " $FILE_NAME " to error folder. please investigate file " $FILE_NAME " further ..." >> $LOGFILE 2>&1
# failed uploads need to be moved to a folder where they can be investigated further
mv $OCRED_FILE $FAILED_UPLOAD_FILE
exit 1
;;
esac
fi
| true
|
9b1589d1ec485ed7463f2506f94be4b8e3e3aae4
|
Shell
|
nourey/build-gcp
|
/su.sh
|
UTF-8
| 5,211
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Interactive bootstrap for a Google Cloud VM:
#   1. download + install the Cloud SDK and run `gcloud init`,
#   2. walk the user through instance name / machine type / zone / disk size,
#   3. create the instance and firewall rules, copy this repo onto it, and
#      kick off the second-stage script (su-2.sh) over SSH.
while :
do
cat WELCOME.txt
printf "\n"
read -p "+---------------------------------------+
|     To start y. To exit n.            |
+---------------------------------------+" start_answer
case $start_answer in
[Yy]* )break;;
[Nn]*)exit;;
esac
done
cd /home
printf "\nDownloading google-cloud-sdk\n"
sudo curl -O https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-327.0.0-linux-x86_64.tar.gz
sudo chmod -R 777 google-cloud-sdk-327.0.0-linux-x86_64.tar.gz
sudo tar zxvf google-cloud-sdk-327.0.0-linux-x86_64.tar.gz
sudo chmod -R 777 google-cloud-sdk
cd google-cloud-sdk
printf "\nInstalling google-cloud-sdk\n"
./install.sh
./bin/gcloud init
cd /home
printf "\n"
read -p "Which name do you want to give your Google VM Instance? " INSTANCE_NAME
printf "\nSelect your machine type and your zone.\n"
printf "\nAs an example for selecting machine type, N2 standard machine type has 4gb memory for per CPU. Which means if you choose n2-standard-8 as your machine type, your machine will have 32 GB Memory.\nUsually n2-standard-8 has enough capabilities. But if you don't feel satisfied with the machine type you chosed, you can change your machine type anytime you want.\n"
printf "\nMore information for machine types visit: https://cloud.google.com/compute/docs/machine-types \n"
printf "\nSee how can you change your Instance type in GCP Dashboard: https://cloud.google.com/compute/docs/instances/changing-machine-type-of-stopped-instance#console\n"
printf "\nBe careful with your selected Zone. You may want to double check the correspondence Zone for your desired Instance Type.\n "
# Zone/type exploration loop: list options until the user commits with 'e'.
while :
do
read -p "You can see available machine types in desired Zone by pressing z.
You can see available zones for desired Machine Type by pressing m.
When you find the right machine type with corresponding zone you can make your selection by pressing e. " answ_zone
case $answ_zone in
[Zz]* )read -p "Select your Instance Zone to see available Instance Types: " ZONE;
gcloud compute machine-types list --filter $ZONE;;
[Mm]* )read -p "Select your Machine Type to see available Zones: " INSTANCE_TYPE;
gcloud compute machine-types list --filter $INSTANCE_TYPE;;
[Ee]* )read -p "Please enter your Instance Type and Instance Zone in the right format. (For an example europe-west3-c n2-standard-8): " ZONE INSTANCE_TYPE;
ZONE=$ZONE;
INSTANCE_TYPE=$INSTANCE_TYPE;
break;;
esac
done
printf "\n"
echo "Selected Zone: "$ZONE
echo "Selected Instance Type: "$INSTANCE_TYPE
printf "\n"
read -p "Give the integer value of Boot Disk Size in gb's (i.e. 120)" BOOT_DISK_SIZE
BOOT_DISK_SIZE+="GB"
read -p "This is the last step before initializing your machine. If you feel hesitant with your choices, you can change them now. (press enter) " answer
# Final review/edit loop: any configuration value can be overridden before
# the instance is created; 'n' proceeds.
while :
do
echo "To exit (Nn)"
echo "Which configuration do you want to edit?"
echo "[1] Instance Name:" $INSTANCE_NAME
echo "[2] Instance Type:" $INSTANCE_TYPE
echo "[3] Zone:" $ZONE
echo "[4] Boot Disk Size:"$BOOT_DISK_SIZE
read -p "You can select with numerical choices. " answer
case $answer in
[1]* )read -p "Give your Instance a name " new_name;
INSTANCE_NAME=$new_name;;
[2]* )read -p "Select your new Instance Type " new_type;
INSTANCE_TYPE=$new_type;;
[3]* )read -p "Select your new Zone " new_zone;
ZONE=$new_zone;;
[4]* )read -p "Select new Boot Disk Size " new_bsize;
BOOT_DISK_SIZE=$new_bsize;
BOOT_DISK_SIZE+="GB";;
[Nn]* ) break;;
esac
done
printf "\n"
export IMAGE_PROJECT="debian-cloud"
export IMAGE="debian-10-buster-v20210217"
echo "Your Instance Configuration is as below:"
echo " --Instance Name: " $INSTANCE_NAME
echo " --Instance Type: " $INSTANCE_TYPE
echo " --Zone: " $ZONE
echo " --Boot Disk Size:" $BOOT_DISK_SIZE
echo " --Image: " $IMAGE
printf "\n"
gcloud compute instances create $INSTANCE_NAME \
--zone=$ZONE \
--machine-type=$INSTANCE_TYPE \
--metadata serial-port-enable=TRUE \
--image=$IMAGE \
--image-project=$IMAGE_PROJECT \
--boot-disk-size=$BOOT_DISK_SIZE
printf "\nYour machine has been configured. Now you have to login your gmail account to build the machine.\n"
# Allow inbound http/https plus 8888 (e.g. a notebook/dev server).
gcloud compute firewall-rules create sample-http \
--description "Incoming http and https allowed." \
--allow tcp:80,tcp:443,tcp:8888
printf "\nFirewall rules configured.\n"
gcloud auth login
PROJECT=$(gcloud config get-value core/project 2> /dev/null)
# Give the new VM time to boot before copying files / opening SSH.
printf "\nWaiting for the machine to be raised."
sleep 10
printf "\nSending second step\n"
sudo gcloud compute scp --recurse /home/build-gcp $INSTANCE_NAME:/home --ssh-key-expire-after=2m --project $PROJECT --zone $ZONE
printf "\nSecond step is sent"
sleep 10
printf "\nEstablishing SSH\n"
gcloud compute ssh $INSTANCE_NAME --project $PROJECT --zone $ZONE --command 'cd /home/build-gcp && bash su-2.sh'
| true
|
8268dbe9b2fdf3bfa0ea5021439570e6dd23f820
|
Shell
|
message-db/message-db
|
/test/_controls/write-message-correlated.sh
|
UTF-8
| 430
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
function write-message-correlated {
local stream_name=${1:-$(stream-name)}
local instances=${2:-1}
local correlation_stream_name=${3:-"someCorrelation"}
if [ ! -z ${CORRELATION+x} ]; then
correlation_stream_name=$CORRELATION
fi
metadata="'{\"correlationStreamName\": \"$correlation_stream_name\"}'"
METADATA=$metadata STREAM_NAME=$stream_name INSTANCES=$instances database/write-test-message.sh > /dev/null
}
| true
|
0b35463f5797ab3ce22d8151dce930bcb8cf5735
|
Shell
|
GangZhao-NOAA/NOAA_3drtma
|
/scripts/exrtma3d_updatevars.ksh
|
UTF-8
| 9,973
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/ksh --login
set -x
# Abort the script unless every named variable is set to a non-empty value.
# Usage: check_if_defined "var1_name" "var2_name" ...
# Note: intentionally uses the script-global variables `str` and `path`.
check_if_defined() {
  for str in "$@"; do
    eval "path=\${$str}"
    # Non-empty value: nothing to report for this name.
    [ -n "${path}" ] && continue
    ${ECHO} "ERROR: \$${str} is not defined"
    exit 1
  done
}
# Abort the script unless every named variable points at an existing directory.
# Usage: check_dirs_exist "var1_name" "var2_name" ...
# Note: intentionally uses the script-global variables `str` and `path`.
check_dirs_exist() {
  for str in "$@"; do
    eval "path=\${$str}"
    # Directory present: move on to the next name.
    [ -d "${path}" ] && continue
    ${ECHO} "ERROR: ${path}/ does not exist"
    exit 1
  done
}
if [ "${envir}" == "esrl" ]; then
# Set IMPI I/O performance variables
export I_MPI_EXTRA_FILESYSTEM=on
export I_MPI_EXTRA_FILESYSTEM_LIST=lustre:panfs
fi
# make sure executable exists
if [ ! -f ${EXECrtma3d}/${exefile_name_updatevars} ]; then
${ECHO} "ERROR: executable '${EXECrtma3d}/${exefile_name_updatevars}' does not exist!"
exit 1
fi
# Check to make sure required directory defined and existed
check_if_defined "FCST_LENGTH" "GSIRUN_DIR" "FIXwrf" "PDY" "cyc" "subcyc"
check_dirs_exist "GSIRUN_DIR" "FIXwrf"
# Initialize an array of WRF input dat files that need to be linked
set -A WRF_DAT_FILES ${FIXwrf}/run/LANDUSE.TBL \
${FIXwrf}/run/RRTM_DATA \
${FIXwrf}/run/RRTM_DATA_DBL \
${FIXwrf}/run/RRTMG_LW_DATA \
${FIXwrf}/run/RRTMG_LW_DATA_DBL \
${FIXwrf}/run/RRTMG_SW_DATA \
${FIXwrf}/run/RRTMG_SW_DATA_DBL \
${FIXwrf}/run/VEGPARM.TBL \
${FIXwrf}/run/GENPARM.TBL \
${FIXwrf}/run/SOILPARM.TBL \
${FIXwrf}/run/MPTABLE.TBL \
${FIXwrf}/run/URBPARM.TBL \
${FIXwrf}/run/URBPARM_UZE.TBL \
${FIXwrf}/run/ETAMPNEW_DATA \
${FIXwrf}/run/ETAMPNEW_DATA.expanded_rain \
${FIXwrf}/run/ETAMPNEW_DATA.expanded_rain_DBL \
${FIXwrf}/run/ETAMPNEW_DATA_DBL \
${FIXwrf}/run/co2_trans \
${FIXwrf}/run/ozone.formatted \
${FIXwrf}/run/ozone_lat.formatted \
${FIXwrf}/run/ozone_plev.formatted \
${FIXwrf}/run/bulkdens.asc_s_0_03_0_9 \
${FIXwrf}/run/bulkradii.asc_s_0_03_0_9 \
${FIXwrf}/run/capacity.asc \
${FIXwrf}/run/CCN_ACTIVATE.BIN \
${FIXwrf}/run/coeff_p.asc \
${FIXwrf}/run/coeff_q.asc \
${FIXwrf}/run/constants.asc \
${FIXwrf}/run/kernels.asc_s_0_03_0_9 \
${FIXwrf}/run/kernels_z.asc \
${FIXwrf}/run/masses.asc \
${FIXwrf}/run/termvels.asc \
${FIXwrf}/run/wind-turbine-1.tbl \
${FIXwrf}/run/tr49t85 \
${FIXwrf}/run/tr49t67 \
${FIXwrf}/run/tr67t85 \
${FIXwrf}/run/grib2map.tbl \
${FIXwrf}/run/gribmap.txt \
${FIXwrf}/run/freezeH2O.dat \
${FIXwrf}/run/qr_acr_qg.dat \
${FIXwrf}/run/qr_acr_qs.dat \
${FIXwrf}/run/eclipse_besselian_elements.dat
for file in ${WRF_DAT_FILES[@]}; do
if [ ! -s ${file} ]; then
${ECHO} "ERROR: ${file} either does not exist or is empty"
exit 1
fi
done
if [ "${subcyc}" == "-1" ]; then #hourly run
SUBH_TIME='00'
tz_str=t${cyc}z
else
SUBH_TIME=${subcyc}
tz_str=t${cyc}${subcyc}z
fi
START_TIME=`${DATE} -d "${PDY} ${cyc} ${SUBH_TIME} minutes"`
# Compute date & time components for the analysis time
YYYYMMDDHH=`${DATE} +"%Y%m%d%H" -d "${START_TIME}"`
YYYYMMDDHHMM=`${DATE} +"%Y%m%d%H%M" -d "${START_TIME}"`
time_1hour_ago=`${DATE} -d "${START_TIME} 1 hour ago" +%Y%m%d%H`
time_str=`${DATE} "+%Y-%m-%d_%H_%M_%S" -d "${START_TIME}"`
time_str2=`${DATE} "+%Y-%m-%d_%H_00_00" -d "${START_TIME}"`
END_TIME=`${DATE} -d "${START_TIME} ${FCST_LENGTH} seconds"`
#----- enter working directory -------
cd ${DATA}
${ECHO} "enter working directory:${DATA}"
export WRF_NAMELIST=${DATA}/namelist.input
if [ ${DOMAIN} == "alaska" ] ; then
${CP} ${PARMwrf}/wrf.nl_AK ${WRF_NAMELIST}
else
${CP} ${PARMwrf}/wrf.nl ${WRF_NAMELIST}
fi
# Check to make sure the wrfinput_d01 file exists
if [ -r ${GSIRUN_DIR}/wrf_inout ]; then
${ECHO} " Initial condition ${GSIRUN_DIR}/wrf_inout "
${LN} -s ${GSIRUN_DIR}/wrf_inout wrfinput_d01
#${LN} -s ${GSIRUN_DIR}/wrf_inout wrfvar_output
else
${ECHO} "ERROR: ${GSIRUN_DIR}/wrf_inout does not exist, or is not readable"
exit 1
fi
# Make links to the WRF DAT files
for file in ${WRF_DAT_FILES[@]}; do
${LN} -sf ${file} .
done
# Get the start and end time components
start_year=`${DATE} +%Y -d "${START_TIME}"`
start_month=`${DATE} +%m -d "${START_TIME}"`
start_day=`${DATE} +%d -d "${START_TIME}"`
start_hour=`${DATE} +%H -d "${START_TIME}"`
start_minute=`${DATE} +%M -d "${START_TIME}"`
start_second=`${DATE} +%S -d "${START_TIME}"`
end_year=`${DATE} +%Y -d "${END_TIME}"`
end_month=`${DATE} +%m -d "${END_TIME}"`
end_day=`${DATE} +%d -d "${END_TIME}"`
end_hour=`${DATE} +%H -d "${END_TIME}"`
end_minute=`${DATE} +%M -d "${END_TIME}"`
end_second=`${DATE} +%S -d "${END_TIME}"`
if [ ${DOMAIN} == "alaska" ] ; then
mod3=$(( $start_hour % 3 ))
if [ $mod3 -eq 0 ]; then #don't run wrf since it will crash
echo "hour=$start_hour, skip wrf run"
exit 0
fi
fi
# Compute number of days and hours for the run
(( run_days = 0 ))
(( run_hours = 0 ))
# Create patterns for updating the wrf namelist
run=[Rr][Uu][Nn]
equal=[[:blank:]]*=[[:blank:]]*
start=[Ss][Tt][Aa][Rr][Tt]
end=[Ee][Nn][Dd]
year=[Yy][Ee][Aa][Rr]
month=[Mm][Oo][Nn][Tt][Hh]
day=[Dd][Aa][Yy]
hour=[Hh][Oo][Uu][Rr]
minute=[Mm][Ii][Nn][Uu][Tt][Ee]
second=[Ss][Ee][Cc][Oo][Nn][Dd]
# Update the run_days,run_hours,start_time,end_time in wrf namelist.input
${SED} -i "\
s/\(${run}_${day}[Ss]\)${equal}[[:digit:]]\{1,\}/\1 = ${run_days}/; \
s/\(${run}_${hour}[Ss]\)${equal}[[:digit:]]\{1,\}/\1 = ${run_hours}/; \
\
s/\(${start}_${year}\)${equal}[[:digit:]]\{4\}/\1 = ${start_year}/; \
s/\(${start}_${month}\)${equal}[[:digit:]]\{2\}/\1 = ${start_month}/; \
s/\(${start}_${day}\)${equal}[[:digit:]]\{2\}/\1 = ${start_day}/; \
s/\(${start}_${hour}\)${equal}[[:digit:]]\{2\}/\1 = ${start_hour}/; \
s/\(${start}_${minute}\)${equal}[[:digit:]]\{2\}/\1 = ${start_minute}/;\
s/\(${start}_${second}\)${equal}[[:digit:]]\{2\}/\1 = ${start_second}/;\
\
s/\(${end}_${year}\)${equal}[[:digit:]]\{4\}/\1 = ${end_year}/; \
s/\(${end}_${month}\)${equal}[[:digit:]]\{2\}/\1 = ${end_month}/; \
s/\(${end}_${day}\)${equal}[[:digit:]]\{2\}/\1 = ${end_day}/; \
s/\(${end}_${hour}\)${equal}[[:digit:]]\{2\}/\1 = ${end_hour}/; \
s/\(${end}_${minute}\)${equal}[[:digit:]]\{2\}/\1 = ${end_minute}/; \
s/\(${end}_${second}\)${equal}[[:digit:]]\{2\}/\1 = ${end_second}/; \
" ${WRF_NAMELIST}
# Move existing rsl files to a subdir if there are any
${ECHO} "Checking for pre-existing rsl files"
if [ -f "rsl.out.0000" ]; then
rsldir=rsl.`${LS} -l --time-style=+%Y%m%d%H%M%S rsl.out.0000 | ${CUT} -d" " -f 7`
${MKDIR} ${rsldir}
${ECHO} "Moving pre-existing rsl files to ${rsldir}"
${MV} rsl.out.* ${rsldir}
${MV} rsl.error.* ${rsldir}
else
${ECHO} "No pre-existing rsl files were found"
fi
# Run WRF to update reflectivity fields
export pgm="rtma3d_updatevars"
. prep_step
startmsg
msg="***********************************************************"
postmsg "$jlogfile" "$msg"
msg=" begin updating reflectivity by a one-time_step WRF"
postmsg "$jlogfile" "$msg"
msg="***********************************************************"
postmsg "$jlogfile" "$msg"
if [ "${envir}" == "esrl" ]; then #Jet
CP_LN="${LN} -sf"
else
CP_LN=${CP}
fi
${CP_LN} ${EXECrtma3d}/${exefile_name_updatevars} ${pgm}
now=`${DATE} +%Y%m%d%H%M%S`
${MPIRUN} ${pgm}
export err=$?; err_chk
# Save a copy of the RSL files
rsldir=rsl.wrf.${now}
${MKDIR} ${rsldir}
mv rsl.out.* ${rsldir}
mv rsl.error.* ${rsldir}
# Check to see if the 0h output is there:
if [ ! -e "wrfout_d01_${time_str}" ]; then
${ECHO} "WRF failed at the first time step!"
exit 1
fi
# Output successful so write status to log
${ECHO} "Assemble Reflectivity fields back into wrf_inout"
if [ "${envir}" == "esrl" ]; then #Jet
#${NCKS} -A -H -v REFL_10CM,COMPOSITE_REFL_10CM,REFL_10CM_1KM,REFL_10CM_4KM,U10,V10 wrfout_d01_${time_str} ${GSIRUN_DIR}/wrf_inout
${NCKS} -A -H -v REFL_10CM,COMPOSITE_REFL_10CM,REFL_10CM_1KM,REFL_10CM_4KM wrfout_d01_${time_str} ${GSIRUN_DIR}/wrf_inout
else
if [ ! -f ${EXECrtma3d}/${exefile_name_updatevars_ncfields} ]; then
${ECHO} "ERROR: executable '${EXECrtma3d}/${exefile_name_updatevars_ncfields}' does not exist!"
exit 1
fi
${CP_LN} ${EXECrtma3d}/${exefile_name_update_ncfields} .
${LN} -s wrfout_d01_${time_str} wrfout_d01
${exefile_name_update_ncfields} wrfout_d01 wrf_inout
export err=$?; err_chk
if [ -f ${COMOUTgsi_rtma3d}/${ANLrtma3d_FNAME} ]; then
${ECHO} "Erasing the GSI generated analysis file to be replaced by modified analysis."
${RM} ${COMOUTgsi_rtma3d}/${ANLrtma3d_FNAME}
fi
${CP_LN} -p wrf_inout ${COMOUTgsi_rtma3d}/${ANLrtma3d_FNAME}
fi
${ECHO} "update_vars.ksh completed successfully at `${DATE}`"
# Saving some files
${CP} -p namelist.input ${COMOUTwrf_rtma3d}/namelist.input_${cycle_str}
if [ "${envir}" == "esrl" ]; then #Jet
${RM} -f ${DATA}/sig*
${RM} -f ${DATA}/obs*
${RM} -f ${DATA}/pe*
fi
msg="JOB $job FOR $RUN HAS COMPLETED NORMALLY"
postmsg "$jlogfile" "$msg"
exit 0
| true
|
c89c20a8984f8373402c8a42628f436bf0615ffd
|
Shell
|
jasl8r/docker-seafile
|
/assets/runtime/env-defaults
|
UTF-8
| 1,338
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Default values for the Seafile container environment.  Every variable uses
# ${VAR:-default}, so values already supplied by the runtime environment win.
# An empty default means "not configured; resolved later at setup time".

# Debugging: fall back to the legacy DEBUG_ENTRYPOINT variable.
DEBUG=${DEBUG:-$DEBUG_ENTRYPOINT}

# Data layout under the Seafile root data directory.
# Assumes SEAFILE_ROOT_DATA_DIR and SEAFILE_HOME are set by the caller — TODO confirm.
SEAFILE_TMP_DIR="${SEAFILE_ROOT_DATA_DIR}/tmp"
CCNET_DATA_DIR=${CCNET_DATA_DIR:-$SEAFILE_ROOT_DATA_DIR/ccnet}
SEAFILE_DATA_DIR=${SEAFILE_DATA_DIR:-$SEAFILE_ROOT_DATA_DIR/seafile}
SEAHUB_DATA_DIR=${SEAHUB_DATA_DIR:-$SEAFILE_ROOT_DATA_DIR/seahub}
SEAFILE_CONF_DIR="${SEAFILE_HOME}/conf"
SEAFILE_PID_DIR="${SEAFILE_HOME}/pids"

# Public identity of the Seafile instance.
SEAFILE_NAME=${SEAFILE_NAME:-Seafile}
SEAFILE_HOST=${SEAFILE_HOST:-localhost}
SEAFILE_SCHEME=${SEAFILE_SCHEME:-http}

# ccnet (internal RPC) settings.
CCNET_ID=${CCNET_ID:-}
CCNET_DB_NAME=${CCNET_DB_NAME:-}

# File server ports and WebDAV.
SEAFDAV_ENABLED=${SEAFDAV_ENABLED:-false}
SEAFDAV_PORT=${SEAFDAV_PORT:-8080}
SEAFILE_PORT=${SEAFILE_PORT:-8082}
SEAFILE_DB_NAME=${SEAFILE_DB_NAME:-}

# Seahub (web UI).
SEAHUB_SECRET_KEY=${SEAHUB_SECRET_KEY:-}
SEAHUB_DB_NAME=${SEAHUB_DB_NAME:-}

# Database connection; adapter, host, and credentials left empty by default.
DB_ADAPTER=${DB_ADAPTER:-}
DB_HOST=${DB_HOST:-}
DB_PORT=${DB_PORT:-}
DB_USER=${DB_USER:-}
DB_PASS=${DB_PASS:-}
DB_ROOT_PASS=${DB_ROOT_PASS:-}
DB_ENCODING=${DB_ENCODING:-}

# Outgoing mail (SMTP).
SMTP_HOST=${SMTP_HOST:-}
SMTP_PORT=${SMTP_PORT:-25}
SMTP_USER=${SMTP_USER:-}
SMTP_PASS=${SMTP_PASS:-}
SMTP_TLS=${SMTP_TLS:-False}
# Sender address: prefer the SMTP user, then a placeholder.
SEAFILE_EMAIL=${SEAFILE_EMAIL:-${SMTP_USER}}
SEAFILE_EMAIL=${SEAFILE_EMAIL:-example@example.com}

# LDAP authentication (optional; enabled when LDAP_HOST is set).
LDAP_HOST=${LDAP_HOST:-}
LDAP_BASE=${LDAP_BASE:-}
LDAP_BIND_DN=${LDAP_BIND_DN:-}
LDAP_PASS=${LDAP_PASS:-}
LDAP_UID=${LDAP_UID:-mail}
LDAP_TLS_REQCERT=${LDAP_TLS_REQCERT:-}
| true
|
ec256288043e9fcd444bf2ecb5cebfccfe3b0aff
|
Shell
|
dlimery/aws-vpcs
|
/myCreateVpc.sh
|
UTF-8
| 4,629
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Script filename = "myCreateVpc.sh"
# Create AWS Virtual Private Cloud (VPCs)
# Sourced from http://www.alittlemadness.com/category/bash/
# and from https://kvz.io/blog/2013/11/21/bash-best-practices/
set -o errexit
set -o pipefail
set -o nounset
# Enabling bash tracing
#set -o xtrace
# Set magic variables for current file & dir
__dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
__file="${__dir}/$(basename "${BASH_SOURCE[0]}")"
__base="$(basename ${__file} .sh)"
__root="$(cd "$(dirname "${__dir}")" && pwd)"
# misc variables
name="your VPC/network name"
# input variables
aws_region="eu-west-3"
aws_availability_zone="eu-west-3c"
aws_vpc_name="$name VPC"
aws_subnet_name="$name Subnet"
aws_instance_gateway_name="$name Gateway"
aws_route_table_name="$name Route Table"
aws_security_group_name="$name Security Group"
aws_subnet_cidr_block="172.22.1.0/24"
# constants for colored output
readonly NC='\033[0m' # No Color
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[0;33m'
readonly CYAN='\033[0;36m'
readonly GREY='\033[0;90m'
### Functions
function my_pause() {
read -p "Press enter to continue"
}
#######################################
# Validate that an argument is an IPv4 /16 CIDR block of the form A.B.0.0/16.
# Arguments: $1 - candidate CIDR string
# Returns:   0 when valid (A,B <= 255 and both host octets are zero),
#            1 otherwise
#######################################
function validate_vpc_cidr_block() {
  local ip=${1}
  local return_code=1
  # Shape check: four dotted decimal groups followed by "/16".
  # (local + single quotes: the original leaked `testformat` and `OIFS` into
  # the global scope, and relied on an unnecessary backslash-escaped slash.)
  local testformat='^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}/16$'
  if [[ "${ip}" =~ ${testformat} ]]; then
    # Split "A.B.C.D/16" on '.' and '/' into (A B C D 16); word-splitting of
    # the unquoted $ip is intentional here.
    local OIFS=$IFS
    IFS="./"
    local -a octets=($ip)
    IFS=$OIFS
    # A valid /16 VPC block needs in-range network octets and zero host octets.
    [[ ${octets[0]} -le 255 && ${octets[1]} -le 255 \
       && ${octets[2]} -eq 0 && ${octets[3]} -eq 0 ]]
    return_code=$?
  fi
  return ${return_code}
}
function display_usage() {
local return_code=0
echo -e "\t~\t~\t~\t~\t~\t~\t~"
echo -e "\n${GREEN}myCreateVpc" \
"${NC}- bash script to create AWS VPCs -" \
"${GREY}[pre-release-0.0.1]${NC}\n"
echo -e "USAGE: ${CYAN}${__base} ${NC}<${YELLOW}vpc_cidr_block${NC}>\n"
echo -e "DESCRIPTION:\n"
echo -e " myCreateVpc is a tool for creating AWS Virtual Private Cloud"
echo -e " (VPC) instances. Virtual Private Cloud is a virtual network"
echo -e " dedicated to an AWS account. It is logically isolated from"
echo -e " other virtual networks in the AWS Cloud. AWS resources can be"
echo -e " launched into VPCs, such as Amazon EC2 instances."
echo -e " myCreateVpc is a bash script which leverages AWS CLI commands."
echo -e " It accepts only one argument: an IPv4 CIDR block in /16\n"
echo -e " For more details see https://github.com/dlimery/aws-vpcs\n"
echo -e "TIP:\n"
echo -e " <${YELLOW}vpc_cidr_block${NC}>" \
"MUST have the following IPv4 CIDR format:" \
"${YELLOW}A.B.${NC}0${YELLOW}.${NC}0${YELLOW}/16${NC}\n"
echo -e "\texample: ${CYAN}${__base} ${YELLOW}172.22.0.0/16${NC}\n"
return ${return_code}
}
function syntax_status() {
local return_code=1
if [[ "${1}" -gt "1" ]]; then
return_code=2
echo -e "\nOUTPUT:"
echo -e "\n${NC}[${RED}SYNTAX ERROR${NC}]" \
"Too many arguments!\n"
display_usage
exit 2
else
if validate_vpc_cidr_block ${2}; then
return_code=0
echo -e "\n${CYAN}myCreateVpc" \
"${NC}- bash script to create AWS VPCs -" \
"${GREY}[pre-release-0.0.1]${NC}\n"
echo -e "\t~\t~\t~\t~\t~\t~\t~"
echo -e "\nOUTPUT:"
echo -e "\n[${GREEN}OK${NC}]" \
"${CYAN}${2} ${NC}is a valid /16 CIDR Block\n"
else
return_code=3
echo -e "\nOUTPUT:"
echo -e "\n${NC}[${RED}SYNTAX ERROR${NC}]" \
"${CYAN}${2} ${NC}is not compliant to IPv4 format:" \
"${CYAN}A.B.0.0/16${NC}\n"
display_usage
exit 3
fi
fi
return ${return_code}
}
function aws_ec2_create_vpc() {
local __new_vpc_id=${2}
local aws_vpc_cidr_block=${1}
# Starting the creation process
echo -e "\nCreating VPC..."
# create vpc
cmd_output=$(aws ec2 create-vpc \
--cidr-block "${aws_vpc_cidr_block}" \
--output json)
vpc_id=$(echo -e "${cmd_output}" | /usr/bin/jq '.Vpc.VpcId' | tr -d '"')
eval $__new_vpc_id="'${vpc_id}'"
# show result
echo -e "\n[${GREEN}OK${NC}] VPC ${CYAN}'${vpc_id}' ${NC}created.\n"
}
# Attach a "Name" tag to the newly-created VPC.
# NOTE(review): the tag Value is the VPC id itself (e.g. "vpc-0abc..."), not
# the human-readable $aws_vpc_name defined at the top of the script — confirm
# this is intentional.
# Arguments: $1 - the VPC id returned by aws_ec2_create_vpc
function aws_ec2_create_tag() {
  local new_vpc_id=$1
  aws ec2 create-tags \
    --resources "$new_vpc_id" \
    --tags Key=Name,Value="$new_vpc_id"
}
function main() {
local new_vpc_id=null
# Arguments validation tests
if [[ "$#" -eq "0" ]]; then
echo -e "\n${NC}[${RED}SYNTAX ERROR${NC}]" \
"No arguments supplied\n"
display_usage
exit 1
fi
if syntax_status $# $@; then
aws_vpc_cidr_block=$@
else
exit 99
fi
aws_ec2_create_vpc ${aws_vpc_cidr_block} new_vpc_id
aws_ec2_create_tag ${new_vpc_id}
}
main "$@"
| true
|
07dcd7937872d9faff76d2eafae8eae674e12534
|
Shell
|
jirisimek98/bashScripts
|
/post-commit
|
UTF-8
| 365
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Git post-commit hook: log the working directory, push the new commit to the
# Gogs mirror, then refresh the sibling "Production" checkout from it.
# NOTE(review): credentials are hardcoded and embedded in the remote URL,
# which leaks them to shell history, logs, and `ps` output — consider a git
# credential helper or environment variables instead.
CURRENTDIR=$(pwd);
USR=root;
PASS=redhat;
# Audit trail of where the hook ran.
echo ${CURRENTDIR} >> /home/jboss/opt/data/log.txt;
git push http://${USR}:${PASS}@gogs.apps.playground.rhba.openshift-aws.rhocf-dev.com/root/jsimek-OS master;
# Keep the Production working copy in sync with the pushed master branch.
cd ../Production;
git pull http://${USR}:${PASS}@gogs.apps.playground.rhba.openshift-aws.rhocf-dev.com/root/jsimek-OS master;
cd $CURRENTDIR;
| true
|
82b2fa7a052a1431f944b8b32e0c181c48dcadbc
|
Shell
|
jambros2/ScratchRadio
|
/scripts/start_gnu_radio.sh
|
UTF-8
| 1,065
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Starts the GNU Radio driver script which is used by Scratch to access the
# GNU Radio functionality. Currently runs the script in the foreground for
# tracing activity on the console.
#
GR_SCRIPT_NAME="/usr/lib/scratch2/scratch_extensions/gnuRadioDriver.py"
COMMAND_PIPE_DIR="/tmp/gr-control"
COMMAND_PIPE_NAME=$COMMAND_PIPE_DIR/"command.pipe"
TX_MSG_PIPE_NAME=$COMMAND_PIPE_DIR/"txmessage.pipe"
RX_MSG_PIPE_NAME=$COMMAND_PIPE_DIR/"rxmessage.pipe"
SCRIPT_LOCK_NAME=$COMMAND_PIPE_DIR/"script.lock"

# Ensure the given path is a named pipe, replacing any regular file in the way.
# (Factors out the three identical if-blocks of the original script.)
# Arguments: $1 - path that must end up as a FIFO
ensure_fifo() {
  if ! [ -p "$1" ]; then
    rm -f -- "$1"
    mkfifo -- "$1"
  fi
}

# Ensure the command pipe directory is present.
mkdir -p "$COMMAND_PIPE_DIR"

# Create the named pipes if not already present.
ensure_fifo "$COMMAND_PIPE_NAME"
ensure_fifo "$TX_MSG_PIPE_NAME"
ensure_fifo "$RX_MSG_PIPE_NAME"

# Runs the GNU Radio script. Uses flock to ensure only one instance is running.
flock -w 0.1 "$SCRIPT_LOCK_NAME" python "$GR_SCRIPT_NAME"
| true
|
4be14d04aa749e807911c8e7b19735a8b93be9ad
|
Shell
|
spring-cloud-samples/spring-cloud-contract-samples
|
/producer_with_dsl_restdocs/src/test/bash/generate_swagger.sh
|
UTF-8
| 734
| 3.296875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
case "`uname`" in
Darwin* )
uname="osx"
;;
* )
uname="linux"
;;
esac
export ROOT="$( pwd )"
export TEST_ROOT="${ROOT}/src/test"
export SWAGYMNIA_BIN="${TEST_ROOT}/bash/${uname}/swaggymnia"
PATH="${PATH}:${ROOT}/node/"
export PATH
echo "Generate postman from restdocs"
node_modules/restdocs-to-postman/bin/cli -i target/generated-snippets -e insomnia -f secondLastFolder -r "${TEST_ROOT}"/swagger/replacements.json -o target/insomnia-collection.json
echo "Generate swagger from postman"
pushd target
"${SWAGYMNIA_BIN}" generate -insomnia insomnia-collection.json -config "${TEST_ROOT}"/swagger/config.json -output json
popd
echo "Swagger spec is available at $( pwd )/target/swagger.json"
| true
|
8f116908f46b0b4ea3710a5b2b8209b77a015644
|
Shell
|
asabirov/centos_provision
|
/scripts/src/app/installer/stage3/setup_vars.sh
|
UTF-8
| 531
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Populate the VARS associative array with installer defaults.
# Assumes VARS is declared by the sourcing script — TODO confirm.
# Passwords are freshly generated on every run via the sibling
# generate_password helper.
setup_vars(){
  VARS['skip_firewall']='no'
  VARS['ssl']='no'
  VARS['db_root_password']=$(generate_password)
  VARS['db_name']='keitaro'
  VARS['db_user']='keitaro'
  VARS['db_password']=$(generate_password)
  VARS['db_restore']='no'
  VARS['db_restore_path_want_exit']='no'
  VARS['admin_login']='admin'
  VARS['admin_password']=$(generate_password)
  VARS['php_engine']='php-fpm'
}
# Emit a random 16-character alphanumeric password on stdout (no trailing
# newline).  LC_ALL=C keeps tr byte-oriented so raw /dev/urandom bytes are
# filtered cleanly regardless of the ambient locale.
generate_password(){
  local PASSWORD_LENGTH=16
  LC_ALL=C tr -cd '[:alnum:]' < /dev/urandom | head -c "${PASSWORD_LENGTH}"
}
| true
|
8b477290a8fea4a7413016e77cd272bcd5392f5c
|
Shell
|
4nalejo/gihack
|
/gihack
|
UTF-8
| 2,084
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
echo -e "\e[32m
___ ___ ___ ___ ___
/ /\ ___ /__/\ / /\ / /\ /__/|
/ /:/_ / /\ \ \:\ / /::\ / /:/ | |:|
/ /:/ /\ / /:/ \__\:\ / /:/\:\ / /:/ | |:|
/ /:/_/::\ /__/::\ ___ / /::\ / /:/~/::\ / /:/ ___ __| |:|
/__/:/__\/\:\ \__\/\:\__ /__/\ /:/\:\ /__/:/ /:/\:\ /__/:/ / /\ /__/\_|:|____
\ \:\ /~~/:/ \ \:\/\ \ \:\/:/__\/ \ \:\/:/__\/ \ \:\ / /:/ \ \:\/:::::/
\ \:\ /:/ \__\::/ \ \::/ \ \::/ \ \:\ /:/ \ \::/~~~~
\ \:\/:/ /__/:/ \ \:\ \ \:\ \ \:\/:/ \ \:\
\ \::/ \__\/ \ \:\ \ \:\ \ \::/ \ \:\
\__\/ \__\/ \__\/ \__\/ \__\/
\e[0m"
while :;
do
echo -n "connect or entry: "
read ce
if [[ $ce == "connect" ]]
then
echo -n "name account: "
read nameAccount
echo -n "page: "
read page
echo -en "\e[2m\e[32m"
git clone https://github.com/$nameAccount/$page
echo -en "\e[0m"
cd $page
echo -e "\e[32mRemote $page\e[0m"
while :;
do
echo -n "shell: "
read shell
if [[ $shell == "next" ]]
then
echo -n "add: "
read add
git add $add
echo -en "\e[32m"
git status | grep "modified"
echo -en "\e[0m"
echo -n "name: "
read name
git commit -m "$name" > /dev/null 2>&1
echo -n "finish (account) enter"
read
git push
exit
else
echo -e "command \e[2m$shell\e[0m output: \e[32m\n$($shell)\e[0m"
fi
done
elif [[ $ce == "entry" ]]
then
echo -n "page: "
read page
cd $page
while :;
do
echo -n "shell: "
read shell
if [[ $shell == "next" ]]
then
echo -n "add: "
read add
git add $add
echo -en "\e[32m"
git status | grep "modified"
echo -en "\e[0m"
echo -n "name: "
read name
git commit -m "$name" > /dev/null 2>&1
echo -n "finish (account) enter"
read
git push
cd ..
rm -rfd $page
exit
else
echo -e "command \e[2m$shell\e[0m output: \e[32m\n$($shell)\e[0m"
fi
done
else
echo -n ""
fi
done
| true
|
31bb97e96e07644b7677d8fcdc18f391647864bf
|
Shell
|
Simon-Fuhaoyuan/EI401-KC4C
|
/stop_present_server.sh
|
UTF-8
| 574
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Stop the presenter server for the video_analysis_person app.
# NOTE(review): the PID is extracted by grepping ps output and taking the
# first numeric token of the matching line — presumably the PID column, but a
# numeric UID would break this; `pgrep -f` would be more robust.  Confirm.
i=$(ps -ef | grep presenter | grep video_analysis_person | grep -o '[0-9]\{1,\}' | head -n1)
if test -z "$i"
then
echo "presenter server not in process!"
else
# SIGKILL is used directly; assumes the server has no clean-shutdown handler — TODO confirm.
kill -9 $i
echo "presenter server stop success!"
fi
# delete analysis results
# The results directory is read from the last line of the config file.
result_addr=$(tail -n1 present_server_config.txt)
# The interactive cleanup of the results directory below is intentionally
# disabled; re-enable by uncommenting.
# echo "do you want to delete the analysis results? (in ${result_addr}) [y/n]:"
# read a
#a=y
#if test "$a" = "y"
#then
# rm -rf ${result_addr}/*
# echo "delete files in ${result_addr} success!"
#else
# echo "files in ${result_addr} remained!"
#fi
| true
|
a2b65d91ff637ed1c1b3496ea949a70180a5b8d0
|
Shell
|
stephanenicolas/android-unit-test
|
/scripts/run-tests.sh
|
UTF-8
| 746
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
echo "Running plugin tests."
./gradlew clean check install
temp="$?"
if [ "$temp" -ne 0 ]
then
echo "Error during the plugin tests. Gradle returned $temp."
exit 1
fi
echo "Success in running the plugin tests. Now running the plugin in a dummy application project."
cd example
../gradlew clean test
temp="$?"
if [ "$temp" -ne 0 ]
then
echo "Error during the dummy application project tests. Gradle returned $temp."
exit 1
fi
echo "Success in running the dummy application tests. Now running the plugin in a dummy library project."
cd ../example-library
../gradlew clean test
temp="$?"
if [ "$temp" -ne 0 ]
then
echo "Error during the dummy library project tests. Gradle returned $temp."
exit 1
fi
echo "Plugin tested succesfully"
| true
|
e554addb6250fee59d86912eeaaec2b6f3be667c
|
Shell
|
parkbruce/.bash
|
/think/fileinfo
|
UTF-8
| 327
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# fileinfo
FILES="/usr/sbin/accept
/usr/sbin/pwck
/usr/sbin/chroot
/usr/bin/fakefile
/sbin/badblocks
/sbin/ypbind"
clear
echo "fileinfo"
echo
for f in $FILES; do
if [ ! -e "$f" ]; then
echo "$f does not exist"
echo
continue
fi
ls -l $f
whatis $(basename $f)
echo
done
| true
|
daeac507fc75ff73b02f295eb2d79cc049878f85
|
Shell
|
Anton-Cao/dotfiles
|
/install_spacemacs.sh
|
UTF-8
| 235
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install Spacemacs into ~/.emacs.d, backing up any existing Emacs config.
# Skips installation when ~/.spacemacs already exists (already configured).
cd ~ || exit 1
# BUG FIX: the original tested [ ! -f "~/.spacemacs" ]; a tilde inside double
# quotes is NOT expanded, so the check always succeeded and the clone/backup
# ran on every invocation.  Use $HOME explicitly.
if [ ! -f "$HOME/.spacemacs" ]; then
    # Back up the existing config only when present, so mv does not error.
    [ -e .emacs.d ] && mv .emacs.d .emacs.d.bak
    [ -e .emacs ] && mv .emacs .emacs.bak
    git clone https://github.com/syl20bnr/spacemacs "$HOME/.emacs.d"
fi
echo "Copy contents of spacemacs_user_config into .spacemacs"
| true
|
48a4235fea8b9088df5faae6e311ff828f78e843
|
Shell
|
hashbang/shell-etc
|
/kernel/postinst.d/apt-auto-removal
|
UTF-8
| 2,798
| 4.15625
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
#!/bin/sh
set -e
# Author: Steve Langasek <steve.langasek@canonical.com>
#
# Mark as not-for-autoremoval those kernel packages that are:
# - the currently booted version
# - the kernel version we've been called for
# - the latest kernel version (determined using rules copied from the grub
# package for deciding which kernel to boot)
# - the second-latest kernel version, if the booted kernel version is
# already the latest and this script is called for that same version,
# to ensure a fallback remains available in the event the newly-installed
# kernel at this ABI fails to boot
# In the common case, this results in exactly two kernels saved, but it can
# result in three kernels being saved. It's better to err on the side of
# saving too many kernels than saving too few.
#
# We generate this list and save it to /etc/apt/apt.conf.d instead of marking
# packages in the database because this runs from a postinst script, and apt
# will overwrite the db when it exits.
eval $(apt-config shell APT_CONF_D Dir::Etc::parts/d)
test -n "${APT_CONF_D}" || APT_CONF_D="/etc/apt/apt.conf.d"
config_file=${APT_CONF_D}/01autoremove-kernels
eval $(apt-config shell DPKG Dir::bin::dpkg/f)
test -n "$DPKG" || DPKG="/usr/bin/dpkg"
installed_version="$1"
running_version="$(uname -r)"
# Compare two kernel version strings using dpkg's version ordering.
# Pre-release markers (pre/rc/test/git/old/trunk) are rewritten with a leading
# '~', which dpkg sorts before the empty string — so e.g. "5.10-rc1" < "5.10".
# Arguments: $1, $2 - version strings
# Returns:   0 if $1 is strictly greater than $2 (per dpkg), non-zero otherwise
version_test_gt ()
{
    local version_test_gt_sedexp="s/[._-]\(pre\|rc\|test\|git\|old\|trunk\)/~\1/g"
    local version_a="`echo "$1" | sed -e "$version_test_gt_sedexp"`"
    local version_b="`echo "$2" | sed -e "$version_test_gt_sedexp"`"
    $DPKG --compare-versions "$version_a" gt "$version_b"
    return "$?"
}
list="$(${DPKG} -l | awk '/^ii[ ]+(linux|kfreebsd|gnumach)-image-[0-9]+\./ && $2 !~ /-dbg$/ { print $2 }' | sed -e 's#\(linux\|kfreebsd\|gnumach\)-image-##')"
latest_version=""
previous_version=""
for i in $list; do
if version_test_gt "$i" "$latest_version"; then
previous_version="$latest_version"
latest_version="$i"
elif version_test_gt "$i" "$previous_version"; then
previous_version="$i"
fi
done
if [ "$latest_version" != "$installed_version" ] \
|| [ "$latest_version" != "$running_version" ] \
|| [ "$installed_version" != "$running_version" ]
then
# We have at least two kernels that we have reason to think the
# user wants, so don't save the second-newest version.
previous_version=
fi
kernels="$(echo "$latest_version
$installed_version
$running_version
$previous_version" | sort -u | sed -e 's#\.#\\.#g' )"
generateconfig() {
cat <<EOF
// DO NOT EDIT! File autogenerated by $0
APT::NeverAutoRemove
{
EOF
apt-config dump --no-empty --format '%v%n' 'APT::VersionedKernelPackages' | while read package; do
for kernel in $kernels; do
echo " \"^${package}-${kernel}$\";"
done
done
echo '};'
}
generateconfig > "${config_file}.dpkg-new"
mv "${config_file}.dpkg-new" "$config_file"
| true
|
4a9585ea87f9fe2ff86ea57412f90094e2957afe
|
Shell
|
akankshamahajan15/Backup-filesystem-in-kernel
|
/CSE-506/testscript.sh
|
UTF-8
| 430
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Runs the test scripts test01.sh through test13.sh in sequence,
# storing results in the "results" file and running logs in "log".
# BUG FIX: the original shebang was /bin/sh, but the {01..13} brace range is
# a bash-ism that POSIX sh (dash) does not expand — run under bash instead.
/bin/rm -f log
/bin/rm -f results
# compile bkptcl.c and cbkptcl.c
make
touch log
touch results
for i in {01..13}
do
    echo "running script test$i.sh"
    "./test$i.sh"
    echo "completed script test$i.sh"
    echo ""
done
echo "Check logs and results file in this folder to the details"
| true
|
5de182add7a55fd878d4cc62b11316176d047a4b
|
Shell
|
jelaas/bifrost-build
|
/all/opt-openldap-2.4.33-1/Build.sh
|
UTF-8
| 3,296
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
SRCVER=openldap-2.4.33
PKG=opt-$SRCVER-1 # with build version
# PKGDIR is set by 'pkg_build'. Usually "/var/lib/build/all/$PKG".
PKGDIR=${PKGDIR:-/var/lib/build/all/$PKG}
SRC=/var/spool/src/$SRCVER.tgz
[ -f /var/spool/src/$SRCVER.tar.bz2 ] && SRC=/var/spool/src/$SRCVER.tar.bz2
BUILDDIR=/var/tmp/src/$SRCVER
DST="/var/tmp/install/$PKG"
#########
# Simple inplace edit with sed.
# Usage: sedit 's/find/replace/g' Filename
#########
# Simple inplace edit with sed.
# Usage: sedit 's/find/replace/g' Filename
# Uses mktemp instead of the original predictable /tmp/sedit.$$ name, which
# was vulnerable to symlink attacks in a shared /tmp; also only overwrites
# the target when sed itself succeeded.
function sedit {
    local tmp
    tmp=$(mktemp) || return 1
    sed "$1" "$2" > "$tmp" && cp "$tmp" "$2"
    rm -f "$tmp"
}
#########
# Fetch sources
./Fetch-source.sh || exit $?
pkg_uninstall # Uninstall any dependencies used by Fetch-source.sh
#########
# Install dependencies:
# pkg_available dependency1-1 dependency2-1
# pkg_install dependency1-1 || exit 2
pkg_install groff-1.21-1 || exit 2 # Needed to convert man-pages: see below
pkg_install db-5.2.36-1 || exit 2
pkg_install openssl-0.9.8x-1 || exit 2
pkg_install tcp_wrappers-7.6-1 || exit 2
# Compile against musl:
# pkg_install musl-0.9.1-1 || exit 2
# export CC=musl-gcc
#########
# Unpack sources into dir under /var/tmp/src
cd $(dirname $BUILDDIR); tar xf $SRC
#########
# Patch
cd $BUILDDIR || exit 1
libtool_fix-1
# patch -p1 < $PKGDIR/mypatch.pat
#########
# Configure
OPTPREFIX=opt/openldap
B-configure-1 --prefix=/$OPTPREFIX --localstatedir=/var \
--with-tls=openssl \
--enable-syslog \
--enable-slapd \
--enable-wrappers \
--enable-bdb=yes --enable-hdb=yes \
--enable-passwd=yes \
--enable-local \
--without-fetch \
--enable-ldap --enable-overlays --enable-relay \
|| exit 1
[ -f config.log ] && cp -p config.log /var/log/config/$PKG-config.log
#########
# Post configure patch
# patch -p0 < $PKGDIR/Makefile.pat
#########
# Compile
make || exit 1
#########
# Install into dir under /var/tmp/install
rm -rf "$DST"
make install DESTDIR=$DST # --with-install-prefix may be an alternative
OPTDIR=$DST/$OPTPREFIX
mkdir -p $OPTDIR/etc/config.flags
mkdir -p $OPTDIR/etc/config.preconf
mkdir -p $OPTDIR/rc.d
echo yes > $OPTDIR/etc/config.flags/slapd
echo $PKG > $OPTDIR/pkgversion
cp -p $PKGDIR/rc $OPTDIR/rc.d/rc.slapd
chmod +x $OPTDIR/rc.d/rc.slapd
[ -f $PKGDIR/README ] && cp -p $PKGDIR/README $OPTDIR
#########
# Convert man-pages
cd $DST || exit 1
for f in $(find . -path \*man/man\*); do if [ -f $f ]; then groff -T utf8 -man $f > $f.txt; rm $f; fi; done
#########
# Check result
cd $DST || exit 1
[ -f opt/openldap/libexec/slapd ] || exit 1
(ldd opt/openldap/libexec/slapd|grep -qs "not a dynamic executable") || exit 1
#########
# Clean up
cd $DST || exit 1
rm -rf opt/openldap/lib var opt/openldap/include opt/openldap/share/man/man3
mv opt/openldap/etc/openldap/slapd.conf opt/openldap/etc/config.preconf || exit 1
mv opt/openldap/etc/openldap/slapd.ldif opt/openldap/etc/config.preconf || exit 1
mv opt/openldap/etc/openldap/ldap.conf opt/openldap/etc/config.preconf || exit 1
rm -f opt/openldap/etc/openldap/*.default
[ -d $OPTPREFIX/bin ] && strip $OPTPREFIX/bin/*
[ -d $OPTPREFIX/sbin ] && strip $OPTPREFIX/sbin/*
[ -d $OPTPREFIX/usr/bin ] && strip $OPTPREFIX/usr/bin/*
#########
# Make package
cd $DST
tar czf /var/spool/pkg/$PKG.tar.gz .
#########
# Cleanup after a success
cd /var/lib/build
[ "$DEVEL" ] || rm -rf "$DST"
[ "$DEVEL" ] || rm -rf "$BUILDDIR"
pkg_uninstall
exit 0
| true
|
741a4656076fcf4294b2267e9d7f0d886b151cf4
|
Shell
|
ezPappi/wifibsd
|
/netbsd/x86/cf/site/etc/rc.xml/ssh/ssh
|
UTF-8
| 1,390
| 4
| 4
|
[
"BSD-4-Clause-UC",
"LicenseRef-scancode-other-permissive",
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Startup ssh
. /etc/rc.common
SSH=ssh
PIDFILE=/var/run/${SSH}d.pid
conf_dir="/etc/${SSH}"
ssh_keygen_flags=""
command="/usr/sbin/${SSH}d"
ConsoleMessage=echo
StartService ()
{
if [ -r $PIDFILE ]; then
$ConsoleMessage "SSH already running"
else
if [ ! -f ${conf_dir}/ssh_host_key ]; then
echo "Generating SSH version 1 RSA host key..."
/usr/bin/ssh-keygen -t rsa1 ${ssh_keygen_flags} \
-f ${conf_dir}/ssh_host_key -N '' 1>/dev/null 2>&1
fi
if [ ! -f ${conf_dir}/ssh_host_dsa_key ]; then
echo "Generating SSH DSA host key..."
/usr/bin/ssh-keygen -t dsa ${ssh_keygen_flags} \
-f ${conf_dir}/ssh_host_dsa_key -N '' 1>/dev/null 2>&1
fi
if [ -f ${conf_dir}/ssh_host_rsa_key ]; then
echo "Generating SSH version 2 RSA key ..."
/usr/bin/ssh-keygen -t rsa ${ssh_keygen_flags} \
-f ${conf_dir}/ssh_host_rsa_key -N '' 1>/dev/null 2>&1
fi
$ConsoleMessage "Starting SSH"
${SSH}d
fi
}
# Stop sshd: send SIGTERM to the PID recorded in $PIDFILE, if one exists.
StopService ()
{
    if [ ! -r $PIDFILE ]; then
        $ConsoleMessage "SSH not running"
    else
        $ConsoleMessage "Stopping SSH"
        kill -TERM $(cat $PIDFILE)
    fi
}
RestartService ()
{
if [ -r $PIDFILE ]; then
$ConsoleMessage "Restarting SSH"
kill -HUP `cat $PIDFILE`
else
$ConsoleMessage "SSH not running. Starting"
StartService
fi
}
RunService "$1"
| true
|
cdceed7c11ae53481142202744b33cb56f11f9fb
|
Shell
|
keradus/scrape-pages
|
/publish.sh
|
UTF-8
| 652
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CI publish script: bump the package to the tagged version, publish the built
# package to npm, and push the version-bump commit back to GitHub.
# Runs from two directory levels above this script's own location.
cd $(dirname $(dirname $0))

# The release version comes from the CI tag that triggered this build.
package_version="$TRAVIS_TAG"

# Publish the built package (from lib/) to the npm registry.
# The authToken line is single-quoted ON PURPOSE: npm itself expands
# ${NPM_AUTH_TOKEN} from the environment when it reads .npmrc.
publish_package_to_npm() {
  echo '//registry.npmjs.org/:_authToken=${NPM_AUTH_TOKEN}' >> ~/.npmrc \
    && npm run build \
    && cd lib \
    && npm publish \
    && cd ..
}

# Commit the version bump in package.json/package-lock.json and push it to
# GitHub through a token-authenticated "deploy" remote.
push_version_to_github() {
  git checkout -b master \
    && git add package.json package-lock.json \
    && git commit --message "release $package_version" \
    && git remote add deploy https://${GITHUB_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git \
    && git push deploy master
}

set -x
npm version --no-git-tag-version "$package_version" \
  && publish_package_to_npm \
  && push_version_to_github
| true
|
ec95e51b0f16b2b3cb760874db1c9ad8fd56e0e7
|
Shell
|
eclipse/steady
|
/docker/kb-importer/start.sh
|
UTF-8
| 2,305
| 3.484375
| 3
|
[
"Apache-2.0",
"EPL-1.0",
"BSD-3-Clause",
"EPL-2.0",
"MIT"
] |
permissive
|
#!/bin/bash
# Container entry point for the Steady knowledge-base importer:
# prepares /kb-importer/data, renders the kaybee config from env vars,
# imports trusted certs, runs an initial import, then installs a daily
# cron job and runs cron in the foreground (keeps the container alive).
mkdir -p /kb-importer/data
cd /kb-importer/data
# NOTE(review): `-d` means the folder is only (re)created when it already
# exists, making the mkdir a no-op; `! -d` was probably intended — confirm.
if [ -d $KB_IMPORTER_CLONE_FOLDER ] && [ ! -z $KB_IMPORTER_CLONE_FOLDER ]; then
mkdir -p $KB_IMPORTER_CLONE_FOLDER
fi
# First run only: move the jar and kaybee binary into the data volume.
if [ -f /kb-importer/kb-importer.jar ]; then
mv /kb-importer/kb-importer.jar /kb-importer/kaybee /kb-importer/data
fi
#substitute env variables used by kaybee in kaybeeconf.yaml
sed "s|KB_IMPORTER_STATEMENTS_REPO|$KB_IMPORTER_STATEMENTS_REPO|g" ../conf/kaybeeconf.yaml.sample > ../conf/kaybeeconf.yaml
sed -i "s|KB_IMPORTER_STATEMENTS_BRANCH|$KB_IMPORTER_STATEMENTS_BRANCH|g" ../conf/kaybeeconf.yaml
echo "Statements repo: " $KB_IMPORTER_STATEMENTS_REPO
echo "Statements branch: " $KB_IMPORTER_STATEMENTS_BRANCH
echo "Statements folder: " $KB_IMPORTER_STATEMENTS_FOLDER
echo "Clones folder: " $KB_IMPORTER_CLONE_FOLDER
echo "Skip clones: " $KB_IMPORTER_SKIP_CLONE
./kaybee update --force
#Adding certs
# Import every file in /kb-importer/certs (except the readme) into the JVM
# trust store so the importer can reach internal HTTPS endpoints.
certs=`ls /kb-importer/certs | grep -v readme.txt`
for cert in $certs; do
keytool -import -alias $cert -storepass changeit -keystore /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/cacerts -file /kb-importer/certs/$cert -noprompt
done
#Wait for backend to start
sleep 40
#Run initial import
./../kb-importer.sh
#create a cron job kaybeeconf.yaml
# Append the job only if the current crontab does not already contain it;
# the full environment is inlined because cron runs with a minimal env.
crontab -l > tmpcron
if ! cat tmpcron | grep "kb-importer.sh"
then
if [ -z "$KB_IMPORTER_CRON_HOUR" ]
then
echo "0 0 * * * PATH=$PATH BACKEND_SERVICE_URL=$BACKEND_SERVICE_URL KB_IMPORTER_STATEMENTS_FOLDER=$KB_IMPORTER_STATEMENTS_FOLDER KB_IMPORTER_STATEMENTS_BRANCH=$KB_IMPORTER_STATEMENTS_BRANCH KB_IMPORTER_STATEMENTS_REPO=$KB_IMPORTER_STATEMENTS_REPO KB_IMPORTER_CLONE_FOLDER=$KB_IMPORTER_CLONE_FOLDER KB_IMPORTER_SKIP_CLONE=$KB_IMPORTER_SKIP_CLONE /kb-importer/kb-importer.sh >> /kb-importer/cron.log 2>&1" >> tmpcron
else
echo "0 " "$KB_IMPORTER_CRON_HOUR" " * * * PATH=$PATH BACKEND_SERVICE_URL=$BACKEND_SERVICE_URL KB_IMPORTER_STATEMENTS_FOLDER=$KB_IMPORTER_STATEMENTS_FOLDER KB_IMPORTER_STATEMENTS_BRANCH=$KB_IMPORTER_STATEMENTS_BRANCH KB_IMPORTER_STATEMENTS_REPO=$KB_IMPORTER_STATEMENTS_REPO KB_IMPORTER_CLONE_FOLDER=$KB_IMPORTER_CLONE_FOLDER KB_IMPORTER_SKIP_CLONE=$KB_IMPORTER_SKIP_CLONE /kb-importer/kb-importer.sh >> /kb-importer/cron.log 2>&1" >> tmpcron
fi
fi
crontab tmpcron
echo "cron job created."
rm tmpcron
# Foreground cron = container main process.
cron -f
| true
|
d7c91bb02d557294277d17f7dda669982988de0f
|
Shell
|
trickv/trick_skel
|
/bin/trick_skel/git-clone
|
UTF-8
| 403
| 3.0625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Clone a git repository into ~/src/<host>/<author>/, mirroring the URL layout.
# example: https://github.com/trickv/radio_flyer
# or     : https://gitlab.com/pvanstaveren/foo
# perhaps: git@github.com:trickv/secret_thing ? wont work yet

# Derive host/author/repo from the URL's slash-separated path components.
host=$(echo "$1" | cut -d/ -f3)
author=$(echo "$1" | cut -d/ -f4)
repo=$(echo "$1" | cut -d/ -f5)
src_dir="$HOME/src/$host/$author"
echo "This goes into: $src_dir"
mkdir -p "$src_dir"
# Quoted + failure-checked: the original unquoted `cd` could clone into
# the wrong directory if the path contained spaces or mkdir failed.
cd "$src_dir" || exit 1
git clone "$1"
echo "Now cd $src_dir/$repo"
| true
|
326e978613d9558ae6adf8823ff28a4fd4c2adad
|
Shell
|
NicHub/QUIZZ
|
/gitpull.sh
|
UTF-8
| 1,062
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Sync this quizz repository from one of several known remotes,
# selected by the numeric argument.

# Announce the chosen direction, then pull from the given remote.
pull_from() {
    echo "$1"
    git pull "$2"
}

case "$1" in
    1) pull_from "1) Pull de rPi vers Mac" ssh://pi@davberrypi.local:22/home/pi/quizz ;;
    2) pull_from "2) Pull de Mac vers rPi" ssh://nico@nicobook.local:22/Users/nico/Documents/projets/quizz/ ;;
    3) pull_from "3) Pull de Mac vers USB" /Users/nico/Documents/projets/quizz/ ;;
    4) pull_from "4) Pull de USB vers Mac" /Volumes/NICUSB/quizz/ ;;
    5) pull_from "5) Pull de nicobook vers nicomac" ssh://nico@nicobook.local:22/Users/nico/Documents/projets/quizz/ ;;
    6) pull_from "6) Pull de nicomac vers nicobook" ssh://nico@nicomac.local:22/Users/nico/Documents/projets/quizz/ ;;
    *) echo "Usage:"
       echo "1) Pull de rPi vers Mac"
       echo "2) Pull de Mac vers rPi"
       echo "3) Pull de Mac vers USB"
       echo "4) Pull de USB vers Mac"
       echo "5) Pull de nicobook vers nicomac"
       echo "6) Pull de nicomac vers nicobook"
       exit 1
       ;;
esac
exit 0
| true
|
0821e5c20cf800c9cd22d11708e761790beb7e4a
|
Shell
|
veiset/config
|
/.bashrc
|
UTF-8
| 1,790
| 3.1875
| 3
|
[] |
no_license
|
#
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
# NOTE(review): duplicated below under "pretty colors"; harmless but redundant.
alias ls='ls --color=auto'
PS1='[\u@\h \W]\$ '
# radeon gpu power management (laptop)
# `profilepm` switches the driver to profile-based power management;
# auto/low/mid/high select a profile; dynpm switches to dynamic management.
alias profilepm='sudo bash -c "echo profile > /sys/class/drm/card0/device/power_method"'
alias auto='profilepm && sudo bash -c "echo auto > /sys/class/drm/card0/device/power_profile"'
alias low='profilepm && sudo bash -c "echo low > /sys/class/drm/card0/device/power_profile"'
alias mid='profilepm && sudo bash -c "echo mid > /sys/class/drm/card0/device/power_profile"'
alias high='profilepm && sudo bash -c "echo high > /sys/class/drm/card0/device/power_profile"'
alias dynpm='sudo bash -c "echo dynpm > /sys/class/drm/card0/device/power_method"'
# Show current GPU power state and method.
alias gpu=" cat /sys/kernel/debug/dri/0/radeon_pm_info /sys/class/drm/card0/device/power_method"
# FAST! WE HAVE NO TIME, GET ME TO <location>!!!
alias cgit="cd /home/vz/dev/git/"
alias cma="cd /home/vz/dev/git/master/"
alias cthe="cd /home/vz/dev/git/master/thesis"
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
alias p='python2'
alias p3='python3'
# pretty colors
alias ls='ls --color=auto'
alias egrep='egrep --color=auto'
alias grep='grep --color=auto'
# toggle keyboard layout with caps lock
alias caps="setxkbmap -layout \"us, no\" -option \"grp:caps_toggle\""
# Depth of the current working directory: the number of '/' in $PWD.
function subLevel {
    pwd | awk -F'/' '{ print NF - 1 }'
}
# Prompt: [HH:MM user@host:<dir depth> basename]$
PS1='[\D{%H:%M} \u@\h:`subLevel` \W]\$ '
# auto complete
complete -cf sudo
complete -cf man
# editor
EDITOR=vim ; export EDITOR
# watch a folder for changes and execute a command, eg:
# watchstuff test/ py.test -v test/
# $1: directory to watch; remaining args: command to run on each change.
watchdir() {
    while true; do
        # Capture inotifywait's output so it stays off the terminal.
        change=$(inotifywait -e close_write,moved_to,create "$1")
        clear
        # "${@:2}" keeps each command word intact; the original ${*:2}
        # re-split the command, breaking arguments containing spaces.
        "${@:2}"
    done
}
# Re-run the test suite whenever anything under test/ changes.
watchtests() {
watchdir test/ py.test -v test/
}
| true
|
9be81b4a55b973e0d6ee50a5f0408f60d505349e
|
Shell
|
OutsourcedGuru/makemake
|
/makemake
|
UTF-8
| 1,426
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# makemake: generate a CMakeLists.txt covering every .cpp file in the
# current directory (project named after the directory), then run
# cmake + make. Output file format is unchanged from the original.

echo -n "Determining project name... "
projectName=${PWD##*/}
projectname=$(echo "$projectName" | tr '[:upper:]' '[:lower:]')
PROJECTNAME=$(echo "$projectName" | tr '[:lower:]' '[:upper:]')
printf '%s\n' "$projectname"

echo -n "Reviewing .cpp files in current directory..."
# Use the shell glob instead of parsing `ls` output — robust against
# unusual filenames (the original broke on names with spaces).
cppFiles=(*.cpp)
for eachfile in "${cppFiles[@]}"; do
  echo -n " $eachfile"
done
echo ""

destFile=CMakeLists.txt
# One here-doc replaces the original's ten numbered output variables;
# \${...} keeps the literal ${NAME_SOURCE_DIR} text in the output.
cat > "$destFile" <<EOF
# CMakeLists files in this project can
# refer to the root source directory of the project as \${${PROJECTNAME}_SOURCE_DIR} and
# to the root binary directory of the project as \${${PROJECTNAME}_BINARY_DIR}.
cmake_minimum_required (VERSION 2.8.11)
project (${PROJECTNAME})

EOF
{
  echo -n "add_executable (${projectname}"
  for eachfile in "${cppFiles[@]}"; do
    echo -n " $eachfile"
  done
  echo ")"
} >> "$destFile"

echo "Generating Makefile in current directory..."
result=$(cmake . -G"Unix Makefiles")
printf '%s\n' "$result"
echo "Making project..."
output=$(make)
printf '%s\n' "$output"
| true
|
7377ac8eb12f850f087debfbb3c35195c5195478
|
Shell
|
iarna/dotfiles
|
/install
|
UTF-8
| 2,699
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash
# Dotfiles installer: copies tracked files into a git-backed staging dir
# ($INSTALLTO) and symlinks them into $FINAL (the home directory).
INSTALLTO=~/.etc
FINAL=~
# No argument  -> dry run (print what would happen).
# "now"        -> actually install.
# Arg with '/' -> treat it as an alternate install directory.
if [ -z "$1" ]; then
DRYRUN=true
elif [ "$1" == "now" ]; then
DRYRUN=false
elif [[ "$1" =~ / ]]; then
INSTALLTO="$@"
DRYRUN=false
else
echo "Form: $0 [now]" 1>&2
echo "By default, install will print out a list of what it would do." 1>&2
echo "If you pass \"now\" as an argument, it will copy the files from" 1>&2
echo "this repository into your home directory." 1>&2
exit 1
fi
# Execute the given command, or merely print it when $DRYRUN is "true".
function run-cmd () {
  case "$DRYRUN" in
    true) echo " > $@" ;;
    *)    "$@" ;;
  esac
}
# Recursively install one git-tracked path.
#   $1 FILE        - file or directory (relative to repo root)
#   $2 PREFIXGIT   - staging-dir prefix (copy target)
#   $3 PREFIXFINAL - final-home prefix (symlink target)
# Directories recurse over `git ls-files` (minus $ignorere matches).
# *.patch files are applied with `git apply` inside the staging dir;
# *.replace files are installed under their name minus the suffix;
# everything else is copied to staging and symlinked into the home dir.
function update-home () {
FILE="$1"
PREFIXGIT="$2"
PREFIXFINAL="$3"
if [ -d "$FILE" ]; then
git ls-files "$FILE" | sort | egrep -v "$ignorere" | while read toinst; do
update-home "$toinst" "$PREFIXGIT" "$PREFIXFINAL"
done
else
# Ensure both destination directories exist before copying/linking.
prefix_dir=$(dirname "${PREFIXGIT}${FILE}")
final_dir=$(dirname "${PREFIXFINAL}${FILE}")
if [ ! -d "$prefix_dir" ]; then
run-cmd mkdir -v -p "$prefix_dir"
fi
if [ ! -d "$final_dir" ]; then
run-cmd mkdir -v -p "$final_dir"
fi
if echo $FILE | grep -q '[.]patch$'; then
# Apply the patch from inside the staging dir; the patch path must be
# absolute because of the pushd.
FULLPATH="$(pwd)/${FILE}"
pushd "${PREFIXGIT}" > /dev/null
run-cmd git apply "${FULLPATH}"
popd > /dev/null
elif echo $FILE | grep -q '[.]replace$'; then
FINALNAME="$(basename "${FILE}" .replace)"
run-cmd cp -v -a "$FILE" "${PREFIXGIT}${FINALNAME}"
run-cmd ln -f -s "${PREFIXGIT}${FINALNAME}" "${PREFIXFINAL}${FINALNAME}"
else
run-cmd cp -v -a "$FILE" "${PREFIXGIT}${FILE}"
run-cmd ln -f -s "${PREFIXGIT}${FILE}" "${PREFIXFINAL}${FILE}"
fi
fi
}
# Post-install fixups: tighten permissions on the staged .ssh directory
# (runs only when the repo ships an ssh/ directory).
function post-install () {
if [ -d ssh ]; then
run-cmd chmod go-rwx -R "$INSTALLTO/.ssh"
fi
}
# We always ignore ourselves
extra_ignore_re="^install$"
# Plus we ignore any directories flagged as non-dot
for a in $(cat .non-dot-dirs); do
extra_ignore_re="$extra_ignore_re\|"
extra_ignore_re="$extra_ignore_re^$a/"
done
# Plus anything in .installignore, which we strip comments from
# and join with pipes
ignorere="$((grep -v '^#' .installignore; echo $extra_ignore_re) | perl -e 'print join q{|}, grep {!/^$/} map {chomp;$_} <>')"
# NOTE(review): unquoted [ -n $INSTALLTO ] is always true, and mkdir has
# no -p, so this errors harmlessly on re-runs — confirm intent.
if [ -n $INSTALLTO ]; then
run-cmd mkdir $INSTALLTO
run-cmd git init $INSTALLTO
fi
# Stash any local edits in the staging repo before overwriting files.
pushd $INSTALLTO
run-cmd git stash
popd
# Then we tell it to install our contents as dotfiles
update-home . "$INSTALLTO/." "$FINAL/."
# Plus the contents of our non_dot_dirs
for non_dot_dir in $(cat .non-dot-dirs); do
if [ -d $non_dot_dir ]; then
pushd $non_dot_dir
update-home . "$INSTALLTO/$non_dot_dir/" "$FINAL/$non_dot_dir/"
popd
fi
done
# Run our post install steps, mostly perm changes
post-install
# Commit the freshly installed snapshot, then restore stashed edits.
pushd $INSTALLTO
run-cmd git add -A .
run-cmd git commit -m"New install $(date "+%Y-%m-%d %H:%M:%S %Z")"
run-cmd git stash pop
popd
| true
|
d417f5aef47d7f6fdd819a38dfa17ff08b041849
|
Shell
|
gastonginestet/ejerciciosISO
|
/Practica 3 2019/Ej13c-EsArchivoODirectorio.sh
|
UTF-8
| 503
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Check whether the path given as $1 exists and report whether it is a
# regular file or a directory; create it as a directory when missing.
# ($1 is quoted throughout — the original broke on paths with spaces.)

# Validate the argument count first.
if [ $# -ne 1 ];
then
echo "La cantidad de parametros es incorrecta"
exit 1
fi
# Does the path exist at all (file or directory alike)?
if [ -e "$1" ]; then
echo "Si existe"
if [ -f "$1" ];then
echo "Es un archivo"
else
echo "Es un directorio"
fi
else
echo "No existe el archivo/directorio con nombre $1"
echo "Creando..."
mkdir "$1"
echo "Listando el directorio $(pwd)" ; ls
fi
echo "Imprimiendo el nombre del archivo: $1"
exit 0
| true
|
d99b7d295c0cdc8b997e93a2685dcfc3804a5074
|
Shell
|
SammyEnigma/qmlnet
|
/build/travis.sh
|
UTF-8
| 479
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Travis CI entry point: computes the build version with GitVersion,
# then hands off to the per-OS build script.
set -e
set -x

SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

# Before we run the build, get gitversion and generate a version.json.
dotnet tool install -g GitVersion.Tool --version 5.3.4
export PATH="$PATH:$HOME/.dotnet/tools"
dotnet gitversion > version.json

case "$TRAVIS_OS_NAME" in
  linux) $SCRIPT_DIR/travis.linux.sh ;;
  osx)   $SCRIPT_DIR/travis.osx.sh ;;
  *)
    echo "Unsupported os."
    exit 1
    ;;
esac
| true
|
857f0cb76fd640747d2680812f85e587a9a0009f
|
Shell
|
tofaramac/ansible
|
/roles/maldetect/templates/maldet.j2
|
UTF-8
| 3,500
| 3.171875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# maldet daily cron job: loads configuration, prunes old data, runs
# auto-updates, then kicks off a scan tailored to the detected hosting panel.
export PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:$PATH
export LMDCRON=1
inspath='/usr/local/maldetect'
intcnf="$inspath/internals/internals.conf"
# internals.conf defines $cnf, $compatcnf, $tmpdir, $varlibpath, $find, etc.
if [ -f "$intcnf" ]; then
source $intcnf
else
echo "\$intcnf not found."
exit 1
fi
if [ -f "$cnf" ]; then
source $cnf
if [ -f "$compatcnf" ]; then
source $compatcnf
fi
else
echo "could not find \$cnf, fatal error, bye."
exit 1
fi
# Distro-level overrides (RedHat-style, then Debian-style).
if [ -f "/etc/sysconfig/maldet" ]; then
. /etc/sysconfig/maldet
elif [ -f "/etc/default/maldet" ]; then
. /etc/default/maldet
fi
# Optional site-local cron configuration.
if [ -f "$cron_custom_conf" ]; then
. $cron_custom_conf
fi
# Default scan window: files changed within the last day.
if [ -z "$scan_days" ]; then
scan_days=1
fi
if [ "$find" ]; then
# prune any quarantine/session/tmp data older than 7 days
tmpdirs="$tmpdir $varlibpath/sess $varlibpath/quarantine $varlibpath/pub"
for dir in $tmpdirs; do
if [ -d "$dir" ]; then
$find $dir -type f -mtime +7 -print0 | xargs -0 rm -f >> /dev/null 2>&1
fi
done
fi
if [ "$autoupdate_version" == "1" ] || [ "$autoupdate_signatures" == "1" ]; then
# sleep for random 1-999s interval to better distribute upstream load
sleep $(echo $RANDOM | cut -c1-3) >> /dev/null 2>&1
fi
if [ "$autoupdate_version" == "1" ]; then
# check for new release version
$inspath/maldet -d >> /dev/null 2>&1
fi
if [ "$autoupdate_signatures" == "1" ]; then
# check for new definition set
$inspath/maldet -u >> /dev/null 2>&1
fi
# if we're running inotify monitoring, send daily hit summary
if [ "$(ps -A --user root -o "cmd" | grep maldetect | grep inotifywait)" ]; then
$inspath/maldet --monitor-report >> /dev/null 2>&1
else
# No monitor running: pick scan paths based on which hosting control
# panel's directory layout is present, falling back to /var/www.
if [ -d "/home/virtual" ] && [ -d "/usr/lib/opcenter" ]; then
# ensim
$inspath/maldet -b -r /home/virtual/?/fst/var/www/html/,/home/virtual/?/fst/home/?/public_html/ $scan_days >> /dev/null 2>&1
elif [ -d "/etc/psa" ] && [ -d "/var/lib/psa" ]; then
# psa
$inspath/maldet -b -r /var/www/vhosts/?/ $scan_days >> /dev/null 2>&1
elif [ -d "/usr/local/directadmin" ]; then
# DirectAdmin
$inspath/maldet -b -r /home?/?/domains/?/public_html/,/var/www/html/?/ $scan_days >> /dev/null 2>&1
elif [ -d "/var/www/clients" ]; then
# ISPConfig
$inspath/maldet -b -r /var/www/clients/?/web?/web,/var/www $scan_days >> /dev/null 2>&1
elif [ -d "/etc/webmin/virtual-server" ]; then
# Virtualmin
$inspath/maldet -b -r /home/?/public_html/,/home/?/domains/?/public_html/ $scan_days >> /dev/null 2>&1
elif [ -d "/usr/local/ispmgr" ]; then
# ISPmanager
$inspath/maldet -b -r /var/www/?/data/,/home/?/data/ $scan_days >> /dev/null 2>&1
elif [ -d "/var/customers/webs" ]; then
# froxlor
$inspath/maldet -b -r /var/customers/webs/ $scan_days >> /dev/null 2>&1
elif [ -d "/usr/local/vesta" ]; then
# VestaCP
$inspath/maldet -b -r /home/?/web/?/public_html/,/home/?/web/?/public_shtml/,/home/?/tmp/,/home/?/web/?/private/ $scan_days >> /dev/null 2>&1
elif [ -d "/usr/share/dtc" ]; then
# DTC
if [ -f /var/lib/dtc/saved_install_config ]; then
. /var/lib/dtc/saved_install_config
fi
$inspath/maldet -b -r ${conf_hosting_path:-/var/www/sites}/?/?/subdomains/?/html/ $scan_days >> /dev/null 2>&1
else
# Customized for Prepaid
$inspath/maldet -b -r /var/www/ $scan_days >> /dev/null 2>&1
fi
fi
# Optional site-local post-scan hook.
if [ -f "$cron_custom_exec" ]; then
. $cron_custom_exec
fi
| true
|
dcdde1041d7cb981c6745d1596467ffb17c4e800
|
Shell
|
ewels/miRNA_processing
|
/old_files/get_mapped_reads_fastq.sh
|
UTF-8
| 1,786
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Extract mapped reads from an aligned BAM/SAM file into FASTQ and plot
# QC statistics. Usage: get_mapped_reads_fastq.sh <input.bam>

# MODULE LOADING
source $HOME/.virtualenvs/python-276/bin/activate

# Modules, activate the module command
case "$(basename $SHELL)" in
-sh|sh|*/sh) modules_shell=sh ;;
-ksh|ksh|*/ksh) modules_shell=ksh ;;
-zsh|zsh|*/zsh) modules_shell=zsh ;;
-bash|bash|*/bash) modules_shell=bash ;;
esac
module() { eval `/usr/local/Modules/$MODULE_VERSION/bin/modulecmd $modules_shell $*`; }

export PATH=$HOME/bin:$HOME/.local/bin:$PATH
export LD_LIBRARY_PATH=$HOME/lib/
export PYTHONPATH=$HOME/lib/python2.7/
module unload python
module load python/2.7.4
module load samtools
module load picard

# Derive output paths next to the input file; plots go in a sibling
# visualization/ directory.
input_file_fullpath=$(readlink -f "$1")
input_file_basename=$(basename "$input_file_fullpath")
output_dir_fullpath=$(dirname "$input_file_fullpath")
output_vis_dir=$(dirname "$(readlink -f "$output_dir_fullpath")")/visualization/
output_base="${input_file_basename%.*}""_mappedreads"
output_samfile=$output_dir_fullpath/$output_base".sam"
output_fastqfile=$output_dir_fullpath/$output_base".fastq"

echo "Creating output sam file \"$output_samfile\""
# -h outputs sam header, required by Picard; -F 4 filters out unmapped reads.
# Explicit if/else: the original `A && B || C` printed the "already exists"
# message whenever samtools itself failed, not only when the file existed.
if [[ ! -e $output_samfile ]]; then
    samtools view -h -F 4 "$input_file_fullpath" >> "$output_samfile"
else
    echo "Sam file \"$output_samfile\" already exists; skipping sam file generation."
fi
echo "Done with sam file generation."

echo -n "Creating output fastqfile \"$output_fastqfile\""
if [[ ! -e $output_fastqfile ]]; then
    java -Xmx250M -jar $PICARD_HOME/SamToFastq.jar I="$output_samfile" F="$output_fastqfile"
else
    echo "Fastq file \"$output_fastqfile\" already exists; skipping fastq file generation."
fi
echo "Done with fastq file generation."

echo "Creating output plot under $output_vis_dir"
python plots.py -i "$output_fastqfile" -d "$output_vis_dir"
echo "Done with output plot generation."
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.