blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bfbcce6b35c4adf0732757cc4a399f1c4a743e7a
|
Shell
|
myshu2017-03-14/linux_nano_pipline
|
/2-reads_length_plots/plot_bar_of_reads_length_ITS_or_16S_for_lca.sh
|
UTF-8
| 3,062
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
#---------------------------------------------------------------+
# author: Myshu |
# mail:1291016966@qq.com |
# version:1.0 |
# date :2018-5-23 |
# description: Plot reads length bar plot for 16S and ITS |
#---------------------------------------------------------------+
# Print usage and exit 0; shown when called without arguments or with -h.
help()
{
cat <<HELP
USAGE: $0 input_blastn_lca_dir reads_len_dir output_dir tag
or $0 -h # show this message
EXAMPLE:
$0 blastn_out/ data/ read_len_bar_of_16S_and_ITS/ -c
Tag : -a for 16S
-b for ITS
-c for 16S+ITS
HELP
exit 0
}
[ -z "$1" ] && help
[ "$1" = "-h" ] && help
# Required positional arguments.
in=$1    # dir with <sample>.{16S,ITS}.reads.maptaxa.txt (blastn + LCA output)
len=$2   # dir with <sample>.len per-read length tables
out=$3   # output dir, created if missing
tag=$4   # -a = 16S only, -b = ITS only, -c = both
pro=$(dirname "$0")   # this script's dir; companion plot scripts live beside it
if [ ! -d "$out" ]; then
mkdir -p "$out"
fi
# for 16S: pull the lengths of reads assigned to 16S
if [ "$tag" = "-a" ] || [ "$tag" = "-c" ]; then
for i in "$in"/*.16S.reads.maptaxa.txt
do
name=$(basename "$i" .16S.reads.maptaxa.txt)
# column 1 = read IDs; match them as fixed strings against the length
# table (-F, consistent with the grep -F -f used for ids further below)
cut -f 1 "$i" > "$out/$name.map.tmp"
grep -F -f "$out/$name.map.tmp" "$len/$name.len" > "$out/$name.16S.len"
# drop empty result files so only samples with hits are kept
if [ ! -s "$out/$name.16S.len" ];then
rm "$out/$name.16S.len"
fi
done
rm "$out"/*.map.tmp
if [ ! -d "$out/16S_reads_len" ]; then
mkdir -p "$out/16S_reads_len"
fi
mv "$out"/*16S.len "$out/16S_reads_len"
fi
# for ITS: same extraction for ITS-assigned reads
if [ "$tag" = "-b" ] || [ "$tag" = "-c" ]; then
for i in "$in"/*.ITS.reads.maptaxa.txt
do
name=$(basename "$i" .ITS.reads.maptaxa.txt)
cut -f 1 "$i" > "$out/$name.map.tmp"
grep -F -f "$out/$name.map.tmp" "$len/$name.len" > "$out/$name.ITS.len"
if [ ! -s "$out/$name.ITS.len" ];then
rm "$out/$name.ITS.len"
fi
done
rm "$out"/*.map.tmp
if [ ! -d "$out/ITS_reads_len" ]; then
mkdir -p "$out/ITS_reads_len"
fi
mv "$out"/*ITS.len "$out/ITS_reads_len"
fi
# for plots
if [ "$tag" = "-b" ]; then
"$pro"/plot_bar_of_reads_length.sh "$out/ITS_reads_len" "$out/ITS_reads_len_barplots"
elif [ "$tag" = "-a" ];then
"$pro"/plot_bar_of_reads_length.sh "$out/16S_reads_len" "$out/16S_reads_len_barplots"
else
# keep only samples that have BOTH a non-empty 16S and ITS length file
ls "$out/16S_reads_len" | cut -f 1 -d "." > "$out/16S.ids"
ls "$out/ITS_reads_len" | cut -f 1 -d "." > "$out/ITS.ids"
grep -F -f "$out/16S.ids" "$out/ITS.ids" | sort | uniq > "$out/ids"
if [ ! -d "$out/16S_and_ITS_reads_len" ]; then
mkdir -p "$out/16S_and_ITS_reads_len"
fi
while read -r id
do
cp "$out/16S_reads_len/$id.16S.len" "$out/16S_and_ITS_reads_len"
cp "$out/ITS_reads_len/$id.ITS.len" "$out/16S_and_ITS_reads_len"
done < "$out/ids"
rm "$out/ids" "$out/16S.ids" "$out/ITS.ids"
# mk output_dir plots
if [ ! -d "$out/16S_and_ITS_reads_len_barplots" ] ; then
mkdir -p "$out/16S_and_ITS_reads_len_barplots"
fi
# bar plots for 16S and ITS
Rscript "$pro"/plot_bar_of_reads_length_ITS_or_16S_for_lca.R -i "$out/16S_and_ITS_reads_len" -n 100 -m 2000 -s 100 -o "$out/16S_and_ITS_reads_len_barplots"
# plots for only 16S or ITS
"$pro"/plot_bar_of_reads_length.sh "$out/16S_reads_len" "$out/16S_reads_len_barplots"
"$pro"/plot_bar_of_reads_length.sh "$out/ITS_reads_len" "$out/ITS_reads_len_barplots"
fi
| true
|
2b45e5a6ccc6200690df0469f9434b5aff382651
|
Shell
|
ampinzonv/biobash
|
/source/bb_fastq2fasta
|
UTF-8
| 360
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#This script was adapted from:
# https://bioinformaticsworkbook.org/dataWrangling/fastaq-manipulations/converting-fastq-format-to-fasta.html#gsc.tab=0
# With no input file, print usage and stop: the original fell through to
# sed, which then blocked reading stdin.
if [ -z "$1" ] ; then
echo "
bb_fastq2fasta: Converts fastq into fasta.
Usage: bb_reformat_fastq2fasta [INPUT FILE]
"
exit 1
fi
#Read every 4 lines: record line 1 (@header -> >header) and line 2 (sequence)
sed -n '1~4s/^@/>/p;2~4p' "$1"
| true
|
154a5beda861c2225535b5499a80d47535319481
|
Shell
|
emawind84/sensehat-discotest
|
/senseplot.sh
|
UTF-8
| 293
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run the SenseHat plotting script from its own directory, regardless of the
# caller's working directory.
SCRIPT_BASE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
SCRIPT_NAME="${0##*/}"
# Prefer the project ipython virtualenv's python over the system one.
export PATH=/home/pi/python_example/ipython/bin:$PATH
set -e   # abort on any failure below (including the cd)
echo "Base path: $SCRIPT_BASE_PATH"
echo "Script name: $SCRIPT_NAME"
cd "$SCRIPT_BASE_PATH"   # quoted: path may contain spaces
# Forward all arguments intact (was unquoted $1, which word-split).
python senseplot_es.py "$@"
| true
|
cb067574ee48a8a5d5ae08f186221917dc5d6a95
|
Shell
|
xhaa123/blfs
|
/mld/alsa-plugin/01-alsa-plugin-1.2.2.sh
|
UTF-8
| 756
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Download, verify, build and install alsa-plugins 1.2.2 (BLFS-style).
# ${log} is expected to be provided by the calling environment as a logging
# command taking (message, channel)-style args -- TODO confirm its contract.
# The whole script is one && chain: any failing step stops the rest.
${log} `basename "$0"` " started" blfs_all &&
${log} `basename "$0"` " download" blfs_all &&
# start from a clean source tree
if test -d /sources/alsa-plugins-1.2.2
then
rm -rf /sources/alsa-plugins-1.2.2
fi
SCRIPT=`realpath $0`
SCRIPTPATH=`dirname $SCRIPT`
# fetch (resumable), verify checksum, unpack, configure, build, install
wget https://www.alsa-project.org/files/pub/plugins/alsa-plugins-1.2.2.tar.bz2 \
--continue --directory-prefix=/sources &&
md5sum -c ${SCRIPTPATH}/md5-alsa-plugins &&
tar xf /sources/alsa-plugins-1.2.2.tar.bz2 -C /sources/ &&
cd /sources/alsa-plugins-1.2.2 &&
./configure --sysconfdir=/etc &&
${log} `basename "$0"` " configured" blfs_all &&
make &&
${log} `basename "$0"` " built" blfs_all &&
make install &&
${log} `basename "$0"` " installed" blfs_all &&
${log} `basename "$0"` " finished" blfs_all
| true
|
52b7575bd293b0a4d9e1db8d33e5ee75ea65aa72
|
Shell
|
microkernel2018/osdev-toolchains
|
/scripts/build-generic.sh
|
UTF-8
| 737
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Cross-toolchain build helpers. Both functions expect TARGET, PREFIX and
# the *_VERSION variables to be set by the sourcing script, with sources
# unpacked under src/ -- TODO confirm the expected layout.
# Out-of-tree build of binutils for ${TARGET}, installed into ${PREFIX}.
function build_binutils {
mkdir -p build/${TARGET}
# always rebuild from a clean build directory
rm -rf "build/${TARGET}/build-binutils"
mkdir -p "build/${TARGET}/build-binutils"
pushd "build/${TARGET}/build-binutils"
../../../src/binutils-${BINUTILS_VERSION}/configure --target=${TARGET} --prefix="$PREFIX" --with-sysroot --disable-nls --disable-werror
make
make install
popd
}
# Out-of-tree build of a C-only, headerless (freestanding) gcc + libgcc
# for ${TARGET}, installed into ${PREFIX}. Run build_binutils first so the
# cross assembler/linker exist -- TODO confirm caller ordering.
function build_gcc {
mkdir -p build/${TARGET}
# always rebuild from a clean build directory
rm -rf "build/${TARGET}/build-gcc"
mkdir -p "build/${TARGET}/build-gcc"
pushd "build/${TARGET}/build-gcc"
../../../src/gcc-${GCC_VERSION}/configure --target=${TARGET} --prefix="$PREFIX" --disable-nls --enable-languages=c --without-headers
make all-gcc
make all-target-libgcc
make install-gcc
make install-target-libgcc
popd
}
| true
|
956f38d53d63f91a8ef0fdd80e12e413c2219671
|
Shell
|
cbaoth/dotfiles
|
/bin/wget-p
|
UTF-8
| 6,425
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# wget-p: wget using gnu parallel
# Author: Andreas Weyer <dev@cbaoth.de>
# Keywords: bash shell-script wget parallel
declare -r _SCRIPT_PATH="$(cd "$(dirname "$0")"; pwd -P)"
declare -r _SCRIPT_FILE="$(basename "$0")"
# include commons lib
# Search the script dir, its lib/ subdir, then ~/lib; first hit wins.
for f in {$_SCRIPT_PATH/,$_SCRIPT_PATH/lib,$HOME/lib}/commons.sh; do
if [[ -f "$f" ]]; then
source "$f"
break
fi
done
# cl::cmd_p comes from commons.sh; if absent the lib was not found/loaded.
if ! command -v "cl::cmd_p" >& /dev/null; then
printf "commons lib not found, exiting ..\n" >&2
exit 1
fi
# set options
set -o errtrace
#set -o errexit
set -o pipefail
set -o nounset
(( ${DEBUG_LVL:-0} >= 2 )) && set -o xtrace
# restrict word splitting to tab/newline/NUL so unquoted expansions keep spaces
IFS=$'\t\n\0'
# traps
#trap '_rc=$?; \
# printf "ERROR(%s) in %s:%s\n -> %s\n -> %s\n" "${_rc}" \
# "${0:-N/A}" "${LINENO:-N/A}" "${FUNCNAME[@]:-N/A}" \
# c"${BASH_COMMAND:-N/A}"; \
# exit $_rc' ERR
trap 'printf "\nINTERRUPT\n"; exit 1' SIGINT SIGTERM
#trap '_rc=$?; printf "EXIT(%s)\n" "$_rc"; exit $_rc' EXIT
# constants
declare -r _USAGE="${_SCRIPT_FILE} URL.."
declare _HELP
! IFS='' read -r -d '' _HELP <<EOF
Usage: $_USAGE
$(cl::fx b)About:$(cl::fx r)
Simple wrapper for wget using GNU parallel.
$(cl::fx b)Options:$(cl::fx r)
-j|--jobs X Number of parallel wget worker jobs (default: 8)
-ar|--auto-referer
Auto generate a referer URL based on the given URL's path.
Sets wget's --referer option.
-d|--dynamic Generate output file name based on url stripping url
parameters.
Example: https://foo.bar/bla/baz.tar?a=1&b=2
=> foo.bar+bla+baz.tar
Mutually exclusive with -df.
-df|--dynamic-full
Generate output file name based on url including url
schema and parameters (warning: file extension may not
be last).
Example: https://foo.bar/bla/baz.tar?a=1&b=2
=> https%foo.bar+bla+baz.tar?a=1&b=2
Mutually exclusive with -d.
-fp|--file-prefix PREFIX
Add the given prefix in front of each output file name.
Example: -d -fp 0_ "https://foo.bar/bla/baz.tar?a=1&b=2"
=> 0_foo.bar+bla+baz.tar
Works only in combination with either -d or -df.
-v|--verbose Verbose output mode.
-w|--wget WGET_ARG..
Provide one or more wget argument(s) ending with \-
Example: -w -c \- https://...
=> passes the -c arguments to the wget command
Use with caution, consider that some arguments like -O or
--referer might already be set by this script.
EOF
readonly _HELP
# arguments (filled in by _parse_args)
declare _dynamic_filename=false        # -d: short dynamic output names
declare _dynamic_filename_full=false   # -df: full dynamic output names
declare _auto_referer=false            # -ar: derive --referer from the URL
declare _file_prefix=""                # -fp: prefix for dynamic names
declare _jobs=8                        # -j: number of parallel workers
declare _read_wget_args=false          # true while consuming -w ... \- args
declare -a _wget_args=()               # extra args passed through to wget
declare -a _urls=()                    # positional URL arguments
declare _verbose=false                 # -v
# parse arguments
# Parse script options into the module-level _* variables declared above and
# collect the remaining positional arguments into _urls. Exits non-zero on
# any argument error (error reporting via cl::p_err from commons.sh).
_parse_args() {
  if [[ -z "${1:-}" ]]; then
    cl::p_usg "${_USAGE}"
    exit 1
  fi
  while [[ -n "${1:-}" ]]; do
    if ${_read_wget_args}; then
      # collect -w wget args until the \- terminator
      if [[ "${1:-}" == "\-" ]]; then
        _read_wget_args=false
      else
        _wget_args+=("$1")
      fi
      shift
    else
      # parse regular script args
      case $1 in
        -w|--wget)
          _read_wget_args=true
          shift
          ;;
        -j|--jobs)
          if ! cl::is_int "${2:-}"; then
            cl::p_err "error parsing args: -j argument $2 is not an integer"
            exit 1
          fi
          _jobs=$2
          shift 2
          ;;
        -d|--dynamic)
          if ${_dynamic_filename_full}; then
            # fixed: message used to reference "-dp", an option that doesn't exist
            cl::p_err "error parsing args: -d mutually exclusive with -df"
            exit 1
          fi
          _dynamic_filename=true
          shift
          ;;
        -df|--dynamic-full)
          if ${_dynamic_filename}; then
            cl::p_err "error parsing args: -df mutually exclusive with -d"
            exit 1
          fi
          _dynamic_filename_full=true
          shift
          ;;
        # accept the documented --auto-referer spelling; keep the old
        # --auto_referer for backward compatibility
        -ar|--auto-referer|--auto_referer)
          _auto_referer=true
          shift
          ;;
        -fp|--file-prefix)
          if [[ -z "${2:-}" ]]; then
            cl::p_err "error parsing args: missing file PREFIX"
            exit 1
          fi
          _file_prefix="$2"
          shift 2
          ;;
        -v|--verbose)
          _verbose=true
          shift
          ;;
        -h|--help)
          printf "%s" "${_HELP}"
          exit 1
          ;;
        -*)
          cl::p_err "unknown argument: $1"
          exit 1
          ;;
        *)
          break
          ;;
      esac
    fi
  done
  if [[ -z "${1:-}" ]]; then
    cl::p_err "missing url argument"
    exit 1
  fi
  # -fp only makes sense when a dynamic name mode generates an output name
  if [[ -n "${_file_prefix}" && !(${_dynamic_filename} || ${_dynamic_filename_full}) ]]; then
    cl::p_err "file prefix provided but neither -d nor -df enabled"
    exit 1
  fi
  _urls=("$@")
  #cl::p_dbg 0 1 "> URLs: ${_urls[@]}"
  cl::p_dbg 0 1 "> wget args: ${_wget_args[@]}"
}
# export parser state for the _wget workers (they run in parallel subshells)
export _dynamic_filename
export _dynamic_filename_full
export _file_prefix
export _auto_referer
export _wget_args
export _urls
export _verbose
export _jobs
# Download a single URL; invoked per-URL by parallel in _wget_p.
# Honors the exported _auto_referer, _dynamic_filename(_full) and
# _file_prefix settings; any extra args are passed straight to wget.
# UAGENT is expected in the environment (presumably set by commons.sh)
# -- TODO confirm. Returns 1 with a warning when no url is given.
_wget() {
  if [[ -z "${1:-}" ]]; then
    printf "WARNING: missing url argument, skipping\n"
    return 1
  fi
  local url="$1"
  shift
  local ref
  if ${_auto_referer}; then
    # referer = URL with the last path element stripped
    ref="$(dirname "${url}")"
  fi
  local outfile
  if ${_dynamic_filename}; then
    # strip scheme and query, turn path separators into '+'
    outfile="$(sed -E 's/^(\w+):\/+//g;s/\/+$//g;s/\//+/g;s/\?\w+=.*//g' <<<"${url}")"
  elif ${_dynamic_filename_full}; then
    # keep scheme (as 'scheme%') and query, turn path separators into '+'
    outfile="$(sed -E 's/^(\w+):\/\//\1%/g;s/\/+$//g;s/\//+/g' <<<"${url}")"
  fi
  if [[ -n "${_file_prefix}" ]]; then
    if [[ -z "${outfile}" ]]; then
      # fixed typo in the warning: "combonation" -> "combination"
      printf "WARNING: ignoring provided file prefix [%s], only applicable in combination with -d or -df\n" "${_file_prefix}"
    else
      outfile="${_file_prefix}${outfile}"
    fi
  fi
  if ${_verbose}; then
    # fixed: was "{$UAGENT}", which printed literal braces instead of the value
    echo "\$ wget -U \"${UAGENT}\" ${ref:+--referer \"${ref}\" }\"${url}\" ${outfile:+-O \"${outfile}\"} $@"
  fi
  wget -U "${UAGENT}" ${ref:+--referer "${ref}"} "${url}" ${outfile:+-O "${outfile}"} "$@"
}
export -f _wget
# Fan the collected URLs out to ${_jobs} parallel _wget workers.
# Requires GNU parallel; _wget is exported above so workers can call it.
_wget_p() {
#cl::dbg 0 2 "\$ parallel -j ${_jobs} _wget {} ::: ${_urls[@]}"
# NOTE(review): under `set -o nounset`, "${_wget_args[@]}" errors for an
# empty array on bash < 4.4 -- verify the target bash version.
parallel -j ${_jobs} _wget {} "${_wget_args[@]}" ::: "${_urls[@]}"
}
# entry point: parse arguments, then download everything in parallel
main() {
_parse_args "$@"
_wget_p
}
main "$@"
exit 0
| true
|
838037026df9718e08a55260b674ae813527a195
|
Shell
|
caalver/AlertChecker
|
/ThreatDictionary.sh
|
UTF-8
| 1,202
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
#sets global variable 'scriptname'
# Map known IDS alert classifications to the email-generator script that
# handles each one. $classification is expected to be set by the caller;
# this file looks meant to be sourced by a main process -- TODO confirm.
declare -A EventDictionary
EventDictionary=([SERVER-APACHE Apache Struts remote code execution attempt]="Script1" [SERVER-ISS Microsoft ASP.NET bas request denial of service attempt]="Script2" [A Network Trojan was Detected]="NetworkTrojanEmail.sh" [OS-OTHER Bash CGI environment variable injection attempt]="BashInjectionEmail.sh" [Attempted Administrator Privilege Gain]="AdminPrivilegeGain.sh")
#echo ${EventDictionary["A Network Trojan was Detected"]}
echo "classification=""$classification"
#check classification against list of known classifications.
#if it is a known classification, construct email using existing template
# [[ -v array[key] ]] tests key existence (requires bash 4.3+)
if [[ -v EventDictionary["$classification"] ]]; then
echo -e "key found"
#run relevant script to generate email text and send.
scriptname=$(echo ${EventDictionary["$classification"]})
#the script shall be run by the main process using scriptname global.
#source "/home/jc/Documents/AlertAutomation/""$scriptname"
else
echo -e "key not found"
#run email to inform that a new IPS alert has been detected, a new key, email and script will need to be written.
scriptname="NewEntryRequired.sh"
fi
| true
|
d2aa96d543faf10d143d35c8b0895f989284bb9c
|
Shell
|
sirhill/nginx-https
|
/nginx/start.sh
|
UTF-8
| 967
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Container entrypoint: print the configuration, render the monthly
# SSL-issuing cron script, then start nginx in the foreground with either
# the SSL site config (certs already present) or the bootstrap config.
echo -e "================ VARIABLES ===================
DOMAIN=${DOMAIN}
EMAIL=${EMAIL}"
echo "================================================="
# Both DOMAIN and EMAIL are mandatory.
if [[ -z $DOMAIN ]]; then
  echo "DOMAIN is not defined. Exiting";
  exit 1;
fi
if [[ -z $EMAIL ]]; then
  echo "EMAIL is not defined. Exiting";
  exit 1;
fi
# Substitute the $DOMAIN / $EMAIL placeholders into the renewal script.
sed -e "s|\$DOMAIN|${DOMAIN}|" -e "s|\$EMAIL|${EMAIL}|" \
  /root/sslIssuing.sh > /etc/periodic/monthly/sslIssuing.sh
chmod a+x /etc/periodic/monthly/sslIssuing.sh
# Pick the nginx config: SSL when both the cert and dhparam exist.
cert_file="/etc/letsencrypt/live/$DOMAIN/fullchain.pem"
dhparam_file="/etc/letsencrypt/certs/dhparam.pem"
if [[ -f "$cert_file" && -f "$dhparam_file" ]]; then
  sed -e "s|\$DOMAIN|${DOMAIN}|" /root/site-ssl.conf > /etc/nginx/conf.d/default.conf
else
  sed -e "s|\$DOMAIN|${DOMAIN}|" /root/default.conf > /etc/nginx/conf.d/default.conf
  echo "Please the first time run /etc/periodic/monthly/sslIssuing.sh manually"
  echo "And then restart your docker"
fi
nginx -g "daemon off;"
| true
|
85626ad946d13e3cc35f8f7bce3df1a14a924ee9
|
Shell
|
sakabar/crawlBlog
|
/paste_bigram.sh
|
UTF-8
| 125
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/zsh
# Emit adjacent-line (bigram) pairs of a file: line N pasted beside line N+1.
# Usage: paste_bigram.sh <file>
file_name=$1
# count lines; `wc -l < file` avoids the useless cat and the filename column
line=$(wc -l < "$file_name")
# POSIX $(( )) arithmetic instead of the deprecated $[ ] form
paste <(head -n $((line - 1)) "$file_name") <(tail -n +2 "$file_name")
| true
|
daf11cafefcc799ff649c81b28860818d5670a70
|
Shell
|
BiaoLiu/robo-docker
|
/swagger-ui/swag.sh
|
UTF-8
| 301
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate the swagger spec into ./docs and, with `swag.sh serve`,
# regenerate it and serve it through swagger-ui.
echo "start gen swagger doc"
# ensure the output directory exists
mkdir -p docs
path="./docs/swagger.json"
swagger generate spec -o $path
echo "finish gen swagger doc"
if [[ "${1:-}" == "serve" ]]; then
  echo "start swagger serve"
  swagger generate spec -o $path && swagger serve -F=swagger $path
fi
| true
|
c816cfb5be0df6c6b1d88dd1fe0af9cc6bcdc9a6
|
Shell
|
untoreh/trub
|
/functions.sh
|
UTF-8
| 6,422
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Shared helper functions for the trub build scripts.
shopt -s expand_aliases &>/dev/null
# ANSI color codes used by printc: bright green on black / reset.
cn="\033[1;32;40m"
cf="\033[0m"
# Print all arguments on one line, wrapped in the ${cn}/${cf} color codes
# (%b interprets backslash escapes, matching the original echo -e).
printc() {
  printf '%b\n' "${cn}$*${cf}"
}
# List the tag names of GitHub repo $1 (user/repo): strips refs/tags/,
# the ^{} peel markers, -rc tags and a leading "v".
git_versions() {
git ls-remote -t git://github.com/"$1".git | awk '{print $2}' | cut -d '/' -f 3 | grep -v "\-rc" | cut -d '^' -f 1 | sed 's/^v//'
}
# Newest untoreh/pine tag, assuming YY.MM-N style tags (numeric sort on '-').
pine_version() {
git_versions untoreh/pine | sort -bt- -k1nr -k2nr | head -1
}
# Newest dotted-version tag of repo $1 (numeric sort on up to 5 components).
last_version() {
git_versions $1 | sort -bt. -k1nr -k2nr -k3r -k4r -k5r | head -1
}
## $1 repo
# Tag name of the latest GitHub release of $1, via the releases API.
last_release() {
wget -qO- https://api.github.com/repos/${1}/releases/latest \
| awk '/tag_name/ { print $2 }' | head -1 | sed -r 's/",?//g'
}
## get a valid next tag for the current git repo format: YY.MM-X
# Computes the next tag: same-month releases increment the build number,
# a new month resets it to 0. Honors an optional $tag_prefix environment
# variable (tags like "<prefix>-YY.MM-N") -- set by the caller.
md(){
giturl=$(git remote show origin | grep -i fetch | awk '{print $3}')
[[ -z "`echo $giturl | grep github`" ]] && echo "'md' tagging method currently works only with github repos, terminating." && exit 1
prevV=$(git ls-remote -t $giturl | awk '{print $2}' | cut -d '/' -f 3 | grep -v "\-rc" | cut -d '^' -f 1 | sed 's/^v//' )
if [[ -n "$tag_prefix" ]]; then
prevV=$(echo "$prevV" | grep $tag_prefix | sed 's/'$tag_prefix'-//' | sort -bt- -k1nr -k2nr | head -1)
else
echo "no \$tag_prefix specified, using to prefix." 1>&2
prevV=$(echo "$prevV" | sort -bt- -k1nr -k2nr | head -1)
fi
## prev date
prevD=`echo $prevV | cut -d- -f1`
## prev build number
prevN=`echo $prevV | cut -d- -f2`
## gen new release number
newD=`date +%y.%m`
if [[ $prevD == $newD ]]; then
newN=$((prevN + 1))
else
newN=0
fi
newV=$newD-$newN
echo "$newV"
}
## $1 repo
## $2 tag
# Creation date of a GitHub release (latest, or tag $2) as YYYYmmddHHMMSS;
# prints 0 when the API returns no created_at field.
last_release_date() {
if [ -n "$2" ]; then
tag="tags/$2"
else
tag="latest"
fi
local date=$(wget -qO- https://api.github.com/repos/${1}/releases/${tag} | grep created_at | head -n 1 | cut -d '"' -f 4)
[ -z "$date" ] && echo 0 && return
date -d "$date" +%Y%m%d%H%M%S
}
## $1 release date
## $2 time span eg "7 days ago"
# Succeeds (status 0) when release timestamp $1 (YYYYmmddHHMMSS) is at least
# as old as the moment described by the GNU `date --date` expression $2.
release_older_than() {
  # a valid timestamp is exactly 14 characters; warn (but continue) otherwise
  if [ ${#1} != 14 ]; then
    echo "wrong date to compare".
  fi
  release_d=$1
  span_d=$(date --date="$2" +%Y%m%d%H%M%S)
  if [ $span_d -ge $release_d ]; then
    return 0
  else
    return 1
  fi
}
## $1 repo:tag
## $2 artifact name
## $3 dest dir
# Download a release asset matching $2 from GitHub repo $1 (optionally at a
# specific tag). Skips the download when $3/$2 already exists (marker file).
# "-" as $3 streams the asset to stdout; otherwise it is unpacked into $3.
fetch_artifact() {
[ -f $3/$2 ] && return 0
local repo_fetch=${1/:*} repo_tag=${1/*:}
# no ":tag" suffix given -> use the latest release
[ -z "$repo_tag" -o "$repo_tag" = "$1" ] && repo_tag=latest || repo_tag=tags/$repo_tag
art_url=$(wget -qO- https://api.github.com/repos/${repo_fetch}/releases/${repo_tag} \
| grep browser_download_url | grep ${2} | head -n 1 | cut -d '"' -f 4)
[ -z "$(echo "$art_url" | grep "://")" ] && return 1
## if no destination dir stream to stdo
if [ "$3" = "-" ]; then
wget $art_url -qO-
else
mkdir -p $3
# NOTE(review): both branches untar; zip/7z names would also hit the tar xzf
# path even though tar cannot read them -- confirm only tarballs are fetched.
if [ $(echo "$2" | grep -E "gz|tgz|zip|xz|7z") ]; then
wget $art_url -qO- | tar xzf - -C $3
else
wget $art_url -qO- | tar xf - -C $3
fi
# marker so the next call short-circuits
touch $3/$2
fi
}
## $1 image file path
## $2 mount target
## mount image, ${lon} populated with loop device number
# Attaches $1 to the first free /dev/loopN (N <= 10), then mounts every
# partition loopNpX under $2/X. Leaves ${lon}/${ldev} set for the caller.
mount_image() {
umount -Rfd $2
rm -rf $2 && mkdir $2
lon=0
while [ -z "$(losetup -P /dev/loop${lon} $(realpath ${1}) && echo true)" ]; do
lon=$((lon + 1))
[ $lon -gt 10 ] && return 1
sleep 1
done
ldev=/dev/loop${lon}
tgt=$(realpath $2)
mkdir -p $tgt
for p in $(find /dev/loop${lon}p*); do
# mount point name = partition suffix (e.g. p1)
mp=$(echo "$p" | sed 's~'$ldev'~~')
mkdir -p $tgt/$mp
mount -o nouuid $p $tgt/$mp
done
}
## $1 rootfs
# Bind-mount the host /dev, /proc and /sys into rootfs $1 (chroot prep).
mount_hw() {
rootfs=$1
mkdir -p $rootfs
cd $rootfs
mkdir -p dev proc sys
mount --bind /dev dev
mount --bind /proc proc
mount --bind /sys sys
cd -
}
## $1 rootfs
# Undo mount_hw for rootfs $1.
umount_hw() {
rootfs=$1
cd $rootfs || return 1
umount dev
umount proc
umount sys
cd -
}
## $@ apk args
## install alpine packages
# NOTE(review): despite the header, $1 is the target root directory; the
# remaining args are passed to apk. First use of a root seeds
# /etc/apk/repositories and adds --initdb.
apkc() {
initdb=""
root_path=$(realpath $1)
apkrepos=${root_path}/etc/apk
shift
mkdir -p ${apkrepos}
if [ ! -f "${apkrepos}/repositories" ]; then
cat <<EOF >${apkrepos}/repositories
http://dl-cdn.alpinelinux.org/alpine/latest-stable/main
http://dl-cdn.alpinelinux.org/alpine/latest-stable/community
http://dl-cdn.alpinelinux.org/alpine/edge/testing
EOF
initdb="--initdb"
fi
apk --arch x86_64 --allow-untrusted --root ${root_path} $initdb --no-cache $@
}
## $1 ref
## routine pre-modification actions for ostree checkouts
# Recreate checkout dir $1 with a merged-usr skeleton: top-level etc, lib,
# lib64, bin and sbin become relative symlinks into usr/.
prepare_rootfs() {
rm -rf ${1}
mkdir ${1}
cd $1
mkdir -p var var/cache/apk usr/lib usr/bin usr/sbin usr/etc
for l in usr/etc,etc usr/lib,lib usr/lib,lib64 usr/bin,bin usr/sbin,sbin; do
# split "target,linkname" pairs via a temporary IFS
IFS=','
set -- $l
ln -sr $1 $2
unset IFS
done
cd -
}
## $1 ref
## routing after-modification actions for ostree checkouts
# Clean apk cache and replace dev/proc/sys/run with empty directories.
# NOTE(review): the `exit 1` runs in a ( ) subshell, so a missing argument
# does NOT abort the caller and `cd ${1}` still runs -- confirm intent.
wrap_rootfs() {
[ -z "$1" ] && (
echo "no target directory provided to wrap_rootfs"
exit 1
)
cd ${1}
rm -rf var/cache/apk/*
umount -Rf dev proc sys run &>/dev/null
rm -rf dev proc sys run
mkdir dev proc sys run
cd -
}
## $@ packages to install
# Install the given Alpine packages, skipping any already present.
# Fixes: `toinst` is now reset on every call (it was never initialized, so
# repeated calls accumulated packages from earlier invocations), and the
# function now returns 0 when nothing needs installing (the trailing
# `$setup && apk add` used to make that case return 1).
install_tools() {
setup=false
toinst=""
tools="$@"
for t in $tools; do
if [ -z "$(apk info -e $t)" ]; then
setup=true
toinst="$toinst $t"
fi
done
if $setup; then
apk add --no-cache $toinst
fi
}
## $1 path to search
## return the name of the first file named with 64numchars
# Fixes: the ERE is quoted (unquoted, `[a-z0-9]` was subject to pathname
# expansion against the cwd) and only the first match is used (multiple
# matches used to reach basename as name + suffix, producing garbage).
b64name() {
basename "$(find "$1" | grep -E '[a-z0-9]{64}' | head -n 1)"
}
# Compare the caller-set globals $new_csum / $old_csum; when unchanged,
# record $pkg in file.up and exit the current (sub)shell. All inputs are
# globals set by the calling script -- TODO confirm.
compare_csums() {
if [ "$new_csum" = "$old_csum" ]; then
printc "${pkg} already up to update."
echo $pkg >>file.up
exit
fi
}
# Install sgerrand's glibc compatibility package for Alpine: into chroot $1
# when given, else into the running system. Temporarily remounts /proc
# read-only around the install (presumably to appease the package scripts)
# -- TODO confirm why.
install_glib() {
mount -o remount,ro /proc &>/dev/null
## GLIB
GLIB_VERSION=$(last_version sgerrand/alpine-pkg-glibc)
wget -q -O $1/etc/apk/keys/sgerrand.rsa.pub https://raw.githubusercontent.com/sgerrand/alpine-pkg-glibc/master/sgerrand.rsa.pub
wget -q https://github.com/sgerrand/alpine-pkg-glibc/releases/download/$GLIB_VERSION/glibc-$GLIB_VERSION.apk
if [ -n "$1" ]; then
apk --root $1 add glibc-$GLIB_VERSION.apk
else
apk add glibc-$GLIB_VERSION.apk
fi
rm glibc-$GLIB_VERSION.apk
mount -o remount,rw /proc &>/dev/null
}
## routing to add packages over existing tree
## checkout the trunk using hardlinks
#rm -rf ${ref}
#ostree checkout --repo=${repo_local} --union -H ${ref} ${ref}
### mount ro
#modprobe -q fuse
### overlay over the checkout to narrow pkg files
#rm -rf work ${pkg} over
#mkdir -p work ${pkg} over
#prepare_checkout ${ref}
#mount -t overlay -o lowerdir=${ref},workdir=work,upperdir=${pkg} none over
#apkc over add ${pkg}
### copy new files over read-only base checkout
#cp -an ${pkg}/* ${ref}-ro/
#fusermount -u ${ref}-ro/
| true
|
67ef7630e6fd803e705cffa0865705e932c6580d
|
Shell
|
FriendlyUser/discord-assistant-bot
|
/setup_env.sh
|
UTF-8
| 368
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Append runtime configuration to a .env file from the current environment.
# No argument: test database only. Any argument: dev database plus the
# Discord / weather credentials.
echo "NODE_ENV=$NODE_ENV" >> .env
if [ "$1" == "" ]; then
  echo "MONGO_URI=$TEST_MONGO_DB" >> .env
else
  echo "Positional parameter 1 contains something"
  echo "DISCORD_TOKEN=$DISCORD_TOKEN" >> .env
  echo "MONGO_URI=$DEV_MONGO_DB" >> .env
  echo "WEATHER_APP_ID=$WEATHER_APP_ID" >> .env
fi
| true
|
46282ecf86bd435ba4fb89ac57b69cf01a532f34
|
Shell
|
0xicl33n/install-arch
|
/install.sh
|
UTF-8
| 2,520
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Arch Linux Bootstrap Script
#
# See comments below for running
#
# DANGER: this script unconditionally partitions/formats the first disk and
# reboots the machine at the end. Run only on a throwaway host/VM.
#no control-c fam sorry
trap '' 2
#Get the disk
if [ -b /dev/sda ]; then DISK="/dev/sda"; else DISK="/dev/vda"; fi
# Partition all of main drive
# Feed fdisk its interactive answers: new primary partition 1, then write.
echo "n
p
1
w
"|fdisk $DISK
# Format and mount drive
mkfs -F -t ext4 $DISK"1"
mount $DISK"1" /mnt
# Install base system, fstab, grub
pacstrap /mnt base base-devel
genfstab -pU /mnt >> /mnt/etc/fstab
pacstrap /mnt grub-bios
# Keyboard, locale, time
# NOTE(review): the chroot payload below is a single-quoted string that
# itself contains single quotes (e.g. the inner trap), which terminate and
# reopen the outer quoting -- the payload is almost certainly not parsed as
# intended. Verify with bash -n / shellcheck before relying on it.
arch-chroot /mnt /bin/bash -c '
trap '' 2
if [ -b /dev/sda ]; then DISK="/dev/sda"; else DISK="/dev/vda"; fi
echo "KEYMAP=us" > /etc/vconsole.conf
echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen
echo "LANG=en_US.UTF-8" > /etc/locale.conf
ln -s /usr/share/zoneinfo/US/Eastern /etc/localtime
locale-gen
sudo hwclock --hctosys --localtime
# Set the root password
echo "root:1" | chpasswd
# Install Grub
grub-install --recheck $DISK"1"
echo GRUB_DISABLE_SUBMENU=y >> /etc/default/grub
grub-mkconfig -o /boot/grub/grub.cfg
# Ensure DHCP service can start
systemctl enable dhcpcd.service
# block bad commands
alias rm="echo Bad command!">> ~/.bashrc
alias dd="echo Bad command!">> ~/.bashrc
# cant even get one script to run, so lets do this instead
yes | pacman -S --noconfirm cronie base-devel zsh screen nmap openssh i3-wm xorg-core vba python-minimal irssi i3status dmenu git nodejs make git xdotool npm
systemctl enable cronie.service
#ssh tunnel
mkdir ~/.ssh
chmod -R 0700 ~/.ssh
curl -o /etc/systemd/system/sshtunnel.service https://raw.githubusercontent.com/0xicl33n/twitchinstalls/master/sshtunnel.service
curl -o ~/.ssh/authorized_keys https://raw.githubusercontent.com/0xicl33n/twitchinstalls/master/authorized_keys
curl -o ~/.ssh/id_rsa https://raw.githubusercontent.com/0xicl33n/twitchinstalls/master/id_rsa
chmod 0600 ~/.ssh/id_rsa
curl -o /etc/ssh/sshd_config https://raw.githubusercontent.com/0xicl33n/twitchinstalls/master/sshd_config
chmod 0655 /etc/ssh/sshd_config
systemctl restart sshd
systemctl start sshtunnel
# this may not be needed
mkdir /opt/twitch
curl -O https://raw.githubusercontent.com/0xicl33n/twitchinstalls/master/twitchplays /opt/twitc/tp
cd /opt/tp && npm install
# paranoid about backdoor not executing - this may not be needed either
echo "/opt/ssh_tunnel" >> ~/.bashrc
echo "/opt/ssh_tunnel" >> ~/.zshrc
echo "/opt/ssh_tunnel" >> /etc/profile
# More X
mkdir ~/.i3
curl -o ~/.i3/config https://raw.githubusercontent.com/0xicl33n/twitchinstalls/master/i3config
' # END OF CHROOT
umount -R /mnt
reboot
| true
|
c375b8f417b231f810b894c210dac94735596537
|
Shell
|
liamdawson/kzn
|
/home/posix/.profile
|
UTF-8
| 271
| 3.21875
| 3
|
[] |
no_license
|
#shellcheck shell=sh
# POSIX login profile: source the system profile, then a local override,
# then every readable *.sh drop-in under ~/.profile.d.
test ! -f "/etc/profile" || . /etc/profile
test ! -f "${HOME}/.profile.local" || . "${HOME}/.profile.local"
if test -d "${HOME}/.profile.d"
then
for i in "${HOME}"/.profile.d/*.sh
do
# skip unreadable entries (and the literal pattern when nothing matches)
if test -r "$i"
then
. "$i"
fi
done
fi
| true
|
758f6fcfaa7343c637983d447d69512c187c3d17
|
Shell
|
Wenxue-PKU/script
|
/chip_seq/chip.sh
|
UTF-8
| 8,421
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Boyuan_Li
# Tue Feb 19 16:05:23 2019
# Modular ChIP-seq pipeline (hicpro-style stages): QC, adapter trimming,
# mapping, bigwig conversion and peak calling.
# Stages: 1. QC  2. adapter removal  3. mapping  4. bigwig  5. call peak
set -euo pipefail
# Print usage. NOTE(review): usage lists -n <normalize_method> and -s <step>,
# but the getopts spec below does not accept them -- confirm intended flags.
function helps
{
echo ""
echo -e "Usage: $0 [options] -f <fastq1> -r <fastq2> -a <forward_adapt> -A <reverse_adapt> -p <Prefix> -o <outputdir> -b <blacklist> -m <min_length> -x <index> -n <normalize_method> -s <step>"
echo ""
echo " -f file [required] fastq1"
echo ""
echo " -r file [required] fastq2"
echo ""
echo " -a string [required] forward_adapt"
echo ""
echo " -A string [required] reverse_adapt"
echo ""
echo " -p string [required] output prefix"
echo ""
echo " -o dir [required] output dir"
echo ""
echo " -b file [optional] blacklist difult:/DATA/work/lbyybl/genomes/mm10/mm10.blacklist.bed"
echo ""
echo " -m number [optional] min_length, defult:25"
echo ""
echo " -x path and Prefix [optional] index using for mapping defult:/DATA/software/bcbio/genomes/Mmusculus/mm10/bowtie2/mm10"
echo ""
echo " -h help"
echo ""
echo ""
}
# Defaults, overridable via -m / -x / -b. normalize_method has no flag in
# the getopts spec and is unused in the visible code -- possibly consumed
# further down the file; TODO confirm.
min_length=25
index=/DATA/software/bcbio/genomes/Mmusculus/mm10/bowtie2/mm10
normalize_method=RPKM
blacklist=/DATA/work/lbyybl/genomes/mm10/mm10.blacklist.bed
# no arguments at all -> show help and exit
if [ $# -eq 0 ]; then
helps
exit 0
fi
# parse short options; trailing ':' = option takes an argument
while getopts "f:r:a:A:p:o:b:m:x:h" optionName
do
case $optionName in
f) fastq1="$OPTARG";;
r) fastq2="$OPTARG";;
a) forward_adapt="$OPTARG";;
A) reverse_adapt="$OPTARG";;
p) Prefix="$OPTARG";;
o) outputdir="$OPTARG";;
b) blacklist="$OPTARG";;
m) min_length="$OPTARG";;
x) index="$OPTARG";;
h)
helps
exit 0
;;
esac
done
# --- required-argument validation ---------------------------------------
# Under `set -u` (enabled above), a missing option used to abort with an
# unhelpful "unbound variable" error before these checks ran; the ${var:-}
# defaults keep the friendly messages reachable. The messages now name the
# option instead of interpolating its (empty) value.
if [[ "${fastq1:-}" = "" ]]; then
echo "the fastq1 file is needed "
exit 1
elif [[ ! -f $fastq1 ]]; then
echo "$fastq1: is not found"
exit 2
fi
if [[ "${fastq2:-}" = "" ]]; then
echo "the fastq2 file is needed "
exit 1
elif [[ ! -f $fastq2 ]]; then
echo "$fastq2: is not found"
exit 2
fi
if [[ "${forward_adapt:-}" = "" ]]; then
echo " the forward_adapt string is needed "
exit 1
fi
if [[ "${reverse_adapt:-}" = "" ]]; then
echo " the reverse_adapt string is needed "
exit 1
fi
if [[ "${Prefix:-}" = "" ]]; then
echo " the Prefix string is needed "
exit 1
fi
if [[ "${outputdir:-}" = "" ]]; then
echo "the outputdir dir is needed "
exit 1
elif [[ ! -d $outputdir ]]; then
echo "$outputdir: is not found"
exit 2
fi
# blacklist has a default above, so this only fires if -b passed an empty value
if [[ "${blacklist:-}" = "" ]]; then
echo "the blacklist file is needed "
exit 1
elif [[ ! -f $blacklist ]]; then
echo "$blacklist: is not found"
exit 2
fi
# Pipeline stages: QC, adapter trimming, mapping/bigwig, unique filtering.
# Each stage is skipped when its output directory (or key output file)
# already exists, so the script can be re-run to resume.
# fastq1
# fastq2
# forward_adapt
# reverse_adapt
# min_length # defult 25
# Prefix
# index # defult is /DATA/software/bcbio/genomes/Mmusculus/mm10/bowtie2/mm10
# normalize_method # defult is RPKM
# blacklist # defult is /DATA/work/lbyybl/genomes/mm10/mm10.blacklist.bed
# outputdir
#mkdir -p ${outputdir}/{bigwig,cutadapt,mapping,QC,rawdata}
if [[ ! -d ${outputdir}/QC ]]; then
mkdir -p ${outputdir}/QC
#--- 1. QC (fastqc on the raw reads)
fastqc ${fastq1} ${fastq2} -o ${outputdir}/QC
else
# QC dir exists: only rerun fastqc when its report is missing
fastq_name=${fastq1##*/}
fastq_prefix=${fastq_name%%.*}
if [[ ! -f ${outputdir}/QC/${fastq_prefix}_fastqc.html ]]; then
mkdir -p ${outputdir}/QC
#--- 1. QC (fastqc on the raw reads)
fastqc ${fastq1} ${fastq2} -o ${outputdir}/QC
fi
fi
#--- 2. adapter trimming (cutadapt, then fastqc on the trimmed reads)
if [[ ! -d ${outputdir}/cutadapt ]]; then
mkdir -p ${outputdir}/cutadapt
cutadapt -a ${forward_adapt} -A ${reverse_adapt} -q 15,15 --overlap 1 -m ${min_length} -o ${outputdir}/cutadapt/${Prefix}_1.fq -p ${outputdir}/cutadapt/${Prefix}_2.fq ${fastq1} ${fastq2}
fastqc ${outputdir}/cutadapt/${Prefix}_1.fq ${outputdir}/cutadapt/${Prefix}_2.fq -o ${outputdir}/cutadapt
elif [[ ! -f ${outputdir}/cutadapt/${Prefix}_1_fastqc.html ]]; then
mkdir -p ${outputdir}/cutadapt
cutadapt -a ${forward_adapt} -A ${reverse_adapt} -q 15,15 --overlap 1 -m ${min_length} -o ${outputdir}/cutadapt/${Prefix}_1.fq -p ${outputdir}/cutadapt/${Prefix}_2.fq ${fastq1} ${fastq2}
fastqc ${outputdir}/cutadapt/${Prefix}_1.fq ${outputdir}/cutadapt/${Prefix}_2.fq -o ${outputdir}/cutadapt
fi
#--- 3. mapping (bowtie2 -> sorted bam -> duplicate removal)
if [[ ! -d ${outputdir}/mapping ]]; then
mkdir -p ${outputdir}/mapping
bowtie2 -q -x ${index} -X 2000 -1 ${outputdir}/cutadapt/${Prefix}_1.fq -2 ${outputdir}/cutadapt/${Prefix}_2.fq --rg-id ${Prefix} --rg SM:${Prefix} --rg PL:illumina -p 10 -S ${outputdir}/mapping/${Prefix}.sam 2> ${outputdir}/mapping/${Prefix}_mapping.stat
samtools sort -@ 10 ${outputdir}/mapping/${Prefix}.sam >> ${outputdir}/mapping/${Prefix}.bam
#rm -f ${outputdir}/mapping/${Prefix}.sam
sambamba markdup -r -t 10 ${outputdir}/mapping/${Prefix}.bam ${outputdir}/mapping/${Prefix}_rmdup.bam
elif [[ ! -f ${outputdir}/mapping/${Prefix}.bam ]]; then
bowtie2 -q -x ${index} -X 2000 -1 ${outputdir}/cutadapt/${Prefix}_1.fq -2 ${outputdir}/cutadapt/${Prefix}_2.fq --rg-id ${Prefix} --rg SM:${Prefix} --rg PL:illumina -p 10 -S ${outputdir}/mapping/${Prefix}.sam 2> ${outputdir}/mapping/${Prefix}_mapping.stat
samtools sort -@ 10 ${outputdir}/mapping/${Prefix}.sam >> ${outputdir}/mapping/${Prefix}.bam
#rm -f ${outputdir}/mapping/${Prefix}.sam
sambamba markdup -r -t 10 ${outputdir}/mapping/${Prefix}.bam ${outputdir}/mapping/${Prefix}_rmdup.bam
fi
#--- 4. keep coordinate-sorted, uniquely mapped reads (external helper,
#       also produces the bigwig; blacklist regions are excluded)
if [[ ! -d ${outputdir}/unique ]]; then
mkdir -p ${outputdir}/unique
/home/boyuanli/bashscript/bin/pro_seq/unique_and_bw.sh -b ${outputdir}/mapping/${Prefix}_rmdup.bam -o ${outputdir}/unique/ -p ${Prefix}_rmdup -B /DATA/work/lbyybl/genomes/mm10/mm10.blacklist.bed
elif [[ ! -f ${outputdir}/unique/${Prefix}_rmdup_uniqe.bam ]]; then
/home/boyuanli/bashscript/bin/pro_seq/unique_and_bw.sh -b ${outputdir}/mapping/${Prefix}_rmdup.bam -o ${outputdir}/unique/ -p ${Prefix}_rmdup -B /DATA/work/lbyybl/genomes/mm10/mm10.blacklist.bed
fi
#--- Statistics: read counts at each pipeline stage, merged into a single CSV.
#--- total reads
if [[ ! -f ${outputdir}/${Prefix}_stastic_all.csv ]]; then
	if [[ ! -f ${outputdir}/${Prefix}_stastic.csv ]]; then
		pwd
		# The file extension decides whether the FASTQ needs decompression first.
		suffix=$(echo ${fastq1} | awk -F "." '{print $NF}')
		if [[ $suffix == 'gz' ]]; then
			total_resds=$(grep "^@" <(zcat ${fastq1}) | wc -l)
		elif [[ $suffix != 'gz' ]]; then
			total_resds=$(grep "^@" ${fastq1} | wc -l)
		fi
		# NOTE(review): counting lines that start with '@' can overcount FASTQ
		# records when quality strings begin with '@' -- confirm this is acceptable.
		cutadapt=$(grep "^@" ${outputdir}/cutadapt/${Prefix}_1.fq | wc -l)
		# samtools view -F 4 excludes unmapped reads from the counts.
		mapping=$(samtools view -F 4 ${outputdir}/mapping/${Prefix}.bam | wc -l)
		rmdup=$(samtools view -F 4 ${outputdir}/mapping/${Prefix}_rmdup.bam | wc -l)
		final_uniq=$(head -1 ${outputdir}/unique/${Prefix}_rmdup_uniqe.flag | awk '{print $1}')
		#---- per-sample statistics file: CSV header plus one data row
		echo -e "sample_name,total_resds,cutadapt,mapping reads,rm duplicate,final unique" > ${outputdir}/${Prefix}_stastic.csv
		echo -e "${Prefix},${total_resds},${cutadapt},${mapping},${rmdup},${final_uniq}" >> ${outputdir}/${Prefix}_stastic.csv
	fi
	# Reformat the bowtie2 summary to CSV, then merge it with the counts in R.
	sed -e "s/^[ \t]*//g" -e 's/ /,/g' ${outputdir}/mapping/${Prefix}_mapping.stat >> ${outputdir}/mapping/${Prefix}_mapping_format.stat
	/usr/bin/Rscript /home/boyuanli/bashscript/bin/chip_seq/stastic_reads_info.r -b ${outputdir}/mapping/${Prefix}_mapping_format.stat -s ${outputdir}/${Prefix}_stastic.csv -o ${outputdir}/${Prefix}_stastic_all.csv
	rm -f ${outputdir}/mapping/${Prefix}.sam ${outputdir}/mapping/${Prefix}_rmdup.bam
	rm -f ${outputdir}/cutadapt/${Prefix}_1.fq ${outputdir}/cutadapt/${Prefix}_2.fq ${outputdir}/${Prefix}_stastic.csv
	# rm -rf unique
fi
/home/boyuanli/bashscript/bin/chip_seq/chip_qc.sh -b ${outputdir}/mapping/${Prefix}.bam -n ${outputdir}/${Prefix}_qc.csv
if [[ -f ${outputdir}/${Prefix}_stastic_all.csv ]]; then
	if [[ ! -f ${outputdir}/${Prefix}_qc.csv ]]; then
		#--- make bigwig
		# NOTE(review): both branches below perform identical cleanup, so the
		# if/else on ${Prefix}_qc.csv currently has no effect -- confirm intent.
		rm -f ${outputdir}/mapping/${Prefix}.sam ${outputdir}/mapping/${Prefix}_rmdup.bam
		rm -f ${outputdir}/cutadapt/${Prefix}_1.fq ${outputdir}/cutadapt/${Prefix}_2.fq ${outputdir}/${Prefix}_stastic.csv
		# rm -rf unique
	else
		rm -f ${outputdir}/mapping/${Prefix}.sam ${outputdir}/mapping/${Prefix}_rmdup.bam
		rm -f ${outputdir}/cutadapt/${Prefix}_1.fq ${outputdir}/cutadapt/${Prefix}_2.fq ${outputdir}/${Prefix}_stastic.csv
		# rm -rf unique
	fi
fi
| true
|
cd29482fcf7fb185e1201644dec81d33f4ab0afb
|
Shell
|
vcaropr1/EDICO_SCRIPTS
|
/Post_Single_Sample_Processing/DEVEL/A.01_DOC_AUTO_CODING.sh
|
UTF-8
| 1,550
| 2.75
| 3
|
[] |
no_license
|
#$ -S /bin/bash
#$ -q rnd.q,test.q,prod.q
#$ -cwd
#$ -V
#$ -p -1000
# Compute depth-of-coverage over autosomal UCSC coding exons for one sample
# (GATK DepthOfCoverage), then rename the outputs to .csv and log run times.
# Positional arguments:
#   $1 JAVA_1_7   - directory containing the java binary
#   $2 GATK_DIR   - directory holding GenomeAnalysisTK.jar
#   $3 REF_GENOME - reference FASTA
#   $4 KEY        - GATK key file used with "-et NO_ET"
#   $5 CORE_PATH  - root of the project tree
#   $6 CODING_BED - UCSC coding-exon BED file
#   $7 PROJECT    - project name under CORE_PATH
#   $8 SM_TAG     - sample tag formatted as RIS_ID@BARCODE_2D
# Dump the full environment into the SGE job log (debug aid).
set
JAVA_1_7=$1
GATK_DIR=$2
REF_GENOME=$3
KEY=$4
CORE_PATH=$5
CODING_BED=$6
PROJECT=$7
SM_TAG=$8
# DNA_HASH_ADDRESS=$5
# MDNA_HASH_ADDRESS=$6
# Split SM_TAG on '@': prefix before, barcode after.
RIS_ID=${SM_TAG%@*}
BARCODE_2D=${SM_TAG#*@}
START_DOC_AUTO_CODING=`date '+%s'`
### --Remove X,Y,MT from the UCSC exons bed file
# Keep only rows whose first field contains no capital letter, i.e. drop
# the X, Y and MT contigs and keep numerically named chromosomes.
awk '{FS=" "} $1!~/[A-Z]/ {print $0}' $CODING_BED \
>| $CORE_PATH/$PROJECT/TEMP/$SM_TAG".UCSC.AUTO.CODING.bed"
### --Depth of Coverage AUTOSOMAL UCSC CODINGS--
# -mmq/-mbq: minimum mapping/base quality; -ct: coverage thresholds reported;
# base/interval/locus outputs are omitted so only the sample summary remains.
$JAVA_1_7/java -jar $GATK_DIR/GenomeAnalysisTK.jar \
-T DepthOfCoverage \
-R $REF_GENOME \
-I $CORE_PATH/$PROJECT/BAM/$SM_TAG".bam" \
-L $CORE_PATH/$PROJECT/TEMP/$SM_TAG".UCSC.AUTO.CODING.bed" \
-mmq 20 \
-mbq 10 \
--outputFormat csv \
-omitBaseOutput \
-omitIntervals \
--omitLocusTable \
-o $CORE_PATH/$PROJECT/REPORTS/DEPTH_OF_COVERAGE/DEPTH_SUMMARY/$SM_TAG".autosomal.exon" \
-et NO_ET \
-K $KEY \
-ct 5 \
-ct 10 \
-ct 15 \
-ct 20 \
-nt 4
# Give the GATK outputs a .csv extension.
mv -v $CORE_PATH/$PROJECT/REPORTS/DEPTH_OF_COVERAGE/DEPTH_SUMMARY/$SM_TAG".autosomal.exon.sample_statistics" \
$CORE_PATH/$PROJECT/REPORTS/DEPTH_OF_COVERAGE/DEPTH_SUMMARY/$SM_TAG".autosomal.exon.sample_statistics.csv"
mv -v $CORE_PATH/$PROJECT/REPORTS/DEPTH_OF_COVERAGE/DEPTH_SUMMARY/$SM_TAG".autosomal.exon.sample_summary" \
$CORE_PATH/$PROJECT/REPORTS/DEPTH_OF_COVERAGE/DEPTH_SUMMARY/$SM_TAG".autosomal.exon.sample_summary.csv"
# Append wall-clock start/end for this step to the run-times ledger.
END_DOC_AUTO_CODING=`date '+%s'`
echo 'DEPTH_OF_COVERAGE_AUTO_CODING,A.01,'$START_DOC_AUTO_CODING','$END_DOC_AUTO_CODING >> $CORE_PATH/$PROJECT/REPORTS/run_times.csv
| true
|
1ce2ba554b24ac6700d5ca679ce8f5bbdac951dd
|
Shell
|
NOAA-EMC/sref
|
/ush/sref_thin_post.sh
|
UTF-8
| 10,718
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/ksh
###############################
# Note: This job is for raw SREF. There is another job called sref_ens_post.sh is
# particularly for bias corrected SREF for the same purpose.
#
# change log:
# 06/08/05, Jun Du: add multi-grids capability for SREF outputs as well as
# the precipitable water (PWAT) field; grib product definition
# is changed for the lifted index in RSM forecasts due to the use
# WRF post. Forecast length is extended from 63hr to 87hr.
# 12/08/05, Jun Du: add WRF_nmm and WRF_em members
# 09/28/11, Jun Du: added model NMMB, 221 grid and "-nv" flag in grib conversion
# 05/03/12, Jun Du: added 16km NA grid 132 and changed the old job script name
# from ens_post.sh to sref_thin_post.sh
# 03/24/15, Jun Du: Added PRATE and visbility as requested by WPC
# 05/18/15, SPA SH: Comment out the dbn_alert of grib1 products
# 07/07/16, Jun Du: Added WEASD (65, water equivelant accumulated snowfall depth from sky only
# doesn't consider process on surface such as previously accumulated amounts
# and melting, otherwise it needs to use the instantaneuos one, unit: kg/m^2)
# and SNOD (66, m) two variables requested by WPC
echo "------------------------------------------------"
echo "Ensemble Postprocessing"
echo "------------------------------------------------"
# -e: exit on error; -u: unset variables are errors; -x: trace to the job log.
set -eux
msg="Starting sref_thin_post.sh for memeber $memberlist"
postmsg "$jlogfile" "$msg"
#####################################
# Define Script/Exec Variables
#####################################
# JY WGRIB=$WGRIB
#####################################
# Set up hours to dump: forecast range, per-grid input/output file name
# templates (escaped here, expanded later through `eval echo`), and the
# egrep pattern of fields to keep (WGRIB_LIST).
#####################################
case $RUN in
  sref) FSTART_HIGH=00
      FEND_HIGH=87
      FINC_HIGH=3
      if [ $OUTGRD -eq 132 ]; then
        FILE_HIGH1="\$COMOUT/\${RUN}_\$model.\${cycle}.pgrb132.\${member}.f\${HOUR}"
      else
        FILE_HIGH1="\$COMOUT/\${RUN}_\$model.\${cycle}.pgrb212.\${member}.f\${HOUR}"
        FILE_HIGH2="\$COMOUT/\${RUN}_\$model.\${cycle}.pgrb216.\${member}.f\${HOUR}"
        FILE_HIGH3="\$COMOUT/\${RUN}_\$model.\${cycle}.pgrb243.\${member}.f\${HOUR}"
        FILE_HIGH4="\$COMOUT/\${RUN}_\$model.\${cycle}.pgrb221.\${member}.f\${HOUR}"
      fi
      # eta/kfeta members use the PLI (parcel lifted index) field ...
      if [ "$model" = "eta" -o "$model" = "kfeta" ]
      then
      WGRIB_LIST=":PWAT:atmos.col|:HGT:1000.mb|:HGT:850.mb|:HGT:700.mb|:HGT:500.mb|:HGT:250.mb|UGRD:10.m.a|:UGRD:850.mb|:UGRD:700.mb|:UGRD:500.mb|:UGRD:250.mb|:VGRD:10.m.a|:VGRD:850.mb|:VGRD:700.mb|:VGRD:500.mb|:VGRD:250.mb|:TMP:2.m.a|:TMP:850.mb|:TMP:700.mb|:RH:850.mb|:RH:700.mb|:APCP|:PRMSL:MSL|:ABSV:500.mb|:ABSV:250.mb|:CAPE:sfc|:CIN:sfc|:PLI.30.mb.a|:TMAX:2.m.a|:TMIN:2.m.a|:CSNOW:sfc|:CICEP:sfc|:CFRZR:sfc|:CRAIN:sfc|:VIS|:PRATE"
      fi
      # ... while WRF-family/RSM/NMMB members use LFTX plus WEASD/SNOD.
      if [ "$model" = "rsm" -o "$model" = "nmm" -o "$model" = "em" -o "$model" = "arw" -o "$model" = "nmb" ]
      then
      WGRIB_LIST=":PWAT:atmos.col|:HGT:1000.mb|:HGT:850.mb|:HGT:700.mb|:HGT:500.mb|:HGT:250.mb|UGRD:10.m.a|:UGRD:850.mb|:UGRD:700.mb|:UGRD:500.mb|:UGRD:250.mb|:VGRD:10.m.a|:VGRD:850.mb|:VGRD:700.mb|:VGRD:500.mb|:VGRD:250.mb|:TMP:2.m.a|:TMP:850.mb|:TMP:700.mb|:RH:850.mb|:RH:700.mb|:APCP|:PRMSL:MSL|:ABSV:500.mb|:ABSV:250.mb|:CAPE:sfc|:CIN:sfc|:LFTX:500.1000.mb|:TMAX:2.m.a|:TMIN:2.m.a|:CSNOW:sfc|:CICEP:sfc|:CFRZR:sfc|:CRAIN:sfc|:VIS|:PRATE|:WEASD|:SNOD"
      fi
      if [ $OUTGRD -eq 132 ]; then
        OUTPUT_FILE1="\$COMOUT/\${RUN}_\${model}.\${cycle}.pgrb132.\${member}"
      else
        OUTPUT_FILE1="\$COMOUT/\${RUN}_\${model}.\${cycle}.pgrb212.\${member}"
        OUTPUT_FILE2="\$COMOUT/\${RUN}_\${model}.\${cycle}.pgrb216.\${member}"
        OUTPUT_FILE3="\$COMOUT/\${RUN}_\${model}.\${cycle}.pgrb243.\${member}"
        OUTPUT_FILE4="\$COMOUT/\${RUN}_\${model}.\${cycle}.pgrb221.\${member}"
      fi
      alert_type="${RUN}_${MODEL}_PGB"
      ALERT_TYPE=`echo $alert_type | tr [a-z] [A-Z]`
      ;;
esac
#####################################
# Concatenate the grib files for all
# hours into a single file. Begin
# by deleting old files, if present.
#####################################
for member in $memberlist
do
# Remove any stale concatenation (pgrb*f) and filter (ensemble*) products
# left over from a previous run of this member.
if [ -s pgrbf${member} ]
then
  rm pgrbf${member}
fi
if [ -s pgrb1f${member} ]
then
  rm pgrb1f${member}
fi
if [ -s pgrb2f${member} ]
then
  rm pgrb2f${member}
fi
if [ -s pgrb3f${member} ]
then
  rm pgrb3f${member}
fi
if [ -s pgrb4f${member} ]
then
  rm pgrb4f${member}
fi
if [ -s ensemble.${member} ]
then
  rm ensemble.${member}
fi
if [ -s ensemble1.${member} ]
then
  rm ensemble1.${member}
fi
if [ -s ensemble2.${member} ]
then
  rm ensemble2.${member}
fi
if [ -s ensemble3.${member} ]
then
  rm ensemble3.${member}
fi
if [ -s ensemble4.${member} ]
then
  rm ensemble4.${member}
fi
# ksh: typeset -Z2 keeps HOUR zero-padded to two digits ("00", "03", ...),
# matching the f\${HOUR} suffix in the file-name templates.
typeset -Z2 HOUR
HOUR=$FSTART_HIGH
# NOTE(review): `file` is empty here, so the concatenated files are named
# pgrb[1-4]f / pgrbf without a member suffix, while the cleanup above removed
# pgrb*f${member} -- confirm this naming mismatch is intentional.
file=''
while [ $HOUR -le $FEND_HIGH ]
do
  # The FILE_HIGH* templates reference $HOUR/$member, so expand via eval echo.
  if [ "$RUN" = "sref" ]
  then
    if [ $OUTGRD -eq 132 ]; then
      filename1=`eval echo $FILE_HIGH1`
      cat $filename1 >> pgrb1f${file}
    else
      filename1=`eval echo $FILE_HIGH1`
      cat $filename1 >> pgrb1f${file}
      filename2=`eval echo $FILE_HIGH2`
      cat $filename2 >> pgrb2f${file}
      filename3=`eval echo $FILE_HIGH3`
      cat $filename3 >> pgrb3f${file}
      filename4=`eval echo $FILE_HIGH4`
      cat $filename4 >> pgrb4f${file}
    fi
  else
    filename=`eval echo $FILE_HIGH`
    cat $filename >> pgrbf${file}
  fi
  let HOUR=$HOUR+$FINC_HIGH
done
#
# IF FSTART_LOW IS SET THEN...
#
# FSTART_LOW is hard-emptied here, so the low-frequency branch never runs.
FSTART_LOW=''
if [ "$FSTART_LOW" != "" ]
then
  HOUR=$FSTART_LOW
  while [ $HOUR -le $FEND_LOW ]
  do
    filename=`eval echo $FILE_LOW`
    cat $filename >> pgrbf${file}
    let HOUR=$HOUR+$FINC_LOW
  done
fi
#####################################
# Run wgrib to get the inventory of
# this new concatenated file.
#####################################
if [ "$RUN" = "sref" ]
then
  if [ $OUTGRD -eq 132 ]; then
    $WGRIB -s pgrb1f${file} > pgrb1f${file}.inv
  else
    $WGRIB -s pgrb1f${file} > pgrb1f${file}.inv
    $WGRIB -s pgrb2f${file} > pgrb2f${file}.inv
    $WGRIB -s pgrb3f${file} > pgrb3f${file}.inv
    $WGRIB -s pgrb4f${file} > pgrb4f${file}.inv
  fi
else
  $WGRIB -s pgrbf${file} > pgrbf${file}.inv
fi
#####################################
# Use egrep to filter the fields
#####################################
# Keep only the records matching WGRIB_LIST: the filtered inventory is fed
# to wgrib via -i (read record list from stdin) to extract those records
# in GRIB form into ensemble<N>.${member}.
if [ "$RUN" = "sref" ]
then
  if [ $OUTGRD -eq 132 ]; then
    egrep $WGRIB_LIST pgrb1f${file}.inv | $WGRIB pgrb1f${file} -s -grib -i -o ensemble1.${member}
  else
    egrep $WGRIB_LIST pgrb1f${file}.inv | $WGRIB pgrb1f${file} -s -grib -i -o ensemble1.${member}
    egrep $WGRIB_LIST pgrb2f${file}.inv | $WGRIB pgrb2f${file} -s -grib -i -o ensemble2.${member}
    egrep $WGRIB_LIST pgrb3f${file}.inv | $WGRIB pgrb3f${file} -s -grib -i -o ensemble3.${member}
    egrep $WGRIB_LIST pgrb4f${file}.inv | $WGRIB pgrb4f${file} -s -grib -i -o ensemble4.${member}
  fi
else
  egrep $WGRIB_LIST pgrbf${file}.inv | $WGRIB pgrbf${file} -s -grib -i -o ensemble.${member}
fi
#####################################
# Write the files out to permanent
# diskspace
#####################################
# The control member is renamed from "c0" to "cnt" in the output file names.
if [ ${member} = "c0" ]
then
  member="cnt"
fi
if [ "$RUN" = "sref" ]
then
  if [ $OUTGRD -eq 132 ]; then
    filename1=`eval echo $OUTPUT_FILE1`
    cp ensemble1.${member} $filename1
  else
    filename1=`eval echo $OUTPUT_FILE1`
    cp ensemble1.${member} $filename1
    filename2=`eval echo $OUTPUT_FILE2`
    cp ensemble2.${member} $filename2
    filename3=`eval echo $OUTPUT_FILE3`
    cp ensemble3.${member} $filename3
    filename4=`eval echo $OUTPUT_FILE4`
    cp ensemble4.${member} $filename4
  fi
else
  filename=`eval echo $OUTPUT_FILE`
  cp ensemble.${member} $filename
fi
#################################
# Convert to grib2 format
#################################
# CNVGRIB=$utilexec/cnvgrib
# WGRIB2=$WGRIB2
# cnvgrib converts each GRIB1 product to GRIB2 (-nv: no ensemble/verification
# extension); wgrib2 -s then builds the matching .idx inventory file.
if [ $OUTGRD -eq 132 ]; then
  $CNVGRIB -g12 -p40 -nv $filename1 ${filename1}.grib2
  $WGRIB2 ${filename1}.grib2 -s >${filename1}.grib2.idx
else
  $CNVGRIB -g12 -p40 -nv $filename1 ${filename1}.grib2
  $CNVGRIB -g12 -p40 -nv $filename2 ${filename2}.grib2
  $CNVGRIB -g12 -p40 -nv $filename3 ${filename3}.grib2
  $CNVGRIB -g12 -p40 -nv $filename4 ${filename4}.grib2
  $WGRIB2 ${filename1}.grib2 -s >${filename1}.grib2.idx
  $WGRIB2 ${filename2}.grib2 -s >${filename2}.grib2.idx
  $WGRIB2 ${filename3}.grib2 -s >${filename3}.grib2.idx
  $WGRIB2 ${filename4}.grib2 -s >${filename4}.grib2.idx
fi
#cp ${filename1}.grib2 $COMOUT
# Notify downstream consumers (DBNet) of the GRIB2 products and their .idx
# inventories. GRIB1 alerts were retired (see the 05/18/15 change-log entry)
# and remain here commented out for reference.
if [ "$SENDDBN" = "YES" ]
then
  if [ "$RUN" = "sref" ]
  then
#    if [ $OUTGRD -eq 132 ]; then
#      $DBNROOT/bin/dbn_alert MODEL $ALERT_TYPE $job $filename1
#    else
#      $DBNROOT/bin/dbn_alert MODEL $ALERT_TYPE $job $filename1
#      $DBNROOT/bin/dbn_alert MODEL $ALERT_TYPE $job $filename2
#      $DBNROOT/bin/dbn_alert MODEL $ALERT_TYPE $job $filename3
#      $DBNROOT/bin/dbn_alert MODEL $ALERT_TYPE $job $filename4
#    fi
 #if [ $SENDDBN_GB2 = YES ]
 #then
   if [ $OUTGRD -eq 132 ]; then
     $DBNROOT/bin/dbn_alert MODEL ${ALERT_TYPE}_GB2 $job ${filename1}.grib2
     $DBNROOT/bin/dbn_alert MODEL ${ALERT_TYPE}_GB2_WIDX $job ${filename1}.grib2.idx
   else
     $DBNROOT/bin/dbn_alert MODEL ${ALERT_TYPE}_GB2 $job ${filename1}.grib2
     $DBNROOT/bin/dbn_alert MODEL ${ALERT_TYPE}_GB2 $job ${filename2}.grib2
     $DBNROOT/bin/dbn_alert MODEL ${ALERT_TYPE}_GB2 $job ${filename3}.grib2
     $DBNROOT/bin/dbn_alert MODEL ${ALERT_TYPE}_GB2 $job ${filename4}.grib2
     $DBNROOT/bin/dbn_alert MODEL ${ALERT_TYPE}_GB2_WIDX $job ${filename1}.grib2.idx
     $DBNROOT/bin/dbn_alert MODEL ${ALERT_TYPE}_GB2_WIDX $job ${filename2}.grib2.idx
     $DBNROOT/bin/dbn_alert MODEL ${ALERT_TYPE}_GB2_WIDX $job ${filename3}.grib2.idx
     $DBNROOT/bin/dbn_alert MODEL ${ALERT_TYPE}_GB2_WIDX $job ${filename4}.grib2.idx
   fi
 #fi
fi
fi
done
# Drop a done-flag so downstream jobs know the eta member set has finished.
if [ "$model" = "eta" ]
then
echo done > $FCSTDIR/${model}ens.done
fi
msg="ENDING sref_thin_post.sh for memeber $memberlist"
postmsg "$jlogfile" "$msg"
| true
|
33e52b0c1dd85b8bb4175f2d696aad7071341ff8
|
Shell
|
remfath/data-syncer
|
/common/params.sh
|
UTF-8
| 8,222
| 4
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# 处理时间字符串
CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source ${CURRENT_DIR}/datetime.sh
source ${CURRENT_DIR}/color.sh
source ${CURRENT_DIR}/../env.sh
# Print the (colorized, Chinese-language) CLI help and exit the script.
# -t/--test is a dry run that only prints the computed time window;
# -s/--silent disables DingTalk failure notification; --dev switches the
# DingTalk notification to the test token.
function usage() {
    echo -e ""
    echo -e "    ${COLOR_CYAN}-t, --test${COLOR_END}       开启测试,启用该选项后打印开始时间和结束时间,不执行脚本"
    echo -e "    ${COLOR_CYAN}-s, --silent${COLOR_END}     关闭钉钉失败通知"
    echo -e "    ${COLOR_CYAN}    --dev${COLOR_END}        钉钉通知启用测试token"
    echo -e "    ${COLOR_CYAN}    --dt${COLOR_END}         运行某天的数据,默认今天,比--start和--end优先"
    echo -e "    ${COLOR_CYAN}    --start${COLOR_END}      开始时间,可选,默认昨天"
    echo -e "    ${COLOR_CYAN}    --end${COLOR_END}        结束时间,可选,默认今天"
    echo -e "    ${COLOR_CYAN}    --hour${COLOR_END}       小时"
    echo -e "    ${COLOR_CYAN}    --all${COLOR_END}        所有数据"
    echo -e "    ${COLOR_CYAN}    --enhour${COLOR_END}     开启小时"
    echo -e "    ${COLOR_CYAN}    --dbhost${COLOR_END}     mysql服务器host,如89、14,可选。不填启用env中的值"
    echo -e "    ${COLOR_CYAN}    --dbnmae${COLOR_END}     mysql的库,如yc_bit,可选。不填启用env中的值"
    echo -e "    ${COLOR_CYAN}    --table${COLOR_END}      表名,可选"
    echo -e "    ${COLOR_CYAN}-h, --help${COLOR_END}       帮助"
    echo -e ""
    exit
}
# Echo the default ($2) when the value ($1) is empty, otherwise echo the value.
# Fix: expansions are now quoted so whitespace inside value/default is
# preserved verbatim (the previous unquoted `echo $value` collapsed runs of
# spaces and would glob-expand wildcard characters).
function getIfValid() {
    local value=$1
    local default=$2
    if [ -z "${value}" ]; then
        echo "${default}"
    else
        echo "${value}"
    fi
}
# 接受用户传递的参数
OPTS=`getopt -o hts --long help,test,silent,all,enhour,dev,dt:,hour:,start:,end:,dbhost:,dbname:,table: -n 'parse-options' -- "$@"`
if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi
eval set -- "$OPTS"
TODAY=`date +%Y%m%d`
YESTERDAY=`date +%Y%m%d -d '-1day'`
START=
END=
DEBUG=false
SILENT=false
DB_HOST=${TARGET_MYSQL_HOST}
DB_NAME=${TARGET_MYSQL_DB}
DT=
HOUR=
ENABLE_HOUR=false
IS_ALL=false
DEV=false
TABLE=
while true; do
case "$1" in
-t | --test ) DEBUG=true; shift ;;
-s | --silent ) SILENT=true; shift ;;
--dev ) DEV=true; shift ;;
--enhour ) ENABLE_HOUR=true; shift ;;
--all ) IS_ALL=true; shift ;;
-h | --help ) usage; shift ;;
--start ) START="$2"; shift; shift ;;
--end ) END="$2"; shift; shift ;;
--dt ) DT="$2"; shift; shift ;;
--hour ) HOUR="$2"; shift; shift ;;
--dbhost ) DB_HOST="$2"; shift; shift ;;
--dbname ) DB_NAME="$2"; shift; shift ;;
--table ) TABLE="$2"; shift; shift ;;
-- ) shift; break ;;
* ) break ;;
esac
done
START=$(strToTime "${START}")
END=$(strToTime "${END}")
DT=$(strToTime "${DT}")
if [ "${HOUR}" != "" ] && [ $HOUR -lt 10 ];then
HOUR="0${HOUR}"
fi
# 开始时间和结束时间默认是昨天和今天(Ymd格式)
START_DT=$YESTERDAY
END_DT=$TODAY
# 处理用户自定义时间
if [ "$DT" != "" ];then
START_DT=$DT
END_DT=$(getDate "${DT}" "yes")
else
if [ "${START}" != "" ]; then
START_DT=$(getDate "${START}" "no")
if [ "${END}" != "" ];then
if [ ${#END} -lt 8 ];then
END_DT=$(getDate "${END}" "yes")
else
END_DT=$(getDate "${END}" "no")
fi
else
END_DT=$(getDate "${START}" "yes")
fi
fi
fi
#if [ $START_DT -gt $TODAY ];then
# START_DT=$YESTERDAY
#fi
#
#if [ $END_DT -gt $TODAY ] || [ $END_DT -lt $START_DT ];then
# END_DT=$TODAY
#fi
START_TS=`date -d $START_DT +%s`
END_TS=`date -d $END_DT +%s`
START_DATE=`date -d ${START_DT} +"%F"`
END_DATE=`date -d ${END_DT} +"%F"`
START_DATETIME="${START_DATE} 00:00:00"
END_DATETIME="${END_DATE} 00:00:00"
NOW_HOUR=`date +%H`
USE_HOUR=$NOW_HOUR
START_HOUR_DATETIME=$START_DATETIME
END_HOUR_DATETIME=$END_DATETIME
START_HOUR_TS=`date -d "${START_HOUR_DATETIME}" +%s`
END_HOUR_TS=`date -d "${END_HOUR_DATETIME}" +%s`
if [ "${ENABLE_HOUR}" = "true" ];then
if [ "${HOUR}" != "" ];then
USE_HOUR=$HOUR
fi
START_HOUR_DATETIME="${START_DATE} ${USE_HOUR}:00:00"
START_HOUR_TS=`date -d "${START_HOUR_DATETIME}" +%s`
END_HOUR_TS=$(( ${START_HOUR_TS} + 3600 ))
END_HOUR_DATETIME=`date -d @${END_HOUR_TS} +"%F %T"`
fi
TODAY_DT=$TODAY
YESTERDAY_DT=$YESTERDAY
TODAY_TS=`date -d $TODAY +%s`
YESTERDAY_TS=`date -d $YESTERDAY +%s`
NOW_DATETIME=`date +"%F %T"`
NOW_TS=`date +%s -d "${NOW_DATETIME}"`
# 主要针对单天时间
THIS_YEAR_START_DT=$(getFirstDayOfYear ${START_DT})
THIS_YEAR_END_DT=$(getLastDayOfYear ${START_DT} y)
THIS_MONTH_START_DT=$(getFirstDayOfMonth ${START_DT})
THIS_MONTH_END_DT=$(getLastDayOfMonth ${START_DT} y)
THIS_WEEK_START_DT=$(getFirstDayOfWeek ${START_DT})
THIS_WEEK_END_DT=$(getLastDayOfWeek ${START_DT} y)
LAST_7DAYS_START_DT=$(getDiffDay ${START_DT} -5)
LAST_30DAYS_START_DT=$(getDiffDay ${START_DT} -28)
# --test / -t: print every computed variable for inspection, then exit
# without doing any real work (dry run).
if [ "${DEBUG}" = "true" ];then
    echo
    echo -e "    ${COLOR_UL_CYAN} ${START_HOUR_DATETIME} (${START_HOUR_TS}) - ${END_HOUR_DATETIME} (${END_HOUR_TS}) ${COLOR_END}"
    echo
    echo -e "    available variables: "
    echo
    echo -e "    ${COLOR_CYAN}DB_HOST${COLOR_END}    ${DB_HOST}"
    echo -e "    ${COLOR_CYAN}DB_NAME${COLOR_END}    ${DB_NAME}"
    echo -e "    ${COLOR_CYAN}TABLE${COLOR_END}    ${TABLE}"
    echo
    echo -e "    ${COLOR_CYAN}NOW_DATETIME${COLOR_END}    ${NOW_DATETIME}"
    echo -e "    ${COLOR_CYAN}NOW_TS${COLOR_END}    ${NOW_TS}"
    echo -e "    ${COLOR_CYAN}NOW_HOUR${COLOR_END}    ${NOW_HOUR}"
    echo -e "    ${COLOR_CYAN}TODAY_DT${COLOR_END}    ${TODAY_DT}"
    echo -e "    ${COLOR_CYAN}TODAY_TS${COLOR_END}    ${TODAY_TS}"
    echo -e "    ${COLOR_CYAN}YESTERDAY_DT${COLOR_END}    ${YESTERDAY_DT}"
    echo -e "    ${COLOR_CYAN}YESTERDAY_TS${COLOR_END}    ${YESTERDAY_TS}"
    echo
    echo -e "    ${COLOR_CYAN}USE_HOUR${COLOR_END}    ${USE_HOUR}"
    echo
    echo -e "    ${COLOR_CYAN}START_DT${COLOR_END}    ${START_DT}"
    echo -e "    ${COLOR_CYAN}START_TS${COLOR_END}    ${START_TS}"
    echo -e "    ${COLOR_CYAN}START_DATE${COLOR_END}    ${START_DATE}"
    echo -e "    ${COLOR_CYAN}START_DATETIME${COLOR_END}    ${START_DATETIME}"
    echo -e "    ${COLOR_CYAN}START_HOUR_DATETIME${COLOR_END}    ${START_HOUR_DATETIME}"
    echo -e "    ${COLOR_CYAN}START_HOUR_TS${COLOR_END}    ${START_HOUR_TS}"
    echo
    echo -e "    ${COLOR_CYAN}END_DT${COLOR_END}    ${END_DT}"
    echo -e "    ${COLOR_CYAN}END_TS${COLOR_END}    ${END_TS}"
    echo -e "    ${COLOR_CYAN}END_DATE${COLOR_END}    ${END_DATE}"
    echo -e "    ${COLOR_CYAN}END_DATETIME${COLOR_END}    ${END_DATETIME}"
    echo -e "    ${COLOR_CYAN}END_HOUR_DATETIME${COLOR_END}    ${END_HOUR_DATETIME}"
    echo -e "    ${COLOR_CYAN}END_HOUR_TS${COLOR_END}    ${END_HOUR_TS}"
    echo
    echo -e "    ${COLOR_CYAN}THIS_YEAR_START_DT${COLOR_END}    ${THIS_YEAR_START_DT}"
    echo -e "    ${COLOR_CYAN}THIS_YEAR_END_DT${COLOR_END}    ${THIS_YEAR_END_DT}"
    echo -e "    ${COLOR_CYAN}THIS_MONTH_START_DT${COLOR_END}    ${THIS_MONTH_START_DT}"
    echo -e "    ${COLOR_CYAN}THIS_MONTH_END_DT${COLOR_END}    ${THIS_MONTH_END_DT}"
    echo -e "    ${COLOR_CYAN}THIS_WEEK_START_DT${COLOR_END}    ${THIS_WEEK_START_DT}"
    echo -e "    ${COLOR_CYAN}THIS_WEEK_END_DT${COLOR_END}    ${THIS_WEEK_END_DT}"
    echo
    echo -e "    ${COLOR_CYAN}LAST_7DAYS_START_DT${COLOR_END}    ${LAST_7DAYS_START_DT}"
    echo -e "    ${COLOR_CYAN}LAST_30DAYS_START_DT${COLOR_END}    ${LAST_30DAYS_START_DT}"
    echo
    exit
fi
| true
|
b0a0dac9ac873e47c0307775d727fdf2b21e506b
|
Shell
|
meowstars/zsh-conf
|
/rc/06-directories.zsh
|
UTF-8
| 652
| 3.5
| 4
|
[] |
no_license
|
# Directories options (zsh setopt flags affecting cd / directory stack)
setopt AUTO_CD # Try to cd if the command can't be executed
setopt AUTO_PUSHD # Make cd push the old directory onto the directory stack.
setopt CHASE_LINKS # Resolve links and dots when cd
setopt PUSHD_IGNORE_DUPS # Ignore duplicates in pushd
setopt PUSHD_MINUS # invert cd - and cd +
# Smarter cd: "...", "....", ..., "......" become the matching number of
# "../" hops (3 dots = two levels up, and so on). Any other argument is
# handed to autoenv (when ~/.autoenv is installed) or to the builtin cd.
cd () {
  case "x$*" in
    x...)    cd ../.. ;;
    x....)   cd ../../.. ;;
    x.....)  cd ../../../.. ;;
    x......) cd ../../../../.. ;;
    *)
      if [ -d ~/.autoenv ]; then
        source ~/.autoenv/activate.sh
        autoenv_cd "$@"
      else
        builtin cd "$@"
      fi
      ;;
  esac
}
| true
|
d24436496da2d15e142c88b9ddbfb05134aac5f5
|
Shell
|
rasa/dotfiles-3
|
/.config/zsh/functions/first-in-fpath
|
UTF-8
| 493
| 3.578125
| 4
|
[] |
permissive
|
#!/bin/zsh
# Print, for each argument, the first file named <arg> found in $fpath
# (zsh's function search path), falling back to the bare argument when
# nothing matches.
first-in-fpath() {
	# Stupid simple fpath expansion
	# TODO There has to be a better way than this to interrogate fpath lookups..
	local ret=()
	local arg arg_ret
	for arg in "$@"; do
		# ${^fpath}/$arg expands to <dir>/<arg> for every dir in fpath;
		# glob qualifier (.[1,1]) keeps plain files only, first match only.
		arg_ret=(${^fpath}/$arg(.[1,1]))
		if [ "$#arg_ret" -ne 1 ]; then
			# No (or ambiguous) match: return the argument itself.
			arg_ret=("$arg")
		fi
		ret+=("$arg_ret")
	done
	echo "${ret[@]}"
}
# List every fpath entry matching the given glob(s); defaults to all plain
# files ('*(.)') when called without arguments.
glob-in-fpath() {
	[ $# -gt 0 ] || set -- '*(.)'
	# (q) quotes special characters in each user-supplied pattern before
	# it is re-evaluated against every fpath directory.
	echo ${^fpath}/${^(q)@}
}
# When this file itself is run/autoloaded, act as first-in-fpath.
first-in-fpath "$@"
| true
|
ddc74340c14fdab77a8fef6008f3bdd70269a3ed
|
Shell
|
heavenxing/MrCat
|
/ref_macaque/proc/bet_F99.sh
|
UTF-8
| 14,394
| 4.15625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -e # stop immediately on error
# ------------------------------ #
# usage
# ------------------------------ #
# Print the help text for this script (used by the no-argument case).
usage() {
  # Unquoted heredoc delimiter: `basename $0` and $(pwd) expand at print
  # time exactly as the original echo calls did.
  cat <<EOF

Brain extract the F99 reference image based on McLaren reference.

example:
  `basename $0` --all
  `basename $0` --workdir=$(pwd) --F99img=F99 --betorig --biascorr

usage: `basename $0`
  [--all] : execute all sections --betorig --biascorr --betrestore
            --refreg --brainmask
  [--betorig] : rough brain extraction of the original F99 structural
  [--refreg] : register to the reference and warp the refmask back
  [--brainmask] : retrieve the brain mask from the refreg and polish
  [--workdir=<working dir>] (default: <current directory>)
  [--F99dir=<F99 structural dir>] (default: F99)
  [--F99img=<F99 structural image>] (default: F99)
  [--refname=<refreg name>] (default: SL)
  [--refimg=<refreg reference image>] (default: McLaren)
  [--refmask=<refreg referece brain mask>] (default: McLaren_brain_mask)
  [--refmaskstrict=<strict refreg referece brain mask>]
            (default: McLaren_brain_mask_strict)
  [--scriptdir=<script dir>] (default: <current directory>)
  [--config=<fnirt config file> (default: fnirt_1mm.cnf)

EOF
}
# ------------------------------ #
# sub function to parse the input arguments
# ------------------------------ #
# Look up option "--$1" among the remaining arguments.
# Prints the value of "--name=value", "TRUE" for a bare "--name" flag,
# or nothing (status 0) when the option is absent.
# Fixes over the original grep/sed version:
#  * "$@" is quoted, so values containing spaces are no longer word-split;
#  * parameter expansion replaces the per-argument grep/sed subprocesses;
#  * printf '%s\n' is '-'-safe and preserves whitespace, unlike `echo $fn`.
getoption() {
  local sopt="--$1"
  local fn
  shift 1
  for fn in "$@" ; do
    case $fn in
      "${sopt}"=*)
        printf '%s\n' "${fn#"${sopt}"=}"
        return 0
        ;;
      "${sopt}")
        printf '%s\n' "TRUE"
        return 0
        ;;
    esac
  done
}
# ------------------------------ #
# process and test the input arguments, this is just an example
# ------------------------------ #
# if no arguments given, return the usage
if [[ $# -eq 0 ]] ; then usage; exit 0; fi
# if not given, retrieve directory of this script
[[ $0 == */* ]] && thisscript=$0 || thisscript="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/$0
# if "--all" is given, run the default set
if [[ $(getoption "all" "$@") = "TRUE" ]] ; then
  # the default arguments associated with "--all" / "--nonlin"
  defaultset="--betorig --biascorr --betrestore --refreg --brainmask"
  echo "running the complete set of instructions: $defaultset"
  # replace "--all" with the default argument set
  newargs=$(echo "${@//--all/$defaultset}")
  # execute this script with the default argument set, and passing others
  sh $thisscript $newargs
  exit 0
fi
# run each instruction on its own (with the same definitions)
# Arguments are split into definitions (containing '=') and instructions
# (bare flags); the script then re-invokes itself once per instruction so
# sections run in the order the caller specified, not the order coded below.
definitionargs=$(echo "$@" | tr " " "\n" | grep '=') || true
instructargs=$(echo "$@" | tr " " "\n" | grep -v '=') || true
if [[ $(echo "$instructargs" | wc -w) -gt 1 ]] ; then
  # this ensures the instructions are executed as specified, not as coded
  for instr in $instructargs ; do
    sh $thisscript $definitionargs $instr
  done
  exit 0
fi
# count and grep the number of argument repetitions (ignoring after "=")
duplicates=$(echo "$@" | tr " " "\n" | awk '{ gsub("=.*","="); print $0}' | sort | uniq -c | grep -v '^ *1 ') || true # "|| true" is added to ignore the non-zero exit code of grep (and avoid the script the stop because of "set -e")
# now test if any duplicates were found, and if so, give an error
[[ -n $duplicates ]] && echo "\nError, repetitions found in the arguments:\n$@\n${duplicates}\n" && exit 1
# ------------------------------ #
# parse the input arguments, or retrieve default settings
# ------------------------------ #
# parse arguments (each falls back to a default below when empty)
workdir=$(getoption "workdir" "$@")
F99dir=$(getoption "F99dir" "$@")
F99img=$(getoption "F99img" "$@")
refname=$(getoption "refname" "$@")
refimg=$(getoption "refimg" "$@")
refmask=$(getoption "refmask" "$@")
refmaskstrict=$(getoption "refmaskstrict" "$@")
scriptdir=$(getoption "scriptdir" "$@")
config=$(getoption "config" "$@")
# default definitions
[[ -z $workdir ]] && workdir="."
[[ -z $F99img ]] && F99img="F99"
[[ -z $F99dir ]] && F99dir="${F99img%/*}"
[[ -z $F99dir ]] && F99dir="F99"
# Reduce F99img to a bare basename without extension.
F99img=${F99img##*/} # remove the directory at the beginning
F99img=${F99img%%.*} # remove the extension at the end
[[ -z $refname ]] && refname="McLaren"
refdir="$refname"
[[ -z $refimg ]] && refimg="McLaren"
[[ -z $refmask ]] && refmask="${refimg}_brain_mask"
[[ -z $refmaskstrict ]] && refmaskstrict="${refmask}_strict"
[[ -z $scriptdir ]] && scriptdir="."
[[ -z $config ]] && config="fnirt_1mm.cnf"
# prepad the directory if none is given
# NOTE(review): $diffdir is referenced here but never assigned anywhere in
# this script -- the line is effectively a no-op; confirm it can be removed.
[[ $F99dir != */* ]] && F99dir=$workdir/$F99dir
[[ $diffdir != */* ]] && diffdir=$workdir/$diffdir
[[ $refdir != */* ]] && refdir=$workdir/$refdir
[[ $refimg != */* ]] && refimg=$scriptdir/$refimg
[[ $refmask != */* ]] && refmask=$scriptdir/$refmask
[[ $config != */* ]] && config=$scriptdir/$config
# ------------------------------ #
# the instructions are coded below
# ------------------------------ #
# first rough brain extraction (--betorig works on the raw image,
# --betrestore on the bias-corrected one; both rebuild {F99img}_brain_mask)
if [[ $(getoption "betorig" "$@") = "TRUE" || $(getoption "betrestore" "$@") = "TRUE" ]] ; then
  # input: original F99img
  # output: {F99img}_brain_mask
  # definitions
  base=$F99dir/$F99img
  if [[ $(getoption "betorig" "$@") = "TRUE" ]] ; then
    img=$F99dir/$F99img
  else
    img=$F99dir/${F99img}_restore
  fi
  echo "brain extraction of: $img"
  # find the number of voxels (parsed from the fslhd XML-style header dump)
  xdim=$(fslhd -x $img | grep "nx = " | tr -d "[:alpha:][:space:][:punct:]")
  ydim=$(fslhd -x $img | grep "ny = " | tr -d "[:alpha:][:space:][:punct:]")
  zdim=$(fslhd -x $img | grep "nz = " | tr -d "[:alpha:][:space:][:punct:]")
  # find sensible centroids to initialise bet (fractions of the voxel grid)
  xhalf=$(echo $xdim | awk '{print $1/2}')
  ypost=$(echo $ydim | awk '{print $1/6}')
  yhalf=$(echo $ydim | awk '{print $1/2}')
  yant=$(echo $ydim | awk '{print $1*2/3}')
  zhalf=$(echo $zdim | awk '{print $1/2}')
  zhigh=$(echo $zdim | awk '{print $1*2/3}')
  # run bet centred at an anterior position
  bet $img ${base}_brain_ant -m -n -r 30 -f 0.35 -c $xhalf $yant $zhigh
  # and once more at a central (default) position
  bet $img ${base}_brain -m -n -r 30 -f 0.3 -c $xhalf $yhalf $zhalf
  # and once more at a posterior position
  bet $img ${base}_brain_post -m -n -r 30 -f 0.2 -c $xhalf $ypost $zhalf
  # add them and binarise (union of the three bet masks)
  fslmaths ${base}_brain_mask -add ${base}_brain_ant_mask -add ${base}_brain_post_mask -bin ${base}_brain_mask
  # find the extent of the brain mask (centre of gravity + bounding box)
  str=$(fslstats ${base}_brain_mask -C -w)
  # extract coordinates for frontal pole centroid
  x=$(echo $str | awk '{print $1}')
  y=$(echo $str | awk '{print $2+$7*4/9}')
  z=$(echo $str | awk '{print $3+$9/8}')
  # frontal pole bet
  bet $img ${base}_Fpole -m -r 25 -f 0.7 -c $x $y $z
  #if [[ $(getoption "betrestore" "$@") = "TRUE" ]] ; then
    # erode, cluster, ignore olfactory bulb, and dilate
    #thr=$(fslstats ${base}_Fpole -P 20)
    #fslmaths ${base}_Fpole -thr $thr -bin -ero -ero ${base}_Fpole
    #cluster --in=${base}_Fpole --thresh=0.5 --no_table --connectivity=6 --minextent=10000 --oindex=${base}_Fpole
    #fslmaths ${base}_Fpole -bin -dilF -s 0.5 -thr 0.002 -bin ${base}_Fpole_mask
  #fi
  if [[ $(getoption "betorig" "$@") = "TRUE" ]] ; then
    # combine brain mask with all the poles
    fslmaths ${base}_brain_mask -add ${base}_Fpole_mask -bin ${base}_brain_mask
  else
    # extract coordinates for temporal pole centroid (left and right)
    xL=$(echo $str | awk '{print $1-$5*2/7}')
    xR=$(echo $str | awk '{print $1+$5*2/7}')
    y=$(echo $str | awk '{print $2+$7/6}')
    z=$(echo $str | awk '{print $3-$9*2/6}')
    # temporal poles bet
    bet $img ${base}_TpoleL -m -n -r 25 -f 0.5 -c $xL $y $z
    bet $img ${base}_TpoleR -m -n -r 25 -f 0.5 -c $xR $y $z
    # combine brain mask with all the poles
    fslmaths ${base}_brain_mask -add ${base}_Fpole_mask -add ${base}_TpoleL_mask -add ${base}_TpoleR_mask -bin ${base}_brain_mask
  fi
  # store intermediate result, and the brain
  imcp ${base}_brain_mask ${base}_brain_mask_bet
  fslmaths $img -mas ${base}_brain_mask_bet ${base}_brain
  # exclude high-intensity clusters in the mask (eyes)
  if [[ $(getoption "betorig" "$@") = "TRUE" ]] ; then
    thr=$(fslstats ${base}_brain -p 98)
    cluster --in=${base}_brain --thresh=$thr --no_table --connectivity=26 --omean=${base}_2remove
    thr=$(fslstats ${base}_brain -p 99.5) # remove two brightest clusters (eyes)
  else
    thr=$(fslstats ${base}_brain -p 99.9)
    cluster --in=${base}_brain --thresh=$thr --no_table --connectivity=26 --oindex=${base}_2remove
    thr=$(fslstats ${base}_brain -R | awk '{print $2-1}') # remove only the two biggest clusters (eyes)
  fi
  fslmaths ${base}_2remove -thr $thr -bin -dilF ${base}_2remove
  fslmaths ${base}_brain_mask -sub ${base}_2remove -bin ${base}_brain_mask
  # polish and smooth the brain mask a bit (fill holes, light gaussian blur)
  fslmaths ${base}_brain_mask -fillh -s 0.5 -thr 0.45 -bin ${base}_brain_mask
  # clean up
  imrm ${base}_brain_ant_mask ${base}_brain_post_mask ${base}_Fpole ${base}_Fpole_mask ${base}_TpoleL_mask ${base}_TpoleR_mask ${base}_2remove
  echo "  done"
fi
# bias correct the corrected image
if [[ $(getoption "biascorr" "$@") = "TRUE" ]] ; then
  # input: F99img
  # output: {F99img}_restore
  img=$F99dir/$F99img
  echo "bias correcting image: $img"
  # smoothness definitions: FWHM = 2*sqrt(2*ln 2) * sigma ~= 2.3548 * sigma
  sigma=3
  FWHM=$(echo "2.3548 * $sigma" | bc)
  # run RobustBiasCorr
  $scriptdir/RobustBiasCorr.sh \
    --in=$img \
    --workingdir=$F99dir/biascorr \
    --brainmask=${img}_brain_mask \
    --basename=F99 \
    --FWHM=$FWHM \
    --type=1 \
    --forcestrictbrainmask="FALSE" --ignorecsf="FALSE"
  # copy the restored image and bias field, and remove working directory
  imcp $F99dir/biascorr/F99_restore ${img}_restore
  imcp $F99dir/biascorr/F99_bias ${img}_bias
  rm -rf $F99dir/biascorr
  echo "  done"
fi
# reference registration: affine (flirt) then non-linear (fnirt) to the
# reference, apply the warp, and build the inverse warp for the mask step.
if [[ $(getoption "refreg" "$@") = "TRUE" ]] ; then
  # input: {F99img}_restore
  # output: {F99img}_brain_mask
  img=$F99dir/${F99img}_restore
  base=$F99dir/$F99img
  echo "register to the reference: $refname"
  # make a folder for the transformations
  mkdir -p $workdir/transform/
  mkdir -p $refdir/
  # perform linear registration of the F99 to reference
  # (note: current call weights by the reference image itself, not refmask)
  #flirt -dof 12 -ref $refimg -refweight $refmask -in $img -inweight ${base}_brain_mask -omat $workdir/transform/${F99img}_2${refname}.mat
  flirt -dof 12 -ref $refimg -refweight $refimg -in $img -inweight ${base}_brain_mask -omat $workdir/transform/${F99img}_2${refname}.mat
  # use spline interpolation to apply the linear transformation matrix
  #applywarp --rel --interp=spline -i $img -r $refimg --premat=$workdir/transform/${F99img}_2${refname}.mat -o $refdir/${F99img}_lin
  # and now non-linear
  echo "using the mask: $refmaskstrict"
  fnirt --ref=$refimg --refmask=$refmaskstrict --in=$img --aff=$workdir/transform/${F99img}_2${refname}.mat --fout=$workdir/transform/${F99img}_2${refname}_warp --config=$config
  # use spline interpolation to apply the warp field
  applywarp --rel --interp=spline -i $img -r $refimg -w $workdir/transform/${F99img}_2${refname}_warp -o $refdir/$F99img
  # and now invert the warp field
  invwarp -w $workdir/transform/${F99img}_2${refname}_warp -o $workdir/transform/${refname}_2${F99img}_warp -r $img
  # ditch the warp coeficient and log
  imrm ${img}_warpcoef
  rm ${img}_to_*.log
  echo "  done"
fi
# retrieve and polish the brain mask
# Warps the reference brain mask back into F99 space, removes eye tissue,
# smooths the result, and derives a conservative "strict" mask.
if [[ $(getoption "brainmask" "$@") = "TRUE" ]] ; then
  # input: {F99img}_restore, {F99img}_brain_mask
  # output: {F99img}_brain_mask
  img=$F99dir/${F99img}_restore
  base=$F99dir/$F99img
  echo "retrieve and polish the brain mask based on: $refname"
  # warp the brain mask from reference to F99
  applywarp --rel --interp=nn -i $refmask -r $img -w $workdir/transform/${refname}_2${F99img}_warp -o ${base}_brain_mask
  imcp ${base}_brain_mask ${base}_brain_mask_$refname
  applywarp --rel --interp=nn -i $refmaskstrict -r $img -w $workdir/transform/${refname}_2${F99img}_warp -o ${base}_brain_mask_strict
  imcp ${base}_brain_mask_strict ${base}_brain_mask_strict_$refname
  # erode and select
  # (smooth+threshold keeps the interior; subtracting it leaves a thin edge ribbon)
  fslmaths ${base}_brain_mask -s 1 -thr 0.9 -bin ${base}_brain_mask_edge
  fslmaths ${base}_brain_mask -sub ${base}_brain_mask_edge -bin ${base}_brain_mask_edge
  fslmaths $img -mas ${base}_brain_mask_edge ${base}_brain_edge
  # find nasal bits of eyes
  # NOTE(review): the hard-coded -roi offsets (153 / 133) assume a fixed image
  # matrix and orientation — confirm they match the data this runs on.
  fslmaths ${base} -s 1 -mas ${base}_brain_mask_edge ${base}_blur
  fslmaths ${img} -s 1 -mas ${base}_brain_mask_edge ${img}_blur
  fslmaths ${base}_blur -mul ${img}_blur -div 1000 -roi 0 -1 153 -1 0 -1 0 -1 -thr 5 -bin ${base}_eyes_nasal
  # find posterior bits of eyes
  fslmaths ${base} -mul ${img} -mas ${base}_brain_mask_edge -div 1000 -roi 0 -1 133 -1 0 -1 0 -1 -thr 8 -bin ${base}_eyes_post
  fslmaths ${img} -mas ${base}_brain_mask_edge -roi 0 -1 133 -1 0 -1 0 -1 -thr 200 -bin -add ${base}_eyes_post -bin ${base}_eyes_post
  fslmaths ${base}_brain_mask -binv -add ${base}_eyes_post -bin ${base}_eyes_post
  cluster --in=${base}_eyes_post --thresh=0.5 --no_table --connectivity=6 --minextent=10000 --oindex=${base}_eyes_post
  fslmaths ${base}_eyes_post -mas ${base}_brain_mask -bin ${base}_eyes_post
  # remove these bits
  fslmaths ${base}_brain_mask -sub ${base}_eyes_nasal -sub ${base}_eyes_post ${base}_brain_mask
  # smooth out the brain mask (and just ever so slightly dilate)
  fslmaths ${base}_brain_mask -s 1 -thr 0.45 -bin ${base}_brain_mask
  # exclude again the posterior eye bits
  cluster --in=${base}_eyes_post --thresh=0.5 --minextent=100 --no_table --oindex=${base}_eyes_post
  fslmaths ${base}_eyes_post -bin ${base}_eyes_post
  fslmaths ${base}_brain_mask -sub ${base}_eyes_post -bin ${base}_brain_mask
  # extract the brain
  fslmaths $img -mas ${base}_brain_mask ${base}_brain
  # and make a strict mask
  # (5th-percentile threshold + big connected clusters -> conservative mask)
  thr=$(fslstats ${base}_brain -P 5)
  cluster --in=${base}_brain --thresh=$thr --no_table --connectivity=6 --minextent=10000 --oindex=${base}_brain_mask_strict
  fslmaths ${base}_brain_mask_strict -bin -fillh -s 0.5 -thr 0.5 -bin -mas ${base}_brain_mask -fillh ${base}_brain_mask_strict
  # clean up
  imrm ${base}_brain_edge ${base}_brain_mask_edge ${base}_blur ${img}_blur ${base}_eyes_nasal ${base}_eyes_post
  echo " done"
fi
| true
|
0859e69c6f9f43c8eeb32d35850301de606a379a
|
Shell
|
galp/ssb-server
|
/container-entrypoint
|
UTF-8
| 746
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Container entrypoint: generate the SSB server config on first run,
# then exec the container's command.
#
# Requires the DOMAIN environment variable (public hostname for incoming
# connections) when no config exists yet.
if [[ ! -f /ssb/.ssb/config ]]; then
    echo "Missing SSB config file, generating..."
    if [[ -z "${DOMAIN}" ]]; then
        echo "Please provide a \$DOMAIN variable" 1>&2
        exit 1
    fi
    # envsubst expands ${DOMAIN} inside the here-doc below.
    # FIX: the failure branch must be a brace group, not a subshell —
    # the original '|| ( echo "Fail" 1>&2 ; exit 2 )' only exited the
    # subshell, so the script fell through to 'exec "$@"' even when the
    # config could not be written.
    mkdir -p /ssb/.ssb &&\
    envsubst << EOF > /ssb/.ssb/config &&\
    echo "Done!" || { echo "Fail" 1>&2 ; exit 2; }
{
  "connections": {
    "incoming": {
      "net": [
        {
          "scope": "public",
          "external": [
            "${DOMAIN}"
          ],
          "transform": "shs",
          "port": 8008,
          "host": "0.0.0.0"
        }
      ]
    },
    "outgoing": {
      "net": [
        {
          "transform": "shs"
        }
      ]
    }
  }
}
EOF
fi
exec "$@"
| true
|
fbb88f509720ae9dbcf9db3f13e4b4782148a394
|
Shell
|
elambert/honeycomb
|
/tools/buildinstalltest/start_flamebox_build.sh
|
WINDOWS-1252
| 2,068
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# $Id: start_flamebox_build.sh 10853 2007-05-19 02:50:20Z bberndt $
#
# Copyright 2008, Sun Microsystems, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# # Neither the name of Sun Microsystems, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
PIDFILE=/tmp/build_cron.pid

# Decide whether a build is already running by checking the recorded PID.
START=false
if [ ! -f "$PIDFILE" ]; then
  START=true
else
  PID=$(cat "$PIDFILE")
  # Anchor the match so PID 12 does not match process 123.
  if [ -n "$PID" ] && ps -Aef | grep -v grep | awk '{print $2}' | grep -q "^${PID}$"; then
    START=false
  else
    START=true
  fi
fi

if $START; then
  echo "starting flamebox build"
  # FIX: the original wrote '$!' to the literal path /tmp/PIDFILE after the
  # (foreground) build finished, so the lock file was never created and '$!'
  # was empty anyway. Record this script's own PID in $PIDFILE *before* the
  # long-running build so concurrent invocations see the lock.
  echo $$ > "$PIDFILE"
  cd /usr/local/flamebox/bin || exit 1
  ./flamebox-client.pl UpdateTools Build
else
  echo "flamebox build already running"
fi
| true
|
c92336fdd7fa70dedfbbcefc71dd5880964c51eb
|
Shell
|
containernetworking/cni
|
/scripts/priv-net-run.sh
|
UTF-8
| 434
| 3.25
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Run a command in a private network namespace
# set up by CNI plugins
set -e
if [[ ${DEBUG} -gt 0 ]]; then set -x; fi

# A random 64-bit hex id doubles as the "container" id and the netns name.
ns_id=$(printf '%x%x%x%x' $RANDOM $RANDOM $RANDOM $RANDOM)
ns_path=/var/run/netns/$ns_id

# Tear down plugin state and the namespace itself on exit.
teardown() {
	./exec-plugins.sh del $ns_id $ns_path
	ip netns delete $ns_id
}

ip netns add $ns_id
./exec-plugins.sh add $ns_id $ns_path
# Cleanup is only armed once the namespace exists and the plugins ran,
# matching the original ordering.
trap teardown EXIT

ip netns exec $ns_id "$@"
| true
|
41f82e19a09d8d5d8dbf8390fa8e5573344d569e
|
Shell
|
caoyang-test/abc
|
/test29.sh
|
UTF-8
| 109
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/bash
#
# Print the integers 1 through 5, one per line, then terminate.
for counter in 1 2 3 4 5; do
	echo "$counter"
done
exit
| true
|
e3b1c2d99b8c5cf86d4caf16283baf0df4bdfcee
|
Shell
|
jplikesbikes/dotfiles
|
/.zshrc
|
UTF-8
| 3,976
| 2.6875
| 3
|
[] |
no_license
|
# Interactive zsh configuration: history, completion, plugin manager (zinit),
# tool environments (nvm, pyenv, wasmer/wasmtime), aliases and key bindings.
# Lines configured by zsh-newuser-install
HISTFILE=~/.histfile
HISTSIZE=10000
SAVEHIST=10000
setopt appendhistory extendedglob
unsetopt beep
bindkey -v
# End of lines configured by zsh-newuser-install
# The following lines were added by compinstall
zstyle :compinstall filename '/home/jp/.zshrc'
# add user-supplied completion functions to the function search path
fpath=($fpath ~/.zsh/completion)
autoload -Uz compinit
compinit
# End of lines added by compinstall
export TERM=xterm-256color
export PATH=/home/jp/.cargo/bin:/home/jp/bin:/opt/confluent/confluent-5.0.1/bin:/home/jp/.local/bin/:$PATH
# bootstrap the zinit plugin manager on first run, then load it
[ ! -s $HOME/.zinit/zinit.zsh ] && git clone https://github.com/zdharma-continuum/zinit.git ~/.zinit
[ -s $HOME/.zinit/zinit.zsh ] && source $HOME/.zinit/zinit.zsh # This loads antigen
zinit light zsh-users/zsh-autosuggestions
zinit light nojhan/liquidprompt
zinit snippet OMZ::lib/termsupport.zsh
# Load seperated config files
for conf in $HOME/.config/zsh/config.d/*.zsh; do
  source "${conf}"
done
unset conf
# configure autosuggests
# ctrl-space to accept suggestion
bindkey '^ ' autosuggest-accept
export EDITOR=vim
# allow less to display utf-8 characters
export LC_CTYPE=en_US.UTF-8
export LESSCHARSET=utf-8
export NVM_SYMLINK_CURRENT="true" # nvm use should make a symlink
export NVM_DIR="$HOME/.nvm"
export NVM_LAZY_LOAD=true
zinit light lukechilds/zsh-nvm # This load nvm on first use of node, npm, etc
export USE_GKE_GCLOUD_AUTH_PLUGIN=True
export SSH_AUTH_SOCK="$XDG_RUNTIME_DIR/ssh-agent.socket"
alias ls='ls -lhH --color=auto'
alias vi='vim'
alias hg='hg --color=always'
alias less='less -R'
alias diff='colordiff -u'
alias sudo='sudo -E '
# common typos for git
alias gti='git'
alias gitp='git'
alias gi='git'
alias docker-exec='docker exec -it -e COLUMNS=$COLUMNS -e LINES=$LINES -e TERM=$TERM'
# alias env='env | sort | awk -F = '"'"'{ print "\033[1;35m" $1 "\033[0m = " $2; }'"'"''
alias ls-ln='find node_modules -maxdepth 1 -type l -ls'
alias bc='bc -l'
# fzf default command to ripgrep
if type "rg" > /dev/null; then
  export FZF_DEFAULT_COMMAND='rg --files'
fi
# multi-mv
autoload -U zmv
alias mmv='noglob zmv -W'
#search history
bindkey '^r' history-incremental-search-backward
#open vim in ctrl-p using ctrlp
ctrlp() {
  </dev/tty vim -c CtrlP
}
zle -N ctrlp
bindkey "^p" ctrlp
# fzf in shell with ctrl-t
[ -s /usr/share/fzf/key-bindings.zsh ] && source /usr/share/fzf/key-bindings.zsh
[ -s /usr/share/fzf/completion.zsh ] && source /usr/share/fzf/completion.zsh
#open vim in ctrl-p using ctrlp
ctrla() {
  </dev/tty vim -c Grepper
}
zle -N ctrla
bindkey "^a" ctrla
# vi mode in right prompt
function zle-line-init zle-keymap-select {
  VIM_PROMPT="%{$fg_bold[yellow]%} [% NORMAL]%  %{$reset_color%}"
  RPS1="${${KEYMAP/vicmd/$VIM_PROMPT}/(main|viins)/} $EPS1"
  zle reset-prompt
}
zle -N zle-line-init
zle -N zle-keymap-select
export KEYTIMEOUT=2
# ctrl + arrows for history completion
bindkey "^[[1;5C" forward-word
bindkey "^[[1;5D" backward-word
# In Vim backspace doesn't stop at the point where you started insert mode:
bindkey '^?' backward-delete-char
bindkey '^H' backward-delete-char
# delete key
bindkey "^[[3~" delete-char
bindkey "^[3;5~" delete-char
# easier up and down
bindkey "^k" up-line-or-history
bindkey "^j" down-line-or-history
# home and end
bindkey "^[[1~" beginning-of-line
bindkey "^[[4~" end-of-line
bindkey "^[[7~" beginning-of-line
bindkey "^[[8~" end-of-line
bindkey "^[OH" beginning-of-line
bindkey "^[OF" end-of-line
bindkey "^[[H" beginning-of-line
bindkey "^[[F" end-of-line
# set psql
export PSQL_EDITOR=vim
# NOTE(review): '[ ]' with no arguments is always false, so the two
# completion hooks below never run — looks intentionally disabled; confirm.
if [ ]; then source <(kubectl completion zsh); fi
if [ ]; then source <(argocompletion zsh); fi
eval $(thefuck --alias f)
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
eval "$(pyenv init --path)"
eval "$(pyenv init -)"
export PATH="/home/jp/.local/bin:$PATH"
# Wasmer
export WASMER_DIR="/home/jp/.wasmer"
[ -s "$WASMER_DIR/wasmer.sh" ] && source "$WASMER_DIR/wasmer.sh"
export WASMTIME_HOME="$HOME/.wasmtime"
export PATH="$WASMTIME_HOME/bin:$PATH"
| true
|
2b8a8c4e674987f7335ed290ece66b2e7fae21e1
|
Shell
|
Domino881/OI
|
/LIVE/day0/spr.sh
|
UTF-8
| 151
| 2.734375
| 3
|
[] |
no_license
|
# Randomised stress test: build the solution (pro) and the checker (check),
# then feed 100 random single-digit inputs through both and stop on the
# first mismatch between the checker output and the original input.
make pro && make check

iteration=1
while [ $iteration -le 100 ]; do
	# one random test case: a single digit 0-9
	echo $((RANDOM % 10)) > in
	bin/pro < in > outx
	bin/check < outx > outb
	# 'diff -s' reports identical files; any difference aborts the loop
	diff -s outb in || break
	iteration=$((iteration + 1))
done
| true
|
6fc0434c70a650cef7e16360501f653185085feb
|
Shell
|
u1i/yoisho
|
/yoisho-loan/push-release.sh
|
UTF-8
| 323
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
# Tag a local Docker image with a release version (plus the moving 'latest'
# tag) and push both tags to the u1ih Docker Hub namespace.
#
# Usage: push-release.sh <image-id> <release>
if [ $# -lt 2 ]; then
	echo "run $0 image release"
	echo "$0 e9700768b6a3 0.14"
	exit
fi

docker_image=$1
release=$2
imagename=yoisho-loan

# apply both tags first...
for tag in "$release" latest; do
	docker tag $docker_image u1ih/$imagename:$tag
done

docker login

# ...then push them in the same order as before
for tag in "$release" latest; do
	docker push u1ih/$imagename:$tag
done
| true
|
e97b26eec456ba34d7fb632eb9af64288cb30366
|
Shell
|
leozd1/vector
|
/victor-clad/tools/message-buffers/emitters/tests/javascript/simpletest.sh
|
UTF-8
| 1,400
| 2.8125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
#
# Copyright 2015-2018 Anki Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
set -u
# Emitter invocations; every component can be overridden via the environment
# (PYTHON, PYTHONFLAGS, EMITTER, EMITTER_CPP, OUTPUT_DIR).
CLAD="${PYTHON:=python} ${PYTHONFLAGS:=} ${EMITTER:=../../JS_emitter.py}"
CLAD_CPP="${PYTHON:=python} ${PYTHONFLAGS:=} ${EMITTER_CPP:=../../CPP_emitter.py}"
CLADSRC=../src/js-simple
OUTPUT_DIR=${OUTPUT_DIR:-./build/simple}
# Generate JS and C++ sources for every .clad file, mirroring the source layout.
for file in $CLADSRC/*.clad; do
    OUTPUT_DIR_PARAM=$(dirname $OUTPUT_DIR/${file#$CLADSRC/};)
    mkdir -p ${OUTPUT_DIR_PARAM}
    $CLAD -o ${OUTPUT_DIR_PARAM} -C $(dirname $file) $(basename $file);
    $CLAD_CPP -o ${OUTPUT_DIR_PARAM} -C $(dirname $file) $(basename $file);
done
# Build the C++ half of the round-trip test, then run the JS and C++ tests.
clang++ -Wall -std=c++11 clad_test.cpp build/simple/Javascript.cpp ../../../support/cpp/source/SafeMessageBuffer.cpp -I../../../support/cpp/include -o build/simple/cpp_test;
cp cladConfig.js ./build/simple/cladConfig.js
node clad_test.js
./build/simple/cpp_test
# clad_test leaves a scratch buffer behind; remove it.
rm buffer.tmp
| true
|
917869281736de643d2deb17a5e4173d9154d8b3
|
Shell
|
haskoin/haskoin-core
|
/scripts/release
|
UTF-8
| 640
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Release helper: read name/version from package.yaml, tag and push the
# release, then upload the package and its docs to Hackage.
#
# FIX: shebang changed from /bin/sh — the script uses [[ ]] and 'local',
# which are bashisms and fail under dash/ash.

# Extract a top-level scalar field (e.g. name, version) from package.yaml.
# Renamed from 'find' to avoid shadowing the standard find(1) utility.
field() {
    local thing="$1"
    perl -lnE "say if s/^\\s*$thing:\\s*([^\\s]+)\\s*\$/\\1/" package.yaml
}

if [[ ! -f package.yaml ]]
then
    echo "No package.yaml present" >&2
    exit 1
fi

package="$(field name)"
ver="$(field version)"

if [[ -z $package || -z $ver ]]
then
    # diagnostics belong on stderr, consistent with the check above
    echo "No package or version found" >&2
    exit 1
fi

echo "package: $package"
echo "ver: $ver"

git tag -m "Version $ver" "v$ver"
git push origin master
git push origin "v$ver"

stack upload .
cabal update
cabal new-haddock --enable-documentation --haddock-hyperlink-source --haddock-for-hackage
cabal upload -d --publish "dist-newstyle/$package-$ver-docs.tar.gz"
| true
|
1f84e4acc0eb674edb439338666fdca8cec21f4f
|
Shell
|
railsmine/scripts
|
/bash/abandoned/install-tracks.sh
|
UTF-8
| 3,744
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# ============================================================================================================================== #
# This script installs a tracks instance, assuming rvm is installed with proper support. Also, generate some code for Passenger
# This script requires RVM (currently, user-based-installs) along with a Ruby 1.8.7 install (see web-dev-install.sh)
# run with `./install-tracks.sh uninstall` to uninstall tracks from a directory
# run with `./install-tracks.sh defaults` to run installer with default parameters
# add a second parameter as dot ".", if you want to install tracks in current directory itself, else you will be asked.
# e.g. ./install-tracks.sh defaults .
# or ./install-tracks.sh .
# TODO: Make webrick boot on an available port, rather than a fixed port (4096 here), which can not be free at times. (`lsof -i :4096`)
# ============================================================================================================================== #
RVM_RUBY_NAME=1.8.7 # RVM ruby name (must be ruby version 1.8.6 or 1.8.7)
MYSQL_ROOTUSER=root # Your MySQL root username and password
MYSQL_ROOTPASS=password
MYSQL_HOSTNAME=localhost # MySQL Hostname
SCRIPTDIR=$HOME/Documents/bash-scripts # Directory where this script is saved.
DEFAULT_PORT=3002
# =========================================================================================================================== #
# ONLY Change these variables/functions, if you are trying to modify this script for some other installation.
# You may still need to change the script further down, but this will just help you make it more easier.
# =========================================================================================================================== #
APPNAME=tracks
# NOTE(review): INSTALLDIR is never set in this file — presumably defined by
# the sourced install-base.sh before these hooks are invoked; confirm there.
function update-source {
	# how should the downloaded app-instance be updated?
	cd $INSTALLDIR/$APPNAME
	git pull origin
}
function download-source {
	# how can I download this app-instance?
	DOWNLOAD_URL=git://github.com/bsag/tracks.git
	git clone $DOWNLOAD_URL $APPNAME
	cd $INSTALLDIR/$APPNAME
}
function custom-file-changes {
	# if you want to modify, files other than database.yml enter your commands here.
	# database.yml is auto-generated, and these steps will take place after that edit..
	# $1 == "defaults" skips the interactive site.yml editing step
	if [ "$1" != "defaults" ]; then
		if [ ! -f $INSTALLDIR/$APPNAME/config/site.yml ]; then
			cp $INSTALLDIR/$APPNAME/config/site.yml.tmpl $INSTALLDIR/$APPNAME/config/site.yml
		fi
		echo "$APPNAME requires some further configuration settings."
		read -p "Press a key to edit site.yml [ENTER]"
		nano $INSTALLDIR/$APPNAME/config/site.yml
	fi
	echo "All custom configurations has been saved, if any."
}
function generate-gem-file {
	# place here the content of a Gemfile that can be used to generate the gems this app requires.
	cp $SCRIPTDIR/files/$APPNAME/Gemfile $INSTALLDIR/$APPNAME/Gemfile
}
function rake-process {
	# once all the file-edits are done and Gems have been installed, we need to rake our app-instance
	# place here all the rake instance and also, any other changes/commands you need to ensure.
	RAILS_ENV=production rake gems:install
	echo "Migrating database, if needed.."
	RAILS_ENV=production rake db:migrate
}
# =========================================================================================================================== #
# Omitting repeated use of the same code, and added it to a standalone (but pretty unsusable) shell script..
# =========================================================================================================================== #
# install-base.sh drives the install using the variables/hooks defined above.
. $SCRIPTDIR/install-base.sh
| true
|
bfcfdebceb2280af7e9f7fb27435e971fe242059
|
Shell
|
radix-r/toDoOrg
|
/orgScript.bash
|
UTF-8
| 844
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Watch a to-do file and the organizer's sources; whenever either changes,
# re-run the organizer on the file. Polls once per hour.
read -r -p 'To-Do file to organize: ' fName
echo "Starting to-do organizer on $fName"

# create reference file. If target differs from reference, run the organizer
# FIX: commands are no longer wrapped in backticks — the original form
# `cmd` executed the command and then tried to run its *output* as another
# command, which silently breaks as soon as a command prints anything.
touch ref.txt
# in case something goes wrong keep the previous version
touch prev.txt

while true; do
	bool_run=0

	# use makefile; a change in the make output signals modified source code
	echo "Compiling"
	make > make.out
	d=$(diff make.out makeComp.txt)
	if [[ -n $d ]]; then
		echo "Change to source code detected"
		bool_run=1
	fi

	# compare ref and the target file; if they differ, run
	d=$(diff ref.txt "$fName")
	if [[ -n $d ]]; then
		echo "Target file changed"
		bool_run=1
	fi

	if [ $bool_run -eq 1 ]; then
		# keep the previous version before the organizer rewrites the file
		cat "$fName" > prev.txt
		echo "Running"
		./org "$fName"
		# refresh the reference copy
		cat "$fName" > ref.txt
	fi

	echo "Sleeping for 1 hour"
	sleep 3600
done
| true
|
ff6c447451deb1749aedfdd5aa4f693179c0daeb
|
Shell
|
ftzm/scripts
|
/open_emacs.sh
|
UTF-8
| 294
| 3.25
| 3
|
[] |
no_license
|
# Open the given files in an existing Emacs frame on $DISPLAY if one exists,
# otherwise spawn a new frame.
# Ask the Emacs server for the frames on this display; the exit status tells
# us whether a server (with frames) is reachable.
if emacsclient -e "(frames-on-display-list \"$DISPLAY\")" &>/dev/null; then
    # open frames detected, so open files in current frame (terminal client)
    # FIX: "$@" instead of unquoted $* so filenames with spaces survive
    emacsclient -n -t "$@"
else
    # no open frames detected, so open new frame
    emacsclient -n -c "$@"
fi
| true
|
761ffef0c99f7b594bc1d9b971fb94dd497981f9
|
Shell
|
dozent2018/IFA_LINUX_DEV
|
/trapdemo2.sh
|
UTF-8
| 556
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# trapdemo2.sh : catches several signals (SIGINT/SIGTERM) with trap and a
# handler function: the first signal removes the temp file, the second exits.
handler_function() {
	if (( count == 0 )); then
		# first signal: announce, remove the temp file, remember the hit
		echo Erstes Mal
		rm $tmpfile
		(( count ++ ))
	else
		# second signal: announce and terminate the script
		echo Zweites Mal, Beenden
		exit
	fi
}
trap handler_function SIGINT SIGTERM
echo PID: $$
count=0
# temp file name incorporates this shell's PID
tmpfile=trapdemo$$
touch $tmpfile
while true; do
	# a while loop must contain at least one statement;
	# ':' is the "empty statement" — busy-waits until a signal arrives
	:
done
| true
|
4b12655942cf708e144c01edf52adc1e72f67161
|
Shell
|
MegaMosquito/achatina
|
/helper
|
UTF-8
| 3,120
| 4.34375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# A helper script to simplify the collection of a few host-specific items...
#
#  ./helper -a
#     - returns the appropriate hardware architecture for use with open-horizon
#  ./helper -g
#     - returns the IP address of the first default gateway for this host
#  ./helper -i
#     - returns an IP address on the first default interface for this host
#  ./helper -n
#     - returns a name for this node (typically `hostname`)
#  ./helper -z
#     - returns all of the above (really just for development convenience)
#

DEBUG=0
# FIX: all debug messages now go to stderr so they can never pollute the
# values the show_* functions print to stdout (callers capture stdout
# with "$(show_...)").
if [ ${DEBUG} -gt 0 ]; then echo "DEBUG is on." >&2; fi

# Return the Horizon architecture string for this platform
show_architecture () {
  if [ ${DEBUG} -gt 0 ]; then echo "Architecture." >&2; fi
  RAW_ARCH=$(uname -m)
  if [[ "$RAW_ARCH" == "x86_64" ]]; then
    ARCH=amd64
  elif [[ "$RAW_ARCH" == "i386" ]]; then
    ARCH=amd64
  elif [[ "$RAW_ARCH" == "aarch64" ]]; then
    ARCH=arm64
  elif [[ "$RAW_ARCH" == "arm"* ]]; then
    ARCH=arm
  else
    # Other. Fail!
    ARCH=error
  fi
  echo "${ARCH}"
}

# Return the default gateway address for this host
show_gateway () {
  if [ ${DEBUG} -gt 0 ]; then echo "Gateway Address." >&2; fi
  if [[ "$OSTYPE" == "linux-"* ]]; then
    # Linux
    GATEWAY=$(ip route | grep default | head -1 | cut -d' ' -f3)
  elif [[ "$OSTYPE" == "darwin"* ]]; then
    # MacOSX
    GATEWAY=$(netstat -nr | grep default | head -1 | awk '{print$2}')
  else
    # Other. Best guess:
    GATEWAY=$(ip route | grep default | head -1 | cut -d' ' -f3)
  fi
  echo "${GATEWAY}"
}

# Return the default interface's IP address for this host
show_ip () {
  if [ ${DEBUG} -gt 0 ]; then echo "IP Address." >&2; fi
  if [[ "$OSTYPE" == "linux-"* ]]; then
    # Linux
    CIDR=$(ip route | grep default | head -1 | sed 's/ proto dhcp / /' | cut -d' ' -f3 | cut -d'.' -f1-3)'.0/24'
    if [ ${DEBUG} -gt 0 ]; then echo "CIDR=${CIDR}" >&2; fi
    HOST_IP=$(ip route | grep "${CIDR}" | head -1 | sed 's/ proto [a-z]*//' | cut -d' ' -f7)
  elif [[ "$OSTYPE" == "darwin"* ]]; then
    # MacOSX
    HOST_IP=$(host `hostname` | head -1 | sed 's/.* //')
  else
    # Other. Best guess:
    CIDR=$(ip route | grep default | head -1 | sed 's/ proto dhcp / /' | cut -d' ' -f3 | cut -d'.' -f1-3)'.0/24'
    if [ ${DEBUG} -gt 0 ]; then echo "CIDR=${CIDR}" >&2; fi
    HOST_IP=$(ip route | grep "${CIDR}" | head -1 | sed 's/ proto [a-z]*//' | cut -d' ' -f7)
  fi
  echo "${HOST_IP}"
}

# Return a name for this node
show_name () {
  if [ ${DEBUG} -gt 0 ]; then echo "Name." >&2; fi
  NAME=`hostname`
  echo "${NAME}"
}

while getopts ":aginz" opt; do
  case ${opt} in
    a )
      show_architecture;
      exit 0;
      ;;
    g )
      show_gateway;
      exit 0;
      ;;
    i )
      show_ip;
      exit 0;
      ;;
    n )
      show_name;
      exit 0;
      ;;
    z )
      echo -n "Horizon Architecture:  ";
      show_architecture;
      echo -n "Gateway:  ";
      show_gateway;
      echo -n "IP Address:  ";
      show_ip;
      echo -n "Name:  ";
      show_name;
      exit 0;
      ;;
    \? ) echo "Usage: $0 [-a|-g|-i|-n|-z]"
      ;;
  esac
done
| true
|
e756bf8d7e0ab7979c1a7deaca6fe44d2092d210
|
Shell
|
erinmaz/MRGFUS
|
/analysis_Kwon_ROIs_probtrackx_lesionterm_split_tracts_ants.sh
|
UTF-8
| 6,338
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
#I think this is essentially working for 9001_SH. Need to carefully check for pts with lesion on other TREATED_DENTATE. need to think about making the ROIs less scanty (I think I am losing some voxels to rounding, although perhaps that's good because it prevents overlap?)
# Usage: <script> <subject> <pre-session> <day1-session>
#   $1 subject id, $2 pre-treatment session label, $3 day-1 session label.
MAINDIR=/Users/erin/Desktop/Projects/MRGFUS/analysis
#LESIONDIR=/Users/erin/Desktop/Projects/MRGFUS/analysis_lesion_masks
LESIONDIR=/Users/erin/Desktop/Projects/MRGFUS/analysis
MYDIR=${MAINDIR}/${1}-${2} #pre
DAY1=${MAINDIR}/${1}-${3}
DAY1_LESION=${LESIONDIR}/${1}-${3}
# Locate the day-1 lesion mask in MNI space; file naming varies per subject,
# so fall back through the known variants.
if [ -f ${DAY1_LESION}/anat/xfms/ants/bet/T1_lesion_filled_mask2T1_2_MNI152_T1_1mm.nii.gz ]; then
	MYLESION=${DAY1_LESION}/anat/xfms/ants/bet/T1_lesion_filled_mask2T1_2_MNI152_T1_1mm
elif [ -f ${DAY1_LESION}/anat/xfms/ants/bet/T1_lesion_filled_mask_2_MNI152_T1_1mm.nii.gz ]; then
	MYLESION=${DAY1_LESION}/anat/xfms/ants/bet/T1_lesion_filled_mask_2_MNI152_T1_1mm
elif [ -f ${DAY1_LESION}/anat/xfms/ants/T1_lesion_filled_mask2T1_2_MNI152_T1_1mm.nii.gz ]; then
	MYLESION=${DAY1_LESION}/anat/xfms/ants/T1_lesion_filled_mask2T1_2_MNI152_T1_1mm
else
	MYLESION=${DAY1_LESION}/anat/xfms/ants/T1_lesion_filled_mask_2_MNI152_T1_1mm
fi
# Decide which dentate is "treated" from the lesion's x centre-of-gravity in
# standard space (negative x -> treated dentate labelled R here).
# NOTE(review): confirm this left/right convention against the lab's MNI
# orientation — it is not derivable from this script alone.
xcoord_lesion_standard=`fslstats $MYLESION -c | awk '{print $1}'`
if [ $(bc -l <<< "$xcoord_lesion_standard < 0") -eq 1 ]; then
	TREATED_DENTATE=R
	UNTREATED_DENTATE=L
else
	TREATED_DENTATE=L
	UNTREATED_DENTATE=R
fi
# Build an inverse mask excluding the lesion and its immediate neighbours.
#fslmaths ${MYDIR}/diffusion/T1_lesion_mask_filled2diff_bin -dilM -binv ${MYDIR}/diffusion/T1_lesion_mask_filled2diff_bin_and_neighbours_binv
fslmaths ${MYDIR}/diffusion/Kwon_ROIs_ants/T1_lesion_mask_filled2diff_bin -dilM -binv ${MYDIR}/diffusion/Kwon_ROIs_ants/T1_lesion_mask_filled2diff_bin_and_neighbours_binv
#fslmaths ${MYDIR}/diffusion/T1_lesion_mask_filled2diff_bin_and_neighbours_binv ${MYDIR}/diffusion/Kwon_ROIs_ants/T1_lesion_mask_filled2diff_bin_and_neighbours_binv
#fslmaths ${MYDIR}/diffusion/T1_lesion_mask_filled2MNI_1mm_xswap_2diff_bin -dilM -binv ${MYDIR}/diffusion/T1_lesion_mask_filled2MNI_1mm_xswap_2diff_bin_and_neighbours_binv
#Treated tract
# Centre-of-gravity coordinates (voxels) of the treated dentate, the SCP
# decussation and the lesion, used below to crop the probtrackx path image
# into dentate->decussation and decussation->lesion segments.
treated_dentate_coords=`fslstats ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil -C`
treated_dentate_x=`echo $treated_dentate_coords | awk '{print $1}'`
treated_dentate_y=`echo $treated_dentate_coords | awk '{print $2}'`
treated_dentate_z=`echo $treated_dentate_coords | awk '{print $3}'`
decus_coords=`fslstats ${MYDIR}/diffusion/Kwon_ROIs_ants/SCP_decus -C`
decus_x=`echo $decus_coords | awk '{print $1}'`
decus_y=`echo $decus_coords | awk '{print $2}'`
decus_z=`echo $decus_coords | awk '{print $3}'`
diffx=`echo $decus_x - $treated_dentate_x | bc`
diffx=`echo ${diffx#-}` #abs value
diffy=`echo $decus_y - $treated_dentate_y | bc`
diffz=`echo $decus_z - $treated_dentate_z | bc`
lesion_coords=`fslstats ${MYDIR}/diffusion/Kwon_ROIs_ants/T1_lesion_mask_filled2diff_bin -C`
lesion_x=`echo $lesion_coords | awk '{print $1}'`
lesion_y=`echo $lesion_coords | awk '{print $2}'`
lesion_z=`echo $lesion_coords | awk '{print $3}'`
diff2x=`echo $lesion_x - $decus_x | bc`
diff2x=`echo ${diff2x#-}` #abs value
diff2y=`echo $lesion_y - $decus_y | bc`
diff2z=`echo $lesion_z - $decus_z | bc`
# Crop the fdt_paths image into the two tract segments; the ROI origin
# differs depending on which side was treated.
if [ $TREATED_DENTATE = "R" ]; then
	fslmaths ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths -roi ${treated_dentate_x} ${diffx} ${treated_dentate_y} ${diffy} ${treated_dentate_z} ${diffz} 0 1 ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_dentate2SCP_decus
	fslmaths ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths -roi ${decus_x} ${diff2x} ${decus_y} ${diff2y} ${decus_z} ${diff2z} 0 1 ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_SCP_decus2lesion
else
	fslmaths ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths -roi ${decus_x} ${diffx} ${treated_dentate_y} ${diffy} ${treated_dentate_z} ${diffz} 0 1 ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_dentate2SCP_decus
	fslmaths ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths -roi ${lesion_x} ${diff2x} ${decus_y} ${diff2y} ${decus_z} ${diff2z} 0 1 ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_SCP_decus2lesion
fi
# Binarise the segments, excluding lesion voxels from the decus->lesion part.
fslmaths ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_SCP_decus2lesion -bin -mas ${MYDIR}/diffusion/Kwon_ROIs_ants/T1_lesion_mask_filled2diff_bin_and_neighbours_binv ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_SCP_decus2lesion_bin_nolesion
fslmaths ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_dentate2SCP_decus -bin ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_dentate2SCP_decus_bin
#convert all sub-tracts to T1 space for longitudinal analysis
flirt -applyxfm -init ${MYDIR}/diffusion/xfms/diff_2_T1_bbr.mat -in ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_dentate2SCP_decus_bin -out ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_dentate2SCP_decus2T1 -ref ${MYDIR}/anat/T1
flirt -applyxfm -init ${MYDIR}/diffusion/xfms/diff_2_T1_bbr.mat -in ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_SCP_decus2lesion_bin_nolesion -out ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_SCP_decus2lesion2T1 -ref ${MYDIR}/anat/T1
fslmaths ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_dentate2SCP_decus2T1 -thr 0.5 -bin ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_dentate2SCP_decusT1_bin
fslmaths ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_SCP_decus2lesion2T1 -thr 0.5 -bin ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_SCP_decus2lesion2T1_bin
# Visual QC: overlay both binarised tract segments on the T1 (backgrounded).
fsleyes ${MYDIR}/anat/T1 ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_dentate2SCP_decusT1_bin -cm "Red" ${MYDIR}/diffusion/Kwon_ROIs_ants/dentate_${TREATED_DENTATE}_dil_lesionterm/fdt_paths_SCP_decus2lesion2T1_bin -cm "Blue" &
| true
|
4c35022f9a6a63d1428332756dfa48d0f77b2d2a
|
Shell
|
thorsteinsson/setup
|
/setup.sh
|
UTF-8
| 673
| 3.46875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Bootstrap a new macOS machine: install Xcode CLI tools and Oh My Zsh,
# apply OS defaults and apps from this repo, then symlink the dotfiles.

# FIX: move into the directory containing this script *before* sourcing the
# sibling files — '.macos' and 'apps.sh' are referenced by relative path, so
# the original only worked when invoked from the repo root.
cd "$(dirname "${BASH_SOURCE}")";
git pull origin master;

xcode-select --install

# Oh my Zsh
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"

echo "Configuring OS…"
source ".macos"

echo "Installing apps…"
source "apps.sh"

# Symlink the tracked dotfiles into $HOME and reload the shell config.
function doIt() {
	ln -sfv "$PWD/.gitconfig" ~/.gitconfig
	ln -sfv "$PWD/.gitignore" ~/.gitignore
	ln -sfv "$PWD/.zshrc" ~/.zshrc
	source ~/.zshrc;
}

# FIX: '[ a == b -o c == d ]' uses the deprecated, ambiguous -o operator and
# an unset $1 when run without arguments; use [[ ... || ... ]] with ${1:-}.
if [[ "${1:-}" == "--force" || "${1:-}" == "-f" ]]; then
	doIt;
else
	read -p "This may overwrite existing files in your home directory. Are you sure? (y/n) " -n 1;
	echo "";
	if [[ $REPLY =~ ^[Yy]$ ]]; then
		doIt;
	fi;
fi;
unset doIt;
| true
|
9cc5fddf07b384683f112832f3b0c416f259efee
|
Shell
|
aahutsal/docker-intellij
|
/run
|
UTF-8
| 886
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Entrypoint for the IntelliJ container: seed the per-version config tree
# with the bundled defaults when needed, fix ownership, then launch the IDE.

# One-time creation of the config/plugins layout plus the predefined
# JDK table (runs again whenever a fresh host volume is mounted).
setup_config_tree() {
  local cfg="/home/developer/${INTELLIJ_VERSION}"
  sudo mkdir -p "${cfg}/config/plugins"
  sudo mkdir -p "${cfg}/config/options"
  sudo chown developer:developer -R "${cfg}"
  cd "${cfg}/config/plugins/"
  # Adding the predefined preferences to IDEA
  cp /home/developer/.jdk.table.xml "${cfg}/config/options/jdk.table.xml"
}

if [ ! -d "/home/developer/${INTELLIJ_VERSION}/config/plugins/Go" ]; then
  # We are running with a non-Docker contained volume for IntelliJ prefs,
  # so the plugin/config seeding has to run again.
  setup_config_tree
fi

if [ -d "/home/developer/${INTELLIJ_VERSION}" ]; then
  # Ensure proper permissions on the (possibly host-mounted) config tree.
  sudo chown developer:developer -R "/home/developer/${INTELLIJ_VERSION}"
fi

exec /opt/intellij/bin/idea.sh
| true
|
481365a7bad5efe74cdb092de2cc5127dc1ac806
|
Shell
|
juanjavier101/kubernetes-vagrant-cluster-experiments
|
/setup-master.sh
|
UTF-8
| 566
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
# Initialise a Kubernetes control-plane node and write a ready-to-run
# 'kubeadm join' command for the worker VMs into the shared Vagrant folder.

# Pod network range (must match the CNI add-on applied afterwards).
export cidr='10.244.0.0/16'

# First IPv4 address on eth0 — the API-server endpoint workers connect to.
export masterip=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)

kubeadm init --pod-network-cidr=$cidr

# SHA-256 digest of the cluster CA public key, for token-based discovery.
export digest=$(openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //')

# Bootstrap token created by 'kubeadm init'.
export token=$(kubeadm token list | grep "default bootstrap" | awk '{print $1}')

if [ ! -d /vagrant/tmp ]
then
  mkdir /vagrant/tmp
fi

echo "kubeadm join --token $token $masterip:6443 --discovery-token-ca-cert-hash sha256:$digest" > /vagrant/tmp/join.sh
| true
|
d92688c5daefae246cfeefef28c9c57506259c51
|
Shell
|
JochenHayek/ldapsearch
|
/ldapsearch.sh
|
UTF-8
| 853
| 2.5625
| 3
|
[] |
no_license
|
:
# Query an LDAP directory using connection parameters supplied via the
# environment (ldap_password, ldapuri, ldap_binddn, ldap_searchbase) and
# pipe the LDIF output through the companion decoding helper.
#
# $ env ldap_password='...' ldapuri='...' ldap_binddn='...' ldap_searchbase='...' ~/git-servers/github.com/JochenHayek/ldapsearch/ldapsearch.sh
# $ env ldap_password='...' ldapuri='...' ldap_binddn='...' ldap_searchbase='...' ~/git-servers/github.com/JochenHayek/ldapsearch/ldapsearch.sh > ~/transfer/ldap.$( date '+%Y%m%d%H%M%S' ).txt

# -W Prompt for simple authentication. This is used instead of specifying the password on the command line.
#
# -w passwd
# Use passwd as the password for simple authentication.
#
# -> ldapsearch: -W incompatible with -w

# -x: simple authentication; -E pr=500/noprompt: paged results, 500 per
# page, no interactive prompting; -z none: no client size limit;
# -o ldif-wrap=no: keep each attribute on a single line for parsing.
ldapsearch \
\
-x \
-w "${ldap_password}" \
\
-D "${ldap_binddn}" \
-b "${ldap_searchbase}" \
-H "${ldapuri}" \
\
-E pr=500/noprompt \
-z none \
-o ldif-wrap=no \
|
~/git-servers/github.com/JochenHayek/ldapsearch/ldapsearch--decode.pl
| true
|
d16407241215025d4f349f4449d5823a46958136
|
Shell
|
srcshelton/gitlog2dcl
|
/gitlog2dcl.sh
|
UTF-8
| 7,219
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# -----------------------------------------------------------------------------
#
# Copyright (c) 2016 Stuart Shelton.
# Copyright (c) 2016 Hewlett Packard Enterprise Co.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
#
# Create a valid debian changelog from git history, noting commits and any tags
# which may exist.
#
# Version numbers start at '1' (or any provided argument) and increment from
# there for each commit.
#
set -u
set -o pipefail

# Debug/trace toggles are taken from the environment.
declare debug="${DEBUG:-}"
declare trace="${TRACE:-}"
declare NAME="$( basename "$( readlink -e "${0}" )" )"
# Debian package name; derived from the git remote in main() when unset.
declare PKGNAME="${GITLOG2DCL_PKG_NAME:-}"
# Map of commit hash -> tag name, populated in main().
declare -A committags=()
# Print a fatal error to stderr and abort the script with status 1.
# A default message is substituted when none is supplied.
function die() {
	printf 'FATAL: %s\n' "${*:-Unknown error}" >&2
	exit 1
} # die
# Emit one Debian-changelog stanza for a single commit.
# Arguments: sequence number, distribution ("stable"), commit hash, author,
#            commit date (git's default six-field format), remaining args =
#            commit message lines.
# Outputs:   the formatted changelog entry on stdout.
# Returns:   the subshell's exit status; dies on SIGPIPE (141).
function processdata() {
	local -i num="${1:-1}" ; shift
	local stable="${1:-stable}" ; shift
	local commit="${1:-}" ; shift
	local author="${1:-}" ; shift
	local date="${1:-}" ; shift
	local -a message=( "${@:-}" )
	local line
	local -i rc=1 inserted=0

	# Presence of 'set -u' will cause block to fail if any variables are
	# unset...
	(
		local day month dom time year zone

		#echo "${PKGNAME} (__SEQ__${committags["${commit}"]:+-${committags["${commit}"]}}-${commit}) ${stable}; urgency=low"
		echo "${PKGNAME} (${num}) ${stable}; urgency=low"
		echo
		echo -n "  * commit ${commit}"
		# Annotate the entry with the tag name when this commit is tagged.
		[[ -n "${committags["${commit}"]:-}" ]] && echo -e ", tag '${committags["${commit}"]}'\n" || echo $'\n'
		# 'inserted' tracks whether the last emitted line was message text,
		# so a trailing separator blank line is only added when needed.
		for line in "${message[@]:-}"; do
			if [[ -n "${line// }" ]]; then
				echo "  * ${line:-}"
				inserted=1
			else
				echo
				inserted=0
			fi
		done
		(( inserted )) && echo
		# Split git's "Day Mon DOM HH:MM:SS Year Zone" date into fields.
		day="$( cut -d' ' -f 1 <<<"${date}" )"
		month="$( cut -d' ' -f 2 <<<"${date}" )"
		dom="$( cut -d' ' -f 3 <<<"${date}" )"
		time="$( cut -d' ' -f 4 <<<"${date}" )"
		year="$( cut -d' ' -f 5 <<<"${date}" )"
		zone="$( cut -d' ' -f 6 <<<"${date}" )"
		# Zero-pad a single-digit day of month.
		(( ( ${#dom} - 1 ) )) || dom="0${dom}"
		echo " -- ${author}  ${day}, ${dom} ${month} ${year} ${time} ${zone}"
		echo
		true
	)
	rc=${?}
	# When piping the output to a utility which then closes the FD and
	# 'set -o pipefail' is in effect, we get SIGPIPE/rc=141 at this
	# point...
	(( 141 == rc )) && die "Received SIGPIPE"
	(( debug )) && echo >&2 "DEBUG: processdata() returns ${rc}"
	return ${rc}
} # processdata
# Parse `git log` output line-by-line and emit one changelog stanza per
# commit via processdata(); entry numbers count down from the supplied
# starting value so the newest commit carries the highest version.
# Fix: the end-of-input error report previously printed ${commit} for both
# the Author: and Date: fields (copy/paste error); it now prints ${author}
# and ${date}.
function processlog() {
	local -i num=${1:-1}
	local tag value commit author date
	local -a message=()

	# We immediately start with a decrement...
	(( num++ ))

	while IFS= read -r line; do
		(( debug )) && echo >&2 "DEBUG: Read line '${line}'"
		case "${line:-}" in
			'commit '*)
				# A new commit header flushes the previously accumulated entry.
				if [[ -n "${commit:-}" ]]; then
					(( debug )) && echo >&2 "DEBUG: Processing entry for commit '${commit}'"
					if ! processdata $(( num-- )) 'stable' "${commit:-}" "${author:-}" "${date:-}" "${message[@]:-}"; then
						echo >&2 "ERROR: Incomplete 'git log' entry or truncated input:"
						echo >&2
						echo >&2 $'\tCurrent state:'
						echo >&2 -e "\tcommit ${commit:-}"
						echo >&2 -e "\tAuthor: ${author:-}"
						echo >&2 -e "\tDate: ${date:-}"
						for line in "${message[@]}"; do
							echo >&2 -e "\tText: ' ${line}'"
						done
						echo >&2
						die "Failed processing commit '${commit:-}'"
					else
						commit=''
						author=''
						date=''
						message=()
					fi
				fi
				value="${line#commit }"
				if [[ -n "${commit:-}" ]]; then
					die "LOGIC ERROR: 'commit' value \"${commit}\" to be overwritten with \"${value}\""
				fi
				commit="${value}"
				;;
			'Merge: '*)
				# FIXME: Ignored for now...
				:
				;;
			'Author: '*)
				value="${line#Author: }"
				if [[ -n "${author:-}" ]]; then
					die "LOGIC ERROR: 'author' value \"${author}\" to be overwritten with \"${value}\""
				fi
				author="${value}"
				;;
			'Date: '*)
				value="${line#Date: }"
				if [[ -n "${date:-}" ]]; then
					die "LOGIC ERROR: 'date' value \"${date}\" to be overwritten with \"${value}\""
				fi
				date="${value}"
				;;
			'    '*)
				# Commit message body: git log indents it with four spaces.
				value="${line#    }"
				message+=( "${value:-}" )
				;;
			'')
				# Blank line
				:
				;;
			*)
				echo >&2 "ERROR: Unknown 'git log' entry:"
				echo >&2 -e "\t'${line:-}'"
				echo >&2
				echo >&2 $'\tCurrent state:'
				echo >&2 -e "\tcommit ${commit:-}"
				echo >&2 -e "\tAuthor: ${author:-}"
				echo >&2 -e "\tDate: ${date:-}"
				for line in "${message[@]:-}"; do
					echo >&2 -e "\tText: ' ${line}'"
				done
				echo >&2
				die "Invalid input"
				;;
		esac
	done < <( git log 2>&1 )

	# Flush the final accumulated entry once input is exhausted.
	if [[ -n "${commit:-}" ]]; then
		if ! processdata $(( num-- )) 'stable' "${commit:-}" "${author:-}" "${date:-}" "${message[@]:-}"; then
			echo >&2 "ERROR: Incomplete 'git log' entry:"
			echo >&2
			echo >&2 $'\tCurrent state:'
			echo >&2 -e "\tcommit ${commit:-}"
			echo >&2 -e "\tAuthor: ${author:-}"
			echo >&2 -e "\tDate: ${date:-}"
			for line in "${message[@]}"; do
				echo >&2 -e "\tText: ' ${line}'"
			done
			echo >&2
			die "Failed processing commit '${commit:-}'"
		fi
	fi
} # processlog
# Entry point: verify we are inside a git repo, determine the package name,
# enumerate tags, count commits and emit the changelog (newest first) with
# sequential version numbers starting from the optional argument.
function main() {
	#local -a args=( "${@:-}" )

	if [[ " ${*:-} " =~ \ -(h|-help)\  ]]; then
		echo "Usage: ${NAME} [initial-version]"
		exit 0
	fi

	git rev-parse --is-inside-work-tree >/dev/null 2>&1 ||
		die "${NAME} must be executed from within a git repo"

	# Derive the package name from the origin remote URL unless overridden
	# via GITLOG2DCL_PKG_NAME.
	[[ -z "${PKGNAME:-}" ]] && PKGNAME="$( git remote show origin -n | grep -o 'Fetch URL: .*$' | cut -d' ' -f 3- | xargs basename | sed 's/\.git$//' )"
	[[ -z "${PKGNAME:-}" ]] && die "Could not determine package name"

	local tag commit line
	local -i num=0
	[[ -n "${1:-}" && "${1}" =~ [0-9]+ ]] && num="${1}"

	(( trace )) && set -o xtrace

	# Fix: corrected 'pacakge' typo in the status message.
	echo >&2 "Generating changelog for package '${PKGNAME}'..."

	if [[ -n "$( git tag )" ]]; then
		echo >&2 "Enumerating tags, please wait..."
	fi
	# Map each tag to the commit it points at, so entries can be annotated.
	while read -r tag; do
		if commit="$( git rev-list -n 1 "${tag:-}" 2>/dev/null )"; then
			committags["${commit}"]="${tag}"
		fi
	done < <( git tag 2>&1 )

	echo >&2 "Processing logs, please wait..."
	# Highest version number = (commit count - 1) + the starting offset.
	(( num = ( ( $(
		git log 2>/dev/null | grep -c '^commit [0-9a-f]'
	) - 1 ) + num ) ))
	# head -n -1 drops the trailing blank line of the final stanza.
	processlog ${num} | head -n -1

	(( trace )) && set +o xtrace

	return 0
} # main
main "${@:-}"
exit ${?}
# vi: set syntax=sh:
| true
|
ed41f874cd53ecfb52db6875deed3e27278a7fb5
|
Shell
|
Frihet/qnap-snmpd
|
/qpkg/qconfig.sh
|
UTF-8
| 902
| 3.40625
| 3
|
[] |
no_license
|
#! /bin/sh
# Report an error ("Error: <message>") followed by a blank line, then
# abort with status 1.
_exit()
{
	printf 'Error: %b\n\n' "$*"
	exit 1
}
QPKG_DIR="$(/usr/bin/dirname "$0")"
source $QPKG_DIR/environ.sh
case "$1" in
start)
if [ -f $QPKG_DIR/snmpd.conf.orig ]; then
_exit "${QPKG_NAME} is already enabled."
fi
$CMD_ECHO "Enable SNMPD"
$CMD_IPKG install net-snmp
$CMD_CP $SYS_OPT_DIR/etc/snmpd.conf $QPKG_DIR/snmpd.conf.orig
$QPKG_DIR/gensnmpd.conf.sh > $SYS_OPT_DIR/etc/snmpd.conf < $QPKG_DIR/snmpd.conf.orig
$CMD_LN -s $SYS_OPT_INIT_DIR/S70net-snmp $SYS_rcS_DIR/S70net-snmp
$SYS_rcS_DIR/S70net-snmp
;;
stop)
if ! [ -f $QPKG_DIR/snmpd.conf.orig ]; then
_exit "${QPKG_NAME} is not enabled."
fi
$CMD_ECHO "Disable SNMPD"
$CMD_KILLALL snmpd 2>/dev/null
$CMD_RM $SYS_rcS_DIR/S70net-snmp
$CMD_RM $SYS_OPT_DIR/etc/snmpd.conf
$CMD_RM $QPKG_DIR/snmpd.conf.orig
$CMD_IPKG remove net-snmp
;;
esac
| true
|
3fbe46131f0eb4d23a05d840b8989ebd01276f24
|
Shell
|
keleustes/airship-treasuremap
|
/tools/airship
|
UTF-8
| 4,720
| 3.75
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2019 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Shared setup for the pegleg/promenade/shipyard container wrappers below.
DEFAULT_TERM_OPTS=' '
# Set an interactive mode only if there is a TTY available.
test -t 1 && test -t 0 && DEFAULT_TERM_OPTS='-it'
: ${TERM_OPTS:=${DEFAULT_TERM_OPTS}}

# Python YAML module required to read versions.yaml
if grep -iq suse /etc/os-release; then
	rpm -q python3-pyaml --quiet || zypper --non-interactive install python3-pyaml
else
	dpkg -s python3-yaml &> /dev/null || apt -y install python3-yaml
fi

# Temp env file handed to `docker run --env-file`; cleaned up on exit.
ENV_FILE=$(mktemp)
trap "{ rm -f $ENV_FILE; }" EXIT

# prepare docker environment file
cat > $ENV_FILE << EOF
PEGLEG_PASSPHRASE=${PEGLEG_PASSPHRASE:-password123}
PEGLEG_SALT=${PEGLEG_SALT:-password123}
OS_AUTH_URL=${OS_AUTH_URL:-http://keystone-api.ucp.svc.cluster.local:5000/v3}
OS_PASSWORD=${OS_PASSWORD:-password123}
OS_PROJECT_DOMAIN_NAME=${OS_PROJECT_DOMAIN_NAME:-default}
OS_PROJECT_NAME=${OS_PROJECT_NAME:-service}
OS_USERNAME=${OS_USERNAME:-shipyard}
OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME:-default}
OS_IDENTITY_API_VERSION=${OS_IDENTITY_API_VERSION:-3}
EOF

# Repository root (one level above this script's directory).
REPO_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../ >/dev/null 2>&1 && pwd )"
# Numeric uid/gid used to run the containers as the invoking user.
USER=$(id -u)
GROUP=$(id -g)
# Extract a value from a multi-document YAML manifest file.
# Arguments: $1 file, $2 schema, $3 metadata.name, $4 python-style key path
#            (e.g. "['data']['images']"), $5 operator (get_size | dict_keys
#            | ''), $6 allow_fail.
# Outputs:   RESULT holds the looked-up value(s); FAIL=true on failure.
#            Exits the script on failure unless allow_fail is 'true'.
# NOTE(review): the embedded python's indentation was reconstructed from
# the evident control flow (the source dump stripped leading whitespace);
# verify against the upstream script.
manifests_lookup(){
	local file="$1"
	local schema="$2"
	local mdata_name="$3"
	local key_path="$4"
	local oper="$5"
	local allow_fail="$6"

	FAIL=false
	RESULT=`python3 -c "
import yaml,sys
y = yaml.load_all(open('$file'))
for x in y:
    if x.get('schema') == '$schema':
        if x['metadata']['name'] == '$mdata_name':
            if isinstance(x$key_path,list):
                if '$oper' == 'get_size':
                    print(len(x$key_path))
                    break
                else:
                    for i in x$key_path:
                        print(i)
                    break
            else:
                if '$oper' == 'dict_keys':
                    print(' '.join(x$key_path.keys()))
                    break
                else:
                    print(x$key_path)
                    break
else:
    sys.exit(1)" 2>&1` || FAIL=true

	if [[ $FAIL = true ]] && [[ $allow_fail != true ]]; then
		echo "error: Lookup failed for schema '$schema', \
metadata.name '$mdata_name', key path '$key_path'" >&2
		exit 1
	fi
}
# Look up an image reference under the software-versions document of
# versions.yaml; leaves the value in IMAGE_URL (via manifests_lookup's
# RESULT).
versions_lookup() {
	manifests_lookup "${REPO_DIR}/global/software/config/versions.yaml" \
		'pegleg/SoftwareVersions/v1' \
		'software-versions' "$1"
	IMAGE_URL=$RESULT
}
# Print usage and examples for the supported subcommands.
help() {
	echo -n "Usage: airship <pegleg|promenade|shipyard> [OPTION]...
Examples:
tools/airship pegleg site -r /target collect airsloop -s collect
tools/airship promenade generate-certs -o /target/certs /target/collect/*.yaml
tools/airship promenade build-all -o /target/bundle /target/collect/*.yaml /target/certs/*.yaml
tools/airship shipyard get actions
"
}
# Run pegleg in a container, mounting the current directory as /target and
# the caller's ~/.ssh for repository access.
pegleg() {
	versions_lookup "['data']['images']['ucp']['pegleg']['pegleg']"
	# pegleg expects a USER variable inside the container environment.
	cat >> $ENV_FILE << EOF
USER=pegleg
EOF
	docker run --rm --net=host $TERM_OPTS \
		-u "${USER}:${GROUP}" \
		-w /target \
		-v $(pwd):/target \
		-v ${HOME}/.ssh:/target/.ssh \
		--env-file $ENV_FILE \
		$IMAGE_URL $@
}
# Run promenade in a container; /tmp and the docker socket are mounted so
# it can pull the hyperkube image while generating genesis.sh.
promenade() {
	versions_lookup "['data']['images']['ucp']['promenade']['promenade']"
	# support proxy for pulling k8s binary
	cat >> $ENV_FILE << EOF
http_proxy=${http_proxy:-}
https_proxy=${https_proxy:-}
no_proxy=${no_proxy:-}
HTTP_PROXY=${HTTP_PROXY:-}
HTTPS_PROXY=${HTTPS_PROXY:-}
NO_PROXY=${NO_PROXY:-}
# Promenade specific variables for downloading hyperkube image to generate genesis.sh
PROMENADE_TMP=/tmp
PROMENADE_TMP_LOCAL=/tmp
EOF
	docker run --rm --net=host $TERM_OPTS \
		-u "${USER}:${GROUP}" \
		-w /target \
		-v $(pwd):/target \
		-v /tmp:/tmp \
		-v /var/run/docker.sock:/var/run/docker.sock \
		--env-file $ENV_FILE \
		$IMAGE_URL $@
}
# Run the shipyard CLI in a container against the UCP keystone endpoint
# configured in ENV_FILE.
shipyard() {
	versions_lookup "['data']['images']['ucp']['shipyard']['shipyard']"
	# NOTE(review): SHIPYARD_IMAGE is assigned but never used -- the
	# docker run below uses IMAGE_URL set by versions_lookup; confirm
	# before removing.
	SHIPYARD_IMAGE=$RESULT
	docker run --rm --net=host $TERM_OPTS \
		-u "${USER}:${GROUP}" \
		-w /target \
		-v $(pwd):/target \
		--env-file $ENV_FILE \
		$IMAGE_URL $@
}
case "$1" in
'pegleg')
pegleg $@
;;
'promenade')
promenade $@
;;
'shipyard')
shift;
shipyard $@
;;
*) help
exit 1
;;
esac
| true
|
3e8da9e8ebc4193df3a9380ad60996f949e8b6e9
|
Shell
|
natforsdick/Weta_GBS
|
/04_stacks_populationsb.sl
|
UTF-8
| 1,570
| 3
| 3
|
[] |
no_license
|
#!/bin/bash -e
#SBATCH -J stacks_pop
#SBATCH -A ga03186
#SBATCH --time=01:45:00
#SBATCH --mem=100M
#SBATCH --cpus-per-task=4
#SBATCH --out=%x.%j.out
#SBATCH --err=%x.%j.err
#SBATCH --mail-type=FAIL,END
#SBATCH --mail-user=forsdickn@landcareresearch.co.nz

############
# 04_stacks_populations.sl
# Nat Forsdick, 2021-01-25
# For running the populations tool in stacks to call and filter SNPs.
############

############
# MODULES
module purge
module load Stacks/2.41-gimkl-2018b
############

############
# PARAMS
INDIR=/nesi/nobackup/ga03186/Weta_GBS_Batchcombo_adap/04_ref_map/
OUTDIR=/nesi/nobackup/ga03186/Weta_GBS_Batchcombo_adap/05_populations/
poplist="Het Mah Fallai Mahoenui_all Weta_GBS_Batch2_POP_MI Weta_GBS_Batch2_POP_SR"
#POPMAP=/nesi/project/ga03186/ref/Weta_GBS_Batch2_POP_blankrem.txt
REFDIR=/nesi/project/ga03186/ref/
############

# Confirm which populations binary the loaded module provides.
which populations

# Run populations twice per population: (a) no missing data allowed
# (-r 1) into <pop>_a/, and (b) up to 30% missing data (-r 0.7) into
# <pop>_b/. One SNP is written per locus (--write-single-snp).
# NOTE(review): populations is invoked with -t 8 but the job requests
# --cpus-per-task=4 -- confirm the intended thread count.
for pop in $poplist;
do
	if [ ! -e ${OUTDIR}${pop}_a/ ]; then
		mkdir -p ${OUTDIR}${pop}_a/
		mkdir -p ${OUTDIR}${pop}_b/
	fi
	echo "Running stacks populations for ${pop}, no missing data"
	populations -P ${INDIR}Weta_GBS_adap_${pop} -O ${OUTDIR}${pop}_a/ -M ${REFDIR}${pop}.txt -t 8 --min-maf 0.05 --hwe --fstats --smooth-popstats --smooth --bootstrap --vcf --structure --genepop -r 1 --write-single-snp
	echo "Running stacks populations for ${pop}, 30% missing data"
	populations -P ${INDIR}Weta_GBS_adap_${pop} -O ${OUTDIR}${pop}_b/ -M ${REFDIR}${pop}.txt -t 8 --min-maf 0.05 --hwe --fstats --smooth-popstats --smooth --bootstrap --vcf --structure --genepop -r 0.7 --write-single-snp
	echo "Completed stacks processing for ${pop}"
done
| true
|
94739b2c7102eae3c4a5718064b351435052b724
|
Shell
|
kbairak/dotfiles
|
/bin/txcompress
|
UTF-8
| 562
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
if [ -f /tmp/txcompress_lock ]; then
echo "Compressor locked"
exit
fi
touch /tmp/txcompress_lock
echo -n 'Collecting static... ' &&
/Users/kbairak/devel/env/tx/bin/python\
/Users/kbairak/devel/repos/transifex/txc/manage.py collectstatic --noinput\
> /dev/null 2> /dev/null &&
echo 'done' &&
echo -n 'Compressing... ' &&
/Users/kbairak/devel/env/tx/bin/python\
/Users/kbairak/devel/repos/transifex/txc/manage.py compress -f\
> /dev/null 2> /dev/null &&
echo 'done' &&
rm /tmp/txcompress_lock
say 'compressed' &
| true
|
8e8797d4305695a52a792cbc8a1148b5271a138f
|
Shell
|
jgurnett/code2pdf
|
/code2pdf.sh
|
UTF-8
| 425
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
#-----------------------------------------------------------------------------
# Author: Joel Gurnett
# The script coverts code to pdf so that you can print out your code files
#-----------------------------------------------------------------------------
fd=$1
name=$2
enscript --title $fd\
-B -2rGE $fd -p 1.ps # convert your file to ps format
ps2pdf 1.ps $name # covert ps to pdf
rm 1.ps # rm ps file
| true
|
472a92b3a06ac7b81a0ecd6aea0f79e5d0e4e937
|
Shell
|
greatedadventure/dotrc
|
/zshrc
|
UTF-8
| 1,526
| 2.796875
| 3
|
[] |
no_license
|
# load version control info:
autoload -Uz vcs_info
zstyle ':vcs_info:*' enable hg git
zstyle ':vcs_info:hg*' formats "(%s - %r/%b)"
zstyle ':vcs_info:hg*' actionformats "(%s|%a - %r/%b)"

# Pull in prezto's zshrc when the framework is installed.
[ -f ${ZDOTDIR:-$HOME}/.zprezto/runcoms/zshrc ] && {
	source ${ZDOTDIR:-$HOME}/.zprezto/runcoms/zshrc
}

# History and bell behaviour.
setopt NO_BANG_HIST
setopt HIST_NO_STORE # don't store 'history' or 'fc' commands
setopt NO_HIST_BEEP
setopt NO_BEEP
setopt NO_AUTO_PUSHD

# Incremental history search on Ctrl-R / Ctrl-S.
bindkey '^R' history-incremental-search-backward
bindkey '^S' history-incremental-search-forward

# Enable loading of bash-style completion definitions below.
autoload -U bashcompinit
bashcompinit

cdpath=(~)

setopt NO_NOTIFY # don't print immediate message when background process stopped
setopt NO_NOMATCH # don't expand patterns if can not been matched (nice for ssh usage)

zmodload -i zsh/complist
zstyle ':completion:*' list-colors ''

# load bash completions
[ -d $HOME/.zsh/bash_completion ] && {
	for bs in `ls $HOME/.zsh/bash_completion`; do
		source "$HOME/.zsh/bash_completion/$bs"
	done
}

# Shared helper library.
[ -d "$HOME/.zsh/lib" ] && {
	for h in `ls "$HOME/.zsh/lib"`; do
		source "$HOME/.zsh/lib/$h"
	done
}

# Per-OS overrides ($SYSTEM), then per-host overrides ($SYSTEM/$NODE).
[ -d "$HOME/.zsh/$SYSTEM" ] && {
	for h in `ls "$HOME/.zsh/$SYSTEM"`; do
		fh=$HOME/.zsh/$SYSTEM/$h
		[ -f "$fh" ] && { source "$fh" }
	done
}

NODE=`uname -n`
[ -d "$HOME/.zsh/$SYSTEM/$NODE" ] && {
	for h in `ls "$HOME/.zsh/$SYSTEM/$NODE"`; do
		fh=$HOME/.zsh/$SYSTEM/$NODE/$h
		[ -f "$fh" ] && { source "$fh" }
	done
}

PATH=$PATH:$HOME/.rvm/bin # Add RVM to PATH for scripting
| true
|
3857836dd04bf921799d1813537efe4fcfdf0c03
|
Shell
|
Khawaja-Usman-Riaz-Sehgal/Shell_Scripting_Tasks
|
/task4.sh
|
UTF-8
| 1,320
| 4.40625
| 4
|
[] |
no_license
|
#!/bin/bash
# Back up all .c files from a user-supplied directory ($1) into
# ~/backup_dir, copying only files that are new or have changed since the
# last backup.
echo
echo "Task is to create a script that creates a backup ( in /home/) of all .c files present in a directory (given by user as a command line argument)"
echo "Create a backup directory if it doesn't exist."
echo "If it exist:"
echo "If there have been changes to the file since the last backup, then copy the current .c file to the backup directory and print a message that the file has been updated"
echo "Else (if no copy exists in the backup directory) copy it and print a message that the file had no previous copy and is now backed up"
echo "(Otherwise, no copy will be made)"
echo

source_dir=$1
destination_dir=~/backup_dir

# Create the backup directory on first use.
if [ -d "$destination_dir" ]; then
	echo "$destination_dir exists."
else
	echo "$destination_dir does not exist."
	mkdir "$destination_dir"
	echo "$destination_dir has been successfully created."
fi

# Fix: iterate over the .c files via a glob instead of parsing `ls`
# output -- the original `ls $1/*.c | xargs basename -s *.c` pipeline
# broke whenever the directory held more than one .c file (the *.c after
# -s was itself glob-expanded) and on names containing spaces. All
# expansions are now quoted.
for path in "$source_dir"/*.c
do
	# Glob did not match: the directory contains no .c files.
	[ -e "$path" ] || continue
	file=$(basename "$path")
	if [ -e "$destination_dir/$file" ]
	then
		# An older copy exists: only refresh it when contents differ.
		if cmp -s "$path" "$destination_dir/$file"
		then
			echo "$file is unchanged"
		else
			echo "the $file has been updated"
			cp "$path" "$destination_dir"
		fi
	else
		cp "$path" "$destination_dir"
		echo "the $file had no previous copy and is now backed up"
	fi
done
| true
|
136c853e2e7102f503734f0b51d79048a19a961e
|
Shell
|
martin0258/instragram-plus
|
/src/libmf.sh
|
UTF-8
| 713
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# Train and evaluate a LIBMF matrix-factorisation model: build libmf if
# needed, convert the text data sets to LIBMF binary format, train with a
# validation set, then predict on the test set.
set -e

# Define library folder location.
LIBMF=../lib/libmf-1.1/libmf

# Define data set location.
SUBTRAIN=../data/libmf/subtrain
VAL=../data/libmf/val
TEST=../data/libmf/test

# Build (make) libmf executable if it does not exist.
if [ ! -f $LIBMF ];
then
	echo "File $LIBMF does not exist. Building it first..."
	cd ../lib/libmf-1.1/
	make
	cd -
fi

# Convert data sets to binary files.
${LIBMF} convert ${SUBTRAIN} subtrain.bin
${LIBMF} convert ${VAL} val.bin
${LIBMF} convert ${TEST} test.bin

# Train.
# -k 40 latent factors, -s 4 threads, -p/-q 0.05 regularisation,
# -g 0.003 learning rate, no user/item bias (-ub/-ib -1), validating
# against val.bin while training on subtrain.bin.
${LIBMF} train --tr-rmse --obj -k 40 -s 4 -p 0.05 -q 0.05 -g 0.003 -ub -1 -ib -1 --no-use-avg --rand-shuffle -v val.bin subtrain.bin model

# Predict.
${LIBMF} predict test.bin model output
| true
|
84c516f1b8de76a836bc69f22c77915c3a813a69
|
Shell
|
alexandrebignalet/skull-king
|
/bin/stop-firebase-emulator
|
UTF-8
| 184
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Stop the mock Firebase Realtime Database emulator, if a process is
# currently listening on TCP port 9000.
emulator_pid="$(lsof -t -i :9000 -s tcp:LISTEN)"
if [ -n "$emulator_pid" ]; then
	echo "Stopping mock Firebase Realtime Database server"
	kill "$emulator_pid"
fi
| true
|
1ba5e38cbbd60ebe0507d2ca4190ac6e843a3e3c
|
Shell
|
adam-s/simplejobs
|
/bin/build.sh
|
UTF-8
| 653
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build and deploy: stash private config outside the repo, merge master
# into the staging branch, run the grunt production build, and push the
# result to the staging remote.

# Move config files outside the scope of git which doesn't track them
cp -rf server/config/environment ~/WebstormProjects/config

# Make sure we are working with the master branch
git checkout master

# Push latest commit to GitHub
# git add .
# printf "Commit message: "
# read -r message
# git commit -m "$message"
# git push origin master

# Move to staging local
git checkout staging

# Merge staging with master
git merge master -m "Preparing to build"

# Build the production files
grunt build
git add .
git commit -m "built"

# Push to remote staging
git push staging staging:master

# Return to the master branch
git checkout master
| true
|
28470233c6a427e17245ec0529307f175ed859f7
|
Shell
|
juniorgasparotto/Scripts
|
/openshift/commands/pods
|
UTF-8
| 1,064
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# oc (OpenShift CLI) pod command cheat sheet.
# NOTE(review): executing this file runs every example in sequence --
# it appears intended as reference notes rather than a runnable script.

# Get all pods across every namespace
oc get pods --all-namespaces

# List all pods in the current namespace
oc get pods

# List all pods with extra columns (wide output)
oc get pods -o wide

# Create a pod from a manifest file
oc create pod -f [file-path.yml]

# Delete every pod in a namespace
oc delete --all pods -n [namespace]
oc delete --all pods -n openshift-console

# Delete all pods carrying the same label
oc delete pods -l app=busybox

# Inspect a pod
oc describe pod flesk -n teste

# Execute a command inside a container
oc rsh deploy/memcached cat /etc/os-release
oc rsh pods/site-1-jngrv cat /etc/os-release
oc exec site-1-jngrv cat /etc/os-release

# Open a shell inside a pod to run commands interactively
oc exec -ti [podname] sh
oc exec -ti PODNAME sh -c CONTAINER_NAME
oc exec -ti alpine-7656fc89b6-vjcf4 sh
oc exec -ti alpine-files-658f4d8b5b-qt6bb sh
oc exec -ti alpine-files2-9c6cbf4bb-mv464 sh
oc exec -ti site-4-ldcsb sh
oc exec -ti grafana-6b9f85786f-dg7lp sh

# Show which pods consume the most resources
oc adm top pod --all-namespaces
| true
|
621a06a748b47f7998edbfcf744141b2c978162c
|
Shell
|
FirmaChain/tendermint
|
/scripts/install/install_tendermint_arm.sh
|
UTF-8
| 932
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Install Go and build/install Tendermint from source on an ARM (armv6l)
# host such as a Raspberry Pi.

REPO=github.com/tendermint/tendermint

# change this to a specific release or branch
BRANCH=master

GO_VERSION=1.12

sudo apt-get update -y

# get and unpack golang
curl -O https://storage.googleapis.com/golang/go$GO_VERSION.linux-armv6l.tar.gz
tar -xvf go$GO_VERSION.linux-armv6l.tar.gz

# move go folder and add go binary to path
sudo mv go /usr/local
echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile

# create the go directory, set GOPATH, and put it on PATH
mkdir go
echo "export GOPATH=$HOME/go" >> ~/.profile
echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile
source ~/.profile

# get the code and move into repo
go get $REPO
cd "$GOPATH/src/$REPO"

# build & install
git checkout $BRANCH
# XXX: uncomment if branch isn't master
# git fetch origin $BRANCH
make tools
make install

# the binary is located in $GOPATH/bin
# run `source ~/.profile` or reset your terminal
# to persist the changes
| true
|
605ba7f7a64a3fc51691b4a23f0af41572a753d0
|
Shell
|
KireinaHoro/cluster-deploy
|
/util.sh
|
UTF-8
| 1,746
| 3.40625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
#
# Copyright 2019 Pengcheng Xu <i@jsteward.moe>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Log an informational message to stdout, prefixed with "[INFO ] ".
info() {
	printf '[INFO ] %s\n' "$*"
}
# Log a warning to stderr in yellow, prefixed with "[WARN ] ".
warn() {
	printf '\033[33m[WARN ] %s\033[39m\n' "$*" >&2
}
# Log a fatal error to stderr in red and abort the script with status 1.
die() {
	printf '\033[31m[FATAL] %s\033[39m\n' "$*" >&2
	exit 1
}
# Run a command; on non-zero exit, report it (with its status) via die()
# and abort the script.
assert() {
	"$@" || die "Command \"$@\" failed with return status $?"
}
# Announce the deployment target. VENDOR and FQDN are expected to be set
# beforehand (FQDN is defined by check_config from settings.sh).
greetings() {
	warn "Deploying cluster node for ${VENDOR}"
	warn "Target host: ${FQDN}"
}
# Source settings.sh and die if the file or any required variable is
# missing; also derives FQDN from HOSTNAME and DOMAIN.
check_config() {
	if [ ! -f "settings.sh" ]; then
		die "settings.sh missing."
	fi
	source settings.sh
	for var in IMAGE_ROOT HOSTNAME DOMAIN TARGET VG RAID_SCHEME TIMEZONE ROOT_SHADOW WHEEL_USERS REBOOT_TIMEOUT; do
		# ${!var+x} expands to 'x' only when the named variable is set.
		[ -n "${!var+x}" ] || die "Required config \$${var} not found in settings.sh."
	done
	# define FQDN for later use.
	FQDN="${HOSTNAME}.${DOMAIN}"
}
# Verify that every external tool the deploy scripts invoke is available
# on PATH; dies on the first missing command.
check_commands() {
	local commands=(cp sgdisk pvcreate pvdisplay vgcreate vgdisplay lvcreate lvdisplay mkfs.xfs udevadm mount gzip dd sed xfsrestore chroot zpool)
	for cmd in ${commands[@]}; do
		command -v ${cmd} &>/dev/null || die "Required command ${cmd} not found."
	done
}
# Print the filesystem UUID of the given block device.
# $1 may be a symlink (e.g. /dev/disk/by-id/...); it is resolved first.
get_uuid() {
	local real_disk=$(readlink -f $1)
	# Match this device's blkid line and extract the first UUID="..." field.
	blkid | sed -n "s@^${real_disk}:.* UUID=\"\([^ ]*\)\".*\$@\1@p"
}
| true
|
320544e21a4e67fde23e0f068faf9da75cc7d760
|
Shell
|
lberk/release
|
/ci-operator/step-registry/aws/provision/bastionhost/aws-provision-bastionhost-commands.sh
|
UTF-8
| 14,254
| 3.4375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# CI step: gather the leased AWS region, VPC and subnet details and pick
# an AMI/instance type ahead of creating the bastion host stack.

set -o nounset
set -o errexit
set -o pipefail

# Kill any background children on TERM so the CI step shuts down cleanly.
trap 'CHILDREN=$(jobs -p); if test -n "${CHILDREN}"; then kill ${CHILDREN} && wait; fi' TERM

export AWS_SHARED_CREDENTIALS_FILE="${CLUSTER_PROFILE_DIR}/.awscred"

# yq v3 is needed below to read the shared subnet list.
curl -L https://github.com/mikefarah/yq/releases/download/3.3.0/yq_linux_amd64 -o /tmp/yq && chmod +x /tmp/yq

REGION="${LEASED_RESOURCE}"

# 1. get vpc id and public subnet
VpcId=$(cat "${SHARED_DIR}/vpc_id")
echo "VpcId: $VpcId"
PublicSubnet="$(/tmp/yq r "${SHARED_DIR}/public_subnet_ids" '[0]')"
echo "PublicSubnet: $PublicSubnet"

# CloudFormation stack and S3 bucket names, scoped to this CI job.
stack_name="${NAMESPACE}-${JOB_NAME_HASH}-bas"
s3_bucket_name="${NAMESPACE}-${JOB_NAME_HASH}-s3"

BastionHostInstanceType="t2.medium"
# there is no t2.medium instance type in us-gov-east-1 region
if [ "${REGION}" == "us-gov-east-1" ]; then
	BastionHostInstanceType="t3a.medium"
fi

ssh_pub_key=$(<"${CLUSTER_PROFILE_DIR}/ssh-publickey")

workdir=`mktemp -d`
echo -e "==== Start to create bastion host ===="
echo -e "working dir: $workdir"

# Resolve the Fedora CoreOS AMI for the leased region from the published
# stable-stream metadata.
# TODO: move repo to a more appropriate location
curl -sL https://raw.githubusercontent.com/yunjiang29/ocp-test-data/main/coreos-for-bastion-host/fedora-coreos-stable.json -o $workdir/fedora-coreos-stable.json
AmiId=$(jq -r .architectures.x86_64.images.aws.regions[\"${REGION}\"].image < $workdir/fedora-coreos-stable.json)
echo -e "AMI ID: $AmiId"
## ----------------------------------------------------------------
# bastion host CF template
## ----------------------------------------------------------------
cat > ${workdir}/bastion.yaml << EOF
AWSTemplateFormatVersion: 2010-09-09
Description: Template for RHEL machine Launch
Parameters:
VpcCidr:
AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-4]))$
ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/16-24.
Default: 10.0.0.0/16
Description: CIDR block for VPC.
Type: String
VpcId:
Description: The VPC-scoped resources will belong to this VPC.
Type: AWS::EC2::VPC::Id
AmiId:
Description: Current CoreOS AMI to use for proxy.
Type: AWS::EC2::Image::Id
Machinename:
AllowedPattern: ^([a-zA-Z][a-zA-Z0-9\-]{0,26})$
MaxLength: 27
MinLength: 1
ConstraintDescription: Machinename
Description: Machinename
Type: String
Default: qe-dis-registry-proxy
PublicSubnet:
Description: The subnets (recommend public) to launch the registry nodes into
Type: AWS::EC2::Subnet::Id
BastionHostInstanceType:
Default: t2.medium
Type: String
BastionIgnitionLocation:
Description: Ignition config file location.
Type: String
Metadata:
AWS::CloudFormation::Interface:
ParameterGroups:
- Label:
default: "Host Information"
Parameters:
- BastionHostInstanceType
- Label:
default: "Network Configuration"
Parameters:
- PublicSubnet
ParameterLabels:
PublicSubnet:
default: "Worker Subnet"
BastionHostInstanceType:
default: "Worker Instance Type"
Resources:
BastionSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: Bastion Host Security Group
SecurityGroupIngress:
- IpProtocol: icmp
FromPort: 0
ToPort: 0
CidrIp: !Ref VpcCidr
- IpProtocol: tcp
FromPort: 22
ToPort: 22
CidrIp: 0.0.0.0/0
- IpProtocol: tcp
FromPort: 3128
ToPort: 3128
CidrIp: 0.0.0.0/0
- IpProtocol: tcp
FromPort: 3129
ToPort: 3129
CidrIp: 0.0.0.0/0
- IpProtocol: tcp
FromPort: 5000
ToPort: 5000
CidrIp: 0.0.0.0/0
- IpProtocol: tcp
FromPort: 80
ToPort: 80
CidrIp: 0.0.0.0/0
- IpProtocol: tcp
FromPort: 8080
ToPort: 8080
CidrIp: 0.0.0.0/0
VpcId: !Ref VpcId
BastionInstance:
Type: AWS::EC2::Instance
Properties:
ImageId: !Ref AmiId
InstanceType: !Ref BastionHostInstanceType
NetworkInterfaces:
- AssociatePublicIpAddress: "True"
DeviceIndex: "0"
GroupSet:
- !GetAtt BastionSecurityGroup.GroupId
SubnetId: !Ref "PublicSubnet"
Tags:
- Key: Name
Value: !Join ["", [!Ref Machinename]]
BlockDeviceMappings:
- DeviceName: /dev/xvda
Ebs:
VolumeSize: "60"
VolumeType: gp2
UserData:
Fn::Base64: !Sub
- '{"ignition":{"config":{"replace":{"source":"\${IgnitionLocation}"}},"version":"3.0.0"}}'
- {
IgnitionLocation: !Ref BastionIgnitionLocation
}
Outputs:
BastionInstanceId:
Description: Bastion Host Instance ID
Value: !Ref BastionInstance
BastionSecurityGroupId:
Description: Bastion Host Security Group ID
Value: !GetAtt BastionSecurityGroup.GroupId
PublicDnsName:
Description: The bastion host node Public DNS, will be used for release image mirror from slave
Value: !GetAtt BastionInstance.PublicDnsName
PrivateDnsName:
Description: The bastion host Private DNS, will be used for cluster install pulling release image
Value: !GetAtt BastionInstance.PrivateDnsName
PublicIp:
Description: The bastion host Public IP, will be used for registering minIO server DNS
Value: !GetAtt BastionInstance.PublicIp
EOF
## ----------------------------------------------------------------
# PROXY
# /srv/squid/etc/passwords
# /srv/squid/etc/mime.conf
# /srv/squid/etc/squid.conf
# /srv/squid/log/
# /srv/squid/cache
## ----------------------------------------------------------------
## PROXY CONFIG
cat > ${workdir}/squid.conf << EOF
auth_param basic program /usr/lib64/squid/basic_ncsa_auth /etc/squid/passwords
auth_param basic realm proxy
acl authenticated proxy_auth REQUIRED
acl CONNECT method CONNECT
http_access allow authenticated
http_port 3128
EOF
## PROXY Service
cat > ${workdir}/squid-proxy.service << EOF
[Unit]
Description=OpenShift QE Squid Proxy Server
After=network.target syslog.target
[Service]
Type=simple
TimeoutStartSec=5m
ExecStartPre=-/usr/bin/podman rm "squid-proxy"
ExecStart=/usr/bin/podman run --name "squid-proxy" \
--net host \
-p 3128:3128 \
-p 3129:3129 \
-v /srv/squid/etc:/etc/squid:Z \
-v /srv/squid/cache:/var/spool/squid:Z \
-v /srv/squid/log:/var/log/squid:Z \
quay.io/crcont/squid
ExecReload=-/usr/bin/podman stop "squid-proxy"
ExecReload=-/usr/bin/podman rm "squid-proxy"
ExecStop=-/usr/bin/podman stop "squid-proxy"
Restart=always
RestartSec=30
[Install]
WantedBy=multi-user.target
EOF
## ----------------------------------------------------------------
# MIRROR REGISTORY
# /opt/registry/auth/htpasswd
# /opt/registry/certs/domain.crt
# /opt/registry/certs/domain.key
# /opt/registry/data
#
## ----------------------------------------------------------------
cat > ${workdir}/poc-registry.service << EOF
[Unit]
Description=OpenShift POC HTTP for PXE Config
After=network.target syslog.target
[Service]
Type=simple
TimeoutStartSec=5m
ExecStartPre=-/usr/bin/podman rm "poc-registry"
ExecStartPre=/usr/bin/chcon -Rt container_file_t /opt/registry
ExecStart=/usr/bin/podman run --name poc-registry -p 5000:5000 \
--net host \
-v /opt/registry/data:/var/lib/registry:z \
-v /opt/registry/auth:/auth \
-e "REGISTRY_AUTH=htpasswd" \
-e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
-e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
-v /opt/registry/certs:/certs:z \
-e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
-e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
registry:2
ExecReload=-/usr/bin/podman stop "poc-registry"
ExecReload=-/usr/bin/podman rm "poc-registry"
ExecStop=-/usr/bin/podman stop "poc-registry"
Restart=always
RestartSec=30
[Install]
WantedBy=multi-user.target
EOF
## ----------------------------------------------------------------
# IGNITION
## ----------------------------------------------------------------
# Read proxy/registry secrets from the mounted vault paths and base64-encode
# them (-w0 = no line wrapping) so they can be embedded as data: URLs in the
# Ignition JSON written below.
PROXY_CREDENTIAL=$(< /var/run/vault/proxy/proxy_creds)
PROXY_CREDENTIAL_ARP1=$(< /var/run/vault/proxy/proxy_creds_encrypted_apr1)
PROXY_CREDENTIAL_CONTENT="$(echo -e ${PROXY_CREDENTIAL_ARP1} | base64 -w0)"
PROXY_CONFIG_CONTENT=$(cat ${workdir}/squid.conf | base64 -w0)
REGISTRY_PASSWORD_CONTENT=$(cat "/var/run/vault/mirror-registry/registry_creds_encrypted_htpasswd" | base64 -w0)
REGISTRY_CRT_CONTENT=$(cat "/var/run/vault/mirror-registry/server_domain.crt" | base64 -w0)
REGISTRY_KEY_CONTENT=$(cat "/var/run/vault/mirror-registry/server_domain.pem" | base64 -w0)
# Collapse each systemd unit file to a single line (newlines -> literal \n,
# double quotes escaped) so it is a valid JSON string value in the
# Ignition "systemd.units[].contents" fields.
PROXY_SERVICE_CONTENT=$(sed ':a;N;$!ba;s/\n/\\n/g' ${workdir}/squid-proxy.service | sed 's/\"/\\"/g')
REGISTRY_SERVICE_CONTENT=$(sed ':a;N;$!ba;s/\n/\\n/g' ${workdir}/poc-registry.service | sed 's/\"/\\"/g')
echo -e "Creating ${workdir}/bastion.ign"
cat > ${workdir}/bastion.ign << EOF
{
"ignition": {
"config": {},
"security": {
"tls": {}
},
"timeouts": {},
"version": "3.0.0"
},
"passwd": {
"users": [
{
"name": "core",
"sshAuthorizedKeys": [
"${ssh_pub_key}"
]
}
]
},
"storage": {
"files": [
{
"path": "/srv/squid/etc/passwords",
"contents": {
"source": "data:text/plain;base64,${PROXY_CREDENTIAL_CONTENT}"
},
"mode": 420
},
{
"path": "/srv/squid/etc/squid.conf",
"contents": {
"source": "data:text/plain;base64,${PROXY_CONFIG_CONTENT}"
},
"mode": 420
},
{
"path": "/srv/squid/etc/mime.conf",
"contents": {
"source": "data:text/plain;base64,"
},
"mode": 420
},
{
"path": "/opt/registry/auth/htpasswd",
"contents": {
"source": "data:text/plain;base64,${REGISTRY_PASSWORD_CONTENT}"
},
"mode": 420
},
{
"path": "/opt/registry/certs/domain.crt",
"contents": {
"source": "data:text/plain;base64,${REGISTRY_CRT_CONTENT}"
},
"mode": 420
},
{
"path": "/opt/registry/certs/domain.key",
"contents": {
"source": "data:text/plain;base64,${REGISTRY_KEY_CONTENT}"
},
"mode": 420
}
],
"directories": [
{
"path": "/srv/squid/log",
"mode": 493
},
{
"path": "/srv/squid/cache",
"mode": 493
},
{
"path": "/opt/registry/data",
"mode": 493
}
]
},
"systemd": {
"units": [
{
"contents": "${PROXY_SERVICE_CONTENT}",
"enabled": true,
"name": "squid-proxy.service"
},
{
"contents": "${REGISTRY_SERVICE_CONTENT}",
"enabled": true,
"name": "poc-registry.service"
},
{
"enabled": false,
"mask": true,
"name": "zincati.service"
}
]
}
}
EOF
# upload ignition file to s3
# The bucket and object are made public-read because the EC2 instance
# fetches the ignition config anonymously at first boot.
# NOTE(review): the ignition payload embeds credential hashes and TLS keys;
# confirm that public-read exposure is acceptable here.
aws --region $REGION s3 mb "s3://${s3_bucket_name}"
# Record the bucket so the deprovision step can clean it up.
echo "s3://${s3_bucket_name}" > "$SHARED_DIR/to_be_removed_s3_bucket_list"
aws --region $REGION s3api put-bucket-acl --bucket "${s3_bucket_name}" --acl public-read
aws --region $REGION s3 cp ${workdir}/bastion.ign "s3://${s3_bucket_name}/bastion.ign"
aws --region $REGION s3api put-object-acl --bucket "${s3_bucket_name}" --key "bastion.ign" --acl public-read
# Record the stack for teardown *before* creating it, so a partial failure
# still gets cleaned up.
echo ${stack_name} >> "${SHARED_DIR}/to_be_removed_cf_stack_list"
# Long-running aws calls are backgrounded and awaited so the TERM trap at
# the top of the script can interrupt them.
aws --region $REGION cloudformation create-stack --stack-name ${stack_name} \
    --template-body file://${workdir}/bastion.yaml \
    --parameters \
        ParameterKey=VpcId,ParameterValue="${VpcId}" \
        ParameterKey=BastionHostInstanceType,ParameterValue="${BastionHostInstanceType}" \
        ParameterKey=Machinename,ParameterValue="${stack_name}" \
        ParameterKey=PublicSubnet,ParameterValue="${PublicSubnet}" \
        ParameterKey=AmiId,ParameterValue="${AmiId}" \
        ParameterKey=BastionIgnitionLocation,ParameterValue="s3://${s3_bucket_name}/bastion.ign" &
wait "$!"
echo "Created stack"
aws --region "${REGION}" cloudformation wait stack-create-complete --stack-name "${stack_name}" &
wait "$!"
echo "Waited for stack"
echo "$stack_name" > "${SHARED_DIR}/bastion_host_stack_name"
INSTANCE_ID="$(aws --region "${REGION}" cloudformation describe-stacks --stack-name "${stack_name}" \
  --query 'Stacks[].Outputs[?OutputKey == `BastionInstanceId`].OutputValue' --output text)"
echo "Instance ${INSTANCE_ID}"
# to allow log collection during gather:
# append to proxy bastion host ID to "${SHARED_DIR}/aws-instance-ids.txt"
echo "${INSTANCE_ID}" >> "${SHARED_DIR}/aws-instance-ids.txt"
BASTION_HOST_PUBLIC_DNS="$(aws --region "${REGION}" cloudformation describe-stacks --stack-name "${stack_name}" \
  --query 'Stacks[].Outputs[?OutputKey == `PublicDnsName`].OutputValue' --output text)"
BASTION_HOST_PRIVATE_DNS="$(aws --region "${REGION}" cloudformation describe-stacks --stack-name "${stack_name}" \
  --query 'Stacks[].Outputs[?OutputKey == `PrivateDnsName`].OutputValue' --output text)"
# Publish connection details for the later install/mirror steps.
echo "${BASTION_HOST_PUBLIC_DNS}" > "${SHARED_DIR}/bastion_public_address"
echo "${BASTION_HOST_PRIVATE_DNS}" > "${SHARED_DIR}/bastion_private_address"
echo "core" > "${SHARED_DIR}/bastion_ssh_user"
PROXY_PUBLIC_URL="http://${PROXY_CREDENTIAL}@${BASTION_HOST_PUBLIC_DNS}:3128"
PROXY_PRIVATE_URL="http://${PROXY_CREDENTIAL}@${BASTION_HOST_PRIVATE_DNS}:3128"
echo "${PROXY_PUBLIC_URL}" > "${SHARED_DIR}/proxy_public_url"
echo "${PROXY_PRIVATE_URL}" > "${SHARED_DIR}/proxy_private_url"
MIRROR_REGISTRY_URL="${BASTION_HOST_PUBLIC_DNS}:5000"
echo "${MIRROR_REGISTRY_URL}" > "${SHARED_DIR}/mirror_registry_url"
echo "Sleeping 5 mins, make sure that the bastion host is fully started."
sleep 300
| true
|
a690d676b214c524af86d066c2317d9e0f752992
|
Shell
|
GSA/project-open-data-dashboard
|
/docker/wait_for_db
|
UTF-8
| 179
| 2.890625
| 3
|
[
"LicenseRef-scancode-us-govt-public-domain",
"MIT",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash -e
# Block container startup until MySQL on host "database" accepts TCP
# connections on port 3306; give up (exit 1) after MAX attempts.
MAX=10
SLEEP=2
attempt=1
while [ "$attempt" -le "$MAX" ]; do
  # /dev/tcp/<host>/<port> is a bash-ism: the redirect only succeeds when
  # the port is reachable, so this doubles as a connectivity probe.
  (echo 2>/dev/null > /dev/tcp/database/3306) && exit 0
  echo "Startup: Waiting for MySQL..."
  sleep $SLEEP
  attempt=$((attempt + 1))
done
exit 1
| true
|
ca1b9da8df732c52a4680e3934c1b2f82f3e9ecd
|
Shell
|
pupamanyu/professional-services
|
/examples/terraform/druid-migration/cloudsql/dataproc_init.sh
|
UTF-8
| 466
| 2.546875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Dataproc initialization action (rendered through Terraform templating):
# on the master node only, publish the cluster's Hadoop client configs and
# the GCS connector jar to a GCS bucket for reuse by external tooling.
# NOTE(review): "$${ROLE}" is Terraform's escape for a literal "${ROLE}",
# and ${bucket} is interpolated by Terraform before this script runs —
# this file is a template, not plain shell; do not "fix" the $$.
ROLE=$(/usr/share/google/get_metadata_value attributes/dataproc-role)
if [[ "$${ROLE}" == 'Master' ]]; then
  gsutil cp /etc/hadoop/conf/core-site.xml gs://${bucket}/config/
  gsutil cp /etc/hadoop/conf/hdfs-site.xml gs://${bucket}/config/
  gsutil cp /etc/hadoop/conf/mapred-site.xml gs://${bucket}/config/
  gsutil cp /etc/hadoop/conf/yarn-site.xml gs://${bucket}/config/
  gsutil cp /usr/lib/hadoop/lib/gcs-connector-hadoop2* gs://${bucket}/config/
fi
| true
|
9acbfbb753e557c8ae68b112a76b0e9f27476b35
|
Shell
|
MAIF/otoroshi
|
/scripts/tools/docker-bench.sh
|
UTF-8
| 1,184
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Provision a fresh Debian host with Docker CE and docker-compose, then
# build and run the Otoroshi benchmark stack from ./docker/bench.
#
# sudo apt-get update && sudo apt-get install git -y && git clone https://github.com/MAIF/otoroshi.git otoroshi && cd ./otoroshi && sh ./scripts/docker-bench.sh
#
# or
#
# mkdir -p /tmp/otoroshi-bench
# cd /tmp/otoroshi-bench
# sudo apt-get update && sudo apt-get install git
# git clone https://github.com/MAIF/otoroshi.git otoroshi
# cd otoroshi
# Replace any distro-packaged docker with the upstream docker-ce packages.
sudo apt-get remove docker docker-engine docker.io
sudo apt-get update
sudo apt-get install \
    apt-transport-https \
    ca-certificates \
    curl \
    git \
    gnupg2 \
    software-properties-common -y
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88
sudo add-apt-repository \
   "deb [arch=amd64] https://download.docker.com/linux/debian \
   $(lsb_release -cs) \
   stable"
sudo apt-get update
sudo apt-get install docker-ce -y
# Smoke-test the daemon before continuing.
sudo docker run hello-world
# docker-compose is pinned to 1.21.2 for reproducible benchmark runs.
sudo curl -L https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
sudo docker-compose --version
cd ./docker/bench
sudo docker-compose build
sudo docker-compose up
| true
|
bcb4dcf731fda936105bab96816bbf0906600dc0
|
Shell
|
allprojects/thesis-consyst-operation-types
|
/demos/eshop/run-artifact.sh
|
UTF-8
| 1,170
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Benchmark entry point and JVM launch parameters for the eshop demo.
CLASS_NAME='de.tuda.stg.consys.demo.eshop.EShopBenchmark'
JAR_NAME='target/eshop-2.0.0-allinone.jar'
# Process indices: one local benchmark process per value (0..8 = nine total).
N_PROCS=`seq 0 8`
# Launch one background JVM benchmark process per index in N_PROCS, then
# block until every one of them has exited.
#   $1 - configuration directory prefix (bench<i>.conf is appended)
#   $2 - result/output argument forwarded to the benchmark
function executeBench() {
	echo "Run configuration $1 with $2"
	local idx
	# Fan out: start each process in the background and remember its PID.
	for idx in ${N_PROCS}; do
		java -cp "${JAR_NAME}" "${CLASS_NAME}" "$1bench${idx}.conf" "$2" &
		pids[${idx}]=$!
	done
	# Barrier: wait for every launched benchmark before returning.
	local pid
	for pid in "${pids[@]}"; do
		wait "$pid"
	done
	echo "Finished configuration $1"
}
# Run all processes with mixed configuration
# Each call: <config dir prefix> <output dir for results>.
executeBench 'local/weak/' './bench-results/artifact/weak'
executeBench 'local/mixed/' './bench-results/artifact/mixed'
executeBench 'local/strong/' './bench-results/artifact/strong'
# Process the results
python3 ../process-results.py artifact-processed.csv \
  bench-results/artifact/weak/:1 \
  bench-results/artifact/mixed/:1 \
  bench-results/artifact/strong/:1
# Generate and show the graphs
# weak/mixed results are normalized against the strong baseline.
python3 ../generate-graphs.py artifact-processed.csv artifact-normalized.csv \
  bench-results/artifact/weak/:bench-results/artifact/strong/ \
  bench-results/artifact/mixed/:bench-results/artifact/strong/
| true
|
0566d43c06e041da814c26ebb153a0123587b654
|
Shell
|
elizabethcook21/UPHL
|
/URF_scripts/cgpipeline_multiqc.sh
|
UTF-8
| 283
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Collapse the per-sample cg-pipeline outputs in $1/cg-pipeline/ into one
# summary table: a single header line followed by de-duplicated data rows.
# Fix: quote "$out" so paths with spaces work; 'sort | uniq' -> 'sort -u'.
out=$1
# Header: the avgReadLength header line is identical across files; keep one.
grep avgReadLength "$out"/cg-pipeline/*.out.txt | sort -u | head -n 1 | cut -f 2- -d ':' > "$out/cg-pipeline/cg-pipeline-summary.txt"
# Data rows: everything except the header, de-duplicated.
grep -v "avgReadLength" "$out"/cg-pipeline/*.out.txt | cut -f 2- -d ':' | sort -u >> "$out/cg-pipeline/cg-pipeline-summary.txt"
| true
|
b0e941f2f3d230529c1f78219621cf14a1a6bbc4
|
Shell
|
fefa4ka/NNN
|
/test/run_test.sh
|
UTF-8
| 325
| 3.171875
| 3
|
[] |
no_license
|
# Run every compiled unit-test binary under test/, optionally under
# $VALGRIND, appending each test's stderr to test/process.log.
# Fixes: "Runnig" typo; 'echo "...\n"' only works on shells whose echo
# interprets escapes — replaced with printf; quote the file path.
echo "Running unit tests:"
printf -- '------------------\n\n'
for i in test/*_test
do
    if test -f "$i"
    then
        # $VALGRIND is intentionally unquoted: empty means "run directly".
        if $VALGRIND "./$i" 2>> test/process.log
        then
            printf '\n----------\n'
        else
            printf '[FAILED] ./%s: Read more test/process.log\n\n' "$i"
        fi
    fi
    echo ""
done
echo ""
| true
|
a4ab3aa55d069d709bb2b7fe66e5302d11027f88
|
Shell
|
dev-dull/prog
|
/prog
|
UTF-8
| 1,688
| 4.34375
| 4
|
[] |
no_license
|
#!/bin/bash
# prog — draw a single-line textual progress bar sized to the terminal.
# Usage: prog <numerator> [denominator]   (denominator defaults to 100,
# i.e. the numerator is treated as a percentage).
# Determine whether stdin is attached to a terminal; $e holds the result.
tty -s
e=$?
if [ $e -eq 0 ]; then
  # don't manually specify the tty file since it is already known.
  term_size=`stty size | grep -Eio "[0-9]+$"`
else
  # find our tty -- if we call this script from another script, the TTY gets lost and we can't get the terminal size.
  # this logic just makes a decent guess as to our tty and might be wrong if more than one `prog` is running at once.
  term=`ps gaxu | grep -v grep | grep $0 | awk '{print $7}' | tail -n 1`
  term_size=`stty -F "/dev/$term" size | grep -Eio "[0-9]+$"`
fi
let term_size=$term_size-3 # removing one more char than we need to because of echo behavior in cygwin.
# -1 for ending ']', -1 for indicator end '>', -1 for cygwin use cases.
# the opening '[' overwrites the first char of the progress bar.
numer=$1
# Missing numerator is a usage error; a negative one reports failure.
if [ -z "$numer" ]; then
  exit -1
elif [ "$numer" -lt 0 ]; then
  exit 1
fi
# assume we were handed a percentage if $2 wasn't passed to us.
denom=100
if [ "$2" ]; then
  denom=$2
fi
# terminals can't print partial chars, so may as well just work with whole numbers.
let pct=($numer*100)/$denom
let bar_size=($term_size*$pct)/100
# build the bar
bar_ct=0
bar=''
while [ $bar_ct -lt $bar_size ]
do
  bar="$bar="
  let bar_ct=$bar_ct+1
done
# fill in the remaining width of the terminal
bar="$bar>"
while [ $bar_ct -lt $term_size ]
do
  bar="$bar."
  let bar_ct=$bar_ct+1
done
# show the bar. \r takes us to the start of the line, -e to print escaped chars
# -n prevents a newline so we can print the bar on the same line each time.
# this -n also means it's up to the user to decide when \n should get printed.
echo -en "$bar]\r[$pct%\r"
| true
|
69df21d1c5d2220b332ee1af84b380ef85b734ba
|
Shell
|
umr1085-irset/toxsign_v2
|
/tools/run_cluster_dist/run_cluster_dist.sh
|
UTF-8
| 609
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the cluster-distance prediction for one signature.
#   $1 TEMP_DIR   - working folder (trailing slash expected) holding the
#                   signature file and method.RData
#   $2 SIG_FILE   - signature file name inside TEMP_DIR
#   $3 OUTPUT_DIR - destination directory for output.txt
# Fix: corrected the argument-count error message ("tree argument").
set -e
# Source env first
# Directory of this script, so predict_group.R can be located next to it.
CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
if [ $# -lt 3 ]
then
    echo "Less than three arguments supplied. Stopping.."
    exit 1
fi
TEMP_DIR="$1"
SIG_FILE="$2"
OUTPUT_DIR="$3"
# Activate the conda R environment that provides the required packages.
. /opt/conda/etc/profile.d/conda.sh
conda activate condaR_TCL
# Paths are built by plain concatenation, hence the trailing-slash contract.
Rscript "$CURRENT_DIR""/predict_group.R" "$TEMP_DIR""$SIG_FILE" "$TEMP_DIR" "$TEMP_DIR""method.RData"
cp "$TEMP_DIR""output.txt" "$OUTPUT_DIR"
#rm -rf "$TEMP_DIR"
| true
|
3cb8f05295196eb873af4bbd8c0022a7e8a4b41a
|
Shell
|
zer010101/AppSTARTer
|
/.scripts/pm_apt_install.sh
|
UTF-8
| 5,091
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
pm_apt_install() {
    # Install or update an app via apt, or — with no app name — install the
    # baseline dependency packages. Repo, signing key, and package name are
    # looked up from the app's yml labels, with per-distro and per-codename
    # overrides falling back to a "general" entry.
    #   $1: app name (optional); $2: app this one is a dependency of.
    local APPNAME=${1:-}
    local APPDEPENDENCYOF=${2:-}
    # REDIRECT holds redirection *text*; it is only honored by the final
    # 'eval apt-get ...' in the dependency branch at the bottom.
    local REDIRECT="> /dev/null 2>&1"
    if [[ ${CI:-} == true ]]; then
        REDIRECT="> /dev/null"
    fi
    if [[ ${APPNAME} != "" ]]; then
        local UPDATE_APT
        local SOURCE_REPO
        local YMLAPPINSTALL="services.${FILENAME}.labels[com.appstarter.appinstall]"
        # Repo lookup order: distro+codename -> distro -> general.
        SOURCE_REPO=$(run_script 'yml_get' "${APPNAME}" "${YMLAPPINSTALL}.apt.${DETECTED_DISTRO}.${DETECTED_CODENAME}.repo" || true)
        if [[ ${SOURCE_REPO} == "" ]]; then
            SOURCE_REPO=$(run_script 'yml_get' "${APPNAME}" "${YMLAPPINSTALL}.apt.${DETECTED_DISTRO}.repo" || true)
        fi
        if [[ ${SOURCE_REPO} == "" ]]; then
            SOURCE_REPO=$(run_script 'yml_get' "${APPNAME}" "${YMLAPPINSTALL}.apt.general.repo" || true)
        fi
        if [[ ${SOURCE_REPO} != "" ]]; then
            if [[ ${SOURCE_REPO} == deb* ]]; then
                local SOURCE_FILE="/etc/apt/sources.list.d/${APPNAME,,}.list"
                local SOURCE_EXISTS
                # NOTE(review): if SOURCE_FILE is missing, grep fails and the
                # fallback echoes "0"; if it exists with no match, grep -c
                # already prints 0 *and* returns 1, so this becomes "0\n0" —
                # TODO confirm intended.
                SOURCE_EXISTS=$(grep -c "${SOURCE_REPO}" "${SOURCE_FILE}" || echo "0")
                local SOURCE_LINE_COUNT
                SOURCE_LINE_COUNT=$(wc -l "${SOURCE_FILE}" | awk '{print $1}' || echo "0")
                # NOTE(review): this rewrites the list when the entry already
                # exists (==1) or the file has extra lines, but a *missing*
                # entry in a one-line file is left untouched — verify the
                # condition is not inverted.
                if [[ ${SOURCE_EXISTS} == 1 || ${SOURCE_LINE_COUNT} -gt 1 ]]; then
                    info "Adding/updating sources for ${APPNAME}"
                    if [[ -f ${SOURCE_FILE} ]]; then
                        rm "${SOURCE_FILE}"
                    fi
                    echo "${SOURCE_REPO}" >> "${SOURCE_FILE}"
                    UPDATE_APT="true"
                fi
            elif [[ ${SOURCE_REPO} == ppa:* ]]; then
                info "Adding/updating sources for ${APPNAME}"
                add-apt-repository -y "${SOURCE_REPO}" > /dev/null
                UPDATE_APT="true"
            else
                error "Source Repo is an invalid format. Must start be 'deb' or 'ppa:'!"
                error "${SOURCE_REPO}"
                return 1
            fi
        fi
        local SOURCE_KEY
        # Key lookup order mirrors the repo lookup above.
        SOURCE_KEY=$(run_script 'yml_get' "${APPNAME}" "${YMLAPPINSTALL}.apt.${DETECTED_DISTRO}.${DETECTED_CODENAME}.key" || true)
        if [[ ${SOURCE_KEY} == "" ]]; then
            SOURCE_KEY=$(run_script 'yml_get' "${APPNAME}" "${YMLAPPINSTALL}.apt.${DETECTED_DISTRO}.key" || true)
        fi
        if [[ ${SOURCE_KEY} == "" ]]; then
            SOURCE_KEY=$(run_script 'yml_get' "${APPNAME}" "${YMLAPPINSTALL}.apt.general.key" || true)
        fi
        if [[ ${SOURCE_KEY} != "" ]]; then
            if [[ ${SOURCE_KEY} == http* ]]; then
                # URL-style keys are fetched and imported on every run.
                info "Adding/updating source key for ${APPNAME}"
                wget -qO - "${SOURCE_KEY}" | apt-key add - > /dev/null 2>&1 || error "Unable to add key for ${APPNAME}: ${SOURCE_KEY}"
                UPDATE_APT="true"
            else
                # Fingerprint keys are imported only when not already present.
                if ! gpg --list-keys "${SOURCE_KEY}" > /dev/null 2>&1; then
                    info "Adding source key for ${APPNAME}"
                    apt-key adv --keyserver keyserver.ubuntu.com --recv-keys "${SOURCE_KEY}" > /dev/null 2>&1
                    UPDATE_APT="true"
                fi
            fi
        fi
        if [[ ${UPDATE_APT:-} == "true" ]]; then
            run_script 'package_manager_run' repos
        fi
        # NOTE(review): APP_PACKAGE is read here but only assigned further
        # below (inside the install branch), so these apt-cache lookups run
        # with an empty/stale package name — TODO confirm intended ordering.
        PACKAGE_VERSION_INSTALLED=$(sudo apt-cache policy "${APP_PACKAGE}" | grep -Po "Installed: \K.*" || true)
        PACKAGE_VERSION_CANDIDATE=$(sudo apt-cache policy "${APP_PACKAGE}" | grep -Po "Candidate: \K.*" || true)
        if [[ ${PACKAGE_VERSION_INSTALLED} == "" || ${PACKAGE_VERSION_INSTALLED} != "${PACKAGE_VERSION_CANDIDATE}" ]]; then
            if [[ ${APPDEPENDENCYOF} == "" ]]; then
                notice "Installing or updating ${APPNAME} via apt"
            else
                info "Installing or updating ${APPNAME} via apt (${APPDEPENDENCYOF} dependency)"
            fi
            local APP_PACKAGE
            # Package-name lookup order mirrors the repo lookup above.
            APP_PACKAGE=$(run_script 'yml_get' "${APPNAME}" "${YMLAPPINSTALL}.apt.${DETECTED_DISTRO}.${DETECTED_CODENAME}.name" || true)
            if [[ ${APP_PACKAGE} == "" ]]; then
                # NOTE(review): the double dot in "..name" looks like a typo —
                # compare the repo/key lookups above.
                APP_PACKAGE=$(run_script 'yml_get' "${APPNAME}" "${YMLAPPINSTALL}.apt.${DETECTED_DISTRO}..name" || true)
            fi
            if [[ ${APP_PACKAGE} == "" ]]; then
                APP_PACKAGE=$(run_script 'yml_get' "${APPNAME}" "${YMLAPPINSTALL}.apt.general.name" || true)
            fi
            apt-get -y install "${APP_PACKAGE}" > /dev/null 2>&1 || error "Failed to install/update ${APPNAME} from apt."
            run_script 'package_manager_run' clean
        else
            info "Package already install and up-to-date!"
        fi
    else
        # No app name: install the baseline dependency set. eval is needed so
        # the redirection text in ${REDIRECT} is applied as a redirection.
        info "Installing dependencies."
        eval apt-get -y install apt-transport-https ca-certificates curl git gnupg2 grep sed software-properties-common whiptail "${REDIRECT}" || fatal "Failed to install dependencies from apt."
    fi
}
test_pm_apt_install() {
    # CI self-test hook: refresh the apt repos, then run the installer with
    # no app name (which installs the baseline dependency set).
    run_script 'pm_apt_repos'
    run_script 'pm_apt_install'
}
| true
|
589b118c3375641d014a8e4c9cc91df89aed0247
|
Shell
|
jotaki/blasphemy
|
/src/rgb-display/rgb-display.sh
|
UTF-8
| 143
| 2.625
| 3
|
[] |
no_license
|
#! /bin/bash
# Display a solid color page in a browser.
#   $1: hex RGB color (without '#'); defaults to 000000 (black).
#   $browser: browser command to launch; defaults to firefox.
tmpfile=$(mktemp)
printf '<html><body bgcolor="#%s"></body></html>\n' "${1:-000000}" > "$tmpfile"
${browser:-firefox} "$tmpfile"
| true
|
883a99672bed7850558ca2d086d32b923f9f258c
|
Shell
|
henryfung3a27/ChameleonMini
|
/Software/start.sh
|
UTF-8
| 254
| 3.671875
| 4
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# start.sh — attach a picocom serial console to the device.
# Usage:
#  $ start.sh <port>      (defaults to /dev/ttyACM0 when omitted)
# Fix: quote "$1" so port paths containing spaces are passed intact.
echo "In picocom, press C-a C-x to exit"
if [ -n "$1" ]; then
	echo "Connecting to $1"
	picocom -c "$1"
else
	echo "Port not supplied. Connecting to /dev/ttyACM0 now..."
	picocom -c /dev/ttyACM0
fi
| true
|
689907cd758dad9f52270276d71766f2d8360339
|
Shell
|
kellyuw/Preprocessing
|
/RegisterANTs-MNIToFunc-Image.sh
|
UTF-8
| 3,453
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash -X
#Register image with ANTs (MNI to func)
# Warps an MNI-space image into a subject's functional space (and T1 space)
# by chaining the inverse ANTs transforms recorded in ProjectInfo.txt.
# Fix: OUTPUT_DIR was announced ("Making ...") but never actually created,
# so antsApplyTransforms failed on fresh subjects — mkdir -p added.
if [ $# -lt 2 ]; then
    echo
    echo "bash RegisterANTs-MNIToFunc-Image.sh <full path to MNI image> <full path to func image> <output(optional)>"
    echo
    exit 1
fi
#Set ANTSpath
ANTSpath=/usr/local/ANTs-2.1.0-rc3/bin/
export ANTSPATH=${ANTSpath}
MNI_IMAGE=$1
FUNC_IMAGE=$2
MNI_BRAIN=${FSLDIR}/data/standard/MNI152_T1_2mm_brain.nii.gz
#Check FUNC_IMAGE to make sure it is full path, instead of relative
if [[ ${FUNC_IMAGE} != *stressdevlab* ]]; then
    echo "ERROR: Please include full path to image"
    exit 1
fi
# Derive PROJECT_DIR/SUBJECT from the path layout, which differs for
# longitudinal (session/month) studies, PING, and everything else.
if [[ ${FUNC_IMAGE} == *session* ]] || [[ ${FUNC_IMAGE} == *month* ]]; then
    PROJECT_DIR=$(echo ${FUNC_IMAGE} | awk -F "/" '{print $1"/"$2"/"$3"/"$4"/"$6}')
    SUBJECT=$(echo ${FUNC_IMAGE} | awk -F "/" '{print $5}')
elif [[ ${FUNC_IMAGE} == *PING* ]]; then
    PROJECT_DIR=$(echo ${FUNC_IMAGE} | awk -F "/" '{print $1"/"$2"/"$3"/"$4"/"$5"/"$6}')
    SUBJECT=$(echo ${FUNC_IMAGE} | awk -F "/" '{print $7}')
else
    PROJECT_DIR=$(echo ${FUNC_IMAGE} | awk -F "/" '{print $1"/"$2"/"$3"/"$4}')
    SUBJECT=$(echo ${FUNC_IMAGE} | awk -F "/" '{print $5}')
fi
SUBJECT_TASK_DIR=`dirname ${FUNC_IMAGE}`
TASK=`basename ${SUBJECT_TASK_DIR}`
RUN=`basename ${FUNC_IMAGE} .nii.gz`
SUBJECT_DIR=`dirname ${SUBJECT_TASK_DIR}`
OUTPUT_DIR="${SUBJECT_TASK_DIR}/ROIMasks"
MNI_BRAIN_MASK="/mnt/stressdevlab/scripts/Atlases/FSLMNI/MNI152_T1_2mm_filled_brain_mask.nii.gz"
# Registration prefixes/brains come from the per-project ProjectInfo.txt;
# TASK/RUN placeholders in those entries are substituted for this run.
MNI_REG_PREFIX=$(cat ${PROJECT_DIR}/ProjectInfo.txt | grep MNI_REG_PREFIX | awk -F "=" '{print $2}')
CUSTOM_BRAIN=$(cat ${PROJECT_DIR}/ProjectInfo.txt | grep CUSTOM_BRAIN | awk -F "=" '{print $2}')
CUSTOM_REG_PREFIX=$(cat ${PROJECT_DIR}/ProjectInfo.txt | grep CUSTOM_REG_PREFIX | awk -F "=" '{print $2}')
T1_BRAIN=$(cat ${PROJECT_DIR}/ProjectInfo.txt | grep T1_brain | awk -F "=" '{print $2}')
FUNC_BRAIN=$(cat ${PROJECT_DIR}/ProjectInfo.txt | grep FUNC_BRAIN | awk -F "=" '{print $2}' | sed -e "s|TASK|${TASK}|g" -e "s|RUN|${RUN}|g")
T1_REG_PREFIX=$(cat ${PROJECT_DIR}/ProjectInfo.txt | grep T1_REG_PREFIX | awk -F "=" '{print $2}' | sed -e "s|TASK|${TASK}|g" -e "s|RUN|${RUN}|g")
# Last character of the T1 prefix selects which transform chain to apply.
T1_REG_TYPE=$(echo "${T1_REG_PREFIX:$((${#T1_REG_PREFIX}-1)):1}")
SUBJECT_DIR=${PROJECT_DIR}/${SUBJECT}
cd ${SUBJECT_DIR}
pwd
if [ $# -gt 2 ]; then
    OUTPUT=$3
else
    OUTPUT="${OUTPUT_DIR}/`basename ${MNI_IMAGE} .nii.gz`_in_${RUN}_space.nii.gz"
fi
if [[ ! -d ${OUTPUT_DIR} ]]; then
    echo "Making ${OUTPUT_DIR}"
    # BUGFIX: actually create the directory that was only being announced.
    mkdir -p "${OUTPUT_DIR}"
fi
echo "Warping ${MNI_IMAGE} to ${RUN} space"
# NOTE(review): an 'r'-suffixed T1 prefix skips the T1 warp field in the
# chain — presumably a rigid-only registration; confirm with the pipeline.
if [[ ${T1_REG_TYPE} == *r* ]]; then
    ${ANTSpath}/antsApplyTransforms -i ${MNI_IMAGE} -r ${FUNC_BRAIN} -t [${T1_REG_PREFIX}_0GenericAffine.mat,1] [${CUSTOM_REG_PREFIX}_0GenericAffine.mat,1] ${CUSTOM_REG_PREFIX}_1InverseWarp.nii.gz [${MNI_REG_PREFIX}_0GenericAffine.mat,1] ${MNI_REG_PREFIX}_1InverseWarp.nii.gz -o ${OUTPUT}
else
    ${ANTSpath}/antsApplyTransforms -i ${MNI_IMAGE} -r ${FUNC_BRAIN} -t [${T1_REG_PREFIX}_0GenericAffine.mat,1] ${T1_REG_PREFIX}_1InverseWarp.nii.gz [${CUSTOM_REG_PREFIX}_0GenericAffine.mat,1] ${CUSTOM_REG_PREFIX}_1InverseWarp.nii.gz [${MNI_REG_PREFIX}_0GenericAffine.mat,1] ${MNI_REG_PREFIX}_1InverseWarp.nii.gz -o ${OUTPUT}
fi
echo "Warping ${MNI_IMAGE} to T1 space"
${ANTSpath}/antsApplyTransforms -i ${MNI_IMAGE} -r ${T1_BRAIN} -t [${CUSTOM_REG_PREFIX}_0GenericAffine.mat,1] ${CUSTOM_REG_PREFIX}_1InverseWarp.nii.gz [${MNI_REG_PREFIX}_0GenericAffine.mat,1] ${MNI_REG_PREFIX}_1InverseWarp.nii.gz -o `dirname ${T1_BRAIN}`/`basename ${MNI_IMAGE} .nii.gz`_in_T1_space.nii.gz
exit
| true
|
d37ce32396609fe1d056f02f80a57dc0979799db
|
Shell
|
lorenzogrv/autotest
|
/install
|
UTF-8
| 838
| 3.453125
| 3
|
[] |
no_license
|
# Echo to stderr.
eche () { echo "$@" >&2; }
# Report to stderr and abort.
fail () { eche "$@"; exit 1; }

# Resolve the install target to an absolute path and sanity-check it.
file="$(readlink -f "autotest")"
[ -x "$file" ] || fail "Fatal: $file is not an executable file"

# Installation path for all envs (aka 'standard')
BIN_PATH="/usr/local/bin"
# override path for 'termux' environments
if type termux-info &>/dev/null; then
  BIN_PATH="$PREFIX/bin"
fi
# last, small environments not having /usr/local (i.e. openwrt)
[ -d "$BIN_PATH" ] || BIN_PATH="/usr/bin"
[ -d "$BIN_PATH" ] || fail "Fatal: $BIN_PATH is not a directory"

name="$(basename "$file")"
eche "Will install $name to $BIN_PATH"
if ! ln -s "$(dirname "$file")/$name" "$BIN_PATH/$name"; then
  fail "could not create the link to the file"
fi
echo "installed symlink"
##
# vim modeline (see vim +'help modeline')
# /* vim: set expandtab: */
# /* vim: set filetype=sh ts=2 shiftwidth=2: */
| true
|
ff5758460c2497df2f49837d0d6c5b5be67259b3
|
Shell
|
mjscosta/packer-ubuntu
|
/scripts-1604/vmware.sh
|
UTF-8
| 755
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
# Packer provisioner: install VMware guest tools on the Ubuntu 16.04 image.
# Install open-vm-tools so we get basic stuff like
apt-get install -y open-vm-tools
# Install the Linux headers
apt-get -y install build-essential linux-headers-$(uname -r)
# Install the VMware Fusion guest tools so shared-folders work. If / when this is
# added to open-vm-tools we can remove this.
cd /tmp
mkdir -p /mnt/cdrom
# linux.iso is the VMware tools ISO previously uploaded to the home dir.
mount -o loop ~/linux.iso /mnt/cdrom
tar zxf /mnt/cdrom/VMwareTools-*.tar.gz -C /tmp/
/tmp/vmware-tools-distrib/vmware-install.pl -d --force-install
rm /home/vagrant/linux.iso
umount /mnt/cdrom
# Recompile when the kernel is updated
echo "answer AUTO_KMODS_ENABLED yes" >> /etc/vmware-tools/locations
# Make sure the kernel module is loaded at boot
echo "vmhgfs" > /etc/modules-load.d/vmware.conf
# Reboot so the freshly built modules load; the sleep keeps the provisioner
# session alive until the reboot actually begins.
shutdown -r now
sleep 60
| true
|
e5c5b6aaf22780a0445ea8e2d5f8b02e22b97465
|
Shell
|
BoatData/pi_config
|
/scripts/install_wifi_ap.sh
|
UTF-8
| 1,033
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
# configure the pi wifi access point
# Installs and configures dnsmasq (DHCP) + hostapd (AP) so the Pi serves a
# standalone wireless network on 192.168.1.0/24 with itself at 192.168.1.1.
base=/home/boatdata/pi_config
sudo apt-get install dnsmasq hostapd
# Stop both services while their configs are (re)written below.
sudo systemctl stop dnsmasq
sudo systemctl stop hostapd
# configure the network IP
cat >${base}/config/wlan0.conf<<heredoc
allow-hotplug wlan0
iface wlan0 inet static
address 192.168.1.1
netmask 255.255.255.0
network 192.168.1.0
heredoc
sudo ln -s ${base}/config/wlan0.conf /etc/network/interfaces.d/wlan0.conf
sudo service dhcpcd restart
sudo ifdown wlan0
sudo ifup wlan0
# DHCP pool for AP clients with 24h leases.
cat >${base}/config/dnsmasq.conf<<heredoc
interface=wlan0 # wireless interface is usually wlan0
dhcp-range=192.168.1.2,192.168.1.20,255.255.255.0,24h
heredoc
sudo ln -s ${base}/config/dnsmasq.conf /etc/dnsmasq.conf
# create the hostapd config file
# SSID/PASSWORD are handed to create_ap.sh via its environment.
SSID=SmartBoat PASSWORD=SmartBoatNetwork bash ${base}/create_ap.sh
sudo rm /etc/default/hostapd
cp ${base}/sources/hostapd.default ${base}/config/hostapd.default
sudo ln -s ${base}/config/hostapd.default /etc/default/hostapd
sudo service hostapd start
sudo service dnsmasq start
| true
|
36162c3b54182a403898fb92d985cfc6176eed48
|
Shell
|
DanielAW/dotFiles
|
/scripts/go_startstop.sh
|
UTF-8
| 95
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Power-management helper: reboot or power off the machine.
# Usage: go_startstop.sh {reboot|halt}
# Fix: quote "$1" (and default it) — the unquoted test produced
# "[: ==: unary operator expected" when run with no argument.
if [ "${1:-}" = "reboot" ]; then
	reboot
elif [ "${1:-}" = "halt" ]; then
	poweroff
fi
| true
|
acddaf506beca86c69f97508df3868925366c5e4
|
Shell
|
prashanth-p/My-Shell-Codes
|
/publisher_report/pub_report_gen.sh
|
UTF-8
| 15,026
| 3.21875
| 3
|
[] |
no_license
|
######################################################################
# Author: Prashanth
# Created on: Thu Jul 2 07:21:33 EDT 2020
# Purpose: To Generate Status Report of Publisher
######################################################################
# Global Variable Definition
# Timestamps captured once at startup: EMAIL_TIMESTAMP for display,
# HTML_TIMESTAMP to build a unique report file name.
export EMAIL_TIMESTAMP=`date '+%d-%b-%Y %H:%M:%S'`
export HTML_TIMESTAMP=`date '+%d%m%Y_%H%M%S'`
export BASE_DIR=/apps/Admin_Scripts/NODE_OUTAGE_LOGGING/xcode/pub_status_report
# Derived Paths
export CONFIG_DIR=${BASE_DIR}/Config
export TEMPLATE_DIR=${BASE_DIR}/Templates
export ALERT_EMAILS=${BASE_DIR}/ALERT_EMAILS
# Config Files and template FIles
export MON_CONFIG_FILE=${CONFIG_DIR}/pub_status_cfg_PROD.cfg
export HEADER_TEMPLATE_01=${TEMPLATE_DIR}/header1.html
export HEADER_TEMPLATE_02=${TEMPLATE_DIR}/header2.html
# NOTE: table_prod.html is overwritten on every run by generate_html_table.
export TABLE_TEMPLATE=${TEMPLATE_DIR}/table_prod.html
export FOOTER_TEMPLATE=${TEMPLATE_DIR}/footer.html
export CLOSING_TEMPLATE=${TEMPLATE_DIR}/closing.html
export ALERT_FILE_NAME=${ALERT_EMAILS}/PWXPUB_REPORT_${HTML_TIMESTAMP}.html
# Notification Mail List
# To Email ID
export EMAIL_NOTIFY1=
export EMAIL_NOTIFY2=
# CC Email ID
export EMAIL_NOTIFY3=
export EMAIL_NOTIFY4=
# Send Mail to Team
#########################
# FUNCTION VARIABLES #
#########################
# TABLE_COUNT: number of rows the HTML table needs (max region count).
# FAILED_INSTANCE_NAME / COUNT_FAILED: instances found not running,
# used to build the alert mail subject.
export TABLE_COUNT
export PUB_ENV
export FAILED_INSTANCE_NAME=()
export COUNT_FAILED=0
#########################
# ASPAC VARIABLES #
#########################
# Per-region parallel arrays, indexed by config-file order within the region.
# (NB: 'export' on bash arrays does not propagate them to child processes;
# they are only used within this script.)
export ASPAC_PROD_COUNT=0
export ASPAC_PROD_PUB_ENV=()
export ASPAC_PROD_PUB_LIVE_FLAG=()
export ASPAC_PROD_SSH_UID=()
export ASPAC_PROD_SSH_SERVER=()
export ASPAC_PROD_PUB_INSTANCE_NAME=()
export ASPAC_PROD_PUB_COUNT_RESULT=()
export ASPAC_PROD_PUB_LIVE=()
export ASPAC_PROD_PUB_STATUS=()
#########################
# NA VARIABLES #
#########################
export NA_PROD_COUNT=0
export NA_PROD_PUB_ENV=()
export NA_PROD_PUB_LIVE_FLAG=()
export NA_PROD_SSH_UID=()
export NA_PROD_SSH_SERVER=()
export NA_PROD_PUB_INSTANCE_NAME=()
export NA_PROD_PUB_COUNT_RESULT=()
export NA_PROD_PUB_LIVE=()
export NA_PROD_PUB_STATUS=()
#########################
# EMEA VARIABLES #
#########################
export EMEA_PROD_COUNT=0
export EMEA_PROD_PUB_ENV=()
export EMEA_PROD_PUB_LIVE_FLAG=()
export EMEA_PROD_SSH_UID=()
export EMEA_PROD_SSH_SERVER=()
export EMEA_PROD_PUB_INSTANCE_NAME=()
export EMEA_PROD_PUB_COUNT_RESULT=()
export EMEA_PROD_PUB_LIVE=()
export EMEA_PROD_PUB_STATUS=()
main() {
# Pipeline: read config -> ssh-probe each publisher -> render HTML table
# -> assemble full report -> mail it.  printstar just logs separators.
printstar
read_config_file
# display_config_file
printstar
ssh_to_check_pub_status
printstar
generate_html_table
printstar
consolidate_html_template
printstar
send_html_to_team
}
#################################################
# Main Methods #
#################################################
read_config_file() {
# Parse MON_CONFIG_FILE line by line.  Each record is colon-delimited
# "KEY=VALUE" fields: f1=environment (e.g. NA_PROD), f2=live flag, f3=ssh
# user, f4=ssh host, f5=instance name.  Records with live flag != 1 are
# skipped entirely.
echo -e "$(timestamp):\tReading Configuration File."
while IFS= read -r RECLINE
do
PUB_ENV=`echo ${RECLINE} | cut -d":" -f1 | cut -d"=" -f2`
export PROD_PUB_LIVE_FLAG=`echo ${RECLINE} | cut -d":" -f2 | cut -d"=" -f2`
if [ ${PROD_PUB_LIVE_FLAG} -eq 1 ]; then
# NOTE(review): ${RECLINE} is passed unquoted, so this assumes config
# lines never contain whitespace (store_data_region_wise reads it as $2)
# — confirm against the .cfg format.
store_data_region_wise ${PUB_ENV} ${RECLINE}
fi
done < ${MON_CONFIG_FILE}
# FETCH TABLE_COUNT
greatest_count_of_regions
}
ssh_to_check_pub_status() {
# Probe all three regions in sequence; each populates its own
# *_PUB_COUNT_RESULT / *_PUB_LIVE / *_PUB_STATUS arrays.
# Check NA Status
na_ssh_to_check_pub_status
aspac_ssh_to_check_pub_status
emea_ssh_to_check_pub_status
}
generate_html_table() {
# Rebuild TABLE_TEMPLATE from scratch: one <tr> per row index up to
# TABLE_COUNT, with three cell groups (NA, EMEA, ASPAC).  A live instance
# whose process count is 0 is highlighted (#F8CBAD) and recorded in
# FAILED_INSTANCE_NAME for the mail subject.  Rows past a region's count
# render empty cells (the arrays are simply unset at that index).
echo -e "$(timestamp):\tGenerating HTML TABLE.."
echo -e "\n\n<!-- TABLE DATA POPULATED AT: $(timestamp) -->" > ${TABLE_TEMPLATE}
for(( i=0; i<${TABLE_COUNT}; i++ ))
do
if [ "${NA_PROD_PUB_LIVE_FLAG[$i]}" != "" ] && [ ${NA_PROD_PUB_LIVE_FLAG[$i]} -eq 1 ] && [ ${NA_PROD_PUB_COUNT_RESULT[$i]} -eq 0 ]; then
FAILED_INSTANCE_NAME[${COUNT_FAILED}]="${NA_PROD_PUB_INSTANCE_NAME[$i]}"
COUNT_FAILED=$((${COUNT_FAILED}+1))
NA_DATA="<td bgcolor=\"#F8CBAD\">${NA_PROD_PUB_INSTANCE_NAME[$i]}</td><td bgcolor=\"#F8CBAD\">${NA_PROD_PUB_LIVE[$i]}</td><td bgcolor=\"#F8CBAD\">${NA_PROD_PUB_STATUS[$i]}</td>"
else
NA_DATA="<td>${NA_PROD_PUB_INSTANCE_NAME[$i]}</td><td>${NA_PROD_PUB_LIVE[$i]}</td><td>${NA_PROD_PUB_STATUS[$i]}</td>"
fi
if [ "${EMEA_PROD_PUB_LIVE_FLAG[$i]}" != "" ] && [ ${EMEA_PROD_PUB_LIVE_FLAG[$i]} -eq 1 ] && [ ${EMEA_PROD_PUB_COUNT_RESULT[$i]} -eq 0 ]; then
FAILED_INSTANCE_NAME[${COUNT_FAILED}]="${EMEA_PROD_PUB_INSTANCE_NAME[$i]}"
COUNT_FAILED=$((${COUNT_FAILED}+1))
EMEA_DATA="<td bgcolor=\"#F8CBAD\">${EMEA_PROD_PUB_INSTANCE_NAME[$i]}</td><td bgcolor=\"#F8CBAD\">${EMEA_PROD_PUB_LIVE[$i]}</td><td bgcolor=\"#F8CBAD\">${EMEA_PROD_PUB_STATUS[$i]}</td>"
else
EMEA_DATA="<td>${EMEA_PROD_PUB_INSTANCE_NAME[$i]}</td><td>${EMEA_PROD_PUB_LIVE[$i]}</td><td>${EMEA_PROD_PUB_STATUS[$i]}</td>"
fi
if [ "${ASPAC_PROD_PUB_LIVE_FLAG[$i]}" != "" ] && [ ${ASPAC_PROD_PUB_LIVE_FLAG[$i]} -eq 1 ] && [ ${ASPAC_PROD_PUB_COUNT_RESULT[$i]} -eq 0 ]; then
FAILED_INSTANCE_NAME[${COUNT_FAILED}]="${ASPAC_PROD_PUB_INSTANCE_NAME[$i]}"
COUNT_FAILED=$((${COUNT_FAILED}+1))
ASPAC_DATA="<td bgcolor=\"#F8CBAD\">${ASPAC_PROD_PUB_INSTANCE_NAME[$i]}</td><td bgcolor=\"#F8CBAD\">${ASPAC_PROD_PUB_LIVE[$i]}</td><td bgcolor=\"#F8CBAD\">${ASPAC_PROD_PUB_STATUS[$i]}</td>"
else
ASPAC_DATA="<td>${ASPAC_PROD_PUB_INSTANCE_NAME[$i]}</td><td>${ASPAC_PROD_PUB_LIVE[$i]}</td><td>${ASPAC_PROD_PUB_STATUS[$i]}</td>"
fi
echo -e "<tr>\n" >> ${TABLE_TEMPLATE}
echo -e "\t${NA_DATA}\n\t${EMEA_DATA}\n\t${ASPAC_DATA}" >> ${TABLE_TEMPLATE}
echo -e "</tr>\n" >> ${TABLE_TEMPLATE}
done
echo -e "$(timestamp):\tHTML Table has been created."
}
consolidate_html_template() {
# Stitch the final report: header1 + timestamp + header2 + generated table
# + footer + generation time + closing markup, into ALERT_FILE_NAME.
echo -e "$(timestamp):\tConsolidating HTML Template"
cat ${HEADER_TEMPLATE_01} > ${ALERT_FILE_NAME}
echo -e " $(timestamp)\n" >> ${ALERT_FILE_NAME}
cat ${HEADER_TEMPLATE_02} >> ${ALERT_FILE_NAME}
cat ${TABLE_TEMPLATE} >> ${ALERT_FILE_NAME}
cat ${FOOTER_TEMPLATE} >> ${ALERT_FILE_NAME}
echo -e "<b>Report generated at: </b>$(timestamp)" >> ${ALERT_FILE_NAME}
cat ${CLOSING_TEMPLATE} >> ${ALERT_FILE_NAME}
echo -e "$(timestamp):\tHTML Template Created at $(timestamp)"
}
send_html_to_team() {
# Mail the report via sendmail.  Subject lists all failed instances joined
# by " || ", or an all-clear message when COUNT_FAILED is 0.
echo -e "$(timestamp):\tSending Mail"
if [ ${COUNT_FAILED} -ne 0 ]; then
for(( i=0; i<${COUNT_FAILED}; i++ ))
do
if [ $i -eq 0 ]; then
FAIL_SUBJECT="${FAIL_SUBJECT}${FAILED_INSTANCE_NAME[$i]}"
else
FAIL_SUBJECT="${FAIL_SUBJECT} || ${FAILED_INSTANCE_NAME[$i]}"
fi
done
EMAIL_SUBJECT="{Publisher Report} Publisher Instance: ${FAIL_SUBJECT} is not running : $(timestamp)"
else
EMAIL_SUBJECT="{Publisher Report} All instances are up and running: $(timestamp)"
fi
echo -e "$(timestamp):\tSubject: ${EMAIL_SUBJECT}"
echo -e "$(timestamp):\tFile Name: ${ALERT_FILE_NAME}"
echo -e "$(timestamp):\tTo: ${EMAIL_NOTIFY1};${EMAIL_NOTIFY2};${EMAIL_NOTIFY3};${EMAIL_NOTIFY4}"
# Build an RFC822-ish message (headers, blank line, HTML body) and hand it
# to sendmail -t, which takes recipients from the To:/Cc: headers.
(
echo "To: ${EMAIL_NOTIFY1};${EMAIL_NOTIFY2};"
echo "Cc: ${EMAIL_NOTIFY3};${EMAIL_NOTIFY4}"
echo "Subject: ${EMAIL_SUBJECT}"
echo "Content-Type: text/html"
echo
cat ${ALERT_FILE_NAME}
) | /usr/sbin/sendmail -t
echo -e "$(timestamp):\tPublisher Report Sent"
}
#################################################
# Sub Methods #
#################################################
#################################################
# Parent Method: read_config_file #
#################################################
store_data_region_wise() {
# Append one config record's fields into the parallel arrays of the region
# named by $1 (ASPAC_PROD | NA_PROD | EMEA_PROD) and bump that region's
# count.  $2 is the raw colon-delimited record (see read_config_file).
ENV=$1
RECLINE=$2
if [ ${ENV} = "ASPAC_PROD" ]; then
i=${ASPAC_PROD_COUNT}
ASPAC_PROD_PUB_ENV[$i]=`echo ${RECLINE} | cut -d":" -f1 | cut -d"=" -f2`
ASPAC_PROD_PUB_LIVE_FLAG[$i]=`echo ${RECLINE} | cut -d":" -f2 | cut -d"=" -f2`
ASPAC_PROD_SSH_UID[$i]=`echo ${RECLINE} | cut -d":" -f3 | cut -d"=" -f2`
ASPAC_PROD_SSH_SERVER[$i]=`echo ${RECLINE} | cut -d":" -f4 | cut -d"=" -f2`
ASPAC_PROD_PUB_INSTANCE_NAME[$i]=`echo ${RECLINE} | cut -d":" -f5 | cut -d"=" -f2`
ASPAC_PROD_COUNT=$((${ASPAC_PROD_COUNT}+1))
elif [ ${ENV} = "NA_PROD" ]; then
i=${NA_PROD_COUNT}
NA_PROD_PUB_ENV[$i]=`echo ${RECLINE} | cut -d":" -f1 | cut -d"=" -f2`
NA_PROD_PUB_LIVE_FLAG[$i]=`echo ${RECLINE} | cut -d":" -f2 | cut -d"=" -f2`
NA_PROD_SSH_UID[$i]=`echo ${RECLINE} | cut -d":" -f3 | cut -d"=" -f2`
NA_PROD_SSH_SERVER[$i]=`echo ${RECLINE} | cut -d":" -f4 | cut -d"=" -f2`
NA_PROD_PUB_INSTANCE_NAME[$i]=`echo ${RECLINE} | cut -d":" -f5 | cut -d"=" -f2`
NA_PROD_COUNT=$((${NA_PROD_COUNT}+1))
elif [ ${ENV} = "EMEA_PROD" ]; then
i=${EMEA_PROD_COUNT}
EMEA_PROD_PUB_ENV[$i]=`echo ${RECLINE} | cut -d":" -f1 | cut -d"=" -f2`
EMEA_PROD_PUB_LIVE_FLAG[$i]=`echo ${RECLINE} | cut -d":" -f2 | cut -d"=" -f2`
EMEA_PROD_SSH_UID[$i]=`echo ${RECLINE} | cut -d":" -f3 | cut -d"=" -f2`
EMEA_PROD_SSH_SERVER[$i]=`echo ${RECLINE} | cut -d":" -f4 | cut -d"=" -f2`
EMEA_PROD_PUB_INSTANCE_NAME[$i]=`echo ${RECLINE} | cut -d":" -f5 | cut -d"=" -f2`
EMEA_PROD_COUNT=$((${EMEA_PROD_COUNT}+1))
fi
# Unknown region names are silently ignored.
}
greatest_count_of_regions() {
# Set TABLE_COUNT to the largest of the three per-region instance counts —
# the HTML table needs one row per index up to the biggest region.
#
# BUG FIX: the original if/elif assigned EMEA_PROD_COUNT in the branch
# where ASPAC was the strict maximum, and ties between the largest regions
# fell through to NA_PROD_COUNT, truncating the table.  Compute the true
# maximum instead.
echo -e "$(timestamp):\tFinding Greatest of ASPAC_COUNT: ${ASPAC_PROD_COUNT} NA_COUNT: ${NA_PROD_COUNT} EMEA_COUNT: ${EMEA_PROD_COUNT}"
TABLE_COUNT=${NA_PROD_COUNT}
if [ ${ASPAC_PROD_COUNT} -gt ${TABLE_COUNT} ]; then
TABLE_COUNT=${ASPAC_PROD_COUNT}
fi
if [ ${EMEA_PROD_COUNT} -gt ${TABLE_COUNT} ]; then
TABLE_COUNT=${EMEA_PROD_COUNT}
fi
echo -e "$(timestamp):\tTable Count Initialized to: ${TABLE_COUNT}"
# Freeze the value; generate_html_table relies on it not changing.
readonly TABLE_COUNT
}
#####################################################
# Parent Method: ssh_to_check_pub_status #
#####################################################
na_ssh_to_check_pub_status() {
# For every configured NA instance: count its PwxCDCPublisher.sh processes
# on the remote host over ssh (key-auth only, no password prompt), then
# derive the human-readable Live/Status strings used by the HTML table.
for(( i=0; i<${NA_PROD_COUNT} ; i++ ))
do
read_status_log_na ${i}
NA_PROD_PUB_COUNT_RESULT[${i}]=`ssh -q -o PasswordAuthentication=no -o ConnectTimeout=600 -n ${NA_PROD_SSH_UID[${i}]}@${NA_PROD_SSH_SERVER[${i}]} "ps -eaf | grep PwxCDCPublisher.sh | grep -v grep | grep ${NA_PROD_PUB_INSTANCE_NAME[${i}]} | wc -l;"`
if [ ${NA_PROD_PUB_LIVE_FLAG[${i}]} -eq 0 ]; then
NA_PROD_PUB_LIVE[${i}]="Not Live"
else
NA_PROD_PUB_LIVE[${i}]="Live"
fi
if [ ${NA_PROD_PUB_COUNT_RESULT[${i}]} -gt 0 ]; then
NA_PROD_PUB_STATUS[${i}]="Running"
else
NA_PROD_PUB_STATUS[${i}]="Not Running"
fi
pub_status_log_na ${i}
done
}
# Log helpers for the NA probe (before/after each ssh call).
read_status_log_na() {
i=$1
printstar
echo -e "$(timestamp):\tCurrently Checking Publisher status of ${NA_PROD_PUB_INSTANCE_NAME[${i}]} in ${NA_PROD_PUB_ENV[${i}]}"
echo -e "$(timestamp):\tPublisher Instance: ${NA_PROD_PUB_INSTANCE_NAME[${i}]} runs in server: ${NA_PROD_SSH_SERVER[${i}]} and ssh userid is ${NA_PROD_SSH_UID[${i}]}"
}
pub_status_log_na() {
i=$1
echo -e "$(timestamp):\t${NA_PROD_PUB_INSTANCE_NAME[${i}]} is a ${NA_PROD_PUB_LIVE[${i}]} instance.."
echo -e "$(timestamp):\t${NA_PROD_PUB_INSTANCE_NAME[${i}]} is currently ${NA_PROD_PUB_STATUS[${i}]} "
}
# EMEA probe — same pattern as the NA functions above, over EMEA_* arrays.
emea_ssh_to_check_pub_status() {
for(( i=0; i<${EMEA_PROD_COUNT} ; i++ ))
do
read_status_log_emea ${i}
EMEA_PROD_PUB_COUNT_RESULT[${i}]=`ssh -q -o PasswordAuthentication=no -o ConnectTimeout=600 -n ${EMEA_PROD_SSH_UID[${i}]}@${EMEA_PROD_SSH_SERVER[${i}]} "ps -eaf | grep PwxCDCPublisher.sh | grep -v grep | grep ${EMEA_PROD_PUB_INSTANCE_NAME[${i}]} | wc -l;"`
if [ ${EMEA_PROD_PUB_LIVE_FLAG[${i}]} -eq 0 ]; then
EMEA_PROD_PUB_LIVE[${i}]="Not Live"
else
EMEA_PROD_PUB_LIVE[${i}]="Live"
fi
if [ ${EMEA_PROD_PUB_COUNT_RESULT[${i}]} -gt 0 ]; then
EMEA_PROD_PUB_STATUS[${i}]="Running"
else
EMEA_PROD_PUB_STATUS[${i}]="Not Running"
fi
pub_status_log_emea ${i}
done
}
read_status_log_emea() {
i=$1
printstar
echo -e "$(timestamp):\tCurrently Checking Publisher status of ${EMEA_PROD_PUB_INSTANCE_NAME[${i}]} in ${EMEA_PROD_PUB_ENV[${i}]}"
echo -e "$(timestamp):\tPublisher Instance: ${EMEA_PROD_PUB_INSTANCE_NAME[${i}]} runs in server: ${EMEA_PROD_SSH_SERVER[${i}]} and ssh userid is ${EMEA_PROD_SSH_UID[${i}]}"
}
pub_status_log_emea() {
i=$1
echo -e "$(timestamp):\t${EMEA_PROD_PUB_INSTANCE_NAME[${i}]} is a ${EMEA_PROD_PUB_LIVE[${i}]} instance.."
echo -e "$(timestamp):\t${EMEA_PROD_PUB_INSTANCE_NAME[${i}]} is currently ${EMEA_PROD_PUB_STATUS[${i}]} "
}
# ASPAC probe — same pattern again, over ASPAC_* arrays.
aspac_ssh_to_check_pub_status() {
for(( i=0; i<${ASPAC_PROD_COUNT} ; i++ ))
do
read_status_log_aspac ${i}
ASPAC_PROD_PUB_COUNT_RESULT[${i}]=`ssh -q -o PasswordAuthentication=no -o ConnectTimeout=600 -n ${ASPAC_PROD_SSH_UID[${i}]}@${ASPAC_PROD_SSH_SERVER[${i}]} "ps -eaf | grep PwxCDCPublisher.sh | grep -v grep | grep ${ASPAC_PROD_PUB_INSTANCE_NAME[${i}]} | wc -l;"`
if [ ${ASPAC_PROD_PUB_LIVE_FLAG[${i}]} -eq 0 ]; then
ASPAC_PROD_PUB_LIVE[${i}]="Not Live"
else
ASPAC_PROD_PUB_LIVE[${i}]="Live"
fi
if [ ${ASPAC_PROD_PUB_COUNT_RESULT[${i}]} -gt 0 ]; then
ASPAC_PROD_PUB_STATUS[${i}]="Running"
else
ASPAC_PROD_PUB_STATUS[${i}]="Not Running"
fi
pub_status_log_aspac ${i}
done
}
read_status_log_aspac() {
i=$1
printstar
echo -e "$(timestamp):\tCurrently Checking Publisher status of ${ASPAC_PROD_PUB_INSTANCE_NAME[${i}]} in ${ASPAC_PROD_PUB_ENV[${i}]}"
echo -e "$(timestamp):\tPublisher Instance: ${ASPAC_PROD_PUB_INSTANCE_NAME[${i}]} runs in server: ${ASPAC_PROD_SSH_SERVER[${i}]} and ssh userid is ${ASPAC_PROD_SSH_UID[${i}]}"
}
pub_status_log_aspac() {
i=$1
echo -e "$(timestamp):\t${ASPAC_PROD_PUB_INSTANCE_NAME[${i}]} is a ${ASPAC_PROD_PUB_LIVE[${i}]} instance.."
echo -e "$(timestamp):\t${ASPAC_PROD_PUB_INSTANCE_NAME[${i}]} is currently ${ASPAC_PROD_PUB_STATUS[${i}]} "
}
#####################################################
# Logging Method #
#####################################################
timestamp() {
# Current local time formatted for log lines, tagged "EST".
printf '%s EST\n' "$(date '+%d-%b-%Y %H:%M:%S')"
}
printstar() {
# Visual separator printed between report phases.
printf '%s\n' "********************************************************************"
}
main "$@"
| true
|
bafcda58eba97281c47e5d39cebc62d685b69733
|
Shell
|
sjpi/cloudflare-backup
|
/cfbkup.sh
|
UTF-8
| 2,119
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Licensed under GPLv3
# created by "black" on LET
# please give credit if you plan on using this for your own projects
# https://github.com/blackdotsh
#TODO:
# -add subdomain support
# -switch back to main IP when the main server is online
# -maybe add more reliable detection methods or even integrate other monitoring services
# User configuration — all values must be filled in before running.
tkn=""; #cloudflare token
bkupIP=""; #backup IP
email=""; #your CF email
domain="" #change me
domainName="" #currently it only supports the main domain, so it should be the same as the domain variable (no subdomains)
srvceMode="1" #Status of CloudFlare Proxy, 1 = orange cloud, 0 = grey cloud.
sleepTime="60" #number of seconds to sleep for before checking again
# Dump all DNS records of zone $1 (CloudFlare v1 JSON API) into results.txt,
# pretty-printed so getDNSID can locate fields by line offset.
function rec_load_all() {
	curl https://www.cloudflare.com/api_json.html \
	-d 'a=rec_load_all' \
	-d "tkn=$tkn" \
	-d "email=$email" \
	-d "z=$1" -s | python -mjson.tool > results.txt
}
#$1 = searching for that zone's dns ID
function getDNSID() {
	# Sets the global DNSID.  NOTE(review): relies on the record id sitting
	# exactly 13 lines below the first match of "$1" in the pretty-printed
	# JSON — fragile if the API response shape changes; confirm offset.
	dnsLineNum=`grep -E "$1" -n results.txt | head -n 1 | cut -d ":" -f1`;
	echo "dns line #:$dnsLineNum";
	dnsIDLineNum=`echo "$dnsLineNum + 13" | bc -q`;
	echo "dns id line #: $dnsIDLineNum";
	DNSID=`sed -n "$dnsIDLineNum p" results.txt | cut -d ":" -f2 | sed "s/\"//g;s/ //;s/\,//";`
}
#$1=target domain
#$2=name of the DNS record
#$3=new ip address
#$4=service mode
function edit_rec() {
	# Point the A record $2 in zone $1 at IP $3; logs to cf.log on success.
	echo "DNSID: $DNSID";
	curl -s https://www.cloudflare.com/api_json.html \
	-d 'a=rec_edit' \
	-d "tkn=$tkn" \
	-d "id=$DNSID" \
	-d "email=$email" \
	-d "z=$1" \
	-d "type=A" \
	-d "name=$2" \
	-d "content=$3" \
	-d "service_mode=$4" \
	-d "ttl=1" | grep '"result":"success"';
	if [ $? -eq 0 ]
	then
		sleep 0;
		echo `date` "successfully changed servers to $bkupIP" >> cf.log
	fi
}
# Poll loop: every $sleepTime seconds probe $domain; when it looks down,
# repoint the CloudFlare A record at the backup IP.
# Fixes: 'while [ true ]' tested the literal string "true" (always truthy but
# misleading) — replaced with the 'while true' idiom; backticks -> $(...);
# quoted the sleep interval.
while true
do
	# NOTE(review): only an exact "HTTP/1.1 200 OK" status line counts as
	# "up" — HTTP/2 responses and redirects are treated as down; confirm
	# that is intended.
	curl -I -s "$domain" | grep -q "HTTP/1.1 200 OK"
	# grep -q exits 1 on "no match"; a grep error (2) deliberately does not
	# trigger failover.
	if [ $? -eq 1 ]
	then
		echo "$(date) service is down, going to backup IP" >> cf.log
		rec_load_all "$domain"
		getDNSID "$domain"
		edit_rec "$domain" "$domainName" "$bkupIP" "$srvceMode"
		#add whatever else you want to do here, perhaps send yourself an email using mailx
	fi
	sleep "$sleepTime"
done
| true
|
3b234a2af8edaf19f961307a3cf075486b19074e
|
Shell
|
atoledo/spry_framework
|
/vendor/git/git.bash
|
UTF-8
| 2,789
| 4
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
function git_clone_only() {
# Clone repository $1 into optional path $2, reporting via the project's
# out_* helpers; raises RequiredParameterNotFound when $1 is missing.
# (raise / out_info / out_check_status and ${_GIT} are defined by the
# surrounding framework, not in this file.)
local _GIT_CLONE_REPO_URL=${1:-}
local _GIT_REPO_PATH=${2:-}
if [ -z "${_GIT_CLONE_REPO_URL}" ]; then
raise RequiredParameterNotFound "[git_clone_only] Please provide a valid repository"
fi
out_info "Cloning repository: [ ${_GIT_CLONE_REPO_URL} ]." 1
${_GIT} clone ${_GIT_CLONE_REPO_URL} ${_GIT_REPO_PATH}
out_check_status $? "Cloned with success" "Clone failed"
}
function check_is_same_repostory() {
# Return 0 when the working copy at $2 has $1 configured as its
# remote.origin.url, 1 otherwise.  Raises when $1 is not provided.
# (Function name keeps its historical spelling — callers depend on it.)
local _REQUESTED_URL=${1:-}
local _REPO_DIR=${2:-}
if [ -z "${_REQUESTED_URL}" ]; then
raise RequiredParameterNotFound "[check_is_same_repostory] Please provide a valid repository"
fi
local _CURRENT_URL
_CURRENT_URL=$(${_GIT} -C ${_REPO_DIR} config --get remote.origin.url)
[ "${_REQUESTED_URL}" == "${_CURRENT_URL}" ] && return 0
return 1
}
function git_check_if_resource_exists() {
# Resolve $1 against the repository at $2: succeed (0) if it names a tag,
# a branch (local or remote), or a reachable commit; 1 otherwise.
local _GIT_RESOURCE=${1:-}
local _GIT_REPO_PATH=${2:-}
# Build an alternation of remote names ("origin|upstream|...") so that
# remote-prefixed refs also match.
local _GIT_REMOTES=$(git -C ${_GIT_REPO_PATH} remote)
local _GIT_REMOTE_LIST=""
for _FORMAT in ${_GIT_REMOTES}; do
_GIT_REMOTE_LIST="${_GIT_REMOTE_LIST} ${_FORMAT}"
done
# NOTE(review): the unquoted echo relies on word-splitting to drop the
# leading space before the sed join — accidental but load-bearing.
_GIT_REMOTE_LIST=$(echo ${_GIT_REMOTE_LIST} | sed "s/ /\|/g")
if (${_GIT} -C ${_GIT_REPO_PATH} tag -l | egrep "^\s*((${_GIT_REMOTE_LIST})?/)?${_GIT_RESOURCE}$" > /dev/null); then
out_success "Tag found"
return 0
elif (${_GIT} -C ${_GIT_REPO_PATH} branch -a | egrep "^\s*(remotes/)?((${_GIT_REMOTE_LIST})?/)?${_GIT_RESOURCE}$" > /dev/null); then
out_success "Branch found"
return 0
elif (${_GIT} -C ${_GIT_REPO_PATH} cat-file -e ${_GIT_RESOURCE}); then
out_success "Commit found"
return 0
else
out_warning "No resource found for ${_GIT_RESOURCE}"
return 1
fi
}
function git_extract_repository_name() {
# Print the repository name derived from URL/path $1, i.e. the last path
# component with its extension (".git") stripped; raises when $1 is empty.
# Fixes: the empty-check operand is now quoted ('[ ! -z ${var} ]' only
# worked by accident when the value was empty or contained spaces), and the
# command substitution is split from 'local' so its exit status isn't
# masked.
local _GIT_REPO=${1:-}
if [ -n "${_GIT_REPO}" ]; then
local _REPO_BASENAME
_REPO_BASENAME=$(basename "${_GIT_REPO}" ".${_GIT_REPO##*.}")
echo "${_REPO_BASENAME}"
else
raise RepositoryNotFound "[git_extract_repository_name] ${_GIT_REPO} is not a valid repository"
fi
}
function git_is_tag() {
# Return 0 when $2 names an existing tag in the repository at $1,
# 1 otherwise; raises when $1 is not a directory.
# BUG FIX: the original captured the pipeline's output AFTER redirecting it
# to /dev/null, so _TAG_FOUND was always empty and the function always
# returned 1.  Test the pipeline's exit status directly instead.
local _GIT_REPO_PATH=${1:-}
local _GIT_RESOURCE=${2:-}
if [ ! -d ${_GIT_REPO_PATH} ]; then
raise FolderNotFound "[git_is_tag] ${_GIT_REPO_PATH} is not a valid folder"
fi
if ${_GIT} -C ${_GIT_REPO_PATH} show-ref --tags | ${_GREP} -E "refs/tags/${_GIT_RESOURCE}$" > /dev/null; then
return 0
fi
return 1
}
function git_is_in_commit() {
# Detached-HEAD test: returns 0 when HEAD is not a symbolic ref (the
# worktree sits directly on a commit), 1 when a branch is checked out.
local _REPO_DIR=${1:-}
# Running the command as an 'if' condition keeps 'set -e' from aborting
# (same intent as the original's trailing '&& true' guard).
if ${_GIT} -C ${_REPO_DIR} symbolic-ref -q HEAD &> /dev/null; then
return 1
fi
return 0
}
| true
|
b1cf2056918d889d121367f75caf2c582dc57fa5
|
Shell
|
prakashsurya/dotfiles
|
/ansible/roles/dot-base/files/home/user/.local/bin/vpn
|
UTF-8
| 751
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash -eu
die() {
# Print an error message to stderr and abort with status 2.
printf 'ERROR: %s\n' "$*" >&2
exit 2
}
# Subcommand dispatch.  "update" / "otp" need DELPHIX_OTP (base32 TOTP seed)
# in the environment; anything else is passed straight to systemctl.
if [[ "$1" == "update" ]]; then
[[ -n "$DELPHIX_OTP" ]] || die "DELPHIX_OTP variable is missing"
NEW=$(oathtool --totp -b "$DELPHIX_OTP")
[[ -z "$NEW" ]] && die "failed to get new OTP token"
# Splice the fresh token after the static password prefix.
# NOTE(review): assumes the static part of "password = ..." is exactly
# 9 alphanumeric characters — confirm against the config format.
sudo sed -i "s/^\(password = [[:alnum:]]\{9\}\).*$/\1$NEW/" \
/opt/openfortivpn/etc/openfortivpn/config ||
die "failed to update VPN with new OTP token"
exit 0
fi
if [[ "$1" == "otp" ]]; then
[[ -n "$DELPHIX_OTP" ]] || die "DELPHIX_OTP variable is missing"
oathtool --totp -b "$DELPHIX_OTP"
exit 0
fi
if [[ "$1" == "edit" ]]; then
sudo vi /opt/openfortivpn/etc/openfortivpn/config
exit 0
fi
# Fallback: treat $1 as a systemctl verb (start/stop/restart/...), then
# tail the service log.
sudo systemctl $1 openfortivpn.service
sudo journalctl -f -u openfortivpn.service
| true
|
051dc099ff0fadd443274cc3a79c2f25fbcca8a6
|
Shell
|
m-lab/mlab-vis-pipeline
|
/run_bq.sh
|
UTF-8
| 2,764
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# Run all parts of the pipeline with a provided end date.
# options:
# -s <YYYY-MM-DD>: start date to run pipeline from.
# -e <YYYY-MM-DD>: end date to run pipeline to.
# -m staging|production|sandbox: environment to use
# -t : to do a test run (doesn't start dataflow)
# Fail fast on errors (-e), trace every command (-x), and treat unset
# variables as errors (-u).
set -e
set -x
set -u
usage() {
# Print invocation help to stderr and exit 1.
# BUG FIX: the original appended a stray "$1" to the echo, but every call
# site invokes usage with no arguments — under 'set -u' that aborted with
# an "unbound variable" error instead of printing the message.
echo "Usage: KEY_FILE=<path> $0 -s <YYYY-MM-DD> -e <YYYY-MM-DD> -m staging|production|sandbox [-t]" 1>&2
exit 1
}
ENDDATE=""
STARTDATE=""
TEST=0
# Absolute directory of this script, independent of the caller's cwd.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Parse flags.  BUG FIX: '-t' is documented as a bare flag ("[-t]"), but the
# original optstring declared it as 't:' (argument-taking), so '-t' silently
# swallowed the next argument.  Declared as plain 't' now.
while getopts ":e:s:m:t" opt; do
case $opt in
e)
ENDDATE=${OPTARG}
;;
s)
STARTDATE=${OPTARG}
;;
m)
# Select the deployment environment; the sourced file provides
# PROJECT and STAGING_LOCATION used below.
echo "${OPTARG} environment"
if [[ "${OPTARG}" == production ]]; then
source $DIR/environments/production.sh
elif [[ "${OPTARG}" == staging ]]; then
source $DIR/environments/staging.sh
elif [[ "${OPTARG}" == sandbox ]]; then
source $DIR/environments/sandbox.sh
else
echo "BAD ARGUMENT TO $0: ${OPTARG}"
exit 1
fi
;;
t)
echo "Setting Test to 1" >&2
TEST=1
;;
\?)
echo "Invalid option: -$OPTARG" >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument." >&2
exit 1
;;
esac
done
DATAFLOW_DIR="${DIR}/dataflow"
JAR_BASEDIR="${DIR}/dataflow/target"
JAR_FILE="${JAR_BASEDIR}/mlab-vis-pipeline.jar"
# PROJECT / STAGING_LOCATION come from the sourced environment file; with
# 'set -u' the script aborts here if -m was not supplied.
echo "Project: ${PROJECT}"
echo "Start date: ${STARTDATE}"
echo "End date: ${ENDDATE}"
# echo "moving into dir: ${DATAFLOW_DIR}"
cd "${DATAFLOW_DIR}"
ENVIRONMENT_PARAMETERS="--runner=DataflowRunner --project=$PROJECT --stagingLocation=${STAGING_LOCATION} --maxNumWorkers=20 --skipNDTRead=0 --test=${TEST} --diskSizeGb=30"
echo "Starting server for metrics & bigquery pipeline (DAY and HOUR)"
if [ -z "${ENDDATE}" ] && [ -z "${STARTDATE}" ]; then
# empty start and end dates, going to let auto detection happen
echo "Empty start and end dates, going to let pipeline determine dates."
# BUG FIX: "${KEY_FILE:-}" — the unguarded expansion crashed under
# 'set -u' whenever KEY_FILE was not exported, even though the script has
# an explicit no-key branch.
if [ -n "${KEY_FILE:-}" ]; then
export GOOGLE_APPLICATION_CREDENTIALS=${KEY_FILE}
mvn exec:java -Dexec.mainClass=mlab.dataviz.main.BQRunner -Dexec.args="$ENVIRONMENT_PARAMETERS"
else
mvn exec:java -Dexec.mainClass=mlab.dataviz.main.BQRunner -Dexec.args="$ENVIRONMENT_PARAMETERS"
fi
else
echo "Running on dates ${STARTDATE} - ${ENDDATE}"
if [ -n "${KEY_FILE:-}" ]; then
export GOOGLE_APPLICATION_CREDENTIALS=${KEY_FILE}
mvn exec:java -Dexec.mainClass=mlab.dataviz.main.BQRunner -Dexec.args="$ENVIRONMENT_PARAMETERS --startDate=${STARTDATE} --endDate=${ENDDATE}"
else
mvn exec:java -Dexec.mainClass=mlab.dataviz.main.BQRunner -Dexec.args="$ENVIRONMENT_PARAMETERS --startDate=${STARTDATE} --endDate=${ENDDATE}"
fi
fi
| true
|
4e0ed921999b0038fcedc2888d1e0b3776414baa
|
Shell
|
bigfunorama/cgrex
|
/cgc_pin_tracer/test.sh
|
UTF-8
| 578
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/sh
# Drive a CGC challenge binary under the Pin wrapper: expose it on a local
# TCP port via netcat, wait for the port, replay the poll/PoV file $1 with
# cb-replay, then wait for the traced process to finish.
echo "test.sh START"
PORT=10000
#a problem is that we cannot have any output from pin
#not to interfere with the testing
nc -e ./pin_wrap.sh -l 127.0.0.1 -p $PORT &
PID=$!
#wait for nc opening the port
# Busy-wait (1s granularity) until netstat shows the listener.
netstat -ltun | grep ":$PORT" > /dev/null
STATUS=$?
while [ $STATUS -eq 1 ]
do
sleep 1
netstat -ltun | grep ":$PORT" > /dev/null
STATUS=$?
echo "waiting for netcat..."
done
#echo `cat /proc/$PID/cmdline` ###
cb-replay --host 127.0.0.1 --port 10000 $1
echo "waiting $PID"
#echo `cat /proc/$PID/cmdline`
#wait for nc termination
wait $PID
echo "test.sh END"
| true
|
d3536c3509916e795bf38136609eb3c06b9c46f4
|
Shell
|
InnovAnon-Inc/HafrenHaver
|
/new/run.sh
|
UTF-8
| 475
| 2.640625
| 3
|
[
"Unlicense",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#! /usr/bin/env bash
set -euxo pipefail
# Guards: (( ! $# )) aborts (via set -e) if any arguments were given;
# (( $UID )) aborts if running as root (UID 0).
(( ! $# ))
(( $UID ))
export CC=mpicc
# Python minor version (e.g. "11" for 3.11), used to pick headers/libs.
PV=$(python3 --version|cut -d\ -f2|cut -d. -f2)
CFLAGS=$(python3-config --cflags)
LDFLAGS=$(python3-config --ldflags)
# Build the Cython/C extensions in place, then compile the embedding driver
# against libpython and run it under MPI.
python3 setup.py build_ext --inplace
$CC $CFLAGS \
-DNPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION \
-I/usr/include/python3.$PV \
-o main main.c \
-lpython3.$PV $LDFLAGS
mpiexec main
| true
|
deb019b41e89b7cbf21a5fc8f7ceb3688bb40778
|
Shell
|
ed9e/i3blocks-docker-containers
|
/services.bash
|
UTF-8
| 538
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# i3blocks widget: list rancher services under the "ebo/" stack, wrapping
# the currently-active ones in pango colour markup.  Output is cached in
# FILE_TMP and refreshed at most once every INTERVAL seconds.
# Fixes: quoted "$LIST" and "$FILE_TMP" — the unquoted 'echo -e $LIST'
# relied on accidental word re-joining to keep the markup intact.
FILE_TMP="/tmp/.services"
INTERVAL=25
LIST=""
# Refresh the cache when it is missing or older than INTERVAL seconds.
if [[ ! -f "$FILE_TMP" ]] || (( $(date +%s) - $(stat -c %Y "$FILE_TMP") > INTERVAL )); then
  # Column 3 of 'rancher ps' looks like "stack/service"; keep the service part.
  all=$(rancher ps -a | grep "ebo/" | awk '{print $3}' | awk -F '/' '{print $2}' | sort)
  active=$(rancher ps | grep "ebo/" | awk '{print $3}' | awk -F '/' '{print $2}')
  for item in $all; do
    # Whole-word membership test: is $item among the active services?
    if [[ $active =~ (^|[[:space:]])$item($|[[:space:]]) ]]; then
      LIST="${LIST}<span color='#EE37B8'>$item</span>\n"
    else
      LIST="${LIST}$item\n"
    fi
  done
  echo -e "$LIST" > "$FILE_TMP"
fi
cat "$FILE_TMP"
| true
|
db20ba6c1f85ac4ff5b3a08d9746877b122b184f
|
Shell
|
AzusaOS/azusa-opensource-recipes
|
/sys-libs/libselinux/libselinux-3.5.sh
|
UTF-8
| 534
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/sh
# Azusa build recipe for sys-libs/libselinux: fetch the release tarball,
# build against libsepol, and install into the split core/libs package
# prefixes.  init.sh supplies PV/P/S/PKG/PVRF/D and the get/acheck/
# importpkg/finalize helpers.
# NOTE(review): the MAKEOPTS array below is a bashism under a #!/bin/sh
# shebang — works only where /bin/sh is bash; confirm the target platform.
source "../../common/init.sh"
get https://github.com/SELinuxProject/selinux/releases/download/${PV}/${P}.tar.gz
acheck
importpkg sys-libs/libsepol
CPPFLAGS="${CPPFLAGS} -DPIE -fPIE -fno-semantic-interposition -Wno-stringop-truncation"
LDFLAGS="${LDFLAGS} -pthread"
cd "${S}"
MAKEOPTS=(
USE_PCRE2=y
CC="gcc ${CPPFLAGS}"
LDFLAGS="${LDFLAGS}"
PREFIX="/pkg/main/${PKG}.core.${PVRF}"
LIBDIR="/pkg/main/${PKG}.libs.${PVRF}/lib$LIB_SUFFIX"
DESTDIR="${D}"
)
make "${MAKEOPTS[@]}"
make install "${MAKEOPTS[@]}"
finalize
| true
|
397a8d189c213318632b25993494914650aa2d61
|
Shell
|
forkdump/wikitopics
|
/src/pageviews/sub_extract_pageviews.sh
|
UTF-8
| 3,770
| 3.609375
| 4
|
[] |
no_license
|
#$ -S /bin/bash
#$ -j y
#$ -cwd
#$ -V
#$ -q all.q@*
#$ -l mem_free=1G
#$ -o /export/a05/wikitopics/src/pageviews/log/
# SGE batch job: extract per-article English-Wikipedia pageview counts from
# hourly wikistats pagecounts dumps for the titles in a list file (or a
# single title), writing one sorted file per day.
echo "$HOSTNAME:`pwd`\$ $0 $*"
if [ $# -lt 3 ]; then
echo "usage: `basename $0` [LIST_FILE|PAGE_TITLE] YEAR MONTH [DAY [HOUR]] [OUTPUT_PATH]"
exit
fi
# If $1 is not an existing file, treat it as a single page title: wrap it in
# a temporary one-line list file and remember to delete it at the end.
DELETE_LATER=
if [ ! -e "$1" ]; then
LIST_FILE=list-$RANDOM.txt
echo $1 > $LIST_FILE
DELETE_LATER=1
else
LIST_FILE=$1
fi
if [ ! -s "$LIST_FILE" ]; then
echo the $LIST_FILE file is empty
exit 1
fi
# A proper list has exactly one column per line.
if [ `awk 'NF>1 { print NF; exit }' $LIST_FILE` ]; then
echo $LIST_FILE is not a proper list
exit 1
fi
YEAR=$2
MONTH=$3
THISYEAR=`date "+%Y"`
THISMONTH=`date "+%m"`
shift; shift; shift
# Validate YEAR: data exists from Dec 2007 up to the current month.
# NOTE(review): the '[ $? -eq 2 ]' checks below are dead code — after 'fi',
# $? is the status of the whole 'if' compound (0 when no branch ran), not of
# the test, so the "must be a number" branches never fire.
if [ "$YEAR" -lt 2007 -o "$YEAR" -gt $THISYEAR ] 2> /dev/null; then
echo "Year $YEAR is out of range"
exit 1
fi
if [ $? -eq 2 ]; then
echo "Year $YEAR must be a number"
exit 1
fi
if [ "$YEAR" -eq 2007 -a "$MONTH" -lt 12 -o "$YEAR" -eq "$THISYEAR" -a "$MONTH" -gt "$THISMONTH" -o "$MONTH" -lt 1 -o "$MONTH" -gt 12 ] 2> /dev/null; then
echo "Pair of year-month $YEAR-$MONTH is out of range"
exit 1
fi
if [ $? -eq 2 ]; then
echo "Month $MONTH must be a number"
exit 1
fi
# Optional DAY argument.  NOTE(review): '-ge 1 -o -le 31' is true for every
# number (should presumably be -a), so the range check never rejects; the
# elif only distinguishes "numeric" from "non-numeric" via test's exit 2.
DAY=
if [ "$1" != "" ]; then
if [ "$1" -ge 1 -o "$1" -le 31 ] 2> /dev/null; then
DAY=$1
shift
elif [ $? -eq 1 ]; then
echo "Day $1 is out of range"
exit 1
fi
fi
# Optional HOUR argument — same always-true pattern as DAY above.
HOUR=
if [ "$1" != "" ]; then
if [ $1 -ge 0 -o $1 -lt 24 ] 2> /dev/null; then
HOUR=$1
shift
elif [ $? -eq 1 ]; then
echo "Hour $1 is out of range"
exit 1
fi
fi
# make each numbers two-digited
for i in `seq 9`; do
if [ "$MONTH" -eq $i ] 2> /dev/null; then
MONTH="0$i"
fi
if [ "$DAY" -eq $i ] 2> /dev/null; then
DAY="0$i"
fi
if [ "$HOUR" -eq $i ] 2> /dev/null; then
HOUR="0$i"
fi
done
if [ "$1" == "" ]; then
OUTPUTPATH="$WIKITOPICS/data/pageviews/$YEAR/$MONTH"
else
OUTPUTPATH=$1
fi
# check the input and output paths
ARCHIVE="$WIKISTATS/archive/$YEAR/$MONTH"
if [ ! -e "$ARCHIVE" ]; then
echo "no pageviews files at $ARCHIVE"
exit 1
fi
mkdir -p $OUTPUTPATH
if [ "$DAY" != "" ]; then
# Single-day (optionally single-hour) mode.
OUTPUTFILE="$OUTPUTPATH/pageviews_$YEAR$MONTH$DAY.txt"
if [ -e "$OUTPUTFILE" ]; then
echo "$OUTPUTFILE exists. skipping..."
exit
fi
if [ "$HOUR" != "" ]; then
HOUR="-$HOUR"
fi
date
echo "processing $YEAR$MONTH$DAY$HOUR"
for FILE in $ARCHIVE/pagecounts-$YEAR$MONTH$DAY$HOUR*.gz; do
if [ ! -e "$FILE" ]; then
continue
fi
DATETIME=`echo $FILE | sed -e 's/.*pagecounts-//' -e 's/\.gz$//'`
echo "processing $FILE" 1>&2
# Keep only the English Wikipedia ("en ") lines before filtering by title.
time gunzip -c $FILE | grep '^en ' | ./sub_extract_pageviews.pl $LIST_FILE $DATETIME >> $OUTPUTFILE
# NOTE(review): '[ ! $? ]' is always false ('! <string>' negates a
# non-null-string test), so pipeline failures are never detected here.
if [ ! $? ]; then
echo "failed."
exit 1
fi
done
if [ -e "$OUTPUTFILE" ]; then
# Sort by title (field 2) then date (field 1) via a temp file.
TEMP_FILE=$OUTPUTPATH/foo$RANDOM.txt
echo "sorting the results..."
time sort -k 2,2 -k 1,1 $OUTPUTFILE > $TEMP_FILE
mv $TEMP_FILE $OUTPUTFILE
fi
else
# Whole-month mode: one output file per day 01..31.
date
echo "processing $YEAR/$MONTH"
for DAY in `seq 31`; do
if [ $DAY -ge 1 -a $DAY -le 9 ]; then
DAY="0$DAY"
fi
OUTPUTFILE="$OUTPUTPATH/pageviews_$YEAR$MONTH$DAY.txt"
if [ -e "$OUTPUTFILE" ]; then
echo "$OUTPUTFILE exists. skipping..."
exit
fi
for FILE in $ARCHIVE/pagecounts-$YEAR$MONTH$DAY*.gz; do
if [ ! -e "$FILE" ]; then
continue
fi
DATETIME=`echo $FILE | sed -e 's/.*pagecounts-//' -e 's/\.gz$//'`
echo "processing $FILE" 1>&2
time gunzip -c $FILE | grep '^en ' | ./sub_extract_pageviews.pl $LIST_FILE $DATETIME >> $OUTPUTFILE
# NOTE(review): same dead '[ ! $? ]' check as above.
if [ ! $? ]; then
echo "failed."
exit 1
fi
done
if [ -e $OUTPUTFILE ]; then
TEMP_FILE=$OUTPUTPATH/foo$RANDOM.txt
echo "sorting the results..."
time sort -k 2,2 -k 1,1 $OUTPUTFILE > $TEMP_FILE
mv $TEMP_FILE $OUTPUTFILE
fi
done
fi
if [ "$DELETE_LATER" ]; then
rm -f $LIST_FILE
fi
echo done extracting monthly pageviews.
date
| true
|
36b47d38a89003ef8e234563ad0514e59d21313f
|
Shell
|
xiongbolu/hwxreposetup
|
/SetupLocalDebianAmbariHDPRepo.sh
|
UTF-8
| 7,169
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Below script sets up an local debian repo using apache web service with Ambari and HDP binaries
# that are mirrored from HWX repo for a given Ambari and HDP stack version and HDP UTILS version
#Example of all input params
#AMBARI_REPO_URL=http://private-repo-1.hortonworks.com/ambari/ubuntu16/2.x/updates/2.5.0.4-1/ambari.list
#HDP_REPO_URL=http://private-repo-1.hortonworks.com/HDP/ubuntu16/2.x/updates/2.5.2.1-6/hdp.list
AMBARI_LIST_URL=$1
HDP_LIST_URL=$2
usage()
{
echo "Usage: sudo bash setuplocaldebianrepo.sh AMBARI_REPO_URL HDP_REPO_URL";
exit 1;
}
#validate input arguments
if [ "$#" -ne 2 ]; then
# NOTE(review): message says "8" but the check requires exactly 2 arguments
# — the string looks stale.
echo "Expected number of arguments is 8"
usage
fi
if [ -z $AMBARI_LIST_URL ] ; then
echo "AMBARI hwx repo url not provided"
usage
fi
if [ -z $HDP_LIST_URL ] ; then
echo "HDP hwx repo url not provided"
usage
fi
FullHostName=private-repo-hwx.chinanorth.cloudapp.chinacloudapi.cn
echo ----------------------------------------------------------------------------------------------
echo ----------------------------------------------------------------------------------------------
echo Setting up local debian repo for Abmari and HDP on $FullHostName
# Download the upstream .list files; their third whitespace field is the
# repo base URL, from which the stack versions are extracted below.
regex="([a-z]*.list)$"
[[ $AMBARI_LIST_URL =~ $regex ]] && AMBARI_LIST_FILE=${BASH_REMATCH[1]}
[[ $HDP_LIST_URL =~ $regex ]] && HDP_LIST_FILE=${BASH_REMATCH[1]}
wget -q $AMBARI_LIST_URL -O $AMBARI_LIST_FILE
wget -q $HDP_LIST_URL -O $HDP_LIST_FILE
# Last path component of the repo URL = stack version string.
versionregex="([^\/]+$)"
ambarifilelines=`cat $AMBARI_LIST_FILE`
stringarray=($ambarifilelines)
AMBARI_REPO_URL=${stringarray[2]}
[[ $AMBARI_REPO_URL =~ $versionregex ]]
AMBARI_STACK_VERSION=${BASH_REMATCH[0]}
# SYNC_* act as heredoc comment toggles: "#" disables the corresponding
# mirror line in the generated apt-mirror config when a version is missing.
SYNC_AMBARI=
if [ -z "$AMBARI_STACK_VERSION" ]
then
SYNC_AMBARI=#
fi
hdpfilelines=`cat $HDP_LIST_FILE`
stringarray=($hdpfilelines)
HDP_REPO_URL=${stringarray[2]}
HDP_UTILS_REPO_URL=${stringarray[6]}
[[ $HDP_REPO_URL =~ $versionregex ]]
HDP_STACK_VERSION=${BASH_REMATCH[0]}
regex="([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)"
[[ ${stringarray[6]} =~ $regex ]]
HDP_UTILS_VERSION=${BASH_REMATCH[1]}
SYNC_HDP=
if [ -z "$HDP_STACK_VERSION" ]
then
SYNC_HDP=#
fi
SYNC_HDP_UTIL=
if [ -z "$HDP_UTILS_VERSION" ]
then
SYNC_HDP_UTIL=#
fi
LOCAL_REPO_NAME=hwx-$AMBARI_STACK_VERSION-$HDP_STACK_VERSION
setUpLocalHDPDebianRepo()
{
# Write the apt-mirror config for the selected Ambari/HDP/HDP-UTILS repos
# (lines prefixed with "#" via SYNC_* are disabled) and start the mirror.
echo "AMBARI_REPO_URL is $AMBARI_REPO_URL"
echo "HDP_REPO_URL is $HDP_REPO_URL"
echo "HDP_UTILS_REPO_URL is $HDP_UTILS_REPO_URL"
echo "AMBARI_STACK_VERSION is $AMBARI_STACK_VERSION"
echo "HDP_STACK_VERSION is $HDP_STACK_VERSION"
echo "HDP_UTILS_VERSION is $HDP_UTILS_VERSION"
cat >/etc/apt/$LOCAL_REPO_NAME.list <<EOL
set nthreads 20
set base_path /tmp/$LOCAL_REPO_NAME
#HDP 2.6
$SYNC_AMBARI deb $AMBARI_REPO_URL Ambari main
$SYNC_HDP deb $HDP_REPO_URL HDP main
$SYNC_HDP_UTIL deb $HDP_UTILS_REPO_URL HDP-UTILS main
EOL
mkdir -p /tmp/$LOCAL_REPO_NAME
downloadPackagesLocally
}
downloadPackagesLocally()
{
# Run apt-mirror, then relocate the mirrored ambari/HDP/HDP-UTILS trees
# from the staging area into /var/hwx/repo, preserving the upstream
# sub-path layout (the piece after /ambari, /HDP, /HDP-UTILS-<ver>).
apt-mirror /etc/apt/$LOCAL_REPO_NAME.list
SOURCE_FOLDER=/tmp/$LOCAL_REPO_NAME/mirror
cd $SOURCE_FOLDER
ambariPath=$(find . -type d -name "ambari" -print 2>/dev/null -quit)
hdpPath=$(find . -type d -name "HDP" -print 2>/dev/null -quit)
hdpUtilsPath=$(find . -type d -name "HDP-UTILS-$HDP_UTILS_VERSION" -print 2>/dev/null -quit)
targetambariPath=$(echo $AMBARI_REPO_URL | awk -F"/ambari" '{print $2}')
targethdpPath=$(echo $HDP_REPO_URL | awk -F"/HDP" '{print $2}')
targethdpUtilPath=$(echo $HDP_UTILS_REPO_URL | awk -F"/HDP-UTILS-$HDP_UTILS_VERSION" '{print $2}')
# Get path before last slash
pathregex=(.+)\/
[[ $targetambariPath =~ $pathregex ]]
AMBARI_FOLDER=${BASH_REMATCH[1]}
[[ $targethdpPath =~ $pathregex ]]
HDP_FOLDER=${BASH_REMATCH[1]}
TARGET_FOLDER=/var/hwx/repo
echo Create folder $TARGET_FOLDER/ambari/$AMBARI_FOLDER
echo Create folder $TARGET_FOLDER/HDP/$HDP_FOLDER
echo Create folder $TARGET_FOLDER/HDP-UTILS-$HDP_UTILS_VERSION/repos
mkdir -p $TARGET_FOLDER/ambari/$AMBARI_FOLDER
mkdir -p $TARGET_FOLDER/HDP/$HDP_FOLDER
mkdir -p $TARGET_FOLDER/HDP-UTILS-$HDP_UTILS_VERSION/repos
echo Move folder $SOURCE_FOLDER/$ambariPath$targetambariPath to folder $TARGET_FOLDER/ambari/$AMBARI_FOLDER
echo Move folder $SOURCE_FOLDER/$hdpPath$targethdpPath to folder $TARGET_FOLDER/HDP/$HDP_FOLDER
echo Move folder $SOURCE_FOLDER/$hdpUtilsPath$targethdpUtilPath to folder $TARGET_FOLDER/HDP-UTILS-$HDP_UTILS_VERSION$targethdpUtilPath
mv -f $SOURCE_FOLDER/$ambariPath$targetambariPath $TARGET_FOLDER/ambari/$AMBARI_FOLDER
mv -f $SOURCE_FOLDER/$hdpPath$targethdpPath $TARGET_FOLDER/HDP/$HDP_FOLDER
mv -f $SOURCE_FOLDER/$hdpUtilsPath$targethdpUtilPath $TARGET_FOLDER/HDP-UTILS-$HDP_UTILS_VERSION/repos
#ln -s $TARGET_FOLDER/ambari /var/www/html/ambari
#ln -s $TARGET_FOLDER/HDP /var/www/html/HDP
#ln -s $TARGET_FOLDER/HDP-UTILS-$HDP_UTILS_VERSION /var/www/html/HDP-UTILS-$HDP_UTILS_VERSION
}
# Derive the local repo URLs served by this host, verify each is reachable,
# and write the ambari.list / hdp.list apt source files into the web root.
# Globals (read): AMBARI_REPO_URL, HDP_REPO_URL, HDP_UTILS_REPO_URL,
#                 HDP_UTILS_VERSION, FullHostName, AMBARI_STACK_VERSION,
#                 HDP_STACK_VERSION
startAndValidateLocalHDPDebianRepo()
{
#echo "Starting local debian repo"
#systemctl daemon-reload
#systemctl stop apache2
#systemctl start apache2
#echo "End of starting local debian repo"
# these directory structure can change post mirroring, depending on what HWX repo is used (private-repo-1.hortonworks.com or AWS repo)
# so basically extract the path from the given input urls
ambariPath=$(echo $AMBARI_REPO_URL | awk -F"/ambari" '{print $2}')
hdpPath=$(echo $HDP_REPO_URL | awk -F"/HDP" '{print $2}')
hdpUtilPath=$(echo $HDP_UTILS_REPO_URL | awk -F"/HDP-UTILS-$HDP_UTILS_VERSION" '{print $2}')
echo "Printing ambariPath $ambariPath"
echo "Printing hdpPath $hdpPath"
echo "Printing hdpUtilPath $hdpUtilPath"
# NOTE(review): $ambariPath etc. start with "/" (awk split keeps it), so the
# URLs/paths below contain a double slash — harmless for HTTP/filesystem but
# worth confirming.
ambariUrl=http://$FullHostName/ambari$ambariPath
hdpRepoUrl=http://$FullHostName/HDP$hdpPath
hdpUtilsUrl=http://$FullHostName/HDP-UTILS-$HDP_UTILS_VERSION$hdpUtilPath
validateRepo $ambariUrl
echo "Creating ambari.list file"
cat >/var/www/html/ambari/$ambariPath/ambari.list <<EOL
#VERSION_NUMBER=$AMBARI_STACK_VERSION
deb $ambariUrl Ambari main
EOL
validateRepo $ambariUrl/ambari.list
validateRepo $hdpRepoUrl
validateRepo $hdpUtilsUrl
echo "Creating hdp.list file"
cat >/var/www/html/HDP/$hdpPath/hdp.list <<EOL
#VERSION_NUMBER=$HDP_STACK_VERSION
deb $hdpRepoUrl HDP main
deb $hdpUtilsUrl HDP-UTILS main
EOL
validateRepo $hdpRepoUrl/hdp.list
}
# Verify that a repo URL is reachable; exit 132 otherwise.
# Arguments: $1 - URL to probe (fetched quietly with wget).
# Note: wget saves the fetched file into the CWD, matching the original
# behavior; callers rely only on the reachability check.
validateRepo()
{
echo "Validating $1 is accessible"
# Test the exit status directly instead of via $?, and quote the URL so
# characters like '&' in query strings cannot break the command.
if ! wget -q "$1"; then
	echo "$1 is NOT accessible"
	exit 132
else
	echo "Local Repo $1 successfully set up ....."
fi
}
# Remove the apt-mirror staging area created under /tmp.
# Globals (read): LOCAL_REPO_NAME - must be set and non-empty.
cleanUpTmpDirectories()
{
echo Cleaning up /tmp/$LOCAL_REPO_NAME
# ${LOCAL_REPO_NAME:?} aborts if the variable is unset/empty, so this can
# never expand to a bare "rm -rf /tmp/"; "--" stops option parsing.
rm -rf -- "/tmp/${LOCAL_REPO_NAME:?LOCAL_REPO_NAME must be set}"
}
# Entry point: mirror the remote Ambari/HDP debian repos locally, then
# expose them over HTTP and smoke-test each URL.
setUpLocalHDPDebianRepo
startAndValidateLocalHDPDebianRepo
# Cleanup of the /tmp mirror is intentionally disabled so the staged
# packages can be inspected after a run.
#cleanUpTmpDirectories
| true
|
1fb065ed8b5243e0f280a81c2a0f44ad0cd45824
|
Shell
|
amouat/xsd-validator
|
/xsdv.sh
|
UTF-8
| 196
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# call xsdv
# Thin launcher for the bundled lib/xsdv.jar XSD validator; all arguments
# are forwarded to the jar.
# Resolve the directory this script lives in so the jar is found relative
# to the script, not the caller's CWD.  dirname also handles the case where
# the script is invoked without any slash (the old ${0%/*} expansion left
# $0 unchanged in that case, producing a bogus "script/lib/..." path).
callPath=$(dirname "$0")
# Quote the jar path: the install directory may contain spaces.
java -jar "${callPath}/lib/xsdv.jar" "$@"
| true
|
599e6958ff261a34c84bcc2b0d1285208ff114c5
|
Shell
|
e-takeda/kenshu
|
/minny6/restore/shell/mkmeibo
|
UTF-8
| 177
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# Interactively build a simple roster ("meibo") file: prompt for a name and
# a phone number on each pass and append "name tel" to ./meibo.  Typing
# "end" at the name prompt stops the loop.
while :
do
echo -n 'name? '
# "|| break" stops on EOF: previously a closed stdin made read fail forever
# while the loop kept appending blank entries to meibo.  -r keeps
# backslashes in the input literal.
read -r name || break
case $name in
end) break
;;
esac
echo -n 'tel number? '
read -r tel || break
echo "$name $tel" >> meibo
done
| true
|
4c1e5d1c7ea2fe3f202f92a571ae7c07e8b82454
|
Shell
|
TritonDataCenter/kafka-connect-manta
|
/misc/zk-copy-config.sh
|
UTF-8
| 772
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Generate a shared zoo.cfg for a ZooKeeper ensemble and push it to every
# node.
#
# usage: zk-copy-config.sh ZK_INSTANCE...
#   Each argument is a Triton instance; the script resolves its IP, writes a
#   server.N entry per node, seeds /var/lib/zookeeper/myid on each host over
#   ssh, then copies the finished zoo.cfg to all nodes with GNU parallel.
# Requires: triton CLI, GNU parallel, passwordless ssh; zk-common must
# define ZK_ROOT.
source $(dirname "$0")/zk-common
PROGRAM_NAME=$(basename "$0")
if [ "$#" -eq 0 ]; then
  echo "Usage: $PROGRAM_NAME ZK_INSTANCE..."
  exit 1
fi
ZK_HOSTS=("$@")
ZK_HOSTS_ADDRESS=()
# Build the config in a temp file that is removed on any exit path.
cfg=$(mktemp -t zoo.cfg.XXXXXXXXXX)
trap "rm -f $cfg" EXIT
cat > "$cfg" <<EOF
tickTime=2000
dataDir=/var/lib/zookeeper
clientPort=2181
initLimit=5
syncLimit=2
EOF
# server.N must match the N written to each node's myid file below.
count=1
for hostid in "${ZK_HOSTS[@]}"; do
    addr=$(triton instance ip "$hostid")
    echo "server.${count}=${addr}:2888:3888" >> "$cfg"
    ZK_HOSTS_ADDRESS=("${ZK_HOSTS_ADDRESS[@]}" "$addr")
    # Wipe any previous ZooKeeper state and assign this node its id.
    ssh "$addr" "rm -rf /var/lib/zookeeper/*; echo $count > /var/lib/zookeeper/myid"
    count=$((count + 1))
done
cat "$cfg"
parallel scp "$cfg" "{}:$ZK_ROOT/conf/zoo.cfg" ::: "${ZK_HOSTS_ADDRESS[@]}"
| true
|
6c08ea935821cfe88c844282c2a9bb7117ca70a5
|
Shell
|
BillTheBest/raspberry-iceweasel
|
/install.sh
|
UTF-8
| 1,570
| 2.796875
| 3
|
[] |
no_license
|
#IceWeasel browser specific
# Kiosk-style Raspberry Pi setup: installs Iceweasel plus supporting tools
# (xdotool, unclutter, nginx, avahi, VNC), copies the kiosk pages into
# place, and registers the startup script.  Must be run from
# /home/pi/raspberry-iceweasel; assumes the default "pi" user.
echo "----- update system"
sudo apt-get update
echo "----- installing iceweasel"
sudo apt-get -y install iceweasel
#run iceweasel to create a profile
# NOTE(review): this launches the browser interactively and blocks until it
# is closed — confirm that is intended for unattended installs.
iceweasel
echo "----- installing iceweasel config"
#Grab default profile directory name and add user.js file - that disables recovery on crash
icedir=$(ls /home/pi/.mozilla/firefox | grep .default)
sudo mv user.js /home/pi/.mozilla/firefox/$icedir/
echo "----- installing xdotool"
#Xdotool allows key stroke generation to put iceweasel into full screen with F11
sudo apt-get -y install xdotool
#Generic things:
#echo "----- installing screen saver"
#sudo apt-get -y install xscreensaver
echo "----- installing unclutter"
sudo apt-get -y install unclutter
echo "----- installing NGINX"
sudo apt-get -y install nginx
echo "----- moving files"
sudo mv part1.html /home/pi/
sudo mv part2.html /home/pi/
sudo mv http_server.py /home/pi
sudo mv index.css /var/www/html/
echo "----- setting up Avahi"
# Avahi advertises the kiosk's services (mDNS) on the local network.
sudo apt-get -y install avahi-daemon avahi-dnsconfd avahi-discover avahi-utils
sudo apt-get -y install libnss-mdns
cd /home/pi/raspberry-iceweasel/avahi_services
sudo insserv avahi-daemon/
sudo mv multiple.service /etc/avahi/services/
sudo mv rfb.service /etc/avahi/services/
sudo /etc/init.d/avahi-daemon restart
cd /home/pi/raspberry-iceweasel
echo "----- setting up startup"
# Register the kiosk startup script as a SysV init service.
sudo mv startup /etc/init.d/
cd /etc/init.d/
sudo chmod +x startup
sudo update-rc.d startup defaults
echo "----- setting up vnc"
cd /home/pi/raspberry-iceweasel
sudo bash installx11vnc.sh
cd /home/pi
echo "...all done."
| true
|
04d7244ed49eb6ad3c68a80bc8940fcf632f887d
|
Shell
|
bottkars/azs-concourse
|
/ci/scripts/scale-aks.sh
|
UTF-8
| 1,731
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Concourse task: scale an AKS-engine cluster on Azure Stack and publish the
# updated apimodel/kubeconfig/installation artifacts with a timestamp suffix.
# Required env: CA_CERT, AZURE_CLI_CA_PATH, LOCATION, AKS_RESOURCE_GROUP,
#   AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET,
#   AKS_AGENT_0_NEW_NODE_COUNT, APIMODEL_FILE, KUBECONFIG_FILE,
#   INSTALLATION_FILE.
set -eu
echo "${CA_CERT}" >> ${AZURE_CLI_CA_PATH} # beware in "" for keep as single literal
#az cloud register -n AzureStackUser \
#--endpoint-resource-manager ${ENDPOINT_RESOURCE_MANAGER} \
#--suffix-storage-endpoint ${SUFFIX_STORAGE_ENDPOINT} \
#--suffix-keyvault-dns ${VAULT_DNS} \
##--profile ${PROFILE}
#az cloud set -n AzureStackUser
#az cloud list --output table
#az login --service-principal \
# -u ${AZURE_CLIENT_ID} \
# -p ${AZURE_CLIENT_SECRET} \
# --tenant ${AZURE_TENANT_ID}
#az account set --subscription ${AZURE_SUBSCRIPTION_ID}
# Unpack the pinned aks-engine release provided by the "aks-engine" input.
TAG=$(cat aks-engine/tag)
tar xzf aks-engine/aks-engine-${TAG}-linux-amd64.tar.gz
# Make aks-engine trust the Azure Stack CA bundle written above.
export SSL_CERT_FILE=${AZURE_CLI_CA_PATH}
aks-engine-${TAG}-linux-amd64/aks-engine scale \
    --azure-env AzureStackCloud \
    --location ${LOCATION} \
    --resource-group ${AKS_RESOURCE_GROUP} \
    --subscription-id ${AZURE_SUBSCRIPTION_ID} \
    --client-id ${AZURE_CLIENT_ID} \
    --client-secret ${AZURE_CLIENT_SECRET} \
    --api-model current-installation/${AKS_RESOURCE_GROUP}/apimodel.json \
    --new-node-count ${AKS_AGENT_0_NEW_NODE_COUNT} --debug
# Stamp the output artifact names with the run time; envsubst replaces only
# $timestamp inside the *_FILE templates.
timestamp="$(date '+%Y%m%d.%-H%M.%S+%Z')"
export timestamp
APIMODEL_OUTPUT_FILE="$(echo "$APIMODEL_FILE" | envsubst '$timestamp')"
cp current-installation/${AKS_RESOURCE_GROUP}/apimodel.json apimodel/"$APIMODEL_OUTPUT_FILE"
KUBECONFIG_OUTPUT_FILE="$(echo "$KUBECONFIG_FILE" | envsubst '$timestamp')"
cp current-installation/${AKS_RESOURCE_GROUP}/kubeconfig/kubeconfig.*.json kubeconfig/"${KUBECONFIG_OUTPUT_FILE}"
INSTALLATION_OUTPUT_FILE="$(echo "$INSTALLATION_FILE" | envsubst '$timestamp')"
pushd current-installation
zip -qq -r $OLDPWD/aks-installation/"${INSTALLATION_OUTPUT_FILE}" ${AKS_RESOURCE_GROUP}
popd
| true
|
a2cba6fd7077f37f15ca97f86e959bfe9e0a7825
|
Shell
|
solutionDrive/sdSwPluginManager
|
/etc/scripts/test/installShopware.sh
|
UTF-8
| 1,586
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# Download Shopware 5.4.6 into $PROJECT_HOME, patch its config.php database
# settings in place with ed, and run the non-interactive installer.
# Required env: PROJECT_HOME, MYSQL_DATABASE, WEB_HOST.
SHOPWARE_VERSION=5.4.6
SHOPWARE_URL=http://releases.s3.shopware.com.s3.amazonaws.com/install_5.4.6_f667f6471a77bb5af0c115f3e243594e6353747e.zip
if [ -z "${PROJECT_HOME}" ]; then
	# Was "$$PROJECT_HOME", which expanded $$ to the shell's PID instead of
	# printing the literal variable name.
	echo "\$PROJECT_HOME must be set!"
	exit 1
fi
DB_HOST=mysql
DB_PORT=3306
DB_USERNAME=root
DB_PASSWORD=root
# Literal PHP expression written into config.php (not expanded here).
DB_DATABASE="getenv('MYSQL_DATABASE')"
CONFIG_FILE=${PROJECT_HOME}/config.php
cd "${PROJECT_HOME}"
wget -O install.zip "${SHOPWARE_URL}"
unzip install.zip
# write config
# Each line feeds ed a substitute command plus "w"/"q"; '~' is the
# delimiter so the values may contain slashes.
printf '%s\n' ",s~'host' => '.*'~'host' => '${DB_HOST}'~g" w q | ed -s "${CONFIG_FILE}"
printf '%s\n' ",s~'port' => '.*'~'port' => '${DB_PORT}'~g" w q | ed -s "${CONFIG_FILE}"
printf '%s\n' ",s~'username' => '.*'~'username' => '${DB_USERNAME}'~g" w q | ed -s "${CONFIG_FILE}"
printf '%s\n' ",s~'password' => '.*'~'password' => '${DB_PASSWORD}'~g" w q | ed -s "${CONFIG_FILE}"
printf '%s\n' ",s~'dbname' => '.*'~'dbname' => ${DB_DATABASE}~g" w q | ed -s "${CONFIG_FILE}"
# install shopware including database
php recovery/install/index.php \
    --no-interaction \
    --quiet \
    --no-skip-import \
    --db-host="${DB_HOST}" \
    --db-user="${DB_USERNAME}" \
    --db-password="${DB_PASSWORD}" \
    --db-name="${MYSQL_DATABASE}" \
    --shop-locale="de_DE" \
    --shop-host="${WEB_HOST}" \
    --shop-path="" \
    --shop-name="Testshop" \
    --shop-email="sdadmin@sd.test" \
    --shop-currency="EUR" \
    --admin-username="sdadmin" \
    --admin-password="sdadmin" \
    --admin-email="sdadmin@sd.test" \
    --admin-name="sdadmin" \
    --admin-locale="de_DE"
# Hand ownership to the web server user.
chown -R www-data:www-data ${PROJECT_HOME}/
| true
|
1bd08f3c4928c2978737d1cfb5fe6ea7d3ece157
|
Shell
|
xandriatiu/Post-it-later
|
/bin/sync_dev.sh
|
UTF-8
| 849
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Sync the local checkout with upstream/dev, (re)create the project
# virtualenv, install dependencies, and run Django migrations/collectstatic.
# Layout assumption: this script lives in bin/, the project one level up,
# and the virtualenv two levels up in ../env.
PROJECT_NAME=''
SCRIPT_SOURCE=`dirname "$BASH_SOURCE"`
PROJ_DIR=$SCRIPT_SOURCE/..
ENV_DIR=$SCRIPT_SOURCE/../../env
echo 'FETCHING LATEST CHANGES...'
git fetch upstream
echo 'MERGING LATEST CHANGES TO DEV...'
git rebase upstream/dev
echo 'CHECKING VIRTUAL ENVIRONMENT...'
# Leave any currently active virtualenv; create ours if it doesn't exist.
if [ -x "$(command -v deactivate)" ]; then deactivate; fi
test -d $ENV_DIR || virtualenv --prompt="($PROJECT_NAME)" $ENV_DIR
echo 'ACTIVATING VIRTUALENV...'
source $ENV_DIR/bin/activate
echo 'INSTALLING DEPENDENCIES...'
pip install -q -r $PROJ_DIR/requirements.txt
echo "CREATING DATABASE MIGRATIONS..."
python $PROJ_DIR/manage.py makemigrations
echo "APPLYING MIGRATIONS..."
python $PROJ_DIR/manage.py migrate
echo "COLLECTING STATIC FILES..."
python $PROJ_DIR/manage.py collectstatic
echo 'DONE!'
echo 'You can now activate virtualenv and runserver.'
| true
|
799ef2a24ec8d58faea682858151bd9d13abb7fa
|
Shell
|
sqaxomonophonen/femtopus
|
/tools/exporters/export_lump.sh
|
UTF-8
| 190
| 2.890625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Export a "lump" from a .blend file by driving Blender headless with the
# export_lump.py script that lives next to this launcher.
#
# usage: export_lump.sh <lump.blend> [extra-arg]
if [ -z "$1" ] ; then
	# ">&2" is the portable stderr redirect; the /dev/stderr device node is
	# missing in some chroot/container environments.
	echo "usage: $0 <lump.blend>" >&2
	exit 1
fi
dir="$( dirname "${BASH_SOURCE[0]}" )"
# Quote the paths so .blend files with spaces survive; ${2:+"$2"} passes $2
# only when it is non-empty, preserving the original "no second arg" case.
blender -noaudio -b "$1" -P "$dir/export_lump.py" -- ${2:+"$2"}
| true
|
3fc5e274d4ce364ef134998c8ec32659202dc7c8
|
Shell
|
pangr/testing
|
/shiyan3.sh
|
UTF-8
| 1,516
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Lab-exercise menu script (prompts/messages are in Chinese): offers five
# file-manipulation demos (A-E) in a loop until the user chooses Q.
MYDATE=`date +%d/%m/%Y`
MYHOST=`hostname -s`
USER=`whoami`
tput clear
cat <<MAYDAY
_____________________________________________________________________________
User: $USER Host: $MYHOST Date: $MYDATE
_____________________________________________________________________________
A: 创建子目录并复制文件
B:文件权限测试
C:文件字符转换
D:文件总数统计
E:文本文件行数统计
Q:退出
_____________________________________________________________________________
MAYDAY
while :
do
echo -e -n "请选择:>"
read choos
case $choos in
# A: recreate directory "庞荣", copy file1/file2 into it, display and then
# truncate the copies.
# NOTE(review): `chmod 755 庞荣 \`mkdir 庞荣\`` relies on the command
# substitution running mkdir (with empty output) before chmod — probably
# intended as two separate commands; confirm.
A) if [ -d 庞荣 ]
then
rm -r 庞荣
fi
chmod 755 庞荣 `mkdir 庞荣`
echo -e "目录建立成功!"
cp file1 庞荣/file1
cp file2 庞荣/file2
cat 庞荣/file1
cat 庞荣/file2
>庞荣/file1
>庞荣/file2
;;
# B: show the permission bits of a user-supplied file.
B)read file
ls -l $file
;;
# C: upper-case file1/file2 into *.UC copies and display them.
C)for files in file1 file2
do
cat $files|tr "[a-z]" "[A-Z]" >$files.UC
cat $files.UC
done
;;
# D: count directories and symlinks under /dev.
D)num=`ls -Al /dev|grep ^d|wc -l`
echo "目录文件有:" $num
nue=`ls -Al /dev|grep ^l|wc -l`
echo "符号链接文件有:" $nue
;;
# E: line-count a user-supplied file.
E)read file
wc -l $file
;;
# Q: leave the script ("break" after "exit" is unreachable).
Q)exit
break
;;
esac
#read DUMMY
done
| true
|
72d3748c2b074cff042372d0e848cf4e168eec98
|
Shell
|
biocodellc/ppo-data-pipeline
|
/run.sh
|
UTF-8
| 564
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the ontology-data-pipeline for one project's processed data file.
#
# usage: run.sh PROJECT
PROJECT=$1
if [[ -z $PROJECT ]]
then
    echo "Usage: non_docker_run.sh {PROJECT}"
    echo ""
    echo "This bash script runs the pipeline for any INPUT_DATAFILE and places output in the specified OUTPUT_DIRECTORY."
    exit 0
fi
echo "processing incoming data file data/$PROJECT/processed/data.csv"
# NOTE(review): the file:/Users/jdeck/... ppo.owl path is developer-local;
# confirm it is valid in the deployment environment.
python ../ontology-data-pipeline/pipeline.py \
    -v --drop_invalid \
    --num_processes 1 \
    "data/$PROJECT/processed/data.csv" \
    "data/$PROJECT/processed" \
    file:/Users/jdeck/IdeaProjects/ppo-data-pipeline/config/ppo.owl \
    config
| true
|
956c643eae1807de2907e73a74421c3ab7c795d1
|
Shell
|
tarwinmuralli/dotfiles
|
/bash/.bash_profile
|
UTF-8
| 1,266
| 2.515625
| 3
|
[] |
no_license
|
# Login-shell profile: pulls in .bashrc, extends PATH, sets default
# programs, relocates dotfiles per the XDG base-dir spec, and auto-starts X
# on tty1.
[ -f "$HOME/.bashrc" ] && . "$HOME"/.bashrc
[ -d "$HOME/.local/bin" ] && export PATH="$HOME/.local/bin:$PATH"
[ -d "$HOME/.scripts" ] && export PATH="$HOME/.scripts:$PATH"
[ -d /sbin ] && export PATH="/sbin:$PATH"
[ -d /usr/sbin ] && export PATH="/usr/sbin:$PATH"
# Default applications.
export WM="bspwm"
export EDITOR="nvim"
export TERMINAL="st"
export BROWSER="firefox"
export READER="zathura"
# Drop duplicate and space-prefixed commands from history.
export HISTCONTROL=ignoreboth:erasedups
# ~/ Cleanup
export XDG_CONFIG_HOME="$HOME"/.config
export XDG_DATA_HOME="$HOME"/.local/share
export XDG_CACHE_HOME="$HOME"/.cache
export HISTFILE="$XDG_DATA_HOME"/bash/history
export XINITRC="$XDG_CONFIG_HOME"/X11/xinitrc
export XAUTHORITY="$XDG_RUNTIME_DIR"/Xauthority
export ANDROID_SDK_HOME="$XDG_CONFIG_HOME"/android
export ADB_VENDOR_KEY="$XDG_CONFIG_HOME"/android
export ANDROID_AVD_HOME="$XDG_DATA_HOME"/android/
export ANDROID_EMULATOR_HOME="$XDG_DATA_HOME"/android/
export _JAVA_OPTIONS=-Djava.util.prefs.userRoot="$XDG_CONFIG_HOME"/java
export GOPATH="$XDG_DATA_HOME"/go
export GTK2_RC_FILES="$XDG_CONFIG_HOME"/gtk-2.0/gtkrc
export GNUPGHOME="$XDG_DATA_HOME"/gnupg
export WGETRC="$XDG_CONFIG_HOME"/wget/wgetrc
# "-" disables the less history file entirely.
export LESSHISTFILE=-
# START X SERVER
# On tty1: start X unless the window manager is already running.
[ "$(tty)" = "/dev/tty1" ] && pgrep ${WM:-bspwm} || startx "$XDG_CONFIG_HOME/X11/xinitrc"
| true
|
c262a9f355813d3cf952b2f35a612db10bc6fad2
|
Shell
|
keto/handy-scripts
|
/bin/assh
|
UTF-8
| 911
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# Avahi ssh client script
#
# Replacement for bssh from avahi-utils
#
# Searches for available ssh services using avahi-browse and displays a
# selection dialog using zenity. All arguments are passed to ssh.
#
# Environment variables:
#   ASSH_IPV - Define IP version to display
#              Default is "4", use "6" for only IPv6 and "46" for both
#
# (C) Keto, 2010-2011

# Set the terminal title to "ASSh".
echo -ne "\033]2;ASSh\007"

# Browse for resolved _ssh._tcp services of the wanted IP version and keep:
# interface, IP version, host, address, port.
LIST=$(avahi-browse -tpr _ssh._tcp|
	grep -e "^=;.*;IPv[${ASSH_IPV:-4}]"|
	cut -d ';' -f 2,3,4,8,9 --output-delimiter ' ')

# $LIST is intentionally unquoted: zenity expects each of the 5 fields per
# service as a separate argument.
SELECTION=`zenity --list --title "ASSh" --text "Select host" \
	--column "if" --column "ipv" --column "host" --column "address" \
	--column "port" --print-column ALL --separator ' ' $LIST`

if [ ! -z "$SELECTION" ]; then
	DATA=($SELECTION)
	HOST=${DATA[3]}
	PORT=${DATA[4]}
	# IPv6 link-local addresses need the interface as a scope id (addr%if).
	[ "${DATA[1]}" = "IPv6" ] && HOST=$HOST%${DATA[0]}
	# Quote port/host and forward extra arguments with "$@" so arguments
	# containing spaces reach ssh intact (was unquoted $@).
	ssh -p "$PORT" "$HOST" "$@"
fi
| true
|
1a0dbbb51a260119157e838c822ce2bde1ab0f8e
|
Shell
|
skmthomassen/projectOWL
|
/record/recordstreams.sh
|
UTF-8
| 1,689
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# (Fixed shebang: was "#/bin/bash" — a plain comment, so the kernel fell
# back to the caller's shell.)
# Record RTSP video from two IP cameras plus ALSA audio for a fixed time,
# writing 10-minute segments into ./clips and a parallel job log to ./logs.
#
# usage: recordstreams.sh SECONDS_TO_RECORD
if [ $# -ne 1 ] ; then
	echo "ERROR: Please supply time to record"
	exit 10
fi
TIME=$1
SEGTIME="600"
# bash builtin SECONDS counts elapsed wall-clock time for the summary below.
SECONDS=0
IP0="192.168.0.200"
IP1="192.168.0.201"
IP2="192.168.0.202"
IP3="192.168.0.203"
IP2UP=1
IP3UP=1
AUDIOUP=1
# Per-run job number persisted in ./suffix.
SUF=$(head -n 1 $PWD/suffix)
SUF=$(( $SUF + 1 ))
echo $SUF > $PWD/suffix
echo "---Starting recording job no: $SUF---"
#Testing for devices
if ping -c 1 $IP2 &> /dev/null ; then
	IP2UP=$((IP2UP-1))
else echo "No camera at IP:$IP2 was found"
fi
if ping -c 1 $IP3 &> /dev/null ; then
	IP3UP=$((IP3UP-1))
else echo "No camera at IP:$IP3 was found"
fi
if [ "$IP2UP" -eq 1 ] && [ "$IP3UP" -eq 1 ] ; then
	echo "ERROR: No cameras where found"
	echo "No reason to live... Will exit..."
	exit 10
fi
echo "Will record for:" $(($1/86400))" days "$(date -d "1970-01-01 + $1 seconds" "+%H hours %M minutes %S seconds")
echo
# Run all three captures concurrently; parallel logs exit codes per job.
parallel --progress --verbose --joblog $PWD/logs/$SUF.log ::: \
"ffmpeg -hide_banner -thread_queue_size 512 -rtsp_transport tcp -i rtsp://$IP2/av0_0 -f segment \
-segment_time $SEGTIME -y -c:v copy -segment_format mpegts -t $TIME "$PWD/clips/$SUF-202-%03d.ts"" \
"ffmpeg -hide_banner -thread_queue_size 512 -rtsp_transport tcp -i rtsp://$IP3/av0_0 -f segment \
-segment_time $SEGTIME -y -c:v copy -segment_format mpegts -t $TIME "$PWD/clips/$SUF-203-%03d.ts"" \
"ffmpeg -hide_banner -thread_queue_size 512 -f alsa -i hw:1 -y -f segment \
-segment_time $SEGTIME -segment_format aac -acodec aac -t $TIME "$PWD/clips/$SUF-audio-%03d.aac""
DUR=$SECONDS
echo "Actual time recorded:" $(($DUR/86400))" days "$(date -d "1970-01-01 + $DUR seconds" "+%H hours %M minutes %S seconds")
echo "---Ending recording job no: $SUF---"
exit 0
| true
|
094f2fe3acdcb8a79a42bc8ca93d8750dab3af9a
|
Shell
|
symphoniacloud/ccstar
|
/build.sh
|
UTF-8
| 616
| 3.34375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Package the Lambda source code into ./target/prod-lambda.zip.
# Shebang changed sh->bash: the script uses brace expansion and
# "set -o pipefail", neither of which plain /bin/sh guarantees.
set -euo pipefail
# Package Lambda source code
# TODO - see if we can get sam build to do this nicely, e.g. with Makefile
PACKAGE_DIR=".temp-deploy-package"
TARGET_DIR="target"
LAMBDA_ZIP_FILE="prod-lambda.zip"
# Unconditional rm -rf: the old '[ -d d ] && rm -rf d' form returns 1 when
# the directory is absent, which aborts the whole script under "set -e".
rm -rf "$PACKAGE_DIR" "$TARGET_DIR"
mkdir "$PACKAGE_DIR"
mkdir "$TARGET_DIR"
cd "$PACKAGE_DIR"
cp ../{package.json,package-lock.json} .
cp -rp ../src/* .
# Production-only dependencies go into the zip.
npm install --prod
rm package.json package-lock.json
zip "../$TARGET_DIR/$LAMBDA_ZIP_FILE" -r *
cd ..
rm -rf "$PACKAGE_DIR"
echo
echo "Lambda zip file packaged at ./$TARGET_DIR/$LAMBDA_ZIP_FILE"
echo
| true
|
15fafacd7762f91838d56c310a4bcf8ce4bf96ea
|
Shell
|
anLA7856/shell
|
/src/main/java/anla/shell/guessNum.sh
|
UTF-8
| 1,146
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# (Shebang changed sh->bash: the script uses $RANDOM, read -p and &>.)
# Apple-price guessing game; all user-facing messages are in Chinese.
total=0
# Fixed locale name (was "zh_CN.UTF=8", an invalid locale string).
export LANG="zh_CN.UTF-8"
# Random target price in 0..60.
NUM=$((RANDOM%61))
echo "当前苹果的价格是没斤 $NUM 元"
echo "=============================="
usleep 1000000
clear
echo '请问这苹果多少钱一斤?请猜0~60的数字'
# Prompt for a numeric price; re-prompt on non-numeric input.  "|| exit 1"
# bails on EOF so a closed stdin cannot cause unbounded recursion.
apple() {
	read -p "请输入价格:" PRICE || exit 1
	# expr fails (non-zero) when PRICE is not a number.
	expr $PRICE + 1 &>/dev/null
	if [ $? -ne 0 ];then
		echo "老哥,赶紧猜数字"
		apple
	fi
}
# Compare the guess to NUM, track the attempt count, and rate the player
# when the guess is correct.
guess() {
	((total++))
	if [ $PRICE -eq $NUM ]; then
		echo "厉害了我的老哥,猜对了,就是$NUM元"
		if [ $total -le 3 ];then
			echo "一共猜了$total次,厉害了"
		elif [ $total -gt 3 -a $total -le 6 ]; then
			echo "猜了$total次,加油哦"
		elif [ $total -gt 6 ]; then
			echo "一共猜了$total次,行不行"
		fi
		exit 0
	elif [ $PRICE -gt $NUM ]; then
		echo "嘿嘿,这个高了,再给你一次机会"
		apple
	elif [ $PRICE -lt $NUM ]; then
		echo "价格猜低了,再试一次"
		apple
	fi
}
main() {
	apple
	while true
	do
		guess
	done
}
main
| true
|
10be821396c233c857ee435565ce305ac52b1d94
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/qhttpserver/PKGBUILD
|
UTF-8
| 788
| 2.578125
| 3
|
[] |
no_license
|
# Maintainer: Thomas Wucher <arch@thomaswucher.de>
pkgname=qhttpserver
pkgver=r134.ca8f327
pkgrel=1
pkgdesc="A Qt HTTP Server - because hard-core programmers write web-apps in C++ :)"
# Fixed ARM arch names: Arch Linux ARM uses "armv6h"/"armv7h"
# (the previous "arm6h"/"arm7h" are not recognized architectures).
arch=('i686' 'x86_64' 'armv6h' 'armv7h')
url="https://github.com/nikhilm/qhttpserver"
license=('custom')
groups=()
depends=(qt5-base)
makedepends=('git')
replaces=()
backup=()
options=()
install=
source=('qhttpserver::git+https://github.com/nikhilm/qhttpserver.git')
noextract=()
md5sums=('SKIP')
# VCS version: revision count plus short commit hash, e.g. r134.ca8f327.
pkgver() {
  cd "$srcdir/${pkgname}"
  printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
build() {
  cd "$srcdir/${pkgname}"
  qmake
  make
}
# Install headers and the built library; upstream ships no "make install".
package() {
  cd "$srcdir/${pkgname}"
  mkdir -p "$pkgdir/usr/include"
  cp src/*.h "$pkgdir/usr/include/"
  cp -r lib "$pkgdir/usr/"
}
| true
|
63891908e7a9425ed02cce998be4172fcb7a2ae4
|
Shell
|
robertord/Shell-scripts
|
/renameTVShows.sh
|
UTF-8
| 3,515
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# File: changeDownloadasName.sh
# Author: Roberto Rosende Dopazo
# <roberto[dot]rosende[dot]dopazo[at]gmail[dot]com>
# Script renames files *.avi or *.mkv inside a dir
# it puts first name of dir to the file and then "clean" name
#
# Pass 1: for each subdirectory, move its single .avi/.mkv up, named after
#         the (cleaned) directory, and delete the directory.
# Pass 2: strip release-group / quality tags from every remaining filename.
if [ $# -lt 1 ];
then
	echo -e "\tUsage: changeDownloadsName.sh <path/with/dirs/to/rename>"
	exit 1
fi
if [ ! -d "$1" ];
then
	echo -e "\tUsage: changeDownloadsName.sh <path/with/dirs/to/rename>"
	echo -e "\tError: $1 is not a valid directory"
	exit 1
fi
# Destructive operation: require explicit confirmation first.
read -p " - You want change $1, .avi or .mkv will be renamed and dir removed. Agree?[y/n] " answer
if [[ $answer != y ]] ; then
	exit 0;
fi
PATHD=`readlink -f .`/
# Restrict word splitting to newlines so paths with spaces survive the
# read loops below; restored at the end of the script.
SAVEIFS=$IFS
IFS=$(echo -en "\n\b")
find $1 -mindepth 1 -type d |
while read -r DIR
do
	if [ "$DIR" != "$PATHD" ]&& [ "$DIR" != ".." ];
	then
		# Clean the directory name: brackets become dots, trailing dot dropped.
		DIRNEWNAME=`
		echo ${DIR} | \
		sed -e 's/\[/./gI' | \
		sed -e 's/\]/./gI' | \
		sed -e 's/\.$//gI' \
		`
		DIRNEW=${DIRNEWNAME}
		#if [ "${DIR}" != "${DIRNEW}" ];
		#then
			mv "$DIR"/*.avi "$DIRNEW.avi"
			mv "$DIR"/*.mkv "$DIRNEW.mkv"
			rm -rf $DIR
		#fi
	fi
done
##now rename all files erasing unwanted parts
# NOTE(review): "-type f -iname '*.avi' -o -iname '*.mkv'" — with find's
# operator precedence, -type f binds only to the .avi branch; confirm
# whether .mkv matches were also meant to be restricted to regular files.
find $1 -type f -iname "*.avi" -o -iname "*.mkv" |
while read -r FILE
do
	# Strip tracker names, quality/codec tags and season markers, then
	# collapse separators/whitespace into single dots.
	FILENEW=`
	echo $FILE | \
	sed 's/\[/./gI' | \
	sed 's/\]/./gI' | \
	sed 's/www.newpct.com//gI' | \
	sed 's/newpct.com//gI' | \
	sed 's/www.newpcestrenos.com//gI' | \
	sed 's/newpcestrenos.com//gI' | \
	sed 's/www.pctestrenos.com//gI' | \
	sed 's/pctestrenos.com//gI' | \
	sed 's/(EliteTorrent.net)//gI' | \
	sed 's/HDTV 720px//gI' | \
	sed 's/HDTV 720p//gI' | \
	sed 's/HDTV 720//gI' | \
	sed 's/HDTV //gI' | \
	sed 's/HDV//gI' | \
	sed 's/HQ//gI' | \
	sed 's/TV//gI' | \
	sed 's/-NoTV//gI' | \
	sed 's/REPACK//gI' | \
	sed 's/DVDRip//gI' | \
	sed 's/DSRRIP//gI' | \
	sed 's/BluRayRIP//gI' | \
	sed 's/BluRay Rip//gI' | \
	sed 's/Ac3 5.1//gI' | \
	sed 's/Ac3 2.0//gI' | \
	sed 's/AC3//gI' | \
	sed 's/FRENCH//gI' | \
	sed 's/XViD//gI' | \
	sed 's/-PEPiTO//gI' | \
	sed 's/XviD-FQM//gI' | \
	sed 's/HD//gI' | \
	sed 's/VTV//gI' | \
	sed 's/-LOL//gI' | \
	sed 's/XviD-MiNT//gI' | \
	sed 's/Español Castellano//gI' | \
	sed 's/AC3 5.1 //gI' | \
	sed 's/Spanish//gI' | \
	sed 's/BRrip//gI' | \
	sed 's/ 5.1//gI' | \
	sed 's/-././gI' | \
	sed -e 's/()//gI' | \
	sed 's/\(Proper\)//gI' | \
	sed 's/Temporada \([0-9]\{1,\}\)//gI' | \
	sed 's/Temp.\([0-9]\{1,\}\)//gI' | \
	sed -e 's/[^a-zA-Z0-9]-[^a-zA-Z0-9]/ /gI' | \
	sed 's/[ ]\{2,\}/./gI' | \
	sed -e 's/\s\{1,\}[\.]/./gI' | \
	sed -e 's/\([\.]\{2,\}\)/./gI' \
	`
	# Only touch the file when cleaning actually changed the name.
	if [ "${FILE}" != "${FILENEW}" ];
	then
		echo -e "Renaming: \n\t $FILE \n\t\t into \n\t $FILENEW"
		mv "${FILE}" "${FILENEW}"
	fi
done
IFS=$SAVEIFS
exit 0
| true
|
94538bdc9ebbe9ef8baad7c2c2f58bfc4a10bcc8
|
Shell
|
stokuvg/direct-type-ios
|
/switchApi.sh
|
UTF-8
| 756
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Switch the iOS project to the server-side-development ("Api") build
# configuration by copying the Api variants of the config files into place,
# then regenerating everything with rebuildAll.sh.
echo 'サーバ側開発向けビルドに切り替えます'
# \cp bypasses any cp alias (e.g. cp -i) so the overwrite never prompts.
\cp -f ./_BuildMode/awsconfiguration_Api.json ./direct-type/awsconfiguration.json
\cp -f ./_BuildMode/BuildMode_Api.swift ./direct-type/BuildMode.swift
## api.json's model definitions may differ between Dev and Rel builds, so
## switching can introduce build errors — keep that in mind.
\cp -f ./_BuildMode/api_Api.json ./api/api.json
sh rebuildAll.sh
echo ''
echo '********************************************************************************'
echo '* 【BuildMode.swift】が「ApiDev」に設定されているか確認してビルドしてください *'
echo '********************************************************************************'
echo ''
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.