blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
3cf1d6f9d82fb72171ed56572ca5b116663e589c | Shell | IS-ENES-Data/CMIP6_replication | /util/MakeSelectionFiles/getSelectionFileList.sh | UTF-8 | 2,979 | 3.421875 | 3 | [] | no_license | #! /bin/bash
# Collect, for the activity named in $1, the comma-separated experiment ids
# into ${act}_exps and their tiers into ${act}_tier by line-scanning the
# CMIP6 experiment_id CV JSON file.
# Memoized: a second call for the same activity returns immediately.
# NOTE(review): relies on the exact 8-space indentation of the JSON file.
getExps()
{
local curr_act=$1
# Memoization guard: skip the scan if this activity was already processed.
eval test \${${curr_act}_exps} && return
local count=0
local act line is_act
while read line ; do
# Skip the first two lines of the JSON file (header/opening braces).
if [ ${count} -lt 2 ] ; then
count=$(( count +1 ))
continue
fi
# An 8-space-indented `"name":{` line opens a new experiment record.
e=$( expr match ${line} ' \{8\}"\(.*\)":{' )
if [ $e ] ; then
exp=$e
continue
fi
# Inside the experiment's "activity_id" array: remember the experiment if
# it belongs to the requested activity.
if [ ${line// /} = '"activity_id":[' ] ; then
while read line ; do
line=${line// /}
test "${line}" = '],' && break
act=$( expr match ${line} ' *"\(.*\)"' )
if [ ${curr_act} = $act ] ; then
# Append to the activity's comma-terminated experiment list.
eval ${act}_exps=\${${act}_exps}${exp},
is_act=t
fi
done
continue
fi
# Record the tier ("T<n>,") for the experiment matched just above.
t=$( expr match ${line} ' *"tier":"\(.*\)"' )
if [ ${#t} -gt 0 -a ${is_act:-f} = t ] ; then
eval ${act}_tier=\${${act}_tier}T${t},
is_act=f
fi
done < /hdh/hdh/QA_Tables/tables/projects/CMIP6/CMIP6_CVs/CMIP6_experiment_id.json
return
}
# Disable word splitting so `read` keeps each JSON line intact.
IFS=''
set -x
csv=''
count=0
acts=()
test ! -d /hdh/hdh/Selection/Activity && mkdir -p /hdh/hdh/Selection/Activity
cd /hdh/hdh/Selection/Activity
\rm -f *.csv
# Scan the CMIP6 source_id CV: for each model, read its activities and
# institutions, then emit one CSV row per (activity, institution, model,
# experiment, tier) combination into <activity>.csv.
while read line ; do
if [ ${count} -lt 2 ] ; then
count=$(( count +1 ))
continue
fi
# An 8-space-indented `"name":{` line opens a new model (source_id) record.
m=$( expr match ${line} ' \{8\}"\(.*\)":{' )
if [ ! ${model} ] ; then
model=$m
continue
fi
if [ ${line// /} = '"activity_participation":[' ] ; then
while read line ; do
line=${line// /}
test "${line}" = '],' && break
acts[${#acts[*]}]=$( expr match ${line} ' *"\(.*\)"' )
done
fi
if [ ${line// /} = '"institution_id":[' ] ; then
while read line ; do
line=${line// /}
test "${line}" = '],' && break
inst=$( expr match ${line} ' *"\(.*\)"' )
for act in ${acts[*]} ; do
getExps ${act} # executed only once for each activity
eval act_str=\${${act}_exps}
eval tier_str=\${${act}_tier}
if [ ${act_str} ] ; then
act_last=$(( ${#act_str} -1 ))
act_str=${act_str:0:act_last} # strip trailing ,
tier_last=$(( ${#tier_str} -1 ))
tier_str=${tier_str:0:tier_last} # strip trailing ,
# Walk the experiment and tier lists in lock step, emitting one CSV
# line per experiment of this activity.
while [ ${act_str} ] ; do
a=${act_str%%,*}
act_str=${act_str#*,}
b=${tier_str%%,*}
tier_str=${tier_str#*,}
echo "${act},${inst},${model},${a},${b}" >> ${act}.csv
test "$a" = "${act_str}" && break
done
else
# Activity has no experiments: report it once, tracked via empty_act[].
for(( i=0 ; i < ${#empty_act[*]} ; ++i )) ; do
test $act == ${empty_act[i]} && break
done
if [ $i -eq ${#empty_act[*]} ] ; then
empty_act[${#empty_act[*]}]=$act
echo "act without any experiment: ${act}"
fi
fi
done
done
# Reset per-model state before the next record.
model=''
acts=()
fi
#count=$(( count +1 ))
#test ${count} -gt 20 && exit
done < /hdh/hdh/QA_Tables/tables/projects/CMIP6/CMIP6_CVs/CMIP6_source_id.json
| true |
0d8b0a2be5f972d1ca57697481f90912ad0453d4 | Shell | tmck-code/dotfiles | /bin/bin/dk | UTF-8 | 1,564 | 3.765625 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# 2018-03-31 17:19:21 +1100 AEDT memcached latest 784b59c50820
# Thin wrappers around docker CLI listing/counting commands.
function ls_images() { docker images --format "table {{.CreatedAt}} {{.ID}} {{.Repository}}:{{.Tag}}"; }
# NB: intentionally shadows the system `ps` within this script.
function ps() { docker ps -a --format "table {{.RunningFor}}\t{{.ID}} {{.Names}}\t{{.Image}}"; }
function ls_networks() { docker network ls; }
function count_images() { docker images -q | wc -l; }
function count_containers() { docker ps -aq | wc -l; }
# Print a combined overview: networks, images, containers and totals.
# (Heredoc body is emitted verbatim; the $(...) calls run the helpers above.)
function dls() {
cat <<EOF
- Networks:
$(ls_networks)
- Images:
$(ls_images)
- Containers:
$(ps)
- Summary:
No. of images: $(count_images)
No. of containers: $(count_containers)
EOF
}
# Destructive cleanup helpers; each announces itself before acting.
function purge_containers () { echo -e "\n> Purging all containers"; docker ps -aq | xargs docker rm -f; }
function prune_volumes() { echo -e "\n> Pruning volumes"; docker volume ls -q | xargs docker volume prune -f; }
function prune_networks() { echo -e "\n> Pruning networks"; docker network prune -f; }
function cleanup_images () {
echo -e "\n> Removing dangling images"
docker images --filter=dangling=true -q | xargs docker rmi -f
}
# Remove all containers, dangling images and unused networks; when the
# option argument is "--volumes", also prune volumes (destructive, so it
# must be requested explicitly). Ends with a dls overview.
function cleanup () {
purge_containers
cleanup_images
prune_networks
# BUGFIX: removed a leftover debug line that echoed the test expression
# itself ('echo "$OPTS" [ "${OPTS:-}" == "--volumes" ]') as literal text.
[ "${OPTS:-}" == "--volumes" ] && prune_volumes
dls
}
# Block (polling once per second) until the Docker daemon answers `docker ps`.
function engine_status() {
until docker ps; do sleep 1; done
echo -e "\n> Docker Engine is up"
}
# Grab the 2nd command-line arg (if supplied) and use it for options
OPTS="${*:2}"
# Subcommand dispatch: ls / cleanup / st (engine status).
case ${1} in
"ls") dls;;
"cleanup") cleanup;;
"st") engine_status;;
esac
| true |
9d07f234e66a7385748353f6e0a17fa70054af1f | Shell | nysenate/Bluebird-CRM | /scripts/fixPermissions.sh | UTF-8 | 2,280 | 3.6875 | 4 | [] | no_license | #!/bin/sh
#
# fixPermissions.sh - Set Bluebird directory permissions appropriately.
#
# Project: BluebirdCRM
# Author: Ken Zalewski
# Organization: New York State Senate
# Date: 2010-09-13
# Revised: 2011-12-09
# Revised: 2014-02-26 - enforce read-only group access on template/ directory
# Revised: 2014-04-22 - enforce read-only group access on common/ directory
#
prog=`basename $0`
script_dir=`dirname $0`
readConfig=$script_dir/readConfig.sh
# Ownership/permission changes below require root.
if [ `id -u` -ne 0 ]; then
echo "$prog: This script must be run by root." >&2
exit 1
fi
. $script_dir/defaults.sh
# Resolve each managed directory from the Bluebird config, falling back to
# the defaults sourced above when the config key is absent.
appdir=`$readConfig --global app.rootdir` || appdir="$DEFAULT_APP_ROOTDIR"
datdir=`$readConfig --global data.rootdir` || datdir="$DEFAULT_DATA_ROOTDIR"
impdir=`$readConfig --global import.rootdir` || impdir="$DEFAULT_IMPORT_ROOTDIR"
webdir=`$readConfig --global drupal.rootdir` || webdir="$DEFAULT_DRUPAL_ROOTDIR"
# Owner/permission settings are optional; empty values mean "leave as-is".
appowner=`$readConfig --global app.rootdir.owner`
datowner=`$readConfig --global data.rootdir.owner`
impowner=`$readConfig --global import.rootdir.owner`
webowner=`$readConfig --global drupal.rootdir.owner`
appperms=`$readConfig --global app.rootdir.perms`
datperms=`$readConfig --global data.rootdir.perms`
impperms=`$readConfig --global import.rootdir.perms`
webperms=`$readConfig --global drupal.rootdir.perms`
# Trace each chown/chmod for auditability.
set -x
[ "$appowner" ] && chown -R "$appowner" "$appdir/"
[ "$appperms" ] && chmod -R "$appperms" "$appdir/"
[ "$datowner" ] && chown -R "$datowner" "$datdir/"
[ "$datperms" ] && chmod -R "$datperms" "$datdir/"
# kz: Kludge Alert: The images/template directory must be read-only so that
# Senators cannot delete their own header and footer images. I am chowning
# the directory to "root" so that only root can modify images there.
# In addition, the common/ directory must be locked down.
chown -R root "$datdir"/*/pubfiles/images/template
chmod -R go-w "$datdir"/*/pubfiles/images/template
chown -R root "$datdir"/common/
chmod -R go-w "$datdir"/common/
[ "$impowner" ] && chown -R "$impowner" "$impdir/"
[ "$impperms" ] && chmod -R "$impperms" "$impdir/"
[ "$webowner" ] && chown -R "$webowner" "$webdir/"
[ "$webperms" ] && chmod -R "$webperms" "$webdir/"
# The Bluebird config file should have the strictest permissions.
cfgpath=`$readConfig`
chmod g-wx,o= "$cfgpath"
exit 0
| true |
45510c72bd24157ceeba19de536b24fb022224d8 | Shell | rsenn/scripts | /sh/mkloglinks.sh | UTF-8 | 241 | 3.390625 | 3 | [] | no_license | #!/bin/sh
# For every runit service directory sv/<a>/<b>/.../log that has an
# executable ./run, link its "main" log dir to /var/log/<a>.<b>....
find sv \
-type d \
-name "log" \
-and -not -wholename "*/.*" |
{
# Prepend "." and "/" to IFS: `set` below then splits the path on slashes
# (and dots), and "$*" re-joins the words with the FIRST IFS char, ".".
IFS="./$IFS"
while read DIR; do
if [ -x "$DIR/run" ]; then
# Split "sv/foo/bar" (log suffix stripped) into positional params.
set ${DIR%/log}
# Drop the leading "sv" component.
shift
# "$*" joins the remaining components with "." -> /var/log/foo.bar
ln -sf "/var/log/$*" "$DIR/main"
fi
done
}
| true |
fc441ea69c1b1a488b9a4c3827e8cd5bcd4a7659 | Shell | gooroom/gooroom-exe-protector | /data/genkey_vendor.sh | UTF-8 | 775 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Sign a vendor IMA CSR with the Gooroom system CA, emitting a DER x509.
# Usage: genkey_vendor.sh [KEY_PATH] [vendor_name]   (defaults: . / v3)
if [ $# -eq 0 ]; then
export KEY_PATH=.
export vendor_name=v3
elif [ -z "$1" ] || [ -z "$2" ] ; then
echo -e ">>> $0 <KEY_PATH> <csr_v3_ima name>"
exit 1;
else
export KEY_PATH=$1
export vendor_name=$2
fi
if [ -e $KEY_PATH/csr_${vendor_name}_ima.pem ]; then
# BUGFIX: sign the vendor-specific CSR that the guard above checked for;
# the original passed the unrelated csr_ima.pem to -in.
openssl x509 -req -in $KEY_PATH/csr_${vendor_name}_ima.pem -days 36500 \
-extfile ima_extension \
-extensions v3_usr \
-CA $KEY_PATH/gooroom_x509_system.pem \
-CAkey $KEY_PATH/gooroom_privkey_system.pem \
-CAserial gooroom_x509_system.srl \
-outform DER \
-out $KEY_PATH/x509_${vendor_name}_ima.der
else
echo "$KEY_PATH/csr_${vendor_name}_ima.pem is not exist !!!"
fi
| true |
c96da59914f7fbf2880dee8f9d1c7b0c7f8167e9 | Shell | GeoSensorWebLab/gsw-chef | /beddington/files/default/offline_dokuwiki.sh | UTF-8 | 3,898 | 3.71875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# authors:
# 20110221 written by samlt / https://www.dokuwiki.org/tips:offline-dokuwiki.sh
# 20180529 modified by jeremie.francois@gmail.com (remove broken/useless navigation, more dom cleanup via sed)
#
# default values
DEF_HOSTNAME=mydoku.wiki.lan
DEF_LOCATION=doku.php?id=start
USERNAME=
PASSWORD=
PROTO=http
DEF_DEPTH=2
# Extra wget options may also be injected via the AWO environment variable.
ADDITIONNAL_WGET_OPTS=${AWO}
PROGNAME=${0##*/}
# %HOSTNAME% placeholders are substituted with the source URL later on.
HEADER="<div class='small docInfo'>This is a copy of the <a href='%HOSTNAME%'>online version</a>.</div>"
FOOTER="<footer><div class='small text-right docInfo'>Cloned on $(date).</div></footer>"
PREFIX='auto'
# Print usage information on stdout.
show_help() {
cat<<EOT
NAME
$PROGNAME: make an offline export of a dokuwiki documentation
SYNOPSIS
$PROGNAME options
OPTIONS
--login username
--passwd password
--ms-filenames download only windows-compatible filenames
--https use https instead of http
--depth number
--hostname doku.host.tld
--location path/to/start
--header raw html content to add after <body> (do not use @ caracters)
--footer raw html content to add before </body> (do not use @ caracters)
--prefix path to store files into. Default is date-host.
NOTES
if not specified on the command line
* username and password are empty
* hostname defaults to '$DEF_HOSTNAME'
* location defaults to '$DEF_LOCATION'
EOT
}
# Manual long-option parser; value-taking options consume the next argument.
while [ $# -gt 0 ]; do
case "$1" in
--login)
shift
USERNAME=$1
;;
--passwd)
shift
PASSWORD=$1
;;
--hostname)
shift
HOST=$1
;;
--depth)
shift
DEPTH=$1
;;
--location)
shift
LOCATION=$1
;;
--https)
PROTO=https
;;
--ms-filenames)
ADDITIONNAL_WGET_OPTS="$ADDITIONNAL_WGET_OPTS --restrict-file-names=windows"
;;
--header)
shift
HEADER="$1"
;;
--footer)
shift
FOOTER="$1"
;;
--prefix)
shift
PREFIX="$1"
;;
--help)
show_help
exit
;;
esac
shift
done
# Apply defaults for anything not given on the command line.
: ${DEPTH:=$DEF_DEPTH}
: ${HOST:=$DEF_HOSTNAME}
: ${LOCATION:=$DEF_LOCATION}
# "auto" prefix becomes e.g. 20180529-doku.host.tld
[[ "$PREFIX" == "auto" ]] && PREFIX="$(date +'%Y%m%d')-$HOST"
url="$PROTO://$HOST/$LOCATION"
# BUGFIX: report the wiki being mirrored ($url); the original interpolated
# $HOSTNAME, which is the local machine's hostname set by the shell.
echo "[WGET] downloading: start: $url (login/passwd=${USERNAME:-empty}/${PASSWORD:-empty})"
# Recursive mirror: fetch pages plus requisites, rewrite links for offline
# use, and skip dynamic endpoints (feeds, do= actions, indexer).
wget --no-verbose \
--recursive \
--level="$DEPTH" \
--execute robots=off \
--no-parent \
--page-requisites \
--convert-links \
--http-user="$USERNAME" \
--http-password="$PASSWORD" \
--auth-no-challenge \
--adjust-extension \
--exclude-directories=_detail,_export \
--reject="feed.php*,*do=*,*indexer.php?id=*" \
--directory-prefix="$PREFIX" \
--no-host-directories \
$ADDITIONNAL_WGET_OPTS \
"$url"
# Substitute the online URL into the injected header/footer snippets.
HEADER=$(echo "$HEADER" | sed "s#%HOSTNAME%#$url#g")
FOOTER=$(echo "$FOOTER" | sed "s#%HOSTNAME%#$url#g")
echo
echo "[SED] fixing links(href...) in the HTML sources: ${PREFIX}/${LOCATION%/*}/*.html"
# Post-process the cloned pages: relativize hrefs, strip nav/footer and
# dynamic links, then inject the offline header/footer.
sed -i -e 's#href="\([^:]\+:\)#href="./\1#g' \
-e "s#\(indexmenu_\S\+\.config\.urlbase='\)[^']\+'#\1./'#" \
-e "s#\(indexmenu_\S\+\.add('[^']\+\)#\1.html#" \
-e "s#\(indexmenu_\S\+\.add([^,]\+,[^,]\+,[^,]\+,[^,]\+,'\)\([^']\+\)'#\1./\2.html'#" \
-e "s#<link[^>]*do=[^>]*>##g" \
-e "s#<a href.*\?do=.*\?</a>##g" \
-e "/<nav/,/<\/nav>/d" \
-e "/<footer/,/<\/footer>/d" \
-e "s@^<body\(.*\)@<body\1 $HEADER@" \
-e "s@</body>@$FOOTER</body>@" \
${PREFIX}/${LOCATION%/*}/*.html
# Restore broken links to the outside
sed -i -e 's#<a href="./http#<a href="http#g' \
${PREFIX}/${LOCATION%/*}/*.html
# Remove some files that went through (eg. revisions)
# BUGFIX: give find an explicit starting path (POSIX find requires one;
# the original relied on GNU find's implicit default).
sudo find . -name '*doku.php*&rev=*' -exec rm {} \;
| true |
5a4803063d1a17b5d19d68201492d028d50341ee | Shell | farseeker/pirate-3d-buccaneer | /SD-Card-Contents/home/buccaneer/bin/buccaneer-propeller/5.4/ensurestopped.sh | UTF-8 | 407 | 3.359375 | 3 | [] | no_license | #! /bin/sh
VERSION=5.4
# PIDs of all running "propeller" processes (grep -v filters the grep itself).
PIDS=`ps auxwww | grep propeller | grep -v "grep" | awk '{print $2}'`
if [ -z "$PIDS" ]; then
echo "No instance of propeller " $VERSION " running." 1>&2
else
for PID in $PIDS; do
# Defer the kill in the background until the busy-flag file disappears,
# so a propeller mid-task is not interrupted.
(while [ -r /tmp/propeller_busy ] ; do sleep 10 ; done ; kill -9 $PID) &
done
echo "Killed one instance of running propeller " $VERSION
fi
exit 0
| true |
edd9f5976dae03545472e3a24ca69755e889ddb7 | Shell | yduman/dotfiles | /.zshrc | UTF-8 | 2,389 | 3.109375 | 3 | [] | no_license | # Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block; everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
export ZSH="/Users/yadullahd/.oh-my-zsh"
export PATH=$PATH
# export JAVA_HOME="/usr/lib/jvm/default-java"
# ---------- ZSH configs ---------- #
ZSH_THEME="powerlevel10k/powerlevel10k"
# Word-wise and line-wise cursor movement bindings.
bindkey "[D" backward-word
bindkey "[C" forward-word
bindkey "^[a" beginning-of-line
bindkey "^[e" end-of-line
# ---------- Plugins ---------- #
plugins=(docker zsh-autosuggestions zsh-syntax-highlighting)
source $ZSH/oh-my-zsh.sh
# ---------- Aliases ---------- #
alias zshconfig="vim ~/.zshrc"
alias p10kconfig="vim ~/.p10k.zsh"
alias sourcezsh="source ~/.zshrc"
alias update="brew update"
alias upgrade="brew upgrade"
alias outdated="brew outdated"
alias ga="git add"
alias gs="git status"
alias gc="git commit -m"
alias gl="git log"
alias gp="git push"
alias glg="git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit"
alias c="clear"
alias code="code-insiders"
alias c.="code ."
alias dps="docker ps --format 'table {{.ID}}\t{{.Names}}\t{{.Image}}\t{{.Status}}'"
alias dpsa="docker ps -a --format 'table {{.ID}}\t{{.Names}}\t{{.Image}}\t{{.Status}}'"
alias dlogs="docker logs --follow "
alias projects="cd ~/Projects"
alias uni="cd ~/Uni"
alias p10k-update="git -C $ZSH_CUSTOM/themes/powerlevel10k pull"
alias notes="cd ~/Projects/notes"
# ---------- CONFIG AT EOF ---------- #
# NVM
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"
# This speeds up pasting w/ autosuggest
# https://github.com/zsh-users/zsh-autosuggestions/issues/238
# Temporarily swap self-insert for url-quote-magic while pasting, restoring
# the original widget afterwards.
pasteinit() {
OLD_SELF_INSERT=${${(s.:.)widgets[self-insert]}[2,3]}
zle -N self-insert url-quote-magic
}
pastefinish() {
zle -N self-insert $OLD_SELF_INSERT
}
zstyle :bracketed-paste-magic paste-init pasteinit
zstyle :bracketed-paste-magic paste-finish pastefinish
# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
[[ -f ~/.p10k.zsh ]] && source ~/.p10k.zsh
| true |
0b29ef454f07b29756c29c210a2c5e4a92beb5df | Shell | silverfox2016/shell-python-perl | /srv/salt/files/script/z_netstat.sh | UTF-8 | 1,459 | 3.1875 | 3 | [] | no_license | #!/bin/bash
config=/usr/local/zabbix/etc/zabbix_agentd.conf
execfile=/usr/local/zabbix/script/ss.sh
# Remove any previous iptstate UserParameter entries before re-adding them,
# so repeated runs do not duplicate config lines.
sed -i '/UserParameter=iptstate/d' $config
sed -i '/###ss/d' $config
cat >>$config<<EOF
###ss
UserParameter=iptstate.tcp.[*],/bin/bash /usr/local/zabbix/script/ss.sh \$1
UserParameter=iptstate.[*],/bin/bash /usr/local/zabbix/script/ss.sh \$1
EOF
# Generate the helper script that maps a TCP-state name to a connection
# count via `ss` (the \$ escapes keep expansion for the generated script).
cat >$execfile<<EOF
#!/bin/bash
#made by xianglong.meng 20150806
#use ss replace netstat
type=\$1
case \$type in
TCP|tcp)
ss -ta |wc -l;;
UDP|udp)
ss -ua |wc -l;;
CLOSE_WAIT|close_wait)
ss -ta |grep -i ^CLOSE-WAIT |wc -l;;
ESTABLISHED|established)
ss -ta |grep -i ^ESTAB |wc -l;;
FIN_WAIT1|fin_wait1)
ss -ta |grep -i ^FIN-WAIT-1 |wc -l;;
FIN_WAIT2|fin_wait2)
ss -ta |grep -i ^FIN-WAIT-2 |wc -l;;
LAST_ACK|last_ack)
ss -ta |grep -i ^LAST-ACK |wc -l;;
CLOSING|closing)
ss -ta |grep -i ^CLOSING |wc -l;;
LISTEN|listen)
ss -ta |grep -i ^LISTEN |wc -l;;
SYN_RECV|syn_recv)
ss -ta |grep -i ^SYN-RECV |wc -l;;
SYN_SENT|syn-sent)
ss -ta |grep -i ^SYN-SENT |wc -l;;
TIME_WAIT|time_wait)
ss -ta |grep -i ^TIME-WAIT |wc -l;;
*)
echo "USAGE: \$0 (TCP|tcp UDP|udp | CLOSE_WAIT|close_wait | ESTABLISHED|established | FIN_WAIT1|fin_wait1 | FIN_WAIT2|fin_wait2 | LAST_ACK|last_ack CLOSING|closing | LISTEN|listen | SYN_RECV|syn_recv | SYN_RECV|syn_recv | TIME_WAIT|time_wai ) "
;;
esac
EOF
chmod a+x $execfile
# Restart the agent so the new UserParameters take effect.
/etc/init.d/zabbix_agentd restart
[ $? -eq 0 ] && echo "is ok!"
| true |
03a0d0477b7f53cfff779e6e4f102add89f40995 | Shell | koray6/model-zoo-models | /utils/dev_docker_run | UTF-8 | 656 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Prefer nvidia-docker (GPU image) when available, else plain docker (CPU).
if ! [ -x "$(command -v nvidia-docker)" ]; then
echo 'nvidia-docker is not installed. Using CPU mode docker' >&2
DOCKER_CMD=docker
DOCKER_IMG="latentaiorg/public-images-limited:cpu_base_2.4"
else
echo 'Found nvidia-docker. Using GPU mode nvidia-docker' >&2
DOCKER_CMD=nvidia-docker
DOCKER_IMG="latentaiorg/public-images-limited:gpu_base_2.4"
fi
# Ensure host-side cache directories exist before bind-mounting them.
mkdir -p ~/.keras
mkdir -p ~/.latentai-model-zoo
# add -p 6006:6006 to share port...
# Interactive container with the current dir mounted at /shared; all script
# arguments are forwarded to the container entrypoint.
$DOCKER_CMD run --rm -it \
-v `pwd`:/shared \
-v `pwd`/../utils:/shared/utils \
-v ~/.keras:/root/.keras \
-v ~/.latentai-model-zoo:/root/.latentai-model-zoo \
-w /shared \
$DOCKER_IMG "$@"
| true |
79c5f65e896195c2d6e37bb00ebb62dca2c57464 | Shell | LaGG-1/bash_practice | /nfs_retrans_calc.sh | UTF-8 | 1,867 | 3.625 | 4 | [] | no_license | #!/bin/bash
# NFS mount option "timeo" is the time (in tenths of a second) before an RPC
# request is retransmitted; default 7 (0.7s). "retrans" is the number of
# retransmissions allowed before a major timeout. Each retry doubles the
# previous wait, and once a single wait reaches 60 seconds a major timeout
# ("server not responding") occurs.
#
# Task: from the given initial timeo value, compute the maximum number of
# retransmissions before the per-retry wait reaches 60 seconds, plus the
# accumulated wait time.
# Read the timeo value from standard input.
echo -n "input timeo value : "
read timeov
# Validate that the input is purely numeric.
# BUGFIX: anchor the regex at both ends; the original '[0-9]+$' accepted
# inputs such as "abc123".
if [[ "$timeov" =~ ^[0-9]+$ ]]; then
:
else
echo "数値を入力してください"
exit 1
fi
# A single retry may wait at most 60 seconds (600 tenths of a second).
max_timeo=600
# Retransmission counter starts at 1.
retrans=1
# Accumulated wait time across all retries.
ttime=0
until [ $timeov -ge $max_timeo ]; do
# Show this minor timeout's wait time.
echo "wait time $retrans : $timeov"
# Accumulate the total wait (arithmetic expansion instead of `expr`).
ttime=$(( ttime + timeov ))
# Next retry doubles the wait and bumps the counter.
timeov=$(( timeov * 2 ))
retrans=$(( retrans + 1 ))
done
echo "max retrans: $retrans times, total time out: $ttime + 600"
| true |
9fa3e6a042d4d3e263588fe5c62a1a3c48bf442f | Shell | CTFallon/Stat | /Limits/test/condorScripts/datacardsOnly.sh | UTF-8 | 1,511 | 3.078125 | 3 | [] | no_license | #!/bin/bash
echo "Starting job on " `date` #Date/time of start of job
echo "Running on: `uname -a`" #Condor job is running on this node
echo "System software: `cat /etc/redhat-release`" #Operating System on that node
# Set up the CMS software environment and unpack the pre-built CMSSW area
# shipped on EOS.
source /cvmfs/cms.cern.ch/cmsset_default.sh
xrdcp -s root://cmseos.fnal.gov//store/user/cfallon/CMSSW_10_2_13.tgz .
tar -xf CMSSW_10_2_13.tgz
rm CMSSW_10_2_13.tgz
export SCRAM_ARCH=slc6_amd64_gcc700
cd CMSSW_10_2_13/src/
scramv1 b ProjectRename
eval `scramv1 runtime -sh`
echo "CMSSW: "$CMSSW_BASE
ls -la Stat/Limits/python
cd Stat/Limits/test
# Args: $1 output dir, $2 mode, $3-$6 signal parameters, $7 systematics flag.
echo "Arguments passed to this script are:"
echo "Name of output directory : ${1}"
echo "Mode: ${2}"
echo "Doing Systematics. ${7}"
# "-s" disables systematics in createDatacardsOnly.py when $7 is "N".
if [ ${7} == "N" ]
then
s="-s"
else
s=""
fi
echo "Signal Parameters: ${3} ${4} ${5} ${6}"
cmd="python createDatacardsOnly.py -m ${2} -t -Z ${3} -D ${4} -R ${5} -A ${6} ${s}"
echo "combine commands:"
echo ${cmd}
echo ${cmd} >/dev/stderr
$cmd
SVJ_NAME="SVJ_mZprime${3}_mDark${4}_rinv${5}_alpha${6}"
# export items to EOS
echo "List all root files = "
ls *.root
echo "List all files"
ls
echo "*******************************************"
EOSDIR=/store/user/cfallon/datacards_07tsb_sys/${1}
OUTDIR=root://cmseos.fnal.gov/${EOSDIR}
echo "xrdcp output for condor"
# Copy each output file to EOS, removing the local copy as we go.
for FILE in *.root *.pdf *.txt #Residuals/*.pdf plots/*.pdf Fisher/*.txt ${SVJ_NAME}/*.txt
do
echo "xrdcp -f ${FILE} ${OUTDIR}${FILE}"
xrdcp -f ${FILE} ${OUTDIR}${FILE} 2>&1
rm ${FILE}
done
# Leave the scratch area clean for Condor.
cd ${_CONDOR_SCRATCH_DIR}
rm -rf CMSSW_10_2_13
| true |
43dc52749e589cca9dd19e434cf8ca41e6ab239d | Shell | fransixles/admin-scripts | /ecryptfs/unmount.sh | UTF-8 | 389 | 3.265625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Abort early if cryptsetup is missing.
hash cryptsetup 2>/dev/null || { echo >&2 "You need to install cryptsetup-bin. Aborting."; exit 1; }
# Load configuration
CONFIG=$(dirname "$0")/config.sh
if ! [ -e "${CONFIG}" ]; then
echo >&2 "ERROR: Missing configuration file '${CONFIG}'."
exit 1
fi
# shellcheck source=/dev/null
# Provides MNTPATH and DEVNAME.
source "${CONFIG}"
# Unmount the filesystem, then close the dm-crypt mapping.
sudo umount "${MNTPATH}"
sudo cryptsetup close "${DEVNAME}"
exit 0
| true |
f86f2231e8325f7c40214252aec87d4f53a85436 | Shell | cert-manager/cert-manager | /hack/update-deps.sh | UTF-8 | 2,219 | 3.75 | 4 | [
"Apache-2.0",
"BSD-3-Clause",
"ISC",
"MPL-2.0",
"JSON",
"BSD-2-Clause",
"MIT"
] | permissive | #!/usr/bin/env bash
# Copyright 2020 The cert-manager Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NB: This script requires bazel, and is no longer supported since we no longer support bazel
# It's preserved for now but might be removed in the future
# Update vendor and bazel rules to match go.mod
#
# Usage:
# update-deps.sh [--patch|--minor] [packages]
set -o nounset
set -o errexit
set -o pipefail
if [[ -n "${BUILD_WORKSPACE_DIRECTORY:-}" ]]; then # Running inside bazel
echo "Updating modules..." >&2
elif ! command -v bazel &>/dev/null; then
echo "This script is preserved for legacy reasons and requires bazel. You shouldn't need to run this as part of your normal development workflow" >&2
echo "If you need to run this script, install bazel from https://bazel.build" >&2
exit 1
else
(
set -o xtrace
bazel run //hack:update-deps -- "$@"
)
exit 0
fi
go=$(realpath "$1")
export PATH=$(dirname "$go"):$PATH
gazelle=$(realpath "$2")
kazel=$(realpath "$3")
update_bazel=(
$(realpath "$4")
"$gazelle"
"$kazel"
)
update_deps_licenses=(
$(realpath "$5")
"$go"
)
shift 5
cd "$BUILD_WORKSPACE_DIRECTORY"
trap 'echo "FAILED" >&2' ERR
# Update hack/build/repos.bzl based of the go.mod file
"$gazelle" update-repos \
--from_file=go.mod --to_macro=hack/build/repos.bzl%go_repositories \
--build_file_generation=on --build_file_proto_mode=disable -prune=true
# `gazelle update-repos` adds extra unneeded entries to the
# go.sum file, run `go mod tidy` to remove them
"$go" mod tidy
# Update Bazel (changes in hack/build/repos.bzl might affect other bazel files)
"${update_bazel[@]}"
# Update LICENSES
"${update_deps_licenses[@]}"
echo "SUCCESS: updated modules"
| true |
11f7457573d9e874200c0c160701118f415e1ae9 | Shell | FAANG/comm-methylation | /RRBS-toolkit/Bismark_methylation_call/bismark.sh | UTF-8 | 3,092 | 3.15625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
#$ -l mem=10G
#$ -l h_vmem=20G
#
#----------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#----------------------------------------------------------------
#authors :
#---------
# Piumi Francois (francois.piumi@inra.fr) software conception and development (engineer in bioinformatics)
# Jouneau Luc (luc.jouneau@inra.fr) software conception and development (engineer in bioinformatics)
# Gasselin Maxime (m.gasselin@hotmail.fr) software user and data analysis (PhD student in Epigenetics)
# Perrier Jean-Philippe (jp.perrier@hotmail.fr) software user and data analysis (PhD student in Epigenetics)
# Al Adhami Hala (hala_adhami@hotmail.com) software user and data analysis (postdoctoral researcher in Epigenetics)
# Jammes Helene (helene.jammes@inra.fr) software user and data analysis (research group leader in Epigenetics)
# Kiefer Helene (helene.kiefer@inra.fr) software user and data analysis (principal invertigator in Epigenetics)
#
# Locate the pipeline home: from the script path, or from $RRBS_HOME if set.
if [ "$RRBS_HOME" = "" ]
then
#Try to find RRBS_HOME according to the way the script is launched
RRBS_PIPELINE_HOME=`dirname $0`
else
#Use RRBS_HOME as defined in environment variable
RRBS_PIPELINE_HOME="$RRBS_HOME/Bismark_methylation_call"
fi
. $RRBS_PIPELINE_HOME/../config.sh
# $1 = sample data dir, $2 = bismark genome dir.
dir_data=$1
dir_genome=$2
dir_data_basename=`basename "$dir_data"`
work_dir_bis="$dir_data/bismark"
logFile="$dir_data/bismark.log"
# All bismark output and errors are captured into $logFile.
(
if [ ! -d $work_dir_bis ]
then
mkdir $work_dir_bis
chmod 775 $work_dir_bis
if [ $? -ne 0 ]
then
echo "Bismark output directory impossible to create"
exit 1
fi
fi
# Paired-end if a trimmed R2 file exists, otherwise single-end.
R2=`find $dir_data/trim_galore -name "*R2_val_2.fq*"`
if [ -f "$R2" ]
then
R1=`find $dir_data/trim_galore -name "*R1_val_1.fq*"`
$BISMARK_HOME/bismark --unmapped --ambiguous \
$dir_genome \
-1 $R1 -2 $R2\
--path_to_bowtie $BOWTIE_HOME \
--output_dir $work_dir_bis
else
R1=`find $dir_data/trim_galore -name "*R1_trimmed.fq*"`
$BISMARK_HOME/bismark --unmapped --ambiguous \
$dir_genome \
$R1 \
--path_to_bowtie $BOWTIE_HOME \
--output_dir $work_dir_bis
fi
if [ $? -ne 0 ]
then
echo "Problem during bismark run"
exit 1
fi
exit $?
) 1> $logFile 2>&1
if [ $? -ne 0 ]
then
#Former process error output
exit 1
fi
# Append the parsed bismark summary to the sample's report.
(
#Extract summary report
echo ""
echo "+------------------+"
echo "| Bismark step |"
echo "+------------------+"
$PYTHON_EXECUTE $RRBS_PIPELINE_HOME/get_bismark_report.py $logFile
)>> $dir_data/summary_report.txt
| true |
b043cc3197f00087054f496d708d9c03fbe41c39 | Shell | zawster/Shell_Programming | /file7.sh | UTF-8 | 260 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# Print the sum of its two integer arguments as "Addition = <sum>".
# Note: sets the global variable c as a side effect, like the original.
add()
{
c=$(( $1 + $2 ))
echo "Addition = $c"
}
#add 5 10
a=2
b=3
# Swap two values via a temporary variable and print them in swapped order.
swap1()
{
v1=$1
c2=$2
t=$v1
v1=$c2
c2=$t
# BUGFIX: print the swapped pair; the original echoed "$v $c", two
# unrelated (typically unset) variables, so nothing useful was shown.
echo "$v1 $c2"
}
swap1 4 5
# Swap two integers without a temporary variable (add/subtract trick) and
# print them in swapped order. Sets globals v and c, like the original.
swap2()
{
v=$1
c=$2
v=$(( v + c ))
c=$(( v - c ))
v=$(( v - c ))
echo "$v $c"
}
swap2 $a $b | true |
eff4627e6deb9e118b43b549177cf618da1bc5d0 | Shell | syranez/crawl-utilities | /develzmorgues-collector/develzmorgues.sh | UTF-8 | 1,379 | 4.25 | 4 | [] | no_license | #!/usr/bin/env bash
#
# Retrieves all linked morgue files from player stats page of http://crawl.akrasiac.org/scoring/players/
#
# Usage: ./develzmorgues.sh "player"
# Exactly one argument (the player name) is required.
if [ ! $# -eq 1 ]; then
echo 'Usage: ./develzmorgues.sh "player"';
exit 1;
fi
PLAYER="$1";
# uri of the players stats page.
BASE_URI="http://crawl.akrasiac.org/scoring/players/${PLAYER}.html";
# all morge data is stored in this directory.
TEMP_DIR="/tmp/morgues";
# morge files of player $PLAYER are placed here.
TEMP_USER_DIR="${TEMP_DIR}/${PLAYER}";
# retrieved morgue file uris are temporarily stored in this file.
TEMP_URI_FILE=$(mktemp);
if [[ ! -e ${TEMP_USER_DIR} ]]; then
mkdir -p "${TEMP_USER_DIR}";
fi
# collects the morgue file uris from the document
#
# @param uri of the document
# @side writes to $TEMP_URI_FILE
# Scrapes the stats page: puts each tag on its own line, keeps .txt links,
# then trims everything around the http...txt URL.
getUris () {
if [ ! $# -eq 1 ]; then
exit 1;
fi
wget "${1}" -q -O - | sed 's/>/>\n/g' | grep '\.txt' | sed 's/^.*http/http/g' | sed 's/\.txt.*$/\.txt/g' > "${TEMP_URI_FILE}";
}
# retrieves all morgue files referenced in $1
#
# @param local file with uris to morge files
# @side writes data to $TEMP_USER_DIR
getMorgues () {
if [[ ! -e "$1" ]]; then
exit 1;
fi
# BUGFIX: abort if the target directory cannot be entered; otherwise wget
# would dump every morgue file into the current directory instead.
cd "${TEMP_USER_DIR}" || exit 1
wget -q --input-file="$1" -nc
cd ~
}
# Scrape the URI list, download everything, report the output location.
getUris "${BASE_URI}";
getMorgues "${TEMP_URI_FILE}";
echo "Files written to ${TEMP_USER_DIR}";
| true |
af520a268c764720f83799875c3a2da077a78ef4 | Shell | SliTaz-official/spk | /spk-sql | UTF-8 | 2,267 | 4 | 4 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | #!/bin/sh
#
# Spk-sql - SliTaz packages DB the SQLite way. This is for testing
# SQLite's speed to handle packages info. It may not be used to manage
# OS system but for pkgs.slitaz.org search engine and arm.slitaz.org
#
# Copyright (C) 2014 SliTaz GNU/Linux - BSD License
# Author: See AUTHORS files
#
. /usr/lib/slitaz/libspk.sh
# Benchmarks:
#
# Listing all packages with:
#
# spk-ls --short : real 0m 10.92s
# spk-sql list --output=html : real 0m 1.56s
#
#
#db="${root}${PKGS_DB}/packages.sql"
# Local test DB and table; wok is the package recipe tree to import from.
db="packages.sql"
table="mirror"
#wok=/home/slitaz/wok
wok=/home/pankso/Projects/wok
#
# Functions
#
# Help and usage
usage() {
name=$(basename $0)
cat << EOT
$(boldify $(gettext "Usage:")) $name [command|package] packageN
$(gettext "SliTaz SQLite packages DB manager")
$(boldify $(gettext "Commands:"))
master Show master record
tables List all DB tables
dbi Table database information
list List SQL DB $table table
gendb Generate a SQL DB of all packages
$(boldify $(gettext "Options:"))
--output= Set the output format (list, html)
EOT
exit 0
}
# Create the SQL database
# Drops any existing DB file and creates a fresh table with one row per
# package (name, version, category, url, description, dependencies).
create_db() {
rm -f $db
sqlite3 $db << EOT
create table $table(
pkg,
version,
category,
url,
desc,
deps
);
EOT
}
#
# Handle --options
#
for opt in $@
do
case "$opt" in
*usage|*help) usage ;;
--count)
exit 0 ;;
esac
done
#
# Handle commands
#
case "$1" in
"") usage ;;
master) sqlite3 $db 'select * from sqlite_master' ;;
tables) sqlite3 $db '.tables' ;;
dbi)
# Data Base Info
du -sh ${db} ;;
list)
# Very fast listing of fancy output: html, tcl, tabs, ...
[ "$output" ] || output="list"
sqlite3 ${db} << EOT
.mode $output
select * from $table;
EOT
;;
gendb)
# Takes long to build the +4300 pkgs DB!
time=$(date +%s)
echo "Initializing: $db --> $table"
create_db
# BUGFIX: initialize/advance the progress counter; the original printed
# an unset $count for every package.
count=0
for pkg in $(ls $wok)
do
count=$(( count + 1 ))
echo "Inserting: $count: $pkg"
. ${wok}/${pkg}/receipt
# NOTE(review): receipt values are interpolated straight into SQL, so a
# quote in any field breaks/injects the statement; acceptable for a
# local benchmark tool only.
# BUGFIX: receipts define PACKAGE; the original interpolated the
# misspelled $PACAKAGE, leaving the pkg column empty.
sqlite3 ${db} << EOT
insert into $table values(
"$PACKAGE",
"$VERSION",
"$CATEGORY",
"$WEB_SITE",
"$SHORT_DESC",
"$DEPENDS"
);
EOT
done
time=$(($(date +%s) - $time))
# BUGFIX: report the actual number inserted ($count); $nb was never set.
echo -n "$count "
gettext "packages added in"
echo " ${time}s ("$(date "+%Y%m%d %H:%M")")"
newline ;;
esac && exit 0
| true |
12dfc31452bab22e0084f2694c4fa14b5cf7508f | Shell | nuxlli/azk_bash | /libexec/azk-agent-ssh | UTF-8 | 1,010 | 3.90625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
#
# Summary: Run a command in azk agent with ssh
#
# Usage: azk agent-ssh [command] [arg1 arg2...]
AZK_COMMAND="$1"
# Without any argument, show the subcommand usage and bail out.
if [ -z "$AZK_COMMAND" ]; then
azk-help --usage agent-ssh >&2
exit 1
fi
# Connection parameters; $1 is consumed as the target host, the remaining
# arguments become the remote command.
user="core"
port="22"
host=$1; shift
# Disable host-key checks: the agent VM key changes on every rebuild.
options="-o DSAAuthentication=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=FATAL -o IdentitiesOnly=yes"
# Prefer GNU readlink (greadlink on macOS via coreutils).
READLINK=$(type -p greadlink readlink | head -1)
if [ -z "$READLINK" ]; then
echo "azk: cannot find readlink - are you missing GNU coreutils?" >&2
exit 1
fi
# Resolve one level of symlink for $1 (empty output if not a symlink).
resolve_link() {
$READLINK "$1"
}
# Print the absolute directory of $1, following chained symlinks; restores
# the original working directory afterwards.
abs_dirname() {
local cwd="$(pwd)"
local path="$1"
while [ -n "$path" ]; do
cd "${path%/*}"
local name="${path##*/}"
path="$(resolve_link "$name" || true)"
done
pwd
cd "$cwd"
}
root="$(abs_dirname "$0")/.."
# The insecure key ships with azk for agent access (not a secret).
identify="-i $root/private/etc/insecure_private_key"
# Allocate a TTY only when the caller asked for an interactive session.
if [ ! -z $AZK_INTERACTIVE ]; then
interative=" -t"
fi
# printf %q shell-quotes each remote argument before the eval'd ssh call.
eval "ssh $user@$host $options ${identify}${interative}$(printf ' %q' "$@")"
| true |
9d26326aa4e61d0c8329ae71665bddb384bc80aa | Shell | vanwaals/zookeeper-operator | /docker/bin/zookeeperTeardown.sh | UTF-8 | 1,026 | 3.234375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -ex
source /conf/env.sh
source /usr/local/bin/zookeeperFunctions.sh
DATA_DIR=/data
MYID_FILE=$DATA_DIR/myid
LOG4J_CONF=/conf/log4j-quiet.properties
# Check to see if zookeeper service for this node is a participant
set +e
ZKURL=$(zkConnectionString)
set -e
MYID=`cat $MYID_FILE`
# Remove server from zk configuration
java -Dlog4j.configuration=file:"$LOG4J_CONF" -jar /root/zu.jar remove $ZKURL $MYID
# Wait for client connections to drain. Kubernetes will wait until the confiugred
# "terminationGracePeriodSeconds" before focibly killing the container
CONN_COUNT=`echo cons | nc localhost 2181 | grep -v "^$" |grep -v "/127.0.0.1:" | wc -l`
# Poll every 5s, at most 36 times (3 minutes total).
for (( i = 0; i < 36; i++ )); do
# BUGFIX: stop waiting once all non-local connections are gone; the
# original loop ignored the count and always waited the full 3 minutes.
if [ "$CONN_COUNT" -eq 0 ]; then
break
fi
echo "$CONN_COUNT non-local connections still connected."
sleep 5
CONN_COUNT=`echo cons | nc localhost 2181 | grep -v "^$" |grep -v "/127.0.0.1:" | wc -l`
done
# Kill the primary process ourselves to circumvent the terminationGracePeriodSeconds
# NOTE(review): awk '{print $1}' assumes the container's ps prints PID in
# column 1 (busybox layout); procps `ps -ef` puts UID first — confirm.
ps -ef | grep zoo.cfg | grep -v grep | awk '{print $1}' | xargs kill
| true |
ea84226219d09dde51d69c7a3102cb4b3c28196b | Shell | caius/zshrc | /dot_zsh/lib/alias.zsh | UTF-8 | 3,520 | 2.578125 | 3 | [
"MIT"
] | permissive | alias ehco="echo"
# Personal zsh alias collection (macOS flavoured: pbcopy, open, etc.).
# Many entries are typo traps that forward common misspellings to the
# intended command.
alias ea-ssh="ssh"
alias ls="ls -G"
alias ..='cd ..'
alias cd..='cd ..'
alias cd...='cd ../..'
alias cd....='cd ../../..'
alias cd.....='cd ../../../..'
alias cd/='cd /'
alias d='dirs -v'
# SSH stuff
alias ssht='ssh -D 8080 -f -C -q -N'
## Rails stuff
alias edev='RAILS_ENV=development'
alias etest='RAILS_ENV=test'
alias ecuke='RAILS_ENV=cucumber'
alias estaging='RAILS_ENV=staging'
alias eproduction='RAILS_ENV=production'
alias eprod="eproduction"
# For running test unit *sigh*
# Depends on my rtest function
alias rt="rtest"
alias bert="rtest" # bundle exec ruby -Itest, etc
# Pry > IRB
alias irb="pry"
alias be="bundle exec"
alias gvg='grep -v grep'
alias rc='mate $(chezmoi source-path)'
alias sshc='mate ~/.ssh/config'
alias gitrc='mate ~/.gitconfig'
## Top Stuff
alias cpu='(which htop > /dev/null && htop --sort-key PERCENT_CPU) || top -o cpu'
alias ram='(which htop > /dev/null && htop --sort-key RES) || top -o rsize'
alias same='open .'
## My nifty stuff for copying/pasting dir paths
alias copypath='echo `pwd` | pbcopy'
alias cdpath='cd "`pbpaste`"'
## Textmate alias'
alias mate='\mate -r'
alias m='mate -r'
# alias ,='mate -r'
alias matew='mate -rw'
alias mw='mate -rw'
alias m.='mate .'
# Open all .xcodeproj files in the current folder
alias xcode='open *.xcodeproj'
# General ones
alias df='df -Hl'
alias mktar='tar -cvf'
alias mkbz2='tar -cvjf'
alias mkgz='tar -cvzf'
alias untar='tar -xvf'
alias unbz2='tar -xvjf'
alias ungz='tar -xvzf'
alias mv='mv -i'
alias cp='cp -i'
alias dnsflush='dscacheutil -flushcache'
# Git alias' -- mostly misspelling forwards
alias bitx='gitx'
alias ghit='git'
alias bit='git'
alias gib='git'
alias got='git'
alias gut='git'
alias gti='git'
alias gtu='git'
alias gto='git'
alias giot='git'
alias hit='git'
alias tgit='git'
alias tgi='git'
alias gt='git'
alias gi='git'
alias it='git'
alias g='git'
alias ggit='git'
alias giit='git'
alias fit='git'
alias igt='git'
alias tit='git'
alias gits='git'
alias guit="git"
alias ig="git"
alias gitgit="git"
alias gity="git"
alias gtis="git"
alias gitr="git"
alias gigit="git"
alias gitt="git"
alias gitb='git b'
alias gitst='git st'
alias gtst='git st'
alias gitpush="git push"
alias gitup="git up"
alias gme='gem'
alias rkae='rake'
alias bundel="bundle"
alias vybdke="bundle"
alias js="/System/Library/Frameworks/JavaScriptCore.framework/Versions/Current/Resources/jsc"
alias sqlite='sqlite3'
alias crontab="EDITOR=nano crontab"
alias chrome="/Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome"
# Override ZSH's built ins
alias time="$(whence -p time)"
# ln -s /System/Library/Frameworks/JavascriptCore.framework/Versions/A/Resources/jsc ~/bin/
alias javascript="jsc"
# From http://www.leancrew.com/all-this/2013/02/getting-rid-of-open-with-duplicates/
alias fixopenwith='/System/Library/Frameworks/CoreServices.framework/Frameworks/LaunchServices.framework/Support/lsregister -kill -r -domain local -domain system -domain user'
# Prettify JSON/XML
alias pretty_json="python -m json.tool"
alias pretty_xml="xmllint --format -"
# Habit
alias rvm="rbenv"
# ag is faster than ack
alias ack="ag"
# Fuck yes, I am this lazy
alias v="vagrant"
alias tf="terraform"
alias md="macdown"
alias macos="$(brew --prefix m-cli)/bin/m"
alias k="kitchen"
alias cz="chezmoi"
# Have to explicitly use /usr/local brew path here
alias rbrew="rosetta /usr/local/bin/brew"
alias tailscale="/Applications/Tailscale.app/Contents/MacOS/Tailscale"
## Global alias' (zsh-only: expand anywhere on the command line)
alias -g §='$(fzf)'
alias -g rosetta="arch -x86_64"
| true |
c0bda4ac9436a554f0a109203c53b6e640401bd1 | Shell | dannyshaw/dotfiles-old | /bash/.bashrc | UTF-8 | 2,131 | 2.75 | 3 | [] | no_license | export PATH=$HOME/bin:$HOME/pear/bin:$PATH
#---------------------------------------------------------
# Big Mobile
#---------------------------------------------------------
# Work-tree roots used by the helper functions and aliases below.
export BMROOT=$HOME"/bm"
export OPS_PATH=$BMROOT"/process/ops"
export STORY_PATH=$BMROOT"/process/story"
export BIG=$BMROOT"/big"
export DEV=$HOME"/dev"
function opst {
	# Print the first ops task whose filename contains $1.
	# Quoting "$OPS_PATH/tasks" keeps find working when the base
	# directory contains spaces; `local` stops leaking $name.
	local name="$1"
	find "$OPS_PATH/tasks" -maxdepth 1 -name "*$name*" | head -n 1
}
function story {
	# Print the first story entry whose filename contains $1.
	# Quoted "$STORY_PATH" survives paths containing spaces.
	local name="$1"
	find "$STORY_PATH" -maxdepth 1 -name "*$name*" | head -n 1
}
function storyt {
	# With only a story fragment: delegate to the story lookup.
	# With both a story and a task fragment: print the first task
	# path under that story's tasks/ directory.
	local story="$1"
	local task="$2"
	if [ -z "$story" ] || [ -z "$task" ]; then
		# BUG FIX: was `echo \`story "$story"\``, which word-split
		# the result and mangled paths containing whitespace.
		story "$story"
	else
		find "$STORY_PATH" -maxdepth 3 -wholename "*$story*/tasks/*$task*" | head -n 1
	fi
}
#oath
#PATH=$PATH:/home/danny/bin
#export PATH
#handy tools
alias sagi='sudo apt-get install'
alias cc='xclip -sel clip'
alias pp='xclip -sel clip -o'
alias n='notify-send "Script Completed"'
alias dp='php $HOME/bin/dp.php | while read line; do notify-send -t 8000000 "DPs Playlist" "$line"; done'
alias dom='dig @ns1.bmsrv.net axfr bmsrv.net'
alias vpn='sudo openvpn --config /home/danny/bm/bash/openvpn/client.ovpn --script-security 2'
#servers
alias web1='ssh custadmin@web1.psa.bmsrv.net'
alias web2='ssh custadmin@web2.psa.bmsrv.net'
alias webutil='ssh custadmin@webutil1.psa.bmsrv.net'
# -L forwards the remote MySQL port to a local port for tooling.
alias db1="ssh -t -L 3308:localhost:3306 custadmin@db1.psa.bmsrv.net"
alias db2="ssh -t -L 3308:localhost:3306 custadmin@db2.psa.bmsrv.net"
alias stagingdb="ssh -t -L 3309:localhost:3306 custadmin@staging.bmdev.net"
alias dp1='php /home/danny/bin/dp.php 1'
#directory shortcuts (paths rely on the exports defined above)
alias ds="cd $DEV"
alias b="cd $BIG"
alias w='cd $BIG/web/wap'
alias l='cd $BIG/lib/php/Code'
alias p='cd $BIG/web/picture'
alias c='cd $BIG/web/customad'
alias doc='cd $BIG/doc/dev'
alias ops="cd $OPS_PATH"
alias h='thg log .'
alias db='cd $BIG/db'
alias s='cd $STORY_PATH'
#for local env, not needed now dev app is around..
alias fixwlan='sudo iw dev wlan0 set power_save off'
alias btfix='pactl load-module module-bluetooth-discover'
export PROCESS_PATH='/home/danny/bm/process'
PATH="$PATH:/home/danny/apps/flow/"
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
| true |
dc5d226560e7a5399ef222b4d0f363d46f1b26d5 | Shell | Sasun/realtimeedit | /linux/umount_tmpfs.sh | UTF-8 | 542 | 4.125 | 4 | [] | no_license | #!/bin/sh
# This program has two features.
#
# 1. Unmount a disk image.
# 2. Detach the disk image from RAM.
#
# Usage:
#   $0 <dir>
#
# dir:
#   The `dir' is a directory, the dir is mounting a disk image.
#
if [ $# -lt 1 ]; then
	echo "Usage: $0 <mount point>" >&2
	exit 1
fi
mount_point=$1
if [ ! -d "${mount_point}" ]; then
	echo "The mount point isn't available." >&2
	exit 1
fi
# Canonicalise to an absolute path (quoted: the path may contain spaces).
mount_point=$(cd "$mount_point" && pwd)
sudo umount "${mount_point}"
rc=$?
if [ $rc -ne 0 ]; then
	# BUG FIX: the old code ran `exit $?` after echo, so the script
	# always exited 0 even when umount failed; propagate umount's
	# real status instead.
	echo "Could not unmount." >&2
	exit $rc
fi
| true |
331e59c7cbcd67240026026952acaf435afdcf0a | Shell | ezbake/ezbake-security-build-vm | /provisioning/boost_library_patch.sh | UTF-8 | 1,303 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright (C) 2013-2014 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#this patch can be removed when the installed boost version is 1.45 and greater
REQUIRED_VERSION="104100"
# Empty when the boost header is missing; handled by the quoted test below.
BOOST_VERSION=`grep '#define BOOST_VERSION ' /usr/include/boost/version.hpp | awk '{ print $3 }'`
PATCH_FILE=/vagrant/provisioning/BOOST_json_parser_read.hpp.patch
# Dry run: exits 0 only when the patch has not yet been applied.
patch -p0 -N --dry-run --silent < ${PATCH_FILE} 2>/dev/null
if [ $? -eq 0 ]; then
    echo "Boost library needs to be patched"
    # FIX: quote both sides; an unquoted empty $BOOST_VERSION used to
    # produce a "[: =: unary operator expected" error here.
    if [ "$BOOST_VERSION" = "$REQUIRED_VERSION" ]; then
        patch -p0 -N < ${PATCH_FILE}
        echo " - Patched boost library"
    else
        echo " - Fail. Not patching Boost library. Require version $REQUIRED_VERSION. Detected version $BOOST_VERSION"
    fi
fi
| true |
9d09e7b51a84d0c0f7281a0cdd9add1f22351c52 | Shell | UCL-RITS/rcps-cluster-scripts | /realname | UTF-8 | 4,881 | 3.8125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Resolve UCL usernames to real names (and vice versa with -b) via
# LDAP by default, or via remote getent/ypcat on socrates with -e/-y.
LDAPSEARCH_PATH=/usr/bin/ldapsearch
while getopts ":eybhdmp" opt; do
  case $opt in
    h)
      echo "
usage: $0 [options] username [username [...]]
Options:
 -h  show this help message
 -b  reverse lookup -- i.e. search for user names, not usernames (slow)
 -y  ypcat mode -- requires ssh into socrates
 -e  getent mode -- requires ssh into socrates
 -d  turns on some debugging output
 -m  search for mail instead (LDAP only) (sort of a dodgy add-on)
 -p  search for department instead (LDAP only) (ditto)
"
      exit 0
      ;;
    b)
      echo "Using reverse lookup -- this can be pretty slow." >&2
      backwards_mode="y"
      ;;
    e)
      echo "Using remote getent mode -- this can work for old usernames that are no longer in LDAP." >&2
      lookup_mode="getent"
      ;;
    y)
      echo "Using remote ypcat mode -- this can work for old usernames that are no longer in LDAP." >&2
      lookup_mode="ypcat"
      ;;
    d)
      debug_mode="y"
      ;;
    m)
      mail_search="y"
      ;;
    p)
      department_search="y"
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      exit 4
      ;;
  esac
done
shift $((OPTIND-1));
# ldapsearch is only required for the default (LDAP) mode.
if [ -z "$lookup_mode" ] && [ ! -x "$LDAPSEARCH_PATH" ]; then
  echo "Error: this script requires ldapsearch.
(You might still be able to use ypcat or getent modes.)" >&2
  exit 3
fi
for name in "$@"; do
  # Usernames are exactly 7 characters; the check is skipped for
  # reverse (real-name) lookups.
  if [ "${#name}" -ne "7" ] && [ -z "$backwards_mode" ]; then
    echo "Invalid username: \"${name}\"" >&2
    exit 2
  else
    if [ -n "$debug_mode" ]; then echo "Debug: Searching for '$name'" >&2; fi
    if [ -z "$lookup_mode" ]; then
      # Default mode: direct LDAP query. Forward lookups match on cn
      # and print gecos; reverse lookups swap the two keys.
      if [ -z "$backwards_mode" ]; then
        search_key="cn"
        display_key="gecos"
      else
        search_key="gecos"
        display_key="cn"
      fi
      if [ -n "$mail_search" ]; then
        display_key="mail"
      fi
      if [ -n "$department_search" ]; then
        display_key="department"
      fi
      # I still don't know what rdn stands for, but it's like the connection user I think?
      ldap_uri="ldaps://openldap-auth3.ucl.ac.uk:636/"
      ldap_rdn="cn=unixauth,ou=System Users,dc=uclusers,dc=ucl,dc=ac,dc=uk"
      ldap_pass_file="/shared/ucl/etc/ldappw"
      ldap_bind_user="dc=uclusers,dc=ucl,dc=ac,dc=uk"
      ldap_search_term="(${search_key}=${name})"
      if [ ! -e "$ldap_pass_file" ]; then
        echo "Exiting because LDAP password file does not exist: $ldap_pass_file" >&2
        exit 6
      fi
      if [ ! -r "$ldap_pass_file" ]; then
        echo "Exiting because LDAP password file is not readable at: $ldap_pass_file" >&2
        exit 5
      fi
      if [ -n "$debug_mode" ]; then
        echo "Debug: ldap_rdn: $ldap_rdn"
        ldap_search_args=("-d5" "-vvv" "-x" "-LLL" "-y$ldap_pass_file" "-H$ldap_uri" "-D$ldap_rdn" "-b$ldap_bind_user" "$ldap_search_term")
      else
        ldap_search_args=("-x" "-LLL" "-y$ldap_pass_file" "-H$ldap_uri" "-D$ldap_rdn" "-b$ldap_bind_user" "$ldap_search_term")
      fi
      if [ -n "$debug_mode" ]; then
        echo "Debug: ldapsearch path is: $LDAPSEARCH_PATH" >&2
        echo "Debug: ldapsearch arguments are: ${ldap_search_args[*]}" >&2
      fi
      if [ -n "$debug_mode" ]; then
        set -x
      fi
      # The sed strips the "<key>: " prefix from the matching line.
      search_result=$($LDAPSEARCH_PATH "${ldap_search_args[@]}" | sed -rn "s/^${display_key}: (.+)$/\1/p" )
      if [ -n "$debug_mode" ]; then
        set +x
      fi
      # And that's us done with LDAP
    elif [ "$lookup_mode" = "getent" ]; then
      if [ -z "$backwards_mode" ]; then
        search_result=$(ssh socrates.ucl.ac.uk "getent passwd $name" 2>/dev/null | awk -F: '{print $5;}')
      else
        search_result=$(ssh socrates.ucl.ac.uk "getent passwd" 2>/dev/null | awk -F: '{print $5 ":" $1;}' | grep "$name" | awk -F: '{print $2;}')
      fi
    elif [ "$lookup_mode" = "ypcat" ]; then
      if [ -z "$backwards_mode" ]; then
        search_result=$(ssh socrates.ucl.ac.uk "/usr/local/rbin/ypcat passwd" 2>/dev/null | awk -F: '{print $1 ":" $5;}' | grep "^$name" 2>/dev/null | awk -F: '{print $2;}')
      else
        search_result=$(ssh socrates.ucl.ac.uk "/usr/local/rbin/ypcat passwd" 2>/dev/null | awk -F: '{print $5 ":" $1;}' | grep "$name" 2>/dev/null | awk -F: '{print $2;}')
      fi
    fi
    # Prefix results with the username only when several were requested.
    if [ -n "$search_result" ]; then
      if [ $# -gt 1 ]; then
        echo "${name}: ${search_result}"
      else
        echo "${search_result}"
      fi
    elif [ -n "$mail_search" ]; then
      echo "No mail address found for username \"${name}\"" >&2
      exit 8
    elif [ -n "$department_search" ]; then
      echo "Warning: no department found for username \"${name}\" -- returning None/Unknown" >&2
      echo "None/Unknown"
    else
      echo "Error: no user found for username \"${name}\"" >&2
      exit 7
    fi
  fi
done
| true |
475b3e2429c822f933e1131837ea287feed2753d | Shell | physicalist/dot.osx | /home/bin/upgrade-neovim | UTF-8 | 690 | 3.25 | 3 | [] | no_license | #!/usr/bin/env bash
gnudate()
{
  # Use GNU date when available (installed as `gdate` on macOS/BSD via
  # coreutils), otherwise fall back to the system date.
  # `command -v` replaces the non-portable external `which` lookup.
  if command -v gdate > /dev/null 2>&1; then
    gdate "$@"
  else
    date "$@"
  fi
}
# Ensure the neovim tap is present.
[ -d "$(brew --repo)/Library/Taps/neovim/homebrew-neovim" ] || brew tap neovim/neovim
# Only `brew update` when the tap was last fetched more than a day ago.
nowsecs=$(gnudate --utc +%s)
modsecs=$(gnudate --utc --reference="$(brew --repo)/Library/Taps/neovim/homebrew-neovim/.git/FETCH_HEAD" +%s)
[ $((nowsecs-modsecs)) -lt 86400 ] || brew update
# Rebuild neovim from HEAD, refresh the Python/Ruby providers, then
# upgrade/clean/update vim-plug plugins headlessly.
brew reinstall neovim/neovim/neovim --HEAD && \
pip2 install --upgrade neovim && \
pip3 install --upgrade neovim && \
gem install neovim && \
nvim --headless -c PlugUpgrade +q && \
nvim --headless -c PlugClean! +qall && \
nvim --headless -c PlugUpdate! +qall
| true |
27082344b53d8f1489919c2a7078dbb05a06ac4a | Shell | hoangxuyenle/DPF-Core | /docker/run_image.sh | UTF-8 | 720 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# start the docker image locally and share the directory one level up
# Notes:
#    DPF_DOCKER tells "conftest.py" to resolve all the test files
#    relative to the docker image's directory.  Provided that you've
#    mapped this source directory over (through -v `pwd`/../:/dpf below).
#    DPF_PORT is the port exposed from the DPF container.
#    DPF_START_SERVER tells `ansys.dpf.core` not to start an instance and
#    rather look for the service running at DPF_IP and DPF_PORT.  If
#    those environment variables are undefined, they default to 127.0.0.1
#    and 50054 for DPF_IP and DPF_PORT respectively.
# IMAGE_NAME is a sibling file that sets $IMAGE.
source IMAGE_NAME
# Interactive, auto-removing container; parent dir mounted at /dpf.
docker run -it --rm -v `pwd`/../:/dpf -v /tmp:/dpf/_cache -p 50054:50054 --name dpf $IMAGE
| true |
35223ba4ee6c4ba8eee340ac5bc6363fbc137dfb | Shell | jollyvjacob/fang-hacks | /bootstrap/www/cgi-bin/parts/network.udhcpd | UTF-8 | 3,916 | 3.375 | 3 | [] | no_license | #!/bin/sh
# CGI page for editing the busybox udhcpd configuration.  On POST it
# rewrites udhcpd.conf from the form fields, then restarts udhcpc; it
# always ends by reading the current values back for the HTML form.
PATH="/bin:/sbin:/usr/bin:/media/mmcblk0p2/data/bin:/media/mmcblk0p2/data/sbin:/media/mmcblk0p2/data/usr/bin"
CFG_DHCPD="/media/mmcblk0p2/data/etc/udhcpd.conf"
# get posted variables
source ./func.cgi
if [ "${REQUEST_METHOD}" = "POST" ]
then
  # update the cfg file with the posted variables
  if [ -e $cfgfile ]; then
    # Form fields arrive as shell vars named F_udhcpd_<option>='value';
    # strip the quotes, drop the prefix, and upsert "opt <name> <value>".
    set | grep -e '^F_udhcpd_' | while IFS='=' read name value
    do
      temp="${value%\'}"
      temp="${temp#\'}"
      value=$temp
      if [ "$value" ]; then
        name=${name:9}
        line=$(grep -E '^(opt|option)\s+'$name $CFG_DHCPD)
        newline="opt $name $value"
        if [ "$line" ]; then
          sed -i '/'"${line}"'/s/.*/'"${newline}"'/' "${CFG_DHCPD}"
        else
          echo $newline >> $CFG_DHCPD
        fi
      fi
    done
    # F_udhcpdl_* fields map to bare "start"/"end" lines (no "opt").
    set | grep -e '^F_udhcpdl_' | while IFS='=' read name value
    do
      temp="${value%\'}"
      temp="${temp#\'}"
      value=$temp
      if [ "$value" ]; then
        name=${name:10}
        line=$(grep -E '^'$name $CFG_DHCPD)
        newline="$name $value"
        if [ "$line" ]; then
          sed -i '/'"${line}"'/s/.*/'"${newline}"'/' "${CFG_DHCPD}"
        else
          echo $newline >> $CFG_DHCPD
        fi
      fi
    done
    # Restart the DHCP client/server so the new config takes effect.
    if [ -e "/var/run/udhcpc.pid" ]; then
      echo "Terminating udhcpc"
      killall udhcpc
    fi
    if [ -e "/var/run/udhcpd.pid" ]; then
      echo "Terminating udhcpd"
      killall udhcpd
    fi
    udhcpc -i wlan0 -p /var/run/udhcpc.pid -b 2>&1
    rc=$?
    if [ $rc -ne 0 ]; then
      echo "Failed to start udhcpc"
    fi
  else
    echo "The configuration file is not present: " $cfgfile
    return
  fi
fi
# Read the current settings back out of udhcpd.conf for the form below.
udhcpd_router="$(cat $CFG_DHCPD | grep ^opt.*router | awk '{print $3}')"
udhcpd_subnet="$(cat $CFG_DHCPD | grep ^opt.*subnet | awk '{print $3}')"
udhcpd_lease="$(cat $CFG_DHCPD | grep ^opt.*lease | awk '{print $3}')"
udhcpd_dns="$(cat $CFG_DHCPD | grep ^opt.*dns | awk '{print $3}')"
udhcpd_hostname="$(cat $CFG_DHCPD | grep ^opt.*hostname | awk '{print $3}')"
udhcpd_domain="$(cat $CFG_DHCPD | grep ^opt.*domain | awk '{print $3}')"
udhcpdl_start="$(cat $CFG_DHCPD | grep ^start | awk '{print $2}')"
udhcpdl_end="$(cat $CFG_DHCPD | grep ^end | awk '{print $2}')"
cat << EOF
<form name="network.udhcpd" method="POST">
<div class="panel panel-default">
<div class="panel-heading">
DHCP Server
</div>
<div class="panel-body">
<div class="row">
<div class="form-group">
<label>
DNS primary
</label>
<input type="text" name="udhcpd_dns" class="form-control" value="$udhcpd_dns">
</div>
<div class="form-group">
<label>
Subnet
</label>
<input type="text" name="udhcpd_subnet" class="form-control" value="$udhcpd_subnet">
</div>
<div class="form-group">
<label>
Router
</label>
<input id="router" name="udhcpd_router" type="text" class="form-control" value="$udhcpd_router">
</div>
<div class="form-group">
<label>
Lease
</label>
<input id="lease" name="udhcpd_lease" type="text" class="form-control" value="$udhcpd_lease">
</div>
<div class="form-group">
<label>
Hostname
</label>
<input name="udhcpd_hostname" class="form-control" type="text" value="$udhcpd_hostname">
</div>
<div class="form-group">
<label>
Domain
</label>
<input name="udhcpd_domain" class="form-control" type="text" value="$udhcpd_domain">
</div>
<div class="form-group">
<label>
IP lease block
</label>
<input name="udhcpdl_start" class="form-control" type="text" value="$udhcpdl_start">
<input name="udhcpdl_end" class="form-control" type="text" value="$udhcpdl_end">
</div>
</div>
<div class="row">
<button type="submit" class="btn btn-default">Submit</button>
<button type="reset" class="btn btn-default" disabled>Reset</button>
</div>
</div>
</div>
</form>
| true |
f359dc9843a80a010beb9ba9b1fb546f1aca751f | Shell | cjchung/main | /larch/cd-root/boot0/support/support | UTF-8 | 158 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# Run a bundled tool through the shipped dynamic loader.
# $1 names the tool under ./lib; the rest are its arguments.
command=$1
shift
FULLPATH="$( readlink -f $0 )"
LIBDIR="$( dirname ${FULLPATH} )/lib"
# FIX: use "$@" instead of unquoted $*, which re-split arguments that
# contained whitespace; quote the loader paths for the same reason.
"$LIBDIR/loader" --library-path "$LIBDIR" "$LIBDIR/$command" "$@"
| true |
1a012c4c1ddd01d8432821abe6bb5d23c4f8fcb4 | Shell | officialdarksheao/darkbien-linux-project | /build/build-static-gcc.sh | UTF-8 | 1,097 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# Stage-1 (static, headerless) cross GCC build.  Expects SOURCE_DIR,
# BUILD_DIR, BUILD_DIR_HOST, BUILD_DIR_TARGET and BUILD_DIR_CPU to be
# exported by the surrounding build environment.
echo "Uncompressing gcc"
CURRENT_DIR=$(pwd)
cd ${SOURCE_DIR}
tar -xvf ${SOURCE_DIR}/gcc-8.2.0.tar.gz
cd ${SOURCE_DIR}/gcc-8.2.0
# Unpack GMP/MPFR/MPC in-tree so GCC builds them itself.
tar xjf ../gmp-6.1.2.tar.bz2
mv gmp-6.1.2 gmp
tar xJf ../mpfr-4.0.1.tar.xz
mv mpfr-4.0.1 mpfr
tar xzf ../mpc-1.1.0.tar.gz
mv mpc-1.1.0 mpc
mkdir gcc-static
cd gcc-static
# Minimal bootstrap compiler: no libc headers yet (--without-headers),
# C/C++ only, single-lib, targeting the configured CPU.
AR=ar LDFLAGS="-Wl,-rpath,${BUILD_DIR}/cross-tools/lib" \
../configure --prefix=${BUILD_DIR}/cross-tools \
--build=${BUILD_DIR_HOST} --host=${BUILD_DIR_HOST} \
--target=${BUILD_DIR_TARGET} \
--with-sysroot=${BUILD_DIR}/target --disable-nls \
--disable-shared \
--with-mpfr-include=$(pwd)/../mpfr/src \
--with-mpfr-lib=$(pwd)/mpfr/src/.libs \
--without-headers --with-newlib --disable-decimal-float \
--disable-libgomp --disable-libmudflap --disable-libssp \
--disable-threads --enable-languages=c,c++ \
--disable-multilib --with-arch=${BUILD_DIR_CPU}
make all-gcc all-target-libgcc && \
make install-gcc install-target-libgcc
# Provide libgcc_eh.a as a link to libgcc.a for the glibc build step.
ln -vs libgcc.a `${BUILD_DIR_TARGET}-gcc -print-libgcc-file-name | sed 's/libgcc/&_eh/'`
cd ${CURRENT_DIR}
echo "Gcc installed, Next: source build-glibc.sh"
25c7d4672dc22426c48a7b9149b9ad55137a406d | Shell | shiranD/oclm | /main/gen_machine/ltr_dist_gen.sh | UTF-8 | 379 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Build a sigma-matcher FST from a tiny hand-written text FST.
# $1: symbol table file, $2: label id used as the <sigma> special symbol.
set -x
export LD_LIBRARY_PATH=/usr/local/lib/fst
syms=$1
special=$2
# Two-arc FST over <sigma>, written as fstcompile text input.
echo -e "0\t0\t<sigma>\t<epsilon>\n0\t1\t<sigma>\t<sigma>\n1" > sig.txt
fstcompile --isymbols=${syms} --osymbols=${syms} --keep_isymbols --keep_osymbols sig.txt | fstspecial --fst_type=sigma --sigma_fst_sigma_label=$special --sigma_fst_rewrite_mode="always" > machines/ltr_dist.sigma.fst
rm sig.txt
| true |
8b41822286d0d1e2c2068f98d23c3745a5e57c59 | Shell | rranshous/magicwindow | /node | UTF-8 | 395 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env bash
#POST_TARGET=http://lilnit:5041
# Take a webcam snapshot named after this host and POST it to
# $POST_TARGET (expected in the environment).
image_name=`hostname`.jpeg
out_path=/tmp/$image_name
echo "taking picture: $out_path"
# imagesnap on macOS, streamer elsewhere.
if [[ "$OSTYPE" == "darwin"* ]]; then
  imagesnap "$out_path"
else
  streamer -o "$out_path"
fi
echo "sending picture: $out_path => $POST_TARGET"
# FIX: quote the URL so an unset/odd POST_TARGET or hostname cannot
# word-split or glob-expand the argument handed to curl.
curl -XPOST --data-binary "@$out_path" "$POST_TARGET/$image_name"
echo "cleaning up: $out_path"
rm "$out_path"
| true |
c20c23d4716f9dc54eb4a20b1df31f4c4975efe7 | Shell | SocialGeeks/vagrant-openstack | /scripts/controller.compute-configuration.sh | UTF-8 | 955 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
echo
# FIX: quote the banner -- unquoted [+] is a glob pattern that would
# expand if a file named "+" existed in the working directory.
echo "[+] Configuring compute controller services"
echo
sleep 2
nova-manage db sync
sleep 5
# keystonerc provides admin credentials; passwordsrc sets KEYSTONE_NOVA.
source /root/keystonerc
source /root/passwordsrc
keystone user-create --name=nova --pass="$KEYSTONE_NOVA" --email=nova@example.com
keystone user-role-add --user=nova --tenant=service --role=admin
# tee keeps a copy of the output so the service id can be parsed below.
keystone service-create --name=nova --type=compute --description="Nova Compute service" | tee /root/nova-service
export NOVA_SERVICE=$(grep 'id ' /root/nova-service | cut -d'|' -f3 | cut -d' ' -f2)
keystone endpoint-create --service-id="$NOVA_SERVICE" --publicurl=http://controller:8774/v2/%\(tenant_id\)s \
  --internalurl=http://controller:8774/v2/%\(tenant_id\)s \
  --adminurl=http://controller:8774/v2/%\(tenant_id\)s
unset NOVA_SERVICE
rm /root/nova-service
service nova-api restart
service nova-cert restart
service nova-consoleauth restart
service nova-scheduler restart
service nova-conductor restart
service nova-novncproxy restart
sleep 6
| true |
76fb9eb2d19519f54fe20221cf1afb47303b7bc9 | Shell | shohei/antimony | /app/deploy_app.sh | UTF-8 | 1,459 | 2.78125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/sh
set -x -e
if [ $# -ne 1 ]; then
    echo "Usage: deploy_app.sh (debug|release)"
    exit 1
fi
# Rebuild the requested configuration from scratch.
cd ../build-$1
make clean
make qmake
rm -rf antimony.app
make
macdeployqt antimony.app
# Strip Qt plugins and frameworks the app does not use.
cd antimony.app/Contents/PlugIns
rm -rf accessible audio imageformats mediaservice playlistformats position printsupport qml1tooling sensorgestures sensors
cd ../Frameworks
rm -rf QtDeclarative.framework QtMultimedia.framework QtMultimediaWidgets.framework QtNetwork.framework QtPositioning.framework QtQml.framework QtQuick.framework QtScript.framework QtSensors.framework QtSql.framework QtXmlPatterns.framework
# Bundle Python and repoint the boost-python and main binaries at the
# copy inside the app bundle.
cp -r /usr/local/Frameworks/Python.framework .
install_name_tool -change /usr/local/Frameworks/Python.framework/Versions/3.4/Python \
    @executable_path/../Frameworks/Python.framework/Versions/3.4/Python \
    libboost_python3.dylib
cd ../Resources
rm empty.lproj
cd ../MacOS
install_name_tool -change /usr/local/Frameworks/Python.framework/Versions/3.4/Python \
    @executable_path/../Frameworks/Python.framework/Versions/3.4/Python \
    antimony
cd ../../..
cp -r fab antimony.app/Contents/Frameworks/Python.framework/Versions/3.4/lib/python3.4/fab
cp ../README.md .
tar -cvzf antimony.tar.gz antimony.app README.md
rm README.md
# Only the maintainer uploads the tarball.
if [ `whoami` = "mkeeter" ]; then
    scp antimony.tar.gz mattkeeter.com:mattkeeter.com/projects/antimony/antimony.tar.gz
fi
| true |
47b961032bc508cab65e031751e2a8477484cb8a | Shell | matplaneta/pi_sensors | /tools/rtc/sbin/i2c-hwclock | UTF-8 | 1,139 | 3.875 | 4 | [] | no_license | #!/bin/sh
#
# Trivial script to load/save current contents of the kernel clock
# from an i2c RTC.
#
# This Version is made for Raspberry PI fake-hwclock replacement
#
# Tested on: Debian Wheezy with kernel 3.2.21-rp1+_5_armel.deb (2012-06-23)
# This kernel includes support for i2c and spi!
# --> http://www.bootc.net
#
# Using NTP is still recommended on these machines to get to real time sync
# once more of the system is up and running.
#
# Copyright 2012 Reiner Geiger
#
# License: GPLv2, see COPYING
set -e
FILE=/dev/rtc0
COMMAND=$1
case $COMMAND in
# save: write the kernel clock to the RTC.
save)
	if [ -e $FILE ] ; then
		hwclock -w
		echo "Current system time: $(date -u '+%Y-%m-%d %H:%M') written to RTC"
	else
		echo "No RTC device $FILE found"
	fi
	;;
# load: set the kernel clock from the RTC; if the device node is
# missing, load the i2c/PCF8563 drivers and register the chip first.
load)
	if [ -e $FILE ] ; then
		hwclock -s
		echo "Current system time: $(date -u '+%Y-%m-%d %H:%M') written to RTC"
	else
		modprobe i2c-dev
		modprobe rtc_pcf8563
		command -- echo pcf8563 0x51 > /sys/class/i2c-adapter/i2c-1/new_device
		if [ -e $FILE ] ; then
			hwclock -s
			echo "Current system time: $(date -u '+%Y-%m-%d %H:%M') written to RTC"
		else
			echo "No RTC device $FILE found"
		fi
	fi
	;;
*)
	echo $0: Unknown command $COMMAND
	exit 1
	;;
esac
| true |
665b8b1527c4b02981ff459410ba112444ccc36a | Shell | flywind2/openpnp-capture-java | /scripts/download-openpnp-capture.sh | UTF-8 | 1,496 | 3.625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# $1 is the Github release/tag name to fetch.
if [ "$#" -ne 1 ]; then
    echo "Usage: pull-release-binaries.sh <release/tag name>"
    echo "  Downloads source and binaries for the native portion of the library"
    echo "  from Github releases so that this library can be built."
    exit
fi
# Fresh download directory; the source tarball is unpacked flat.
rm -rf openpnp-capture
mkdir -p openpnp-capture
curl -L -o openpnp-capture/source.tar.gz https://github.com/openpnp/openpnp-capture/archive/$1.tar.gz
tar -C openpnp-capture -xzf openpnp-capture/source.tar.gz --strip 1
# One prebuilt native library per supported platform.
mkdir -p openpnp-capture/binaries/darwin
mkdir -p openpnp-capture/binaries/win32-x86
mkdir -p openpnp-capture/binaries/win32-x86-64
mkdir -p openpnp-capture/binaries/linux-x86-64
mkdir -p openpnp-capture/binaries/linux-arm64
curl -L -o openpnp-capture/binaries/darwin/libopenpnp-capture.dylib https://github.com/openpnp/openpnp-capture/releases/download/$1/libopenpnp-capture.dylib
curl -L -o openpnp-capture/binaries/win32-x86/openpnp-capture.dll https://github.com/openpnp/openpnp-capture/releases/download/$1/openpnp-capture_Win32.dll
curl -L -o openpnp-capture/binaries/win32-x86-64/openpnp-capture.dll https://github.com/openpnp/openpnp-capture/releases/download/$1/openpnp-capture_x64.dll
curl -L -o openpnp-capture/binaries/linux-x86-64/libopenpnp-capture.so https://github.com/openpnp/openpnp-capture/releases/download/$1/libopenpnp-capture.so
curl -L -o openpnp-capture/binaries/linux-arm64/libopenpnp-capture.so https://github.com/openpnp/openpnp-capture/releases/download/$1/libopenpnp-capture-arm64.so
| true |
12d8035010e9d9fc0a14d3f8f5e61f487b5ba87e | Shell | chamnan/dotfiles | /install.sh | UTF-8 | 1,304 | 2.625 | 3 | [] | no_license | #!/bin/bash
# One-shot dotfiles bootstrap: git helpers, symlinked configs,
# Vundle, Powerline fonts and gnome-terminal settings.
curl https://raw.githubusercontent.com/git/git/master/contrib/completion/git-completion.bash -o ~/.git-completion.bash
curl https://raw.githubusercontent.com/git/git/master/contrib/completion/git-prompt.sh -o ~/.git-prompt.sh
git clone https://github.com/chamnan/dotfiles.git ~/.dotfiles
ln -s ~/.dotfiles/vim ~/.vim
ln -s ~/.dotfiles/vimrc ~/.vimrc
ln -s ~/.dotfiles/zshrc ~/.zshrc
ln -s ~/.dotfiles/gitconfig ~/.gitconfig
git clone https://github.com/gmarik/vundle.git ~/.vim/bundle/vundle
# Font Installation
wget https://github.com/Lokaltog/powerline/raw/develop/font/PowerlineSymbols.otf
wget https://github.com/Lokaltog/powerline/raw/develop/font/10-powerline-symbols.conf
mkdir -p ~/.fonts/ && mv PowerlineSymbols.otf ~/.fonts/
mkdir -p ~/.config/fontconfig/conf.d/
mv 10-powerline-symbols.conf ~/.config/fontconfig/conf.d/
cp ~/.dotfiles/fonts/Ubuntu\ Mono\ derivative\ Powerline.ttf ~/.fonts
fc-cache -vf ~/.fonts
# Change Terminal Font to Ubuntu Mono derivative Powerline, size 13
gconftool-2 --set /apps/gnome-terminal/profiles/Default/font --type string "Ubuntu Mono derivative Powerline 13"
gconftool-2 --set /apps/gnome-terminal/profiles/Default/use_system_font --type bool '0'
gconftool-2 --set /apps/gnome-terminal/profiles/Default/scrollback_unlimited --type bool '1'
| true |
72c0761f39fdf6ae8c74e00a29a67b877cd83849 | Shell | yuval1488/containers_by_bazel | /kibana/entrypoint-kibana | UTF-8 | 362 | 3 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
set -o pipefail
# Docker entrypoint: "kibana" starts the server as the kibana user via
# chroot --userspec; any other first argument is exec'd verbatim.
if [ "$1" = 'kibana' ]; then
    # the command bin/kibana executes
    exec chroot --userspec=kibana / node $KIBANA_NODE_OPTIONS --no-warnings "$KIBANA_HOME/src/cli" \
        --config "$CONF_DIR/kibana.yml" \
        --plugin-dir "$KIBANA_HOME/src/core_plugins" \
        --plugin-dir "$KIBANA_PLUGIN_DIR" \
        "${@:2}"
else
    exec "$@"
fi
| true |
5dbff6da1700fb08df94484aee9d481d5e5a2139 | Shell | cmstas/expressTools_UCSD | /allCheck.sh | UTF-8 | 709 | 3.171875 | 3 | [] | no_license | #!/bin/bash
#export VDT_LOCATION=/data/vdt
#export EDG_WL_LOCATION=$VDT_LOCATION/edg
#source /data/vdt/setup.sh
# Forever loop: for every config file, launch a background log-checker
# per dataset, then wait 90 minutes before the next sweep.
ConfigFiles=$@
while [ 1 ]; do
  for Config in $ConfigFiles; do
    # loadConfig.sh sets $Datasets (comma separated).
    . loadConfig.sh $Config
    Datasets=`echo $Datasets | sed 's/,/ /g'`
    for Dataset in $Datasets; do
      DatasetDir_tmp=`echo $Dataset |sed -e 's?/?_?g' `
      DatasetDir="${DatasetDir_tmp:1}"
      touch /data/tmp/${USER}/${DatasetDir}/checkFailedJobs.log && chmod a+r /data/tmp/${USER}/${DatasetDir}/checkFailedJobs.log
      ./checkFailedJobs.sh $Config $Dataset 2>&1 | ./appendTimeStamp.sh >> /data/tmp/${USER}/${DatasetDir}/checkFailedJobs.log &
      # BUG FIX: $! is the PID of the background pipeline just started;
      # the old $$ printed this script's own PID for every dataset.
      echo "checkFailedJobs.sh PID is $!"
    done
    sleep 5400
  done
done
| true |
0e932561ad869eb59059de52534d01f7d3bf6ce7 | Shell | j-maynard/terminal-config | /scripts/mac-setup.sh | UTF-8 | 2,821 | 3.9375 | 4 | [] | no_license | #!/bin/bash
# Remember the invocation directory (not referenced again in this chunk).
STARTPWD=$(pwd)
realpath() {
    # Print an absolute path for each operand. Option-style flags are
    # ignored so the `realpath -s $0` call below resolves $0 instead of
    # treating "-s" as the path (the old one-liner returned "$PWD/-s").
    local arg
    for arg in "$@"; do
        case "$arg" in
            -*) ;;                       # swallow flags for compatibility
            /*) echo "$arg" ;;           # already absolute
            *)  echo "$PWD/${arg#./}" ;; # make relative paths absolute
        esac
    done
}
# Absolute path of this script and its directory.
SCRIPT=`realpath -s $0`
SCRIPTPATH=`dirname $SCRIPT`
# Allow the caller to override the raw-content repo URL via environment.
if [ -z $GIT_REPO ]; then
    GIT_REPO="https://raw.githubusercontent.com/j-maynard/terminal-config/main"
fi
# Define colors and styles
# (literal escape sequences, expanded later by echo -e)
normal="\033[0m"
bold="\033[1m"
green="\e[32m"
red="\e[31m"
yellow="\e[93m"
usage() {
    # Print the CLI usage summary; %b expands the escape sequences held
    # in the color variables exactly as `echo -e` did.
    printf '%b\n' \
        "Usage:" \
        "  ${bold}${red}-V --verbose${normal} Shows command output for debugging" \
        "  ${bold}${red}-v --version${normal} Shows version details and exit" \
        "  ${bold}${red}-h --help${normal} Shows this usage message and exit"
}
version() {
    # Print the version banner.
    # NOTE(review): the text still says "Ubuntu" although this is the
    # macOS setup script -- kept byte-identical on purpose.
    printf '%b\n' \
        "Ubuntu Setup Script Version 0.8" \
        "(c) Jamie Maynard 2021"
}
show_msg() {
    # Emit a bold message straight to the controlling terminal so it is
    # visible even while stdout is redirected to /dev/null.
    local message="${bold}${1}${normal}"
    echo -e "$message" > /dev/tty
}
show_pkgs() {
    # Render the given package names as a sorted 4-column table on the
    # controlling terminal.
    PKGS=("$@")
    # Word-splitting of the command substitution rebuilds the array in
    # sorted order (assumes package names contain no whitespace/globs).
    SORTED_PKGS=($(for a in "${PKGS[@]}"; do echo "$a "; done | sort))
    col=0
    for pkg in ${SORTED_PKGS[@]}; do
        if [[ $col == '3' ]]; then
            # Every 4th entry ends the row; the literal \n is expanded
            # by the echo -e below.
            pkg_out="${pkg_out}${pkg}\n"
            col=0
        else
            pkg_out="${pkg_out}${pkg} | "
            col=$(expr $col + 1)
        fi
    done
    echo -e ${pkg_out[@]} | column -t -s "|" > /dev/tty
    unset pkg_out
}
install_homebrew() {
    # Install Homebrew (Apple-silicon prefix /opt/homebrew) if missing,
    # then load its shell environment into this process.
    show_msg "Installing Homebrew..."
    if [ ! -f /opt/homebrew/bin/brew ]; then
        /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
    fi
    eval "$(/opt/homebrew/bin/brew shellenv)"
}
add_taps() {
    # Register the third-party tap providing AdoptOpenJDK casks.
    show_msg "Adding tap for OpenJDK..."
    brew tap adoptopenjdk/openjdk
}
install_brews() {
    # Formula names installed in one brew invocation; the trailing
    # command also installs every Nerd Font variant brew can find.
    BREWS=("python" "maven" "openssh" "htop" "links" "coreutils" "tree" "tmux" "most"
    "gnupg" "jenv" "rbenv" "lsd" "bat" "ncspot" "yq" "jq" "jed" "neovim" "awscli" "go"
    "visual-studio-code" "discord" "iterm2" "obsidian" "antibody" "openjdk@11"
    "openjdk@17" "insomnia" "google-chrome" "multipass" "fzf" "pinentry-mac")
    show_msg "Installing the following brews using homebrew:"
    show_pkgs "${BREWS[@]} nerd-fonts"
    # FIX: dropped the pointless `BREWS="${BREWS[@]}"` array flatten and
    # quoted the expansion so brew gets one argument per formula.
    brew install "${BREWS[@]}"
    brew install $( brew search font | grep nerd | tr '\n' ' ' )
}
install_casks() {
    # Cask (GUI app) list; quoted expansion hands brew one argument per
    # cask name.
    CASKS=("aws-vault" "emacs" "1password-cli" "macvim" "powershell" "vimr")
    show_msg "Installing the following casks using homebrew:"
    show_pkgs "${CASKS[@]}"
    brew install --cask "${CASKS[@]}"
}
setup_jdk() {
    # Expose the brewed OpenJDK to the system Java wrappers by linking
    # it into /Library/Java/JavaVirtualMachines.
    sudo ln -sfn $(brew --prefix)/opt/openjdk/libexec/openjdk.jdk /Library/Java/JavaVirtualMachines/openjdk.jdk
}
# Silence output
if [[ $VERBOSE == "false" ]]; then
    echo "Silencing output"
    GIT_QUIET="-q"
    # NOTE(review): this /dev/tty redirect is immediately overridden by
    # the /dev/null one on the next line -- confirm which was intended.
    exec > /dev/tty
    exec > /dev/null
fi
# FUNC/FUNC_NAME allow invoking a single named setup step and exiting.
if [[ $FUNC == "true" ]]; then
    $FUNC_NAME
    exit $?
fi
# Default flow: full macOS setup.
show_msg "Setting up Mac apps and terminal apps..."
install_homebrew
add_taps
install_brews
install_casks
setup_jdk
bb59418240d564129006b5167ec2bbeb661048b9 | Shell | carlsmedstad/dotfiles | /.local/bin/crypt-run | UTF-8 | 287 | 3.234375 | 3 | [
"MIT"
] | permissive | #!/bin/sh -eu
# Wrapper: source secrets from an encrypted state file (only when this
# user owns it), then exec the real binary of the same name in /bin.
xdg_state_home=${XDG_STATE_HOME:-$HOME/.local/state}
crypt_file=${CRYPT_FILE:-$xdg_state_home/crypt}
if [ "$(id -u)" = "$(stat -c '%u' "$crypt_file")" ]; then
  # The decrypted payload is evaluated as shell (e.g. exports).
  eval "$(gpg --decrypt "$crypt_file" 2> /dev/null)"
fi
# shellcheck disable=SC2046
/bin/$(basename "$0") "$@"
| true |
8513399fb131dbeed19fef04367730734f1dc736 | Shell | isabella232/gnome-i18n | /el/README.glossary.tool | UTF-8 | 2,091 | 3.453125 | 3 | [] | no_license |
Documentation file that is out of date. Do not use.
It will go away soon. Read the README file instead.
=====
A way to extract the terms from the GnomeGlossary.gnumeric file
into a .po file for translation.
*** REQUIREMENTS
- A recent installation of Perl
- expat library (from http://sourceforge.net/projects/expat/)
- XML::Parser Perl module
- XML::Twig Perl module
*** INSTALLATION
A. For the expat library, download the latest version of the library
(at this moment it's: http://download.sourceforge.net/expat/expat-1.95.1.tar.gz)
and
. ./configure
. make
. make install
B. For the two Perl modules, you can install them easily with
# perl -MCPAN -eshell
cpan shell -- CPAN exploration and modules installation (v1.59)
ReadLine support enabled
cpan> install XML::Parser
...
cpan> install XML::Twig
...
If it is the first time you run perl like that, it may ask you some
configuration questions.
*** USAGE
At the moment, we cover two use cases.
1. You want to start translating from scratch. You run
xml-i18n-update --pot GnomeGlossary.gnumeric
A glossary.pot is created. Rename it to XX.po and you may
start translating.
For the header of the .po file, you may edit it to add your local
information. (At the time of writing, the header says not to put in
any personal information; this document takes precedence until that
is fixed.)
The first comment line for each term has a significant role. It's the
term description provided in the .gnumeric file. Please do not alter
it. If you want to add your own comments, do so on a second comment line.
At the moment, please use a single line for the comment.
In the future there will be functionality to put the .po file back
in the .gnumeric document.
For now, you only use your XX.po file.
2. You have already done some work on your copy of GnomeGlossary.gnumeric
and you want to make a nice XX.po file out of it.
Suppose your language is "Es". Just do:
xml-i18n-update es
You will get an es.po file containing the translations that have already been made.
*** FEEDBACK
For any feedback, please contact Simos Xenitellis <simos@hellug.gr>.
| true |
033dd2e6b07886d58b45331b18525be46e83f1fd | Shell | ziqbal/cassopi | /scripts/retrieve.sh | UTF-8 | 329 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Fetch all JPEGs from the Raspberry Pi's /tmp, unpack them locally
# and open the result in Finder.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $DIR/..
# APP is derived but not used below in this script.
APP=${PWD##*/}
IP="192.168.1.13"
############################################################
ssh pi@$IP 'tar zcvf tmp.tar.gz /tmp/*.jpg'
scp pi@$IP:tmp.tar.gz /tmp/tmp.tar.gz
ssh pi@$IP 'rm tmp.tar.gz'
cd /tmp/
tar zxvf tmp.tar.gz
open tmp
| true |
cb8b87fb3f8bc36f8ad96cb51c642b7f0aa95cee | Shell | erwincoumans/pytorch | /.jenkins/pytorch/build-mobile-code-analysis.sh | UTF-8 | 536 | 2.75 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | #!/usr/bin/env bash
# DO NOT ADD 'set -x' not to reveal CircleCI secret context environment variables
set -eu -o pipefail
# This script builds and runs code analyzer tool to generate aten op dependency
# graph for custom mobile build.
# shellcheck disable=SC2034
COMPACT_JOB_NAME="${BUILD_ENVIRONMENT}"
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
echo "Clang version:"
clang --version
# The analyzer needs the LLVM install prefix matching clang 5.0.
export LLVM_DIR="$(llvm-config-5.0 --prefix)"
echo "LLVM_DIR: ${LLVM_DIR}"
time ANALYZE_TEST=1 CHECK_RESULT=1 tools/code_analyzer/build.sh
| true |
6d123db8dbf61440f94ac93a2417c0d7d5a9fa3f | Shell | kotoroshinoto/LabScripts | /cluster_scripts/user_profile/.bash_profile | UTF-8 | 427 | 2.578125 | 3 | [] | no_license | source /UCHC/HPC/Everson_HPC/cluster_scripts/user_profile/modules.sh
export MODULEPATH=/UCHC/HPC/Everson_HPC/cluster_scripts/modulefiles:$MODULEPATH
module load EversonLabBiotools/1.0
#module load hugeseq/1.0
# Reload the EversonLabBiotools environment module so that modulefile edits
# take effect in the current shell.  Note: if the unload fails, the module is
# NOT re-loaded (&& short-circuits) — presumably intentional, to avoid a
# double load.
function reloadmod {
    module unload EversonLabBiotools/1.0 && module load EversonLabBiotools/1.0
}
# List the targets defined by the Makefile in the current directory.
# 'make -qp' dumps make's internal rule database; the awk filter keeps lines
# that look like rule definitions ("target:" not containing $, #, /, tab or
# '='), splits multi-target rules on spaces, and prints each target name.
function listmake {
    make -qp | awk -F':' '/^[a-zA-Z0-9][^$#\/\t=]*:([^=]|$)/ {split($1,A,/ /);for(i in A)print A[i]}'
}
f3b64116d2a6e6b66da3cb905465648ce6ddd871 | Shell | scotthain/omnibus-open-vm-tools | /vmtoolsd | UTF-8 | 4,852 | 4.0625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
#
# vmtoolsd - Open VMware Tools Daemon
#
# chkconfig: - 80 20
# description: Open VMware Tools daemon for virtual machines hosted on VMware.
### BEGIN INIT INFO
# Required-Start: $local_fs
# Required-Stop: $local_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Open VMware Tools daemon for virtual machines hosted on VMware.
# Description: Open VMware Tools daemon is part of open-vm-tools project,
# an open source implementation of VMware Tools. It loads
# various open-vm-tools plugins and makes their services
# available to the guest OS for improved usability and to
# the VM management applications for easier administration.
### END INIT INFO
set -x
# Source function library.
. /etc/init.d/functions
# A function to start a program.
# Local re-implementation of the RHEL sysvinit 'daemon' helper:
#   [+/-N]           run the command under 'nice -n N'
#   --check NAME     use NAME (instead of the command basename) for pid lookup
#   --user USER      run the command as USER via runuser
#   --pidfile FILE   pid file consulted to detect an already-running instance
#   --force          start even if the pid file says it is already running
# Also honors NICELEVEL, DAEMON_COREFILE_LIMIT and CGROUP_DAEMON from
# /etc/sysconfig/<prog>.  Reports the outcome via the sysvinit
# success/failure helpers sourced from /etc/init.d/functions.
daemon() {
	# Test syntax.
	local gotbase= force= nicelevel corelimit
	local pid base= user= nice= bg= pid_file=
	local cgroup=
	nicelevel=0
	# Consume leading option words (anything starting with '-' or '+').
	while [ "$1" != "${1##[-+]}" ]; do
	  case $1 in
	    '')    echo $"$0: Usage: daemon [+/-nicelevel] {program}"
	           return 1;;
	    --check)
	           base=$2
	           gotbase="yes"
	           shift 2
	           ;;
	    --check=?*)
	           base=${1#--check=}
	           gotbase="yes"
	           shift
	           ;;
	    --user)
	           user=$2
	           shift 2
	           ;;
	    --user=?*)
	           user=${1#--user=}
	           shift
	           ;;
	    --pidfile)
	           pid_file=$2
	           shift 2
	           ;;
	    --pidfile=?*)
	           pid_file=${1#--pidfile=}
	           shift
	           ;;
	    --force)
	           force="force"
	           shift
	           ;;
	    [-+][0-9]*)
	           nice="nice -n $1"
	           shift
	           ;;
	    *)     echo $"$0: Usage: daemon [+/-nicelevel] {program}"
	           return 1;;
	  esac
	done
	# Save basename.
	[ -z "$gotbase" ] && base=${1##*/}
	# See if it's already running. Look *only* at the pid file.
	__pids_var_run "$base" "$pid_file"
	[ -n "$pid" -a -z "$force" ] && return
	# make sure it doesn't core dump anywhere unless requested
	corelimit="ulimit -S -c ${DAEMON_COREFILE_LIMIT:-0}"
	# if they set NICELEVEL in /etc/sysconfig/foo, honor it
	[ -n "${NICELEVEL:-}" ] && nice="nice -n $NICELEVEL"
	# if they set CGROUP_DAEMON in /etc/sysconfig/foo, honor it
	if [ -n "${CGROUP_DAEMON}" ]; then
		if [ ! -x /bin/cgexec ]; then
			echo -n "Cgroups not installed"; warning
			echo
		else
			cgroup="/bin/cgexec";
			# Build one '-g controller:path' pair per CGROUP_DAEMON entry.
			for i in $CGROUP_DAEMON; do
				cgroup="$cgroup -g $i";
			done
		fi
	fi
	# Echo daemon
	[ "${BOOTUP:-}" = "verbose" -a -z "${LSB:-}" ] && echo -n " $base"
	# And start it up.
	# The ulimit is applied inside 'bash -c' so it affects only the child;
	# runuser is used when a target user was requested.
	if [ -z "$user" ]; then
	   $cgroup $nice /bin/bash -c "$corelimit >/dev/null 2>&1 ; $*"
	else
	   $cgroup $nice runuser -s /bin/bash $user -c "$corelimit >/dev/null 2>&1 ; $*"
	fi
	[ "$?" -eq 0 ] && success $"$base startup" || failure $"$base startup"
}
exec="/usr/bin/vmtoolsd"
prog=${exec##*/}
[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
lockfile=/var/lock/subsys/$prog
# Launch vmtoolsd in the background via the daemon() helper and record a
# subsystem lock file on success.  Exits 5 when the binary is not executable.
start() {
    [ -x $exec ] || exit 5
    echo -n $"Starting $prog: "
    daemon $exec --background=/var/run/$prog.pid
    retval=$?
    echo
    if [ $retval -eq 0 ]; then
        touch $lockfile
    fi
    return $retval
}
# Terminate vmtoolsd with the sysvinit killproc helper and drop the
# subsystem lock file on success.
stop() {
    echo -n $"Stopping $prog: "
    killproc $prog
    retval=$?
    echo
    if [ $retval -eq 0 ]; then
        rm -f $lockfile
    fi
    return $retval
}
# Full restart: stop the daemon (force-kill if needed), then start it again.
restart() {
    stop
    start
}
# Report service state using the generic sysvinit 'status' helper
# (sourced from /etc/init.d/functions).
rh_status() {
    # run checks to determine if the service is running or use generic status
    status $prog
}
# Quiet status probe: same as rh_status but with all output discarded, so
# callers can branch on the exit code alone.
rh_status_q() {
    rh_status >/dev/null 2>&1
}
# Dispatch the requested init action.  start/stop are made idempotent by
# probing the current state with rh_status_q first.
case "$1" in
    start)
        rh_status_q && exit 0   # already running: nothing to do
        $1
        ;;
    stop)
        rh_status_q || exit 0   # already stopped: nothing to do
        $1
        ;;
    restart)
        $1
        ;;
    force-reload)
        restart
        ;;
    status)
        rh_status
        ;;
    try-restart|condrestart)
        rh_status_q || exit 0   # restart only if currently running
        restart
        ;;
    reload)
        # vmtoolsd has no reload; report "unimplemented" per LSB (exit 3).
        action $"Service ${0##*/} does not support the reload action: " /bin/false
        exit 3
        ;;
    *)
        echo $"Usage: $0 {start|stop|status|restart|try-restart|force-reload}"
        exit 2
esac
exit $?
0f69594b42b92d0a46eb439b469e78249e221cec | Shell | biddyweb/cloudify-packager | /vagrant/docker_images/provision.sh | UTF-8 | 1,061 | 3.703125 | 4 | [] | no_license | #! /bin/bash -e
CORE_TAG_NAME="master"
install_docker()
{
curl -sSL https://get.docker.com/ubuntu/ | sudo sh
}
setup_jocker_env()
{
sudo apt-get install -y python-pip
}
clone_packager()
{
git clone https://github.com/cloudify-cosmo/cloudify-packager.git $1
pushd $1
git checkout -b tmp_branch $CORE_TAG_NAME
git log -1
popd
}
build_images()
{
CLONE_LOCATION=/tmp/cloudify-packager
clone_packager $CLONE_LOCATION
cp /cloudify-packager/docker/metadata/* /tmp/cloudify-packager/docker/metadata/
setup_jocker_env
echo Building cloudify stack image.
pushd $CLONE_LOCATION
./docker/build.sh $CLONE_LOCATION
popd
}
start_and_export_containers()
{
sudo docker run -t --name=cloudify -d cloudify:latest /bin/bash
sudo docker export cloudify > /tmp/cloudify-docker_.tar
sudo docker run -t --name=cloudifycommercial -d cloudify-commercial:latest /bin/bash
sudo docker export cloudifycommercial > /tmp/cloudify-docker_commercial.tar
}
main()
{
install_docker
build_images
start_and_export_containers
}
main
| true |
f581ce1c2f8b79f9968fef4189d8cf45111c498c | Shell | yukimura1227/dotfiles | /for_mac_only/setup_imgcommand_for_iterm2.sh | UTF-8 | 228 | 3.25 | 3 | [] | no_license | #!/bin/bash
BASE_DIR=`dirname ${0}`
setup_targets=(imgls imgcat)
for target in ${setup_targets[@]}
do
destinate_path=/usr/local/bin/${target}
cp -p ${BASE_DIR}/${target} ${destinate_path}
chmod +x ${destinate_path}
done
| true |
ced56e2ef850fe0662a12c79be49460742850fec | Shell | bemopu1980/Shell-Scripts | /Tema 4 tarea3/1.sh | UTF-8 | 515 | 3.34375 | 3 | [] | no_license | #!/bin/bash
max=`cat nombres.txt | wc -l`
cont=1
actual=1
while [ $actual -le $max ]; do
max=`cat nombres.txt | head -$actual | tail -1`
actual=$((actual+1))
mkdir $i
while [ $cont -le $1 ]; do
mkdir $i/capeta$cont
cont=$((cont+1))
done
actual=$((actual+1))
cont=1
done
#Alternativa con while
#for i in $(cat nombres.txt); do
# mkdir $i
#max=`cat nombres.txt | wc -l`
#actual=1
#while [ $actual -le $max ]; do
# i=`cat nombres.txt | head -$actual | tail -1`
#actual=$((actual+1))
#done
| true |
a8cf1b9edc3f28e3cd2f2bc2e49165700bdabb87 | Shell | worldofprasanna/showcase-terraform-k8s | /infrastructure_provisioning/eks/setup_eks_cluster.sh | UTF-8 | 620 | 2.875 | 3 | [] | no_license | #!/bin/bash
set -e
echo "=========== Persist the terraform output =========="
terraform output kubeconfig > k8s/kubeconfig
echo "=========== Update the kube config manually =========="
echo "It is a manual step. Add cluster, user, context for accessing k8s"
echo "=========== Setting up the worker nodes to join the cluster =========="
terraform output config_map_aws_auth > configmap.yml
kubectl apply -f configmap.yml
echo "=========== Setting up the tiller =========="
kubectl apply -f tiller-user.yaml
helm init --service-account tiller
echo "=========== Setup completed for k8s. Happy Helming !!! =========="
| true |
7c6368acbdab90850239907aa2cf6a972ab62581 | Shell | wennergr/tobiasconf | /shell/hrun | UTF-8 | 286 | 2.578125 | 3 | [] | no_license | #!/bin/bash
###############################################################################
# Compile and run haskell programs
# Syntax: hrun [program name]
set -e
runhaskell Setup configure --prefix=$HOME --user
runhaskell Setup build
runhaskell Setup install
# Execute command
$@
| true |
907af8a35ba5d190379ceecba665cfdc39ce4584 | Shell | kevday/FedoraClean | /swap.sh | UTF-8 | 392 | 2.59375 | 3 | [] | no_license | clear
echo -e ' \033[44;1;37m Add swapfile 4GB \033[0m'
echo '--------------------------------------------------------------------'
dd if=/dev/zero of=/swapfile bs=1024 count=4194304
mkswap /swapfile
chmod 600 /swapfile
swapon /swapfile
echo '/swapfile swap swap defaults 0 0' >> /etc/fstab
mkdir /root/Desktop /root/Documentos /root/Downloads /root/Imagens /root/Música /root/Vídeos
exit
| true |
5610e8d0d256ffc66529748dcb462ad49f5180be | Shell | vbondzio/sowasvonunsupported | /vmid2name.sh | UTF-8 | 1,223 | 3.4375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# https://github.com/vbondzio/sowasvonunsupported/blob/master/vmid2name.sh
# lists most of the ids you might need to "identify" a certain VM
# we don't want errors by default because some vsi nodes might already be partially destroyed
exec 2> /dev/null
printf "%+12s %+12s %+12s %+12s %-25s %-30s\n" "WID" "CID" "GID" "LWID" "displayName" "workDir"
# https://stackoverflow.com/questions/38861895/how-to-repeat-a-dash-hyphen-in-shell
printf -- '-%.0s' $(seq 150); echo ""
for vm in $(vsish -e ls /vm/ | sort -n)
do
cartel=$(vsish -e get /vm/${vm}vmxCartelID)
group=$(vsish -e get /sched/Vcpus/${cartel}/groupID | sed 's/[ \t]*$//')
vcpulead=$(vsish -e get /sched/groups/${group}/vcpuLeaderID | sed 's/[ \t]*$//')
vmname=$(vsish -e get /world/${vcpulead}/name | cut -d : -f 2-)
if [[ "${#vmname}" -ge "24" ]]
then
vmnameShort=$(echo ${vmname} | cut -c -20)
vmname="${vmnameShort}(..)"
fi
dir=$(vsish -e get /userworld/cartel/${cartel}/cmdline | grep -o /vmfs/volumes.* | cut -d / -f 4-5)
printf "%+12s %+12s %+12s %+12s %-25s %-35s\n" ${vm%%/} ${cartel} ${group} ${vcpulead} "${vmname%%/}" "${dir}"
done
| true |
578b6c553c5d91ae06a5e81594563f0117a25eb5 | Shell | sztupy/tumblr-neo4j | /docker/steps/E1-calculate-pagerank.sh | UTF-8 | 260 | 2.828125 | 3 | [
"MIT"
] | permissive | echo "# Running pageRank iterations"
docker cp neo4j/pageRank.cyp neo4j-tumblr:/var/lib/neo4j/import
for i in `seq 1 50`;
do
echo "# Iteration $i"
docker exec -ti neo4j-tumblr /var/lib/neo4j/bin/neo4j-shell -file /var/lib/neo4j/import/pageRank.cyp
done
| true |
c0dc693c4c305932d1a516f14a512e8e1c4d14e7 | Shell | GunterMueller/xds | /WINDOWS/Build/build_git.bat | UTF-8 | 1,898 | 3.65625 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | @echo off
rem Build XDS product from a Git repository
rem Usage: build_git.bat local|remote [branch]
rem local - clone local Git repository
rem remote - clone remote Git repository
rem brach - branch name to checkout (by default "work")
set XDS_PRODUCT=XDS-x86
set BUID_EXT_LOG_DIR=%~dp0\log
set BUID_EXT_LOG_FILE=%BUID_EXT_LOG_DIR%\build_git.log
call %~dp0\..\.config\config.bat XDS_BUILD_GIT_DIR || goto lbl_Error
if "%1" == "local" (
set GIT_REPOSITORY=%~dp0\..
) else if "%1" == "remote" (
set GIT_REPOSITORY=https://github.com/excelsior-oss/xds.git
) else (
goto :lbl_Error_Invalid_argument
)
set GIT_BRANCH=work
if not "%2" == "" set GIT_BRANCH=%2
echo Setup %XDS_PRODUCT% Build Environment in "%XDS_BUILD_GIT_DIR%
echo Repository: %GIT_REPOSITORY%
echo Branch: %GIT_BRANCH%
if not exist "%BUID_EXT_LOG_DIR%" mkdir "%BUID_EXT_LOG_DIR%" || goto lbl_Error
if exist "%BUID_EXT_LOG_FILE%" del /Q "%BUID_EXT_LOG_FILE%" || goto lbl_Error
if exist "%XDS_BUILD_GIT_DIR%" rmdir /Q /S "%XDS_BUILD_GIT_DIR%"
mkdir "%XDS_BUILD_GIT_DIR%"
call git clone "%GIT_REPOSITORY%" "%XDS_BUILD_GIT_DIR%" 1> "%BUID_EXT_LOG_FILE%" 2>&1
if errorlevel 1 goto lbl_Error
rem Copy local configuration
xcopy "%~dp0\..\.config\*.env-*.bsc" "%XDS_BUILD_GIT_DIR%\.config\" /Y
pushd "%XDS_BUILD_GIT_DIR%\Build"
call git checkout %GIT_BRANCH% 1>> "%BUID_EXT_LOG_FILE%" 2>>&1
if errorlevel 1 goto lbl_Error
echo.
call build.bat Release
popd
goto :EOF
:lbl_Error_Invalid_argument
echo Invalid argument: %~nx0 %*
echo.
echo Usage: %~nx0 local^|remote [branch]
echo local - clone local Git repository
echo remote - clone remote Git repository
echo brach - branch name to checkout (by default "work")
exit /B 1
:lbl_Error
echo ******* Build %XDS_PRODUCT% ******* Failed! =^> "%BUID_EXT_LOG_FILE%"
exit /B 1
| true |
5cd2e9ae61a631e04eb1d7a6e334a26cb845dd9d | Shell | AleksaMCode/kriptografija-i-racunarska-zastita | /kolokvijum_20170118/z06/solution.sh | UTF-8 | 1,248 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# Set up the openssl.cnf directory layout first.
mkdir {private,crl,requests,certs,newcerts}
touch index.txt crlnumber serial
mv private.key private/
# Create the root CA (self-signed certificate from the existing key).
openssl req -x509 -new -key private/private.key -out cacert.crt -config openssl.cnf
# Generate three new 2048-bit RSA keys.
for i in {1..3}; do openssl genrsa -out private/key$i.pem 2048; done
# First certificate:
# Not a CA; keyUsage = cRLSign, digitalSignature (taken from openssl.cnf).
echo 12 > serial
openssl req -new -key private/key1.pem -out requests/req1.csr -config openssl.cnf
# Certificate issued for 2 months.
openssl ca -in requests/req1.csr -out certs/c1.pem -config openssl.cnf -days 60
# Third certificate:
# Also not a CA, like the first; keyUsage is unspecified, so the same
# configuration as the first certificate is kept.
echo 15 > serial
openssl req -new -key private/key3.pem -out requests/req3.csr -config openssl.cnf
# Certificate issued for one year.
openssl ca -in requests/req3.csr -out certs/c3.pem -config openssl.cnf -days 365
# Second certificate:
# This certificate must be a CA with keyUsage = encipherOnly.
echo 14 > serial
openssl req -new -key private/key2.pem -out requests/req2.csr -config openssl.cnf
# Signed for 6 months.
openssl ca -in requests/req2.csr -out certs/c2.pem -config openssl.cnf -days 180
| true |
08543d4746eecd17d56e039803e3d089d79d4f6b | Shell | ryobot/box_backup | /upload_file | UTF-8 | 603 | 3.3125 | 3 | [] | no_license | #!/bin/sh
# Upload a local file to a Box folder via the Box Upload API.
#   $1 - destination Box folder id
#   $2 - path of the local file to upload
#   $3 - name to give the file in Box
# Exits 0 on success (HTTP 201), 2 on any API error.
FOLDER_ID=$1
FILE_PATH=$2
FILE_NAME=$3

echo "" > upload_file_response

# Read the current OAuth access token produced by the auth step.
TOKEN=$(jq -r '.access_token' box_token.json)

# -w '%{http_code}' yields the status on stdout; the JSON body goes to the
# response file via -o.
HTTP_CODE=$(curl -s https://upload.box.com/api/2.0/files/content -H "Authorization: Bearer ${TOKEN}" -X POST -F attributes='{"name":"'"$FILE_NAME"'","parent":{"id":"'"$FOLDER_ID"'"}}' -F file=@"$FILE_PATH" -w '%{http_code}\n' -o upload_file_response)

# '=' (not the bashism '==') keeps this portable under /bin/sh; quoting
# survives an empty HTTP_CODE when curl itself fails.
if [ "$HTTP_CODE" = 201 ] ; then
  echo "File $FILE_PATH successfully uploaded as $FILE_NAME in folder $FOLDER_ID"
  exit 0
fi

jq '.' upload_file_response
echo "Error in upload_file : http status code is $HTTP_CODE"
exit 2
227ba40e755ac5e425c1c97bc6af261ecf2034c5 | Shell | kmacoskey/pxf | /server/pxf-service/src/scripts/pxf | UTF-8 | 15,981 | 3.890625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# pxf-service start/stop/initialize/status the PXF instance
#
parent_script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
# establish PXF_HOME and global vars used by all commands
# do not allow overrides of PXF_HOME so that his script only manages this given installation
export PXF_HOME=$parent_script_dir
default_env_script=${PXF_HOME}/conf/pxf-env-default.sh
tomcat_root=${PXF_HOME}/apache-tomcat
tomcat_templates=${PXF_HOME}/templates/tomcat
instance=${PXF_HOME}/pxf-service
SED_OPTS=(-i)
if [[ $OSTYPE == "darwin"* ]]; then
SED_OPTS+=("")
fi
SED_OPTS+=(-e)
function validate_user()
{
# make sure current user is not root
if (( EUID == 0 )); then
fail 'Cannot run as root user'
fi
}
function confirm_with_user()
{
echo '*****************************************************************************'
echo '* PXF_CONF variable is not defined, using default location'
echo "* Using ${PXF_CONF} for user configurations"
echo '* WARNING: using this directory is not recommended for production deployments'
echo '*****************************************************************************'
read -rp 'Do you want to proceed? [Y]: ' answer
answer=${answer:-'Y'}
case ${answer:0:1} in
y|Y)
echo 'Proceeding with the initialization'
;;
*)
echo 'Initialization aborted'
exit 1
;;
esac
}
# Determine the PXF_CONF location for user-editable configuration.
# When the caller did not export PXF_CONF before 'pxf init', default to
# $HOME/pxf (asking for confirmation unless -y/silent mode is active), then
# bake the chosen path into conf/pxf-env-default.sh so later commands see it.
function update_pxf_conf()
{
    if [[ -z $PXF_CONF ]]; then
        # PXF_CONF was not specified during init command
        # need to default to user's home directory and ask for confirmation, unless silent mode is used
        local current_user pxf_user_home
        current_user=$( id -un )
        pxf_user_home=$HOME
        if [[ -z $pxf_user_home ]]; then
            fail "PXF_CONF is not set and user $current_user home directory is not defined"
        elif [[ ! -d ${pxf_user_home} ]]; then
            fail "PXF_CONF is not set and user $current_user home directory $pxf_user_home does not exist"
        else
            export PXF_CONF=${pxf_user_home}/pxf
            if [[ $silent == false ]]; then
                confirm_with_user
            fi
        fi
    fi
    echo "Using $PXF_CONF as a location for user-configurable files"
    # update the value on PXF_CONF in the default env file
    # (rewrites the ${PXF_CONF:-...} fallback inside pxf-env-default.sh)
    sed "${SED_OPTS[@]}" "s|{PXF_CONF:-.*}$|{PXF_CONF:-\"${PXF_CONF}\"}|g" "$default_env_script"
}
# Load the PXF runtime environment by sourcing conf/pxf-env-default.sh
# (which may in turn source the user's pxf-env.sh).  When the caller exports
# PRESERVE_JAVA_HOME=true, a JAVA_HOME set before the call wins over
# whatever the sourced scripts assign.
function getEnvironment()
{
    local USER_SPECIFIED_JAVA_HOME=${JAVA_HOME}
    # load default environment
    if [[ ! -f $default_env_script ]]; then
        fail "Failed to find $default_env_script"
    fi
    # shellcheck source=/Users/pivotal/workspace/pxf/server/pxf-service/src/scripts/pxf-env-default.sh
    source "$default_env_script"
    # we may have just overridden the user's intended JAVA_HOME
    # by sourcing PXF_CONF/conf/pxf-env.sh
    [[ ${PRESERVE_JAVA_HOME} == true ]] && JAVA_HOME=${USER_SPECIFIED_JAVA_HOME}
}
function checkJavaHome()
{
# validate JAVA_HOME
if [[ ! -x ${JAVA_HOME}/bin/java ]]; then
fail "\$JAVA_HOME=$JAVA_HOME is invalid. Set \$JAVA_HOME in your environment before initializing PXF."
fi
}
# Print an error message to stderr and abort the whole script with status 1.
# Writing to stderr (instead of stdout) keeps error text visible even when a
# caller's stdout is redirected, e.g. 'doStatus >/dev/null' in doReset.
function fail()
{
    echo "ERROR: $1" >&2
    exit 1
}
#
# createInstance creates a tomcat instance and
# configures based on pre-configured template files.
#
function createInstance()
{
mkdir -p "$instance"
if ! cp -r "$tomcat_root"/* "$instance"; then
echo 'ERROR: instance creation failed'
return 1
fi
chmod 700 "$instance"
# copy configuration files into instance
cp "$tomcat_templates"/bin/{kill-pxf,setenv,catalina}.sh "${instance}/bin"
cp "$tomcat_templates"/conf/{logging.properties,{server,web}.xml} "${instance}/conf"
return 0
}
#
# deployWebapp adds the pxf-webapp to the new instance's webapps folder
# and the custom loader to the instance's lib directory
#
function deployWebapp()
{
cp "${PXF_HOME}/lib/pxf.war" "${instance}/webapps" || return 1
cp "$PXF_HOME"/lib/pxf-service-*[0-9]*.jar "${instance}/lib" || return 1
return 0
}
#
# waitForTomcat waits for tomcat to finish loading
# for given attempts number.
#
function waitForTomcat()
{
attempts=0
max_attempts=$1 # number of attempts to connect
sleep_time=1 # sleep 1 second between attempts
# wait until tomcat is up:
sleep 2
echo 'Checking if tomcat is up and running...'
until $curl --silent --connect-timeout 1 -I "http://localhost:$PXF_PORT" | grep 'PXF Server'; do
if (( ++attempts == max_attempts )); then
echo 'ERROR: PXF is down - tomcat is not running'
return 1
fi
echo "tomcat not responding, re-trying after $sleep_time second (attempt number ${attempts})"
sleep $sleep_time
done
return 0
}
#
# checkWebapp checks if tomcat is up for $1 attempts and then
# verifies PXF webapp is functional
#
function checkWebapp()
{
waitForTomcat "$1" || return 1
echo 'Checking if PXF webapp is up and running...'
curlResponse=$($curl -s "http://localhost:${PXF_PORT}/pxf/v0")
expectedResponse='Wrong version v0, supported version is v[0-9]+'
if [[ $curlResponse =~ $expectedResponse ]]; then
echo "PXF webapp is listening on port $PXF_PORT"
return 0
fi
echo 'ERROR: PXF webapp is inaccessible but tomcat is up. Check logs for more information'
return 1
}
# Succeed (0) when the PXF Tomcat instance exists: its directory must be
# present AND its bundled catalina.sh must respond to 'version'.  The status
# of that command is propagated to the caller.
function instanceExists()
{
    [[ -d $instance ]] || return 1
    "${instance}/bin/catalina.sh" version > /dev/null 2>&1
}
function checkInstance()
{
instanceExists || fail 'Cannot find PXF instance, maybe call init?'
}
function createLogsDir()
{
# logs dir is likely outside PXF_HOME, setup only if it does not yet exist
if [[ ! -d $PXF_LOGDIR ]]; then
echo "Creating PXF logs directory $PXF_LOGDIR ..."
mkdir -p "$PXF_LOGDIR"
chmod 700 "$PXF_LOGDIR"
fi
}
function createRunDir()
{
echo "Creating PXF runtime directory $PXF_RUNDIR ..."
mkdir -p "$PXF_RUNDIR"
chmod 700 "$PXF_RUNDIR"
}
function generatePrivateClasspath()
{
# verify that a template file for the distribution exists
local template_file=${PXF_HOME}/templates/pxf/pxf-private.classpath.template
if [[ ! -f $template_file ]]; then
fail "Template file $template_file not found"
fi
echo "Generating ${PXF_HOME}/conf/pxf-private.classpath file from $template_file ..."
# create initial version of the file by replacing PXF_HOME and PXF_CONF tokens
cp "$template_file" "${PXF_HOME}/conf/pxf-private.classpath"
sed "${SED_OPTS[@]}" "s|PXF_HOME|${PXF_HOME}|g" "${PXF_HOME}/conf/pxf-private.classpath"
sed "${SED_OPTS[@]}" "s|PXF_CONF|${PXF_CONF}|g" "${PXF_HOME}/conf/pxf-private.classpath"
}
function generateUserConfigs()
{
# create and setup user configuration directories (only if they do not exist)
setup_conf_directory "$PXF_CONF"
setup_conf_directory "${PXF_CONF}/conf" "${PXF_HOME}/templates/user/conf"
setup_conf_directory "${PXF_CONF}/keytabs"
setup_conf_directory "${PXF_CONF}/lib"
setup_conf_directory "${PXF_CONF}/servers/default"
setup_conf_directory "${PXF_CONF}/templates" "${PXF_HOME}/templates/user/templates" 'override'
}
# Ensure a user-configuration directory exists.
#   $1 - destination directory
#   $2 - optional template directory to copy from
#   $3 - when non-empty, re-copy templates even if $1 already exists
function setup_conf_directory()
{
    local dest=$1
    local src_dir=$2
    local refresh=$3
    if [[ -d $dest ]]; then
        if [[ -n $src_dir && -n $refresh ]]; then
            echo "Updating configurations from $src_dir to $dest ..."
            cp -R "$src_dir" "$(dirname "$dest")"
        else
            echo "Directory $dest already exists, no update required"
        fi
    elif [[ -n $src_dir ]]; then
        echo "Copying configurations from $src_dir to $dest ..."
        cp -R "$src_dir" "$(dirname "$dest")"
    else
        echo "Creating configuration directory $dest ..."
        mkdir -p "$dest"
    fi
}
function validate_system()
{
# validate curl
if ! curl=$(command -v curl); then
fail 'curl is not installed, please install'
fi
}
function printUsage()
{
local normal bold
normal=$(tput sgr0)
bold=$(tput bold)
cat <<-EOF
${bold}usage${normal}: pxf <command> [-y]
pxf cluster <command>
pxf {-h | --help}
EOF
}
# doHelp handles the help command
doHelp() {
local normal bold
normal=$(tput sgr0)
bold=$(tput bold)
printUsage
cat <<-EOF
${bold}List of commands${normal}:
init initialize the local PXF server instance, install PXF extension under \$GPHOME
start start the local PXF server instance
stop stop the local PXF server instance
restart restart the local PXF server instance (not supported for cluster)
status show the status of the local PXF server instance
version show the version of PXF server
reset undo the local PXF initialization
register install PXF extension under \$GPHOME (useful after upgrades of Greenplum server)
cluster <command> perform <command> on all the segment hosts in the cluster; try ${bold}pxf cluster help$normal
sync <hostname> synchronize \$PXF_CONF/{conf,lib,servers} directories onto <hostname>. Use --delete to delete extraneous remote files
${bold}Options${normal}:
-h, --help show command help
-y answer yes, use default PXF_CONF=\$HOME/pxf user configuration directory
EOF
exit 0
}
# Show the prompt text ($1), read one line of input, and succeed only when
# the reply is an explicit 'y' or 'Y'.  The reply is left in the global
# 'answer' variable, matching the original behavior.
function promptUser() {
    echo "$1"
    read -r answer
    case $answer in
        y|Y) return 0 ;;
        *)   return 1 ;;
    esac
}
# doReset handles the reset command
function doReset()
{
local force=$2
local prompt='Ensure your local PXF instance is stopped before continuing. '
prompt+='This is a destructive action. Press y to continue:'
if [[ $force != -f && $force != --force ]] && ! promptUser "$prompt"; then
echo 'pxf reset cancelled'
return 1
fi
if doStatus >/dev/null; then
echo "PXF is running. Please stop PXF before running 'pxf [cluster] reset'"
return 1
fi
echo "Cleaning ${PXF_HOME}/conf/pxf-private.classpath..."
rm -f "${PXF_HOME}/conf/pxf-private.classpath"
echo "Ignoring ${PXF_CONF}..."
echo "Cleaning ${instance}..."
rm -rf "$instance"
echo "Cleaning ${PXF_RUNDIR}..."
rm -rf "$PXF_RUNDIR"
echo "Reverting changes to ${default_env_script}..."
sed "${SED_OPTS[@]}" "s|{PXF_CONF:-\"${PXF_CONF}\"}|{PXF_CONF:-NOT_INITIALIZED}|g" "$default_env_script"
echo "Finished cleaning PXF instance directories"
}
# Install the Greenplum external-table extension files (if bundled with this
# distribution) into the Greenplum installation at $GPHOME.
# No-op when the distribution has no gpextable payload; warns (without
# failing) when GPHOME is unset/empty or does not look like a Greenplum home.
function installExternalTableExtension()
{
    if [[ -d "${parent_script_dir}/gpextable" ]]; then
        if [[ -z "${GPHOME:-}" ]]; then
            echo 'WARNING: environment variable GPHOME is not set, skipping install of Greenplum External Table PXF Extension'
        elif [[ ! -f "${GPHOME}/greenplum_path.sh" ]]; then
            # (stray trailing quote removed from the original message)
            echo "WARNING: environment variable GPHOME (${GPHOME}) must be set to a valid Greenplum installation, skipping install of Greenplum External Table PXF Extension"
        else
            echo "Installing Greenplum External Table PXF Extension into ${GPHOME}"
            # Quote expansions so paths containing spaces do not word-split.
            cp -av "${parent_script_dir}/gpextable/"* "${GPHOME}"
        fi
    fi
}
# doInit handles the init command
function doInit()
{
if instanceExists; then
echo "Instance already exists. Use 'pxf [cluster] reset' before attempting to re-initialize PXF"
return 1
fi
update_pxf_conf
# preserve JAVA_HOME in case user is trying to change it
PRESERVE_JAVA_HOME=true getEnvironment
checkJavaHome
generatePrivateClasspath || return 1
generateUserConfigs || return 1
editPxfEnvSh || return 1
createInstance || return 1
deployWebapp || return 1
createLogsDir || return 1
createRunDir || return 1
installExternalTableExtension || return 1
}
function editPxfEnvSh()
{
sed -i.bak -e "s|.*JAVA_HOME=.*|JAVA_HOME=${JAVA_HOME}|g" "${PXF_CONF}/conf/pxf-env.sh" && rm "${PXF_CONF}/conf/pxf-env.sh.bak"
}
#
# doStart handles start commands
# command is executed as the current user
#
# after start, uses checkWebapp to verify the PXF webapp was loaded
# successfully
#
function doStart()
{
local flags=()
getEnvironment
checkJavaHome
checkInstance
checkPxfConf
warnUserEnvScript
[[ $PXF_DEBUG == true ]] && flags+=(jpda)
flags+=(start)
"${instance}/bin/catalina.sh" "${flags[@]}" || return 1
checkWebapp 300 || return 1
}
#
# doStop handles stop commands
# command is executed as the current user
#
# the -force flag is passed to catalina.sh to enable force-stopping
# the JVM
#
function doStop()
{
getEnvironment
checkJavaHome
checkInstance
checkPxfConf
warnUserEnvScript
"${instance}/bin/catalina.sh" stop -force || return 1
}
function doStatus()
{
getEnvironment
checkJavaHome
checkInstance
checkPxfConf
warnUserEnvScript
checkWebapp 1 || return 1
}
function doSync()
{
local target_host=$1
if [[ -z $target_host ]]; then
fail 'A destination hostname must be provided'
fi
getEnvironment
checkInstance
checkPxfConf
warnUserEnvScript
rsync -az${DELETE:+ --delete} -e "ssh -o StrictHostKeyChecking=no" "$PXF_CONF"/{conf,lib,servers} "${target_host}:$PXF_CONF"
}
function doCluster()
{
local pxf_cluster_command=$2 is_init=false
[[ ${pxf_cluster_command} == init ]] && is_init=true
PRESERVE_JAVA_HOME=${is_init} getEnvironment
# Go CLI handles unset PXF_CONF when appropriate
[[ $PXF_CONF == NOT_INITIALIZED ]] && unset PXF_CONF
[[ ${is_init} == false ]] && warnUserEnvScript
"${parent_script_dir}/bin/pxf-cli" "$@"
}
# Warn (without failing) when the optional user environment script
# $PXF_CONF/conf/pxf-env.sh is absent; defaults are used in that case.
function warnUserEnvScript()
{
    local env_file="${PXF_CONF}/conf/pxf-env.sh"
    [[ -f $env_file ]] ||
        echo "WARNING: failed to find ${env_file}, default parameters will be used"
}
# Abort unless 'pxf init' has replaced the NOT_INITIALIZED placeholder in
# PXF_CONF; otherwise return 0 and let the caller proceed.
function checkPxfConf()
{
    [[ $PXF_CONF != NOT_INITIALIZED ]] && return 0
    echo 'ERROR: PXF is not initialized, call pxf init command'
    exit 1
}
pxf_script_command=$1
silent=false
validate_user
validate_system
case $pxf_script_command in
'init')
if [[ $2 == -y || $2 == -Y ]]; then
silent=true
fi
doInit
;;
'register')
installExternalTableExtension
;;
'start')
doStart
;;
'stop')
doStop
;;
'restart')
doStop
sleep 1s
doStart
;;
'status')
doStatus
;;
'sync')
if [[ $2 == -d || $2 == --delete ]]; then
DELETE=1 doSync "$3"
else
doSync "$2"
fi
;;
'help' | '-h' | '--help')
doHelp
;;
'version' | '--version' | '-v')
"${parent_script_dir}/bin/pxf-cli" --version
;;
'cluster')
doCluster "$@"
;;
'reset')
doReset "$@"
;;
*)
printUsage
exit 2
;;
esac
exit $?
| true |
ec2b19a38d5818e0e79d7ab90d6a6cd6eb91c7d9 | Shell | connectthefuture/dotfiles-38 | /bash/bashrc | UTF-8 | 1,635 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | ####################
# General settings #
####################
# Detect which 'ls' flavor is in use
if [[ $OSTYPE == 'linux-gnu' ]]; then
colorflag='--color'
elif [[ $OSTYPE == 'darwin'* ]]; then
colorflag="-G"
fi
##########
# Prompt #
##########
# Emit the current git branch as "(branch)" for embedding in the prompt;
# prints nothing when the cwd is not inside a git work tree (errors silenced).
function parse_git_branch {
  git branch --no-color 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/(\1)/'
}

# Two-line prompt: "user@host:dir(branch)" on the first line, "$> " on the
# second; the branch is re-evaluated at display time via \$(...).
export PS1="\n\u@\h:\W\$(parse_git_branch)\n\\$> "
###########
# Aliases #
###########
# cd aliases
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../.."
alias .....="cd ../../../.."
# ls aliases
alias ls="ls ${colorflag}"
alias ll="ls -alF"
alias la="ls -lha"
alias l="ls -CF"
# System aliases
alias df="df -Tha --total"
alias free="free -mth"
# Utility aliases
alias fhere="find . -name"
alias mkdir="mkdir -pv"
alias wget="wget -c"
alias more="less"
alias jsonbeautify="python -m json.tool"
alias sshb0t="docker run -d --restart always --name sshb0t -v ${HOME}/.ssh/authorized_keys:/root/.ssh/authorized_keys r.j3ss.co/sshb0t --user wdhif"
alias ctop="docker run --rm -it --name=ctop -v /var/run/docker.sock:/var/run/docker.sock quay.io/vektorlab/ctop:latest"
# Darwin aliases
if [[ $OSTYPE == 'darwin'* ]]; then
alias lock="pmset displaysleepnow"
fi
# Sanity aliases
alias dokcer="docker"
# Private specific aliases
if [ -f ~/.bash_private ]; then
. ~/.bash_private
fi
# Work specific aliases
if [ -f ~/.bash_work ]; then
. ~/.bash_work
fi
########
# Path #
########
# macOS local bin
PATH=$PATH:/usr/local/bin
# Custom functions
PATH=$PATH:~/.bin
# Homebrew binaries
PATH=$PATH:~/.homebrew/bin
# OVH functions
PATH=$PATH:~/.bin/ovh
| true |
34d3bf79679bd6c65d8263c2ea1717bead7d922e | Shell | clanam/react-native-playground | /build/build-android-debug.sh | UTF-8 | 957 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env bash
# Bundle the React Native JS/assets and build a debug APK, then collect the
# resulting APKs under <repo-root>/apks.  Abort on the first failed step
# instead of continuing with a stale bundle.
set -euo pipefail

starting_dir=$(pwd)
# $(...) instead of backticks; fails fast if we are not inside a git repo.
root_dir=$(git rev-parse --show-toplevel)

# Prep the debug build to not require its assets from the development server.
# the --dev false flag is important!
cd "$root_dir/Playground"
mkdir -p android/app/src/main/assets
./node_modules/.bin/react-native bundle \
  --platform android \
  --dev false \
  --entry-file index.js \
  --bundle-output android/app/src/main/assets/index.android.bundle \
  --assets-dest android/app/src/main/res \
  --sourcemap-output android/app/src/main/assets/index.android.bundle.map

# Enter android directory where gradlew is
cd android

# Build the debug apk
./gradlew assembleDebug \
  --no-daemon \
  --max-workers 4 \
  -Drelease=true \
  -Dorg.gradle.caching=true \
  -Dorg.gradle.configureondemand=true \
  -Dkotlin.compiler.execution.strategy=in-process

mkdir -p "$root_dir/apks"
cp app/build/outputs/apk/debug/*.apk "$root_dir/apks"

# return to previous directory
cd "$starting_dir"
9a7d6e45befb1dd19b69a1f157be65db7ad2de90 | Shell | adrien-thierry/hmf-training | /code/nodejs-project/start/gs.sh | UTF-8 | 164 | 2.765625 | 3 | [] | no_license | #!/bin/sh
# Refresh package lists, tell the Tech.io runner to expose port 8080, then
# poll once per second until result.txt appears; print it and exit.
apt-get update
echo 'TECHIO> open -p 8080 /home'
until [ -f result.txt ]; do
  sleep 1s
done
cat result.txt
exit 0
| true |
c2a3353929c84b22cc7fae54b89445f4d673b24e | Shell | alexgabriel-ca/admutils | /functions | UTF-8 | 2,687 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env bash
#Author: Alex Gabriel <alex.gabriel@microfocus.com>
#Created: 04-Feb-2020
#Modified: 26-May-2020
#Description: Functions for all commands related to ADM Utilities.
#License: GPL 3.0
ALMOctaneVersion="15.0.60.116"
ALMVersion="15.0.1"
AOSQAVersion="2.3"
AOSVersion="2.3"
AutopassVersion="11.5.0"
DAVersion="6.3.1"
DevOpsVersion="2.3.1"
IntelliJVersion="2.3.1"
JIRAVersion="8.0.2"
LFTChromeVersion="15.0"
MailVersion="1.5.10"
MFConnectVersion="4.3.1-HF1"
PPMVersion="9.55"
RPAVersion="2019.11"
SCAVersion="19.2.3"
SonarqubeVersion="7.7"
SSCVersion="19.2.0"
SVVersion="5.3"
UFTMVersion="3.4"
function pull(){
PRODUCT=$1
VERSION=$2
if docker image ls -a | grep -wq "${PRODUCT}"; then
echo " ${PRODUCT}:${VERSION} already exists"
read -rp " Would you like to pull it again? [yn] " repull
if [[ "$repull" == y ]]; then
nimbusapp "${PRODUCT}":"${VERSION}" pull
fi
else
nimbusapp "${PRODUCT}":"${VERSION}" pull
fi
}
function restart(){
PRODUCT=$1
VERSION=$2
if docker ps | grep -wq "${PRODUCT}"; then
nimbusapp "${PRODUCT}":"${VERSION}" restart
else
echo " ${PRODUCT}:${VERSION} is not running, no need to restart"
fi
}
function start(){
PRODUCT=$1
VERSION=$2
if docker ps | grep -wq "${PRODUCT}"; then
echo " ${PRODUCT}:${VERSION} is already running."
elif ! docker container ls -a | grep -wq "${PRODUCT}"; then
nimbusapp "${PRODUCT}":"${VERSION}" up
else
nimbusapp "${PRODUCT}":"${VERSION}" start
fi
}
function stop(){
PRODUCT=$1
VERSION=$2
if ! docker ps | grep -wq "${PRODUCT}"; then
echo " ${PRODUCT}:${VERSION} has already been stopped."
else
nimbusapp "${PRODUCT}":"${VERSION}" stop
fi
}
function remove(){
PRODUCT=$1
VERSION=$2
if ! (docker container ls -a | grep -q "${PRODUCT}" && docker image ls -a | grep -q "${PRODUCT}"); then
echo " Container ${PRODUCT}:${VERSION} does not exist or has already been removed."
else
read -rp " Are you sure you wish to remove the ${PRODUCT} container? [yn] " blowitaway
if [[ "$blowitaway" == y ]]; then
if docker ps | grep -q "${PRODUCT}"; then
echo "${PRODUCT} is running, will stop it before removal."
nimbusapp "${PRODUCT}":"${VERSION}" stop
fi
if docker container ls -a | grep -q "${PRODUCT}"; then
echo " Removing ${PRODUCT}:${VERSION} container."
nimbusapp ${PRODUCT}:${VERSION} -f down
echo " Container ${PRODUCT}:${VERSION} has been removed."
fi
if docker image ls -a | grep -q "${PRODUCT}"; then
echo " Removing ${PRODUCT}:${VERSION} image."
docker rmi --force $(docker images --format '{{.Repository}}:{{.Tag}}' | grep "${PRODUCT}") 1>/dev/null 2>/dev/null
echo " Image ${PRODUCT}:${VERSION} has been removed."
fi
fi
fi
} | true |
505128dda93eb36c2da338c90c27e02ffded35b1 | Shell | zz2liu/Zhang_Science_TM_2021 | /20161004.Batch3.sh | UTF-8 | 1,633 | 2.828125 | 3 | [] | no_license | cd ~/scratch60/tmpMeiling/Batch3
mkdir bowtie2hg19
cd bowtie2hg19
genomeDir=~/local/data/hg19
export genomePrefix=$genomeDir/Sequence/Bowtie2Index/hg19
export gtfFile=$genomeDir/Annotation/Genes/genes.gtf
export chromSizeFile=$genomeDir/chrom.sizes
workDir=$PWD
ppn=8
mem=32G
##test interactively
qsub -I -q interactive \
-V -d . -l nodes=1:ppn=$ppn -l mem=$mem
i=../rawData/Sample_MZ424_M4_007
outDir=$(echo $i | sed 's/.*Sample_MZ424_//')
mkdir $outDir
echo $i; date
export sampleDir=$(realpath $i)
export outDir=$(realpath $outDir)
bash -x ~/code/ngs/bowtie2localSe.rpm.featureCount.pipeline.sh
## run batch
for i in $(ls -d ../rawData/Sample*); do
id=$(echo $i | sed 's/.*Sample_MZ424_//')
mkdir $id
echo $id; date
export sampleDir=$(realpath $i)
export outDir=$(realpath $id)
qsub -N $id \
-V -d . -l nodes=1:ppn=$ppn -l mem=$mem \
~/code/ngs/bowtie2localSe.rpm.featureCount.pipeline.sh
done
#summary bowtie and featureCounts
find -name "bowtie2.log" | xargs python ~/code/ngs/bowtie2Summary.py | sed 's%./%%;s%/bowtie2.log%%;/totalReads/d' > tmp.csv
Rscript ~/code/ngs/bowtie2Summary.report.R bowtie2Summary < tmp.csv
bash ~/code/ngs/bowtie2FeatureCounts_summary.sh */ | sed 's%/%%' > tmp.csv
Rscript ~/code/ngs/bowtie2FeatureCounts_summary.report.R bowtie2FeatureCounts_summary.report <tmp.csv
#join the count files
files=$(echo */ | sed 's%/%/featureCounts.txt%g')
python3 ~/code/ngs/join_count_files.py $files > tmp.csv
samples=$(echo */ | sed 's%/%%g;s/ /,/g')
cat tmp.csv | sed "1d;2s/,.*/,$samples/" > featureCounts.csv
## DE analysis
| true |
5bca8522b966fcdf6c4571ceffe0ff1cc0355e25 | Shell | ryandub/docker-lambda-uploader | /run.sh | UTF-8 | 271 | 2.59375 | 3 | [] | no_license | #! /bin/bash
set -xe
if [ -e /data/requirements.txt ]; then
echo "Installing requirements to virtualenv /venv..."
/venv/bin/pip install -r /data/requirements.txt
fi
echo "Running lambda-uploader with virtualenv /venv..."
exec lambda-uploader --virtualenv=/venv $@
| true |
1906b9d420ddbc14d9d8f1d130a788744f8351cf | Shell | kevinsperrine/nothing | /bin/increment-android-version.sh | UTF-8 | 358 | 3.453125 | 3 | [] | no_license | #!/bin/sh
GRADLE_BUILD="$1"
if [ -z ${CIRCLE_BUILD_NUM+x} ]; then
awk '{sub(/versionCode [[:digit:]]+$/, "versionCode "$2+1)}1' $GRADLE_BUILD > gradle.tmp
else
VERSION=$(( $CIRCLE_BUILD_NUM + 52 ));
awk -v version=$VERSION '{sub(/versionCode [[:digit:]]+$/, "versionCode "version)}1' $GRADLE_BUILD > gradle.tmp
fi
mv -f gradle.tmp $GRADLE_BUILD | true |
2fc8f87f04e04fe0215324fae05b34dcca5b5d74 | Shell | oschrenk/machines | /modules/defaults/user/finder-set-new-window-path.sh | UTF-8 | 571 | 2.703125 | 3 | [] | no_license | #!/bin/sh
# Finder: new window location set to $HOME/Downloads. Same as Finder > Preferences > New Finder Windows show
# For $HOME use "PfHm" and "file://${HOME}/"
# For other path use "PfLo" and "file:///foo/bar/"
echo "old 'com.apple.finder NewWindowTarget': '`defaults read com.apple.finder NewWindowTarget`'"
defaults write com.apple.finder NewWindowTarget -string "PfLo"
echo "old 'com.apple.finder NewWindowTargetPath': '`defaults read com.apple.finder NewWindowTargetPath`'"
defaults write com.apple.finder NewWindowTargetPath -string "file://$(HOME)/Downloads"
| true |
593ff8fd7df4313d37c48a6b70b2697b87f0e53d | Shell | giserh/com.sma | /src/main/shell/lang/set.sh | UTF-8 | 407 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env bash
function Set::-U() {
set -u
echo "Unset (i.e. uninitialized) parameters and variables except '@' or '*'"\
"are considered error when performing expansion"
echo "\${1} is not set"
echo "${1}"
echo "${UNSET_VARIABLE}"
set +u
}
function main() {
# Set::-U
echo "null has no special meaning in bash."
UNSET_VARIABLE=null
Set::-U "${UNSET_VARIABLE}"
}
main "$@"
| true |
c2393b4e456c8f4e748c5b10d401011274c0fd69 | Shell | Jimcumming/ubuntu-nginx-lucee | /scripts/300-tomcat.sh | UTF-8 | 2,238 | 3.453125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
echo "Installing Tomcat 8"
apt-get install tomcat8
echo "Configuring Tomcat"
mkdir backup
mkdir backup/etc
mkdir backup/etc/tomcat8
mkdir backup/etc/default
#backup default tomcat web.xml
cp /etc/tomcat8/web.xml backup/etc/tomcat8/web.xml-orig-backup
#copy our web.xml to tomcat directory
cp etc/tomcat8/web.xml /etc/tomcat8/
#backup default server.xml
cp /etc/tomcat8/server.xml backup/etc/tomcat8/server.xml-orig-backup
#copy our server.xml to tomcat dir
cp etc/tomcat8/server.xml /etc/tomcat8/
#backup default catalina.properties
cp /etc/tomcat8/catalina.properties backup/etc/tomcat8/catalina.properties-orig-backup
#copy our catalina properties
cp etc/tomcat8/catalina.properties /etc/tomcat8/
cp /etc/default/tomcat8 backup/etc/default/tomcat8
echo "Installing mod_cfml Valve for Automatic Virtual Host Configuration"
if [ -f lib/mod_cfml-valve_v1.1.05.jar ]; then
cp lib/mod_cfml-valve_v1.1.05.jar /opt/lucee/current/
else
curl --location -o /opt/lucee/current/mod_cfml-valve_v1.1.05.jar https://raw.githubusercontent.com/utdream/mod_cfml/master/java/mod_cfml-valve_v1.1.05.jar
fi
MODCFML_JAR_SHA256="22c769ccead700006d53052707370c5361aabb9096473f92599708e614dad638"
if [[ $(sha256sum "/opt/lucee/current/mod_cfml-valve_v1.1.05.jar") =~ "$MODCFML_JAR_SHA256" ]]; then
echo "Verified mod_cfml-valve_v1.1.05.jar SHA-256: $MODCFML_JAR_SHA256"
else
echo "SHA-256 Checksum of mod_cfml-valve_v1.1.05.jar verification failed"
exit 1
fi
if [ ! -f /opt/lucee/modcfml-shared-key.txt ]; then
echo "Generating Random Shared Secret..."
openssl rand -base64 42 >> /opt/lucee/modcfml-shared-key.txt
#clean out any base64 chars that might cause a problem
sed -i "s/[\/\+=]//g" /opt/lucee/modcfml-shared-key.txt
fi
shared_secret=`cat /opt/lucee/modcfml-shared-key.txt`
sed -i "s/SHARED-KEY-HERE/$shared_secret/g" /etc/tomcat8/server.xml
echo "Setting Permissions on Lucee Folders"
mkdir /var/lib/tomcat8/lucee-server
chown -R tomcat8:tomcat8 /var/lib/tomcat8/lucee-server
chmod -R 750 /var/lib/tomcat8/lucee-server
chown -R tomcat8:tomcat8 /opt/lucee
chmod -R 750 /opt/lucee
echo "Setting JVM Max Heap Size to " $JVM_MAX_HEAP_SIZE
sed -i "s/-Xmx128m/-Xmx$JVM_MAX_HEAP_SIZE/g" /etc/default/tomcat8
| true |
357628dbefa0ad98c7edcb42449befa822aceecf | Shell | Adlink-ROS/intel-ac9260-on-l4t | /install-wifi.sh | UTF-8 | 1,172 | 3.546875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -eu
WORK_DIR=`pwd`
# Prepare the kernel headers
if [[ $# -eq 0 ]]; then
bash kernel-header.sh
fi
# Download backport driver for AC9260
cd $WORK_DIR
if [[ ! -d backport-iwlwifi ]]; then
echo "Downloading backport driver for AC9260"
git clone https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/backport-iwlwifi.git -b release/core52
fi
# Build backport driver
echo "Building backport driver"
cd $WORK_DIR/backport-iwlwifi
make defconfig-iwlwifi-public
make -j$(( $(nproc) + 1 ))
make install
# Download linux firmware
cd $WORK_DIR
if [[ ! -d linux-firmware ]]; then
echo "Downloading linux-firmware"
git clone git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git
fi
# Copy AC9260 WiFi firmware to system
cp $WORK_DIR/linux-firmware/iwlwifi-9260* /lib/firmware/
# Register backport driver for AC9260
modprobe iwlwifi
echo "Installing Network Manager"
apt update
apt install -y network-manager-gnome rfkill
echo "***************************************************************"
echo " Done! Please reboot the system for the driver to take effect. "
echo "***************************************************************"
| true |
f304f2083ee5f266ff664c848edeb097df94b028 | Shell | paulhybryant/dotfiles | /zsh/plugin/functions/git::sync-repos | UTF-8 | 877 | 3.40625 | 3 | [] | no_license | # vim: filetype=zsh sw=2 ts=2 sts=2 et tw=80 foldlevel=0 nospell
setopt localoptions err_return nounset
local -a dirty_repos
dirty_repos=()
for dir in */; do
io::vlog 2 "[${0:t}] Updating ${dir}"
pushd "${dir}"
if [[ -d '.git' ]]; then
if [[ -n $(git status --porcelain) ]]; then
# Dirty
dirty_repos+=${dir}
else
io::msg "Pulling ${dir}"
git submodule foreach 'git checkout master'
git pull --recurse-submodules
if git remote | grep 'upstream' > /dev/null 2>&1; then
io::msg "Checkinging upstream"
git fetch upstream
git merge upstream/master
fi
fi
fi
popd
done
if [[ ${#dirty_repos} -gt 0 ]]; then
io::hl "${#dirty_repos} dirty repo(s) found!"
for dir in ${dirty_repos}; do
io::hl "Not updated repo: ${dir}"
done
return 1
else
io::msg "Done!"
git::check-dirty-repos
fi
| true |
a7449800723580572c1c9f68105751a55a2afd91 | Shell | jeandeaual/mplabx-xc8-build-action | /build.sh | UTF-8 | 201 | 2.703125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
echo "Building project $1:$2 with MPLAB X v5.45 and XC8 v1.34"
set -x -e
/opt/mplabx/mplab_platform/bin/prjMakefilesGenerator.sh "$1@$2" || exit 1
make -C "$1" CONF="$2" build || exit 2
| true |
db133e4ca6a70056893634eeaf00fa4e66664e7f | Shell | RomiC/dotfiles | /.zshrc | UTF-8 | 4,358 | 2.65625 | 3 | [] | no_license | # Go path
export GOPATH=$HOME/work/go
# Additional paths
if [ -d "$HOME/bin" ] ; then
PATH="$HOME/bin:$PATH"
fi
# NeoVIM mac os installation
if [ -d "$HOME/nvim-osx64/bin" ] ; then
PATH="$HOME/nvim-osx64/bin:$PATH"
fi
# Path to the globally installed npm-packages
if [ -d "$HOME/.npm-global/bin" ] ; then
PATH="$HOME/.npm-global/bin:$PATH"
fi
# Path to GO root project
if [ -d "$GOPATH/bin" ] ; then
PATH="$GOPATH/bin:$PATH"
fi
# Path to VSCode cli
if [ -d "/Applications/Visual Studio Code.app/Contents/Resources/app/bin" ] ; then
PATH="/Applications/Visual Studio Code.app/Contents/Resources/app/bin:$PATH"
fi
# JDK
export PATH="/usr/local/opt/openjdk/bin:$PATH"
# Default locale
export LC_ALL=en_US.UTF-8
# Settign default editor to vim
export EDITOR=vim
# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh
PURE_PROMPT_SYMBOL="➜"
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion. Case
# sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
HIST_STAMPS="dd.mm.yyyy"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(
docker
docker-compose
fnm
fzf
fzf-tab
git
npm
sudo
tmux
vscode
zsh-autosuggestions
)
source $ZSH/oh-my-zsh.sh
fpath+=$HOME/.zsh/pure
autoload -U promptinit; promptinit
prompt pure
# fnm-init for managing different node versions
eval "$(fnm env)"
# rbenv setting up
#eval "$(rbenv init - zsh)"
# vim mode
# bindkey -v
zstyle :fzy:tmux enabled no
zstyle :fzy:history show-scores no
zstyle :fzy:history lines '10'
zstyle :fzy:history prompt 'history >> '
zstyle :fzy:history command fzy-history-default-command
zstyle :fzy:file show-scores no
zstyle :fzy:file lines '10'
zstyle :fzy:file prompt 'file >> '
zstyle :fzy:file command fzy-file-default-command
zstyle :fzy:cd show-scores no
zstyle :fzy:cd lines ''
zstyle :fzy:cd prompt 'cd >> '
zstyle :fzy:cd command fzy-cd-default-command
zstyle :fzy:proc show-scores no
zstyle :fzy:proc lines '10'
zstyle :fzy:proc prompt 'proc >> '
zstyle :fzy:proc command fzy-proc-default-command
# fzf-tab completion plugin config
zstyle ':fzf-tab:*' fzf-bindings 'space:toggle' \
'ctrl-a:toggle-all' \
'ctrl-j:down' \
'ctrl-k:up'
# Aliases
# - Git
alias glg='g lg' gsth='g sth' gusth='g usth' grhm='g reset --mixed' gbro='git branch --merged origin/master | grep -v master | xargs git branch -d'
# - NeoVIM instead of vim
alias vim=nvim v=nvim
# - Vifm
alias vf=vifm
# - Docker
alias doc=docker
# - List files
alias lsa='ls -lhA' lsv='ls -lh' lsn='ls -1A'
# - FNM (Node manager)
alias nu='fnm use' nls='fnm list' nlsr='fnm list-remote'
# - Node
alias nv='node -v'
# - Haste
alias acronis-haste="HASTE_SERVER=https://hastebin.acronis.work haste"
export NGINX_PROXY_HOST="docker.for.mac.localhost"
# VSCode plugin settings
export VSCODE=code
export FZF_DEFAULT_COMMAND='fd --type f --hidden --follow --exclude .git' # Follow links, exclude hiddens and node_modules
# Colorizing zsh suggestions
export ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE='fg=#685e4a'
# Run tmux on Startup
export ZSH_TMUX_AUTOSTART=true
export ZSH_TMUX_AUTOQUIT=true
export ZSH_TMUX_DEFAULT_SESSION_NAME="charugin"
export ZSH_TMUX_CONFIG=$HOME/.config/tmux/tmux.conf
| true |
2d6e4cec21899621b887332898f06f93d6db8611 | Shell | Lunch-Time-Projects/DCA-BOT | /build-lambda.sh | UTF-8 | 779 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env bash
# Remove existing build folder
echo "Remove existing build folder..."
if [ -d build ]; then
rm -rf build
fi
# Recreate build directory
echo "Recreate build directory..."
mkdir -p build/function/ build/layer/
# Copy source files
echo "Copy source files..."
cp -r src/ build/function/
# Activate virtualenv
echo "Activate virtualenv..."
python3 -m venv env
. env/bin/activate
# Pack python libraries
echo "Pack python libraries..."
pip3 install -r src/requirements.txt -t build/layer/python
# Remove pycache in build directory
# https://stackoverflow.com/questions/28991015/python3-project-remove-pycache-folders-and-pyc-files
echo "Remove pycache in build directory..."
find build -type f | grep -E "(__pycache__|\.pyc|\.pyo$)" | xargs rm
echo "Done!" | true |
d5de67b4246bb9096b66626f06692d189e4f4c4d | Shell | travis-g/dotfiles | /chunkwm/.bitbar/chunkwm.1s.sh | UTF-8 | 598 | 2.859375 | 3 | [
"WTFPL"
] | permissive | #!/bin/sh
# <bitbar.title>Chunkwm Status</bitbar.title>
# <bitbar.author.github>travis-g</bitbar.author.github>
# <bitbar.version>v1.0</bitbar.version>
# <bitbar.desc>Plugin to display basic chunkwm status</bitbar.desc>
export PATH="/usr/local/bin:$PATH"
ID="$(chunkc tiling::query -d id)"
MODE="$(chunkc tiling::query -d mode)"
case $MODE in
"bsp")
ICON="";;
"monocle")
ICON="";;
"float")
ICON="";;
esac
#echo "$ID:$ICON | font=\"Hack Nerd Font\""
echo "$ID:$MODE"
#echo "---"
#echo "$(chunkc tiling::query -d windows | grep -v invalid | cut -c-60)"
| true |
fe6e4ec0bd2000eb585e22d38cbfdca6eb37d200 | Shell | fastrom/watch-make | /watch-make | UTF-8 | 161 | 3.03125 | 3 | [] | no_license | #!/bin/sh
while true;
do
if ! make -q "$@";
then
echo "#-> Starting build: `date`"
make "$@";
echo "#-> Build complete."
fi
sleep 0.5;
done
| true |
9b782abf89f44b66aa5ddacef03bf6b73814998f | Shell | Zacxie/linux-server-assignment1 | /assignment1/assignment1.sh | UTF-8 | 2,679 | 4.09375 | 4 | [] | no_license | #!/bin/bash --
#1
#ask for the package to be downloaded
read -p "Name of package: " packagename
#2
#ask if it want to install from source or dpkg/rpm
#Skip this and look at the extension of the downloaded file
#3
#read the website
read -p "Enter link for package: " url
#set extension for use in step 6
extension="${url: -3}"
#4
#check/changes permissions of /usr/local/src so everyone can download
#rwxrwxrwx
sudo chmod 747 /usr/local/src
#5
#use wget to download package
wget $url -P /usr/local/src
filename=`ls /usr/local/src -tu | head -n 1`
#6
#install package depended on the package type
case $extension in
".gz")
#test website https://nmap.org/dist/nmap-7.91.tar.bz2
#Source install
#extract package
sudo tar zxvf /usr/local/src/$filename
#choose output name
unzippedFilename=`ls /usr/local/src -tu | head -n 1`
#cd into the folder
sudo chmod 747 /usr/local/src/$unzippedFilename/
cd /usr/local/src/$unzippedFilename/
sudo ./configure
sudo make
sudo make install
;;
"bz2")
#test website https://nmap.org/dist/nmap-7.91.tar.bz2
#Source install
#extract package
sudo tar jxvf /usr/local/src/$filename -C /usr/local/src
#choose output name
unzippedFilename=`ls /usr/local/src -tu | head -n 1`
#cd into the folder
sudo chmod 747 /usr/local/src/$unzippedFilename/
cd /usr/local/src/$unzippedFilename/
sudo ./configure
sudo make
sudo make install
;;
"deb")
# test website https://download.teamviewer.com/download/linux/teamviewer_amd64.deb
#deb install
sudo dpkg -i /usr/local/src/$filename
;;
"rpm")
#test website https://nmap.org/dist/nmap-7.91-1.x86_64.rpm
#rpm install
#the -i auto installs the generated packages (deb)
sudo alien -i /usr/local/src/$filename
;;
*)
echo "Unsupported filetype."
;;
esac
#7
#Report if the installation was successful
#if $? is 0, successful, if 1, unsuccessful
if [ $? -eq 0 ]
then
echo "Successfully installed $packagename""."
exit
else
echo "Error. Installation was unsuccessful."
fi
#8
echo "Missing dependencies."
read -p "Do you wish to install the missing dependencies? (y/n): " dependsAnswer
#9
if [ "$dependsAnswer" = "y" ]
then
#install dependencies with apt
yes | sudo apt -f install
else
echo "Abort."
exit
fi
#check if successfull
if [ $? -eq 0 ]
then
echo "Successfully installed $packagename""."
else
echo "Error. Installation was unsuccessful."
fi
| true |
a970906ecebb00e8f51d74b685d75516b0aec218 | Shell | sunweiconfidence/lab | /redis-cluster/build.sh | UTF-8 | 530 | 3.21875 | 3 | [] | no_license | #!/bin/bash
set -ex
WORKDIR=`pwd`
REDIS_VER=3.2.5
REDIS_URL=http://download.redis.io/releases/redis-${REDIS_VER}.tar.gz
# 存放redis的可执行文件
mkdir -p bin
# 下载源码
wget $REDIS_URL
# 解压源码&编译
tar fxz redis-${REDIS_VER}.tar.gz && cd redis-${REDIS_VER} && make
# 复制可执行文件到bin目录
cd src && cp redis-benchmark redis-check-aof redis-check-rdb redis-cli redis-sentinel redis-server redis-trib.rb ../../bin
# 删除源码
cd $WORKDIR && rm -rf redis-${REDIS_VER}.tar.gz redis-${REDIS_VER}
| true |
020348fd559bc334db22b5db2cf4e5d71c190fb9 | Shell | radek-daniluk/zywa-przestrzen.org.pl | /.automation-scripts/images.sh | UTF-8 | 1,209 | 3.796875 | 4 | [] | no_license | #!/bin/sh
if [ $# -lt 1 ]; then
echo "Argument number mismatch. Give input file and optionally output basename."
exit 1
fi
inname="$1"
inbase=$(basename "$inname" | cut -d. -f1)
if [ $# -eq 1 ]; then
outbase=$inbase
else
outbase="$2"
fi
if [ ! -f $inname ]; then
echo "No such file: $inname"
exit 2
fi
if [ ! -d ./assets/img ]; then
echo "No assets/img dir in current dir. Please change or create dir(s)."
exit 3
fi
if [ ! -d ./assets/img/300 ]; then
echo "No ./assets/img/300 dir. Please change or create dir(s)."
exit 3
fi
if [ ! -d ./assets/img/600 ]; then
echo "No ./assets/img/600 dir. Please change or create dir(s)."
exit 3
fi
if [ ! -d ./assets/img/740 ]; then
echo "No ./assets/img/740 dir. Please change or create dir(s)."
exit 3
fi
echo $inbase
echo $outbase
cwebp -resize 300 0 -m 6 -o "./assets/img/300/$outbase.webp" "$inname"
cwebp -resize 600 0 -m 6 -o "./assets/img/600/$outbase.webp" "$inname"
cwebp -resize 740 0 -m 6 -o "./assets/img/740/$outbase.webp" "$inname"
convert -resize 300 "$inname" "./assets/img/300/$outbase.jpg"
convert -resize 600 "$inname" "./assets/img/600/$outbase.jpg"
convert -resize 740 "$inname" "./assets/img/740/$outbase.jpg"
| true |
fd44a7b977fa18083040f014521e7783cfaff167 | Shell | michaelcunningham/oracledba | /admin/listener_log/listener_log_report.sh | UTF-8 | 1,547 | 3 | 3 | [] | no_license | #!/bin/sh
export ORACLE_SID=apex
export ORAENV_ASK=NO
. /usr/local/bin/oraenv
. /dba/admin/dba.lib
tns=`get_tns_from_orasid $ORACLE_SID`
username=lmon
userpwd=lmon
log_date=`date +%a`
log_file=/dba/admin/listener_log/log/listener_log_report.log
sqlplus -s /nolog << EOF > $log_file
connect $username/$userpwd
set linesize 100
set pagesize 100
column server_name format a10 heading "Server"
column instance_name format a10 heading "Instance"
column host_name format a30 heading "Client Machine"
column program_name format a40 heading "Program"
--
-- Find programs that are used.
--
--ttitle center '***** Program usage for &_db *****' skip 2
--select distinct upper( substr( program_name, instr( program_name, '\', -1 )+1 ) ) program_name
--from listener_log
--where program_name <> 'oracle'
--and user_name <> 'oracle'
--order by 1;
--
-- Find server programs that are used.
--
ttitle center '***** Program and Server Usage *****' skip 2
select *
from (
select distinct ll.server_name, ll.instance_name,
-- ll.host_name,
upper( substr( ll.program_name, instr( ll.program_name, '\', -1 )+1 ) ) program_name
from listener_log ll
where ll.program_name <> 'oracle'
and ll.user_name <> 'oracle'
and upper( ll.host_name ) not in(
select llfh.host_name from listener_log_filter_host llfh )
) r
where upper( r.program_name ) not in(
select llfa.program_name from listener_log_filter_apps llfa )
order by 1, 2, 3;
exit;
EOF
echo "See Attatchment" | mutt -s "Listener/Applications Report" mcunningham@thedoctors.com -a $log_file
| true |
3f5e2dd0be854531e539d199c9330a3907a44f11 | Shell | kenobikty/adbclickloop | /loopClick.sh | UTF-8 | 336 | 3.46875 | 3 | [] | no_license | timestamp() {
date +"%T" # current time
}
while getopts x:y: flag
do
case "${flag}" in
x) x1=${OPTARG};;
y) y1=${OPTARG};;
esac
done
while
true
do
x=$((x1 + (RANDOM % 10) + 1))
y=$((y1 + (RANDOM % 10) + 1))
t=$((1 + (RANDOM % 2) + 1))
echo $(timestamp)
adb shell input tap $x $y
sleep $t
done
| true |
97a2b629ef8fa02ddfd010b4232145bd941a5c2a | Shell | CesarArroyo09/MPI_FiberAssig_PIP_script | /parallel_pip_bianchi_first_test.sh | UTF-8 | 1,457 | 2.9375 | 3 | [] | no_license | #!/bin/bash -l
# Submit this script as: "./prepare-env.sh" instead of "sbatch prepare-env.sh"
# Prepare user env needed for Slurm batch job
# such as module load, setup runtime environment variables, or copy input files, etc.
# Basically, these are the commands you usually run ahead of the srun command
source /project/projectdirs/desi/software/desi_environment.sh 19.2
module unload fiberassign
module swap PrgEnv-intel PrgEnv-gnu
export CONDA_ENVS_PATH=$SCRATCH/desi/conda
source activate desienv
export PYTHONPATH=$SCRATCH/desi/conda/desienv/lib/python3.6/site-packages:$PYTHONPATH
# Generate the Slurm batch script below with the here document,
# then when sbatch the script later, the user env set up above will run on the login node
# instead of on a head compute node (if included in the Slurm batch script),
# and inherited into the batch job.
cat << EOF > prepare-env.sl
#!/bin/bash
#SBATCH --constraint=haswell
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=1
#SBATCH --time-min=01:00:00
#SBATCH --time=02:00:00
#SBATCH --output='../output/parallel_pip_bianchi_first_test.out'
#SBATCH --error='../output/parallel_pip_bianchi_first_test.err'
#SBATCH --qos=regular
#SBATCH --account=desi
#SBATCH --job-name=pip_lrg
srun -c 64 python ./parallel_pip_bianchi_first_test.py
# Other commands needed after srun, such as copy your output filies,
# should still be incldued in the Slurm script.
EOF
# Now submit the batch job
sbatch prepare-env.sl
| true |
044212ac10d338575cadaf1aeb69247f78ab1be3 | Shell | Pajk/aws-beanstalk-docker-travis-example | /travis/deploy.sh | UTF-8 | 360 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
# Use git commit hash as image tag
IMAGE=beanstalk-example/hello-world
ECR_TAG=$(git show -s --format=%h)
# Push latest available production image to ECR
. devops/ci/ecr-push.sh $IMAGE $ECR_TAG
. devops/ci/eb-deploy.sh \
$IMAGE $ECR_TAG \
${EB_APP} ${EB_ENV} \
docker # Folder with version files (Dockerrun.aws.json)
| true |
471b12e6e3c1a870bb0af7e8b8dd0c06a0b9b563 | Shell | jetweedy/twtVagrant | /destroy.sh | UTF-8 | 547 | 3.609375 | 4 | [] | no_license | #!/bin/bash
confirm () {
# call with a prompt string or use a default
read -r -p "${1:-Are you sure? [y/N]} " response
case $response in
[yY][eE][sS]|[yY])
true
;;
*)
false
;;
esac
}
if (confirm "Are you sure you want to completely destroy this machine?")
then
# cd ${0%/*}
cd ./box/vagrant
echo "vagrant destroy"
vagrant destroy --force
cd ../../
echo "rm -rf box"
rm -rf ./box
echo "machine destroyed"
sleep 1s
else
echo "nothing destroyed"
sleep 1s
fi
| true |
7caf3f555edcaf938e05af5921ae4677bdea3d87 | Shell | meocad/shell | /abs/1-ParameterSubstitution/5-variableexpansion_Substringreplacement.sh | UTF-8 | 4,387 | 3.84375 | 4 | [] | no_license | # Variable expansion / Substring replacement
#
# 1. ${var:pos}
# Variable var expanded, starting from offset pos.
#
# 2. ${var:pos:len}
# Expansion to a max of len characters of variable var, from offset pos.
# 3. ${var/Pattern/Replacement}
# First match of Pattern, within var replaced with Replacement.
# If Replacement is omitted, then the first match of Pattern is replaced by nothing, that is, deleted.
stringZ=abcABC123ABCabc
echo ${stringZ/abc/XYZ} # XYZABC123ABCabc
echo ${stringZ/abc/} # ABC123ABCabc
# 4. ${var//Pattern/Replacement}
# Global replacement. All matches of Pattern, within var replaced with Replacement.
# if Replacement is omitted, then all occurrences of Pattern are replaced by nothing, that is, deleted.
stringZ=abcABC123ABCabc
echo ${stringZ//abc/XYZ} # XYZABC123ABCXYZ
echo ${stringZ//abc/} # ABC123ABC
var1=abcd-1234-defg
echo "var1 = $var1"
t=${var1#*-*}
echo "var1 (with everything, up to and including first - stripped out) = $t"
# t=${var1#*-} works just the same,
#+ since # matches the shortest string,
#+ and * matches everything preceding, including an empty string.
t=${var1##*-*}
echo "If var1 contains a \"-\", returns empty string... var1 = $t"
t=${var1%*-*}
echo "var1 (with everything from the last - on stripped out) = $t"
echo
# -------------------------------------------
path_name=/home/bozo/ideas/thoughts.for.today
# -------------------------------------------
echo "path_name = $path_name"
t=${path_name##/*/}
echo "path_name, stripped of prefixes = $t"
# Same effect as t=`basename $path_name` in this particular case.
# t=${path_name%/}; t=${t##*/} is a more general solution,
#+ but still fails sometimes.
# If $path_name ends with a newline, then `basename $path_name` will not work,
#+ but the above expression will.
# (Thanks, S.C.)
t=${path_name%/*.*}
# Same effect as t=`dirname $path_name`
echo "path_name, stripped of suffixes = $t"
# These will fail in some cases, such as "../", "/foo////", # "foo/", "/".
# Removing suffixes, especially when the basename has no suffix,
#+ but the dirname does, also complicates matters.
# (Thanks, S.C.)
echo
t=${path_name:11}
echo "$path_name, with first 11 chars stripped off = $t"
t=${path_name:11:5}
echo "$path_name, with first 11 chars stripped off, length 5 = $t"
echo
t=${path_name/bozo/clown}
echo "$path_name with \"bozo\" replaced by \"clown\" = $t"
t=${path_name/today/}
echo "$path_name with \"today\" deleted = $t"
t=${path_name//o/O}
echo "$path_name with all o's capitalized = $t"
t=${path_name//o/}
echo "$path_name with all o's deleted = $t"
# 5. ${var/#Pattern/Replacement}
# If prefix of var matches Pattern, then substitute Replacement for Pattern.
# 6. ${var/%Pattern/Replacement}
# If suffix of var matches Pattern, then substitute Replacement for Pattern.
#
#
v0=abc1234zip1234abc # Original variable.
echo "v0 = $v0" # abc1234zip1234abc
echo
# Match at prefix (beginning) of string.
v1=${v0/#abc/ABCDEF} # abc1234zip1234abc
# |-|
echo "v1 = $v1" # ABCDEF1234zip1234abc
# |----|
# Match at suffix (end) of string.
v2=${v0/%abc/ABCDEF} # abc1234zip123abc
# |-|
echo "v2 = $v2" # abc1234zip1234ABCDEF
# |----|
echo
# ----------------------------------------------------
# Must match at beginning / end of string,
#+ otherwise no replacement results.
# ----------------------------------------------------
v3=${v0/#123/000} # Matches, but not at beginning.
echo "v3 = $v3" # abc1234zip1234abc
# NO REPLACEMENT.
v4=${v0/%123/000} # Matches, but not at end.
echo "v4 = $v4" # abc1234zip1234abc
# NO REPLACEMENT.
# 7. ${!varprefix*}, ${!varprefix@}
# Matches names of all previously declared variables beginning with varprefix.
xyz23=whatever
xyz24=
a=${!xyz*} # Expands to *names* of declared variables
# ^ ^ ^ + beginning with "xyz".
echo "a = $a" # a = xyz23 xyz24
a=${!xyz@} # Same as above.
echo "a = $a" # a = xyz23 xyz24
echo "---"
abc23=something_else
b=${!abc*}
echo "b = $b" # b = abc23
c=${!b} # Now, the more familiar type of indirect reference.
echo $c # something_else
| true |
cc6f4f9ce9d69f0cb84e6fad38b0283be17636ca | Shell | JoppeDD/SysteemBeheer | /scripts/http_add_vhost | UTF-8 | 800 | 2.765625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/bash
domain=(${1//./ })
echo "<VirtualHost *:80>" >> /etc/apache2/sites-available/$domain.conf
echo " ServerName $domain.joppe-duthoit.sb.uclllabs.be" >> /etc/apache2/sites-available/$domain.conf
echo " DocumentRoot /var/www/labo/scripts/$domain" >> /etc/apache2/sites-available/$domain.conf
echo "</VirtualHost>" >> /etc/apache2/sites-available/$domain.conf
mkdir /var/www/labo/scripts/$domain
touch /var/www/labo/scripts/$domain/index.html
echo "<html>" >> /var/www/labo/scripts/$domain/index.html
echo " welcome $domain" >> /var/www/labo/scripts/$domain/index.html
echo "</html>" >> /var/www/labo/scripts/$domain/index.html
#echo "$domain IN A 193.191.177.198" >> /etc/bind/zones/db.joppe-duthoit.be
a2dissite $domain.conf
a2ensite $domain.conf
service bind9 restart
service apache2 restart
| true |
fecc52f76988423bff942d6f046417e0939e6de0 | Shell | babl-ws/ws-harness | /scripts/launch-client.sh | UTF-8 | 1,030 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env bash
# Timestamped output locations for this benchmark run.
# NOTE(review): the two date calls below can straddle a second boundary,
# so RESULT_DIR and GC_LOG_FILE may end up with different timestamps.
RESULT_DIR="$(date +%Y%m%d_%H%M%S)"
GC_LOG_FILE="client-gc-$(date +%Y%m%d_%H%M%S).log"
SCRIPT_DIR="$(dirname $0)"
# JVM tuning flags for the latency-test client (low-latency / pre-touch
# settings plus Aeron and babl system properties).
export JVM_ARGS="-XX:MaxDirectMemorySize=32g -XX:PerfDataSaveFile=/dev/shm/pf.bin -XX:+UnlockDiagnosticVMOptions -XX:GuaranteedSafepointInterval=300000 -XX:+UnlockExperimentalVMOptions -XX:+TrustFinalNonStaticFields -XX:BiasedLockingStartupDelay=0 -XX:+AlwaysPreTouch -Djava.lang.Integer.IntegerCache.high=65536 -Djava.net.preferIPv4Stack=true -Dagrona.disable.bounds.checks=true -Daeron.pre.touch.mapped.memory=true -Daeron.term.buffer.sparse.file=false -Dbabl.socket.tcpNoDelay.enabled=true"
# Heap sizing and GC logging (written to tmpfs in /dev/shm).
export JVM_MEM="-XX:+UseParallelOldGC -Xmx1g -Xms1g -XX:NewSize=512m -Xlog:gc*,safepoint=info:file=/dev/shm/$GC_LOG_FILE:time"
# Supplies SERVER_HOST used on the java command line below.
source "$SCRIPT_DIR/hosts_conf.sh"
java $JVM_ARGS $JVM_MEM -cp 'build/libs/*:lib/*' -Dbabl.perf.results.dir=$RESULT_DIR com.aitusoftware.ws.benchmark.LatencyTestMain "${SERVER_HOST}:8080" latency-client.properties
# Collect the GC log(s) written to /dev/shm into the results directory.
mkdir -p "$RESULT_DIR"
cp /dev/shm/*-gc.log "$RESULT_DIR/"
46be4638a6ff03697f43a84d774cd9ebbddaf7bb | Shell | xlegend1024/az-cloudscale-adv-analytics | /script/00.azlab_DE.azcli | UTF-8 | 5,539 | 3.65625 | 4 | [] | no_license | #!/bin/bash
# Select Azure subscription
az account list --output table
echo "\n\r"
read -p "Subscription Name for lab>>" subName
if [ ! -z "$subName" ]; then
echo "You select " $subName " for the lab."
az account set --subscription "$subName" --verbose
else
echo "Please run the script again!! EXIT"
exit
fi
chkName=$(az account list --output tsv --query "[?isDefault].name")
if [ "$subName" = "$chkName" ]; then
echo "Subscripion Name has confirmed"
else
echo "Please try again with correct subscription name"
exit
fi
# Set a parameter that will be used for password
sqlpwd=1q2w3e4r5t^Y
echo ""
echo "Type Passowrd for the lab" $sqlpwd
echo ""
az account list-locations --output table --query []['name','displayName']
echo "Type location for the lab"
read -p "Location >>" loc
# set azure region
if [ ! -z "$loc" ]; then
echo "You set location" $loc " for the lab."
else
echo "Default location is West US 2"
loc=westus2
fi
# 3. get subscruption ID
# if you have exsiting one please use the one
num=$(shuf -i0-1000 -n1)
rgName=g-rsg-1n-exploratrium01-datasci-training-01
#azlab-$num-rg #Save it as ps1
# set blob name that should be unique
blobName=azlabblob$num # It will be saved it in parameter file
# set container name that will be created in Blob
containerName=sampledata
# set SQL Server (Instance) name
sqlsvrName=azlabsql$num # It will be saved it in parameter file
# Azure Key Vault Name
akvName=akv$num
# DO NOT CHANGE!!!
sqldbName=wwimdb
# DO NOT CHANGE!!!
sqladm=sqladmin
# Create the Resource Group
# if you have exsiting one you can skip this part
echo "1. Create Resource Group " $rgName
#az group create --name $rgName --location $loc
# Storage Account
# Create the Storage Account
echo "2. Create Blob " $blobName
az storage account create --name $blobName --resource-group $rgName --sku Standard_LRS --location $loc --kind StorageV2
# Get blob connection string
blobConn=$(az storage account show-connection-string --name $blobName --resource-group $rgName --output tsv)
blobkey1=$(az storage account keys list -n $blobName -g $rgName --output tsv --query [0].value)
# Create a container
echo "3. Create container in the blob" $containerName
az storage container create --name $containerName --connection-string $blobConn
# Create a DB and restore Database from backup
# Download Sample DB (Backup)
echo "4. Download a sample DB"
file_to_upload="./wwimdb.bacpac"
objName=wwimdb.bacpac
# wget is tool you can download files
# Cloud Shell has wget.exe on both Bash or Powershell
wget -O $file_to_upload https://github.com/xlegend1024/az-cloudscale-adv-analytics/raw/master/sampledb/wwimdb.bacpac
# Uploading a SQLDB Backup file
echo "5. Upload sample db to blob"
az storage blob upload --container-name $containerName --file $file_to_upload --name $objName --connection-string $blobConn
# Confirm file is uploaded
az storage blob list --container-name $containerName --connection-string $blobConn --output table
# Create SQL Server and restore database from backup
# Create a SQL Server
echo "6. Create a SQL Server " $sqlsvrName
az sql server create --admin-user $sqladm --admin-password $sqlpwd --location $loc --name $sqlsvrName --resource-group $rgName
# Update SQL Server Firewall rule
# Opeing all firewall is NOT recommend for production or in any environment
# Opening all IP address is for this demo and lab only
echo "7. Update SQL Server Firewall"
az sql server firewall-rule create -n openall --start-ip-address 1.1.1.1 --end-ip-addres 255.255.255.255 -g $rgName -s $sqlsvrName
az sql server firewall-rule create -g $rgName -s $sqlsvrName -n allowazure --start-ip-address 0.0.0.0 --end-ip-address 0.0.0.0
# Create a SQL DB
echo "8. Create a SQL DB " $sqldbName
az sql db create --name $sqldbName --resource-group $rgName --server $sqlsvrName --service-objective S3
# Get uri where sql backup is
bloburi="https://$blobName.blob.core.windows.net/$containerName/$objName"
time=2020-01-01T00:00:00Z
sas=$(az storage blob generate-sas --account-name $blobName --container-name $containerName --name $objName --permissions r --expiry $time --output tsv)
# Restore SQL DB
echo "9. Restore DB"
az sql db import -s $sqlsvrName -n $sqldbName -g $rgName -p $sqlpwd -u $sqladm --storage-key $sas --storage-key-type SharedAccessKey --storage-uri $bloburi
#sqldbconn='Server=tcp:'$sqlsvrName'.database.windows.net,1433;Initial Catalog='$sqldbName';Persist Security Info=False;User ID=sqladmin@;Password='$sqlpwd';Encrypt=true;Connection Timeout=30;'
# Build the final ADO.NET connection string: fetch the template from the
# Azure CLI, then substitute the real credentials into its placeholders.
# BUG FIX: the substitutions previously read from the undefined variable
# "$_sqldbconn" (note the stray leading underscore), which expanded to the
# empty string and silently wiped the connection string stored in Key Vault
# below; they must chain on $sqldbconn itself.
sqldbconn=$(az sql db show-connection-string -s $sqlsvrName -n $sqldbName -c ado.net)
sqldbconn=${sqldbconn/<username>/$sqladm}
sqldbconn=${sqldbconn/<password>/$sqlpwd}
# Create keyvault
## Steps
echo "Create Azure Key Vault"
az keyvault create --name $akvName --resource-group $rgName --location $loc
az keyvault secret set --vault-name $akvName --name 'dev-sqldbconn' --value $sqldbconn
az keyvault secret set --vault-name $akvName --name 'dev-blobconn' --value $blobConn
echo $(date) >> ./azlab.txt
echo "Resource Group: " $rgName >> ./azlab.txt
echo "Location: " $loc >> ./azlab.txt
echo "Blob: " $blobName >> ./azlab.txt
echo "Blob Key: " $blobkey1 >> ./azlab.txt
echo "SQL Server: " $sqlsvrName >> ./azlab.txt
echo "SQL Database: " $sqladm >> ./azlab.txt
echo "SQL DB Password: " $sqlpwd >> ./azlab.txt
echo "SQL DB Connection:" $sqldbconn >> ./azlab.txt
echo "" >> ./azlab.txt
echo "Your lab environment information is saved as azlab.txt"
| true |
14e6ef3c83528f675ceef2af5c4b9cc9ee43d619 | Shell | namin/nfv-benchmark | /test-rxer/paper-benchmarks.sh | UTF-8 | 3,368 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Script directory
DIR="$( cd "$(dirname "$0")" ; pwd -P )"
# Batch size
BS=146
FLTO=0
# Pipeline spec for the Measure -> Route -> Checksum (MRC) chain.
#   $1 - optional element-name prefix (e.g. "bpp"); empty/absent = no prefix
# Reads the global batch size $BS; prints the space-separated stage list.
mrc() {
    local prefix=${1:-}
    # Join with "_" only when a prefix was actually supplied.
    # (BUG FIX: the original unquoted `[ $PREFIX != "" ]` degenerated to
    # `[ != "" ]` for an empty argument and printed a test(1) error.)
    if [ -n "$prefix" ]; then
        prefix="${prefix}_"
    fi
    echo "${prefix}measurement_large,${BS} ${prefix}routing_stanford,${BS} ${prefix}checksum,${BS}"
}
# Pipeline spec for the small-measurement (SRC) chain; mirrors mrc() but
# uses measurement_small as the first stage.
#   $1 - optional element-name prefix; empty/absent = no prefix
# Reads the global batch size $BS; prints the space-separated stage list.
src() {
    local prefix=${1:-}
    # Join with "_" only when a prefix was supplied.
    # (BUG FIX: the original unquoted `[ $PREFIX != "" ]` degenerated to
    # `[ != "" ]` for an empty argument and printed a test(1) error.)
    if [ -n "$prefix" ]; then
        prefix="${prefix}_"
    fi
    echo "${prefix}measurement_small,${BS} ${prefix}routing_stanford,${BS} ${prefix}checksum,${BS}"
}
# Pipeline with queue
# Generates a pipeline that includes the inter-element queue.  All extra
# arguments ($@ - intentionally unquoted so each "name,batchsize" stage
# spec word-splits into its own argument) sit between the timer source
# and the drop sink; ${BS} is the global batch size.
pipeline-make-wq() {
    ${DIR}/make-pipeline.sh timer,${BS} $@ drop,${BS}
}
# Pipeline without queue
# Same as pipeline-make-wq but the generated pipeline bypasses the queue.
pipeline-make-nq() {
    ${DIR}/make-pipeline-bypass-queue.sh timer,${BS} $@ drop,${BS}
}
# Run the generated benchmark binary under an optional command prefix
# (e.g. perf or timeout supplied as $@).
# NOTE(review): -n4/-l1-3 flag semantics are defined by rxer-test
# (presumably worker count / core list) - confirm against its usage.
pipeline-run() {
    CMD_PREFIX=$@
    sudo ${CMD_PREFIX} ${DIR}/../bin/rxer-test -n4 -l1-3
}
# Ready-made prefixes for pipeline-run.
CMD_PREFIX_PERF="perf record -e cycles:pp,cache-misses"
CMD_PREFIX_TIMEOUT="timeout -k20 20"
# Measure the throughput of the pipeline
# Runs the pipeline 5 times under the timeout prefix, extracts the input
# rate from each run's log, and prints trimmed-mean / quartile statistics
# (via datamash) next to the temp-file path kept for debugging.
pipeline-thr() {
    mean='thr'
    tmpFile=`mktemp`
    for i in `seq 1 5`; do
        pipeline-run "${CMD_PREFIX_TIMEOUT}" >${tmpFile} 2>&1
        # Take field $14 of the "Rate ... In" lines, drop warm-up and
        # cool-down samples (tail -n10 | head -n6 keeps a middle window)
        # and trimmed-mean what remains.
        # NOTE(review): the column index assumes rxer-test's log layout -
        # confirm if the log format changes.
        val="`cat ${tmpFile} | grep Rate | grep In | awk '{print $14}' | tail -n10 | head -n6 | datamash -s trimmean 1`"
        mean="${mean}\n${val}"
    done
    # Left column: temp-file path; right column: per-run stats rolled up
    # across the 5 repetitions.
    paste <(echo -e "TempFile\n${tmpFile}\n") <(echo -e "${mean}" | datamash -H -s trimmean 1 mean 1 q1 1 median 1 q3 1 iqr 1 sstdev 1)
}
# Build, run and report a single benchmark configuration.
#   $1 header - 1 prints the column-header row, 0 emits data rows only
#   $2 BS     - batch size (stored in the global read by mrc/src)
#   $3 cmd    - pipeline generator to invoke (pipeline-make-wq / -nq)
#   $4 is_mrc - 1 = MRC pipeline, 2 = SRC pipeline, 0 = single element
#   $5 typ    - element-name prefix (MRC/SRC) or the bare element name
pipeline-stats() {
    header="$1"
    BS="$2"
    cmd="$3"
    is_mrc="$4"
    typ="$5"
    if [[ $is_mrc -eq 1 ]]; then
        pipeline="`mrc ${typ}`"
        name="MRC/$typ@${BS}"
    fi
    if [[ $is_mrc -eq 2 ]]; then
        pipeline="`src ${typ}`"
        name="SRC/$typ@${BS}"
    fi
    if [[ $is_mrc -eq 0 ]]; then
        pipeline="${typ},${BS}"
        name="$typ@${BS}"
    fi
    # Regenerate the benchmark binary for this configuration, then run it.
    ${cmd} ${pipeline} >build.log 2>&1
    res="`pipeline-thr`"
    # The first row carries the header; later rows keep only the last two
    # result lines so successive calls line up as one table.
    if [[ $header -eq 1 ]]; then
        paste <(echo -e "pipeline\tFLTO\n$name\t${FLTO}") <(echo -e "${res}") | column -t
    else
        paste <(echo -e "$name\t${FLTO}") <(echo -e "${res}" | tail -n 2) | column -t
    fi
}
# Toggle link-time optimisation in the project Makefile and mirror the
# setting in the global FLTO flag (reported by pipeline-stats).
#   $1 - 1 enables -flto; any other value disables it
set-flto() {
    if [[ $1 -eq 1 ]]; then
        # `c\` replaces the entire matching FLTO= line in place.
        sed -i '/FLTO=/c\FLTO=-flto' $DIR/../Makefile
        FLTO=1
    else
        sed -i '/FLTO=/c\FLTO=' $DIR/../Makefile
        FLTO=0
    fi
}
zipf-skewed-experiments() {
set-flto 0
pipeline-stats "1" "32" "pipeline-make-nq" "1" "bpp"
pipeline-stats "0" "32" "pipeline-make-nq" "2" "bpp"
# set-flto 1
# pipeline-stats "0" "112" "pipeline-make-nq" "1" "bpp"
# pipeline-stats "0" "130" "pipeline-make-nq" "0" "merged"
# pipeline-stats "0" "130" "pipeline-make-nq" "0" "merged_opt"
# pipeline-stats "0" "130" "pipeline-make-nq" "0" "merged_fastpass"
}
zipf-uniform-experiments() {
set-flto 0
pipeline-stats "1" "1" "pipeline-make-nq" "1" "naive"
pipeline-stats "0" "32" "pipeline-make-wq" "1" "batching"
pipeline-stats "0" "32" "pipeline-make-wq" "1" "bp"
pipeline-stats "0" "32" "pipeline-make-wq" "1" "bpp"
pipeline-stats "0" "32" "pipeline-make-nq" "1" "bpp"
pipeline-stats "0" "112" "pipeline-make-nq" "1" "bpp"
set-flto 1
pipeline-stats "0" "112" "pipeline-make-nq" "1" "bpp"
pipeline-stats "0" "130" "pipeline-make-nq" "0" "merged"
pipeline-stats "0" "130" "pipeline-make-nq" "0" "merged_opt"
pipeline-stats "0" "130" "pipeline-make-nq" "0" "merged_fastpass"
}
main() {
zipf-skewed-experiments
}
main
| true |
8c2c13d5400a3a7d297ba134fe4c5be9358a5ef6 | Shell | sandersk/epubchecker | /box_config/config.sh | UTF-8 | 335 | 3.265625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Add .bashrc config settings if not already done
# Idempotent provisioning step: append the auto "bundle install" command
# only when grep finds no existing copy in the vagrant user's .bashrc.
if [ $(grep -c 'cd /vagrant && bundle install' /home/vagrant/.bashrc) -eq 0 ]
then
  echo "Configuring login settings"
  # The two leading newlines keep the appended command visually separated.
  echo -e "\n\ncd /vagrant && bundle install" >> /home/vagrant/.bashrc
else
  echo ".bashrc already configured"
fi
echo "Provisioning complete!"
14c1b5f25d3b83e4ebc57e5e0c11666c776f29f9 | Shell | cedric84/cmake-tuto | /cmake_build.sh | UTF-8 | 667 | 3.921875 | 4 | [] | no_license | #---Definitions---#
CMAKE_SOURCE_DIR=$(pwd)
CMAKE_BINARY_DIR=${CMAKE_SOURCE_DIR}/out/build
CMAKE_INSTALL_PREFIX=${CMAKE_SOURCE_DIR}/out/install
#---Create & change to build directory---#
rm -Rf ${CMAKE_BINARY_DIR}
mkdir -p ${CMAKE_BINARY_DIR}
cd ${CMAKE_BINARY_DIR}
#---Configure the project & generate a native build system---#
cmake \
-DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX} \
${CMAKE_SOURCE_DIR}
if [ 0 != $? ]; then
exit 1
fi
#---Change to source directory---#
cd ${CMAKE_SOURCE_DIR}
#---Build the "install" target using the native build system---#
cmake --build ${CMAKE_BINARY_DIR} --target install
if [ 0 != $? ]; then
exit 1
fi
| true |
28f898bd06b3ec7fa070bcf0524c06a91537844d | Shell | chakra-core/ChakraCore | /tools/create_package.sh | UTF-8 | 3,996 | 3.8125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#-------------------------------------------------------------------------------------------------------
# Copyright (C) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
#-------------------------------------------------------------------------------------------------------
DEFAULT_COLOR='\033[0m'
ERROR_COLOR='\033[0;31m'
GREEN_COLOR='\033[0;32m'
# Abort the whole build with status $1, printing message $2 in the error
# colour, when $1 is non-zero; a zero status is a silent no-op.
IF_ERROR_EXIT() {
    ERROR_CODE=$1
    # Success - nothing to report, fall straight through.
    if [[ $ERROR_CODE == 0 ]]; then
        return
    fi
    echo -e "${ERROR_COLOR} $2 ${DEFAULT_COLOR}"
    exit $ERROR_CODE
}
PRINT_USAGE() {
echo -e "\n${GREEN_COLOR}ChakraCore Package Creation Script${DEFAULT_COLOR}"
echo -e "${ERROR_COLOR}This script may\n- download third party software\n- \
make changes to your system\nand it won't ask you any questions while doing it.${DEFAULT_COLOR}"
echo -e "For this reason,\nyou should run this script with\n${ERROR_COLOR}\
I_ACCEPT_TO_RUN_THIS${DEFAULT_COLOR} secret argument, ${ERROR_COLOR}at your own risk${DEFAULT_COLOR}\n"
echo -e "${GREEN_COLOR}Usage:${DEFAULT_COLOR} tools/create_package.sh <secret argument> <version>"
echo -e "${GREEN_COLOR}Sample:${DEFAULT_COLOR} tools/create_package.sh <secret argument> 2.0.0\n"
}
if [[ $# < 2 || $1 != "I_ACCEPT_TO_RUN_THIS" ]]; then
PRINT_USAGE
exit 0
fi
CHAKRA_VERSION=$2
CHAKRA_VERSION=${CHAKRA_VERSION//./_}
if [[ $2 == $CHAKRA_VERSION ]]; then
PRINT_USAGE
echo -e "${ERROR_COLOR}Unexpected version argument. Try something similar to \
2.0.0${DEFAULT_COLOR}"
exit 1
fi
if [[ ! -f './build.sh' ]]; then
echo -e "${ERROR_COLOR} Run this script from the repository root folder.${DEFAULT_COLOR}"
echo "Try -> tools/create_package.sh"
exit 1
fi
HOST_OS="osx"
HOST_EXT="dylib"
if [[ ! "$OSTYPE" =~ "darwin" ]]; then # osx
HOST_OS="linux"
HOST_EXT="so"
tools/compile_clang.sh -y
IF_ERROR_EXIT $? "Clang build failed"
fi
## Build
BUILD="./build.sh --embed-icu -y --lto -j=3"
${BUILD} --target-path=out/shared
IF_ERROR_EXIT $? "ChakraCore shared library build failed."
${BUILD} --static --target-path=out/static
IF_ERROR_EXIT $? "ChakraCore static library build failed."
## Create folders
rm -rf out/ChakraCoreFiles/
mkdir -p out/ChakraCoreFiles/include/ out/ChakraCoreFiles/lib/ out/ChakraCoreFiles/bin/ out/ChakraCoreFiles/sample/
IF_ERROR_EXIT $? "Creating ChakraCoreFiles folder failed"
## Copy Files
# lib (so or dylib)
cp "out/shared/Release/libChakraCore.${HOST_EXT}" out/ChakraCoreFiles/lib/
# bin
cp out/static/Release/ch out/ChakraCoreFiles/bin/
# include
cp out/shared/Release/include/*.h out/ChakraCoreFiles/include/
# license
cat LICENSE.txt > out/ChakraCoreFiles/LICENSE
echo -e "\n***** Third Party Notices [ for PreBuilt Binaries ] *****\n" >> out/ChakraCoreFiles/LICENSE
cat tools/XPlatInstall/BINARY-DIST-ONLY-NOTICES.txt >> out/ChakraCoreFiles/LICENSE
# sample
cp "tools/XPlatInstall/sample/README.md" out/ChakraCoreFiles/sample/README.md
cp "tools/XPlatInstall/sample/Makefile.sample" out/ChakraCoreFiles/sample/Makefile
cp "tools/XPlatInstall/sample/sample.cpp.txt" out/ChakraCoreFiles/sample/sample.cpp
## Test
python test/native-tests/test-python/helloWorld.py Release \
"out/ChakraCoreFiles/lib/libChakraCore.${HOST_EXT}" > /dev/null
IF_ERROR_EXIT $? "Shared library test failed"
out/ChakraCoreFiles/bin/ch test/Basics/hello.js > /dev/null
IF_ERROR_EXIT $? "CH binary test failed"
## Package
pushd out/ > /dev/null
PACKAGE_FILE="cc_${HOST_OS}_x64_${CHAKRA_VERSION}.tar.gz"
tar -czf "${PACKAGE_FILE}" ChakraCoreFiles/
mkdir -p temp/ChakraCoreFiles/
cp "${PACKAGE_FILE}" temp/chakracore.tar.gz
cd temp
SHASUM_FILE="cc_${HOST_OS}_x64_${CHAKRA_VERSION}_s.tar.gz"
shasum -a 512256 chakracore.tar.gz > ChakraCoreFiles/shasum
tar -czf "$SHASUM_FILE" ChakraCoreFiles
mv $SHASUM_FILE ../
cd ..
rm -rf temp/
popd > /dev/null
## Credits
echo -e "\nPackage & Shasum files are ready under ${GREEN_COLOR}out/${DEFAULT_COLOR}"
| true |
32eec29a16b42cf0ae680a87154ef49c7a46fbc6 | Shell | Hang-Lei-NOAA/NCEPLIBS-ip | /reg_tests/Runall.theia.ksh | UTF-8 | 4,293 | 3 | 3 | [] | no_license | #!/bin/ksh --login
#----------------------------------------------------------------------------
# Run the entire suite of IPOLATES (or IPLIB) regression tests on Theia.
#
# See the README file for information on setting up and compiling
# the test suite.
#
# Before, running set the $PROJECT_CODE to the project that will
# be charged when running the test suite. To find out which
# projects you are authorized to use, type "account_params".
#
# To run, type: "Runall.theia.ksh". A series of "daisy-chained"
# job steps will be submitted. To check the queue, type:
# "showq -n -v -u USERNAME"
#
# The run output is stored in $WORK_DIR. Log output from the test suite
# will be in "regression.log" To monitor as the suite is running,
# do: grep ">>>" regression.log. Once the suite is complete, a summary
# is placed in "summary.log"
#----------------------------------------------------------------------------
# The project that will be charged when running these jobs.
PROJECT_CODE=${PROJECT_CODE:-rm}
# Location of the regression test directory.
export REG_DIR=$(pwd)
# Working directory.
export WORK_DIR=/scratch3/NCEPDEV/stmp1/$LOGNAME/regression
rm -fr $WORK_DIR
mkdir -p $WORK_DIR
# Output log files.
LOG_FILE=${WORK_DIR}/regression.log
SUM_FILE=${WORK_DIR}/summary.log
module purge
module load intel
export OMP_NUM_THREADS=1
GAUSSLAT=$(qsub -l procs=1 -l vmem=500M -l walltime=0:01:00 -A $PROJECT_CODE -N iptest_gausslat -o $LOG_FILE -e $LOG_FILE \
-v REG_DIR,WORK_DIR,OMP_NUM_THREADS $REG_DIR/gausslat/scripts/runall.ksh)
IPXWAFS=$(qsub -l procs=1 -l vmem=500M -l walltime=0:02:00 -A $PROJECT_CODE -N iptest_ipxwafs -o $LOG_FILE -e $LOG_FILE \
-v REG_DIR,WORK_DIR,OMP_NUM_THREADS -W depend=afterok:$GAUSSLAT $REG_DIR/ipxwafs/scripts/runall.ksh)
IPXWAFS2_3=$(qsub -l procs=1 -l vmem=500M -l walltime=0:02:00 -A $PROJECT_CODE -N iptest_ipxwafs2 -o $LOG_FILE -e $LOG_FILE \
-v REG_DIR,WORK_DIR,OMP_NUM_THREADS -W depend=afterok:$IPXWAFS $REG_DIR/ipxwafs2_3/scripts/runall.ksh)
MAKGDS=$(qsub -l procs=1 -l vmem=500M -l walltime=0:02:00 -A $PROJECT_CODE -N iptest_makgds -o $LOG_FILE -e $LOG_FILE \
-v REG_DIR,WORK_DIR,OMP_NUM_THREADS -W depend=afterok:$IPXWAFS2_3 $REG_DIR/makgds/scripts/runall.ksh)
GDSWZD=$(qsub -l procs=1 -l vmem=2000M -l walltime=0:10:00 -A $PROJECT_CODE -N iptest_gdswzd -o $LOG_FILE -e $LOG_FILE \
-v REG_DIR,WORK_DIR,OMP_NUM_THREADS -W depend=afterok:$MAKGDS $REG_DIR/gdswzd/scripts/runall.ksh)
IPOLATES_1=$(qsub -l procs=1 -l vmem=2000M -l walltime=0:30:00 -A $PROJECT_CODE -N iptest_ipolates1 -o $LOG_FILE -e $LOG_FILE \
-F "1" -v REG_DIR,WORK_DIR,OMP_NUM_THREADS -W depend=afterok:$GDSWZD $REG_DIR/ipolates/scripts/runall.ksh)
export OMP_NUM_THREADS=4
IPOLATES_4=$(qsub -l nodes=1:ppn=24 -l walltime=0:30:00 -A $PROJECT_CODE -N iptest_ipolates4 -o $LOG_FILE -e $LOG_FILE \
-F "4" -W depend=afterok:$IPOLATES_1 \
-v REG_DIR,WORK_DIR,OMP_NUM_THREADS $REG_DIR/ipolates/scripts/runall.ksh)
export OMP_NUM_THREADS=1
IPOLATES_CMP=$(qsub -l procs=1 -l vmem=2000M -l walltime=0:05:00 -A $PROJECT_CODE -N iptest_ipolates_cmp -o $LOG_FILE -e $LOG_FILE \
-v WORK_DIR,OMP_NUM_THREADS -W depend=afterok:$IPOLATES_4 $REG_DIR/ipolates/scripts/compare.ksh)
IPOLATEV_1=$(qsub -l procs=1 -l vmem=2000M -l walltime=0:45:00 -A $PROJECT_CODE -N iptest_ipolatev1 -o $LOG_FILE -e $LOG_FILE \
-F "1" -W depend=afterok:$IPOLATES_CMP \
-v REG_DIR,WORK_DIR,OMP_NUM_THREADS $REG_DIR/ipolatev/scripts/runall.ksh)
export OMP_NUM_THREADS=4
IPOLATEV_4=$(qsub -l nodes=1:ppn=24 -l walltime=0:30:00 -A $PROJECT_CODE -N iptest_ipolatev4 -o $LOG_FILE -e $LOG_FILE \
-F "4" -W depend=afterok:$IPOLATEV_1 \
-v REG_DIR,WORK_DIR,OMP_NUM_THREADS $REG_DIR/ipolatev/scripts/runall.ksh)
export OMP_NUM_THREADS=1
IPOLATEV_CMP=$(qsub -l procs=1 -l vmem=2000M -l walltime=0:05:00 -A $PROJECT_CODE -N iptest_ipolatev_cmp -o $LOG_FILE -e $LOG_FILE \
-v WORK_DIR,OMP_NUM_THREADS -W depend=afterok:$IPOLATEV_4 $REG_DIR/ipolatev/scripts/compare.ksh)
SUMMARY=$(echo "grep '<<<' $LOG_FILE > $SUM_FILE" | qsub -l procs=1 -l vmem=500M -l walltime=0:01:00 -A $PROJECT_CODE -N iptest_summary \
-o $LOG_FILE -e $LOG_FILE -v REG_DIR,WORK_DIR,OMP_NUM_THREADS -W depend=afterok:$IPOLATEV_CMP)
exit 0
| true |
1f477e505aaf2a0cb3ead2f01bf8376915e7a1d1 | Shell | chenyuanchun/etc | /bash/rayrc | UTF-8 | 4,150 | 3.546875 | 4 | [] | no_license | #!/bin/bash -eu
# local bash settings
echo Sourcing user bashrc ...
# Prepend (default) or append ($2 == "after") directory $1 to PATH,
# skipping the add when it is already present.
# FIX: the original piped $PATH through the hard-coded /bin/egrep, which
# breaks on systems where egrep lives elsewhere (and egrep is deprecated);
# it also interpolated $1 into a regex, so metacharacters in the path
# could mis-match.  A case glob on ":$PATH:" needs no external command
# and treats $1 literally.
pathmunge () {
    case ":${PATH}:" in
        *":$1:"*)
            # Already on PATH - nothing to do.
            ;;
        *)
            if [ "${2:-}" = "after" ] ; then
                PATH=$PATH:$1
            else
                PATH=$1:$PATH
            fi
            ;;
    esac
}
cdn () { pushd .; for ((i=1; i<=$1; i++)); do cd ..; done; }
ctitle() {
echo -ne "\033]0;$1\007"
}
[ $OS ] || OS=$(uname)
stty -ixon #-ixoff
# history
export HISTIGNORE=".1:.2:.3:.4:pwd:ls:l:la:ll:exit:quit:h:w:who:whoami:alias:which*:whereis*:cd:python*:emacs*:eclipse*:ps:v:vi:vim:envhost*:hostname:make:pstack*:pstree*:ifconfig*:jobs:fg:bg*:cal:date:locate:sleep*"
export HISTCONTROL=ignoreboth:erasedups
export HISTSIZE=3000
export HISTFILESIZE=10000
# extended globbing
shopt -s extglob
# append to the history file, don't overwrite it
shopt -s histappend
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# Color definitions
COLORRC=$(dirname ${BASH_SOURCE[0]})/color.rc
[ -f $COLORRC ] && . $COLORRC
##################################
# prompt
#export PS1='\n=== \h [\D{%m-%d %a %H:%M:%S}] \w ===\n$ '
PS1="\n=\!=${Green}[\d \@] ${BIYellow}\u@\h:${White}\w\e[m ===\n\$ "
#########################################
# OS specific settings
#########################################
if [ $OS = SunOS ]; then
PATH=${PATH:+$PATH:}/usr/local/bin:/usr/ucb:/usr/openwin/bin:/usr/sfw/bin:/usr/ccs/bin:/usr/dt/bin${MRM_BIN:+:$MRM_BIN}
LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}/usr/openwin/lib:/usr/lib/X11:/usr/dt/lib:/usr/sfw/lib:/usr/ccs/lib:/usr/local/lib
# system limit
ulimit -S -c unlimited >&-
# man path
export MANPATH=/usr/share/man:/usr/sfw/man:/usr/man:/usr/openwin/man:/apps/gnu/man:/apps/algo/perl/man
elif [ "$OS" = Linux ]; then
# User specific aliases and functions
#
# cd upper levels
#
function cd {
local option= length= count= cdpath= i=
# if we have -L or -P sym link option, save then remove it
if [ "$1" = "-P" -o "$1" = "-L" ]; then
option="$1"
shift
fi
if [ -n "$1" -a "${1:0:3}" = '...' -a "$1" = "${1%/*}" ]; then
length=${#1}
count=2
for ((i=$count;i<=$length;i++)); do
cdpath="${cdpath}../"
done
builtin cd $option "$cdpath"
elif [ -n "$1" ]; then
builtin cd $option "$*"
else
builtin cd $option
fi
}
#
# mkdir newdir then cd into it
#
function mcd {
local newdir='_mcd_command_failed_'
if [ -d "$1" ]; then
echo "$1 exists..."
newdir="$1"
else
if [ -n "$2" ]; then
command mkdir -p -m $1 "$2" && newdir="$2"
else
command mkdir -p "$1" && newdir="$1"
fi
fi
builtin cd "$newdir"
}
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
export TERM=xterm-256color
# set cd path
export CDPATH=".:$HOME/workspace"
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
alias sl='ll | grep ">"'
alias l.='ls -dF .*'
[ "$OS" = "Linux" ] || alias which='alias | /usr/bin/which --tty-only --read-alias --show-dot --show-tilde'
alias ec='emacsclient -c'
alias et='emacsclient -t'
# Add an "alert" alias for long running commands. Use like so:
# sleep 10; alert
#alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
fi
| true |
9a36068c9b01458b0255da20ec5d60cb0b761154 | Shell | tuscland/heroku-buildpack-cl | /bin/test-run | UTF-8 | 355 | 2.625 | 3 | [] | no_license | #!/usr/bin/env bash
# Absolute directory containing this script.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Repository name = basename of the git work-tree root.
REPO_NAME="$( basename `git rev-parse --show-toplevel` )"
# Mirror the buildpack's directory layout under $TMPDIR.
BUILD_DIR="$TMPDIR/$REPO_NAME/build"
CACHE_DIR="$TMPDIR/$REPO_NAME/cache"
CCL_DIR="$CACHE_DIR/ccl"
export PORT=3000
export XDG_CONFIG_HOME="$BUILD_DIR/.config"
cd $BUILD_DIR
# Exec the "web" process command from the Procfile (the text after ":").
exec $( grep web Procfile | cut -f 2 -d : )
| true |
a3f6af17f0f1b3d0de7f28c309fa8a11645ea73f | Shell | darthguinea/Scripts | /drmi | UTF-8 | 1,113 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# Stop every running container created from image $1.
# (`docker ps` column 2 is the image name, column 1 the container id.)
stopContainers() {
    IMAGE=$1
    for i in `docker ps | awk -v image=$IMAGE '{ if ($2 == image) print $0; }' | awk '{print $1}'`
    do
        docker stop ${i}
    done
}
# Remove all containers - running or exited (-a) - created from image $1.
removeContainers() {
    IMAGE=$1
    for i in `docker ps -a | awk -v image=$IMAGE '{ if ($2 == image) print $0; }' | awk '{print $1}'`
    do
        docker rm $i
    done
}
# Delete every local image whose repository name matches $1.
# (`docker images` column 1 is the repository, column 3 the image id.)
removeImage() {
    IMAGE=$1
    for i in `docker images | awk -v image=$IMAGE '{if ($1 == image) print $0; }' | awk '{print $3}'`
    do
        docker rmi ${i}
    done
}
# Resolve a container id OR image name ($1) to the image name by scanning
# all containers; prints the match on stdout (multiple matches collapse
# to one space-separated line because $IMAGE is echoed unquoted).
getImage() {
    IMAGE_ID=$1
    IMAGE=$(docker ps -a | awk -v image_id=$IMAGE_ID '{ if ($1 == image_id || $2 == image_id) print $0; }' | awk '{print $2}')
    echo $IMAGE
}
# Resolve the image name from the container/image id in $1, then stop and
# remove every matching container and finally delete the image itself.
main() {
    IMAGE=$(getImage "$1")

    # Parse options.
    # BUG FIX: the original dispatched on "$arg", a variable getopts never
    # sets; the parsed option character lives in "$o", so options were
    # silently ignored.
    while getopts ":n" o
    do
        case $o in
            n)
                echo "usage"
                ;;
            ?)
                echo "usage"
                ;;
        esac
    done

    if [ "${IMAGE}" != "" ]
    then
        stopContainers "${IMAGE}"
        removeContainers "${IMAGE}"
        removeImage "${IMAGE}"
    fi
}
# Script entry point: $1 is a container id or image name.
# NOTE(review): IMAGE_ID is set here but main() re-derives the image
# itself; kept for backward compatibility.
IMAGE_ID=$1
# FIX: quote "$@" so arguments containing whitespace reach main() intact.
main "$@"
| true |
5b3458405d37c19c2b130f5ed819cdaf47bf7b1b | Shell | pabloskubert/bash_scripts | /pickag | UTF-8 | 895 | 3.953125 | 4 | [] | no_license | #!/bin/bash
# Path to the user-agent corpus: one agent string per line.
LIST_FILE="$HOME/tools/RandomAgent/user-agents.txt"
# $1: number of shuffle iterations (default 10); $2: -v enables verbose logs.
# ("INTERATIONS" (sic) is the script's historical spelling - it appears in
# the user-facing usage string, so it is preserved.)
[ ! -z "$1" ] && END_INT="$1" || END_INT=10;
[ "$2" == "-v" ] && VERBOSE="1" || VERBOSE="0";
if [ "$1" == "--help" -o "$1" == "-h" ]
then
	echo -e "\n\tUse: $0 [INTERATIONS default 10] \n\t-v to enable verbose mode\n"
	exit;
else
	INTERATIONS="$END_INT"
fi
# Emit a tab-indented status line (preceded by a blank line), but only
# when verbose mode was requested on the command line; otherwise return 1.
log() {
    STR="$1"
    if [ "$VERBOSE" == "1" ]; then
        echo -e "\n\t$STR"
    else
        return 1
    fi
}
[ ! -f "$LIST_FILE" ] && echo -e "\n\t[Error] User agents list not found, put it in $HOME/tools/RandomAgent with name \"user-agents.txt\".\n" && exit;
# Number of candidate lines in the corpus (upper bound for shuf).
LEN=$(wc -l "$LIST_FILE" | cut -d " " -f1)
log "Starting interaction, init[1] end [$INTERATIONS]"
# NOTE(review): each iteration overwrites FROM_LINE, so only the final
# roll survives - this loop is equivalent to a single shuf call and the
# $INTERATIONS knob has no effect on the distribution.  Kept as-is in
# case the repetition is intentional.
for i in `seq 1 $INTERATIONS`; do
	FROM_LINE=$(shuf -i 1-"$LEN" -n 1)
done
log "Getting user-agent by line $FROM_LINE nº from the file $LIST_FILE"
# get user agent from list by line number
sed "$FROM_LINE q; d" "$LIST_FILE"
| true |
02043a5351551f1796f1a1f4a7dddb3b2c638095 | Shell | XuLiWu/aesthetics_assessment_using_graphs | /extract_graph.sh | UTF-8 | 936 | 2.78125 | 3 | [] | no_license | #!/bin/bash
#This is a default ID. No need to change
ID=AIAG_Extraction
#This is the id of the particular run. Name as you wish
EXPERIMENT_ID=Extraction
#Path to the CSV file containing AVA IDs
DB=meta/A2P2_FULL_Corrected.CSV
#Path to AVA images.
DATAPATH=/path/to/images/
#Directory to store the features.
SAVE_FEAT=dump/
#Feature File Name
FEAT_FILE_NAME=INC_RN_V2.h5
#Backbone. Currently supports Inc-ResNet-v2 only. Adding new backbones is trivial.
BASE_MODEL=inceptionresnetv2
#Saved feature data precision
FP=16
#Number of images to extract features from. Use -1 if all images are to be used. Use a smaller value for debugging.
PILOT=1000
CUDA_VISIBLE_DEVICES=1 python3 -W ignore extract_graph.py --id $ID --db $DB --datapath $DATAPATH --pretrained --exp_id $EXPERIMENT_ID --feature_file_name $FEAT_FILE_NAME\
--base_model $BASE_MODEL \
--data_precision $FP \
--save_feat $SAVE_FEAT --pilot $PILOT --n_workers 4
| true |
edffcd8886734fd34e9465775155b4809128da61 | Shell | keyoffecka/idlebuilder | /bin/locale.sh | UTF-8 | 987 | 2.640625 | 3 | [] | no_license | #!/bin/bash
#Should be executed mannually once after installing GLibC x86_64 for the base system.
set +h
set -e
set -u
mkdir -pv /usr/lib64/locale
/usr/bin/localedef -i ru_RU -f ISO-8859-5 ru_RU
/usr/bin/localedef -i ru_RU -f KOI8-R ru_RU
/usr/bin/localedef -i ru_RU -f CP1251 ru_RU
/usr/bin/localedef -i ru_RU -f IBM866 ru_RU
/usr/bin/localedef -i ru_RU -f UTF-8 ru_RU
/usr/bin/localedef -i en_US -f ISO-8859-1 en_US
/usr/bin/localedef -i en_US -f UTF-8 en_US
/usr/bin/localedef -i en_GB -f ISO-8859-1 en_GB
/usr/bin/localedef -i en_GB -f UTF-8 en_GB
/usr/bin/localedef -i es_ES -f ISO-8859-1 es_ES
/usr/bin/localedef -i es_ES -f UTF-8 es_ES
/usr/bin/localedef -i es_MX -f ISO-8859-1 es_MX
/usr/bin/localedef -i es_MX -f UTF-8 es_MX
/usr/bin/localedef -i fr_FR -f ISO-8859-1 fr_FR
/usr/bin/localedef -i fr_FR@euro -f ISO-8859-15 fr_FR@euro
/usr/bin/localedef -i fr_FR -f UTF-8 fr_FR
/usr/bin/localedef -i cs_CZ -f UTF-8 cs_CZ.UTF-8
/usr/bin/localedef -i ja_JP -f EUC-JP ja_JP
| true |
0a7682501715ac3fea9f427a72cfdd45b484067a | Shell | prairie-guy/gpu_setup | /reinstall-fastai.sh | UTF-8 | 1,457 | 2.75 | 3 | [] | no_license | #!/usr/bin/env bash
# 05/03/2022
## Clean reinstall
## 1. reinstall-anaconda.sh
## 2. reinstall-fastai.sh
## 3. reinstall-jupyter.sh
## Only execute 'reinstall-fastai.sh' from conda env 'fastai'
## 1. completed: 1. reinstall-anaconda.sh (creates conda env 'fastai')
## 2. next : 2. reinstall-fastai.sh
## OR
## 0. conda info --envs # list all envs
## 1. conda activate base
## 2. conda remove --name fastai --all
## 3. conda create -n fastai python=3.9
## 4. conda init bash
## 5. conda activate fastai
## 6. reinstall-fastai.sh
### Install fastai stuff
mamba install -c fastchan fastai
## Mamba installs
mamba install torchaudio -c pytorch
mamba install scikit-learn
mamba install datasets transformers protobuf
mamba install kaggle
# pip installs
pip install -U fastbook
pip install nbdev
## Faster image processing
## `libjpeg-turbo` (2-6x) and `Pillow-SIMD ` (4-6)
## No TIFF support; if required (https://docs.fast.ai/dev/performance.html#faster-image-processing)
## Uncomment to include
# conda uninstall -y --force pillow pil jpeg libtiff libjpeg-turbo
# pip uninstall -y pillow pil jpeg libtiff libjpeg-turbo
# conda install -yc conda-forge libjpeg-turbo
# CFLAGS="${CFLAGS} -mavx2" pip -v install --upgrade --no-cache-dir --force-reinstall --no-binary :all: --compile pillow-simd
# conda install -y jpeg libtiff
## To add jupyter:
## run reinstall-jupyter.sh
echo "done: 2. reinstall-fastai.sh"
echo "next: 3. reinstall-jupyter.sh"
| true |
6c84b545a4d79ff4e11e57c5e227dad5446691c6 | Shell | ccFiona/data-analyse | /5.104 xiaofei/getVVMac.sh | UTF-8 | 834 | 2.8125 | 3 | [] | no_license | ############################################################################
##
## Copyright (c) 2013 hunantv.com, Inc. All Rights Reserved
## $Id: getExpLogs.sh,v 0.0 Wed 18 May 2016 06:19:27 PM CST <tangye> Exp $
##
############################################################################
#
###
# # @date Wed 18 May 2016 06:19:27 PM CST
# # @brief
# #
# ##
#!/bin/bash
# Usage: getVVMac.sh <maclist> <input> <out>
#   maclist - per-MAC expected-VV counts: "<mac> <count>" per line
#   input   - raw log; per the awk below, field 5 is the MAC and field 2
#             the app version (inferred from the program text - confirm)
#   out     - per-MAC summary sorted by expected count
maclist=$1
input=$2
out=$3
# Echo the in/out paths so batch callers can trace progress in their logs.
echo $input
echo $out
awk 'ARGIND==1 {expVV_count[$1]=$2;}
ARGIND==2 {
for(a in expVV_count){
if($5==a){
if($2!="5.0.1.999.2.TY.0.0_Release"){
openVV_count[a]++;
}
}
}
}
END{
for(a in expVV_count){
if(openVV_count[a]=="")
print a,expVV_count[a],0;
else
print a,expVV_count[a],openVV_count[a];
}
}' $maclist $input | sort -n -k2 > $out
## vim: set ts=2 sw=2: #
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.