blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
c9266c913830f6d77bf27434bf9c839410b279dd
|
Shell
|
gravitypriest/insights-tower-upload
|
/insights-tower-upload.sh
|
UTF-8
| 1,590
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Upload a file to the Red Hat Insights ingress service (cloud.redhat.com)
# and report whether the service accepted it (HTTP 202).
verbose=""
default_content_type="application/vnd.redhat.tower.test+tgz" # subject to change, don't know tower's type

# Print usage/help text for all supported flags.
help_me() {
    echo "Usage: $0 -u USERNAME -p PASSWORD -f FILE -c CONTENT-TYPE"
    echo -e "\t-u USERNAME\tRed Hat Customer Portal username"
    echo -e "\t-p PASSWORD\tRed Hat Customer Portal password (optional, can input interactively)"
    echo -e "\t-f FILE\t\tFile to upload"
    echo -e "\t-c CONTENT-TYPE\tContent-type of the file (optional, defaults to $default_content_type)"
    echo -e "\t-v\t\tGet verbose output from curl"
    echo -e "\t-h\t\tPrint this help message"
}

while getopts "u:p:f:c:vh" o
do
    case "$o" in
        u ) user="$OPTARG" ;;
        p ) pass="$OPTARG" ;;
        f ) file="$OPTARG" ;;
        c ) content_type="$OPTARG" ;;
        v ) verbose=' -v' ;;
        h ) help_me; exit 0 ;;
        ? ) help_me; exit 1 ;;
    esac
done

if [ -z "$user" ]
then
    echo "Error: Missing authentication username."
    help_me; exit 1
fi
if [ -z "$file" ]
then
    echo "Error: Missing file to upload."
    help_me; exit 1
fi
# fix: quote the expansion — the original unquoted `[ -z $content_type ]`
# breaks if the value contains whitespace.
if [ -z "$content_type" ]
then
    content_type="$default_content_type"
fi

user_arg_string="$user"
if [ "$pass" ]
then
    # add password to arg if specified; otherwise curl -u prompts for it
    user_arg_string="$user:$pass"
fi

# send curl output to stderr, but write http status to stdout so we can check it
# ($verbose is intentionally unquoted: it is empty or word-splits into "-v")
http_status=$(curl -u "$user_arg_string" -X POST https://cloud.redhat.com/api/ingress/v1/upload -F "file=@$file;type=$content_type" -o /dev/stderr --write-out "%{http_code}" $verbose)
if [ "$http_status" == "202" ]
then
    echo "Upload successful."
    exit 0
else
    echo "Upload failed. HTTP code: $http_status"
    exit 1
fi
| true
|
e5269454e2c58d866ba3f68094bcc1eaeca6f462
|
Shell
|
droofe/ctf-repo
|
/csaw2017/pilot/debug.sh
|
UTF-8
| 408
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Usage: debug.sh <name>
# Opens a tmux session ("debug") with "python <name>.py" running in one
# pane and, after a short delay, gdb attached to the process named <name>
# in the other pane.
# Create a new session
tmux -2 new-session -d -s debug
# Create a new window
tmux new-window -t debug:1 -n "Script Window"
# Split the window
tmux split-window -h
# Setup the first pane, run the program
tmux select-pane -t 0
tmux send-keys "python $1.py" C-m
# Setup the second pane, debug
tmux select-pane -t 1
# The backticks are escaped on purpose: `pidof $1` must be evaluated by the
# shell inside the tmux pane (after the program has started), not here.
tmux send-keys "sleep 1;gdb -p \`pidof $1\`" C-m
tmux attach-session -t debug
| true
|
5010996ad8c02bc8ee0b550b0db6b2a3577a7b52
|
Shell
|
samcom12/hpc-collab
|
/clusters/vc/common/provision/requires/vcsvc/4.sync-NTP
|
UTF-8
| 876
| 3.421875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
## $Header: $
## Source:
## @file vcbuild/requires/vcfs/3.dns-query
# Verify/establish NTP synchronization on this cluster node using chrony.
# Helper functions (SetFlags, Rc, ErrExit) and constants (EX_OSERR, EX_OK,
# GREP_FOUND) are provided by the sourced loader below.
VCLOAD=../../../provision/loader/shload.sh
if [ ! -f "${VCLOAD}" ] ; then
echo "${0}: missing: ${VCLOAD}"
exit 99
fi
source ${VCLOAD}
# if we're given an argument, append test output to it
declare -x OUT=${1:-""}
if [ -n "${OUT}" ] ; then
touch ${OUT} || exit 1
exec > >(tee -a "${OUT}") 2>&1
fi
SetFlags >/dev/null 2>&1
# This node's IP address, read from a local "ipaddr" file.
declare -x IPADDR=$(cat ipaddr)
Rc ErrExit ${EX_OSERR} "[ -n \"${IPADDR}\" ] || false"
# Test whether chrony already lists our own address among its NTP sources.
# NOTE(review): grep's output is redirected to /dev/null, so refid is
# always empty; only the exit status (rc) is used — confirm intentional.
refid=$(chronyc -c ntpdata | grep "${IPADDR}" >/dev/null 2>&1)
rc=$?
if [ ${rc} -ne ${GREP_FOUND} ] ; then
# Not synced yet: retry "chronyc waitsync", doubling maxtries each pass.
maxtries=10
limit=5
interval=5
for i in $(seq 1 ${limit})
do
Rc ErrExit ${EX_OSERR} "chronyc waitsync ${maxtries} 1 1 ${interval}"
maxtries=$(expr ${maxtries} + ${maxtries})
done
else
# Already synced: re-confirm asynchronously in the background.
( cd /; chronyc waitsync ) &
fi
trap '' 0
exit ${EX_OK}
| true
|
19b25c17be4490fe2962834ab6056a2e35f28aa0
|
Shell
|
Jimdo/prometheus-clj
|
/lein
|
UTF-8
| 477
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Wrapper around Leiningen: bootstraps a project-local install under
# vendor/lein on first use, then forwards the command line to it.
# Special case: "lein clojars" deploys, but only on CI (BUILD_NUMBER set).
export LEIN_HOME=vendor/lein
export LEIN_ROOT=true
LEIN_BIN=$LEIN_HOME/lein

# Download the lein launcher script once, if not already present.
if [ ! -x "$LEIN_BIN" ]; then
    mkdir -p "$LEIN_HOME"
    echo "-> Downloading lein installer ..."
    curl -s https://raw.githubusercontent.com/technomancy/leiningen/stable/bin/lein -o "$LEIN_BIN"
    chmod +x "$LEIN_BIN"
fi

if [ "$1" == "clojars" ]; then
    if [ -n "$BUILD_NUMBER" ]; then
        "$LEIN_BIN" deploy clojars
    else
        echo "BUILD_NUMBER not set!"
        exit 1
    fi
else
    # fix: quote "$@" (was unquoted $@) so arguments containing spaces
    # reach lein intact.
    "$LEIN_BIN" "$@"
fi
| true
|
69c02bc6e7b0915c68bad4aa680977d9b33051a5
|
Shell
|
pashachek/sharetribe-docker
|
/include/docker-entrypoint.sh
|
UTF-8
| 7,819
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Docker entrypoint for a Sharetribe (Rails) container: writes config
# files, then dispatches on "$1 $2" (see help) to configure the database
# or start the app server / background worker.
set -e
# Service endpoints and cache settings; every value can be overridden
# through the container environment.
export SPHINX_HOST="${SPHINX_HOST:-search}"
export MYSQL_HOST="${MYSQL_HOST:-mysql}"
# NOTE(review): redis_host defaults to "memcache" — looks like a leftover
# from a memcached setup; confirm the intended default host name.
export redis_host="${redis_host:-memcache}"
export redis_port="${redis_port:-6379}"
export redis_db="${redis_db:-1}"
export redis_expires_in="${redis_expires_in:-240}"
export RS_MAILCATCHER="${RS_MAILCATCHER:-0}"
# Print a two-line progress message: the calling context (usually a
# function name) on the first line, the detail (usually a file path)
# indented with " >> " on the second.
echo_info() {
    local context="$1"
    local detail="$2"
    echo "$context"
    echo " >> $detail"
}
# Run a single SQL statement ($1) against the configured MySQL server.
# --batch/--skip-column-names make the output raw TSV rows, suitable for
# direct comparison in callers (e.g. db_structure_load).
# NOTE(review): MYSQL_USER/MYSQL_PASSWORD are expanded unquoted — assumes
# they contain no whitespace; verify against the deployment environment.
function mysql_exec() {
mysql --host=${MYSQL_HOST} --user=${MYSQL_USER} --password=${MYSQL_PASSWORD} --skip-column-names --batch --execute="${1}"
}
# Generate config/database.yml for the current RAILS_ENV from the MYSQL_*
# environment variables. Overwrites any existing file, but only when all
# four variables (database, user, password, host) are set.
function app_database_yml() {
FUNC_NAME="app_database_yml"
FILE_PATH="${RS_HOME_DIR_PREFIX}/${RS_USER}/${RS_APP_ROOT}/config/database.yml"
if [[ -n ${MYSQL_DATABASE} ]] && [[ -n ${MYSQL_USER} ]] && [[ -n ${MYSQL_PASSWORD} ]] && [[ -n ${MYSQL_HOST} ]]; then
echo_info ${FUNC_NAME} ${FILE_PATH}
echo "${RAILS_ENV}:" > ${FILE_PATH}
echo "  adapter: mysql2" >> ${FILE_PATH}
echo "  database: ${MYSQL_DATABASE}" >> ${FILE_PATH}
echo "  encoding: utf8" >> ${FILE_PATH}
echo "  username: ${MYSQL_USER}" >> ${FILE_PATH}
echo "  password: ${MYSQL_PASSWORD}" >> ${FILE_PATH}
echo "  host: ${MYSQL_HOST}" >> ${FILE_PATH}
fi
}
# Generate config/config.yml once; an existing file is kept untouched.
# The secret key base falls back to a freshly generated `rake secret`.
function app_config_yml() {
FUNC_NAME="app_config_yml"
FILE_PATH="${RS_HOME_DIR_PREFIX}/${RS_USER}/${RS_APP_ROOT}/config/config.yml"
if [[ ! -f ${FILE_PATH} ]]; then
echo_info ${FUNC_NAME} ${FILE_PATH}
echo "${RAILS_ENV}:" > ${FILE_PATH}
echo "  secret_key_base: \"${RS_SECRET_KEY_BASE:-$(bin/bundle exec rake secret)}\"" >> ${FILE_PATH}
echo "  sharetribe_mail_from_address: \"${RS_AUTH_USER}@${RS_DOMAIN}\"" >> ${FILE_PATH}
fi
}
# Write the app user's ~/.msmtprc mail relay configuration (overwritten on
# every call). With RS_MAILCATCHER=1 mail goes unauthenticated to host
# "sendmail" on port 1025 (a MailCatcher); otherwise it relays through
# host "smtp" on port 25 authenticating as RS_AUTH_USER@RS_DOMAIN.
function app_msmtp_conf() {
FUNC_NAME="app_msmtp_conf"
FILE_PATH="${RS_HOME_DIR_PREFIX}/${RS_USER}/.msmtprc"
echo_info ${FUNC_NAME} ${FILE_PATH}
if [[ $RS_MAILCATCHER = 1 ]]; then
echo "# Set default values for all following accounts." > ${FILE_PATH}
echo "defaults" >> ${FILE_PATH}
echo "auth off" >> ${FILE_PATH}
echo "tls off" >> ${FILE_PATH}
echo "tls_trust_file /etc/ssl/certs/ca-certificates.crt" >> ${FILE_PATH}
echo "logfile ${RS_HOME_DIR_PREFIX}/${RS_USER}/.msmtp.log" >> ${FILE_PATH}
echo "" >> ${FILE_PATH}
echo "# smtp" >> ${FILE_PATH}
echo "account local" >> ${FILE_PATH}
echo "host sendmail" >> ${FILE_PATH}
echo "port 1025" >> ${FILE_PATH}
echo "from ${RS_AUTH_USER}@${RS_DOMAIN}" >> ${FILE_PATH}
echo "# user ${RS_AUTH_USER}@${RS_DOMAIN}" >> ${FILE_PATH}
echo "# password ${RS_AUTH_PASS}" >> ${FILE_PATH}
echo "" >> ${FILE_PATH}
echo "# Set a default account" >> ${FILE_PATH}
echo "account default : local" >> ${FILE_PATH}
else
echo "# Set default values for all following accounts." > ${FILE_PATH}
echo "defaults" >> ${FILE_PATH}
echo "auth on" >> ${FILE_PATH}
echo "tls off" >> ${FILE_PATH}
echo "tls_trust_file /etc/ssl/certs/ca-certificates.crt" >> ${FILE_PATH}
echo "logfile ${RS_HOME_DIR_PREFIX}/${RS_USER}/.msmtp.log" >> ${FILE_PATH}
echo "" >> ${FILE_PATH}
echo "# smtp" >> ${FILE_PATH}
echo "account local" >> ${FILE_PATH}
echo "host smtp" >> ${FILE_PATH}
echo "port 25" >> ${FILE_PATH}
echo "from ${RS_AUTH_USER}@${RS_DOMAIN}" >> ${FILE_PATH}
echo "user ${RS_AUTH_USER}@${RS_DOMAIN}" >> ${FILE_PATH}
echo "password ${RS_AUTH_PASS}" >> ${FILE_PATH}
echo "" >> ${FILE_PATH}
echo "# Set a default account" >> ${FILE_PATH}
echo "account default : local" >> ${FILE_PATH}
fi
# Restrict to owner read/write — the file can contain a password.
chmod u=rw,g=,o= ${FILE_PATH}
}
# Load the Rails DB structure, but only into an empty database (zero
# tables in ${MYSQL_DATABASE}); existing data is never touched.
function db_structure_load() {
FUNC_NAME="db_structure_load"
# FILE_PATH is reused here purely as a label for echo_info, not a path.
FILE_PATH="mysql_exec"
if [[ $(mysql_exec "SELECT COUNT(TABLE_NAME) FROM information_schema.TABLES WHERE TABLE_SCHEMA = \"${MYSQL_DATABASE}\";") = 0 ]]; then
echo_info ${FUNC_NAME} ${FILE_PATH}
bundle exec rake db:structure:load
fi
}
# Delete a stale Rails server.pid left over from a previous container run
# so the app server can boot cleanly; a missing pid file is fine.
function tmp_clean() {
    FUNC_NAME="tmp_clean"
    FILE_PATH="${RS_HOME_DIR_PREFIX}/${RS_USER}/${RS_APP_ROOT}/tmp/pids/server.pid"
    echo_info ${FUNC_NAME} ${FILE_PATH}
    # Only remove a pid file that actually exists.
    if [[ -f ${FILE_PATH} ]]
    then
        rm -rf ${FILE_PATH}
    fi
}
# if [[ -n ${TZDATA} ]]; then
# cat /dev/null > /etc/locale.gen && \
# echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen && \
# echo "ru_RU.UTF-8 UTF-8" >> /etc/locale.gen && \
# /usr/sbin/locale-gen && \
# echo ${TZDATA} > /etc/timezone && \
# dpkg-reconfigure --frontend noninteractive tzdata
# fi
# Print entrypoint usage: the supported "<command> <subcommand>" pairs.
function help() {
    printf '%s\n' \
        "usage: ${0} [OPTIONS]" \
        "OPTIONS:" \
        "-h | --help - print help" \
        "" \
        "config domain - set domain to db" \
        "config payments - load payment table structure to db" \
        "config all - exec all config suboptions" \
        "" \
        "app deploy - rake db:migrate" \
        "app - start app server" \
        "worker - start delayed_job and sphinxsearch"
}
# Dispatch on the pair "<command>:<subcommand>" from the CLI arguments.
case ${1}:${2} in
config:domain)
# Point community #1 at RS_DOMAIN and enable custom-domain mode.
mysql_exec "UPDATE ${MYSQL_DATABASE}.communities SET domain = \"${RS_DOMAIN}\" WHERE id = '1';"
mysql_exec "UPDATE ${MYSQL_DATABASE}.communities SET use_domain = '1' WHERE id = '1';"
;;
config:payments)
# Seed paypal + stripe rows in payment_settings and append a freshly
# generated app_encryption_key to config.yml.
mysql_exec "INSERT INTO ${MYSQL_DATABASE}.payment_settings (id, active, community_id, payment_gateway, payment_process, commission_from_seller, minimum_price_cents, minimum_price_currency, minimum_transaction_fee_cents, minimum_transaction_fee_currency, confirmation_after_days, created_at, updated_at, api_client_id, api_private_key, api_publishable_key, api_verified, api_visible_private_key, api_country) VALUES (121240, 1, 1, 'paypal', 'preauthorize', NULL, NULL, NULL, NULL, NULL, 14, '2017-10-22 20:12:39', '2017-11-13 23:03:39', NULL, NULL, NULL, 0, NULL, NULL), (121241, 1, 1, 'stripe', 'preauthorize', NULL, NULL, NULL, NULL, NULL, 14, '2017-10-22 20:12:39', '2017-11-13 23:03:39', NULL, NULL, NULL, 0, NULL, NULL);"
echo "  app_encryption_key: \"$(rake secret | cut --characters=1-64)\"" >> ${RS_HOME_DIR_PREFIX}/${RS_USER}/${RS_APP_ROOT}/config/config.yml
;;
config:all)
# Re-invoke this entrypoint once per config suboption.
${0} config domain
${0} config payments
;;
app:deploy)
bundle exec rake db:migrate
;;
app:)
# Full app start: clean pid file, write configs, install gems, load the
# schema into an empty DB, migrate (only when tables exist), then boot.
tmp_clean
app_database_yml
app_config_yml
app_msmtp_conf
bundle install
db_structure_load
if [[ $(mysql_exec "SELECT COUNT(TABLE_NAME) FROM information_schema.TABLES WHERE TABLE_SCHEMA = \"${MYSQL_DATABASE}\";") -ne 0 ]]; then
${0} app deploy
fi
# Development uses foreman with the static Procfile; anything else runs
# precompiled assets under passenger.
if [[ $RAILS_ENV = development ]] && [[ $NODE_ENV = development ]]; then
bundle exec rake assets:clobber
foreman start \
--port "${PORT:-3000}" \
--procfile Procfile.static
else
bundle exec rake assets:precompile
bundle exec passenger \
start \
--port "${PORT:-3000}" \
--min-instances "${PASSENGER_MIN_INSTANCES:-1}" \
--max-pool-size "${PASSENGER_MAX_POOL_SIZE:-1}" \
--log-file "/dev/stdout"
fi
;;
worker:)
# Background worker: mail config, gems, optional mailcatcher, then
# sphinx indexing and the delayed_job queue.
app_msmtp_conf
bundle install
if [[ $RS_MAILCATCHER = 1 ]]; then
mailcatcher --ip 0.0.0.0 --no-quit
fi
bundle exec rake ts:configure ts:index ts:start
bundle exec rake jobs:work
;;
-h:|--help:) help ;;
*) help ;;
esac
| true
|
5637d189e67f1e1bf3a018112991e9e6b3247b6b
|
Shell
|
ajbouh/dotfiles
|
/shell/.profile
|
UTF-8
| 1,691
| 2.71875
| 3
|
[] |
no_license
|
# Login-shell profile: nix, PATH, editor defaults, and tsumobi workspace
# environment, plus a few small helper functions.
# Enable nix if present
[ -e ~/.nix-profile/etc/profile.d/nix.sh ] && . ~/.nix-profile/etc/profile.d/nix.sh
# Add personal utilities
export PATH=$HOME/bin:$PATH
# Use Atom as the editor (-w waits for the window to close)
export VISUAL='atom -w'
export EDITOR='atom -w'
# Set up workspace.
export CLICOLOR=1
# Set up environment variables needed for basic work.
# TODO(adamb) Move most of these to MRE.
TSUMOBI=$HOME/tsumobi
export AUDIT_BASE_DIR=$TSUMOBI/.audit
export HERMIT_NODE_CACHE=$TSUMOBI/.hermit-node-cache
export HERMIT_BUILDER_CAP=4
export HERMIT_BUILDER_RATION=4
export INSPECTOR_TIMELINE_DIR=$TSUMOBI/.audit
export QA_TAPJ_OUTPUT_DIR=$TSUMOBI/.tapj
export TAPJ_CACHE_BASE_DIR=$TSUMOBI/.tapj-cache
# NOTE(review): SECURE_KEY_DIR is exported twice with the same effective
# value ($TSUMOBI is $HOME/tsumobi); one of these lines is redundant.
export SECURE_KEY_DIR=$TSUMOBI/.build-creds
export SECURE_KEY_DIR=$HOME/tsumobi/.build-creds
export HERMIT_PASSTHROUGH_ENV="CLICOLOR QA_TEST_PATTERN"
export NALLOC_DRIVER=virtual_box
export FOG_RC=/Users/adamb/.fog
export SLOTH_RO_SHARED_CACHE=fs:/Volumes/sloth-cache
# So massively parallel builds work.
ulimit -n 8192
# Set up base PATH
# For refinery workflow
# Add a few tsumobi utilities to PATH.
# Print the unified gimme output dir for component $1, version $2.
function gimme_unified_output() {
echo $TSUMOBI/.gimme/output/unified/11/$1/$2/+macosx+x86_64
}
# Print a message to stderr and abort the shell/script.
function abort {
echo "$@" >&2
exit 1
}
# Create (if needed) and cd into a dated investigation directory.
function sleuth {
TOPIC=$1
[ -n "$TOPIC" ] || abort "fatal: no topic given"
SLEUTH_DIR="$HOME/sleuthing/$(date +%F)-$TOPIC"
mkdir -p $SLEUTH_DIR
cd $SLEUTH_DIR
}
# Create (if needed) and cd into a per-topic hacking directory.
function hack {
TOPIC=$1
[ -n "$TOPIC" ] || abort "fatal: no topic given"
HACK_DIR="$HOME/hacking/$TOPIC"
mkdir -p $HACK_DIR
cd $HACK_DIR
}
# NOTE(review): duplicates the nix sourcing at the top of this file.
if [ -e /Users/adamb/.nix-profile/etc/profile.d/nix.sh ]; then . /Users/adamb/.nix-profile/etc/profile.d/nix.sh; fi # added by Nix installer
| true
|
e8e9747011bbf2d111719838b1ef449789cecebf
|
Shell
|
jaunRuizz/Git-init
|
/Practica 2/Script_con_bash.sh
|
UTF-8
| 2,405
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive menu to create .txt files / directories on the desktop and
# to prepare a git working copy. User-facing prompts are kept in Spanish.

# Create .txt files under $ruta; keeps looping while the user answers "y".
function crea_Archivo(){
    ruta=/home/danylsti/Escritorio
    echo Escribe el nombre del archivo que deseas crear
    read input
    touch "$ruta/$input.txt"
    echo "Deseas Crear otro txt y para si n para no (y/n)"
    read op2
    # fix: the original compared the answer with `-eq 1` through a fragile
    # indirect arithmetic trick (y=1/n=2 plus op2=input); a plain string
    # comparison against "y" is equivalent for the documented answers.
    while [[ $op2 == y ]]
    do
        echo Escribe el nombre del archivo que deseas crear
        read input
        touch "$ruta/$input.txt"
        echo "Deseas Crear otro txt y para si n para no (y/n)"
        read op2
    done
}

# Create directories under $ruta; keeps looping while the user answers "y".
function Crea_Directorio(){
    ruta=/home/danylsti/Escritorio
    echo Escribe el nombre de la carpeta que deseas crear
    read input
    mkdir "$ruta/$input"
    echo "Deseas Crear otra Carpeta y para si n para no (y/n)"
    read op2
    while [[ $op2 == y ]]
    do
        echo Escribe el nombre de la carpeta que deseas crear
        read input
        # fix: the original ran `touch $ruta/$input.txt` here, creating a
        # text file instead of the requested directory.
        mkdir "$ruta/$input"
        echo "Deseas Crear otra Carpeta y para si n para no (y/n)"
        read op2
    done
}

# Prepare a git working copy. Parameters:
#   $1 - path under /home/danylsti, $2 - new directory name, $3 - remote URL
function Descargar_repo() {
    cd "/home/danylsti/$1"
    mkdir "$2"
    cd "/home/danylsti/Escritorio/$2"
    #git clone https://github.com/jaunRuizz/Git-init.git
    #clear
    #echo Se clono repositorio correctamente.
    git init
    git remote add origin "$3"
    git status
    git remote -v
    clear
    git pull --rebase origin master
    git push -u origin master
    clear
    echo Todo Esta listo para usar git En tu carpeta nueva
    #touch Archivo.txt
    #git add Archivo.txt
    #git commit -m "Este Archivo Fue subido Con un Script desde linux 26/08/2021"
    #git push -u origin master
    #clear
    #echo Se subio tu nuevo archivo correctamente
}

# Menu printed on the command line to read an option.
# fix: the menu strings are now quoted so "[1]" etc. cannot be expanded as
# shell glob patterns against files in the current directory.
echo 'Puedes pasar parametros para la opcion 4 ejemplo (Script.sh Ruta Name-Carpeta))'
echo 'elije una opcion [1] Para crear archivos [2] Para crear carpetas [3] Listar los archivos actuales [4] Preparar entorno git'
read input
op=$input
# Dispatch on the selected option.
case $op in
    1)
        crea_Archivo
        ;;
    2)
        Crea_Directorio
        ;;
    3)
        # fix: iterate the glob directly instead of word-splitting $(ls),
        # which broke on names containing spaces.
        for i in *
        do
            [ -e "$i" ] || continue
            echo "$i"
        done
        ;;
    4)
        Descargar_repo "$1" "$2" "$3"
        ;;
    #...
    *)
        echo Error 5996 Opcion no valida
        ;;
esac
| true
|
89095b5d523647e626096e573ba377b8ab3ce78d
|
Shell
|
bensonche/tools
|
/bin/push_to_test.sh
|
UTF-8
| 629
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Merge the current git branch into a target "test" branch and push it.
# Usage: push_to_test.sh [-x] [target-branch]
#   -x  stay on the target branch afterwards (do not switch back)

# Parse options. Sets (readonly) CHANGEBRANCH=0 when -x is given and
# TARGETBRANCH to the first non-option argument (may be empty).
cmdline()
{
    while getopts ":x" OPTION
    do
        case $OPTION in
            x)
                readonly CHANGEBRANCH=0
                ;;
            \?)
                echo "Invalid option: -$OPTARG"
                exit 1
                ;;
        esac
    done
    shift $((OPTIND-1))
    readonly TARGETBRANCH=$1
}

bc_ptt ()
{
    set -e
    # fix: quote "$@" so arguments containing spaces survive word-splitting
    cmdline "$@"
    # fix: declare and assign separately so a git failure is not masked
    # by the exit status of `local` (and $() replaces legacy backticks)
    local cur
    cur=$(git rev-parse --abbrev-ref HEAD)
    # Use current month/year for branch name if not given
    local test=""
    if [ -z "$TARGETBRANCH" ]
    then
        test=$(testbranch.sh)
    else
        test=$TARGETBRANCH
    fi
    git checkout "$test"
    git pull origin "$test"
    git merge "$cur"
    git push origin "$test"
    # CHANGEBRANCH is only set by -x; unset means "switch back afterwards".
    if [ -z "$CHANGEBRANCH" ]
    then
        git checkout "$cur"
    fi
}

bc_ptt "$@"
| true
|
a2f6a8a12dd3fbeec18ea60ab897635f05a6376e
|
Shell
|
nulldriver/cf-cli-resource
|
/spec/util/yq_manifest_spec.sh
|
UTF-8
| 3,452
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env shellspec
set -euo pipefail
# Shellspec suite exercising yq against a copy of the "static-app" fixture
# manifest: writes one env node in place, then reads back scalars written
# in every YAML flavor (quoted, plain, block folded/literal, JSON-ish).
# NOTE(review): load_fixture comes from the project's spec helper; the
# "read a new node" test depends on the preceding write test running first.
Describe 'yq'
BeforeAll 'fixture=$(load_fixture "static-app")'
Describe 'write and read'
It 'can write a new node'
When call yq --inplace '.applications[0].env.SOME_KEY = "some value"' "$fixture/manifest.yml"
The status should be success
End
It 'can read a new node'
When call yq '.applications[0].env.SOME_KEY' "$fixture/manifest.yml"
The status should be success
The output should equal "some value"
End
End
It 'can read single quoted flow scalar'
When call yq '.applications[0].env.SINGLE_QUOTED' "$fixture/manifest.yml"
The status should be success
The output should equal "Several lines of text, containing 'single quotes'. Escapes (like \n) don't do anything.
Newlines can be added by leaving a blank line. Leading whitespace on lines is ignored."
End
It 'can read double quoted flow scalar'
When call yq '.applications[0].env.DOUBLE_QUOTED' "$fixture/manifest.yml"
The status should be success
The output should equal "Several lines of text, containing \"double quotes\". Escapes (like \n) work.
In addition, newlines can be escaped to prevent them from being converted to a space.
Newlines can also be added by leaving a blank line. Leading whitespace on lines is ignored."
End
It 'can read plain flow scalar'
When call yq '.applications[0].env.PLAIN' "$fixture/manifest.yml"
The status should be success
The output should equal "Several lines of text, with some \"quotes\" of various 'types'. Escapes (like \n) don't do anything.
Newlines can be added by leaving a blank line. Additional leading whitespace is ignored."
End
It 'can read block folded scalar'
When call yq '.applications[0].env.BLOCK_FOLDED' "$fixture/manifest.yml"
The status should be success
# internal use of command substitution — $() — strips trailing newlines, so we won't actually have "another line at the end"
The output should equal "Several lines of text, with some \"quotes\" of various 'types', and also a blank line:
plus another line at the end."
End
It 'can read block literal scalar'
When call yq '.applications[0].env.BLOCK_LITERAL' "$fixture/manifest.yml"
The status should be success
# internal use of command substitution — $() — strips trailing newlines, so we won't actually have "another line at the end"
The output should equal "Several lines of text,
with some \"quotes\" of various 'types',
and also a blank line:
plus another line at the end."
End
It 'can read hyphenated string'
When call yq '.applications[0].env.HYPHENATED_STRING' "$fixture/manifest.yml"
The status should be success
The output should equal "- strings that start with a hyphen should be quoted"
End
It 'can read json as string'
When call yq '.applications[0].env.JSON_AS_STRING' "$fixture/manifest.yml"
The status should be success
The output should equal "{ jre: { version: 11.+ }, memory_calculator: { stack_threads: 25 } }"
End
It 'can read array as string'
When call yq '.applications[0].env.ARRAY_AS_STRING' "$fixture/manifest.yml"
The status should be success
The output should equal '[ list, of, things ]'
End
It 'can read json'
When call yq '.applications[0].env.JSON' "$fixture/manifest.yml"
The status should be success
The output should equal '{
  "KEY1": {
    "KEY2": "some value"
  }
}'
End
End
| true
|
aa977f03f93d2a009274d8a33c3d21ece3ad1970
|
Shell
|
TobiasKadelka/build_dta
|
/code/procedures/change-dwi-run-to-acq_fix_all.sh
|
UTF-8
| 218
| 3.046875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# this script changes the dwi-file-names
# for example from run-120dir to acq-120dir.
for old_file in sub-DTA*/*/dwi/*run* ; do
    # fix: an unmatched glob expands to itself; skip that literal pattern
    # instead of handing it to git mv.
    [ -e "$old_file" ] || continue
    new_file="${old_file/run/acq}"
    # fix: quote both paths so names containing spaces survive.
    git mv "$old_file" "$new_file"
done
| true
|
5cbdadd3833ba8294918e8854ee4bc4b501c2cb1
|
Shell
|
orva/dotfiles
|
/zsh_setup/exports.zsh
|
UTF-8
| 973
| 2.796875
| 3
|
[] |
no_license
|
# zsh environment setup: emacs keybindings, PATH additions, and
# browser/editor defaults (part of the dotfiles zsh_setup).
bindkey -e
export PATH=$PATH:$HOME/bin
export PATH=$PATH:$HOME/.local/bin
export PATH=$PATH:$HOME/.cargo/bin
export PATH=$PATH:$HOME/.dotdata
export PATH=$PATH:$HOME/.dotfiles/bin
export BROWSER=firefox
export TIMEWARRIORDB=$HOME/.dotdata/timewarrior
# In a desktop session with no SSH agent socket yet, start gnome-keyring
# and adopt the SSH_AUTH_SOCK it prints.
if [[ -n "$DESKTOP_SESSION" && -z "$SSH_AUTH_SOCK" ]]; then
if hash gnome-keyring-daemon 2> /dev/null; then
eval "$(gnome-keyring-daemon --start)"
export SSH_AUTH_SOCK
fi
fi
# Prefer nvim locally; plain vim when connected over SSH or nvim missing.
if [[ -n $SSH_CONNECTION ]]; then
export EDITOR='vim'
else
if hash nvim 2> /dev/null; then
export EDITOR='nvim'
else
export EDITOR='vim'
fi
fi
# Forcefully set DEBUGINFOD_URLS, for some reason these are not loaded
# to zsh even though everything looks like they should..
# NOTE(review): the -n guard only overwrites a value that is already set;
# if the intent is to supply a default when the variable is missing,
# -z appears to be what was meant — confirm.
if [[ -n $DEBUGINFOD_URLS ]]; then
export DEBUGINFOD_URLS="https://debuginfod.archlinux.org"
fi
# Add dash of colors and syntax highlighting to man pages
if hash bat 2> /dev/null; then
export MANPAGER="sh -c 'col -bx | bat -l man -p'"
fi
| true
|
98203a98e9355b65a101a4581c81cd9812d1be4f
|
Shell
|
Bananicorn/script_collection
|
/vim-openscad.sh
|
UTF-8
| 539
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# This script was mostly just copied verbatim from the openscad website
# The purpose of it is to open the openscad viewer and vim side by side
# Best used with a tiling window manager like i3
FILE="$1"
# increase stack size to allow deeper recursion
ulimit -s 65536
if [ "$FILE" == "" ]
then
    # No file given: edit a throwaway temp file.
    # fix: tempfile(1) is Debian-only and deprecated; mktemp is the
    # standard replacement (and $() replaces legacy backticks).
    TEMPF=$(mktemp --suffix=.scad)
    openscad "$TEMPF" >/dev/null 2>/dev/null &
    vim "$TEMPF"
    rm -f "$TEMPF"
    exit
fi
if [ ! -e "$FILE" ]
then
    # Seed a brand-new file with $LICENSE.
    # NOTE(review): LICENSE is never set in this script — presumably an
    # exported env var; when unset this only writes an empty line. Confirm.
    echo -e "$LICENSE" >> "$FILE"
fi
openscad "$FILE" >/dev/null 2>/dev/null &
vim "$FILE"
| true
|
7edce1da35fd8f152648fe5a807b056b3babc396
|
Shell
|
utsavgupta/docker-collection
|
/scripts/docker-run/registry/run.sh
|
UTF-8
| 257
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Launch a local Docker registry, persisting its data in a named volume.
main() {
    # Registry Configuration:
    # /etc/docker/registry/config.yml \
    # Creating an already-existing volume is a harmless no-op.
    docker volume create registry-data
    docker run \
        -it \
        -v registry-data:/var/lib/registry \
        -p 5000:5000 \
        registry:latest
}

main
| true
|
81c769fd57b606c4814a0b3c0479a882e7e471b6
|
Shell
|
yotsuba1022/my-cmd-settings
|
/private/bash/.bash_profile
|
UTF-8
| 4,408
| 2.546875
| 3
|
[] |
no_license
|
# Carl's .bash_profile
# Interactive-shell setup: colors, prompt, aliases, and PATH/toolchain
# environment variables for a macOS development machine.
# Customized command prompt
export CLICOLOR=1
export TERM=xterm-256color
# enables color in the terminal bash shell
# NOTE(review): CLICOLOR and TERM are each exported twice with identical
# values in this file; the repeats are redundant.
export CLICOLOR=1
# sets up the color scheme for list
export LSCOLORS=fxexbxdxcxegedabagacad
# enables color for iTerm
export TERM=xterm-256color
# Prompt: green user, magenta host, blue time, red cwd basename.
export PS1="\[\e[1;32m\]\u\[\e[1;33m\]@\[\e[1;35m\]\h\[\e[1;34m\][\t]\[\e[1;33m\]@\[\e[1;31m\]\W \[\e[1;32m\]$ "
# Get the aliases and functions
alias updatebash='. ~/.bash_profile'
alias showFiles='defaults write com.apple.finder AppleShowAllFiles YES; killall Finder /System/Library/CoreServices/Finder.app'
alias hideFiles='defaults write com.apple.finder AppleShowAllFiles NO; killall Finder /System/Library/CoreServices/Finder.app'
alias l.='ls -alFh'
alias clr='clear'
alias vi='vim'
alias jj='java -jar'
alias pgrep='ps -ef | grep'
alias redison='/usr/local/bin/redis-server /usr/local/etc/redis.conf'
alias gomongo='cd $MONGO_HOME'
alias mongoon='$MONGO_HOME/bin/mongod -f $MONGO_HOME/mongo.conf'
alias work='cd /Users/Carl/work/'
alias python_work='cd /Users/Carl/work/Project/python'
alias jcm='cd /Users/Carl/work/Project/jcm'
alias mypage='/Users/Carl/Dropbox/應用程式/Pancake.io/personal_home_page'
alias gorabbit='rabbitmq-server'
alias rmdstore='find . -name .DS_Store -print0 | xargs -0 git rm -f --ignore-unmatch'
alias fd='find . -name'
alias tomcatgo='sh $TOMCAT_HOME/bin/startup.sh'
alias tomcatdown='sh $TOMCAT_HOME/bin/shutdown.sh'
alias catalinaout='tail -f $TOMCAT_HOME/logs/catalina.out'
alias xamp='cd /Applications/XAMPP'
alias p3env='source /Users/Carl/work/python_env/p3-uruz7/bin/activate'
alias aws-uruz7='ssh -i $URUZ7_AWS1_KEY ubuntu@$URUZ7_AWS1'
alias mountawsuruz7='sshfs ubuntu@$URUZ7_AWS1:mnt/ ~/mnt -o IdentityFile=$URUZ7_AWS1_KEY'
alias jenkinsgo='sudo launchctl load /Library/LaunchDaemons/org.jenkins-ci.plist'
alias jenkinsdown='sudo launchctl unload /Library/LaunchDaemons/org.jenkins-ci.plist'
alias jvisualvm='/Library/Java/JavaVirtualMachines/jdk1.8.0_152.jdk/Contents/Home/bin/jvisualvm; exit'
# To make a git book, please download the book repository first, then execute the following command under the repository dir.
alias makegitbook='gitbook pdf ./ ./book.pdf'
# User specific environment and startup programs
export MYVIMRC=~/.vimrc
# Historical JAVA_HOME choices, kept for quick switching:
# export JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk1.8.0_131.jdk/Contents/Home
# export JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk-9.jdk/Contents/Home
# export JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk-10.0.2.jdk/Contents/Home
# export JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk-11.0.1.jdk/Contents/Home
export JAVA_HOME=/Library/Java/JavaVirtualMachines/amazon-corretto-11.jdk/Contents/Home
export M2_HOME=/usr/local/apache-maven-3.3.3
export MYSQL_HOME=/usr/local/opt/mysql
export MONGO_HOME=/Users/Carl/work/mongodb/mongodb-osx-x86_64-3.2.4
export TOMCAT_HOME=/Users/Carl/work/apache-tomcat-9.0.11
export CASSANDRA_HOME=/Users/Carl/work/apache-cassandra-3.11.3
export WEB_APPS=/Users/Carl/work/apache-tomcat-9.0.11/webapps
export URUZ7_AWS1=ec2-18-218-203-211.us-east-2.compute.amazonaws.com
export URUZ7_AWS1_KEY=/Users/Carl/work/resources/uruz7-ec2-1.pem
export GOROOT=/usr/local/go
export GOPATH=/Users/Carl/work/Project/go_workspace
export GRADLE_HOME=/Users/Carl/work/gradle/gradle-4.10.2
export PATH=$PATH:$M2_HOME/bin
export PATH=$PATH:$MYSQL_HOME/bin
export PATH=$PATH:$MONGO_HOME/bin
export PATH=$PATH:$CASSANDRA_HOME/bin
export PATH=$PATH:/usr/local/bin/subl
export PATH=$PATH:/usr/local/sbin
export PATH=/Users/Carl/work/Project/go_workspace/bin:$PATH
export PATH=$PATH:$GRADLE_HOME/bin
# Path for PHP5
#export PATH=/usr/local/php5/bin:/usr/local/php5/sbin:$PATH
export PATH=$PATH:/Applications/MAMP/bin/php/php5.6.30/bin
# MacPorts Installer addition on 2017-01-17_at_12:08:40: adding an appropriate PATH variable for use with MacPorts.
export PATH=$PATH:/opt/local/bin:/opt/local/sbin
# Finished adapting your PATH environment variable for use with MacPorts.
export NVM_DIR=~/.nvm
# Requires Homebrew; fails if brew or the nvm formula is missing.
source $(brew --prefix nvm)/nvm.sh
# DO NOT force vitualenv
export PIP_REQUIRE_VIRTUALENV=false
export PIP_RESPECT_VIRTUALENV=true
# softlink of nodejs and npm is located at the following dir:
# /usr/local/bin
# To set the default node version:
# nvm alias default version_code (e.g. 6.9.4)
# 20171001 Ruru add for Android adb tool. QwQ
export PATH=$PATH:/Users/Carl/Library/Android/sdk/platform-tools
| true
|
ccff92a9b93991fdf687c358b728b6714fb793a6
|
Shell
|
DH-std/cluster-compiler
|
/benchmark/run.sh
|
UTF-8
| 164
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Rename files under ./complete whose names start with "img" so they
# start with "IMG" instead (first occurrence of "img" in the path).
files=./complete/*
for f in $files   # the glob expands here; word-splitting is intentional
do
    # Characters 11..13 of "./complete/<name>" are the first 3 of <name>.
    # fix: quote the expansion — unquoted, [ ] breaks on short/empty names.
    if [ "${f: 11: 3}" = "img" ]; then
        echo "Processing $f"
        # fix: quote "$f" so paths containing spaces survive.
        mv -- "$f" "${f/img/IMG}"
    fi
done
echo "complete"
| true
|
421ac0014de5a8ef555e0ac4a91fa1fec072ea32
|
Shell
|
dmedme/path
|
/fdesel.sh
|
UTF-8
| 1,198
| 3.4375
| 3
|
[] |
no_license
|
# fdesel.sh
# **********************************************************************
# Include this file in a script from which it is to be used.
#
# Function to allow a user to select from a list of echo files
# Arguments:
# 1 - The Selection Header
# 2 - The list of scripts
#
#
# Returns:
# List of echo files
# @(#) $Name$ $Id$
# Copyright (c) E2 Systems Limited 1993
#
# Builds a menu description (HEAD/PROMPT/one entry per script) on the fly
# and pipes it through the external "natmenu" tool; the user's selection
# is exported in SCRIPT_LIST. Each entry shows the script name plus the
# last line of its $PATH_HOME/echo/<name>.ech file.
script_selecte () {
head=$1
shift
extra=$1
shift
if [ "$1" = "" -a "$extra" = "" ]
then
SCRIPT_LIST=""
else
if [ ! -z "$PATH_SCENE" ]
then
head="$head ($PATH_SCENE)"
fi
# The sed at the end of the pipeline folds each 3-line entry into one
# line and strips the /, = and # separator characters; natmenu talks to
# the terminal directly while fds 3/4 carry the menu data and selection.
SCRIPT_LIST=`(
echo HEAD=$head
echo PROMPT=Select Scripts, and Press RETURN
echo SEL_YES/COMM_NO/SYSTEM
echo SCROLL
if [ -z "$extra" ]
then
for i in $*
do
echo $i -
tail -1 $PATH_HOME/echo/$i.ech
echo /$i
done
else
for i in $*
do
echo \* $i -
tail -1 $PATH_HOME/echo/$i.ech
echo /$i
done
for i in \`ls -1 $PATH_HOME/echo/$PATH_SCENE/E*.ech 2>/dev/null | sed 's=.*/\\([^/]*\\)\\.ech$=\\1='\`
do
echo $i -
tail -1 $PATH_HOME/echo/$i.ech
echo /$i
done
fi | sed 'N
s.[/=#]. .g
N
s=\n= =g'
echo
) | natmenu 3<&0 4>&1 </dev/tty >/dev/tty`
# Normalize "nothing chosen" / explicit exit to an empty selection.
if [ "$SCRIPT_LIST" = " " -o "$SCRIPT_LIST" = "EXIT:" ]
then
SCRIPT_LIST=""
fi
fi
export SCRIPT_LIST
return 0
}
| true
|
780f3466735e321c16c50f1f4805f0c9b8b74a15
|
Shell
|
MrWWheat/acceptance_watir
|
/docker/docker_run.sh
|
UTF-8
| 766
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/sh
# Start the jenkins/watir container and launch one chromedriver instance
# inside it for every port in the given [start, end] range.
# Usage: docker_run.sh <start-port> <end-port>

usage() {
    # fix: printf instead of echo — "echo \t" escapes are interpreted by
    # dash but not by bash-as-sh, so the original output was non-portable.
    printf 'Usage:\n'
    printf '\tProvide start and end port range for chromedriver\n'
    printf '\tExample: %s 9810 9820\n\n' "$0"
}

# Both the start and end port are required.
if [ $# -le 1 ]
then
    usage
    exit 1
fi

PORT="-p 8080:8080"
VOLUME="-v /dev/shm:/dev/shm --volumes-from jenkinsdata"
CONTAINER_NAME="--name jenkins"
IMAGE_NAME="jenkins/watir"
JENKINS_HOME="/var/jenkins_home"
CHROMEDRIVER_CMD="/usr/local/bin/chromedriver"
CHROMEDRIVER_START_PORT=${1}
CHROMEDRIVER_END_PORT=${2}

# The option-holding vars are deliberately left unquoted so they word-split
# into separate docker arguments.
# NOTE(review): $COMMANDS is never assigned (its only assignment below is
# commented out), so it expands to nothing here — confirm that is intended.
CONTAINER_ID=$(docker run -d -u jenkins $PORT $CONTAINER_NAME $IMAGE_NAME $COMMANDS)
# COMMANDS="$CHROMEDRIVER_CMD --port=$CHROMEDRIVER_START_PORT &"
for port in $(seq $CHROMEDRIVER_START_PORT $CHROMEDRIVER_END_PORT); do
    docker exec -d "$CONTAINER_ID" "$CHROMEDRIVER_CMD" --port="$port"
done
echo "$CONTAINER_ID"
| true
|
1c0fd533beec9be54c9fd3cd9fd5c16ec6a37f7d
|
Shell
|
tedbow/drupalorg_infrastructure
|
/vendors/build-bluecheese.sh
|
UTF-8
| 763
| 3.140625
| 3
|
[] |
no_license
|
# Build script for the drupal.org "bluecheese" theme: mirrors the public
# 7.x-2.x branch into a private repo, merges it into branded-2.x,
# recompiles the CSS, and pushes the result.
# Exit immediately on uninitialized variable or error, and print each command.
set -uex
# Jenkins job tracks http://git.drupal.org/project/bluecheese.git
cd /var/git/bluecheese
# Make sure remote exists and is updated.
git remote show private || git remote add private ssh://git@bitbucket.org/drupalorg-infrastructure/bluecheese-private.git
git fetch private
# Reset Git repo.
git reset --hard HEAD
# Mirror changes.
git checkout 7.x-2.x
git pull
git push private 7.x-2.x
# Merge changes.
git checkout branded-2.x
git pull
git merge 7.x-2.x
# Compile CSS.
/opt/puppetlabs/puppet/bin/bundle exec compass compile
# Commit the compiled CSS only when it actually changed.
if [ -n "$(git status --short css)" ]; then
git add -A css
git commit -m 'compass compile'
fi
# Push to remote.
git push private branded-2.x
| true
|
7e94234ec9d5e7f472589ebd547d16251789588a
|
Shell
|
gpongracz/ansible-template
|
/create-role.sh
|
UTF-8
| 393
| 4.03125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Create a new Ansible role by copying the "template" role skeleton.
# Usage: create-role.sh <role-name>
if [ -z "$1" ]
then
    echo "No argument supplied"; exit 1
fi
# this script is used to create a role
# Resolve the directory this script lives in, regardless of caller cwd.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
TARGET_DIR=$DIR/roles/$1
TEMPLATE_DIR=$DIR/roles/template
if [ ! -d "$TARGET_DIR" ]; then
    # Only copy when the role does not exist yet.
    # fix: quote both paths so role names/paths with spaces work.
    cp -R "$TEMPLATE_DIR" "$TARGET_DIR/"
else
    echo "$TARGET_DIR already exists"
fi
| true
|
f8db0f9f52622b0c1cd236e2f9bd7ebc1ec2c254
|
Shell
|
Tout/china
|
/test/ec2stubs/ec2-run-instances
|
UTF-8
| 1,209
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Test stub for ec2-run-instances: records how it was invoked (with any
# "-f <user-data-file>" argument replaced by a stable md5 token) into
# $CHINA_TEST_LOG, then prints a canned RESERVATION/INSTANCE response.
# Bracket expression of shell-special characters that force quoting below.
chars='[ !"#$&()*,;<>?\^`{|}]'
do_data_file=0
orig_data_file=""
# Re-quote each argument so the logged command line round-trips through a
# shell; a -f file argument becomes user_data_md5_<hash> so the log does
# not depend on temp-file names or whitespace in the user data.
for arg
do
if [ $do_data_file -eq 1 ] ; then
orig_data_file=${arg}
arg="user_data_md5_$(cat $arg | tr -d ' \n\t' | md5sum | awk '{print $1}')"
do_data_file=0
fi
if [ "$arg" = "-f" ] ; then
do_data_file=1
fi
# Double-quote args containing single quotes; single-quote args that
# contain any character from $chars (unquoted: used as a glob pattern).
if [[ $arg == *\'* ]]
then
arg=\""$arg"\"
elif [[ $arg == *$chars* ]]
then
arg="'$arg'"
fi
allargs+=("$arg") # ${allargs[@]} is to be used only for printing
done
# Append the reconstructed invocation to the test log, if configured.
if [ "x${CHINA_TEST_LOG}" != "x" ] ; then
echo -n "$(basename $0) " >> ${CHINA_TEST_LOG}
printf "%s " "${allargs[*]}" >> ${CHINA_TEST_LOG}
echo " " >> ${CHINA_TEST_LOG}
if [ "x${orig_data_file}" != "x" ] ; then
echo "# user_data file: ${orig_data_file}" >> ${CHINA_TEST_LOG}
fi
fi
echo "RESERVATION r-b6ea58c1 696664755663 default"
echo "INSTANCE i-945af9e3 ami-dd8ea5b9 pending 0 c1.medium 2010-04-15T10:47:56+0000 eu-west-1a aki-b02a01c4 ari-39c2e94d"
# Note the instance id above, it is also used in the knife command so that when creating a unit, any postprocessing that checks if the server is in the knife env will succeed
| true
|
19ed44b2f2fcccc61974793b2791f38947e0cccc
|
Shell
|
lukas-hetzenecker/docker-postfix
|
/assets/install.sh
|
UTF-8
| 11,213
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# One-time postfix container setup: writes supervisord, spamassassin/amavis,
# postfix, SASL, TLS, LDAP and OpenDKIM configuration from environment
# variables. The presence of the supervisord config acts as the marker that
# setup has already run.
#judgement
if [[ -a /etc/supervisor/conf.d/supervisord.conf ]]; then
exit 0
fi
#supervisor
cat > /etc/supervisor/conf.d/supervisord.conf <<EOF
[supervisord]
nodaemon=true
[program:postfix]
command=/opt/postfix.sh
[program:cron]
command=/usr/sbin/cron -f
[program:spamassassin]
command=/usr/sbin/spamd --create-prefs --max-children 5 --helper-home-dir -d --pidfile=/var/run/spamd.pid
[program:amavisd]
command=/usr/sbin/amavisd-new foreground
[program:rsyslog]
command=/usr/sbin/rsyslogd -n -c3
EOF
###########################
# spam and virus detection
###########################
if [[ -n "$MAIL_DOMAIN" ]]; then
cat > /etc/mailname <<EOF
$MAIL_DOMAIN
EOF
fi
cat > /etc/default/spamassassin <<EOF
ENABLED=1
OPTIONS="--create-prefs --max-children 5 --helper-home-dir"
PIDFILE="/var/run/spamd.pid"
CRON=1
EOF
# Perl sigils in the amavis configs are backslash-escaped so the heredoc
# passes them through literally; only $MAIL_HOSTNAME is expanded.
cat > /etc/amavis/conf.d/15-content_filter_mode << EOF
use strict;
@bypass_virus_checks_maps = (
\%bypass_virus_checks, \@bypass_virus_checks_acl, \$bypass_virus_checks_re);
@bypass_spam_checks_maps = (
\%bypass_spam_checks, \@bypass_spam_checks_acl, \$bypass_spam_checks_re);
1;
EOF
cat > /etc/amavis/conf.d/50-user << EOF
use strict;
\$myhostname = "$MAIL_HOSTNAME";
\$final_spam_destiny = D_PASS; # (defaults to D_REJECT)
\$sa_tag_level_deflt = -100.0;
\$sa_tag2_level_deflt = 5.0;
\$sa_kill_level_deflt = 5.0;
1;
EOF
# Route all mail through amavis and accept the filtered mail back on 10025.
postconf -e "content_filter = smtp-amavis:[127.0.0.1]:10024"
cat >> /etc/postfix/master.cf << EOF
smtp-amavis unix - - - - 2 smtp
-o smtp_data_done_timeout=1200
-o smtp_send_xforward_command=yes
-o disable_dns_lookups=yes
-o max_use=20
127.0.0.1:10025 inet n - - - - smtpd
-o content_filter=
-o local_recipient_maps=
-o relay_recipient_maps=
-o smtpd_restriction_classes=
-o smtpd_delay_reject=no
-o smtpd_client_restrictions=permit_mynetworks,reject
-o smtpd_helo_restrictions=
-o smtpd_sender_restrictions=
-o smtpd_recipient_restrictions=permit_mynetworks,reject
-o smtpd_data_restrictions=reject_unauth_pipelining
-o smtpd_end_of_data_restrictions=
-o mynetworks=127.0.0.0/8
-o smtpd_error_sleep_time=0
-o smtpd_soft_error_limit=1001
-o smtpd_hard_error_limit=1000
-o smtpd_client_connection_count_limit=0
-o smtpd_client_connection_rate_limit=0
-o receive_override_options=no_header_body_checks,no_unknown_recipient_checks
EOF
# Keep locally picked-up mail (e.g. cron) out of the amavis content filter.
sed 's/.*pickup.*/&\n -o content_filter=\n -o receive_override_options=no_header_body_checks/' /etc/postfix/master.cf > /etc/postfix/master.cf.1 && mv /etc/postfix/master.cf.1 /etc/postfix/master.cf
freshclam
service clamav-daemon start
############
# postfix
############
cat >> /opt/postfix.sh <<EOF
#!/bin/bash
service saslauthd start
service postfix start
tail -f /var/log/mail.log
EOF
chmod +x /opt/postfix.sh
if [[ -n "$MAIL_HOSTNAME" ]]; then
postconf -e myhostname=$MAIL_HOSTNAME
fi
if [[ -n "$MAIL_DOMAIN" ]]; then
postconf -e mydomain=$MAIL_DOMAIN
fi
postconf -F '*/*/chroot = n'
# No Open Proxy / No Spam
postconf -e smtpd_sender_restrictions=permit_sasl_authenticated,permit_mynetworks,reject_unknown_sender_domain,permit
postconf -e smtpd_helo_restrictions=permit_mynetworks,reject_invalid_hostname,permit
postconf -e smtpd_relay_restrictions=permit_sasl_authenticated,permit_mynetworks,reject_unauth_destination
postconf -e "smtpd_recipient_restrictions=permit_mynetworks,permit_inet_interfaces,permit_sasl_authenticated,reject_invalid_hostname,reject_non_fqdn_hostname,reject_non_fqdn_sender,reject_non_fqdn_recipient,permit"
############
# SASL SUPPORT FOR CLIENTS
# The following options set parameters needed by Postfix to enable
# Cyrus-SASL support for authentication of mail clients.
############
# /etc/postfix/main.cf
postconf -e smtpd_sasl_auth_enable=yes
postconf -e broken_sasl_auth_clients=yes
# smtpd.conf
# Three mutually exclusive auth backends: static sasldb users, LDAP via
# saslauthd, or an external dovecot SASL socket.
if [[ -n "$smtp_user" ]]; then
postconf -e "mydestination = \$myhostname, localhost.\$mydomain, localhost, \$mydomain"
cat >> /etc/postfix/sasl/smtpd.conf <<EOF
pwcheck_method: auxprop
auxprop_plugin: sasldb
mech_list: PLAIN LOGIN CRAM-MD5 DIGEST-MD5 NTLM
EOF
# sasldb2
# $smtp_user is a comma-separated list of user:password pairs.
echo $smtp_user | tr , \\n > /tmp/passwd
while IFS=':' read -r _user _pwd; do
echo $_pwd | saslpasswd2 -p -c -u $MAIL_HOSTNAME $_user
done < /tmp/passwd
chown postfix.sasl /etc/sasldb2
elif [[ -n "$LDAP_HOST" && -n "$LDAP_BASE" ]]; then
adduser postfix sasl
postconf -e "mydestination = localhost.\$mydomain, localhost"
cat > /etc/default/saslauthd <<EOF
START=yes
DESC="SASL Authentication Daemon"
NAME="saslauthd"
MECHANISMS="ldap"
OPTIONS="-c -m /var/run/saslauthd -O /etc/saslauthd.conf"
EOF
cat > /etc/postfix/sasl/smtpd.conf <<EOF
pwcheck_method: saslauthd
EOF
cat > /etc/saslauthd.conf <<EOF
ldap_servers: ldap://$LDAP_HOST
ldap_search_base: $LDAP_BASE
ldap_version: 3
EOF
if [[ -n "$LDAP_USER_FILTER" ]]; then
echo "ldap_filter: $LDAP_USER_FILTER" >> /etc/saslauthd.conf
fi
if [[ -n "$LDAP_BIND_DN" && -n "$LDAP_BIND_PW" ]]; then
echo "ldap_bind_dn: $LDAP_BIND_DN" >> /etc/saslauthd.conf
echo "ldap_bind_pw: $LDAP_BIND_PW" >> /etc/saslauthd.conf
fi
elif [[ -n "$DOVECOT_IP" && -n "$DOVECOT_PORT" ]]; then
postconf -e smtpd_sasl_path=inet:$DOVECOT_IP:$DOVECOT_PORT
postconf -e smtpd_sasl_type=dovecot
fi
LMTP_PORT=${LMTP_PORT:-24}
if [[ -n "$LMTP_HOST" ]]; then
postconf -e virtual_transport=lmtp:$LMTP_HOST:$LMTP_PORT
postconf -e virtual_uid_maps=static:1200
postconf -e virtual_gid_maps=static:1200
fi
if [[ -n "$VIRTUAL_MAILBOX_DOMAINS" ]]; then
postconf -e "mydestination = localhost.\$mydomain, localhost"
postconf -e "virtual_mailbox_domains = $VIRTUAL_MAILBOX_DOMAINS"
fi
if [[ -n "$VIRTUAL_MAILBOX_MAPS" ]]; then
postmap $VIRTUAL_MAILBOX_MAPS
postconf -e "virtual_mailbox_maps = hash:/$VIRTUAL_MAILBOX_MAPS"
fi
if [[ -n "$VIRTUAL_ALIAS_MAPS" ]]; then
postmap $VIRTUAL_ALIAS_MAPS
postconf -e "virtual_alias_maps = hash:/$VIRTUAL_ALIAS_MAPS"
fi
if [[ -n "$MYNETWORKS" ]]; then
postconf -e "mynetworks = $MYNETWORKS"
fi
############
# Enable TLS
############
# The -iname patterns are quoted so the shell cannot expand them against
# files in the current directory before find sees them.
if [[ -n "$(find /etc/postfix/certs -iname '*.crt')" && -n "$(find /etc/postfix/certs -iname '*.key')" ]]; then
# /etc/postfix/main.cf
postconf -e smtpd_tls_cert_file=$(find /etc/postfix/certs -iname '*.crt')
postconf -e smtpd_tls_key_file=$(find /etc/postfix/certs -iname '*.key')
postconf -e smtpd_tls_CAfile=$(find /etc/postfix/certs -iname cacert.pem)
chmod 400 $(find /etc/postfix/certs -iname '*.crt') $(find /etc/postfix/certs -iname '*.key') $(find /etc/postfix/certs -iname cacert.pem)
# /etc/postfix/master.cf
postconf -M submission/inet="submission inet n - n - - smtpd"
postconf -P "submission/inet/syslog_name=postfix/submission"
postconf -P "submission/inet/smtpd_tls_security_level=encrypt"
postconf -P "submission/inet/smtpd_sasl_auth_enable=yes"
postconf -P "submission/inet/milter_macro_daemon_name=ORIGINATING"
fi
############
# LDAP
############
if [[ -n "$LDAP_HOST" && -n "$LDAP_BASE" ]]; then
groupadd -g 1200 vmail
useradd -u 1200 -g 1200 -s /sbin/nologin vmail
chown vmail:vmail /var/mail
cat >> /etc/postfix/ldap-aliases.cf <<EOF
server_host = $LDAP_HOST
search_base = $LDAP_BASE
version = 3
EOF
if [[ -n "$LDAP_ALIAS_FILTER" ]]; then
echo "query_filter = $LDAP_ALIAS_FILTER" >> /etc/postfix/ldap-aliases.cf
fi
if [[ -n "$LDAP_ALIAS_RESULT_ATTRIBUTE" ]]; then
echo "result_attribute = $LDAP_ALIAS_RESULT_ATTRIBUTE" >> /etc/postfix/ldap-aliases.cf
fi
if [[ -n "$LDAP_ALIAS_SPECIAL_RESULT_ATTRIBUTE" ]]; then
echo "special_result_attribute = $LDAP_ALIAS_SPECIAL_RESULT_ATTRIBUTE" >> /etc/postfix/ldap-aliases.cf
fi
if [[ -n "$LDAP_ALIAS_TERMINAL_RESULT_ATTRIBUTE" ]]; then
echo "terminal_result_attribute = $LDAP_ALIAS_TERMINAL_RESULT_ATTRIBUTE" >> /etc/postfix/ldap-aliases.cf
fi
if [[ -n "$LDAP_ALIAS_LEAF_RESULT_ATTRIBUTE" ]]; then
echo "leaf_result_attribute = $LDAP_ALIAS_LEAF_RESULT_ATTRIBUTE" >> /etc/postfix/ldap-aliases.cf
fi
if [[ -n "$LDAP_BIND_DN" && -n "$LDAP_BIND_PW" ]]; then
echo "bind = yes" >> /etc/postfix/ldap-aliases.cf
echo "bind_dn = $LDAP_BIND_DN" >> /etc/postfix/ldap-aliases.cf
echo "bind_pw = $LDAP_BIND_PW" >> /etc/postfix/ldap-aliases.cf
fi
cat >> /etc/postfix/ldap-mailboxes.cf <<EOF
server_host = $LDAP_HOST
search_base = $LDAP_BASE
version = 3
EOF
if [[ -n "$LDAP_MAILBOX_FILTER" ]]; then
echo "query_filter = $LDAP_MAILBOX_FILTER" >> /etc/postfix/ldap-mailboxes.cf
fi
if [[ -n "$LDAP_MAILBOX_RESULT_ATTRIBUTE" ]]; then
echo "result_attribute = $LDAP_MAILBOX_RESULT_ATTRIBUTE" >> /etc/postfix/ldap-mailboxes.cf
fi
if [[ -n "$LDAP_MAILBOX_RESULT_FORMAT" ]]; then
echo "result_format = $LDAP_MAILBOX_RESULT_FORMAT" >> /etc/postfix/ldap-mailboxes.cf
fi
if [[ -n "$LDAP_BIND_DN" && -n "$LDAP_BIND_PW" ]]; then
echo "bind = yes" >> /etc/postfix/ldap-mailboxes.cf
echo "bind_dn = $LDAP_BIND_DN" >> /etc/postfix/ldap-mailboxes.cf
echo "bind_pw = $LDAP_BIND_PW" >> /etc/postfix/ldap-mailboxes.cf
fi
postconf -e "virtual_mailbox_domains = \$myhostname, \$mydomain"
postconf -e virtual_mailbox_base=/var/mail
postconf -e virtual_alias_maps=ldap:/etc/postfix/ldap-aliases.cf
postconf -e virtual_mailbox_maps=ldap:/etc/postfix/ldap-mailboxes.cf
postconf -e virtual_uid_maps=static:1200
postconf -e virtual_gid_maps=static:1200
fi
#############
# opendkim
#############
if [[ -z "$(find /etc/opendkim/domainkeys -iname '*.private')" ]]; then
exit 0
fi
cat >> /etc/supervisor/conf.d/supervisord.conf <<EOF
[program:opendkim]
command=/usr/sbin/opendkim -f
EOF
# /etc/postfix/main.cf
postconf -e milter_protocol=2
postconf -e milter_default_action=accept
postconf -e smtpd_milters=inet:localhost:12301
postconf -e non_smtpd_milters=inet:localhost:12301
cat >> /etc/opendkim.conf <<EOF
AutoRestart Yes
AutoRestartRate 10/1h
UMask 002
Syslog yes
SyslogSuccess Yes
LogWhy Yes
Canonicalization relaxed/simple
ExternalIgnoreList refile:/etc/opendkim/TrustedHosts
InternalHosts refile:/etc/opendkim/TrustedHosts
KeyTable refile:/etc/opendkim/KeyTable
SigningTable refile:/etc/opendkim/SigningTable
Mode sv
PidFile /var/run/opendkim/opendkim.pid
SignatureAlgorithm rsa-sha256
UserID opendkim:opendkim
Socket inet:12301@localhost
EOF
cat >> /etc/default/opendkim <<EOF
SOCKET="inet:12301@localhost"
EOF
# Fix: the original interpolated undefined $maildomain here, producing a
# bare "*." entry; $MAIL_DOMAIN is the variable used everywhere else.
cat >> /etc/opendkim/TrustedHosts <<EOF
127.0.0.1
localhost
192.168.0.1/24
*.$MAIL_DOMAIN
EOF
cat >> /etc/opendkim/KeyTable <<EOF
mail._domainkey.$MAIL_HOSTNAME $MAIL_HOSTNAME:mail:$(find /etc/opendkim/domainkeys -iname '*.private')
EOF
cat >> /etc/opendkim/SigningTable <<EOF
*@$MAIL_HOSTNAME mail._domainkey.$MAIL_HOSTNAME
EOF
chown opendkim:opendkim $(find /etc/opendkim/domainkeys -iname '*.private')
chmod 400 $(find /etc/opendkim/domainkeys -iname '*.private')
| true
|
37cc3033b1266c0430b541f0583231131a77a34e
|
Shell
|
zhuxu/shell
|
/Chapter11/test21.sh
|
UTF-8
| 176
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# testing file dates
# Choose the default message up front, then flip it if badfile1 turns out
# to be the more recently modified of the two files.
message="The badfile2 is newer than badfile1"
if [ ./badfile1 -nt ./badfile2 ]; then
  message="The badfile1 is newer than badfile2"
fi
echo "$message"
| true
|
29683accf932a497d10a87079296cb858bf40920
|
Shell
|
zhengzhiren/vimrc
|
/vimrc.sh
|
UTF-8
| 506
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Keep ~/.vimrc in sync with the copy tracked in this git repository.
# Usage: vimrc.sh {update|commit}
THIS_DIR=$(dirname $(readlink -f $0))
cmd=$1
# Pull the latest repo copy and install it as ~/.vimrc (backing up the
# existing one to ~/.vimrc.bak first).
function update()
{
echo Updating \"$HOME/.vimrc\"
cd $THIS_DIR; git pull
if [ -f $HOME/.vimrc ]; then
cp $HOME/.vimrc $HOME/.vimrc.bak #make a backup
fi
cp $THIS_DIR/.vimrc $HOME/.vimrc
}
# Copy the live ~/.vimrc into the repo, then commit and push it.
function commit()
{
echo Committing \"$HOME/.vimrc\" to git.
cp $HOME/.vimrc $THIS_DIR/
cd $THIS_DIR; git commit .vimrc; git push
}
case $cmd in
update)
update
;;
commit)
commit
;;
*)
echo $"Usage: $0 {update|commit}"
esac
| true
|
52e75d55ccfb2df3d53865d528b027a65f25a676
|
Shell
|
manticoresoftware/docker
|
/docker-entrypoint.sh
|
UTF-8
| 6,258
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
set -eo pipefail
# check to see if this file is being run or sourced from another script
_is_sourced() {
# https://unix.stackexchange.com/a/215279
# When sourced, bash pushes 'source' onto FUNCNAME above this function,
# so FUNCNAME[1] is 'source'; when executed directly it is absent.
[ "${#FUNCNAME[@]}" -ge 2 ] &&
[ "${FUNCNAME[0]}" = '_is_sourced' ] &&
[ "${FUNCNAME[1]}" = 'source' ]
}
# Return 0 when any argument requests help or version output, else 1.
_searchd_want_help() {
	local candidate
	for candidate in "$@"; do
		if [[ "$candidate" == '-?' || "$candidate" == '--help' || "$candidate" == '-h' || "$candidate" == '-v' ]]; then
			return 0
		fi
	done
	return 1
}
# Prepare the container environment before starting searchd/indexer:
# query-log redirection, optional "extra" executor package, the Manticore
# Columnar Library (MCL), and optional plain-table pre-indexing.
docker_setup_env() {
# Mirror the query log to the container's stdout when requested.
if [ -n "$QUERY_LOG_TO_STDOUT" ]; then
export searchd_query_log=/var/log/manticore/query.log
[ ! -f /var/log/manticore/query.log ] && ln -sf /dev/stdout /var/log/manticore/query.log
fi
# EXTRA=1: install/refresh the manticore-executor helper binary (cached
# under /var/lib/manticore/.extra/). Implies MCL below.
if [[ "${EXTRA}" == "1" ]]; then
EXTRA_DIR="/var/lib/manticore/.extra/"
if [ -f "${EXTRA_DIR}manticore-executor" ]; then
cp ${EXTRA_DIR}manticore-executor /usr/bin/manticore-executor
fi
# Provide /etc/ssl/cert.pem by linking whichever CA bundle the distro has.
if [ ! -f /etc/ssl/cert.pem ]; then
for cert in "/etc/ssl/certs/ca-certificates.crt" \
"/etc/pki/tls/certs/ca-bundle.crt" \
"/etc/ssl/ca-bundle.pem" \
"/etc/pki/tls/cacert.pem" \
"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem"; do
if [ -f "$cert" ]; then
ln -s "$cert" /etc/ssl/cert.pem
break
fi
done
fi
LAST_PATH=$(pwd)
EXTRA_URL=$(cat /extra.url)
EXTRA_INSTALLED_VERSION_PATH="/var/lib/manticore/.extra.installed"
# Version string is parsed out of the package URL naming convention.
NEW_EXTRA_VERSION=$(echo $EXTRA_URL | cut -d"_" -f4 | cut -d"-" -f3)
if [ ! -f $EXTRA_INSTALLED_VERSION_PATH ]; then
# Extra was never be installed
echo "Install extra packages"
install_extra $EXTRA_URL $EXTRA_INSTALLED_VERSION_PATH $NEW_EXTRA_VERSION $EXTRA_DIR
else
INSTALLED_EXTRA_VERSION=$(cat $EXTRA_INSTALLED_VERSION_PATH)
if [[ $INSTALLED_EXTRA_VERSION != $NEW_EXTRA_VERSION ]]; then
echo "Extra packages version mismatch. Updating..."
# Trailing "1" forces install_extra to discard the cached copy.
install_extra $EXTRA_URL $EXTRA_INSTALLED_VERSION_PATH $NEW_EXTRA_VERSION $EXTRA_DIR 1
fi
fi
MCL="1"
fi
# MCL=1: make sure the columnar/secondary-index libraries matching this
# searchd build are linked into the modules directory.
if [[ "${MCL}" == "1" ]]; then
MCL_DIR="/var/lib/manticore/.mcl/"
LIB_MANTICORE_COLUMNAR="${MCL_DIR}lib_manticore_columnar.so"
LIB_MANTICORE_SECONDARY="${MCL_DIR}lib_manticore_secondary.so"
COLUMNAR_VERSION=$(cat /mcl.url | cut -d"-" -f6 | cut -d"_" -f1)
[ -L /usr/share/manticore/modules/lib_manticore_columnar.so ] || ln -s $LIB_MANTICORE_COLUMNAR /usr/share/manticore/modules/lib_manticore_columnar.so
[ -L /usr/share/manticore/modules/lib_manticore_secondary.so ] || ln -s $LIB_MANTICORE_SECONDARY /usr/share/manticore/modules/lib_manticore_secondary.so
# If searchd complains it cannot load the libs, drop them so they get
# re-downloaded below.
searchd -v | grep -i error | egrep "trying to load" &&
rm $LIB_MANTICORE_COLUMNAR $LIB_MANTICORE_SECONDARY &&
echo "WARNING: wrong MCL version was removed, installing the correct one"
if ! searchd --version | head -n 1 | grep $COLUMNAR_VERSION; then
echo "Columnar version mismatch"
rm $LIB_MANTICORE_COLUMNAR > /dev/null 2>&1 || echo "Lib columnar not installed"
rm $LIB_MANTICORE_SECONDARY > /dev/null 2>&1 || echo "Secondary columnar not installed"
fi
# Download and unpack the MCL .deb when either library is missing.
if [[ ! -f "$LIB_MANTICORE_COLUMNAR" || ! -f "$LIB_MANTICORE_SECONDARY" ]]; then
if ! mkdir -p ${MCL_DIR}; then
echo "ERROR: Manticore Columnar Library is inaccessible: couldn't create ${MCL_DIR}."
exit
fi
MCL_URL=$(cat /mcl.url)
wget -P /tmp $MCL_URL
LAST_PATH=$(pwd)
cd /tmp
PACKAGE_NAME=$(ls | grep manticore-columnar | head -n 1)
ar -x $PACKAGE_NAME
tar -xf data.tar.gz
find . -name '*.so' -exec cp {} ${MCL_DIR} \;
cd $LAST_PATH
fi
fi
# Without MCL, disable secondary indexes via searchd config env override.
if [[ -z "${MCL}" && "${MCL}" != "1" ]]; then
export searchd_secondary_indexes=0
fi
if [[ "${CREATE_PLAIN_TABLES}" == "1" ]]; then
indexer --all
fi
}
# Download and install the manticore-executor "extra" package.
# Arguments:
#   $1 - package URL              $2 - installed-version marker file path
#   $3 - new version string       $4 - cache dir (has a trailing slash)
#   $5 - force flag: "1" wipes any cached copy first (optional)
install_extra() {
# In case force update. NOTE: the check must compare two words; the old
# form `[ $5=1 ]` was a single non-empty word and therefore always true,
# which wiped the cache on every call.
if [ "${5:-0}" = "1" ]; then
rm -rf "${4}"
fi
if [ ! -d "$4" ]; then
mkdir "$4"
fi
# Fetch and unpack the .deb only when no cached executor exists.
if [[ -z $(find "$4" -name 'manticore-executor') ]]; then
wget -P "$4" "$1"
cd "$4"
PACKAGE_NAME=$(ls | grep manticore-executor | head -n 1)
ar -x "$PACKAGE_NAME"
tar -xf data.tar.xz
fi
find "$4" -name 'manticore-executor' -exec cp {} /usr/bin/manticore-executor \;
# Record the installed version so later runs can detect mismatches.
echo "$3" >"$2"
cd "$LAST_PATH"
# Clear the unpacked tree, keeping only the executor binary in the dir.
# The glob must sit outside the quotes so it actually expands (the old
# quoted form `rm -rf "${4}*"` matched nothing and removed nothing).
rm -rf -- "${4}"*
cp /usr/bin/manticore-executor "${4}"
}
# Entry point: normalize the argument list, run environment setup unless
# the user only asked for help/version, drop privileges for searchd or
# indexer, apply env-based config overrides, then exec the command.
_main() {
# first arg is `h` or some `--option`: treat the args as searchd flags.
if [ "${1#-}" != "$1" ]; then
set -- searchd "$@"
fi
# Fix: forward the real argument list ("$@"); the old code passed the
# literal string "@", so help/version runs still performed full setup.
if ! _searchd_want_help "$@"; then
docker_setup_env "$@"
fi
if ([ "$1" = 'searchd' ] || [ "$1" = 'indexer' ]) && ! _searchd_want_help "$@"; then
# allow the container to be started with `--user`
if [ "$(id -u)" = '0' ]; then
find /var/lib/manticore /var/log/manticore /var/run/manticore /etc/manticoresearch \! -user manticore -exec chown manticore:manticore '{}' +
# Re-exec this script as the manticore user.
exec gosu manticore "$0" "$@"
fi
fi
_replace_conf_from_env
exec "$@"
}
# Rewrite manticore.conf in place from environment variables prefixed with
# searchd_ or common_, by building one combined sed expression.
_replace_conf_from_env() {
# we exit in case a custom config is provided
# (the shipped default config's md5 is stored in /manticore.conf.md5;
# a mismatch means the user mounted their own config, so leave it alone)
if [ "$(md5sum /etc/manticoresearch/manticore.conf | awk '{print $1}')" != "$(cat /manticore.conf.md5)" ]; then return; fi
sed_query=""
while IFS='=' read -r oldname value; do
if [[ $oldname == 'searchd_'* || $oldname == 'common_'* ]]; then
# Re-read the full value via indirection (the IFS split above would
# have truncated values containing '='), and escape '/' for sed.
value=$(echo ${!oldname} | sed 's/\//\\\//g')
oldname=$(echo $oldname | sed "s/searchd_//g;s/common_//g;")
newname=$oldname
if [[ $newname == 'listen' ]]; then
# Multiple listen addresses arrive '|'-separated; expand them into
# repeated "listen = ..." lines replacing the listen_env placeholder.
oldname="listen_env"
IFS='|' read -ra ADDR <<<"$value"
count=0
for i in "${ADDR[@]}"; do
if [[ $count == 0 ]]; then
value=$i
else
value="$value\n listen = $i"
fi
count=$((count + 1))
done
fi
# Accumulate one s/// clause per variable, ';'-joined.
if [[ -z $sed_query ]]; then
sed_query="s/(#\s)*?$oldname\s?=\s?.*?$/$newname = $value/g"
else
sed_query="$sed_query;s/(#\s)*?$oldname\s?=\s?.*?$/$newname = $value/g"
fi
fi
done < <(env)
if [[ ! -z $sed_query ]]; then
sed -i -E "$sed_query" /etc/manticoresearch/manticore.conf
fi
}
# If we are sourced from elsewhere, don't perform any further actions
# (sourcing is used to reuse the helper functions without side effects).
if ! _is_sourced; then
_main "$@"
fi
| true
|
e17823e7b7d6e1aafe7d80b0c322c6f6aa1b0c36
|
Shell
|
debian-pm-tools/rootfs-builder-debos
|
/librem5/overlay/etc/kernel/postinst.d/zz-zzkernelhack
|
UTF-8
| 571
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Kernel postinst/postrm hook: replace the compressed vmlinuz with an
# uncompressed kernel image (the boot loader here cannot handle gzip).
# see Chapter 8 of Debian Linux Kernel Handbook
set -e
# this script is used as postinst.d and postrm.d script; this is used to
# differentiate between the two
self="$0"
# see 8.1, Kernel hooks
abi="$1"
# ignored
_vmlinuz="${2:-/boot/vmlinuz-$abi}"
# Intentionally unquoted: DEB_MAINT_PARAMS is a space-separated list that
# must word-split into positional parameters.
set -- $DEB_MAINT_PARAMS
action="$1"
action="${action#\'}"
action="${action%\'}"
# ignored
_version="$2"
# Fix: strip the surrounding quotes from _version itself; the old code
# expanded an undefined $version, silently blanking the value.
_version="${_version#\'}"
_version="${_version%\'}"
# HACK
# For now manually extract the vmlinuz to not have compression
mv ${_vmlinuz} /boot/Image.gz
zcat /boot/Image.gz > ${_vmlinuz}
rm /boot/Image.gz
| true
|
595fa4c3acc67a2cfaca39c56eef735872f4c618
|
Shell
|
aprokop/local_scripts
|
/switch_org
|
UTF-8
| 1,129
| 3.984375
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
#!/bin/bash
# Toggle the ~/.personal/org symlink between org_personal and org_work,
# or report which one is active with "status"/"st".
# Hardcoded location of all org alternatives
loc="$HOME/.personal"
cur_org=`readlink $loc/org`
if [ "$?" -ne "0" ]; then
echo "No current org, exiting..."
exit 0
fi
unamestr=`uname`
if [[ "$unamestr" == "Darwin" ]]; then
# Mac adds "/" at the end of readlink
cur_org=`echo $cur_org | sed 's/\/$//'`
fi
# Status query: print the current mode and stop.
if [ "$1" == "status" ] || [ "$1" == "st" ]; then
if [ "$cur_org" == "org_personal" ]; then echo 'Current status "personal"'
elif [ "$cur_org" == "org_work" ]; then echo 'Current status "work"'
else echo "Current status <unknown> ($cur_org)"; fi
exit 0
fi
# Refuse to switch while Emacs is running, since it holds org state open.
emacs_pid=`pgrep emacs || pgrep Emacs`
if [ "$?" -ne "1" ]; then
echo "Emacs process is running. Terminate first before switching..."
exit 0
fi
if [ "$cur_org" == "org_personal" ]; then
# personal -> work
cd $loc
rm -f $loc/org
ln -s org_work org
cd ..
echo 'Switched to "work" mode'
elif [ "$cur_org" == "org_work" ]; then
# work -> personal
cd $loc
rm -f org
ln -s org_personal org
cd ..
echo 'Switched to "personal" mode'
else
echo "Uknown current org \""$cur_org"\", exiting..."
fi
exit 0
| true
|
f4cdcd74af92616a94223a3152649c186690c072
|
Shell
|
thanh-dong/2vivi
|
/network/common.sh
|
UTF-8
| 814
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Shared helpers and ANSI color codes sourced by the network scripts.
YELLOW='\033[1;33m'
RED='\033[1;31m'
GREEN='\033[1;32m'
BLUE='\033[1;34m'
PURPLE='\033[1;35m'
RESET='\033[0m'
OS=$(uname)
export OS=$OS
NETWORK_NAME="2vivi-network"
# Extract the "version" field from package.json in the current directory.
# NOTE(review): assumes the script is run from the project root where
# package.json lives -- confirm against the callers.
NETWORK_VERSION=$(cat package.json \
| grep version \
| head -1 \
| awk -F: '{ print $2 }' \
| sed 's/[",]//g')
# Indent text on echo
# Reads stdin and prefixes every line; uses line-buffered sed flags that
# differ between macOS (-l) and GNU sed (-u).
function indent() {
  c='s/^/ /'
  if [ "$(uname)" = "Darwin" ]; then
    sed -l "$c"
  else
    sed -u "$c"
  fi
}
# Displays where we are, uses the indent function (above) to indent each line
# Prints the joined arguments in purple, padded with a blank line on each side.
function showStep() {
  printf '\n'
  echo -e "${PURPLE}$*${RESET}"
  printf '\n'
}
# Grab the current directory
# Sets the globals DIR (cwd) and THIS_SCRIPT (basename of the running
# script), announcing each via showStep.
function getCurrent() {
showStep "Getting current directory"
DIR="$(pwd)"
echo "DIR in getCurrent is: ${DIR}"
THIS_SCRIPT=`basename "$0"`
showStep "Running '${THIS_SCRIPT}'"
}
| true
|
f9e302c13137903c82fff956454b4500b130acf0
|
Shell
|
mwhittaker/vms
|
/install_grafana.sh
|
UTF-8
| 433
| 3.625
| 4
|
[] |
no_license
|
#! /usr/bin/env bash
# Download and unpack a pinned Grafana release tarball into ~/install.
set -euo pipefail
main() {
mkdir -p "$HOME/install"
cd "$HOME/install"
# See [1, 2, 3] for download links.
#
# [1]: https://grafana.com/docs/installation/debian/
# [2]: https://grafana.com/get
# [3]: https://grafana.com/grafana/download
local -r grafana='grafana-6.1.3.linux-amd64.tar.gz'
wget "https://dl.grafana.com/oss/release/$grafana"
tar -xzvf "$grafana"
}
main
| true
|
a3091ab7947b50d20d79bb0f7d428b1ab61c863d
|
Shell
|
Marcnuth/setup-scripts
|
/prepare-xgboost.sh
|
UTF-8
| 366
| 2.828125
| 3
|
[] |
no_license
|
# Build and install xgboost (with its Python bindings) system-wide,
# unless python3 can already import it.
python3 -c "import xgboost"
if [ $? -ne 0 ]; then
rm -rf /usr/bin/xgboost/
git clone --recursive https://github.com/dmlc/xgboost /usr/bin/xgboost/
cd /usr/bin/xgboost/
make -j4
cd python-package
sudo python3 setup.py install
# Also expose the package to login shells via PYTHONPATH.
echo 'export PYTHONPATH=/usr/bin/xgboost/python-package' >> /etc/profile
fi
echo '=> XGBoost is installed'
|
baa2260adcc8c8ef1dd6e5a5c604609ef9bd7c39
|
Shell
|
liqiang76/tinyos_cxl
|
/apps/breakfast/bacon/cxActiveMessage/tests/overnight_0918.sh
|
UTF-8
| 1,941
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
#re-run 0x8D burst/flood/single tests
# Drives installTestbed.sh across combinations of rate / buffer width /
# forwarder selection / routing-table size, sleeping between runs so each
# test collects data for its full duration.
shortDuration=$((60*60))
longDuration=$((2 *60 *60))
floodOptions="senderDest 65535UL requestAck 0"
burstOptions="senderDest 0 requestAck 0"
lowRateOptions='targetIpi 143360UL queueThreshold 10'
midRateOptions='targetIpi 61440UL queueThreshold 10'
highRateOptions='targetIpi 1024UL queueThreshold 10'
for rate in mid
do
# Map the symbolic rate onto its IPI options and test duration.
if [ "$rate" == "low" ]
then
rateOptions=$lowRateOptions
testDuration=$longDuration
elif [ "$rate" == "high" ]
then
rateOptions=$highRateOptions
testDuration=$shortDuration
elif [ "$rate" == "mid" ]
then
rateOptions=$midRateOptions
testDuration=$shortDuration
else
echo "unknown rate $rate"
exit 1
fi
for txp in 0x8D
do
#3 x 6 = 18
for numTransmits in 1
do
#burst with buffer zone
for bufferWidth in 0 1 3
do
for cxForwarderSelection in 1 2
do
for cxRoutingTableEntries in 3 70
do
./installTestbed.sh \
testLabel lpb.${txp}.${numTransmits}.${bufferWidth}.$rate.$cxForwarderSelection.$cxRoutingTableEntries \
txp $txp \
senderMap map.nonroot \
receiverMap map.none \
rootMap map.0 \
numTransmits $numTransmits\
bufferWidth $bufferWidth\
$burstOptions\
$rateOptions \
cxForwarderSelection $cxForwarderSelection \
cxRoutingTableEntries $cxRoutingTableEntries
# Let the installed test run for its full duration before the next one.
sleep $testDuration
done
done
done
#flood
./installTestbed.sh testLabel lpf.${txp}.${numTransmits}.$rate \
txp $txp \
senderMap map.nonroot \
receiverMap map.none \
rootMap map.0 \
numTransmits $numTransmits\
bufferWidth $bufferWidth\
$floodOptions\
$rateOptions
sleep $testDuration
done
done
done
| true
|
c271b7681c3c1e68f95dc4e89534157674fa0fcf
|
Shell
|
Ginden/dotfiles
|
/bin/webp-auto
|
UTF-8
| 277
| 3.109375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Decode a WebP file ($1) to "<file>.png", skipping files already decoded.
#HASH=$((cat $0 && dwebp -version) | md5sum)
#HAS_MEMBER=$(redis-cli sismember "$0:$HASH" "$1" | cat);
OUTPUT_PATH="$1.png"
if [ ! -f "$OUTPUT_PATH" ];
then
echo "Processing file $1";
dwebp "$1" -o "$OUTPUT_PATH"
else
echo "Skipping file $1";
fi;
|
7ed1539c6d1ffd8bd896d48accf3489223adb235
|
Shell
|
jleni/qrl-build-images
|
/xenial-emscripten/build.sh
|
UTF-8
| 1,191
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# CI build: install the emscripten SDK, compile the QRL library to asm.js
# and WebAssembly, then optionally run tests and prepare deployment.
set -e
sudo mkhomedir_helper $(whoami)
BUILD_DIR="build"
cmake --version
# Get emscripten
cd ${HOME}
curl -O https://s3.amazonaws.com/mozilla-games/emscripten/releases/emsdk-portable.tar.gz
tar -xvzf emsdk-portable.tar.gz
cd emsdk-portable
./emsdk update &> /dev/null
./emsdk install latest &> /dev/null
./emsdk activate latest &> /dev/null
# Puts emcc/emmake/emconfigure on PATH for this shell.
source ./emsdk_env.sh
cd /travis
mkdir -p ${BUILD_DIR}
cd ${BUILD_DIR}
emconfigure cmake -DBUILD_WEBASSEMBLY=ON ${CMAKE_ARGS} -DCMAKE_BUILD_TYPE=Release /travis
echo "Building..."
emmake make
echo "Emscripten Binding/Optimizing..."
#FIXME: Disable .mem for node.js until this gets fixed: https://github.com/kripken/emscripten/issues/2542
emcc --bind libjsqrl.so -O3 --memory-init-file 0 -o libjsqrl.js
emcc --bind libjsqrl.so -O3 -s WASM=1 -o web-libjsqrl.js
echo "QRLLIB=Module;" >> web-libjsqrl.js
# Fix pathing of web-libjsqrl.wasm for web clients
sed -i 's/web-libjsqrl\.wasm/\/web-libjsqrl\.wasm/g' web-libjsqrl.js
# TEST / DEPLOY are opt-in CI switches.
if [ -n "${TEST:+1}" ]; then
echo "Running Tests"
cp ./travis/tests_js/test.js .
node test.js
fi
if [ -n "${DEPLOY:+1}" ]; then
echo "******** Prepare deployment package HERE ********"
fi
|
6fa58478d37355c7cd52e07d7b041f370706d8c4
|
Shell
|
RHood78/monerodo
|
/service_on.sh
|
UTF-8
| 566
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
#Turns a service on
# Imports variables from global environment created in previous script
# NOTE(review): relies on $mos_service, $running and $FILEDIR being set by
# the calling menu script -- confirm against the caller.
if [ -a /etc/init/$mos_service.conf ] && [ "$(echo $running | grep stop/waiting)" ] ; then
# Upstart job exists and is stopped: start it (spin.sh shows a spinner).
./spin.sh & sudo service $mos_service start && echo 0 > /dev/shm/mos_status
elif [ "$(echo $running | grep running)" ] ; then
echo "This service is already running"
else sudo cp $FILEDIR/$mos_service.conf /etc/init/
./spin.sh & sudo service $mos_service start && echo 0 > /dev/shm/mos_status
fi
echo "$mos_service has been turned on. Press enter to continue"
read goback
|
e7d1ad9d450237f6231f37cf74c0512c2516e3f5
|
Shell
|
AvraamMavridis/bash-scripts
|
/guetzli.sh
|
UTF-8
| 387
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Minify images using the guetzli algorithm
# https://github.com/google/guetzli
# It can be very slow.
# Dont use it in a CI process.
#
# Re-compress every .jpg under app/img in place with guetzli.
# Uses a NUL-delimited find/read pipeline instead of word-splitting
# $(find ...), so paths containing whitespace survive intact.
minify(){
  find "$PWD/app/img" -type f -name '*.jpg' -print0 |
  while IFS= read -r -d '' f
  do
    echo "Processing $f"
    guetzli "$f" "$f"
  done
}
# Run minify, installing guetzli via Homebrew first if it is missing.
# `command -v` is the portable/built-in replacement for `which`.
if command -v guetzli >/dev/null 2>&1; then
minify
else
brew update
brew install guetzli
minify
fi
| true
|
56e9426d61effbb07ed98fc975d367d01dc13b58
|
Shell
|
Bypass-Society/timetrap-docker
|
/start.sh
|
UTF-8
| 666
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# This script is to be run from the Docker hosts' local folder:
# - starts the docker container
# - bind mounts the local bin & data directory
# - calls the 'bootstrap.sh' to use the local timezone
# Get current user & timezone
# NOTE(review): assumes the image's home directory matches the host
# username -- confirm against the lehela/timetrap image.
USERNAME=$(id -un)
LOCALTZ=$(cat /etc/timezone)
# Set executable flag on hosts' bin folder
chmod 700 bin/*
# Set strict permissions for hosts' data folder
chmod 600 data/*
# Start container
docker run --rm -it \
-v "$(pwd)/data":/home/$USERNAME/data \
-v "$(pwd)/bin":/usr/local/host/bin \
--name timetrap \
--hostname timetrap \
lehela/timetrap:latest \
/usr/local/host/bin/bootstrap.sh $LOCALTZ
| true
|
ae5efa0c77a402e5ac726e4fb9949628871fa7ea
|
Shell
|
petronny/aur3-mirror
|
/puttygen-svn/PKGBUILD
|
UTF-8
| 743
| 2.96875
| 3
|
[] |
no_license
|
# PKGBUILD: build only the puttygen tool from the PuTTY subversion tree.
pkgname=puttygen-svn
_svnname=putty
pkgver=9846
pkgrel=1
pkgdesc="Manage rsa keys for use with putty"
arch=('i686' 'x86_64' 'armv7h')
url="http://www.chiark.greenend.org.uk/~sgtatham/putty"
license=('MIT')
makedepends=('subversion' 'zip')
source=('svn://svn.tartarus.org/sgt/putty')
md5sums=('SKIP')
# Generate the per-platform makefiles, then build just the puttygen target.
build() {
cd $_svnname
./mkfiles.pl
cd unix
make -f Makefile.ux puttygen || return 1
}
# Install the puttygen binary and the upstream licence file.
package() {
cd $_svnname/unix
mkdir -p $pkgdir/usr/bin
#install -Dm755 putty puttygen puttytel pterm psftp pscp plink $pkgdir/usr/bin || return 1
install -Dm755 puttygen $pkgdir/usr/bin || return 1
install -Dm644 ../LICENCE $pkgdir/usr/share/licenses/$pkgname/LICENSE
}
# Derive pkgver from the svn working-copy revision (digits only).
pkgver() {
cd $startdir/$_svnname
svnversion | tr -d [A-z]
}
| true
|
103f791d6f2c1bb3e10b31bfdc065e36e1bbce02
|
Shell
|
JohnBerd/epitech-scripts
|
/generate_cpp
|
UTF-8
| 1,924
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Generate an Epitech-style C++ class skeleton: <Name>.hpp with include
# guards plus <Name>.cpp with ctor/dtor stubs, then auto-indent via vim.
if [ "$#" != 1 ]
then
echo -e "$0 <name of the class to generate>"
exit
fi
if [ ! -f $1.hpp ]
then
# Header does not exist: emit the guard, includes and class shell.
touch $1.hpp
echo "/*" >> $1.hpp
echo "** EPITECH PROJECT, 2018" >> $1.hpp
echo "** \$NAME_OF_THE_PROJECT" >> $1.hpp
echo "** File description:" >> $1.hpp
echo "** Here is coded a super function" >> $1.hpp
echo "*/" >> $1.hpp
echo "" >> $1.hpp
echo "#ifndef `echo "$1" | tr '[:lower:]' '[:upper:]'`_H_" >> $1.hpp
echo "#define `echo "$1" | tr '[:lower:]' '[:upper:]'`_H_" >> $1.hpp
echo "" >> $1.hpp
echo "#include <iostream>" >> $1.hpp
echo "#include <string>" >> $1.hpp
echo "" >> $1.hpp
echo "class $1" >> $1.hpp
echo "{" >> $1.hpp
echo "private:" >> $1.hpp
echo "" >> $1.hpp
echo "public:" >> $1.hpp
echo "$1();" >> $1.hpp
echo "~$1();" >> $1.hpp
echo "};" >> $1.hpp
echo "" >> $1.hpp
echo "#endif" >> $1.hpp
echo -e "$1.hpp well created!"
else
# Header exists: append prototypes scraped from the matching .cpp
# (methods are lines starting lowercase; ')' becomes ');', class prefix
# is stripped).
echo -e "$1.hpp already exists!"
echo -e "`cat $1.cpp 2> /dev/null | grep "^[a-z]" | sed -e 's/)/);/g' | sed -e "s/$1:://g"`" >> $1.hpp
fi
# Re-indent the header in headless vim.
find . -name "$1.hpp" -type f -exec vim -en "+normal 7gg=GZZ" {} \;
if [ ! -f $1.cpp ]
then
touch $1.cpp
echo "/*" >> $1.cpp
echo "** EPITECH PROJECT, 2018" >> $1.cpp
echo "** \$NAME_OF_THE_PROJECT" >> $1.cpp
echo "** File description:" >> $1.cpp
echo "** Here is coded a super function" >> $1.cpp
echo "*/" >> $1.cpp
echo "" >> $1.cpp
echo "#include \"$1.hpp\"" >> $1.cpp
echo "" >> $1.cpp
echo "$1::$1()" >> $1.cpp
echo "{" >> $1.cpp
echo " " >> $1.cpp
echo "}" >> $1.cpp
echo " " >> $1.cpp
echo "$1::~$1()" >> $1.cpp
echo "{" >> $1.cpp
echo " " >> $1.cpp
echo "}" >> $1.cpp
find . -name "$1.cpp" -type f -exec vim -en "+normal 7gg=GZZ" {} \;
echo -e "$1.cpp well created!"
else
echo -e "$1.cpp already exists!"
fi
|
56004404c834da319e72d4511a2271eeab0cf872
|
Shell
|
seratch/python-slack-scim
|
/generate_response_classes.sh
|
UTF-8
| 481
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generate slack_scim v1 response classes from JSON Schema with quicktype,
# installing quicktype globally via npm on first use.
# `command -v` replaces the old backtick `which` capture: it is a builtin,
# POSIX-specified, and needs no subshell or string comparison.
if ! command -v quicktype >/dev/null 2>&1; then
  npm i -g quicktype
fi
# v1
# Each entry is "<output-module>:<SchemaName>"; word-splitting on spaces
# below is intentional.
targets="groups:Groups group:Group users:Users user:User service_provider_configs:ServiceProviderConfigs"
for target in $targets
do
# Split "module:Schema" on the colon into a two-element array.
t=(${target//:/ })
cat json-schema/v1/${t[1]}.json | quicktype \
--all-properties-optional \
--src-lang schema \
--lang python \
--python-version=3.6 \
--nice-property-names \
-t ${t[1]} \
-o src/slack_scim/v1/${t[0]}.py
done
| true
|
c2623a27bbc0cc97863dd6438c38d57c533805f8
|
Shell
|
nasimgholizadeh/my-bash-scripts
|
/convert-png-to-jpg.sh
|
UTF-8
| 256
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Convert every PNG in ~/Pictures to a half-size JPG in ~/jpgPictures,
# numbering the output files 1.jpg, 2.jpg, ...
clear
mkdir "$HOME/jpgPictures"
image_name="1"
# Iterate with a glob instead of parsing `ls` output (the old IFS=$'\n'
# hack), so filenames containing whitespace or glob characters are safe.
for i in "$HOME"/Pictures/*png; do
# If nothing matched, the literal pattern remains: skip it.
[ -e "$i" ] || continue
convert "$i" -resize 50% "$HOME/jpgPictures/$image_name.jpg"
echo "$image_name.jpg created!"
((image_name++))
done
echo "Finished."
| true
|
a6215f51978033a5ba57f5800da515741507b5e6
|
Shell
|
nwithan8/dotfiles
|
/setup/mac/files.sh
|
UTF-8
| 848
| 3.90625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Symlink dotfiles and helper scripts from the repo into the home dir.
# $1 is the absolute path of the cloned dotfiles repository.
REPO_LOCATION=$1
# Symlink files to their appropriate locations
# Using symlinks instead of copying files allows for updates to this repo to be reflected on the user's system immediately
# BashFTW profiles
DOT_FOLDER="$REPO_LOCATION/dot"
# for each directory in DOT_FOLDER, find all files starting with .bashrc and symlink them to the same name in ~
for dir in "$DOT_FOLDER"/*; do
if [ -d "$dir" ]; then
for file in "$dir"/.bashrc*; do
FILE_NAME=$(basename "$file")
DEST_FILE="$HOME/$FILE_NAME"
echo "Symlinking $file to $DEST_FILE"
# '|| true' keeps going when the link already exists.
ln -s "$file" "$DEST_FILE" || true
done
fi
done
# Scripts
SCRIPTS_DIR="$HOME/.scripts"
mkdir -p "$SCRIPTS_DIR" || true
ln -s "$REPO_LOCATION/scripts/bitwarden/bw_add_sshkeys.py" "$SCRIPTS_DIR/bw_add_sshkeys.py" || true
| true
|
1287765d37f98317b9e53785bbd4abdf95e98b15
|
Shell
|
apkemos/POSIX-Processes-and-Threads
|
/Client-Server Threads/testing/run_tests.sh
|
UTF-8
| 488
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
#make it executable with chmod +x run_tests.sh
#or chmod 777 run_tests.sh
#Run with bash ./run_tests.sh
#Must be killed after manually
# Starts the game server in the background, then launches LIMIT players
# with their own inventories, pausing between launches.
COUNTER=0
LIMIT=2
name[0]='Player1'
name[1]='Player2'
inventory[0]='testing/inv1.txt'
inventory[1]='testing/inv2.txt'
cd ..; #Go to project's directory
./gameserver -p 2 -i testing/inventory.txt -q 1000 &
while [ $COUNTER -lt $LIMIT ];
do
# Give the server (and previous player) time to come up first.
sleep 3
./player -n ${name[COUNTER]} -i ${inventory[COUNTER]} localhost &
let COUNTER+=1
done
| true
|
38df420d247c79eef7d040792cec69285675e691
|
Shell
|
petertodd/trillian
|
/integration/ct_integration_test.sh
|
UTF-8
| 2,446
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Integration test driver: provisions CT test logs in the database, starts a
# Trillian log RPC server and a CT HTTP front end, runs the Go CT integration
# tests against them, and tears everything down again.
set -e
INTEGRATION_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
. ${INTEGRATION_DIR}/common.sh
RPC_PORT=36962
CT_PORT=6962
# Build config file with absolute paths
CT_CFG=$(mktemp ${INTEGRATION_DIR}/ct-XXXXXX)
sed "s!@TESTDATA@!${TESTDATA}!" ./integration/ct_integration_test.cfg > ${CT_CFG}
trap "rm ${CT_CFG}" EXIT
# Retrieve tree IDs from config file
TREE_IDS=$(grep LogID ${CT_CFG} | grep -o '[0-9]\+'| xargs)
for id in ${TREE_IDS}
do
echo "Provisioning test log (Tree ID: ${id}) in database"
${SCRIPTS_DIR}/wipelog.sh ${id}
${SCRIPTS_DIR}/createlog.sh ${id}
done
echo "Starting Log RPC server on port ${RPC_PORT}"
pushd ${TRILLIAN_ROOT} > /dev/null
go build ${GOFLAGS} ./server/trillian_log_server/
./trillian_log_server --private_key_password=towel --private_key_file=${TESTDATA}/log-rpc-server.privkey.pem --port ${RPC_PORT} --signer_interval="1s" --sequencer_sleep_between_runs="1s" --batch_size=100 &
RPC_SERVER_PID=$!
popd > /dev/null
# Set an exit trap to ensure we kill the RPC server once we're done.
# (Note: this replaces the earlier config-cleanup trap; the config file is
# instead removed explicitly near the end.)
trap "kill -INT ${RPC_SERVER_PID}" EXIT
waitForServerStartup ${RPC_PORT}
echo "Starting CT HTTP server on port ${CT_PORT}"
pushd ${TRILLIAN_ROOT} > /dev/null
go build ${GOFLAGS} ./examples/ct/ct_server/
./ct_server --log_config=${CT_CFG} --log_rpc_server="localhost:${RPC_PORT}" --port=${CT_PORT} &
HTTP_SERVER_PID=$!
popd > /dev/null
# Set an exit trap to ensure we kill the servers once we're done.
trap "kill -INT ${HTTP_SERVER_PID} ${RPC_SERVER_PID}" EXIT
# The server will 404 the request as there's no handler for it. This error doesn't matter
# as the test will fail if the server is really not up.
set +e
waitForServerStartup ${CT_PORT}
set -e
echo "Running test(s)"
# Disable -e around the test run so the status can be captured and reported.
set +e
go test -v -run ".*CT.*" --timeout=5m ./integration --log_config ${CT_CFG} --ct_http_server="localhost:${CT_PORT}" --testdata=${TESTDATA}
RESULT=$?
set -e
rm ${CT_CFG}
trap - EXIT
echo "Stopping CT HTTP server (pid ${HTTP_SERVER_PID}) on port ${CT_PORT}"
kill -INT ${HTTP_SERVER_PID}
echo "Stopping Log RPC server (pid ${RPC_SERVER_PID}) on port ${RPC_PORT}"
kill -INT ${RPC_SERVER_PID}
# On failure, dump both server logs to aid debugging, then propagate the
# test's exit code.
if [ $RESULT != 0 ]; then
sleep 1
if [ "$TMPDIR" == "" ]; then
TMPDIR=/tmp
fi
echo "RPC Server log:"
echo "--------------------"
cat ${TMPDIR}/trillian_log_server.INFO
echo "HTTP Server log:"
echo "--------------------"
cat ${TMPDIR}/ct_server.INFO
exit $RESULT
fi
| true
|
b515c55e1baddad30adad875e204d957230c9fa8
|
Shell
|
rafaelvzago/skupper-performance
|
/onpremise-cloud/run-iperf-tests.sh
|
UTF-8
| 1,001
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# waitNoPods, waitJobCompleted, runningFrom and writeResults are used below
# but not defined here — presumably provided by common.sh (confirm there).
. common.sh
# Run one iperf3 throughput measurement via the iperf-client Kubernetes job.
# The target is $IPERF_SERVER; when unset it defaults to the pod IP of the
# running iperf-server pod.
runIperf() {
#
# Running iperf3 tests
#
kubectl delete job/iperf-client
waitNoPods iperf-client
POD_IP=`kubectl get pods -l app=iperf-server -o json | jq -r .items[0].status.podIP`
export IPERF_SERVER=${IPERF_SERVER:-${POD_IP}}
echo
echo "Running iperf3 tests ($IPERF_SERVER) from `runningFrom`"
echo
# Render the job manifest with the chosen server address and submit it.
cat resources/iperf-client.yaml | envsubst | kubectl apply -f -
waitJobCompleted iperf-client
echo
echo
kubectl logs job/iperf-client
# Extract the throughput figure: 7th field of the sender/receiver summary
# lines printed by iperf3.
tp=`kubectl logs job/iperf-client | egrep '(sender|receiver)$' | awk '{print $7}'`
echo "iPerf3 Throughput to ${IPERF_SERVER}: ${tp} gbps"
writeResults iperf "${IPERF_SERVER}" ${tp}
echo
echo
echo
}
# Run the measurement against every server endpoint variant.
runIperfAll() {
# pod ip
runIperf
# lb
IPERF_SERVER=iperf-server runIperf
# op
IPERF_SERVER=iperf-skupper-onpremise runIperf
# cl
IPERF_SERVER=iperf-skupper-cloud runIperf
}
# Run automatically only when executed directly (not when sourced).
[[ $0 =~ run-iperf-tests.sh ]] && runIperfAll
| true
|
b2c6b9597b851ffc7ca2971051197f1f9c66aa44
|
Shell
|
tjheeta/slashpackage-installer
|
/spf-path
|
UTF-8
| 942
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
# spf-path: build a path-like environment variable from subdirectories of
# installed /package trees, export it, then exec the given program.
# Usage: spf-path variable subpath skips program [arg...]
# (prj_* / sp_* helper semantics come from the sourced libraries below —
# the descriptions here are inferred from their names; confirm in prjlib.sh.)
# zsh compatibility shim for the ${1+"$@"} idiom used by the final exec.
case $ZSH_VERSION in '') :;; *) alias -g '${1+"$@"}="$@"';; esac &&
CDPATH= && unset CDPATH
DIR="$( cd "$( dirname "$0" )" && pwd )"
. ${DIR}/prjlib.sh
. ${DIR}/spftools.sh
. ${DIR}/sptools.sh
case $? in 0) :;; *) (exit "$?");; esac &&
case $# in
0|1|2|3) prj_fail 'usage: spf-path variable subpath skips program [arg...]';;
*) :;;
esac &&
prj_u2 prj_set prj_program spf-path &&
sp_validate_root &&
# Positional arguments: target variable name, subpath appended to each
# package path, and the list of package paths to skip.
prj_u2 prj_set var "$1" && shift &&
prj_u2 prj_set subpath "$1" && shift &&
prj_u2 prj_set skips "$1" && shift &&
prj_unset x &&
# Callback invoked by sp_for_each_package for every installed package:
# append <package path>/<subpath> to $var unless the package is skipped or
# the subpath does not exist.
sp_process_package_current() {
x=${sp_path?}/${subpath?} &&
if eval 'prj_anyeq "${sp_path?}" '"${skips?}"; then :
elif prj_exists "${x?}"; then prj_append "${var?}" "${x?}"
else :
fi
} &&
# Scan ${SP_ROOT}/package first (only when SP_ROOT is absolute), then the
# system-wide /package tree.
if prj_match '/*' "$SP_ROOT" && test -d "$SP_ROOT"/package
then sp_for_each_package "${SP_ROOT?}"/package
else :
fi &&
sp_for_each_package /package &&
export "${var?}" &&
# Replace this shell with the requested program and its arguments.
exec ${1+"$@"}
| true
|
39a3f1b5fa74a5a524d46fb1e093c52f7ea2f884
|
Shell
|
DrizlyInc/wait-for
|
/wait-for
|
UTF-8
| 2,463
| 4.4375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# wait-for: block until a TCP host:port accepts connections, then optionally
# exec a command.
# Save any caller-provided env settings so they can be restored when the
# subcommand is exec'd later (see wait_for).
OLD_TIMEOUT=$TIMEOUT
OLD_QUIET=$QUIET
OLD_PORT=$PORT
OLD_HOST=$HOST
OLD_LOOSE=$LOOSE
# Defaults: 15s timeout, verbose output, strict mode (no command on timeout).
TIMEOUT=15
QUIET=0
LOOSE=0
# netcat performs the actual connection probes.
# NOTE(review): 'command -v nc' would be the portable check; 'which' is not
# guaranteed to be present on all systems.
if ! which nc >/dev/null; then
echo "Netcat is not installed. This script requires netcat to work correctly."
exit 1
fi
# Print all arguments to stderr as a single line, unless quiet mode is on.
echoerr() {
  if [ "$QUIET" -eq 1 ]; then
    return 0
  fi
  printf "%s\n" "$*" 1>&2
}
# Run the given command; in quiet mode, discard all of its output.
conditionally_output() {
  if [ "$QUIET" -eq 1 ]; then
    "$@" > /dev/null 2>&1
  else
    "$@"
  fi
}
# Print usage text to stderr and exit.
# $1 - exit status to terminate with
usage() {
exitcode="$1"
cat << USAGE >&2
Usage:
$(basename $0) host:port [-t timeout] [-- command args]
-q | --quiet Do not output any status messages
-l | --loose Execute subcommand even if the test times out
-t TIMEOUT | --timeout=timeout Timeout in seconds, zero for no timeout
-- COMMAND ARGS Execute command with args after the test finishes
USAGE
exit "$exitcode"
}
# Probe $1:$2 once with netcat in zero-I/O mode (-z) with a 1s timeout.
# Returns nc's exit status: 0 when the port accepted a connection.
test_connection() {
conditionally_output echo "Testing connection to $1:$2..."
# force a 1-second timeout on darwin (https://stackoverflow.com/a/20460402/2063546)
# POSIX-compliant string inclusion test https://stackoverflow.com/a/8811800/2063546
if [ "${OSTYPE#*darwin*}" != "$OSTYPE" ] ; then
conditionally_output nc -z -w 1 -G 1 "$1" "$2"
else
conditionally_output nc -z -w 1 "$1" "$2"
fi
}
# Poll the target up to $TIMEOUT times, one second apart. On success — or in
# loose mode — exec the remaining arguments as a command, restoring the
# caller's original environment variables first. Exits with the last probe
# status otherwise.
# NOTE(review): 'local' is a bashism; under #!/bin/sh this relies on the
# system shell (e.g. dash) supporting it.
wait_for() {
local result
for i in `seq $TIMEOUT` ; do
# use a 1-second timeout, but still sleep 0.1 seconds after just to be safe
test_connection "$HOST" "$PORT"
result=$?
if [ $result -eq 0 ] ; then break ; fi
sleep 1
done
# NOTE(review): with TIMEOUT=0, seq emits nothing, so 'result' is never
# assigned and the tests below would error — confirm whether the advertised
# "zero for no timeout" is actually honoured here.
[ $result -ne 0 ] && echoerr "Operation timed out"
# test's -a binds tighter than -o: command runs if the probe succeeded, or
# if loose mode is on AND a command was supplied.
if [ $result -eq 0 -o $LOOSE -eq 1 -a $# -gt 0 ] ; then
TIMEOUT=$OLD_TIMEOUT QUIET=$OLD_QUIET PORT=$OLD_PORT HOST=$OLD_HOST LOOSE=$OLD_LOOSE exec "$@"
fi
exit $result
}
# Parse command-line arguments; everything after '--' is the command to exec
# once the port is reachable.
while [ $# -gt 0 ]
do
case "$1" in
*:* )
# host:port pair — split on the colon
HOST=$(printf "%s\n" "$1"| cut -d : -f 1)
PORT=$(printf "%s\n" "$1"| cut -d : -f 2)
shift 1
;;
-q | --quiet)
QUIET=1
shift 1
;;
-l | --loose)
LOOSE=1
shift 1
;;
-t)
TIMEOUT="$2"
if [ "$TIMEOUT" = "" ]; then break; fi
shift 2
;;
--timeout=*)
# strip everything up to and including '='
TIMEOUT="${1#*=}"
shift 1
;;
--)
shift
break
;;
--help)
usage 0
;;
*)
echoerr "Unknown argument: $1"
usage 1
;;
esac
done
# A host and a port are both mandatory.
if [ "$HOST" = "" -o "$PORT" = "" ]; then
echoerr "Error: you need to provide a host and port to test."
usage 2
fi
wait_for "$@"
| true
|
5342f55a46f29d665a0d957e2a0f1f22b3802bcc
|
Shell
|
open-datas/Darken
|
/config/src/main/resources/startup.sh
|
UTF-8
| 279
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
# Build the classpath from the exploded Spring Boot jar layout and start the
# Eureka server, either in the foreground ("console") or detached.
for i in /BOOT-INF/lib/*;
do CLASSPATH=$i:"$CLASSPATH";
done
CLASSPATH=.:/BOOT-INF:$CLASSPATH
export CLASSPATH
if [ "$1" = "console" ]
then
  # Foreground: logs go to the terminal. CLASSPATH is quoted so entries
  # containing spaces don't word-split into separate arguments.
  java -d64 -cp "$CLASSPATH" EurekaApplication
else
  # Background: detach from the terminal and discard all output.
  nohup java -d64 -cp "$CLASSPATH" EurekaApplication >/dev/null 2>&1 &
fi
| true
|
bbf2f14b32b13cf319bc35d77943e231432ac419
|
Shell
|
journeymonitor/monitor
|
/bin/run-testcase.sh
|
UTF-8
| 1,620
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run one JourneyMonitor testcase under a dedicated Xvfb display and a
# dedicated browsermob proxy, recording a HAR capture and the runner's
# exit status to /var/tmp.
# $1 - X display number (also keys the per-run output file names)
# $2 - Selenese testcase file to execute
/usr/bin/Xvfb :$1 -screen 0 1920x1200x24 -nolisten tcp -ac > /dev/null 2> /dev/null &
XVFB_PID=$!
export DISPLAY=:$1
# Launching a browsermob proxy, ensuring that we don't work with a port that is already taken
# (retry with random ports until the REST API confirms the requested port)
PROXY_STARTED=0
while [ $PROXY_STARTED -eq 0 ]
do
PROXY_PORT=$(( ( RANDOM % 32767 ) + 9091 ))
PROXY_START_OUTPUT=`/usr/bin/curl -s -X POST -d "port=$PROXY_PORT" http://localhost:9090/proxy`
if [ "$PROXY_START_OUTPUT" == "{\"port\":$PROXY_PORT}" ]
then
PROXY_STARTED=1
fi
done
# Enable HAR capture (including request headers) on the new proxy.
/usr/bin/curl -s -X PUT -d "captureHeaders=1" http://localhost:9090/proxy/$PROXY_PORT/har
# Firefoxes should not be started in parallel it seems
sleep $[ ( $RANDOM % 10 ) + 1 ]s
/usr/bin/java \
-jar /opt/selenese-runner-java/selenese-runner.jar \
--driver firefox \
--cli-args "--new-instance" \
--proxy localhost:$PROXY_PORT \
--no-proxy tiles-cloudfront.cdn.mozilla.net,search.services.mozilla.com,shavar.services.mozilla.com,tracking-protection.cdn.mozilla.net,aus5.mozilla.org,tiles.services.mozilla.com,location.services.mozilla.com,ocsp.digicert.com,ocsp.startssl.com,ocsp.verisign.com,ocsp.godaddy.com,ciscobinary.openh264.org \
--width 1920 \
--height 1200 \
--screenshot-on-fail /var/tmp/journeymonitor-screenshots \
--strict-exit-code \
--timeout 240000 \
$2 2>&1
STATUS=$?
# Persist the exit status and captured HAR, then tear down proxy and Xvfb.
echo $STATUS > /var/tmp/journeymonitor-testcase-run-$1-exit-status
/usr/bin/curl -s http://localhost:9090/proxy/$PROXY_PORT/har > /var/tmp/journeymonitor-testcase-run-$1-har
/usr/bin/curl -s -X DELETE http://localhost:9090/proxy/$PROXY_PORT
kill $XVFB_PID
| true
|
9483173ef9423b5b20cee8f6ddadc76440abd932
|
Shell
|
lionelliang/Sipmobile
|
/lib/mailfax.sh
|
UTF-8
| 2,491
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Email a received fax (or a reception-failure notice) to the recipient.
# $1 - sender fax number       $2 - receiving fax number
# $3 - recipient display name  $4 - recipient email address
# $5 - fax file path/basename (without the .tif extension)
# $6 - service name shown in the sign-off
# NOTE(review): SMTP host, username and password are hardcoded below —
# these credentials should live in a protected config file, not in the script.
echo Received paramters $1 $2 $3 $4 $5 $6 >>/var/log/faxmail.log
DATETIME=`date +"%A %d %b %Y %H:%M"`
if [ -e $5.tif ]
then
echo fax file $5.tif found. Sending email to $4 .... >>/var/log/faxmail.log
# Page count and scan timestamp are pulled from the TIFF metadata.
PAGES=$(tiffinfo $5.tif | grep "Page")
DT=$(tiffinfo $5.tif | grep "Date")
DTFAX=${DT#*:}
COUNT=${PAGES#*-}
# Build the notification body in $5.txt.
rm -f $5.txt
echo Dear $3, >>$5.txt
echo >>$5.txt
echo You have just recieved a new fax document. Details as follow >>$5.txt
echo >>$5.txt
echo "From : "$1 >>$5.txt
echo "To : "$2 >>$5.txt
echo "When : "$DATETIME '['$DTFAX' ]'>>$5.txt
echo "Pages : "$COUNT>>$5.txt
echo >>$5.txt
echo >>$5.txt
echo You can view your faxes online by visiting https://fax.abc.com. Your login name is the full fax number >>$5.txt
echo >>$5.txt
echo Thank you for using $6 >>$5.txt
echo sendEmail -f $1@fax.abc.com -t $4 -u "New fax received" -a $5.tif -o message-file=$5.txt \ >> /var/log/faxmail.log
echo "<<<<<<<<<<<<<<<<<<<<---------------->>>>>>>>>>>>>>>>>>>>>>>>>" >> /var/log/faxmail.log
# NOTE(review): the existence check above was for $5.tif, but $5.pdf is
# attached here — confirm a TIFF->PDF conversion happens before this script.
/usr/local/bin/sendEmail -l /var/log/sendEmail.log -q -s auth.smtp.1and1.fr -xu test@sipcom.fr -xp test123 -f $1@sipcom.fr -t $4 -u "New fax received" -a $5.pdf -o "message-file=$5.txt"
#sendEmail -f test@sipcom.fr -t "${EXTNAME} <${EXTEMAIL}>" -u You have a FAX -a /var/lib/asterisk/agi-bin/Sipmobile/fax/${CALLERID(DNID)}/${FAXFILENAME}.pdf -m You have a new FAX. Find attached. -s auth.smtp.1and1.fr -xu test@sipcom.fr -xp test123
else
# No fax file: send a failure notice instead.
rm -f $5.txt
echo Dear $3, >>$5.txt
echo >>$5.txt
echo A call was recieved on your fax line, however no fax was recieved or the attempt failed. Details as follow >>$5.txt
echo >>$5.txt
echo "From : "$1 >>$5.txt
echo "To : "$2 >>$5.txt
#echo $DATETIME >>$5.txt
echo "When : "$DATETIME >>$5.txt
#echo "Pages : "$COUNT>>$5.txt
echo >>$5.txt
echo This notification is for your conveniance, if it is not required please notify your system administrator >>$5.txt
#echo >>$5.txt
#echo You can view your faxes online by visiting https://fax.abc.com. Your login name is the full fax number >>$5.txt
echo >>$5.txt
echo Thank you for using $6 >>$5.txt
echo sendEmail -f $1@fax.abc.com -t $4 -u "Fax reception failed" -o message-file=$5.txt \ >> /var/log/faxmail.log
echo "<<<<<<<<<<<<<<<<<<<<---------------->>>>>>>>>>>>>>>>>>>>>>>>>" >> /var/log/faxmail.log
/usr/local/bin/sendEmail -l /var/log/sendEmail.log -q -s 195.219.151.8 -f $1@fax.abc.com -t $4 -u "Fax reception failed" -o "message-file=$5.txt"
exit
fi
| true
|
6499f4799011a2c94b32b7744c318a04d5404bb3
|
Shell
|
fmidev/docker-fmibase
|
/test-inside-container-2.sh
|
UTF-8
| 491
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash -ex
# Run inside the container: verify that the yum cache volume survived the
# container re-creation and still holds what test 1 cached.
ls -l /var/cache/yum
# The cache from the previous test must be intact even though the container
# was recreated: the directory tree is non-empty ...
[ "$(find /var/cache/yum/ | wc -l)" -gt 0 ]
# ... openssh is still present in the cache ...
[ "$(find /var/cache/yum/ -name openssh\*.rpm | wc -l)" -gt 0 ]
# ... but the package itself is not installed.
[ ! -e /usr/bin/ssh ]
# Clean the yum cache. This can't be done outside the container, as we
# might not have root there.
rm -rf /var/cache/yum/*
| true
|
1a7ac8eefc5d88333c1c0b01333320496e762d1c
|
Shell
|
sulromares/automation-ci
|
/master/backup/download-latest.sh
|
UTF-8
| 703
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
# Download the latest CI backup archives (jenkins, nexus, sonar, postgres)
# from the GCS backup bucket into the current directory.
token=$(python ../../utils/access_token.py)
bucket="javelin-ci-backup-test"

# fetch NAME: download NAME_data.tar.gz from the bucket using the OAuth
# bearer token obtained above. Output messages match the original script.
fetch() {
    echo "Fetching $1 data..."
    curl -sO -H "Authorization: Bearer ${token}" "https://storage.googleapis.com/${bucket}/$1_data.tar.gz"
    echo "OK"
}

fetch jenkins
fetch nexus
fetch sonar
fetch postgres
echo "Done!"
| true
|
c7f414f8fe56c6250acfef2b704618cddc770a22
|
Shell
|
FauxFaux/debian-control
|
/f/fs-uae/fs-uae-netplay-server_2.8.4+dfsg-2_all/preinst
|
UTF-8
| 2,404
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/sh
# Debian preinst for fs-uae-netplay-server: creates the dedicated system
# user/group, the server home directory and the log directory before the
# package files are unpacked, so the daemon never runs as root.
set -e
case "$1" in
install|upgrade)
# If the package has default file it could be sourced, so that
# the local admin can overwrite the defaults
#[ -f "/etc/default/packagename" ] && . /etc/default/fixme
# Sane defaults:
[ -z "$SERVER_HOME" ] && SERVER_HOME=/var/cache/fs-uae-netplay-server
[ -z "$SERVER_USER" ] && SERVER_USER=fs-uae-netplay-server
[ -z "$SERVER_NAME" ] && SERVER_NAME="FS-UAE Netplay Server"
[ -z "$SERVER_GROUP" ] && SERVER_GROUP=fs-uae-netplay-server
# Groups that the user will be added to, if undefined, then none.
ADDGROUP=""
# create user to avoid running server as root
# 1. create group if not existing
if ! getent group | grep -q "^$SERVER_GROUP:" ; then
echo -n "Adding group $SERVER_GROUP.."
addgroup --quiet --system $SERVER_GROUP 2>/dev/null ||true
echo "..done"
fi
# 2. create homedir if not existing
test -d $SERVER_HOME || mkdir $SERVER_HOME
# 3. create user if not existing
if ! getent passwd | grep -q "^$SERVER_USER:"; then
echo -n "Adding system user $SERVER_USER.."
adduser --quiet \
--system \
--ingroup $SERVER_GROUP \
--no-create-home \
--disabled-password \
$SERVER_USER 2>/dev/null || true
echo "..done"
fi
# 4. adjust passwd entry
usermod -c "$SERVER_NAME" \
-d $SERVER_HOME \
-g $SERVER_GROUP \
$SERVER_USER
# 5. adjust file and directory permissions
# (skipped when the admin registered an override via dpkg-statoverride)
if ! dpkg-statoverride --list $SERVER_HOME >/dev/null
then
chown -R $SERVER_USER:$SERVER_GROUP $SERVER_HOME
chmod u=rwx,g=rxs,o= $SERVER_HOME
fi
# 6. Add the user to the ADDGROUP group
if test -n "$ADDGROUP"
then
if ! groups $SERVER_USER | cut -d: -f2 | \
grep -qw $ADDGROUP; then
adduser $SERVER_USER $ADDGROUP
fi
fi
# Log directory, owned by the service account.
mkdir -p /var/log/fs-uae-netplay-server
chown $SERVER_USER /var/log/fs-uae-netplay-server
chgrp $SERVER_GROUP /var/log/fs-uae-netplay-server
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
exit 0
| true
|
52aa970d676dd4fffbebd2737e366993f7f2b94d
|
Shell
|
rryk/duplicati
|
/build-preview-update.sh
|
UTF-8
| 5,321
| 3.328125
| 3
|
[] |
no_license
|
# Build and publish a signed Duplicati PREVIEW auto-update package:
# bump the build number, fold changelog news into the changelog, build the
# solution with xbuild, sign the update bundle, upload it to S3, and record
# the release in git (commit + tag).
RELEASE_TIMESTAMP=`date +%Y-%m-%d`
RELEASE_INC_VERSION=`cat Updates/build_version.txt`
RELEASE_INC_VERSION=$((RELEASE_INC_VERSION+1))
RELEASE_VERSION="2.0.0.${RELEASE_INC_VERSION}"
RELEASE_NAME="${RELEASE_VERSION}_preview_${RELEASE_TIMESTAMP}"
RELEASE_CHANGELOG_FILE="changelog.txt"
RELEASE_CHANGELOG_NEWS_FILE="changelog-news.txt"
RELEASE_FILE_NAME="duplicati-${RELEASE_NAME}"
GIT_STASH_NAME="auto-build-${RELEASE_TIMESTAMP}"
UPDATE_ZIP_URLS="http://updates.duplicati.com/preview/${RELEASE_FILE_NAME}.zip;http://alt.updates.duplicati.com/preview/${RELEASE_FILE_NAME}.zip"
UPDATE_MANIFEST_URLS="http://updates.duplicati.com/preview/latest.manifest;http://alt.updates.duplicati.com/preview/latest.manifest"
UPDATER_KEYFILE="/Users/kenneth/Dropbox/Privat/Duplicati-updater-release.key"
XBUILD=/usr/bin/xbuild
# The changelog-news file doubles as a guard against accidental builds:
# its absence aborts the run with instructions.
if [ ! -f "${RELEASE_CHANGELOG_FILE}" ]; then
echo "Changelog file is missing..."
exit 0
fi
if [ ! -f "${RELEASE_CHANGELOG_NEWS_FILE}" ]; then
echo "No updates to changelog file found"
echo
echo "To make a build without changelog news, run:"
echo " touch ""${RELEASE_CHANGELOG_NEWS_FILE}"" "
exit 0
fi
# Signing-key password is read interactively (-s: no echo), never stored.
echo -n "Enter keyfile password: "
read -s KEYFILE_PASSWORD
echo
RELEASE_CHANGEINFO_NEWS=`cat ${RELEASE_CHANGELOG_NEWS_FILE}`
git stash save "${GIT_STASH_NAME}"
# Prepend the news (with a date header) to the changelog.
if [ ! "x${RELEASE_CHANGEINFO_NEWS}" == "x" ]; then
echo "${RELEASE_TIMESTAMP}" > "tmp_changelog.txt"
echo "==========" >> "tmp_changelog.txt"
echo "${RELEASE_CHANGEINFO_NEWS}" >> "tmp_changelog.txt"
echo >> "tmp_changelog.txt"
cat "${RELEASE_CHANGELOG_FILE}" >> "tmp_changelog.txt"
cp "tmp_changelog.txt" "${RELEASE_CHANGELOG_FILE}"
rm "tmp_changelog.txt"
fi
# Stamp version/update metadata into the source tree before building.
echo "${RELEASE_NAME}" > "Duplicati/License/VersionTag.txt"
echo "${UPDATE_MANIFEST_URLS}" > "Duplicati/Library/AutoUpdater/AutoUpdateURL.txt"
cp "Updates/release_key.txt" "Duplicati/Library/AutoUpdater/AutoUpdateSignKey.txt"
RELEASE_CHANGEINFO=`cat ${RELEASE_CHANGELOG_FILE}`
if [ "x${RELEASE_CHANGEINFO}" == "x" ]; then
echo "No information in changeinfo file"
exit 0
fi
rm -rf "Duplicati/GUI/Duplicati.GUI.TrayIcon/bin/Release"
mono "BuildTools/UpdateVersionStamp/bin/Debug/UpdateVersionStamp.exe" --version="${RELEASE_VERSION}"
${XBUILD} /p:Configuration=Debug "BuildTools/AutoUpdateBuilder/AutoUpdateBuilder.sln"
${XBUILD} /p:Configuration=Release Duplicati.sln
BUILD_STATUS=$?
if [ "${BUILD_STATUS}" -ne 0 ]; then
echo "Failed to build, xbuild gave ${BUILD_STATUS}, exiting"
exit 4
fi
# Stage the build output and strip files that must not ship in the update
# (local databases, logs, debug symbols, nested update dirs).
if [ ! -d "Updates/build" ]; then mkdir "Updates/build"; fi
UPDATE_SOURCE=Updates/build/preview_source-${RELEASE_VERSION}
UPDATE_TARGET=Updates/build/preview_target-${RELEASE_VERSION}
if [ -e "${UPDATE_SOURCE}" ]; then rm -rf "${UPDATE_SOURCE}"; fi
if [ -e "${UPDATE_TARGET}" ]; then rm -rf "${UPDATE_TARGET}"; fi
mkdir "${UPDATE_SOURCE}"
mkdir "${UPDATE_TARGET}"
cp -R Duplicati/GUI/Duplicati.GUI.TrayIcon/bin/Release/* "${UPDATE_SOURCE}"
cp -R Duplicati/Server/webroot "${UPDATE_SOURCE}"
if [ -e "${UPDATE_SOURCE}/control_dir" ]; then rm -rf "${UPDATE_SOURCE}/control_dir"; fi
if [ -e "${UPDATE_SOURCE}/Duplicati-server.sqlite" ]; then rm "${UPDATE_SOURCE}/Duplicati-server.sqlite"; fi
if [ -e "${UPDATE_SOURCE}/Duplicati.debug.log" ]; then rm "${UPDATE_SOURCE}/Duplicati.debug.log"; fi
if [ -e "${UPDATE_SOURCE}/updates" ]; then rm -rf "${UPDATE_SOURCE}/updates"; fi
rm -rf "${UPDATE_SOURCE}/"*.mdb;
rm -rf "${UPDATE_SOURCE}/"*.pdb;
echo
echo "Building signed package ..."
mono BuildTools/AutoUpdateBuilder/bin/Debug/AutoUpdateBuilder.exe --input="${UPDATE_SOURCE}" --output="${UPDATE_TARGET}" --keyfile="${UPDATER_KEYFILE}" --manifest=Updates/preview.manifest --changeinfo="${RELEASE_CHANGEINFO}" --displayname="${RELEASE_NAME}" --remoteurls="${UPDATE_ZIP_URLS}" --version="${RELEASE_VERSION}" --keyfile-password="$KEYFILE_PASSWORD"
echo "${RELEASE_INC_VERSION}" > "Updates/build_version.txt"
mv "${UPDATE_TARGET}/package.zip" "${UPDATE_TARGET}/latest.zip"
mv "${UPDATE_TARGET}/autoupdate.manifest" "${UPDATE_TARGET}/latest.manifest"
cp "${UPDATE_TARGET}/latest.zip" "${UPDATE_TARGET}/${RELEASE_FILE_NAME}.zip"
cp "${UPDATE_TARGET}/latest.manifest" "${UPDATE_TARGET}/${RELEASE_FILE_NAME}.manifest"
# Reset the tree's version stamp to the baseline after packaging.
mono BuildTools/UpdateVersionStamp/bin/Debug/UpdateVersionStamp.exe --version="2.0.0.7"
echo "Uploading binaries"
aws --profile=duplicati-upload s3 cp "${UPDATE_TARGET}/${RELEASE_FILE_NAME}.zip" "s3://updates.duplicati.com/preview/${RELEASE_FILE_NAME}.zip"
aws --profile=duplicati-upload s3 cp "${UPDATE_TARGET}/${RELEASE_FILE_NAME}.manifest" "s3://updates.duplicati.com/preview/${RELEASE_FILE_NAME}.manifest"
aws --profile=duplicati-upload s3 cp "${UPDATE_TARGET}/${RELEASE_FILE_NAME}.manifest" "s3://updates.duplicati.com/rene/latest.manifest"
ZIP_MD5=`md5 ${UPDATE_TARGET}/${RELEASE_FILE_NAME}.zip | awk -F ' ' '{print $NF}'`
# Record the release: commit the version bump and tag with the md5 sum.
rm "${RELEASE_CHANGELOG_NEWS_FILE}"
git checkout "Duplicati/License/VersionTag.txt"
git add "Updates/build_version.txt"
git add "${RELEASE_CHANGELOG_FILE}"
git commit -m "Version bump to v${RELEASE_VERSION}-${RELEASE_NAME}" -m "You can download this build from: http://updates.duplicati.com/preview/${RELEASE_FILE_NAME}.zip"
git tag "v${RELEASE_VERSION}-${RELEASE_NAME}" -m "md5 sum: ${ZIP_MD5}"
echo
echo "Built PREVIEW version: ${RELEASE_VERSION} - ${RELEASE_NAME}"
echo " in folder: ${UPDATE_TARGET}"
| true
|
31f92b04188b0709fba9747f494b99bddab3b404
|
Shell
|
doxing/pandaOS
|
/src/scripts/patch
|
UTF-8
| 394
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build and install GNU Patch into the /tools prefix (LFS-style toolchain).
FOLDERNAME="patch-2.7.5"
FILENAME="patch-2.7.5.tar.xz"
# The Patch package contains a program for modifying or
# creating files by applying a "patch" file typically
# created by the diff program.
# Abort if the source directory is missing; otherwise configure/make would
# silently run in the current directory.
cd ../../pkg/basic/${FOLDERNAME} || exit 1
# Prepare Patch for compilation
./configure --prefix=/tools
# Compile the package
make $MAKEFLAGS
# Install the package
make $MAKEFLAGS install
| true
|
92d1363fb7b474a0d39e54b0b50db01fca989b0f
|
Shell
|
stringlytyped/dotfiles
|
/install.sh
|
UTF-8
| 3,303
| 4.21875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#! /usr/bin/env bash
# Dotfiles installer: checks dependencies (Homebrew formulae and Mac apps),
# links the powerline config, configures git, and symlinks dotfiles into ~.
# Array of dotfiles to be symlinked in the home directory
files=(bashrc bash_profile inputrc gitconfig gitignore)
# Array of Homebrew formulae that the dotfiles depend on
brew_formulae=(coreutils darksky-weather hub)
# Array of Mac applications and corresponding Homebrew casks that the dotfiles depend on
# (parallel arrays: apps[i] is installed via casks[i])
apps=("Visual Studio Code" "Sourcetree")
casks=("vscode" "sourcetree")
# Get the path to the dotfiles directory
# See https://www.ostricher.com/2014/10/the-right-way-to-get-the-directory-of-a-bash-script/
source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
#######################################
# Ask a yes/no question on stdin, re-prompting until a valid answer is given.
# Arguments: $* - the question text (printed as the read prompt)
# Sets:      response - normalized to "y" or "n"
#######################################
yn() {
  while true; do
    read -rp "$* " response
    case $response in
      [yY] | [yY][Ee][Ss] ) response=y; return ;;
      [nN] | [nN][Oo] )     response=n; return ;;
      * ) echo "Invalid input. Try again:" ;;
    esac
  done
}
# Check whether Homebrew is installed; without it, dependency checks below
# cannot run, so offer to continue anyway.
if ! hash brew 2>/dev/null; then
yn "Homebrew (https://brew.sh) was not found on your system. As a result, this install script won't be able to check whether or not the other dependencies are installed. Would you like to continue anyway (y/n)?"
if [ $response = n ]; then
exit 1
fi
else
# Check whether all required Homebrew formulae are installed
for formula in "${brew_formulae[@]}"; do
if ! brew ls --versions "$formula" &>/dev/null; then
yn "$formula was not found on your system. Would you like to run \"brew install $formula\" to install it (y/n)?"
if [ $response = y ]; then
brew install "$formula"
fi
fi
done
fi
# Check whether all required Mac apps are installed
# (iterate indices so apps[i] can be paired with casks[i])
for i in ${!apps[*]}; do
if ! [[ -d "/Applications/${apps[i]}.app" ]]; then
yn "${apps[i]} was not found on your system. Would you like to run \"brew cask install ${casks[i]}\" to install it (y/n)?"
if [ $response = y ]; then
brew cask install "${casks[i]}"
fi
fi
done
# TODO: check for existence of powerline & pyenv
# if ! pip show powerline-status &>/dev/null; then
# fi
# Create a symbolic link for the powerline config directory in ~/.config/powerline
# (existing config is only replaced after user confirmation)
if [[ -d "$HOME/.config/powerline" ]]; then
# shellcheck disable=SC2088
yn "~/.config/powerline/ already exists. Do you want to overwrite it (y/n)?"
if [ $response = y ]; then
rm -r "$HOME/.config/powerline"
ln -sf "$source_dir/powerline" "$HOME/.config/powerline"
fi
else
ln -sf "$source_dir/powerline" "$HOME/.config/powerline"
fi
# Prompt user for Git configuration values, written into the repo's
# gitconfig (which is then symlinked into ~ below).
echo "Let's configure Git!"
read -rp " Enter your name (e.g. \"Jane Smith\"): " response
git config -f "$source_dir/gitconfig" user.name "$response"
read -rp " Enter your email (e.g. \"jsmith@example.com\"): " response
git config -f "$source_dir/gitconfig" user.email "$response"
echo "Done configuring Git :)"
# Create a symbolic link in the user's home directory for each dotfile
for file in "${files[@]}"; do
if [[ -f "$HOME/.$file" ]]; then
# shellcheck disable=SC2088
yn "~/.$file already exists. Do you want to overwrite it (y/n)?"
if [ $response = n ]; then
continue
fi
fi
ln -sf "$source_dir/$file" "$HOME/.$file"
done
# Load newly-installed bash config
# shellcheck source=./bashrc
source ~/.bashrc
| true
|
92f6af07a23071e1ff45b2515cdca9432db54d87
|
Shell
|
Rzachprime/fvtt-mutants-and-masterminds-3e
|
/scripts/dist.sh
|
UTF-8
| 903
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
# Resolve the directory this script lives in so paths work from any CWD.
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"

# Rebuild dist/ from scratch; every step is chained with && so any failure
# stops the packaging. All paths are quoted so a checkout path containing
# spaces cannot word-split into wrong arguments.
rm -rf "$script_dir/../dist" &&
mkdir "$script_dir/../dist" &&
# Ensure system.json is inside dist for release
cp "$script_dir/../src/system.json" "$script_dir/../build/system.json" &&
cp "$script_dir/../src/system.json" "$script_dir/../dist/system.json" &&
# Ensure template.json is inside the system.zip source
cp "$script_dir/../src/template.json" "$script_dir/../build/template.json" &&
# Ensure non TS and SCSS files inside the build
cp -vr "$script_dir/../src/assets" "$script_dir/../build/assets" &&
cp -vr "$script_dir/../src/lang" "$script_dir/../build/lang" &&
# mkdir "$script_dir/../build/styles" &&
mv "$script_dir/../build/system.css" "$script_dir/../build/styles/system.css" &&
cp -vr "$script_dir/../src/templates" "$script_dir/../build/templates" &&
cd "$script_dir/../build" &&
zip -vr ../dist/system.zip . -x ".DS_Store"
| true
|
0f9a74b713a3bbb0c6a220d1487b025aa7dd5dae
|
Shell
|
leonrudowicz/42env
|
/dock.sh
|
UTF-8
| 2,170
| 2.515625
| 3
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
####
#### Dock Customizations (macOS `defaults write`; applied on Dock restart)
####
### Dock Size
defaults write com.apple.dock tilesize -int 46
## Lock Dock
defaults write com.apple.dock size-immutable -bool true
### Hide the dock
defaults write com.apple.dock autohide -bool true
## Hide delay (show immediately on hover)
defaults write com.apple.dock autohide-delay -float 0
# No Dock magnification
defaults write com.apple.dock magnification -bool false
# Minimization effect
defaults write com.apple.dock mineffect -string 'scale'
# Tabs when opening new windows
defaults write NSGlobalDomain AppleWindowTabbingMode -string 'always'
# Dock orientation: left (ergonomic), right, bottom
defaults write com.apple.dock 'orientation' -string 'left'
### Remove all (default) app icons from the Dock
defaults write com.apple.dock persistent-apps -array
## Add default programs to dock
## (each entry is the property-list dict format the Dock expects)
defaults write com.apple.dock persistent-apps -array-add '<dict><key>tile-data</key><dict><key>file-data</key><dict><key>_CFURLString</key><string>/Applications/Launchpad.app</string><key>_CFURLStringType</key><integer>0</integer></dict></dict></dict>'
defaults write com.apple.dock persistent-apps -array-add '<dict><key>tile-data</key><dict><key>file-data</key><dict><key>_CFURLString</key><string>/Applications/Google Chrome.app</string><key>_CFURLStringType</key><integer>0</integer></dict></dict></dict>'
defaults write com.apple.dock persistent-apps -array-add '<dict><key>tile-data</key><dict><key>file-data</key><dict><key>_CFURLString</key><string>/Applications/Visual Studio Code.app</string><key>_CFURLStringType</key><integer>0</integer></dict></dict></dict>'
defaults write com.apple.dock persistent-apps -array-add '<dict><key>tile-data</key><dict><key>file-data</key><dict><key>_CFURLString</key><string>/Applications/iTerm.app</string><key>_CFURLStringType</key><integer>0</integer></dict></dict></dict>'
defaults write com.apple.dock persistent-apps -array-add '<dict><key>tile-data</key><dict><key>file-data</key><dict><key>_CFURLString</key><string>/Applications/System Preferences.app</string><key>_CFURLStringType</key><integer>0</integer></dict></dict></dict>'
# Restart the dock to show changes
killall Dock
| true
|
e96e58d669ad8196f897473fc5ebece77f753243
|
Shell
|
myst3k/stuff
|
/bash_template.sh
|
UTF-8
| 418
| 3.484375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Minimal defensive bash script template.

# Debug mode, same as set -x
#set -o xtrace
# Exit if attempting to use uninitialized variable, same as set -u
set -o nounset
# Exit if statement returns non-true value, same as set -e
set -o errexit
# Removes the "space" character from the input field separator list
IFS="$(printf '\n\t')"

args=("$@")
# Use ${N:-} defaults so 'nounset' does not abort the script when fewer
# than two arguments are supplied (indexing an empty array would).
argument1=${1:-}
argument2=${2:-}
# Prefer the 'getopts' builtin for CLI flag parsing (portable; the legacy
# external 'getopt' mishandles arguments containing spaces)
| true
|
4e589f2a53e4522e338c015f155afd539b9a40ab
|
Shell
|
bryantrobbins/umd-dozen
|
/common/common-replay-suite.sh
|
UTF-8
| 2,126
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Copyright (c) 2009-@year@. The GUITAR group at the University of
# Maryland. Names of owners of this group may be obtained by sending
# an e-mail to atif@cs.umd.edu
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
####################################################
# Replay the test suites
#
# By baonn@cs.umd.edu
# Date: 06/08/2011
####################################################
echo "*** Replaying ***"
#--------------------------
# Parameter check
#--------------------------
# $1 - AUT (application under test) name
# $2 - database id    $3 - test suite id
if [ $# -lt 3 ]
then
echo "Usage: $0 <AUT name> <dbId> <suiteId>"
exit 1
fi
dbId=$2
suiteId=$3
#--------------------------
# Script configuration
#--------------------------
# Load common configuration
source "`dirname $0`/common-load-args.sh"
source "`dirname $0`/common.cfg"
source "`dirname $0`/util.sh"
# Load aut local configuration
# ($aut_scripts_dir, $aut_name and $guitar_dir are presumably set by the
# files sourced above — confirm in common-load-args.sh / common.cfg.)
source "$aut_scripts_dir/aut.cfg"
source "$aut_scripts_dir/aut.utils.sh"
# Environments
globalRet=0
# Replay the suite through the project's gradle build file.
cmd="gradle -b $guitar_dir/guitar.gradle replaySuite -Paut_name=$aut_name -Psuite_id=$suiteId -Pdb_id=$dbId"
echo $cmd
eval $cmd
| true
|
1c077f2f73da5a5cef5f89d61f72a08f2c50430c
|
Shell
|
xzhoulab/DBSLMM-Analysis
|
/real_dat/EUR/3_herit_cv.sh
|
UTF-8
| 1,012
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# SLURM array job: compute SNP heritability with LDSC for each cross-
# validation fold of the phenotype(s) listed below; each array task handles
# one (PHENO, cross) pair selected via the counter k.
#SBATCH --partition=mulan,nomosix
#SBATCH --time=1-00:00:00
#SBATCH --job-name=herit
#SBATCH --mem-per-cpu=2G
#SBATCH --array=1-5
#SBATCH --output=/net/mulan/disk2/yasheng/out/herit_%a.out
#SBATCH --error=/net/mulan/disk2/yasheng/err/herit_%a.err
# NOTE(review): this bare 'bash' spawns a child shell that consumes the rest
# of stdin and does nothing useful in batch mode — confirm intent.
bash
let k=0
ldsc=/net/mulan/home/yasheng/Biobank/program/ldsc/ldsc.py
mkldsc=/net/mulan/home/yasheng/Biobank/code/heritability/mkldsc.R
source activate ldsc
for PHENO in 10 ; do
for cross in 1 2 3 4 5; do
let k=${k}+1
if [ ${k} -eq ${SLURM_ARRAY_TASK_ID} ]; then
# Merge per-chromosome GEMMA summary stats, then convert to LDSC format.
summ=/net/mulan/disk2/yasheng/out_sample/pheno${PHENO}/summ/summary_cross${cross}
cat ${summ}_chr*.assoc.txt > ${summ}.assoc.txt
Rscript ${mkldsc} --summgemma ${summ}.assoc.txt --summldsc ${summ}.ldsc
## summary data for ldsc
ref=/net/mulan/disk2/yasheng/sample500/ldsc/
h2=/net/mulan/disk2/yasheng/out_sample/pheno${PHENO}/herit/h2_cross${cross}
## heritability
# (this second 'source activate ldsc' is redundant — already activated above)
source activate ldsc
python ${ldsc} --h2 ${summ}.ldsc.gz --ref-ld-chr ${ref} --w-ld-chr ${ref} --out ${h2}
fi
done
done
| true
|
b9d85d4381e35570bcb3d982164c924a6fb3d28b
|
Shell
|
ziedbargaoui/simulators
|
/realtime-plane/run.sh
|
UTF-8
| 178
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Configure and build the project in ./build with CMake, then run the
# resulting Simulator binary.
if [ -d "build" ]; then
cd build || exit 1
# Stop early if configure or build fails instead of trying to run a
# stale or missing binary.
cmake .. && make || exit 1
# BUG FIX: the original tested '-d "Simulator"' (a *directory*); the
# build produces an executable file, so the run step never triggered.
# Test for an executable file instead.
if [ -x "./Simulator" ]; then
./Simulator
fi
else
echo "build folder doesn't exist"
exit 1
fi
| true
|
6a5f418d8ffec05d3d393adad292d604eae70640
|
Shell
|
yyhclimacool/misc
|
/src/thread_implementation/pstree.sh
|
UTF-8
| 125
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Show the process tree (threads and pids) of the running thread_test program.
# BUG FIX: the original 'ps -ef | grep thread_test | grep -v grep | awk ...'
# pipeline could return several pids; the unquoted test '[ ! -z $pid ]' then
# failed with "too many arguments". Use pgrep (-n = newest match, -f = match
# full command line) and quote the expansion.
pid=`pgrep -n -f thread_test`
if [ -n "$pid" ]; then
pstree -t -p "$pid"
fi
| true
|
2505807ee9fc8dd97ef94a3598b144b9b562dc27
|
Shell
|
Faronsince2016/config
|
/bin/ssh_virtualbox_ubuntu_1204.sh
|
UTF-8
| 1,823
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# ----------------------------------------
# Purpose
# ssh into the Ubuntu 12.04 guest running inside VirtualBox
#
# Tips:
# By default, a system installed in VirtualBox does not get an IP in the
# 192.168.1.X range after boot, but 10.0.2.X — i.e. the Mac OS host cannot
# ping that IP.
# This is because the virtual machine's default network mode is
# "Network Address Translation (NAT)".
# That mode is enough for normal web browsing, receiving mail and
# downloading files, but the guest cannot be reached by other machines.
# The fix is to change the connection mode to "Bridged Adapter" in the
# VirtualBox network settings.
# Note: different adapter parameters are needed for wired vs. wifi, and
# check whether the network cable is plugged in when the guest boots.
# Reboot the system, or just restart networking, to obtain a usable IP:
# $ sudo /etc/init.d/networking restart
#
# Bridged networking. Detailed references:
# http://www.virtualbox.org/manual/ch06.html
# http://stackoverflow.com/questions/5906441/how-to-ssh-to-a-virtualbox-ubuntu-guest-externally-through-a-windows-host
#
# Network Address Translation (NAT)
# If all you want is to browse the Web, download files and view e-mail inside the guest,
# then this default mode should be sufficient for you, and you can safely skip the rest
# of this section. Please note that there are certain limitations when using Windows file
# sharing (see the section called “NAT limitations” for details).
#
# Bridged networking
# This is for more advanced networking needs such as network simulations and running servers
# in a guest. When enabled, VirtualBox connects to one of your installed network cards and
# exchanges network packets directly, circumventing your host operating system's network stack.
# ----------------------------------------
echo "Password: password"
ssh zhongwei@192.168.1.154
| true
|
f7acea6c5432201fe4f85e56666f3b44f0d40427
|
Shell
|
echvsg/kubernetes-integration-test
|
/integration-test/run.sh
|
UTF-8
| 769
| 2.515625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Spin up the integration-test dependencies as Docker containers (a mock
# HTTP server, MariaDB, and JBoss A-MQ), boot the application under test in
# the background, run the Maven test suite against it, then tear it down.
# Remove any leftover containers from a previous run (ignore failures).
sudo docker rm -f mockserver
sudo docker rm -f mariadb
sudo docker rm -f amq
sudo docker run --name mockserver -d -p 1080:1080 docker.io/jamesdbloom/mockserver
sudo docker run --name mariadb -e MYSQL_USER=myuser -e MYSQL_PASSWORD=mypassword -e MYSQL_ROOT_PASSWORD=secret -e MYSQL_DATABASE=testdb -d -p 3306:3306 registry.access.redhat.com/rhscl/mariadb-102-rhel7
sudo docker run --name amq -e AMQ_USER=test -e AMQ_PASSWORD=secret -d -p 61616:61616 -p 8181:8181 registry.access.redhat.com/jboss-amq-6/amq63-openshift
# Fixed wait for the containers to come up.
# NOTE(review): a fixed sleep is racy; a readiness poll would be more robust.
sleep 10
sh sql/setup.sh
sh mockserver/setup.sh
# Launch the application under test in the background and remember its pid
# so it can be killed after the tests.
cd ../app-users
java -Dspring.profiles.active=k8sit -jar target/app-users-1.0-SNAPSHOT.jar &
pid=$!
cd -
export AMQ_USER=test
export AMQ_PASSWORD=secret
mvn clean test
kill $pid
| true
|
35f6549f4680a532feb44007d4e6f42bb4cbd315
|
Shell
|
swarren/rpi-dev-scripts
|
/firmware-to-any.sh
|
UTF-8
| 1,994
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Copyright (c) 2016 Stephen Warren <swarren@wwwdotorg.org>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Resolve this script's directory and load the shared defaults file.
script_dir=$(dirname "$(readlink -f "$0")")
. "${script_dir}/defaults"
# $1 selects the push backend (push-<dest_type>.py). $2 optionally overrides
# the backend parameter; otherwise it is read, via indirect expansion, from
# the variable named after the destination type (set in "defaults").
dest_type="$1"
if [ -n "$2" ]; then
param="$2"
else
param="${!dest_type}"
fi
# Stage the generated firmware tree in a private temp dir.
# (mktemp -d already creates the directory; the original's extra
# 'mkdir -p' was redundant and has been dropped.)
stage_dir="$(mktemp -d)"
"${script_dir}/firmware-gen.sh" "${stage_dir}"
ret=$?
# BUG FIX: the original tested '[ $? -eq 0 ]' here, but at that point $?
# held the (always 0) status of the 'ret=$?' assignment — so the push step
# ran even when firmware generation failed. Test the captured status.
if [ ${ret} -eq 0 ]; then
"${script_dir}/push-${dest_type}.py" "${param}" "rmlist:${stage_dir}/rmlist" "push:${stage_dir}/tree"
ret=$?
fi
rm -rf "${stage_dir}"
exit ${ret}
| true
|
94b46ca057bfe08275f10e4cb0c965793cc649ad
|
Shell
|
amoghe/bashext
|
/funcs/gm
|
UTF-8
| 1,365
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
# This file should be sourced since it expects to modify the calling shell environment
#
# Change go(lang) versions easily
# Debug logger. Output is discarded; delete the /dev/null redirect below
# to turn tracing on.
__gimme_debug() {
  echo "[DEBUG] $@" > /dev/null
}
# Error logger: prefix the message with [ERROR] and write it to stderr.
__gimme_error() {
  echo "[ERROR] $@" >&2
}
# Switch the active go(lang) version in the *calling* shell.
# $1 - go version to switch to (e.g. "1.12").
# Strategy: strip previous gimme entries from PATH, re-run gimme for the
# requested version, then source its env file into the current shell.
function gm() {
GIMME_PATH=`which gimme`
[ -z $1 ] && {
__gimme_error "ERROR: No golang version specified"
return
}
__gimme_debug "Switching go version to: $1"
# Rebuild PATH without any gimme-managed entries, unless a pristine PATH
# was saved in _CLEAN_PATH, in which case start from that.
new_path=""
if [[ -z "$_CLEAN_PATH" ]]; then
for p in `echo $PATH | tr ':' $'\n'`; do
if [[ "$p" != *"gimme"* ]]; then
if [[ "$new_path" == "" ]]; then
new_path="$p"
else
new_path="$new_path:$p"
fi
fi
done
else
# First export a clean PATH, next invoke gimme, which will prepend to PATH
new_path=$_CLEAN_PATH
fi
__gimme_debug "setting PATH as $new_path"
export PATH=$new_path
# Invoke gimme (not necessary if the go version is already installed)
GIMME_SILENT_ENV=1 $GIMME_PATH $1 2>&1 1>/dev/null
# Test if the env file exists
env_file="$HOME/.gimme/envs/go$1.env"
if [ ! -f "$env_file" ]; then
__gimme_error "ERROR: go version $1 isn't installed (no $env_file)"
return
fi
# Now source the file that will set up our env in the calling shell
__gimme_debug "Sourcing $env_file"
source "$env_file"
}
| true
|
3d5db964692f212caadf15194e15f859e4f020ba
|
Shell
|
ddarling85/workspace
|
/conatinerWork/elk-build-centos.sh
|
UTF-8
| 1,706
| 2.984375
| 3
|
[] |
no_license
|
# Provision a CentOS/RHEL host for an ELK stack: install Docker CE and
# docker-compose, prepare elasticsearch volume mounts and kernel settings,
# then clone the docker-elk compose repo.
# NOTE(review): these echo strings are single-quoted and echo is used
# without -e, so the '\n\n' sequences print literally.
echo '## Uninstall previous Docker version if necessary... \n\n'
sudo yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine -y
echo '## Install Docker requirements and adding repo... \n\n'
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
echo '## Install and enable Docker software... \n\n'
sudo yum install -y docker-ce docker-ce-cli containerd.io
sudo systemctl enable docker
sudo systemctl start docker
echo '## Add user to Docker group... \n\n'
# NOTE(review): group membership only takes effect on the next login session.
sudo usermod -aG docker $USER
echo '## Download docker-compose... \n\n'
sudo curl -L "https://github.com/docker/compose/releases/download/1.24.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
# docker-compose --version
echo '## Add required volume mount points for elasticsearch containers... \n\n'
# 1000:1000 is the uid/gid used inside the elasticsearch containers.
sudo mkdir -p /srv/elasticsearch/es01_logs /srv/elasticsearch/es01_data /srv/elasticsearch/es02_data /srv/elasticsearch/es02_logs
sudo chown -R 1000:1000 /srv/elasticsearch
echo '## Add required vm.max_map_count for elasticsearch containers... \n\n'
sudo sysctl -w vm.max_map_count=262144
sudo sysctl -p
echo '## Clone the ELK repo... \n\n'
git clone https://github.com/ddarling85/docker-elk-clone.git
# Restore an SELinux context on the cloned tree.
chcon -R system_u:object_r:admin_home_t:s0 docker-elk-clone/
cd docker-elk-clone
| true
|
6346b01acae4854c58d2faa52c46c9dd34f4c730
|
Shell
|
krisleech/tmuxinator
|
/templates/example.tmux.erb
|
UTF-8
| 497
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# ERB template (tmuxinator): expands into a shell function that boots a
# two-window tmux session for <%= @project_name %>, rooted at
# <%= @project_root %>. Window 0 runs vi, window 1 starts the app.
function start_<%= @project_name %>
{
  BASE="<%= @project_root %>"
  cd $BASE
  tmux start-server
  tmux new-session -d -s <%= @project_name %> -n editor
  tmux new-window -t <%= @project_name %>:1 -n shell
  tmux send-keys -t <%= @project_name %>:0 "cd $BASE; vi" C-m
  tmux send-keys -t <%= @project_name %>:1 "cd $BASE; env OPEN_BROWSER=yes script/start && clear" C-m
  tmux select-window -t <%= @project_name %>:0
  # attach-session blocks until the client detaches; script/stop then
  # shuts the project down.
  tmux attach-session -t <%= @project_name %>
  script/stop
}
| true
|
66bf91ef7a896345b70c43250d47c78647ffe4fb
|
Shell
|
corrieb/350vins
|
/pull-data-GT350.sh
|
UTF-8
| 173
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Thin wrapper over pull-data.sh preset for the GT350: fixed VIN prefix
# 1FA6P8JZ and plant/sequence code L555. Takes the 4-digit VIN range to pull.
usage_msg="Usage: ./pull-data-GT350.sh <from 4-digit VIN> <to 4-digit VIN>"
if [ "$#" -ne 2 ]; then
  echo "$usage_msg"
  exit 1
fi
./pull-data.sh "GT350" $1 $2 "1FA6P8JZ" "L555"
| true
|
c6cc5e4bc2e15436a81d4dd78ea0389b69e65a58
|
Shell
|
awinawin1/SoalShift_modul1_C15
|
/no4dec.sh
|
UTF-8
| 745
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Decrypt a Caesar-shifted syslog file. The files in syslog/ were encrypted
# with a shift equal to the hour encoded in the first two characters of the
# filename; decryption therefore rotates by (26 - hour).
# Present a numbered menu of the files in syslog/ and let the user pick one.
i=1
for syslog in syslog/*
do
echo "$i. ${syslog##*/}"
choose[i]=${syslog##*/}
i=$((i + 1))
done
echo "Please choose which one you want to decrypt(in number)>>"
read this
# hour = first two characters of the chosen filename, read as a 2-digit number.
# NOTE(review): assumes the filename starts with two digits in 00-26.
hour=$((${choose[this]:0:1}*10))
hour=$((hour+${choose[this]:1:1}))
echo $hour
# Decryption shift: rotating forward by (26 - hour) undoes the original shift.
dec=$((26-hour))
echo $dec
# The alphabet is doubled so that ${lowcase:dec:26} is a valid 26-letter
# rotated window for any dec in 0..26.
lowcase=abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
syslog=$(<syslog/"${choose[this]}")
#syslog="$syslog${choose[this]}"
#echo "$syslog"
syslog=$(echo "$syslog" | tr "${lowcase:0:26}" "${lowcase:${dec}:26}")
#echo "$syslog"
upcase=ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ
syslog=$(echo "$syslog" | tr "${upcase:0:26}" "${upcase:${dec}:26}")
#echo "$syslog"
#echo "$thishour"
# Write the decrypted text to dec/<name>.dec.
# NOTE(review): assumes the dec/ directory already exists.
echo "$syslog" > dec/"${choose[this]}.dec"
| true
|
6e87fe282d8841f0eac7b8a72ca2bfcd265196a5
|
Shell
|
ChrisCarini/dotfiles
|
/bin/macos_clear_purgable_space.sh
|
UTF-8
| 3,083
| 4.09375
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# TAKEN FROM: https://www.reddit.com/r/mac/comments/c8wacm/comment/esrvj6d/
# Modified because there were too many sleep statements between prints.
# Strategy: repeatedly copy large temp files until the disk is nearly full,
# which forces macOS to reclaim "purgeable" space; then delete the copies.
echo 'This script forces the macOS to purge any purge-able files.'
echo 'You can see the purge-able space by clicking "Get Info" on the main hard drive.'
echo
echo '[Notice] This could take a few minutes, depending on the speed of your disk.'
echo
echo 'Save any open documents and quit running applications now.'
echo
# Read one silent keystroke; Enter yields an empty $key.
read -n1 -s -r -p $'Press [Enter] to continue...\n' key
if [ "$key" != '' ]; then
    # Anything else pressed, do whatever else.
    # echo [$key] not empty
    echo 'Did not press enter. Exiting...'
    exit 1
fi
echo -e 'Once your disk is nearly full, you might see the following dialog box:'
echo -e '\t\"Your Disk space is critically low.\"'
echo
echo 'This is normal.'
echo
# Free space in GB: df reports 1M blocks, converted via bytes / 10^9.
startspace=$(($(df -m / | awk 'int($4){print $4}') * 1024 * 1024 / 1000000000))
echo "You currently have ${startspace} GB of available space."
echo 'Creating temporary files ["ClearPurgeableSpace" dir, 4G, 500MB & 100MB files] on desktop...'
mkdir -p ~/Desktop/ClearPurgeableSpace
mkfile 4G ~/Desktop/ClearPurgeableSpace/largefile4G
mkfile 500M ~/Desktop/ClearPurgeableSpace/largefile500M
mkfile 100M ~/Desktop/ClearPurgeableSpace/largefile100M
echo 'Done'
echo 'Now filling up available space... '
# diskspace / diskspace_new are free MB before/after each iteration; an
# increase between iterations means the OS just purged some space.
diskspace=$(($(df -m / | awk 'int($4){print $4}') * 1024 * 1024 / 1000000))
diskspace_new=0
while [ 0 ]; do
    if [ $diskspace_new -gt $diskspace ]; then
        echo 'Approx '$(($diskspace_new - $diskspace))' MB of purgeable space was just cleared! Continuing...'
    fi
    diskspace=$(($(df -m / | awk 'int($4){print $4}') * 1024 * 1024 / 1000000))
    echo "${diskspace} MB remaining, please wait..."
    # Copy progressively smaller files as free space shrinks; below 500 MB,
    # pause in 5/10/15 s steps ($waiting 1->4) to let the OS purge, then stop.
    # NOTE(review): if free space is already <= 500 MB on the first pass,
    # $waiting is unset and '[ $waiting -eq 1 ]' errors — TODO confirm/handle.
    if [ 8000 -lt $diskspace ]; then
        cp ~/Desktop/ClearPurgeableSpace/largefile4G{,"$(date)"} && sleep 1 && waiting=0
    elif [ 2000 -lt $diskspace ]; then
        cp ~/Desktop/ClearPurgeableSpace/largefile500M{,"$(date)"} && sleep 1 && waiting=0
    elif [ 800 -lt $diskspace ]; then
        cp ~/Desktop/ClearPurgeableSpace/largefile100M{,"$(date)"} && sleep 1 && waiting=0
    elif [ 500 -lt $diskspace ]; then
        cp ~/Desktop/ClearPurgeableSpace/largefile100M{,"$(date)"} && sleep 1 && waiting=1
    elif [ $waiting -eq 1 ]; then
        echo 'Pausing for 5 seconds to give OS time to purge, please wait...' && sleep 5 && waiting=2
    elif [ $waiting -eq 2 ]; then
        echo 'Pausing for 10 seconds to give OS time to purge, please wait...' && sleep 10 && waiting=3
    elif [ $waiting -eq 3 ]; then
        echo 'Pausing for 15 seconds to give OS time to purge, please wait...' && sleep 15 && waiting=4
    else
        break
    fi
    diskspace_new=$(($(df -m / | awk 'int($4){print $4}') * 1024 * 1024 / 1000000))
done
echo -e 'Purging complete. Clearing temporary files...'
rm -R ~/Desktop/ClearPurgeableSpace
echo 'All done! Your disk space has been reclaimed.'
endspace=$(($(df -m / | awk 'int($4){print $4}') * 1024 * 1024 / 1000000000))
echo 'You just recovered '$(($endspace - $startspace))' GB!'
echo 'You now have '$endspace' GB of free space.'
| true
|
19334dd52270fde5eafc6717bccce0e61c006b0f
|
Shell
|
SprPstn/Superposition
|
/dev/pipeline/twitter/logtoslack.sh
|
UTF-8
| 470
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Follow a log file and post matching lines to a Slack incoming webhook.
# $1 - log file to follow, $2 - webhook URL, $3 - grep pattern to match.
# NOTE(review): grep also prints the matched line to stdout (no -q), which
# doubles as console output — presumably intentional; confirm before changing.
# NOTE(review): 'read' without -r mangles backslashes in log lines.
tail -n0 -F "$1" | while read LINE; do
  (echo "$LINE" | grep -e "$3") && curl -X POST --silent --data-urlencode \
    "payload={\"text\": \"$(echo $LINE | sed "s/\"/'/g")\"}" "$2";
done
#To use this script, save it as an executable script and simply pass the path to the log file and a webhook url to this script.
#./tail-slack.sh "file.log" "https://hooks.slack.com/services/...";
#1
#./tail-slack.sh "file.log" "https://hooks.slack.com/services/...";
| true
|
61e484aa8c36d7a0465c8d7c4b2b7e55dce06e8e
|
Shell
|
kwkroeger/Dotfiles
|
/bashrc
|
UTF-8
| 1,095
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive bash configuration: prompt, completion, history, shell options.
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# Re-exec into bash as a login shell if we were started under another shell.
if [ "$SHELL" != "/bin/bash" ]; then
  export SHELL=/bin/bash
  exec /bin/bash -l
fi
export PS1="\[\e[0;91m\]\u\[\e[0;36m\]@\[\e[0;91m\]\h\[\e[0m\] \[\e[0;36m\]\W\[\e[0m\] \[\e[1;91m\]:\[\e[0m\]"
## COMPLETION ##
complete -cf sudo
# BUG FIX: 'set completion-ignore-case on' only reset the shell's positional
# parameters; a readline variable must be set through the 'bind' builtin.
bind "set completion-ignore-case on" 2>/dev/null
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
  . /etc/bash_completion
fi
if [ -d /usr/local/etc/bash_completion.d ] && ! shopt -oq posix; then
  for completion in /usr/local/etc/bash_completion.d/*; do
    # BUG FIX: the loop previously sourced the literal path
    # '/usr/local/etc/bash_completion.d/completion' on every iteration
    # instead of the loop variable "$completion".
    . "$completion"
  done
fi
## HISTORY ##
export HISTCONTROL='ignoredups:ignoreboth'
export HISTSIZE=16384
export HISTFILESIZE=$HISTSIZE
export HISTTIMEFORMAT="%a %F %r "
export HISTIGNORE='&:[ ]*:clear:exit'
# Flush and re-read the history file after each command so sessions share it.
export PROMPT_COMMAND='history -a; history -r'
shopt -s cmdhist
shopt -s histappend
## MAIL ##
shopt -u mailwarn
## TERMINAL ##
export TERM=xterm-256color
force_color_prompt=yes
shopt -s cdspell
shopt -s checkwinsize
shopt -s nocaseglob
## SHARED ##
source "$HOME/.shared"
cd $HOME
| true
|
0fcf93b062415a457327e69b94da570ab368a12c
|
Shell
|
nusense/runartg4tk
|
/scripts/genana_g4vmp_proclevel_condor.sh
|
UTF-8
| 40,439
| 2.90625
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
#
# A script for running artg4tk in the Geant4 Varied Model Parameter environment
# see: https://cdcvs.fnal.gov/redmine/projects/g4mps/wiki/Phase2_App_01052016
#
##############################################################################
# Top-level defaults. Every value below is exported so child processes see
# it, and most can be overridden by the command-line flags parsed later in
# process_args (see usage() for the flag list).
export THISFILE="$0"
export b0=`basename $0`
export SCRIPT_VERSION=2019-02-05
echo "script version ${SCRIPT_VERSION} ${THISFILE}"
#
#export TARBALL_DEFAULT_DIR=/pnfs/geant4/persistent/rhatcher
export TARBALL_DEFAULT_DIR=/pnfs/geant4/resilient/rhatcher
###
#export TARBALL=localProducts_runartg4tk_v0_03_00_e15_prof_2018-03-21.tar.bz2
#export TARBALL=localProducts_runartg4tk_v0_03_00_e15_prof_2018-04-05.tar.bz2
#export TARBALL=localProducts_runartg4tk_v09_00_00_e17_prof_2019-01-04.tar.bz2
#export TARBALL=localProducts_runartg4tk_v09_00_00_e17_prof_2019-01-10.tar.bz2
export TARBALL=localProducts_runartg4tk_v09_00_00_e17_prof_2019-02-05.tar.bz2
# unrolls to localProducts_runartg4tk_v9_00_00_e17_prof/...
export TARBALL_DOSSIER=dossier_files.2018-12-13.tar.gz
# 0 = use dossier directly
# 1 = try to get tarball, unroll locally (fallback to 2)
# 2 = use home.fnal.gov/~rhatcher
export UNROLLED_DOSSIER=1
# these need process_args opts
export BOOTSTRAP_SCRIPT="bootstrap_ups.sh"
# semicolon separated things to setup after unrolling tarball
# BASE=from tarball name
# product%version%qualifiers
export SETUP_PRODUCTS="BASE;ifdhc%%"
# not necessary: export SETUP_PRODUCTS="artg4tk%v09_00_00%e17:prof;BASE;ifdhc%%"
# how verbose this script should be
export VERBOSE=0
# if not running on condor worker node, use available scratch space
# non-condor jobs can override with --scratch flag
### apparently /scratch isn't writable [ugh]
### export FAKESCRATCHBASE=/scratch/${USER}
export FAKESCRATCHBASE=/geant4/data/${USER}
# don't change these
export USINGFAKESCRATCH=0
export KEEPSCRATCH=1
# actually run the art command?
export RUNART=1
# base directory for returning results
# art files with s$persistent$scratch$g
export OUTPUTTOP="/pnfs/geant4/persistent/rhatcher/genana_g4vmp"
# output of running art in separate .out and .err files?
export REDIRECT_ART=1
export UPS_OVERRIDE="-H Linux64bit+2.6-2.12"
#### a particular choice for defaults
export MULTIVERSE=multiverse181212_Bertini # e.g. (fcl base)
export MULTI_UNIVERSE_SPEC="${MULTIVERSE},0,10" # each job (process) in cluster does 10 universes, start 0
export G4HADRONICMODEL=Bertini # e.g. Bertini
export UBASE=0 # 0=Default, others are RandomUniv[xxxx]
export USTRIDE=1 # how many to do in this $PROCESS unit, 10
export PROBE=piminus # allow either name or pdg
export P3=0,0,5.0 # GeV
#export PROBEP=5.0 # e.g. 5.0 6.5 # (GeV)
export TARGET=Cu # e.g. Cu
export NEVENTS=5000000 # e.g. 500,000, 500000 -> 5000000
export JOBOFFSET=0 # offset PROCESS # by this
# not yet supported ...
export PASS=0 # allow for multiple passes (more events) of same config
# though this requires summming _art_ files, and doing analysis again
export DOSSIER_LIST="HARP,ITEP"
export G4VERBOSITY=0
export RNDMSEED_SPECIAL=123456789
export RNDMSEED=${RNDMSEED_SPECIAL} # will override w/ JOBID number or command line flag
export JOBIDOFFSET=0
#
##############################################################################
# Print the help text (to stderr) describing every supported flag and its
# current/default value. The heredoc is unquoted on purpose so the ${...}
# defaults expand at call time.
function usage() {
cat >&2 <<EOF
Purpose: Run 'artg4tk' w/ the Geant4 Varied Model Parameter setup
version ${SCRIPT_VERSION}
${b0} --output <output-path> [other options]
-h | --help this helpful blurb
-v | --verbose increase script verbosity
-o | --output <path> path to top of output area
(creates subdir for the results)
[${OUTPUTTOP}]
-n | --nevents <nevents> # of events to generate (single job) [${NEVENTS}]
-u | --universes <fname>,[ubase=0],[ustride=10]
ART PROLOG file with multiple universes
[/path/]filename[,ubase[,ustride]]
umin = ubase + \${PROCESS}*ustride
umax = umin + ustride - 1
[${MULTI_UNIVERSE_SPEC}]
( so for 1000 universes, use cluster -N 100 and 0,10 here )
-p | --physics <model> G4 Hadronic Model name [${G4HADRONICMODEL}]
-t | --target <nucleus> target nucleus element (e.g. "Pb") [${TARGET}]
-c | --pdg | --probe <code> incident particle pdg code [${PROBE}]
--p3 <px,py,pz> incident particle 3-vector
-z | --pz <pz> incident particle p_z (p_x=p_y=0)
[ ${PROBE_PX}, ${PROBE_PY}, ${PROBE_PZ} ] // in GeV/c
--g4verbose <int> set G4 verbosity [${G4VERBOSITY}]
--seed <int-val> explicitly set random seed
(otherwise based on PASS)
-x | --pass <int> set pass (default 0)
-T | --tarball <tball> name of tarball to fetch
-P | --pname <pname> ART process name [${ARTPNAME}]
Experts:
--scratchbase <path> if \${_CONDOR_SCRATCH_DIR} not set (i.e. not
running as a condor job on a worker node)
then try creating an area under here
[${FAKESCRATCHBASE}]
--keep-scratch don't delete the contents of the scratch
area when using the above [${KEEPSCRATCH}]
--no-redirect-output default is to redirect ART output to .out/.err
if set then leave them to stdout/stderr
--no-art-run skip the actual running of ART executable
--debug set verbose=999
EOF
}
#
##############################################################################
# Parse command-line flags (GNU getopt), validate required inputs, and derive
# job parameters: the universe range for this condor process, the normalized
# probe name/pdg code, and the probe momentum components and magnitude.
# Exits 42 on invalid input; calls usage() when help is requested or on error.
function process_args() {
PRINTUSAGE=0
# Pre-scan for --trace/--debug so they take effect before getopt runs.
DOTRACE=`echo "$@" | grep -c -- --trace`
ISDEBUG=`echo "$@" | grep -c -- --debug`
if [ $DOTRACE -gt 0 ]; then set -o xtrace ; fi
if [ $ISDEBUG -gt 0 ]; then VERBOSE=999 ; fi
if [ $ISDEBUG -gt 0 ]; then echo "pre-getopt \$#=$# \$@=\"$@\"" ; fi
# longarg "::" means optional arg, if not supplied given as null string
# use this for targfile lowth peanut
TEMP=`getopt -n $0 -s bash -a \
--longoptions="help verbose output: \
nevts: nevents: \
universe: universes: physics: model: hadronic: \
target: pdg: probe: p3: pz: g4verbose: seed: joboffset: \
tarball: pname: pass: \
scratchbase: keep-scratch \
no-redirect-output no-art-run no-run-art skip-art \
debug trace" \
-o hvo:n:u:p:m:t:c:z:j:x:T:P:-: -- "$@" `
eval set -- "${TEMP}"
if [ $ISDEBUG -gt 0 ]; then echo "post-getopt \$#=$# \$@=\"$@\"" ; fi
unset TEMP
let iarg=0
while [ $# -gt 0 ]; do
let iarg=${iarg}+1
if [ $VERBOSE -gt 2 ]; then
printf "arg[%2d] processing \$1=\"%s\" (\$2=\"%s\")\n" "$iarg" "$1" "$2"
fi
case "$1" in
"--" ) shift; break ;;
-h | --help ) PRINTUSAGE=1 ;;
-v | --verbose ) let VERBOSE=${VERBOSE}+1 ;;
#
-o | --out* ) export OUTPUTTOP="$2"; shift ;;
-n | --nev* ) export NEVENTS="$2"; shift ;;
-u | --univ* ) export MULTI_UNIVERSE_SPEC="$2"; shift ;;
-p | --physics | \
-m | --model | \
--hadronic ) export G4HADRONICMODEL="$2"; shift ;;
-t | --target ) export TARGET="$2"; shift ;;
-c | --pdg | --probe ) export PROBE="$2"; shift ;;
--p3 ) export P3="$2"; shift ;;
-z | --pz ) export P3="0,0,$2"; shift ;;
--g4verbose ) export G4VERBOSE="$2"; shift ;;
--seed ) export RNDMSEED="$2"; shift ;;
-j | --joboffset ) export JOBOFFSET="$2"; shift ;;
-x | --pass ) export PASS="$2"; shift ;;
#
-T | --tarball ) export TARBALL="$2"; shift ;;
-P | --pname ) export ARTPNAME="$2"; shift ;;
#
--scratch* ) export FAKESCRATCHBASE="$2"; shift ;;
--keep-scratch ) export KEEPSCRATCH=1; ;;
--no-redir* ) export REDIRECT_ART=0; ;;
--no-art* | \
--no-run-art | \
--skip-art ) export RUNART=0; ;;
--debug ) export VERBOSE=999 ;;
--trace ) export DOTRACE=1 ;;
# NOTE(review): $opt is never assigned in this function, so the message
# below prints an empty value; "$1" carries the actual unknown flag.
-* ) echo "unknown flag $opt ($1)"
usage
;;
esac
shift # eat up the arg we just used
done
usage_exit=0
# must have a tarball
# but don't check if user asked for --help
if [ ${PRINTUSAGE} == 0 ]; then
if [[ -z "${TARBALL}" ]]
then
echo -e "${OUTRED}You must supply values for:${OUTNOCOL}"
echo -e "${OUTRED} --tarball ${OUTNOCOL}[${OUTGREEN}${TARBALL}${OUTNOCOL}]"
usage_exit=42
fi
fi
# figure out which universes
if [ -z "${PROCESS}" ]; then PROCESS=0; fi # not on the grid as a job
let JOBID=${PROCESS}+${JOBOFFSET}
export JOBID
# Parse "file[,ubase[,ustride]]" — this job handles universes
# [JOBID*ustride+ubase, JOBID*ustride+ubase+ustride-1].
# convert spaces, tabs, [semi]colons to commas
MULTI_UNIVERSE_SPEC=`echo ${MULTI_UNIVERSE_SPEC} | tr " \t:;" ","`
MULTI_UNIVERSE_SPEC="${MULTI_UNIVERSE_SPEC},,,"
MULTIVERSE_BASE=`echo ${MULTI_UNIVERSE_SPEC} | cut -d',' -f1 ` #
MULTIVERSE_BASE=`basename ${MULTIVERSE_BASE} .fcl` # to be found in tarball w/ .fcl extension
MULTIVERSE_FILE=${MULTIVERSE_BASE}.fcl
UBASE=`echo ${MULTI_UNIVERSE_SPEC} | cut -d',' -f2`
# if uspecified UBASE=0
if [ -z "${UBASE}" ]; then UBASE=0; fi
USTRIDE=`echo ${MULTI_UNIVERSE_SPEC} | cut -d',' -f3`
# if unspecified USTRIDE=10
if [ -z "${USTRIDE}" ]; then USTRIDE=10; fi
let UNIV_FIRST=${JOBID}*${USTRIDE}+${UBASE} # 0 based counting
let UNIV_LAST=${UNIV_FIRST}+${USTRIDE}-1
echo -e "${OUTCYAN}PROCESS=${PROCESS} (JOBOFFSET=${JOBOFFSET}) use UNIVERSES [${UNIV_FIRST}:${UNIV_LAST}]${OUTNOCOL}"
# noramlize probe specified by user ...
export PROBENAME=XYZZY # e.g. piplus, piminus, proton
export PROBEPDG=XYZZY # e.g. 211, -211, 2212
case ${PROBE} in
11 | eminus ) export PROBEPDG=11 ; export PROBENAME=eminus ;;
-11 | eplus ) export PROBEPDG=-11 ; export PROBENAME=eplus ;;
13 | muminus ) export PROBEPDG=13 ; export PROBENAME=muminus ;;
-13 | muplus ) export PROBEPDG=-13 ; export PROBENAME=muplus ;;
111 | pizero ) export PROBEPDG=111 ; export PROBENAME=pizero ;;
211 | piplus ) export PROBEPDG=211 ; export PROBENAME=piplus ;;
-211 | piminus ) export PROBEPDG=-211 ; export PROBENAME=piminus ;;
130 | kzerolong ) export PROBEPDG=130 ; export PROBENAME=kzerolong ;;
311 | kzero ) export PROBEPDG=311 ; export PROBENAME=kzero ;;
321 | kplus ) export PROBEPDG=321 ; export PROBENAME=kplus ;;
-321 | kminus ) export PROBEPDG=-321 ; export PROBENAME=kminus ;;
2212 | proton ) export PROBEPDG=2212 ; export PROBENAME=proton ;;
2112 | neutron ) export PROBEPDG=2112 ; export PROBENAME=neutron ;;
* )
echo -e "${OUTRED}bad PROBE=${PROBE}${OUTNOCOL}" ; exit 42 ;;
esac
# normalize momentum is user set
# turn most punctuation (except ".") into space
# strip leading space
if [ -n "${P3}" ]; then
#echo "initial P3 ${P3}"
P3=`echo "${P3},0.0,0.0,0.0" | tr "\[\],:;\"\t" " " | sed -e 's/^ *//' `
#echo "final ${P3}"
export PROBE_PX=`echo ${P3} | cut -d' ' -f1`
export PROBE_PY=`echo ${P3} | cut -d' ' -f2`
export PROBE_PZ=`echo ${P3} | cut -d' ' -f3`
if [ ${VERBOSE} -gt 0 ]; then
echo -e "${OUTGREEN}using px py pz: ${PROBE_PX} ${PROBE_PY} ${PROBE_PZ} ${OUTNOCOL}"
fi
else
if [ -n "${PROBEP}" ]; then
export PROBE_PX=0
export PROBE_PY=0
export PROBE_PZ=${PROBEP}
else
echo -e "${OUTRED}no \${P3} or \${PROBEP} given ${OUTNOCOL}"
usage_exit=42
fi
fi
if [ ${usage_exit} -eq 0 ]; then
# calculate projectile total momentum
px=${PROBE_PX}
py=${PROBE_PY}
pz=${PROBE_PZ}
# calculate momentum ... 1 digit after decimal point
probepcalc="sqrt(($px*$px)+($py*$py)+($pz*$pz))"
export PROBEP5=`echo "scale=5; ${probepcalc}" | bc`
echo -e "${OUTGREEN}bc scale=5; ${probepcalc} ==> ${PROBEP5}${OUTNOCOL}"
# 1 or 2 digits after the decimal point (printf should round for us)
# but strip trailing 0's, and no bare trail .'s
export PROBEP=`printf "%0.2f" ${PROBEP5} | sed -e 's/0*$//' -e 's/\.$//' `
echo -e "${OUTGREEN}calculated \${PROBEP}=${PROBEP} GeV${OUTNOCOL}"
unset px py pz
fi
# PROBEPNODOT e.g. 5 6p5
# used in dossier PROLOGs, should be no trailing 'p0's
PROBEPNODOT=`echo ${PROBEP} | sed -e 's/\./p/' `
echo -e "${OUTGREEN}\${PROBE}=${PROBE} : normalized \${PROBENAME}=${PROBENAME} \${PROBEPDG}=${PROBEPDG}${OUTNOCOL}"
echo -e "${OUTGREEN}\${PROBEP}=${PROBEP} \${PROBENODOT}=${PROBEPNODOT}${OUTNOCOL}"
# show the defaults correctly now
if [ $PRINTUSAGE -gt 0 -o ${usage_exit} -ne 0 ]; then
echo " "
usage
if [ $PRINTUSAGE -gt 1 ]; then
extended_help
fi
exit ${usage_exit}
fi
# any left over non-flag args
export OPTARGS="$@"
if [ ${VERBOSE} -gt 2 ]; then
echo "OPTARGS=${OPTARGS}"
fi
}
##############################################################################
# Fetch the UPS-products tarball (and optional dossier tarball), unroll it,
# source its bootstrap script, and 'setup' the products listed in
# ${SETUP_PRODUCTS}. Also decides which copy/mkdir commands (cp vs ifdh)
# later steps should use. Exits 42 if the bootstrap script is missing.
function fetch_setup_tarball() {
# fetch the tarball, use it to setup environment including its own products
# full path given ??
export TARBALL_IN=${TARBALL}
# If TARBALL is a bare filename, prepend the default PNFS directory.
c1=`echo ${TARBALL_IN} | cut -c1`
if [ "$c1" != "/" ]; then TARBALL=${TARBALL_DEFAULT_DIR}/${TARBALL_IN} ; fi
TARBALL_BASE=`basename ${TARBALL}`
# if we can see it then use cp, otherwise "ifdh cp"
if [ -f ${TARBALL} ]; then
CP_CMD="cp"
else
which_ifdh=`which ifdh 2>/dev/null`
if [ -z "${which_ifdh}" ]; then
source /cvmfs/fermilab.opensciencegrid.org/products/common/etc/setup
setup ifdhc
fi
CP_CMD="ifdh cp"
export IFDH_CP_MAXRETRIES=1 # 8 is crazytown w/ exponential backoff
fi
echo ""
# local dossier is "optional"
if [ ${UNROLLED_DOSSIER} -eq 1 ]; then
# full path given ??
c1=`echo ${TARBALL_DOSSIER} | cut -c1`
if [ "$c1" != "/" ]; then TARBALL_DOSSIER=${TARBALL_DEFAULT_DIR}/${TARBALL_DOSSIER} ; fi
TARBALL_DOSSIER_BASE=`basename ${TARBALL_DOSSIER}`
echo -e "${OUTGREEN}tarball: ${TARBALL_DOSSIER}${OUTNOCOL}"
echo -e "${OUTGREEN}base: ${TARBALL_DOSSIER_BASE}${OUTNOCOL}"
echo -e "${OUTGREEN}${CP_CMD} ${TARBALL_DOSSIER} ${TARBALL_DOSSIER_BASE}${OUTNOCOL}"
${CP_CMD} ${TARBALL_DOSSIER} ${TARBALL_DOSSIER_BASE}
echo "${CP_CMD} status $?"
if [ ! -f ${TARBALL_DOSSIER_BASE} ]; then
echo -e "${OUTRED}failed to fetch: ${TARBALL_DOSSIER_BASE}${OUTNOCOL}"
else
# Pick the tar decompression flag from the file extension.
case ${TARBALL_DOSSIER_BASE} in
*.gz | *.tgz ) TAR_OPT="z" ;;
*.bz2 ) TAR_OPT="j" ;;
* ) echo -e "${OUTRED}neither .gz nor .bz2 file extension: ${TARBALL_DOSSIER_BASE}${OUTNOCOL}"
TAR_OPT="z" ;;
esac
# unroll
echo -e "${OUTCYAN}tar x${TAR_OPT}f ${TARBALL_DOSSIER_BASE}${OUTNOCOL}"
tar x${TAR_OPT}f ${TARBALL_DOSSIER_BASE}
if [ $? -ne 0 ]; then
# failed to unroll, use fallback approach
export UNROLLED_DOSSIER=2
fi
fi
fi
# now the real important tarball w/ UPS product
echo -e "${OUTGREEN}in as: ${TARBALL_IN}${OUTNOCOL}"
echo -e "${OUTGREEN}tarball: ${TARBALL}${OUTNOCOL}"
echo -e "${OUTGREEN}base: ${TARBALL_BASE}${OUTNOCOL}"
echo -e "${OUTGREEN}${CP_CMD} ${TARBALL} ${TARBALL_BASE}${OUTNOCOL}"
${CP_CMD} ${TARBALL} ${TARBALL_BASE}
echo "${CP_CMD} status $?"
ls -l
if [ ! -f ${TARBALL_BASE} ]; then
echo -e "${OUTRED}failed to fetch: ${TARBALL_BASE}${OUTNOCOL}"
fi
case ${TARBALL_BASE} in
*.gz | *.tgz ) TAR_OPT="z" ;;
*.bz2 ) TAR_OPT="j" ;;
* ) echo -e "${OUTRED}neither .gz nor .bz2 file extension: ${TARBALL_BASE}${OUTNOCOL}"
TAR_OPT="z" ;;
esac
echo -e "${OUTCYAN}looking for ${BOOTSTRAP_SCRIPT}${OUTNOCOL}"
# expect to find script "${BOOTSTRAP_SCRIPT}"
# The top-level directory of the tarball becomes the local products area.
bootscript=`tar t${TAR_OPT}f ${TARBALL_BASE} | grep ${BOOTSTRAP_SCRIPT} | tail -1`
localarea=`echo ${bootscript} | cut -d'/' -f1`
echo -e "${OUTGREEN}bootscript=${bootscript}${OUTNOCOL}"
echo -e "${OUTGREEN}localarea=${localarea}${OUTNOCOL}"
# unroll
echo -e "${OUTCYAN}tar x${TAR_OPT}f ${TARBALL_BASE}${OUTNOCOL}"
tar x${TAR_OPT}f ${TARBALL_BASE}
if [ -z "${bootscript}" -o ! -f ${bootscript} ]; then
echo -e "${OUTRED}no file ${bootscript} (${BOOTSTRAP_SCRIPT}) in tarball ${TARBALL}${OUTNOCOL}"
exit 42
fi
source ${bootscript}
export PRODUCTS=`pwd`/${localarea}:${PRODUCTS}
# Setup each requested product. "BASE" means: derive product name, version
# and qualifiers from the unrolled tarball's directory name; otherwise the
# entry is "product%version%qualifiers".
for prd in `echo ${SETUP_PRODUCTS} | tr ';' ' '` ; do
if [ -z "$prd" ]; then continue; fi
if [ "$prd" == "BASE" ]; then
PROD=`echo ${localarea} | cut -d'_' -f2`
VERSIONS=`ls -1 ${localarea}/${PROD} | grep -v version `
for vtest in ${VERSIONS} ; do
if [[ ${localarea} =~ .*${PROD}_${vtest}_.* ]]; then
VERS=$vtest
break
fi
done
QUAL=`echo ${localarea} | sed -e "s/${PROD}_${VERS}/ /g" | cut -d' ' -f2 | tr '_' ':'`
else
PROD=`echo ${prd}%% | cut -d "%" -f1`
VERS=`echo ${prd}%% | cut -d "%" -f2`
QUAL=`echo ${prd}%% | cut -d "%" -f3`
fi
if [ -n "${QUAL}" ]; then
echo -e "${OUTCYAN}setup ${PROD} ${VERS} -q ${QUAL}${OUTNOCOL}"
setup ${PROD} ${VERS} -q ${QUAL}
else
echo -e "${OUTCYAN}setup ${PROD} ${VERS}${OUTNOCOL}"
setup ${PROD} ${VERS}
fi
done
export MYMKDIRCMD="ifdh mkdir_p"
export MYCOPYCMD="ifdh cp"
# IFDH_CP_MAXRETRIES: maximum retries for copies on failure -- defaults to 7
export IFDH_CP_MAXRETRIES=2 # 7 is silly
# if STDOUT is a tty, then probably interactive use
# avoid the "ifdh" bugaboo I'm having testing interactively
if [ -t 1 ]; then
export MYMKDIRCMD="mkdir -p"
export MYCOPYCMD="cp"
fi
echo -e "${OUTGREEN}using \"${MYCOPYCMD}\" for copying${OUTNOCOL}"
echo -e "${OUTGREEN}using \"${MYMKDIRCMD}\" for mkdir${OUTNOCOL}"
}
#
##############################################################################
##############################################################################
# fetch_file: copy one file, given by a path relative to ${OUTPUTTOP}, into
# the current working directory using ${MYCOPYCMD} ("cp" or "ifdh cp").
# Arguments:
#   $1 - file path relative to ${OUTPUTTOP}
# Side effects: creates ./<basename> on success; diagnostics go to stdout.
function fetch_file() {
  # make local copies of files from (presumably) PNFS
  # relative to ${OUTPUTTOP}
  fgiven=$1
  # bug fix: was `basename $fgive` (undefined variable) which left fbase
  # empty, so the "-f" test below degenerated to a single-argument test
  # that is always true and the copy never ran
  fbase=`basename $fgiven`
  fsrc=${OUTPUTTOP}/${fgiven}
  if [ -f ${fbase} ]; then
    # close the color so OUTRED does not leak into subsequent output
    echo -e "${OUTRED}${fbase} already exists locally ... odd${OUTNOCOL}"
  else
    ${MYCOPYCMD} ${fsrc} ${fbase}
    status=$?
    if [[ ${status} -ne 0 || ! -f ${fbase} ]]; then
      echo -e "${OUTRED}copy of ${fsrc} failed status=${status}${OUTNOCOL}"
    fi
  fi
}
##############################################################################
function make_genana_fcl() {
echo -e "${OUTCYAN}`pwd`${OUTNOCOL}"
echo -e "${OUTCYAN}creating ${CONFIGFCL}${OUTNOCOL}"
export CONFIGFCL=${CONFIGBASE}.fcl
# needs to loop over universes
cat > ${CONFIGFCL} <<EOF
# this is ${CONFIGFCL}
#
# MULTIVERSE e.g. multiverse181212_Bertini (fcl base) [${MULTIVERSE}]
# G4HADRONICMODEL e.g. Bertini [${G4HADRONICMODEL}]
# PROBENAME e.g. piplus, piminus, proton [${PROBENAME}]
# PROBEPDG e.g. 211, -211, 2212 [${PROBEPDG}]
# PROBEP e.g. 5.0 6.5 # (GeV) [${PROBEP}]
# PROBEPNODOT e.g. 5 6p5 # used in dossier PROLOGs,
# # but, no trailing 'p0's [${PROBEPNODOT}]
# TARGET e.g. Cu [${TARGET}]
# NEVENTS e.g. 5000 [${NEVENTS}]
##### these are cases where both HARP & ITEP have data
##### otherwise we _have_ to generate fcl file based on which expt has data
# 2 piminus_on_C_at_5GeV
# 2 piminus_on_Cu_at_5GeV
# 2 piminus_on_Pb_at_5GeV
# 2 piplus_on_C_at_3GeV
# 2 piplus_on_C_at_5GeV
# 2 piplus_on_Cu_at_3GeV
# 2 piplus_on_Cu_at_5GeV
# 2 piplus_on_Pb_at_3GeV
# 2 piplus_on_Pb_at_5GeV
#####
#include "${MULTIVERSE}.fcl"
#include "HARP_dossier.fcl"
#include "ITEP_dossier.fcl"
process_name: genanaX${CONFIGBASESMALL}
source: {
module_type: EmptyEvent
maxEvents: ${NEVENTS}
} # end of source:
services: {
message: {
debugModules : ["*"]
suppressInfo : []
destinations : {
LogToConsole : {
type : "cout"
threshold : "DEBUG"
categories : { default : { limit : 50 } }
} # end of LogToConsole
} # end of destinations:
} # end of message:
RandomNumberGenerator: {}
TFileService: {
fileName: "${CONFIGBASE}.hist.root"
}
ProcLevelSimSetup: {
HadronicModelName: "${G4HADRONICMODEL}"
TargetNucleus: "${TARGET}"
RNDMSeed: 1
}
# leave this on ... documentation of what was set
PhysModelConfig: { Verbosity: true }
} # end of services:
outputs: {
outroot: {
module_type: RootOutput
fileName: "${CONFIGBASE}.artg4tk.root"
}
} # end of outputs:
physics: {
producers: {
PrimaryGenerator: {
module_type: EventGenerator
nparticles : 1
pdgcode: ${PROBEPDG}
momentum: [ 0.0, 0.0, ${PROBEP} ] // in GeV
}
EOF
for univ in ${UNIVERSE_NAMES}; do
printf " %-25s : @local::%s\n" ${univ} ${univ} >> ${CONFIGFCL}
done
cat >> ${CONFIGFCL} << EOF
} # end of producers:
analyzers: {
EOF
for univ in ${UNIVERSE_NAMES}; do
for expt in ${EXPT_MATCH}; do
cat >> ${CONFIGFCL} << EOF
${univ}${expt}:
{
module_type: Analyzer${expt}
ProductLabel: "${univ}"
IncludeExpData:
{
DBRecords: @local::${expt}_${EXPTSETUP_BASE}
EOF
if [ ${UNROLLED_DOSSIER} -eq 1 ]; then
cat >> ${CONFIGFCL} << EOF
# local dossier files
BaseURL: "./dossier_files"
DictQuery: "/dictionary/dictionary_%s.json"
RecQuery: "/records/rec_%06d.json"
EOF
fi
if [ ${UNROLLED_DOSSIER} -eq 2 ]; then
cat >> ${CONFIGFCL} << EOF
# local dossier files
BaseURL: "http://home.fnal.gov/~rhatcher/dossier_files"
DictQuery: "/dictionary/dictionary_%s.json"
RecQuery: "/records/rec_%06d.json"
EOF
fi
cat >> ${CONFIGFCL} << EOF
}
}
EOF
done
done
cat >> ${CONFIGFCL} << EOF
} # end of analyzers:
path1: [ PrimaryGenerator
EOF
for univ in ${UNIVERSE_NAMES}; do
printf " , %s\n" ${univ} >> ${CONFIGFCL}
done
cat >> ${CONFIGFCL} << EOF
] // end-of path1
path2: [
EOF
char=" "
for univ in ${UNIVERSE_NAMES}; do
for expt in ${EXPT_MATCH}; do
printf " %s %s\n" "$char" ${univ}${expt} >> ${CONFIGFCL}
char=","
done
done
cat >> ${CONFIGFCL} << EOF
] // end-of path2
stream1: [ outroot ]
trigger_paths: [ path1 ]
end_paths: [ path2, stream1 ]
} # end of physics:
EOF
}
#
##############################################################################
##############################################################################
# print_universe_names: show ${UNIVERSE_NAMES} in green; long lists
# (${NUNIV} >= 20) are abbreviated to their first and last five entries.
function print_universe_names() {
  echo ""
  if [ $NUNIV -ge 20 ]; then
    # the quotes around ${UNIVERSE_NAMES} preserve its embedded newlines
    echo -e "${OUTGREEN} (first and last 5):"
    echo "${UNIVERSE_NAMES}" | head -n 5
    echo "..."
    echo "${UNIVERSE_NAMES}" | tail -n 5
  else
    echo -e "${OUTGREEN}${UNIVERSE_NAMES}"
  fi
  echo -e "${OUTNOCOL}"
}
#
##############################################################################
##############################################################################
# infer_universe_names: extract the universe (variation) labels from
# ${LOCAL_MULTIVERSE_FILE} and clamp/trim the selection to the requested
# [UNIV_FIRST:UNIV_LAST] window (0-based).
# Reads:   LOCAL_MULTIVERSE_FILE, VERBOSE, UNIV_FIRST, UNIV_LAST
# Exports: UNIVERSE_NAMES (newline-separated), NUNIV, MINU, MAXU,
#          UNIVERSE_NAMES_FULL (when trimmed)
function infer_universe_names() {
  #
  # create a list of universe names based on configurations in the
  # ${MULTI_UNIVERSE_FILE}, entries in which should look like:
  #
  # <label> : {
  #   module_type: ProcLevelMPVaryProducer
  #   ...
  # }
  #
  # each of those <label>s is a "universe" ... we need their names.
  # (assume ":" & "{" are on the same line ... and weed out "HadronicModel"
  # and "ModelParameters" as our heuristic for label names)
  #
  # complete list
  export UNIVERSE_NAMES=`cat ${LOCAL_MULTIVERSE_FILE} | \
    grep "{" | grep ":" | \
    grep -v HadronicModel | \
    grep -v ModelParameters | \
    tr -d " :{" `
  export NUNIV=`echo "$UNIVERSE_NAMES" | wc -w`
  echo " "
  if [ ${VERBOSE} -gt 1 ]; then
    echo -e "${OUTGREEN}${NUNIV} universes in ${LOCAL_MULTIVERSE_FILE}${OUTNOCOL}"
    print_universe_names
  fi
  # 0 based counting
  MINU=0
  let MAXU=${NUNIV}-1
  export MINU
  export MAXU
  if [ ${VERBOSE} -gt 0 ]; then
    # bug fix: was ${OTUGREEN} (typo), which left this line uncolored
    echo -e "${OUTGREEN}initial REQ [${UNIV_FIRST}:${UNIV_LAST}] of [${MINU}:${MAXU}]${OUTNOCOL}"
  fi
  # clamp the requested window to what the file actually provides
  if [ ${UNIV_FIRST} -lt ${MINU} ]; then UNIV_FIRST=${MINU} ; fi
  if [ ${UNIV_LAST} -gt ${MAXU} ]; then UNIV_LAST=${MAXU} ; fi
  if [ ${VERBOSE} -gt 1 ]; then
    echo -e "${OUTGREEN}bounded REQ [${UNIV_FIRST}:${UNIV_LAST}] of [${MINU}:${MAXU}]${OUTNOCOL}"
  fi
  if [[ ${UNIV_FIRST} -ne ${MINU} || ${UNIV_LAST} -ne ${MAXU} ]] ; then
    # need to trim ... keep only entries with index in [UNIV_FIRST:UNIV_LAST]
    let i=-1 # using 0 based counting
    export UNIVERSE_NAMES_FULL=${UNIVERSE_NAMES}
    UNIVERSE_NAMES=""
    for univ in ${UNIVERSE_NAMES_FULL} ; do
      let i=${i}+1
      if [ ${i} -lt ${UNIV_FIRST} ] ; then continue; fi
      if [ ${i} -gt ${UNIV_LAST} ] ; then break; fi
      if [ -z "${UNIVERSE_NAMES}" ]; then
        export UNIVERSE_NAMES="${univ}"
      else
        export UNIVERSE_NAMES=`echo -e "${UNIVERSE_NAMES}\n${univ}"`
      fi
    done
    NUNIV=`echo "$UNIVERSE_NAMES" | wc -w`
    if [ ${VERBOSE} -gt 1 ]; then
      echo " "
      echo -e "${OUTGREEN}trimmed to ${NUNIV} universes [${UNIV_FIRST}:${UNIV_LAST}] in ${LOCAL_MULTIVERSE_FILE}${OUTNOCOL}"
      print_universe_names
    fi
  fi
}
#
##############################################################################
# find the supplied fcl file in the usual paths
# if found return full path as ${LOCAL_FCL_FILE}
# if not found echo error, set $?=1
##############################################################################
# find_fcl_file: search ${FHICL_FILE_PATH} (plus "." and "/") for the fcl
# file named by $1.
#   on success: exports LOCAL_FCL_FILE with the full path, returns 0
#   on failure: reports on both stdout and stderr, returns 1
function find_fcl_file() {
  FCL_FILE=${1}
  unset LOCAL_FCL_FILE
  # allow for full path and local directory
  for dir in $(echo "${FHICL_FILE_PATH}:.:/" | tr : "\n") ; do
    if [ -f ${dir}/${FCL_FILE} ]; then
      export LOCAL_FCL_FILE=${dir}/${FCL_FILE}
      break
    fi
  done
  if [[ -n "${LOCAL_FCL_FILE}" && -f "${LOCAL_FCL_FILE}" ]] ; then
    return 0
  fi
  # message goes to stderr as well so it survives stdout redirection
  echo -e "${OUTRED}failed to find ${FCL_FILE} anywhere${OUTNOCOL}"
  echo -e "${OUTRED}failed to find ${FCL_FILE} anywhere${OUTNOCOL}" >&2
  return 1
}
#
##############################################################################
# use an https://en.wikipedia.org/wiki/Here_document#Unix_shells
# to create file. un-\'ed $ or back-ticks (`) will be expanded from
# the current environment when run
#
##############################################################################
##############################################################################
# setup_colors: export the OUT* ANSI escape variables used for colorized
# logging, but only when stdout is a terminal; batch/redirected output is
# left free of escape codes.  (Testing "-t 1" is more reliable than looking
# for "i" in $- to detect interactive use.)
# use as: echo -e "${OUTRED} this is red ${OUTNOCOL}"
function setup_colors() {
  if [ ! -t 1 ]; then
    return
  fi
  export ESCCHAR="\x1B" # or \033 # Mac OS X bash doesn't support \e as esc?
  export OUTBLACK="${ESCCHAR}[0;30m"
  export OUTBLUE="${ESCCHAR}[0;34m"
  export OUTGREEN="${ESCCHAR}[0;32m"
  export OUTCYAN="${ESCCHAR}[0;36m"
  export OUTRED="${ESCCHAR}[0;31m"
  export OUTPURPLE="${ESCCHAR}[0;35m"
  export OUTORANGE="${ESCCHAR}[0;33m" # orange, more brownish?
  export OUTLTGRAY="${ESCCHAR}[0;37m"
  export OUTDKGRAY="${ESCCHAR}[1;30m"
  # labelled "light" but appear in some cases to show as "bold"
  export OUTLTBLUE="${ESCCHAR}[1;34m"
  export OUTLTGREEN="${ESCCHAR}[1;32m"
  export OUTLTCYAN="${ESCCHAR}[1;36m"
  export OUTLTRED="${ESCCHAR}[1;31m"
  export OUTLTPURPLE="${ESCCHAR}[1;35m"
  export OUTYELLOW="${ESCCHAR}[1;33m"
  export OUTWHITE="${ESCCHAR}[1;37m"
  export OUTNOCOL="${ESCCHAR}[0m" # No Color
}
##############################################################################
#
##############################################################################
##############################################################################
# check_scratch_area: guarantee a per-job scratch directory and cd into it.
# On a condor worker ${_CONDOR_SCRATCH_DIR} already exists; interactively a
# fake one is created under ${FAKESCRATCHBASE}.  Exits 42 if no usable
# scratch directory can be made.
# Exports: ORIGINALDIR (cwd on entry), USINGFAKESCRATCH (0=real, 1=fake)
function check_scratch_area() {
  export ORIGINALDIR=$(pwd)
  if [ -z "${_CONDOR_SCRATCH_DIR}" ]; then
    # not on a worker node ... invent a scratch area (PID-suffixed)
    _CONDOR_SCRATCH_DIR=${FAKESCRATCHBASE}/fake_CONDOR_SCRATCH_DIR_$$
    export USINGFAKESCRATCH=1
    echo -e "${OUTBLUE}${b0}: fake a \${_CONDOR_SCRATCH_DIR} as ${_CONDOR_SCRATCH_DIR} ${OUTNOCOL}"
    if [ ! -d ${_CONDOR_SCRATCH_DIR} ]; then
      mkdir -p ${_CONDOR_SCRATCH_DIR}
    fi
  else
    # actually on a condor worker node
    export USINGFAKESCRATCH=0
  fi
  if [ ! -d ${_CONDOR_SCRATCH_DIR} ]; then
    echo -e "${OUTRED}could not create ${_CONDOR_SCRATCH_DIR}${OUTNOCOL}"
    exit 42
  fi
  cd ${_CONDOR_SCRATCH_DIR}
}
#
##############################################################################
##############################################################################
# check_for_gdml_file: ensure ${GDMLFILENAME} exists in the current
# directory; if not, try to copy it from the mrb install or build areas.
# (The geometry construction only looks at the named path, so the file
# must be present locally.)
function check_for_gdml_file()
{
  if [ -f ${GDMLFILENAME} ]; then
    return
  fi
  echo -e "${OUTRED}${GDMLFILENAME} is not directly available${OUTNOCOL}"
  # ModelParamStudyProducer:G4Default@Construction
  # doesn't look any place but the named file (i.e. "." unless specified)
  if [ -f ${MRB_INSTALL}/gdml/${GDMLFILENAME} ]; then
    echo -e "${OUTRED}copy ${GDMLFILENAME} from ${MRB_INSTALL}${OUTNOCOL}"
    cp ${MRB_INSTALL}/gdml/${GDMLFILENAME} .
  elif [ -f ${MRB_BUILDDIR}/gdml/${GDMLFILENAME} ]; then
    echo -e "${OUTRED}copy ${GDMLFILENAME} from ${MRB_BUILDDIR}${OUTNOCOL}"
    cp ${MRB_BUILDDIR}/gdml/${GDMLFILENAME} .
  fi
  echo ""
}
#
##############################################################################
##############################################################################
# report_node_info: log a snapshot of the execution host for debugging
# batch jobs: hostname, kernel, distro (if RedHat-family), user, cwd.
function report_node_info()
{
  now=`date "+%Y-%m-%d %H:%M:%S" `
  nodeA=`uname -n `
  node1=`uname -n | cut -d. -f1`
  ksys=`uname -s`
  krel=`uname -r`
  if [ -f /etc/redhat-release ]; then
    # abbreviate e.g. "Scientific Linux Fermi ... release" to "SLF ..."
    redh=`cat /etc/redhat-release 2>/dev/null | \
      sed -e 's/Scientific Linux/SL/' -e 's/ Fermi/F/' -e 's/ release//' `
  fi
  echo -e "${b0}:${OUTBLUE} report_node_info at ${now} ${OUTNOCOL}"
  echo " running on ${nodeA} "
  echo " OS ${ksys} ${krel} ${redh}"
  echo " user `id`"
  echo " uname `uname -a`"
  echo " PWD=`pwd`"
  echo " "
}
##############################################################################
# report_setup: record the script version, tool locations, and the three
# colon-separated search paths (one entry per line) for later debugging.
function report_setup()
{
  echo -e "${b0}:${OUTBLUE} report_setup: script_version ${SCRIPT_VERSION}${OUTNOCOL}"
  echo " using `which art`"
  echo " using `which ifdh`"
  echo " using \${GEANT4_DIR}=${GEANT4_DIR}"
  # print each path variable with its elements split onto separate lines
  for pathvar in PRODUCTS LD_LIBRARY_PATH FHICL_FILE_PATH ; do
    echo " \${${pathvar}}="
    echo ${!pathvar} | tr ":" "\n" | sed -e 's/^/ /g'
  done
  echo " "
}
##############################################################################
# report_config_summary: one-screen recap of the job configuration that is
# about to run (destinations, ids, universe window, probe kinematics).
function report_config_summary()
{
  echo -e "${b0}:${OUTBLUE} config_summary ${CONFIGBASE}${OUTNOCOL}"
  # a here-doc keeps the fixed layout in one readable place
  cat << EOF
 DESTDIR ${DESTDIR}
 DESTDIRART ${DESTDIRART}
 PROCESS ${PROCESS}
 JOBOFFSET ${JOBOFFSET}
 JOBID ${JOBID}
 MULTIVERSE ${MULTIVERSE} [${UNIV_FIRST}:${UNIV_LAST}]
 nevents ${NEVENTS}
 hadronic model ${G4HADRONICMODEL}
 target "${TARGET}"
 probe ${PROBENAME} (${PROBEPDG}) [ ${PROBE_PX}, ${PROBE_PY}, ${PROBE_PZ} ] GeV/c
 
EOF
}
##############################################################################
##############################################################################
# ----- main sequence: environment, scratch area, tarball, multiverse fcl ----
setup_colors
echo -e "${OUTCYAN}process_args $@ ${OUTNOCOL}"
process_args "$@"
# find our own little place to do this job's processing
# (easy on condor job worker nodes; interactive .. more difficult)
echo -e "${OUTCYAN}check_scratch_area ${OUTNOCOL}"
check_scratch_area
# fetch the tarball that has the work to do
echo -e "${OUTCYAN}fetch_setup_tarball ${OUTNOCOL}"
fetch_setup_tarball
echo -e "${OUTGREEN}currently `pwd`${OUTNOCOL}"
cd ${_CONDOR_SCRATCH_DIR}
# (fixed message typo: was "woring")
echo -e "${OUTGREEN}working in `pwd`${OUTNOCOL}"
echo -e "${OUTORANGE}JOBID=${JOBID}${OUTNOCOL}"
if [ ${RNDMSEED} -eq ${RNDMSEED_SPECIAL} ]; then
# user didn't set a seed, set it to the jobid
# NOTE(review): the actual reseeding below is commented out, so the seed is
# left at RNDMSEED_SPECIAL -- presumably deliberate; confirm before enabling
echo -e "${OUTORANGE}export RNDMSEED=${RNDMSEED}${OUTNOCOL}"
#echo -e "${OUTORANGE}export RNDMSEED=${JOBID}${OUTNOCOL}"
# export RNDMSEED=${JOBID}
fi
# defensive: make sure we really are in the scratch area before fetching
if [ "`pwd`" != "${_CONDOR_SCRATCH_DIR}" ]; then
echo -e "${OUTRED}about to fetch_multiverse but in `pwd`${OUTNOCOL}"
echo -e "${OUTRED}instead of ${_CONDOR_SCRATCH_DIR}${OUTNOCOL}"
cd ${_CONDOR_SCRATCH_DIR}
fi
echo -e "${OUTGREEN}looking for ${MULTIVERSE_FILE}${OUTNOCOL}"
# find LOCAL_MULTIVERSE_FILE ... should be in $FHICL_FILE_PATH after setup
find_fcl_file ${MULTIVERSE_FILE}
if [ $? -ne 0 ]; then
# failed to find the file .. no point going on
# (fixed message typo: was "ERRROR")
echo -e "${OUTRED}===========>>> ERROR${OUTNOCOL}"
echo -e "${OUTRED}... no point in going on. bail out.${OUTNOCOL}"
echo -e "${OUTRED}... no point in going on. bail out.${OUTNOCOL}" >&2
exit 3
fi
LOCAL_MULTIVERSE_FILE=${LOCAL_FCL_FILE}
echo -e "${OUTGREEN}found ${LOCAL_MULTIVERSE_FILE}${OUTNOCOL}"
# Build the experiment-setup key and find which experiment dossiers
# (from ${DOSSIER_LIST}) contain data for it; surviving experiments are
# accumulated in EXPT_MATCH for use in the generated fcl.
# [HARP|ITEP]_${PROBENAME}_on_${TARGET}_at_${PROBEPNODOT}GeV
export EXPTSETUP_BASE=${PROBENAME}_on_${TARGET}_at_${PROBEPNODOT}GeV
echo -e "${OUTCYAN}look for EXPTSETUP_BASE=${EXPTSETUP_BASE}${OUTNOCOL}"
EXPT_MATCH=""
FCL_MATCH=""
# DOSSIER_LIST may be comma/semicolon/colon separated
for EXPT in `echo ${DOSSIER_LIST} | tr ",;:" " "` ; do
# find the fcl file
LOOKFOR=${EXPT}_dossier.fcl
#echo -e -n "${OUTGREEN}looking for ${LOOKFOR}${OUTNOCOL}"
# name of the variable that find_fcl_file's result will be exported under
LOOKFOR_RESULT=LOCAL_${EXPT}_DOSSIER_FILE
find_fcl_file ${LOOKFOR}
if [ $? -ne 0 ]; then
echo -e "${OUTRED}could not find ${LOOKFOR}${OUTNOCOL}"
exit 4
fi
export LOCAL_${EXPT}_DOSSIER_FILE=${LOCAL_FCL_FILE}
# "indirect variable reference" val=${!vnamenam}
#if [ -z "${!LOOKFOR}" ]; then
# count occurrences of the setup key in this experiment's dossier; an
# experiment qualifies only if the key appears exactly once
n=`grep -c ${EXPTSETUP_BASE} ${!LOOKFOR_RESULT}`
case $n in
0 ) echo -e "${OUTGREEN}found NO instances in ${LOOKFOR}${OUTNOCOL}"
;;
1 ) echo -e "${OUTGREEN}found instance in ${LOOKFOR}${OUTNOCOL}"
EXPT_MATCH="${EXPT_MATCH} ${EXPT}"
;;
* ) echo -e "${OUTRED}found ${n} instances in ${LOOKFOR}${OUTNOCOL}"
echo -e "${OUTRED}not unique !!${OUTNOCOL}"
exit 5
;;
esac
done
# without at least one matching experiment there is nothing to analyze
if [ -z "${EXPT_MATCH}" ]; then
echo -e "${OUTRED}no valid experimental data${OUTNOCOL}"
exit 6
fi
echo -e "${OUTORANGE}EXPTSETUP valid for ${EXPT_MATCH}${OUTNOCOL}"
# Determine the universe window, derive the configuration names and output
# destinations, then generate and display the fcl file.
infer_universe_names
if [ ${VERBOSE} -gt 1 ]; then
echo -e "${OUTGREEN}post- infer_universe_names${OUTNOCOL}"
pwd
ls -l
if [ -d 0 ]; then
echo "what is the 0 directory?"
ls -l 0
fi
fi
# zero-padded universe indices for stable, sortable file names
UNIV_FIRST_4=`printf "%04d" ${UNIV_FIRST}`
UNIV_LAST_4=`printf "%04d" ${UNIV_LAST}`
export CONFIGBASE=${EXPTSETUP_BASE}_U${UNIV_FIRST_4}_${UNIV_LAST_4}
if [ ${PASS} -ne 0 ]; then
export CONFIGBASE=${CONFIGBASE}_P${PASS}
fi
# compact form (no underscores / filler words) used for the process_name
export CONFIGBASESMALL=`echo ${CONFIGBASE} | sed -e 's/_on_//' -e 's/_at_//' | tr -d '_' `
export DESTDIR=${OUTPUTTOP}/${MULTIVERSE}/${EXPTSETUP_BASE}
# art output goes to the scratch twin of the persistent area
export DESTDIRART=`echo $DESTDIR | sed -e 's/persistent/scratch/g'`
echo -e ""
echo -e "${OUTORANGE}CONFIGBASE=${CONFIGBASE}${OUTNOCOL}"
echo -e "${OUTORANGE}CONFIGBASESMALL=${CONFIGBASESMALL}${OUTNOCOL}"
make_genana_fcl
echo -e "${OUTRED}-------------------------------------${OUTNOCOL}"
report_config_summary
report_node_info
report_setup
echo -e "${OUTRED}-------------------------------------${OUTNOCOL}"
#if [ ${VERBOSE} -gt 1 ]; then
echo -e "${OUTGREEN}contents of ${CONFIGBASE} are:${OUTORANGE}"
echo "--------------------------------------------------------------------"
cat ${CONFIGFCL}
echo "--------------------------------------------------------------------"
echo -e "${OUTNOCOL}"
echo " "
#fi
# run the job
# look in current directory for any include fcl files ...
export FHICL_FILE_PATH=${FHICL_FILE_PATH}:.
now=`date "+%Y-%m-%d %H:%M:%S" `
echo -e "${OUTPURPLE}art start ${now}"
# Run art on the generated configuration, optionally redirecting its
# stdout/stderr to per-job .out/.err files; record the exit status.
if [ ${RUNART} -ne 0 ]; then
# HERE'S THE ACTUAL "ART" COMMAND
if [ ${REDIRECT_ART} -ne 0 ]; then
echo "art -c ${CONFIGFCL} 1> ${CONFIGBASE}.out 2> ${CONFIGBASE}.err"
art -c ${CONFIGFCL} 1> ${CONFIGBASE}.out 2> ${CONFIGBASE}.err
ART_STATUS=$?
else
echo "art -c ${CONFIGFCL}"
art -c ${CONFIGFCL}
ART_STATUS=$?
fi
else
ART_STATUS=255 # didn't run ... can't be 0
fi
now=`date "+%Y-%m-%d %H:%M:%S" `
echo -e "art finish ${now}"
if [ ${ART_STATUS} -eq 0 ]; then
echo -e "${OUTGREEN}art returned status ${ART_STATUS}${OUTNOCOL}"
else
echo -e "${OUTRED}art returned status ${ART_STATUS}${OUTNOCOL}"
fi
echo -e "${OUTNOCOL}"
# at high verbosity replay the captured art output into this job's log
if [[ ${VERBOSE} -gt 1 && ${REDIRECT_ART} -ne 0 ]] ; then
echo -e "${OUTGREEN}contents of ${CONFIGBASE}.out is:${OUTORANGE}"
echo "--------------------------------------------------------------------"
cat ${CONFIGBASE}.out
echo "--------------------------------------------------------------------"
echo -e "${OUTGREEN}contents of ${CONFIGBASE}.err is:${OUTORANGE}"
echo "--------------------------------------------------------------------"
cat ${CONFIGBASE}.err
echo "--------------------------------------------------------------------"
echo -e "${OUTNOCOL}"
echo " "
fi
# Copy result files back to the destination areas (art output to
# DESTDIRART, everything else to DESTDIR), then clean up and exit with
# art's status.
# copy files back!
echo -e "${OUTGREEN}start copy back section${OUTNOCOL}"
echo -e "${OUTGREEN}start copy back section${OUTNOCOL}" >&2
${MYMKDIRCMD} ${DESTDIR}
MKDIR_STATUS=$?
if [ ${MKDIR_STATUS} -ne 0 ]; then
echo -e "${OUTRED}${MYMKDIRCMD} ${DESTDIR} ${OUTNOCOL} returned ${MKDIR_STATUS}${OUTNOCOL}"
fi
if [ "${DESTDIRART}" != "${DESTDIR}" ]; then
${MYMKDIRCMD} ${DESTDIRART}
MKDIR_STATUS=$?
if [ ${MKDIR_STATUS} -ne 0 ]; then
echo -e "${OUTRED}${MYMKDIRCMD} ${DESTDIRART} ${OUTNOCOL} returned ${MKDIR_STATUS}${OUTNOCOL}"
fi
fi
# for ifdh mkdir is there some way to distinguish between
# "couldn't create directory" (permissions, whatever) vs. "already exists"?
# mkdir(2) says EEXIST returned for later
# /usr/include/asm-generic/errno-base.h:#define EEXIST 17 /* File exists */
# but mkdir run interactively returns 1 ...
localList="${CONFIGBASE}.artg4tk.root ${CONFIGBASE}.hist.root ${CONFIGFCL}"
localList="${localList}"
if [ ${REDIRECT_ART} -ne 0 ]; then
localList="${localList} ${CONFIGBASE}.out ${CONFIGBASE}.err"
fi
for inFile in ${localList} ; do
if [ -f ${inFile} ]; then
# art event files go to the scratch twin; everything else to DESTDIR
DESTDIR1=${DESTDIR}
if [[ "${inFile}" =~ .*artg4tk.root ]]; then
DESTDIR1=${DESTDIRART}
fi
# only copy non-empty files
if [ -s ${inFile} ]; then
echo -e "${OUTPURPLE}${MYCOPYCMD} ${inFile} ${DESTDIR1}/${inFile}${OUTNOCOL}"
${MYCOPYCMD} ${inFile} ${DESTDIR1}/${inFile}
else
echo -e "${OUTRED}zero length ${inFile} -- skip copy back${OUTNOCOL}"
fi
else
echo -e "${OUTRED}missing local ${inFile} to copy back${OUTNOCOL}"
fi
done
echo " "
# clean-up
# NOTE(review): the actual "rm -r" of the fake scratch area is commented
# out below, so the directory is always left behind
if [ ${USINGFAKESCRATCH} -ne 0 ]; then
if [ ${KEEPSCRATCH} -eq 0 ]; then
echo -e "${OUTBLUE}${b0}: rm -r ${_CONDOR_SCRATCH_DIR} ${OUTNOCOL}"
# rm -r ${_CONDOR_SCRATCH_DIR}
else
echo -e "${OUTBLUE}${b0}: leaving ${_CONDOR_SCRATCH_DIR} ${OUTNOCOL}"
fi
fi
echo -e "${OUTBLUE}${b0}: end-of-script${OUTNOCOL}"
exit ${ART_STATUS}
# everything below the exit is unreachable (leftover scaffolding)
return
set -o xtrace
set +o xtrace
###
# end-of-script
| true
|
9aaa2b11ae19fa97245bd3889eefd74d045ff194
|
Shell
|
meuserj/dotfiles
|
/installlinks.sh
|
UTF-8
| 1,263
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install symlinks from the home directory into the dotfiles checkout
# (~/.gitdotfiles and friends), skipping any target that already exists.

# link_missing TARGET SOURCE: create TARGET -> SOURCE unless TARGET exists.
link_missing() {
  if [[ ! -e "$1" ]]
  then
    ln -s "$2" "$1"
  fi
}

# simple same-name links out of ~/.gitdotfiles
for name in .consolefont.sh .minttyrc .Xresources .shellprompt.sh \
            .tmux .tmux.conf .tmuxtheme.conf .zshrc
do
  link_missing ~/"$name" ~/.gitdotfiles/"$name"
done

# replace a stale .fzf.zsh symlink with the repository copy, if present
if [[ -L ~/.fzf.zsh && -e ~/.gitdotfiles/.fzf.zsh ]]
then
  rm -f ~/.fzf.zsh
  mv ~/.gitdotfiles/.fzf.zsh ~/.fzf.zsh
fi

# vim / neovim configuration
link_missing ~/.vim ~/.vimgit
link_missing ~/.vimrc ~/.vim/init.vim
if [[ ! -e ~/.config/nvim ]]
then
  mkdir -p ~/.config
  ln -s ~/.vim ~/.config/nvim
fi

# personal scripts under ~/bin; refresh anything that is not yet a symlink
if [[ ! -d ~/bin ]]
then
  mkdir -p ~/bin
fi
if [[ ! -L ~/bin/upgrade.sh ]]
then
  rm -f ~/bin/upgrade.sh
  ln -s ~/.gitdotfiles/upgrade.sh ~/bin/upgrade.sh
fi
if [[ ! -L ~/bin/fff ]]
then
  rm -f ~/bin/fff
  ln -s ~/.gitdotfiles/fff/fff ~/bin/fff
fi
| true
|
a32cbf012aba1fd59a0e03e3e3030bb6bec50b42
|
Shell
|
vishalkarad/FellowShip
|
/SequencesPracticeProblems/NumberGreterOrLessOrEqual.sh
|
UTF-8
| 365
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Compare two numbers and report which is greater, or that they are equal.
# Generalized: the two values may be given as the first two command-line
# arguments; both default to 10 (the original hard-coded values), so
# running with no arguments behaves as before.
# (also fixed the "greter" spelling in the output messages)

# compare_numbers A B: print one line describing the comparison of A and B
compare_numbers() {
  local a=$1
  local b=$2
  if [ "$a" -gt "$b" ]
  then
    echo "$a is greater"
  elif [ "$b" -gt "$a" ]
  then
    echo "$b is greater"
  else
    echo "both are equal"
  fi
}

compare_numbers "${1:-10}" "${2:-10}"
| true
|
7d68818619f27a1de53a55ef2ad9ec5bca94be66
|
Shell
|
weinenglong/hadoop_k8s
|
/start_cluster.sh
|
UTF-8
| 1,754
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Discover the hadoop-service-* pods in the kubernetes cluster, collect
# each pod's name / IP / node, then ssh into every pod (in reverse order)
# to run its per-node cluster start script with the full pod list.
#pods=`sudo kubectl get pods | grep $1 | sed 's/\(\w\+-\w\+-\w\+\)[^$]*/\1/'`
pods=`sudo kubectl get pods | grep hadoop-service- | sed 's/\(\w\+-\w\+-\w\+-\w\+\)[^$]*/\1/'`
arr=(${pods// /})
pod_count=${#arr[@]}
i=0
j=1
# For each pod, record its name/IP (master goes into slots 0/1 of param;
# the workers fill subsequent slot pairs).  Dynamic variable names
# pod$i/ip$j/node$j are built with eval.
while [ $i -lt $pod_count ]
do
eval pod$i=${arr[$i]}
eval tmp="$"pod$i""
if [[ $tmp == hadoop-service-master* ]];then
ip0=`sudo kubectl describe pod $tmp | grep IP | sed "s/IP:\s\+\([^.]*\)/\1/"`
node0=`sudo kubectl describe pod $tmp | grep Node | sed "s|Node:\s\+\([^/]*\)/[^/]*|\1|"`
param[0]=$tmp
param[1]=$ip0
else
eval ip$j=`sudo kubectl describe pod $tmp | grep IP | sed "s/IP:\s\+\([^.]*\)/\1/"`
eval node$j=`sudo kubectl describe pod $tmp | grep Node | sed "s|Node:\s\+\([^/]*\)/[^/]*|\1|"`
eval param[$j*2]=$tmp
eval param[$j*2+1]="$"ip$j""
let j++
fi
let i++
done
# The :<<! ... ! here-doc below is a disabled (commented-out) block that
# used to force the pod on node 10.0.0.21 into slot 0.
:<<!
ip_temp=$ip0
node_temp=$node0
pod_temp=$pod0
i=0
while [ $i -lt $pod_count ]
do
eval current_node="$"node$i""
if [ $current_node == "10.0.0.21" ]
then
eval ip0="$"ip$i""
eval ip$i=$ip_temp
eval node0="$"node$i""
eval node$i=$node_temp
eval pod0="$"pod$i""
eval pod$i=$pod_temp
let i=i+3
fi
let i++
done
!
#echo ${param[@]}
#echo ${#param[*]}
# Walk the pods from last to first (i still holds pod_count here) and kick
# off the per-node start script over ssh, passing the whole param list.
while [ $i -gt 0 ]
do
let i--
eval echo "$"pod$i"", "$"ip$i"", "$"node$i""
eval ssh -t -p 2122 root@"$"ip$i"" "/etc/start_cluster_node.sh "${param[@]}""
done
#./array_test.sh "${param[@]}"
#eval ssh -t -p 2122 root@$ip2 "/etc/test.sh "${param[@]}""
#ssh -t -p 2122 root@$ip2 "/etc/start_cluster.sh $pod0 $pod1 $pod2 $ip0 $ip1 $ip2"
#ssh -t -p 2122 root@$ip1 "/etc/start_cluster.sh $pod0 $pod1 $pod2 $ip0 $ip1 $ip2"
#ssh -t -p 2122 root@$ip0 "/etc/start_cluster.sh $pod0 $pod1 $pod2 $ip0 $ip1 $ip2"
| true
|
16e150fc95fcfca59cc3a78dc654698db75ffd8a
|
Shell
|
IBM-SkillsAcademy/big-data-eng-2020
|
/scripts/big-sql/bigsqlWorkload/bigsql_workload.sh
|
UTF-8
| 4,694
| 3.71875
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# Driver for running the BigSQL workload exercises (2, 3 and 6) for a
# range of studentNNNN accounts, optionally resetting their state first.
# Usage: bigsql_workload.sh <min> <max> <password-prefix> [r]
#SCRIPTS_PATH=/workloadScripts
#GIT_REPO_DIR=/root/bigsql/scriptsbigsql/big-data-eng-2020
#cd $GIT_REPO_DIR #toberemoved
#git pull #toberemoved
#
#/bin/cp $GIT_REPO_DIR/scripts/big-sql/*.sh $SCRIPTS_PATH/
#/bin/cp $GIT_REPO_DIR/scripts/big-sql/bigsqlWorkload/*.sh $SCRIPTS_PATH/
#/bin/cp $GIT_REPO_DIR/scripts/big-sql/bigsqlWorkload/*.sql $SCRIPTS_PATH/
#chmod a+x $SCRIPTS_PATH/*.sh
# env_variables.sh is expected to define SCRIPTS_PATH and GIT_REPO_DIR
source ./env_variables.sh
JSQSH_CONF=$GIT_REPO_DIR/scripts/big-sql/conf/.jsqsh
JSQSH_bin=/usr/ibmpacks/common-utils/current/jsqsh/bin/jsqsh
#echo $1
# positional arguments: student index range, password prefix, reset flag
min=$1
max=$2
password=$3
resetoption=$4
# pause MESSAGE: show the prompt, wait for a single silent keypress, then
# emit a newline to tidy up the terminal.
function pause(){
  local msg=$1
  echo $msg
  read -s -n 1
  echo ""
}
# (disabled) rudimentary argument validation
#if ((1 >= $max) || [ -z "$min" ] || [ -z "$max" ] || [ -z "$password" ]); then
# echo "wrong parameters"
# exit
#fi
# create folder for this run
# timestamped folder collects the timing logs for this run
foldername=$(date +%d-%m-%Y_%H%M%S)
mkdir -p $SCRIPTS_PATH/$foldername
echo "Created folder " $SCRIPTS_PATH/$foldername " for this test."
# create log files
touch $SCRIPTS_PATH/$foldername/bsq_ex2_time
touch $SCRIPTS_PATH/$foldername/bsq_ex3_time
touch $SCRIPTS_PATH/$foldername/bsq_ex6_time
###############
#Cleanup script
###############
# When the reset option "r" was given: stage a fresh jsqsh configuration
# for every student, run the per-exercise cleanup scripts in parallel,
# then remove the staged configurations again.
if [ "$resetoption" = "r" ]; then
echo
echo "-------------------------------------------------->> STARTING CLEANUP SCRIPTS <<--------------------------------------------------"
echo
# stage a fresh .jsqsh config for each student in [min, max]
for (( n=$min; n<=$max; n++ ))
do
current_student=student`echo $n | awk '{ printf "%04i\n", $0 }'`
/bin/cp -r $JSQSH_CONF /home/$current_student
chown -R $current_student /home/$current_student/.jsqsh
chgrp -R hadoop /home/$current_student/.jsqsh
done
# run all three cleanup scripts per student, students in parallel
for (( n=$min; n<=$max; n++ ))
do
current_student=student`echo $n | awk '{ printf "%04i\n", $0 }'`
current_student_bigsql_passwd="$password$n"
/bin/bash $SCRIPTS_PATH/bsq_ex6_clear.sh $current_student $current_student_bigsql_passwd $foldername; /bin/bash $SCRIPTS_PATH/bsq_ex3_clear.sh $current_student $current_student_bigsql_passwd $foldername; /bin/bash $SCRIPTS_PATH/bsq_ex2_clear.sh $current_student $current_student_bigsql_passwd $foldername &
done
wait
# remove every student's staged .jsqsh config
for (( n=$min; n<=$max; n++ ))
do
# bug fix: current_student was never reassigned inside this loop, so the
# previous loop's last value was reused and only ONE student's .jsqsh
# directory was ever removed (repeatedly)
current_student=student`echo $n | awk '{ printf "%04i\n", $0 }'`
rm -Rf /home/$current_student/.jsqsh &
done
wait
fi
# Stage jsqsh configs for all students (and the bigsql admin), then run
# exercises 2, 3 and 6 for every student in parallel, waiting for each
# exercise to finish before starting the next.
echo "Copying config dir"
for (( n=$min; n<=$max; n++ ))
do
current_student=student`echo $n | awk '{ printf "%04i\n", $0 }'`
/bin/cp -r $JSQSH_CONF /home/$current_student
chown -R $current_student /home/$current_student/.jsqsh
chgrp -R hadoop /home/$current_student/.jsqsh
done
# the bigsql service account gets the same configuration
/bin/cp -r $JSQSH_CONF /home/bigsql
chown -R bigsql /home/bigsql/.jsqsh
chgrp -R hadoop /home/bigsql/.jsqsh
wait
echo "Preparation done"
echo
echo
echo
pause "Press any key to start executing the exercises . . ."
echo
#################################
echo "Start executing exercises"
#################################
######################################
echo "Executing Exercise 2 for BigSQL"
######################################
echo
echo "-------------------------------------------------->> STARTING EXERCISE 2 <<--------------------------------------------------"
echo
for (( n=$min; n<=$max; n++ ))
do
current_student=student`echo $n | awk '{ printf "%04i\n", $0 }'`
current_student_bigsql_passwd="$password$n"
/bin/bash $SCRIPTS_PATH/bsq_ex2.sh $current_student $current_student_bigsql_passwd $foldername &
done
wait
echo
echo
echo
######################################
echo "Executing Exercise 3 for BigSQL"
######################################
echo
echo "-------------------------------------------------->> STARTING EXERCISE 3 <<--------------------------------------------------"
echo
for (( n=$min; n<=$max; n++ ))
do
current_student=student`echo $n | awk '{ printf "%04i\n", $0 }'`
current_student_bigsql_passwd="$password$n"
/bin/bash $SCRIPTS_PATH/bsq_ex3.sh $current_student $current_student_bigsql_passwd $foldername &
done
wait
echo
echo
echo
#echo "Granting access for students "
#su bigsql -c "/home/bigsql/bigsql_grantDBADM.sh"
######################################
echo "Executing Exercise 6 for BigSQL"
######################################
echo
echo "-------------------------------------------------->> STARTING EXERCISE 6 <<--------------------------------------------------"
echo
for (( n=$min; n<=$max; n++ ))
do
current_student=student`echo $n | awk '{ printf "%04i\n", $0 }'`
current_student_bigsql_passwd="$password$n"
/bin/bash $SCRIPTS_PATH/bsq_ex6.sh $current_student $current_student_bigsql_passwd $foldername &
done
wait
#echo "Revoking access for students "
#su bigsql -c "/home/bigsql/bigsql_revokeDBADM.sh"
echo
echo
echo
echo "All exercises done - Files saved at '" $SCRIPTS_PATH/$foldername "'"
echo
| true
|
e78387f164c35b202148904a6012adcf1b3fb054
|
Shell
|
kangnium/Shell_Scripting
|
/Automating.Command.Line.Tasks/loop_select.sh
|
UTF-8
| 186
| 2.859375
| 3
|
[] |
no_license
|
# select loop: works much like "for", but presents a numbered menu and
# prompts the user (via PS3) to choose an entry; "Quit" exits the loop.
names='Thomas Jude Josh Quit'
# bug fix: the select builtin's prompt variable is PS3, not PS
PS3='Select character'
select name in $names
do
# quote $name: an invalid selection leaves it empty, which would
# otherwise make the test expression malformed
if [ "$name" == 'Quit' ]
then
break
fi
echo $name
done
echo Goodlock
| true
|
6c423e0783300d1f662f3c0ec40e2b24c7c71e39
|
Shell
|
ArcherCraftStore/ArcherVMPeridot
|
/php/docs/Structures_Graph/docs/generate.sh
|
UTF-8
| 365
| 2.546875
| 3
|
[
"BSD-3-Clause",
"BSD-4-Clause-UC",
"Zlib",
"ISC",
"LicenseRef-scancode-other-permissive",
"TCL",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"JSON",
"LicenseRef-scancode-pcre",
"blessing",
"PHP-3.01",
"Apache-2.0",
"MIT"
] |
permissive
|
#!/bin/sh
# Regenerate the Structures_Graph API docs with phpdoc.  The "{arch}"
# directory confuses phpdoc, so it is tarred away first and restored after.
(cd ..; tar czf docs/arch.tgz "{arch}")
rm -Rf "../{arch}"
# rebuild the HTML output directory from scratch
rm -Rf ./html
mkdir -p ./html
phpdoc --directory ../Structures,./tutorials --target ./html --title "Structures_Graph Documentation" --output "HTML:frames" --defaultpackagename structures_graph --defaultcategoryname structures --pear
# restore the stashed "{arch}" directory
(cd ..; tar --absolute-names -xzf docs/arch.tgz)
#rm arch.tgz
| true
|
0e4efba4f56e68cf5c3c69686530bef0d8e025ae
|
Shell
|
lsst-dm/db_pdac_wise
|
/scripts/drop_forced_source_folders.bash
|
UTF-8
| 1,456
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Drop the per-chunk ForcedSource tables on this worker node.
#
# Usage: drop_forced_source_folders.bash <folders>
#   <folders> - name of a file under ${LOCAL_TMP_DIR}/forcedsource/ listing
#               data folders whose chunk tables should be dropped.
#
# Relies on env_base_stack.bash for: assert_worker, verbose, test_flag,
# mysql_cmd, and the INPUT_DATA_DIR / OUTPUT_* variables.
set -e
SCRIPT=$(realpath "$0")
SCRIPTS=$(dirname "$SCRIPT")
source "$SCRIPTS/env_base_stack.bash"
assert_worker
folders="$1"
if [ -z "$folders" ]; then
    echo "$(basename "$SCRIPT"): usage <folders>"
    exit 1
fi
worker=$(/usr/bin/hostname)
worker_data_dir="${INPUT_DATA_DIR}/${OUTPUT_FORCED_SOURCE_TABLE}/${worker}"
verbose "------------------------------------------------------------------------------------"
verbose "[$(date)] ** Begin dropping tables at worker: ${worker} **"
verbose "------------------------------------------------------------------------------------"
for folder in $(cat "${LOCAL_TMP_DIR}/forcedsource/${folders}"); do
    verbose "[$(date)] ** Tables of chunks from folder: ${folder} **"
    verbose "------------------------------------------------------------------------------------"
    # FIX: anchor the pattern — the original 'grep .txt' treated '.' as a
    # wildcard and also matched names merely containing "txt".
    for f in $(ls "${worker_data_dir}/${folder}/" | grep '\.txt$'); do
        # File names look like <something>_<chunk>.txt; field 2 is the chunk id.
        chunk=$(echo "${f%.txt}" | awk -F_ '{print $2}')
        for table in ${OUTPUT_DB}.${OUTPUT_FORCED_SOURCE_TABLE}_${chunk} ${OUTPUT_DB}.${OUTPUT_FORCED_SOURCE_TABLE}FullOverlap_${chunk}; do
            verbose $mysql_cmd -e "DROP TABLE IF EXISTS ${table}"
            # --dry-run only prints the statement above; otherwise execute it.
            if [ -z "$(test_flag '-n|--dry-run')" ]; then
                $mysql_cmd -e "DROP TABLE IF EXISTS ${table}"
            fi
        done
    done
done
verbose "------------------------------------------------------------------------------------"
# FIX: the original message said "Finished loading" in a drop script.
verbose "[$(date)] ** Finished dropping **"
| true
|
23fe315e0dfcb155b8af0f51c98d96b9b7e99389
|
Shell
|
EstevesAndre/SDIS-Project-2
|
/bin/runPeer.sh
|
UTF-8
| 250
| 2.796875
| 3
|
[] |
no_license
|
#! /bin/sh
# Launch an SSL-enabled Peer node.
#
# Usage: runPeer.sh <address_ip> <address_port> [<known_address_ip> <known_address_port>]
# The optional second pair points at an already-running peer used to join
# the existing network.
# FIX: use two tests joined by && — the '-a' operator inside [ ] is
# deprecated/ambiguous per POSIX.
if [ $# -ne 2 ] && [ $# -ne 4 ]; then
	echo "Usage: $0 <address_ip> <address_port> (<know_address_ip> <known_address_port>)?"
	exit 1
fi
# FIX: "$@" forwards each argument as its own word; unquoted $@ would
# re-split arguments containing whitespace.
java -Djavax.net.ssl.keyStore=keystore -Djavax.net.ssl.keyStorePassword=123456 service/Peer "$@"
| true
|
2c711d862d75aae72785e261ca2e61ce5a42a174
|
Shell
|
Brianstevee/alaterm
|
/fixlo.bash
|
UTF-8
| 787
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Fix for LibreOffice /proc not mounted.
# DO NOT USE THIS UNLESS YOU NEED IT.
# If LibreOffice fails to launch,
# and terminal reports error with /proc not mounted,
# first try exiting alaterm, then re-launch it.
# If that did not work, then try this script.
# It must be re-run whenever LibreOffice is updated.
# Run from Termux: bash fixlo.bash
# Do not run from within alaterm.
if [ "$THOME" != "" ] ; then # THOME is defined within alaterm.
	echo "This script does not run from within alaterm."
	echo "Logout of alaterm, then run script from Termux."
	exit 1
fi
# FIX: abort if alatermTop is unset/empty — without this guard the script
# would mkdir /prod and run a destructive sed against the wrong tree.
: "${alatermTop:?alatermTop must be set before running fixlo.bash}"
mkdir -p "$alatermTop/prod"
chmod 755 "$alatermTop/prod"
# FIX: guard each cd; the original kept going (and ran sed -i in whatever
# directory it happened to be in) when the cd failed.
cd "$alatermTop/prod" || exit 1
echo "1" > version
chmod 755 version
cd "$alatermTop/usr/lib/libreoffice/program" || exit 1
# Redirect LibreOffice's /proc probe to the dummy /prod tree created above.
sed -i 's/\/proc/\/prod/g' oosplash
exit 0
| true
|
1362f9b95193c94597ae3abcdc1c88d6b721dcbb
|
Shell
|
kk1694/fastai-shell
|
/fastai.sh
|
UTF-8
| 2,330
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# fastai-shell: create/destroy a preemptible GCP instance named "fastai"
# backed by the pre-built "fastai-boot" disk. The availability zone is
# persisted in ~/.fastai-zone between invocations.
if [ -f ~/.fastai-zone ]; then
  current_zone=$(cat ~/.fastai-zone)
else
  current_zone='us-west1-b'
fi

# Persist the availability zone for subsequent runs.
use-zone() {
  zone=$1
  echo $zone > ~/.fastai-zone
  echo "Availability zone updated to '$zone'"
}

# Create the instance with the given machine type and a single GPU.
start() {
  machine_type=$1
  gpu_type=$2
  gcloud beta compute --project=$DEVSHELL_PROJECT_ID instances create fastai --zone=$current_zone --machine-type=$machine_type --subnet=fastai --network-tier=PREMIUM --no-restart-on-failure --maintenance-policy=TERMINATE --preemptible --scopes=https://www.googleapis.com/auth/devstorage.read_only,https://www.googleapis.com/auth/logging.write,https://www.googleapis.com/auth/monitoring.write,https://www.googleapis.com/auth/servicecontrol,https://www.googleapis.com/auth/service.management.readonly,https://www.googleapis.com/auth/trace.append --accelerator=type=$gpu_type,count=1 --disk=name=fastai-boot,device-name=fastai-boot,mode=rw,boot=yes
}

v100 () {
  start "n1-standard-8" "nvidia-tesla-v100"
}

p100 () {
  start "n1-standard-8" "nvidia-tesla-p100"
}

k80 () {
  start "n1-standard-4" "nvidia-tesla-k80"
}

# CPU-only variant (no --accelerator, smaller machine type).
nogpu () {
  gcloud beta compute --project=$DEVSHELL_PROJECT_ID instances create fastai --zone=$current_zone --machine-type=n1-standard-1 --subnet=fastai --network-tier=PREMIUM --no-restart-on-failure --maintenance-policy=TERMINATE --preemptible --scopes=https://www.googleapis.com/auth/devstorage.read_only,https://www.googleapis.com/auth/logging.write,https://www.googleapis.com/auth/monitoring.write,https://www.googleapis.com/auth/servicecontrol,https://www.googleapis.com/auth/service.management.readonly,https://www.googleapis.com/auth/trace.append --disk=name=fastai-boot,device-name=fastai-boot,mode=rw,boot=yes
}

# Delete the current instance (shadows the shell builtin on purpose).
kill () {
  gcloud compute instances delete fastai --project=$DEVSHELL_PROJECT_ID --zone=$current_zone
}

help() {
  echo ""
  echo "fastai help"
  echo "-----------"
  echo "fastai v100  - start an instance with tesla v100 gpu"
  echo "fastai p100  - start an instance with tesla p100 gpu"
  echo "fastai k80   - start an instance with tesla k80 gpu"
  echo "fastai nogpu - start an instance without a gpu"
  echo "fastai kill  - kill the current fastai instance"
  echo "fastai use-zone <zone> - set the availability zone"
  echo ""
}

command=${1:-help}
arg1=$2
# FIX: dispatch only to known subcommands. The original `$command $2`
# executed ANY first argument as a program (e.g. "fastai rm" ran rm).
case "$command" in
  v100|p100|k80|nogpu|kill|help)
    "$command"
    ;;
  use-zone)
    if [ -z "$arg1" ]; then
      echo "Usage: fastai use-zone <zone>"
      exit 1
    fi
    use-zone "$arg1"
    ;;
  *)
    echo "Unknown command: $command"
    help
    exit 1
    ;;
esac
| true
|
2c5be307da32f50cbca501702dc4733d4388fa40
|
Shell
|
tjansson60/munin-plugins
|
/omsa_power.sh
|
UTF-8
| 645
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Munin plugin: report the chassis power draw (Watt) via Dell OMSA's
# omreport tool. Supports the standard munin "autoconf" / "config" verbs.
OMSA="/opt/dell/srvadmin/bin/omreport"

case "$1" in
  autoconf)
    # Usable only when the omreport binary is present on this host.
    if [ -e "$OMSA" ]; then
      echo yes
      exit 0
    else
      echo no
      exit 1
    fi
    ;;
  config)
    echo 'graph_title OMSA - current power usage in Watt'
    echo 'graph_args --base 1000 -l 0'
    echo 'graph_vlabel Watt'
    echo 'graph_category sensors'
    echo 'PWR.label Power usage in W'
    exit 0
    ;;
  *)
    # Default action: emit the current reading. Filter out the KWh total
    # and the peak reading, keeping only the instantaneous value (field 3).
    reading=$($OMSA chassis pwrmonitoring | grep Reading | grep -v 'KWh' | grep -v 'Peak' | awk '{print $3}')
    echo "PWR.value $reading"
    ;;
esac
| true
|
e2b061746cb7f4d59e893a4bfd230ed1412876f9
|
Shell
|
vindecodex/automated-crawler-wget
|
/dl.sh
|
UTF-8
| 665
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Endless crawler: fetch https://www.sample.com/post/<n>/ pages two at a
# time with a spoofed browser user agent, sleeping 5-14s between requests
# and an extra 30s between each pair.
counter=1
while true
do
	attempt=1
	while [ "$attempt" -le 2 ]
	do
		wget -O file$counter.html -erobots=off --user-agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36" https://www.sample.com/post/$counter/
		counter=$((counter + 1))
		attempt=$((attempt + 1))
		echo " _ ___ __ __ "
		echo "| | / (_)___ ____/ /__ _________ ____/ /__ "
		echo "| | / / / __ \/ __ / _ \/ ___/ __ \/ __ / _ \ "
		echo "| |/ / / / / / /_/ / __/ /__/ /_/ / /_/ / __/"
		echo "|___/_/_/ /_/\__,_/\___/\___/\____/\__,_/\___/ "
		sleep $(( 5 + RANDOM % 10 ))
	done
	sleep 30
done
| true
|
b62ff4306f3eced9b4698d9a8087f388b4ddbc20
|
Shell
|
heroku-softtrends/heroku-buildpack-dotnetcore
|
/bin/release
|
UTF-8
| 621
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# bin/release <build-dir>
# Heroku buildpack "release" hook: prints a YAML document describing the
# app's config vars and (when the app ships no Procfile) a default web
# process type for the compiled .NET Core assembly.
### Configure directories
export BUILD_DIR=$1
BP_DIR=$(cd $(dirname $0)/..; pwd)
### Load dependencies
# shellcheck source=util/common.sh
source "$BP_DIR/bin/util/common.sh"
### Export all env vars here
# Sources the env written by the compile step; provides APP_ASSEMBLY below.
. $BUILD_DIR/.profile.d/dotnetcore.sh
cat <<EOF
---
config_vars:
EOF
#if [[ $IS_POSTGRES_USED == "yes" ]]; then
#cat <<EOF
#addons:
# - heroku-postgresql:hobby-dev
#EOF
#fi
# Only emit a default process type when the app did not supply a Procfile.
# The \$HOME / \$PORT escapes are deliberate: they must expand on the dyno
# at run time, not here.
if [[ ! -f $BUILD_DIR/Procfile ]]; then
cat <<EOF
default_process_types:
EOF
echo "  web: cd \$HOME/${APP_ASSEMBLY} && ASPNETCORE_URLS='http://+:\$PORT' dotnet \"./${APP_ASSEMBLY}.dll\""
fi
| true
|
ff9d66720805519a793238740b93b5bbe3003f0f
|
Shell
|
tro3373/dotfiles
|
/bin/exec_bat
|
UTF-8
| 1,219
| 3.59375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Read a Windows batch script from stdin, materialise it as a .bat file
# (via the external gen_bat helper) and launch it through cmd.exe.
# NOTE(review): `os` and `gen_bat` are external helpers, presumably from the
# surrounding dotfiles bin directory — confirm they are on PATH.
bat=
success=0
has() { command -v ${1} >&/dev/null; }
# Cleanup handler: remove the generated .bat, but only after a successful
# launch (success=1) and only if the file actually exists.
end() {
if [[ ! -e $bat || $success -ne 1 ]]; then
return
fi
rm -rfv $bat
}
main() {
if ! os win; then
echo "Not supported os" 1>&2
exit 1
fi
if ! has cmd; then
echo "No cmd command exist" 1>&2
exit 1
fi
# Require piped input; this script is meant to be used as `... | exec_bat`.
if [[ ! -p /dev/stdin ]]; then
echo "Specify stdin to execute" 1>&2
exit 1
fi
set -e
bat=$(cat - | gen_bat)
# Run `end` on the following signals:
#  0  EXIT - pseudo-signal raised by the shell when the process terminates.
#  1  HUP  - hangup (X session close, daemon reload).
#  2  INT  - interrupt (Ctrl+C / Delete key).
#  3  QUIT - quit (Ctrl+\).
#  (9 KILL - force kill; cannot be trapped, listed here for reference only.)
# 15  TERM - termination; the default signal of `kill` (kill PID == kill -15 PID).
trap end 0 1 2 3 15
cmd //c start "" $bat
success=1
}
main "$@"
| true
|
3fd861484d42570afa1a50fa249b00861aea51be
|
Shell
|
last-endcode/Nolimit-Audience-Deploy
|
/audience-instagram-bali.sh
|
UTF-8
| 990
| 3.25
| 3
|
[] |
no_license
|
#! /bin/bash
# Collect follower counts for a list of Instagram accounts and hand them to
# the node indexer, then archive the produced files under a dated backup
# directory and copy them into the raw-data submit area.
DATE=`date +%F`
printf "\t\t****************\n";
printf "\t\tInstagram_account\n";
printf "\t\t****************\n\n";
#TWITTER
# One Instagram numeric user id per line in the input file.
pathfile=( `cat "/mnt/app/audience-account/extracontent/instagram-bali"` )
saving="/mnt/app/audience-account/buffer_audience"
backup="/mnt/app/audience-account/backup-audience";
submit="/mnt/data/raw/"
# Empty the buffer from the previous run before producing new files.
rm $saving/*
for id in "${pathfile[@]}"; do
# Instagram GraphQL endpoint; query_hash selects the "followed_by" query.
wget 'https://www.instagram.com/graphql/query/?query_hash=c76146de99bb02f6415203be841dd25a&variables={"id":'$id',"first":"12"}' -O wiwa.json
cat wiwa.json | jq '.data.user.edge_followed_by.count' > followers
count_followers=`cat followers`
# NOTE(review): index.js presumably writes its output files into $saving —
# confirm, since the mv/cp below depend on that.
node index.js $DATE $DATE instagram $id $count_followers $count_followers > values.log
cat values.log
printf "\n";
done
echo "DONE For instagram";
#for backup
mkdir -p "$backup/Instagram-$DATE"
#just be carefully if path raw delete by hasile hasile makibaou..
mkdir -p $submit
cp $saving/* $submit/
mv $saving/* "$backup/Instagram-$DATE"
| true
|
58cced481548a595d2aea32eb2a36cc964ca598d
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/python-heapdict/PKGBUILD
|
UTF-8
| 626
| 2.5625
| 3
|
[] |
no_license
|
# Maintainer: Amos Onn <amosonn at gmail dot com>
pkgname=python-heapdict
pkgver=1.0.0
pkgrel=1
pkgdesc="A python library for a heap with decrease- and increase-key operations."
arch=('any')
url="http://stutzbachenterprises.com"
license=('BSD-3-clause')
depends=('python>=3.5')
source=(https://codeload.github.com/DanielStutzbach/heapdict/tar.gz/v$pkgver)
sha256sums=('4c8e581f1651ca78da35820fc80b0d50fc060032517f2c7efd9169e204db3793')

package() {
	# FIX: quote $srcdir/$pkgdir — makepkg build roots may contain spaces.
	cd "$srcdir/heapdict-$pkgver"
	python setup.py install --root="$pkgdir" || return 1
	# Install the license where pacman expects it; 644 is the conventional
	# mode for license files (plain `install` would default to 755).
	install -d "$pkgdir/usr/share/licenses/$pkgname"
	install -m644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/COPYING"
}
| true
|
4de6a6818c22df3a95ff2858cdc5bc2975ebc76e
|
Shell
|
Levantado/Articles
|
/BackUp/page-speed.sh
|
UTF-8
| 1,622
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build and install nginx 1.8.0 with Google's ngx_pagespeed module on
# Ubuntu 14.04 (trusty), then enable pagespeed in nginx.conf.
# NOTE(review): versions (nginx 1.8.0, pagespeed 1.9.32.6) and the sed line
# numbers below are hard-coded against specific upstream packages — this
# script will break silently if either changes. Confirm before reuse.
if [[ "$USER" != 'root' ]]; then
	echo "Sorry, you need to run this as root"
	exit
fi
# NOTE(review): --force-yes is deprecated in modern apt; kept as-is here.
apt-get -y --force-yes install dpkg-dev build-essential zlib1g-dev libpcre3 libpcre3-dev unzip
# Register the official nginx.org package source so apt-get source works.
echo "deb http://nginx.org/packages/ubuntu/ trusty nginx" >> /etc/apt/sources.list.d/nginx.list
echo "deb-src http://nginx.org/packages/ubuntu/ trusty nginx" >> /etc/apt/sources.list.d/nginx.list
wget -q "http://nginx.org/packages/keys/nginx_signing.key" -O-| sudo apt-key add -
apt-get update
# Fetch the nginx source package plus its build dependencies.
mkdir -p ~/new/nginx_source/
cd ~/new/nginx_source/
apt-get -y --force-yes source nginx
apt-get -y --force-yes build-dep nginx
# Fetch ngx_pagespeed and its matching PSOL binary blob.
mkdir -p ~/new/ngx_pagespeed/
cd ~/new/ngx_pagespeed/
ngx_version=1.9.32.6
wget https://github.com/pagespeed/ngx_pagespeed/archive/release-${ngx_version}-beta.zip
unzip release-${ngx_version}-beta.zip
cd ngx_pagespeed-release-${ngx_version}-beta/
wget --no-check-certificate https://dl.google.com/dl/page-speed/psol/${ngx_version}.tar.gz
tar -xzf ${ngx_version}.tar.gz
# Inject --add-module into both configure invocations of debian/rules
# (lines 22 and 61 in the 1.8.0-1 packaging).
cd ~/new/nginx_source/nginx-1.8.0/debian/
sed -i '22 a --add-module=../../ngx_pagespeed/ngx_pagespeed-release-1.9.32.6-beta \\' rules
sed -i '61 a --add-module=../../ngx_pagespeed/ngx_pagespeed-release-1.9.32.6-beta \\' rules
# Build and install the patched package.
cd ~/new/nginx_source/nginx-1.8.0/
dpkg-buildpackage -b
cd ~/new/nginx_source/
dpkg -i nginx_1.8.0-1~trusty_amd64.deb
nginx -V
# Cache directory used by the pagespeed module, owned by the worker user.
mkdir -p /var/ngx_pagespeed_cache
chown -R www-data:www-data /var/ngx_pagespeed_cache
# Enable pagespeed by inserting directives at fixed lines of nginx.conf.
cd /etc/nginx/
sed -i '30ipagespeed on;' nginx.conf
sed -i '31ipagespeed FileCachePath /var/ngx_pagespeed_cache;' nginx.conf
service nginx restart
# Smoke test: the X-Page-Speed response header proves the module is active.
curl -I -p http://localhost|grep X-Page-Speed
| true
|
ed4b76cf333d7b0cb62d82abc4690f16f401e0f1
|
Shell
|
lexiste/bash
|
/nmapMon-byNetBlock.sh
|
UTF-8
| 2,272
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
#
#
# nmap Monitor Script -- used to run a standard scan on given networks that we
# can diff against to track for any changes.
# Create Date : 27-Sept-2017
# Mod Date : 27-Sept-2017
# Mod Auth : tfencl (at) radial.com
#
# taken as reference from: https://jerrygamblin.com/
# don't wrap the path to the input file as the variable appears to contain either \" or \' as literal
NETFILE=~/scan/radial-ext-net.lst
# use NORMAL scan timing (-T3); TCP Connect (SYN/ACK/FIN); list open ports
NMAP_OPTIONS='-T3 -sT --open'
# store output here ...
cd ~/scan
START_TIME=$(date +%s)
if [[ -r ${NETFILE} ]]; then
  while IFS= read -r netblock; do
    DATE=$(date +%d-%b-%Y_%H-%M)
    echo "calling nmap with opts: ${NMAP_OPTIONS} using ${netblock}"
    # FIX: derive a filesystem-safe name from the netblock. The original
    # tried to strip the CIDR suffix with a broken pipeline
    # (`pos=$netblock | grep ...` assigned nothing, and `$($netblock | ...)`
    # executed the netblock as a command), then hit a leftover debug
    # `exit 1` that made everything below unreachable. A netblock like
    # 10.0.0.0/24 would also have put a '/' into every output filename.
    name=${netblock//\//_}
    # NMAP_OPTIONS is intentionally unquoted so its words become separate args.
    nmap ${NMAP_OPTIONS} "${netblock}" -oX "${name}_${DATE}.xml" > /dev/null
    if [ -e "${name}-prev.xml" ]; then # if there is a -prev file, compare the current run and previous run
      ndiff "${name}-prev.xml" "${name}_${DATE}.xml" --text > "${name}-diff"
      # may want to add a grep filter to the above line ... egrep -v '^(\+|-)N'
      # if the sizeOf(diff_file) > 0
      if [ -s "${name}-diff" ]; then
        echo '*** NDIFF Detected differences in recent scan ***'
        echo "${name}-diff"
        echo ''
        echo 'posting to initialstate'
        # FIX: double quotes so ${name} actually expands — the original used
        # single quotes, posting the literal text '${netblock}=Changed'.
        curl "https://groker.initialstate.com/api/events?accessKey=wreiLDwxlxP8f1ANx0JRoDE6qBzjWQO5&bucketKey=X4NMERU6AHF8&${name}=Changed"
        # TODO >> send email or other notification on the actual change (contents of ${name}-diff)
        # update the -prev file since there are changes
        ln -sf "${name}_${DATE}.xml" "${name}-prev.xml"
      else
        echo '*** NDIFF found no differences in scans'
        rm "${name}_${DATE}.xml"
      fi
    else # no -prev file found, so create an initial -prev linked to itself for now
      ln -sf "${name}_${DATE}.xml" "${name}-prev.xml"
    fi
    echo "completed scan of ${netblock}"
  done < "$NETFILE"
fi
END_TIME=$(date +%s)
echo ''
echo "$(date) - finished all targets in $(( END_TIME - START_TIME )) seconds"
| true
|
4375186843db025da6437028605cd258865263d7
|
Shell
|
Pankaj-Ra/ECEP_C-CPP
|
/ECEP/LinuxSystem/Templates/Case/case_1.sh
|
UTF-8
| 536
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Read a single number from the user and print its English name (1-9);
# 0 and 10 are rejected as out of range.
echo "Enter an element between 0 - 10 (excluded)"
echo -n "Enter a number: "
# -r keeps backslashes in the input literal.
read -r X
# FIX: quote $X. The unquoted original collapsed to `[ -n ]` when X was
# empty, which is a one-argument test on the string "-n" — always true —
# so the empty-input branch could never be reached.
if [ -n "$X" ]
then
case $X in
 1) echo Value of x is One.;;
 2) echo Value of x is Two.;;
 3) echo Value of x is Three.;;
 4) echo Value of x is Four.;;
 5) echo Value of x is Five.;;
 6) echo Value of x is Six.;;
 7) echo Value of x is Seven.;;
 8) echo Value of x is Eight.;;
 9) echo Value of x is Nine.;;
 0 | 10) echo wrong number.;;
 *) echo Please follow the instructions;;
esac
else
echo Enter the correct element
fi
| true
|
83dd5db2b6f1a0bbc7d3b33be943dadd5cf5adca
|
Shell
|
pmulcaire/multivec
|
/multivec-merge.sh
|
UTF-8
| 330
| 2.578125
| 3
|
[] |
no_license
|
# Merge the per-language multiskip embedding files into a single file,
# prefixing every line with its "<lang>:" tag so vectors from different
# languages remain distinguishable after the merge.
export merged="output/bg-cs-da-de-el-en-es-fi-fr-hu-it-sv.multiskip.iter_10+window_1+min_count_5+negative_5+size_40+threads_32"
rm $merged
for lang in bg cs da de el en es fi fr hu it sv
do
  python ~/wammar-utils/prefix_lines.py -i $merged.$lang -o $merged.$lang.prefixed -p $lang: \
    && cat $merged.$lang.prefixed >> $merged
done
| true
|
299e152d61ec8556ec5cf7c61e97a70378614f41
|
Shell
|
dbhat/labwiki
|
/bin/labwiki.in
|
UTF-8
| 510
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launcher stub for LabWiki. The %TOKEN% placeholders below are substituted
# with concrete paths at install time (this is a .in template).
HOME=%HOME%
RVM_HOME=%RVM_HOME%
RUBY=%RUBY%
GEMSET=%GEMSET%
export HOME
# Point RubyGems at the RVM-managed ruby and the labwiki gemset.
export GEM_HOME=${RVM_HOME}/gems/${RUBY}
export GEM_PATH=${RVM_HOME}/gems/${RUBY}:${RVM_HOME}/gems/${RUBY}@${GEMSET}
export PATH=${RVM_HOME}/gems/${RUBY}/bin:${RVM_HOME}/gems/${RUBY}@${GEMSET}/bin:${RVM_HOME}/rubies/${RUBY}/bin:${RVM_HOME}/bin:${PATH}
export LW_REF_DIR=`pwd` # For script to resolve relative file names in $* arguments
# Run from the directory containing this script so relative paths below work.
# NOTE(review): "$@" would preserve argument boundaries better than $* —
# confirm callers never pass arguments containing spaces.
cd "$( dirname "${BASH_SOURCE[0]}" )"
exec bundle exec ruby ../lib/labwiki/start.rb $*
| true
|
5b122aae83b624056fc9bd63cdfeeba011b5c2a1
|
Shell
|
SamarthBM/Shell-script
|
/day7/array1.sh
|
UTF-8
| 93
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Sum the elements of a small integer array and print the total.
array=(2 -3 1)
sum=0
for value in "${array[@]}"
do
    sum=$((sum + value))
done
echo "$sum"
| true
|
5e62d014c50cb8e6142a2f31f00ac441dc8fa648
|
Shell
|
Eathox/Doom-Nukem
|
/ci/osx/installMixer.bash
|
UTF-8
| 1,611
| 3.171875
| 3
|
[] |
no_license
|
# **************************************************************************** #
#                                                                              #
#                                                         :::::::: #
#    installMixer.bash                                  :+:    :+: #
#                                                      +:+ #
#    By: pholster <pholster@student.codam.nl>         +#+ #
#                                                    +#+ #
#    Created: 2019/10/01 16:19:49 by pholster        #+#    #+# #
#    Updated: 2019/10/01 16:19:49 by pholster        ########   odam.nl #
#                                                                              #
# **************************************************************************** #
# Install SDL2_mixer (built from source) and its codec dependencies via
# Homebrew; outside GitHub Actions the Xcode command line tools are
# updated first.
# Resolve the directory containing this script, whether invoked with an
# absolute or a relative path.
if [[ $0 =~ $PWD ]]; then
SELF_LOCATION=$0
else
SELF_LOCATION="$PWD/$0"
fi
SELF_LOCATION=`echo "$SELF_LOCATION" | rev | cut -d"/" -f 2- | rev`
# Install the newest "Command Line Tools" entry offered by softwareupdate.
# NOTE(review): parsing `softwareupdate -l` output is fragile across macOS
# releases — confirm the grep/awk pipeline still matches.
function updateXcode {
# touch /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress
PROD=`softwareupdate -l | grep "\*.*Command Line" | head -n 1 |
awk -F"*" '{print $2}' | sed -e 's/^ *//' | tr -d '\n'`
softwareupdate -i "$PROD" --verbose
rm -f /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress
}
# Skip the Xcode update inside GitHub Actions runners (already provisioned).
if [ -z $GITHUB_ACTION ]; then
updateXcode
fi
brew install pkg-config
brew install libmodplug libvorbis mpg123 flac opus
# Build SDL2_mixer from the formula shipped next to this script.
brew reinstall --build-from-source "$SELF_LOCATION/sdl2_mixer.rb"
brew link sdl2_mixer flac opus mpg123 pkg-config libmodplug libvorbis
| true
|
597a869b3e615dc36be47a3f50336a58d2d0bc71
|
Shell
|
wangzkiss/docker-ambari-deploy
|
/install-apps/start_app.sh
|
GB18030
| 16,467
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Deployment/lifecycle helper for the "vigor" application containers
# (admin UI, scheduler, batch agent, etl, etl agent, streaming agent).
# Every setting below uses the `: ${VAR:=default}` idiom, so any value can
# be overridden from the caller's environment.
# import common variable
: ${DIR_HOME:=$(dirname $0)}
source ${DIR_HOME}/env.sh
# base infrastructure endpoints (resolved through consul DNS)
: ${MYSQL_HOST:=mysql.service.consul}
: ${KYLIN_HOST:=kylin.service.consul}
: ${AMR_URL_PORT:=8080}
: ${AMR_URL_HOST:=amb-server.service.consul}
: ${HSFS_SERVER_NAME:=amb1.service.consul}
: ${AMR_DATA_HOST_PWD:=Zasd_1234}
: ${AMR_DATA_HOST:=amb1.service.consul}
: ${AMR_CLSTER_NAME:=vigordata}
# def etl (container name / exposed port)
: ${ETL_NAME:=vigor-etl}
: ${ETL_PORT:=15100}
# def admin (container name / port / WAR basename)
: ${ADMIN_NAME:=vigor-admin}
: ${ADMIN_PORT:=8900}
: ${ADMIN_WAR:=vigordata-web}
# def scheduler
: ${SCHEDULER_NAME:=vigor-scheduler}
: ${SCHEDULER_PORT:=8901}
: ${SCHEDULER_WAR:=vigordata-scheduler}
# def batch
: ${BATCH_NAME:=vigor-batch}
: ${BATCH_PORT:=8902}
: ${BATCH_WAR:=vigordata-batchagent}
# def etlagent
: ${ETLAGENT_NAME:=vigor-etlagent}
: ${ETLAGENT_PORT:=8903}
: ${ETLAGENT_WAR:=vigordata-etlagent}
# def vigordata-streamingagent
: ${STREAMING_NAME:=vigor-streaming}
: ${STREAMING_PORT:=8904}
: ${STREAMING_WAR:=vigordata-streamingagent}
# admin hosts: which physical node runs each role (pdsh targets)
admin_ip=docker-229
scheduler_ip=docker-229
batch_ip=docker-229
etl_ip=docker-229
clean_apps(){
# Stop and remove every "vigor" container on the target host(s).
# NOTE(review): relies on $hostname being set by the caller/environment —
# it is not defined anywhere in this script; confirm.
#bash ambari-functions.sh amb-clean-cluster
# FIX: escape \$1 — inside the double-quoted remote command string the
# original $1 was expanded by THIS shell (to an empty string), turning the
# remote awk into '{print }' and feeding whole `docker ps` lines to xargs.
pdsh -w $hostname "docker ps -a | grep vigor | awk '{print \$1}' | xargs docker stop"
pdsh -w $hostname "docker ps -a | grep vigor | awk '{print \$1}' | xargs docker rm"
}
restart_all()
{
# Restart every vigor container across the deployment via pdsh.
# FIX: the script deployed by install_app is /tmp/start_app.sh — the
# original invoked the non-existent /tmp/start_app on every host.
## admin
pdsh -w $admin_ip "bash /tmp/start_app.sh restart vigor-admin"
# batch agent
pdsh -w $batch_ip "bash /tmp/start_app.sh restart vigor-batch"
# etl agent (co-located on the admin host)
pdsh -w $admin_ip "bash /tmp/start_app.sh restart vigor-etlagent"
# streaming agent (co-located on the admin host)
pdsh -w $admin_ip "bash /tmp/start_app.sh restart vigor-streaming"
# scheduler host(s)
for ip in $scheduler_ip; do
pdsh -w $ip "bash /tmp/start_app.sh restart vigor-scheduler"
done
# etl cluster host(s)
for ip in $etl_ip; do
pdsh -w $ip "bash /tmp/start_app.sh restart vigor-etl"
done
}
load_etl_image(){
# Copy the saved vigor-etl image tarball (plus env.sh) to the target
# host(s) and load the image into the Docker daemon there.
local hostname=${1:?"node <hostname>]"}
/bin/cp -f ${DIR_HOME}/../../sh_files/env.sh ${DIR_HOME}/
pdcp -w $hostname ${DIR_HOME}/vigor-etl-img.tar /tmp/
pdcp -w $hostname ${DIR_HOME}/../../sh_files/env.sh /tmp/
pdsh -w $hostname "docker load < /tmp/vigor-etl-img.tar"
}
load_app_image(){
# Ship env.sh and the vigor-tomcat image tarball to the target host(s),
# then load the image into the remote Docker daemon.
local target=${1:?"node <hostname>]"}
/bin/cp -f ${DIR_HOME}/../../sh_files/env.sh ${DIR_HOME}/
pdcp -w $target ${DIR_HOME}/../../sh_files/env.sh /tmp/
pdcp -w $target ${DIR_HOME}/vigor-tomcat.tar /tmp/
pdsh -w $target "docker load < /tmp/vigor-tomcat.tar"
}
install_app(){
# Install one vigor application on a host: copy this deploy script (and the
# app's WAR where applicable) to /tmp on the target, then invoke the
# matching start_* function remotely.
local hostname=${1:?"install_app <hostname> <appname> (vigor-admin |vigor-scheduler | vigor-batch |vigor-etlagent |vigor-streaming | vigor-etl)]"}
local warname=${2:?"install_app <hostname> <appname> (vigor-admin |vigor-scheduler | vigor-batch |vigor-etlagent |vigor-streaming | vigor-etl)"}
#pdcp -w $hostname "${DIR_HOME}/env.sh" /tmp/
case ${warname} in
${ADMIN_NAME})
# copy the deploy script plus the admin WAR, then start it remotely
pdcp -w $hostname "${DIR_HOME}/start_app.sh" /tmp/
pdcp -w $hostname "${DIR_HOME}/${ADMIN_WAR}.war" /tmp/
pdsh -w $hostname "bash /tmp/start_app.sh start_admin"
;;
${ETL_NAME})
# etl ships as a docker image, so only the script is copied
pdcp -w $hostname "${DIR_HOME}/start_app.sh" /tmp/
pdsh -w $hostname "bash /tmp/start_app.sh start_etl"
;;
${BATCH_NAME})
pdcp -w $hostname "${DIR_HOME}/start_app.sh" /tmp/
pdcp -w $hostname "${DIR_HOME}/${BATCH_WAR}.war" /tmp/
pdsh -w $hostname "bash /tmp/start_app.sh start_batch"
;;
${SCHEDULER_NAME})
pdcp -w $hostname "${DIR_HOME}/start_app.sh" /tmp/
pdcp -w $hostname "${DIR_HOME}/${SCHEDULER_WAR}.war" /tmp/
pdsh -w $hostname "bash /tmp/start_app.sh start_sch"
;;
${ETLAGENT_NAME})
pdcp -w $hostname "${DIR_HOME}/start_app.sh" /tmp/
pdcp -w $hostname "${DIR_HOME}/${ETLAGENT_WAR}.war" /tmp/
pdsh -w $hostname "bash /tmp/start_app.sh start_etlagent"
;;
${STREAMING_NAME})
pdcp -w $hostname "${DIR_HOME}/start_app.sh" /tmp/
pdcp -w $hostname "${DIR_HOME}/${STREAMING_WAR}.war" /tmp/
pdsh -w $hostname "bash /tmp/start_app.sh start_stream"
;;
*)
echo "Ignorant"
;;
esac
# DNS registration is handled inside each start_* function via consul.
}
# Start the vigor-etl container on this host (run remotely via install_app).
start_etl(){
local consul_ip=$(get-consul-ip)
docker stop ${ETL_NAME} && docker rm ${ETL_NAME}
docker run --net ${CALICO_NET} --dns $consul_ip --dns-search service.consul --name ${ETL_NAME} -p ${ETL_PORT}:${ETL_PORT} -d vigor-etl $MYSQL_HOST ${ETL_PORT}
# Pull the cluster's Hadoop/HBase client configs from the data host into
# the pentaho big-data plugin's hadoop-configurations directory:
#   /home/vigor-etl/plugins/pentaho-big-data-plugin/hadoop-configurations/hdp21
#   <- /usr/hdp/2.4.0.0-169/{hadoop/etc/hadoop,hbase/conf}/*.xml
docker exec ${ETL_NAME} sh -c "echo -e 'y\n'|ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa"
docker exec ${ETL_NAME} sh -c "ssh-keyscan ${AMR_DATA_HOST} >> ~/.ssh/known_hosts"
docker exec ${ETL_NAME} sh -c "sshpass -p ${AMR_DATA_HOST_PWD} scp -r root@${AMR_DATA_HOST}:/usr/hdp/2.4.0.0-169/hadoop/etc/hadoop/*.xml /home/${ETL_NAME}/plugins/pentaho-big-data-plugin/hadoop-configurations/hdp21/"
docker exec ${ETL_NAME} sh -c "sshpass -p ${AMR_DATA_HOST_PWD} scp -r root@${AMR_DATA_HOST}:/usr/hdp/2.4.0.0-169/hbase/conf/*.xml /home/${ETL_NAME}/plugins/pentaho-big-data-plugin/hadoop-configurations/hdp21/"
#get ip of this container
docker stop ${ETL_NAME} && docker start ${ETL_NAME}
# publish the service port and register the container IP in consul DNS
local app_ip=$(docker inspect --format="{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" ${ETL_NAME})
amb-publish-port $app_ip ${ETL_PORT}
consul-register-service ${ETL_NAME} $app_ip
}
# Start the admin web container (needs ~500MB free under /home per the
# original note).
start_admin(){
# Host directory bind-mounted as tomcat's webapps.
mkdir -p /home/${ADMIN_NAME}/webapps
chmod -R 777 /home/${ADMIN_NAME}/webapps
docker stop ${ADMIN_NAME} && docker rm ${ADMIN_NAME}
# Deploy and unpack the WAR so its property files can be edited in place.
/bin/cp -rf "${DIR_HOME}/${ADMIN_WAR}.war" /home/${ADMIN_NAME}/webapps
cd /home/${ADMIN_NAME}/webapps/
rm -rf ${ADMIN_WAR}
unzip -q "${ADMIN_WAR}.war" -d ${ADMIN_WAR}
# FIX: the original tested `[ $? ]`, which is ALWAYS true ("0" and "1" are
# both non-empty strings), so a failed unzip was never detected.
if [ $? -eq 0 ]; then
# Point the app at this deployment's mysql/kylin/ambari/hdfs endpoints.
sed -i "/jdbc.url/{s/\/.*:/\/\/${MYSQL_HOST}:/g}" /home/${ADMIN_NAME}/webapps/${ADMIN_WAR}/WEB-INF/classes/tospur.properties
sed -i "/kylin_base_api_url/{s/\/.*:/\/\/${KYLIN_HOST}:/g}" /home/${ADMIN_NAME}/webapps/${ADMIN_WAR}/WEB-INF/classes/tospur.properties
sed -i "/ambr_host/{s/=.*/= ${AMR_URL_HOST}/g}" /home/${ADMIN_NAME}/webapps/${ADMIN_WAR}/WEB-INF/classes/tospur.properties
sed -i "/ambr_port/{s/=.*/= ${AMR_URL_PORT}/g}" /home/${ADMIN_NAME}/webapps/${ADMIN_WAR}/WEB-INF/classes/tospur.properties
sed -i "/hdfs_nameservices/{s/=.*/= ${HSFS_SERVER_NAME}/g}" /home/${ADMIN_NAME}/webapps/${ADMIN_WAR}/WEB-INF/classes/tospur.properties
sed -i "/kylin_default_project/{s/=.*/= ${ADMIN_WAR}/g}" /home/${ADMIN_NAME}/webapps/${ADMIN_WAR}/WEB-INF/classes/tospur.properties
sed -i "/ambr_cluster_name/{s/=.*/= ${AMR_CLSTER_NAME}/g}" /home/${ADMIN_NAME}/webapps/${ADMIN_WAR}/WEB-INF/classes/tospur.properties
local consul_ip=$(get-consul-ip)
docker run --privileged --net ${CALICO_NET} --dns $consul_ip --name ${ADMIN_NAME} -v /home/${ADMIN_NAME}/webapps:/usr/local/tomcat/webapps -d vigor-tomcat
# Fetch the Hadoop/HBase client XMLs from the data host into the webapp.
docker exec ${ADMIN_NAME} sh -c "echo -e 'y\n'|ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa"
docker exec ${ADMIN_NAME} sh -c "ssh-keyscan ${AMR_DATA_HOST} >> ~/.ssh/known_hosts"
docker exec ${ADMIN_NAME} sh -c "sshpass -p ${AMR_DATA_HOST_PWD} scp -r root@${AMR_DATA_HOST}:/usr/hdp/2.4.0.0-169/hadoop/etc/hadoop/*.xml /usr/local/tomcat/webapps/${ADMIN_WAR}/WEB-INF/classes/"
docker exec ${ADMIN_NAME} sh -c "sshpass -p ${AMR_DATA_HOST_PWD} scp -r root@${AMR_DATA_HOST}:/usr/hdp/2.4.0.0-169/hbase/conf/*.xml /usr/local/tomcat/webapps/${ADMIN_WAR}/WEB-INF/classes/"
docker stop ${ADMIN_NAME} && docker start ${ADMIN_NAME}
local app_ip=$(docker inspect --format="{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" ${ADMIN_NAME})
amb-publish-port $app_ip ${ADMIN_PORT} 8080
# NOTE(review): unlike the other start_* functions this one never calls
# consul-register-service — confirm whether that is intentional.
else
exit -1
fi
}
# Start the scheduler container (needs vigordata-scheduler.war and ~500MB
# free under /home per the original note).
start_sch(){
# Host directory bind-mounted as tomcat's webapps.
mkdir -p /home/${SCHEDULER_NAME}/webapps
chmod -R 777 /home/${SCHEDULER_NAME}/webapps
docker stop ${SCHEDULER_NAME} && docker rm ${SCHEDULER_NAME}
# Deploy and unpack the WAR so its config files can be edited in place.
/bin/cp -rf "${DIR_HOME}/${SCHEDULER_WAR}.war" /home/${SCHEDULER_NAME}/webapps
cd /home/${SCHEDULER_NAME}/webapps/
rm -rf ${SCHEDULER_WAR}
unzip -q "${SCHEDULER_WAR}.war" -d ${SCHEDULER_WAR}
# FIX: `[ $? ]` is always true; test the actual exit status of unzip.
if [ $? -eq 0 ]; then
sed -i "/jdbc:mysql/{s/\/.*:/\/\/${MYSQL_HOST}:/g}" /home/${SCHEDULER_NAME}/webapps/${SCHEDULER_WAR}/WEB-INF/classes/activiti.cfg.xml
sed -i "/jdbc:mysql/{s/\/.*:/\/\/${MYSQL_HOST}:/g}" /home/${SCHEDULER_NAME}/webapps/${SCHEDULER_WAR}/WEB-INF/classes/conf.properties
sed -i "/kylin_api_url/{s/\/.*:/\/\/${KYLIN_HOST}:/g}" /home/${SCHEDULER_NAME}/webapps/${SCHEDULER_WAR}/WEB-INF/classes/conf.properties
local consul_ip=$(get-consul-ip)
docker run --privileged --net ${CALICO_NET} --dns $consul_ip --name ${SCHEDULER_NAME} -v /home/${SCHEDULER_NAME}/webapps:/usr/local/tomcat/webapps -d vigor-tomcat
# Publish the service port and register the container IP in consul DNS.
local app_ip=$(docker inspect --format="{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" ${SCHEDULER_NAME})
amb-publish-port $app_ip ${SCHEDULER_PORT} 8080
consul-register-service ${SCHEDULER_NAME} $app_ip
else
exit -1
fi
}
# Start the batch-agent container.
start_batch(){
# Host directory bind-mounted as tomcat's webapps.
mkdir -p /home/${BATCH_NAME}/webapps
chmod -R 777 /home/${BATCH_NAME}/webapps
docker stop ${BATCH_NAME} && docker rm ${BATCH_NAME}
# Deploy and unpack the WAR so its config files can be edited in place.
/bin/cp -rf "${DIR_HOME}/${BATCH_WAR}.war" /home/${BATCH_NAME}/webapps
cd /home/${BATCH_NAME}/webapps/
rm -rf ${BATCH_WAR}
unzip -q "${BATCH_WAR}.war" -d ${BATCH_WAR}
# FIX: `[ $? ]` is always true; test the actual exit status of unzip.
if [ $? -eq 0 ]; then
# Point the agent at mysql and HDFS, e.g. fs.defaultFS=hdfs://<nameservice>.
sed -i "/jdbc:mysql/{s/\/.*:/\/\/${MYSQL_HOST}:/g}" /home/${BATCH_NAME}/webapps/${BATCH_WAR}/WEB-INF/classes/compute-config.properties
sed -i "/defaultFS/{s/=.*/= hdfs:\/\/${HSFS_SERVER_NAME}/g}" /home/${BATCH_NAME}/webapps/${BATCH_WAR}/WEB-INF/classes/compute-config.properties
local consul_ip=$(get-consul-ip)
docker run --privileged --net ${CALICO_NET} --dns $consul_ip --name ${BATCH_NAME} -v /home/${BATCH_NAME}/webapps:/usr/local/tomcat/webapps -d vigor-tomcat
# Fetch the Hadoop client XMLs from the data host into the webapp.
docker exec ${BATCH_NAME} sh -c "echo -e 'y\n'|ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa"
docker exec ${BATCH_NAME} sh -c "ssh-keyscan ${AMR_DATA_HOST} >> ~/.ssh/known_hosts"
docker exec ${BATCH_NAME} sh -c "sshpass -p ${AMR_DATA_HOST_PWD} scp -r root@${AMR_DATA_HOST}:/usr/hdp/2.4.0.0-169/hadoop/etc/hadoop/*.xml /usr/local/tomcat/webapps/${BATCH_WAR}/WEB-INF/classes/"
docker stop ${BATCH_NAME} && docker start ${BATCH_NAME}
# Publish the service port and register the container IP in consul DNS.
local app_ip=$(docker inspect --format="{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" ${BATCH_NAME})
amb-publish-port $app_ip ${BATCH_PORT} 8080
consul-register-service ${BATCH_NAME} $app_ip
else
exit -1
fi
}
# Start the etl-agent container.
start_etlagent(){
# Host directory bind-mounted as tomcat's webapps.
mkdir -p /home/${ETLAGENT_NAME}/webapps
chmod -R 777 /home/${ETLAGENT_NAME}/webapps
docker stop ${ETLAGENT_NAME} && docker rm ${ETLAGENT_NAME}
# Deploy and unpack the WAR so its config file can be edited in place.
/bin/cp -rf "${DIR_HOME}/${ETLAGENT_WAR}.war" /home/${ETLAGENT_NAME}/webapps
cd /home/${ETLAGENT_NAME}/webapps/
rm -rf ${ETLAGENT_WAR}
unzip -q "${ETLAGENT_WAR}.war" -d ${ETLAGENT_WAR}
# FIX: `[ $? ]` is always true; test the actual exit status of unzip.
if [ $? -eq 0 ]; then
sed -i "/repo_db_host/{s/=.*/= ${MYSQL_HOST}/g}" /home/${ETLAGENT_NAME}/webapps/${ETLAGENT_WAR}/WEB-INF/classes/config.properties
sed -i "/etl_server_path/{s/=.*/= \/home\/vigor-etl\//g}" /home/${ETLAGENT_NAME}/webapps/${ETLAGENT_WAR}/WEB-INF/classes/config.properties
sed -i "/repository_name/{s/=.*/= ebd/g}" /home/${ETLAGENT_NAME}/webapps/${ETLAGENT_WAR}/WEB-INF/classes/config.properties
local consul_ip=$(get-consul-ip)
docker run --privileged --net ${CALICO_NET} --dns $consul_ip --name ${ETLAGENT_NAME} -v /home/${ETLAGENT_NAME}/webapps:/usr/local/tomcat/webapps -d vigor-tomcat
# Publish the service port and register the container IP in consul DNS.
local app_ip=$(docker inspect --format="{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" ${ETLAGENT_NAME})
amb-publish-port $app_ip ${ETLAGENT_PORT} 8080
consul-register-service ${ETLAGENT_NAME} $app_ip
else
exit -1
fi
}
# Start the streaming (flume) agent container.
start_stream(){
# Host directory bind-mounted as tomcat's webapps.
mkdir -p /home/${STREAMING_NAME}/webapps
chmod -R 777 /home/${STREAMING_NAME}/webapps
docker stop ${STREAMING_NAME} && docker rm ${STREAMING_NAME}
# Deploy and unpack the WAR (no config edits are required for this app).
/bin/cp -rf "${DIR_HOME}/${STREAMING_WAR}.war" /home/${STREAMING_NAME}/webapps
cd /home/${STREAMING_NAME}/webapps/
rm -rf ${STREAMING_WAR}
unzip -q "${STREAMING_WAR}.war" -d ${STREAMING_WAR}
# FIX: `[ $? ]` is always true; test the actual exit status of unzip.
if [ $? -eq 0 ]; then
local consul_ip=$(get-consul-ip)
docker run --privileged --net ${CALICO_NET} --dns $consul_ip --name ${STREAMING_NAME} -v /home/${STREAMING_NAME}/webapps:/usr/local/tomcat/webapps -d vigor-tomcat
# Publish the service port and register the container IP in consul DNS.
local app_ip=$(docker inspect --format="{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" ${STREAMING_NAME})
amb-publish-port $app_ip ${STREAMING_PORT} 8080
consul-register-service ${STREAMING_NAME} $app_ip
else
exit -1
fi
}
# Restart an already-created service container and re-publish its host port
# and Consul DNS registration.
#   $1 - container name; must match one of the known service names below.
# Prints "<host_port> <container_ip>" on stdout when done.
restart(){
local container=${1:?"restart <container>"}
# Fallback port, printed when the name matches no known service.
local container_port=8900
docker stop ${container} && docker start ${container}
local app_ip=$(docker inspect --format="{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" ${container})
case ${container} in
${ADMIN_NAME})
container_port=${ADMIN_PORT}
## Re-publish the host port, forwarding to container port 8080.
amb-publish-port $app_ip ${container_port} 8080
consul-register-service ${container} $app_ip
;;
${ETL_NAME})
container_port=${ETL_PORT}
# NOTE(review): only this branch omits the explicit 8080 container port, so
# amb-publish-port forwards the host port to the same port inside the
# container -- confirm this asymmetry is intentional.
amb-publish-port $app_ip ${container_port}
consul-register-service ${container} $app_ip
;;
${BATCH_NAME})
container_port=${BATCH_PORT}
amb-publish-port $app_ip ${container_port} 8080
consul-register-service ${container} $app_ip
;;
${SCHEDULER_NAME})
container_port=${SCHEDULER_PORT}
amb-publish-port $app_ip ${container_port} 8080
consul-register-service ${container} $app_ip
;;
${ETLAGENT_NAME})
container_port=${ETLAGENT_PORT}
amb-publish-port $app_ip ${container_port} 8080
consul-register-service ${container} $app_ip
;;
${STREAMING_NAME})
container_port=${STREAMING_PORT}
amb-publish-port $app_ip ${container_port} 8080
consul-register-service ${container} $app_ip
;;
*)
echo "Ignorant"
;;
esac
## Report the published port and the container's IP address.
echo $container_port $app_ip
}
# Publish a container service on a host port using iptables NAT.
#   $1 - container IP
#   $2 - host port to expose
#   $3 - container port (optional; defaults to the host port when omitted)
# Removes any stale INPUT / PREROUTING / OUTPUT rules mentioning the host
# port, then installs an ACCEPT rule plus DNAT rules for external (eth0) and
# loopback traffic, and finally persists the ruleset.
amb-publish-port() {
  local container_ip=${1:?"amb-publish-port <container_ip> <host_port> [<container_port>]"}
  local host_port=${2:?"amb-publish-port <container_ip> <host_port> [<container_port>]"}
  local container_port=$3
  # Delete matching rules from the highest rule number down (tac) so earlier
  # deletions do not renumber the rules still pending deletion.
  # NOTE(review): grep matches the port anywhere in the rule listing, so a
  # short port number also matches longer ones (80 vs 8080) -- confirm the
  # configured ports cannot collide this way.
  for i in $( iptables -nvL INPUT --line-numbers | grep "$host_port" | awk '{ print $1 }' | tac ); \
  do iptables -D INPUT "$i"; done
  iptables -A INPUT -m state --state NEW -p tcp --dport "$host_port" -j ACCEPT
  for i in $( iptables -t nat --line-numbers -nvL PREROUTING | grep "$host_port" | awk '{ print $1 }' | tac ); \
  do iptables -t nat -D PREROUTING "$i"; done
  for i in $( iptables -t nat --line-numbers -nvL OUTPUT | grep "$host_port" | awk '{ print $1 }' | tac ); \
  do iptables -t nat -D OUTPUT "$i"; done
  # Quoted so this stays a valid two-argument -z test when $3 was omitted;
  # the unquoted original collapsed to `[ -z ]` and only worked by accident.
  if [ -z "$container_port" ]; then
    iptables -A PREROUTING -t nat -i eth0 -p tcp --dport "$host_port" -j DNAT --to "${container_ip}:$host_port"
    iptables -t nat -A OUTPUT -p tcp -o lo --dport "$host_port" -j DNAT --to-destination "${container_ip}:$host_port"
  else
    iptables -A PREROUTING -t nat -i eth0 -p tcp --dport "$host_port" -j DNAT --to "${container_ip}:$container_port"
    iptables -t nat -A OUTPUT -p tcp -o lo --dport "$host_port" -j DNAT --to-destination "${container_ip}:$container_port"
  fi
  service iptables save
}
# Call arguments verbatim (e.g. `script.sh restart vigor-etl`). "$@" passes
# each argument through as a single word; the unquoted $@ re-split on
# whitespace and glob-expanded the arguments.
"$@"
| true
|
204b7552a3ab4814205705e522dafc7bc3b2da2d
|
Shell
|
bartulo/scripts_blender
|
/exportar_pnoa_mdt.sh
|
UTF-8
| 793
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Export a PNOA orthophoto and an MDT elevation tile around a point with GRASS.
# Prompts for the centre coordinates (in map units), the tile size in km and
# the two raster resolutions, sets the GRASS region to the resulting bounding
# box, and writes pnoa.jpg and mdt.tif in the current directory.
echo -e "Coordenada Latitud"
read -r latitud
echo -e "Coordenada Longitud"
read -r longitud
echo -e "Extensión (en km)"
read -r extension
echo -e "Resolucion PNOA"
read -r pnoa_res
echo -e "Resolucion MDT"
read -r mdt_res
# Half the tile edge in map units: extension km * 1000 m / 2.
half=$((extension * 500))
sur=$((latitud - half))
norte=$((latitud + half))
este=$((longitud + half))
oeste=$((longitud - half))
# Constrain the GRASS region to the requested bounding box.
g.region n="$norte"
g.region s="$sur"
g.region e="$este"
g.region w="$oeste"
# Export the orthophoto at its own resolution and convert it to RGB JPEG.
g.region nsres="$pnoa_res"
g.region ewres="$pnoa_res"
r.out.gdal pnoa out=pnoa.tif type=UInt16
gdal_translate -ot Byte -of JPEG -expand rgb pnoa.tif pnoa.jpg
# Export the DEM at its own resolution, scaled x30 and colourised by rules.txt.
g.region nsres="$mdt_res"
g.region ewres="$mdt_res"
r.mapcalc 'mdt_tmp = mdt*30'
r.colors mdt_tmp rules=rules.txt
r.out.gdal mdt_tmp out=mdt.tif type=UInt16 -f
# Drop the temporary scaled raster.
g.remove type=rast name=mdt_tmp -f
| true
|
ecc21c26932a45eeab950d7bf18071cb64162c62
|
Shell
|
liaoya/packer-template
|
/cloud/custom/install-asdf.sh
|
UTF-8
| 1,646
| 3.640625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Install asdf (the version manager) system-wide under /opt/asdf and wire up
# login-shell initialisation plus completions for bash and fish.
# Shell options via `set` so they survive `bash install-asdf.sh` invocation
# (options on the shebang line are ignored when the script is run that way).
set -eux
#shellcheck disable=SC1090
# ${CUSTOM_ASDF:-} so the guard does not trip `set -u` when the variable is unset.
[[ -n ${CUSTOM_ASDF:-} && "${CUSTOM_ASDF^^}" == "TRUE" ]] || exit 0
echo "==> Install asdf"
[[ $(command -v git) ]] || { echo "git is required"; exit 0; }
export ASDF_DATA_DIR=/opt/asdf
# Pick the newest tag from GitHub when jq is available; otherwise pin a default.
if [[ $(command -v jq) ]]; then
    ASDF_VERSION=$(curl -sL https://api.github.com/repos/asdf-vm/asdf/tags | jq .[].name | tr -d '"' | head -1)
fi
ASDF_VERSION=${ASDF_VERSION:-v0.7.2}
git clone https://github.com/asdf-vm/asdf.git "${ASDF_DATA_DIR}" --branch "${ASDF_VERSION}"
if [[ -n "$(ls -A ${ASDF_DATA_DIR})" ]]; then
    if [[ -f "${ASDF_DATA_DIR}/asdf.sh" ]]; then
        source "${ASDF_DATA_DIR}/asdf.sh"
        asdf update
    fi
    # When run via sudo, hand ownership of the install back to the real user.
    # ${SUDO_USER:-} keeps the test safe under `set -u` outside sudo.
    if [[ -n ${SUDO_USER:-} ]]; then
        real_user=$(id -u "${SUDO_USER}")
        real_group=$(id -g "${SUDO_USER}")
        chown -R "${real_user}:${real_group}" "${ASDF_DATA_DIR}"
    fi
    # Login-shell hook for bash/sh, then completions for bash and fish.
    echo "[[ -s \"${ASDF_DATA_DIR}\"/asdf.sh ]] && export ASDF_DATA_DIR=\"${ASDF_DATA_DIR}\" && source \"\${ASDF_DATA_DIR}\"/asdf.sh" | tee /etc/profile.d/asdf.sh
    [[ -d /etc/bash_completion.d ]] || mkdir -p /etc/bash_completion.d
    [[ -f ${ASDF_DATA_DIR}/completions/asdf.bash ]] && cp -pr "${ASDF_DATA_DIR}/completions/asdf.bash" /etc/bash_completion.d
    [[ -d /etc/fish/conf.d ]] || mkdir -p /etc/fish/conf.d
    echo "[ -s \"${ASDF_DATA_DIR}\"/asdf.fish ]; and set -xg ASDF_DATA_DIR \"${ASDF_DATA_DIR}\"; and source \$ASDF_DATA_DIR/asdf.fish" | tee /etc/fish/conf.d/asdf.fish
    [[ -d /etc/fish/completions ]] || mkdir -p /etc/fish/completions
    [[ -f ${ASDF_DATA_DIR}/completions/asdf.fish ]] && cp -pr "${ASDF_DATA_DIR}/completions/asdf.fish" /etc/fish/completions
fi
| true
|
60c2b8589605177e0f27aa4790117cf801698fcb
|
Shell
|
ethasu/bash
|
/gpltall
|
EUC-JP
| 601
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Plot whitespace-separated columns from stdin with gnuplot: column 1 is x,
# every further column becomes one curve. Any script arguments are forwarded
# verbatim as the gnuplot plot style (e.g. "w lp").
set -e
# Unpredictable temp file, removed on every exit path. The original used a
# fixed name (temp_01.dat) in the current directory, which could collide
# between concurrent runs and was leaked if the script aborted early.
filename=$(mktemp)
trap 'rm -f -- "$filename"' EXIT
cat > "$filename"
# Column count, taken from the last line of the input.
nf=$(tac "$filename" | awk 'NR==1{print NF}')
# Style shorthand: "w lp" gains point type 6; "," separates multiple plots.
op=$(echo "$@" | sed -e 's/w lp/w lp pt 6/g' -e 's/,/ , ""/g')
if [ $# -eq 0 ]
then
# Without a style argument, plot points (pt 6 = open circles).
gnuplot -p <<EOF
set key
set grid
plot for [i=2:$nf] '$filename' u 1:i title sprintf("%d",i-1) w p pt 6
EOF
else
# With a style argument, use the expanded style string.
gnuplot -p <<EOF
set key
set grid
plot for [i=2:$nf] '$filename' u 1:i title sprintf("%d",i-1) $op
EOF
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.