blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
194ee69efbc0566d1d4c95e3c19c990423e4760e
|
Shell
|
cronburg/scripts
|
/oneshot/fix_dropbox.sh
|
UTF-8
| 432
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Compare files in the Dropbox "Camera Uploads" folder against identically
# named copies under the "pics" tree and print the byte-size difference for
# each pair (negative means the Camera Uploads copy is shorter).

# Print the size difference for every file of $1 that also exists under $2.
# $1 - flat source directory (scanned non-recursively)
# $2 - destination tree (searched recursively by file name)
compare_sizes() {
    local src=$1 dst=$2 f1 f2 diff
    # Glob instead of parsing `ls`; nullglob makes a missing/empty dir a no-op.
    shopt -s nullglob
    for f1 in "$src"/*; do
        # First match only; `find` may return several files with this name.
        f2=$(find "$dst" -name "$(basename "$f1")" | head -n 1)
        if [ -n "$f2" ]; then
            # BUG FIX: the original used `wc -l` (line count) although the
            # comment promised file *sizes*; `wc -c` counts bytes, which is
            # what matters for binary photo files.
            diff=$(( $(wc -c < "$f1") - $(wc -c < "$f2") ))
            echo "$diff"
            #cmp "$f1" "$f2"
            #rm -f "$f1"
        fi
    done
}

compare_sizes "$HOME/Dropbox/Camera Uploads" "$HOME/Dropbox/pics"
| true
|
0432919286f145e194c0f549f0b1787b0d764d83
|
Shell
|
chejunwei2/kaldo
|
/examples/silicon_bulk_LDA_ASE_QE_hiPhive/get_reference.sh
|
UTF-8
| 539
| 2.671875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# get_reference.sh is used to
# obtain reference calculations
# for example silicon_bulk_LDA_QE_hiPhive

TARBALL=silicon_bulk_LDA_ASE_QE_hiPhive.tar.gz

# Fetch precalculated force constants folder from remote.
# `wget -O` writes straight to the intended file name, removing the need to
# rename away the literal '?dl=0' suffix afterwards; quoting the URL keeps
# the shell from ever glob-expanding the '?'.
wget -O "$TARBALL" "https://www.dropbox.com/s/bvxk7zkcyv8d3ak/${TARBALL}?dl=0"

# Untar precalculated files and clean up
tar xzvf "$TARBALL"
rm -f "$TARBALL"
echo " "
echo "Reference calculation files are obtained."
| true
|
7e01fc7491e3db1df4423b7a71df4ccb57c7624f
|
Shell
|
afrittoli/scaling_pipelines_with_tekton
|
/build.sh
|
UTF-8
| 294
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/sh
#
TEX_FILENAME=scaling_pipelines_with_tekton

# Reproducible latex builds: pin the timestamps TeX embeds in the output.
# BUG FIX: '&>' is a bashism; under a POSIX /bin/sh it parses as "background
# the command, then perform an empty redirection", so the old `$?` check
# never saw xelatex's real exit status. Use '>file 2>&1' and test the
# command directly.
if ! SOURCE_DATE_EPOCH=0 SOURCE_DATE_EPOCH_TEX_PRIMITIVES=1 \
    xelatex "${TEX_FILENAME}.tex" > /dev/null 2>&1; then
    echo "${TEX_FILENAME}.tex cannot be compiled"
    tail -30 "${TEX_FILENAME}.log"
    exit 1
fi
| true
|
e75aa76f0e016e26699b39e918f288b8675d4cd5
|
Shell
|
arcusfelis/eodbc
|
/tools/travis-setup-db.sh
|
UTF-8
| 4,328
| 3.75
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Environment variable DB is used by this script.
# If DB is undefined, than this script does nothing.
# DB=mssql -> start a dockerized MSSQL server on host port 2433, load the
# test schema, and write a matching ODBC DSN to ~/.odbc.ini.
set -e
TOOLS=`dirname $0`
cd "$TOOLS/.."
# There is one odbc.ini for both mssql and pgsql
# Allows to run both in parallel
function install_odbc_ini
{
# CLIENT OS CONFIGURING STUFF
#
# Be aware, that underscore in TDS_Version is required.
# It can't be just "TDS Version = 7.1".
#
# To check that connection works use:
#
# {ok, Conn} = odbc:connect("DSN=eodbc-mssql;UID=sa;PWD=eodbc_secret+ESL123",[]).
#
# To check that TDS version is correct, use:
#
# odbc:sql_query(Conn, "select cast(1 as bigint)").
#
# It should return:
# {selected,[[]],[{"1"}]}
#
# It should not return:
# {selected,[[]],[{1.0}]}
#
# Be aware, that Driver and Setup values are for Ubuntu.
# CentOS would use different ones.
# Detect the FreeTDS ODBC driver location for the current platform.
if test -f "/usr/local/lib/libtdsodbc.so"; then
# Mac
ODBC_DRIVER="/usr/local/lib/libtdsodbc.so"
fi
if test -f "/usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so"; then
# Ubuntu
ODBC_DRIVER="/usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so"
ODBC_SETUP="/usr/lib/x86_64-linux-gnu/odbc/libtdsS.so"
fi
# Write the DSN; the unquoted heredoc expands the driver paths chosen above.
cat > ~/.odbc.ini << EOL
[eodbc-mssql]
Setup = $ODBC_SETUP
Driver = $ODBC_DRIVER
Server = 127.0.0.1
Port = 2433
Database = eodbc
Username = sa
Password = eodbc_secret+ESL123
Charset = UTF-8
TDS_Version = 7.2
client charset = UTF-8
server charset = UTF-8
EOL
}
# Stores all the data needed by the container
SQL_ROOT_DIR="$(mktemp -d)"
echo "SQL_ROOT_DIR is $SQL_ROOT_DIR"
# A directory, that contains resources that needed to bootstrap a container
# i.e. certificates and config files
SQL_TEMP_DIR="$SQL_ROOT_DIR/temp"
mkdir -p "$SQL_TEMP_DIR"
if [ "$DB" = 'mssql' ]; then
# LICENSE STUFF, IMPORTANT
#
# SQL Server Developer edition
# http://download.microsoft.com/download/4/F/7/4F7E81B0-7CEB-401D-BCFA-BF8BF73D868C/EULAs/License_Dev_Linux.rtf
#
# Information from that license:
# > a. General.
# > You may install and use copies of the software on any device,
# > including third party shared devices, to design, develop, test and
# > demonstrate your programs.
# > You may not use the software on a device or server in a
# > production environment.
#
# > We collect data about how you interact with this software.
# READ MORE...
#
# > BENCHMARK TESTING.
# > You must obtain Microsoft's prior written approval to disclose to
# > a third party the results of any benchmark test of the software.
# SCRIPTING STUFF
# Remove any leftovers from a previous run; failures here are non-fatal.
docker rm -f eodbc-mssql || echo "Skip removing previous container"
docker volume rm -f eodbc-mssql-data || echo "Skip removing previous volume"
#
# MSSQL wants secure passwords
# i.e. just "eodbc_secret" would not work.
#
# We don't overwrite --entrypoint, but it's possible.
# It has no '/docker-entrypoint-initdb.d/'-like interface.
# So we would put schema into some random place and
# apply it inside 'docker-exec' command.
#
# ABOUT VOLUMES
# Just using /var/opt/mssql volume is not enough.
# We need mssql-data-volume.
#
# Both on Mac and Linux
# https://github.com/Microsoft/mssql-docker/issues/12
#
# Otherwise we get an error in logs
# Error 87(The parameter is incorrect.) occurred while opening file '/var/opt/mssql/data/master.mdf'
#
# Host port is 2433
# Container port is 1433
docker run -d -p 2433:1433 \
--name=eodbc-mssql \
-e "ACCEPT_EULA=Y" \
-e "SA_PASSWORD=eodbc_secret+ESL123" \
-v "$(pwd)/test/mssql.sql:/eodbc.sql:ro" \
--health-cmd='/opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P "eodbc_secret+ESL123" -Q "SELECT 1"' \
microsoft/mssql-server-linux
# Block until the container reports healthy and the host port accepts TCP.
tools/wait_for_healthcheck.sh eodbc-mssql
tools/wait-for-it.sh -h 127.0.0.1 -p 2433
# Create the database, then load the schema mounted at /eodbc.sql above.
docker exec -it eodbc-mssql \
/opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P "eodbc_secret+ESL123" \
-Q "CREATE DATABASE eodbc"
docker exec -it eodbc-mssql \
/opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P "eodbc_secret+ESL123" \
-i eodbc.sql
install_odbc_ini
else
echo "Skip setting up database"
fi
| true
|
e37e3791af6189fcbee037ca3514373556227deb
|
Shell
|
Dhirajsharmain/shell-programming
|
/selection-statements/case-statement/prob3.sh
|
UTF-8
| 502
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash -x
#Date of Creation : Sun, May 9, 2021 8:16:26 PM
#Created By : Dhiraj

# Map a power-of-ten number (1..100000) to its English name.
# $1 - the value to classify
# Prints "<num> : <NAME>" for the recognized values; prints the invalid-number
# message for out-of-range or non-numeric input; prints nothing for in-range
# values that are not in the list (matches the original behavior).
classify() {
    local num=$1
    # BUG FIX: validate numeric input up front — the original passed $num
    # straight to `[ -ge ]`, which raises a shell error for empty or
    # non-numeric values instead of reporting them as invalid.
    if [[ $num =~ ^[0-9]+$ ]] && [ "$num" -ge 0 ] && [ "$num" -le 100000 ]; then
        case $num in
            1)      echo "$num : ONE" ;;
            10)     echo "$num : TEN" ;;
            100)    echo "$num : HUNDRED" ;;
            1000)   echo "$num : THOUSAND" ;;
            10000)  echo "$num : TEN THOUSAND" ;;
            100000) echo "$num : LAKH" ;;
        esac
    else
        echo "$num : Invalid Number, Please correct number"
    fi
}

read -p "Enter a number in 1, 10, 100, 1000, 10000, 100000 format : " num
classify "$num"
| true
|
02e71bb0f66ea4c9d30cccf2d99a0876390dc4cc
|
Shell
|
codehz/mcpe-arch
|
/mcpe-sdk/PKGBUILD
|
UTF-8
| 440
| 2.515625
| 3
|
[] |
no_license
|
# Arch PKGBUILD: repackage a standalone Android NDK toolchain as the
# mcpe-sdk package, for building a Minecraft PE server.
pkgname=mcpe-sdk
pkgver=0.1
pkgrel=2
pkgdesc="Android SDK custom build for minecraft pe server"
# NOTE(review): arch is x86_64 yet the toolchain below targets --arch x86 —
# confirm this 32-bit target is intended.
arch=('x86_64')
license=('GPL')
depends=()
makedepends=('android-ndk')
source=()
url='https://developer.android.com/ndk/'
# Keep the prebuilt toolchain binaries unstripped.
options=(!strip)
# PKGEXT=.pkg.tar
NDK=/opt/android-ndk
# Generate a standalone API-21 x86 gnustl toolchain directly into the
# package root using the NDK's bundled helper script.
package() {
mkdir -p "$pkgdir/opt/"
$NDK/build/tools/make_standalone_toolchain.py \
--arch x86 --api 21 --stl gnustl --install-dir "$pkgdir/opt/mcpe-sdk"
}
| true
|
7d61b1662e6c9b0c09eaee6f0ec9d2d3933cd75f
|
Shell
|
ManOfTeflon/config
|
/bin/run_old
|
UTF-8
| 9,359
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# run — manage the lifetimes of screen sessions and the processes inside
# them. The long help text below doubles as the user documentation and is
# printed verbatim by usage().
usage_message='run is a bash script which manages the lifetimes of various screen sessions and processes.
Example usage:
Start memsql single box
run start memsqld
The same thing with gdb attached
run -g start memsqld
run gets a little confused by the aliases, so I have sql/mysqld symlinked to ~/bin/memsqld. I am working on a fix.
Start a cluster with three aggregators and five leaves with the python shell and each MemSQL instance running in a detached screen session.
run start cluster 3 5
The same thing but with a gdb to manage each instance
run start cluster 3 5 True
Go to the python shell managing distributed.py
run watch cluster
Watch the output for various instances
run watch master
run watch agg1
run watch leaf4
Attach a gdb to an existing instance
run attach master
Watch gdb output
run debug master
Send SIGTRAP to pause execution (Only do if gdb is attached!!)
run trap master
Perf top for an instance
run perf master
Stop any instance
run stop leaf2
Currently, run stop cluster will be caught by ipython and not actually stop the cluster. To cleanly shut a cluster down, I do run watch cluster then C-D.'
# State directory: one ".run.<name>" pid file per managed session.
basedir=$HOME/.run
screen_prefix=run
# "$basedir/.run." with every regex metacharacter escaped, for use as a sed
# pattern when recovering session names from pid-file paths.
pattern=`echo "$basedir/.run." | sed -r 's/[]\/()$*.^|[]/\\\\&/g'`
mkdir -p $basedir
sleepinterval=0.25
# Argument defaults
gdb=
port=
noreap=
name=
cmd=
args=
file=
existing=
out=
filename () {
echo "$basedir/.run.$1"
}
getname () {
echo "$1" | sed -r "s/$pattern//g"
}
gdbname () {
echo "$basedir/.gdb.$1"
}
ptsname () {
echo "$basedir/.pts.$1"
}
debugname () {
echo "gdb.$1"
}
getpid () {
cat $(filename $1) 2>/dev/null
}
# Return 0 iff session $1 has a recorded PID that is still alive.
running () {
pid=$(getpid $1)
if [ -z "$pid" ]; then
return 1
fi
# NOTE(review): 'ps --no-headers p PID' mixes GNU and BSD option styles —
# works with procps ps; confirm on other platforms.
a=$(ps --no-headers p $pid)
if [ -n "$a" ]; then
return 0
fi
return 1
}
# Print the long usage text and exit with an error status.
usage () {
echo "$usage_message"
exit 1
}
# Remove marker files for every recorded session whose process has died.
reap () {
for file in $basedir/.run.*; do
zombie=$(getname $file)
pid=$(getpid $zombie)
if [ -z $pid ]; then continue; fi
if (! running $zombie); then
echo "Reaping dead process $zombie with pid $pid"
cleanup $zombie
fi
done
}
# Block until TCP port $1 on localhost accepts connections.
waitforport () {
echo "Waiting for port $1""..."
until nc -vz localhost $1; do sleep $sleepinterval; done &>/dev/null
}
# Delete the gdb and pid marker files for session $1 (errors ignored).
cleanup () {
rm $(gdbname $1) 2>/dev/null 1>/dev/null
rm $(filename $1) 2>/dev/null 1>/dev/null
}
# Parse "<cmd> [args...]" into the globals cmd/args/name/file. Dead sessions
# are reaped first unless -c/--no-reap was given; trailing arguments are
# single-quoted and accumulated into $args for later eval/screen use.
parse () {
if [ -z "$noreap" ]; then
reap
fi
cmd=$1
shift
# Escape the rest of the arguments
args=
for arg in "$@"; do
args="$args '$arg'"
done
if [[ -z "$name" ]]; then
name=$cmd
fi
file=$(filename $name)
}
# Fail with usage unless no positional arguments remain.
# NOTE(review): [[ -n "$@" ]] tests the space-joined string rather than $# —
# works for typical calls but differs for empty-string args; confirm.
assertnoargs () {
if [[ -n "$@" ]]; then
usage
fi
}
# Parent PID of the recorded process — i.e. screen's own PID for a session.
screen_pid () {
ps -p $(getpid $1) -o ppid= 2>/dev/null
}
# "<screen-pid>.<prefix>.<name>": the id screen uses for an existing session.
existing_screen () {
echo $(screen_pid $1).$screen_prefix.$1
}
# Fresh session name for a new screen: "<prefix>.<name>".
name_screen () {
echo "$screen_prefix.$1"
}
# Single point for running the already-quoted command line (tracing hook).
cmd () {
# echo "$@"
eval "$@"
}
# Launch command string $3 inside screen under window title $1, recording the
# child's PID into file $2. With $existing set, a window is added to that
# session (retrying up to 100 times); otherwise a new detached session is
# created and a failure is fatal immediately. Returns once the pid file is
# non-empty.
start_screen () {
# Build: write $$ to the pid file, then exec the payload. Single quotes in
# the payload are escaped so it survives embedding in sh -c '...'.
screen_command='echo $$ > '"$2; exec $(echo $3 | sed "s/'/'\"'\"'/g")"
screen_args="-t '$1'"
if [ -z "$existing" ]; then
screen_args="-Adm $screen_args"
screen_session=$(name_screen $1)
else
screen_args="-X screen $screen_args"
screen_session=$(existing_screen $existing)
fi
export IGNOREEOF=1
screen="screen -S '$screen_session' $screen_args sh -c '$screen_command'"
r=1
attempts=0
while [ "$r" -ne 0 ]; do
cmd "$screen"
r=$?
let attempts=attempts+1
# New sessions give up on the first failure; adding to an existing session
# keeps retrying until 100 attempts have been made.
if [ -z "$existing" -o "$attempts" -ge 100 ] && [ "$r" -ne 0 ]; then echo $r; exit $r; fi
done
sync
# Wait until the spawned child has written its PID before returning.
while [ ! -s $2 ]; do sleep $sleepinterval; done
}
# --- Entry point: global option flags first, then subcommand dispatch. ---
if (( ! "$#" )); then
usage
fi
arg=$1
shift
middle=
# Consume leading option flags; the first non-flag word is the subcommand.
while (( "$#" )); do
case $arg in
-g*|--gdb=*)
# NOTE(review): the value extracted by sed is immediately overwritten with
# 'true' on the next line — the sed result is discarded; confirm whether a
# gdb binary path was intended here.
gdb=`echo $arg | sed -r 's/(-g|--gdb=)//'`
gdb=true
;;
-P*|--port=*)
port=`echo $arg | sed -r 's/(-P|--port=)//'`
;;
-o*|--outfile=*)
out=`echo $arg | sed -r 's/(-o|--outfile=)//'`
;;
-n*|--name=*)
name=`echo $arg | sed -r 's/(-n|--name=)//'`
;;
-c|--no-reap)
noreap=true
;;
-e*|--existing*)
existing=`echo $arg | sed -r 's/(-e|--existing=)//'`
;;
*)
break ;;
esac
arg=$1
shift
done
# Subcommand dispatch. 'restart' intentionally falls through (;&) to 'start'.
case $arg in
restart)
parse "$@"
run stop $name
;&
start)
parse "$@"
if [ -f "$file" ]; then
pid=$(getpid $name)
echo "Server '$name' is already running with PID " $pid 1>&2
exit 1
fi
if [[ -n "$gdb" ]]; then
# gdb mode: one screen runs the pty bridge, a second runs gdb against it.
gdbfile=$(gdbname $name)
gdbname=$(debugname $name)
ptsfile=$(ptsname $name)
start_screen $name $file "$basedir/pty $ptsfile $gdbname"
if [ -z "$existing" ]; then existing=$name; fi
start_screen $gdbname $(filename gdb.$name) "gdb -ex 'r $args 1>$ptsfile 2>$ptsfile <$ptsfile' $cmd"
else
start_screen $name $file "$cmd $args"
fi
if [ -n "$port" ]; then
waitforport "$port"
fi
;;
watch)
parse "$@"
assertnoargs
if [ -z "$(getpid $name)" ]; then exit 0; fi
if [ -z "$existing" ]; then existing=$name; fi
screen -A -d -RR "$(existing_screen $existing)" -p $name
;;
show)
if [ -z "$noreap" ]; then
reap
fi
assertnoargs
if [ -z "$(find $basedir -name '.run.*')" ]; then exit 0; fi
prefix=$basedir/.run.
# Print each live session name by cutting off the pid-file prefix.
for zombie in $prefix*; do
echo $zombie | cut -c `echo "$prefix" | wc -c`-
done
;;
killall)
if [ -z "$noreap" ]; then
reap
fi
assertnoargs
for file in $basedir/.run.*; do
zombie=$(getname $file)
pid=$(getpid $zombie)
if [ -z $pid ]; then continue; fi
echo "Killing server $zombie with pid $pid"
kill $pid 2>/dev/null
done
reap
if [ -z "$(find $basedir -name '.run.*')" ]; then exit 0; fi
sleep 1
reap
# Escalate: SIGKILL anything that survived the polite kill above.
for file in $basedir/.run.*; do
zombie=$(getname $file)
pid=$(getpid $zombie)
if [ -z $pid ]; then continue; fi
echo "Killing uncooperative server $zombie with pid $pid until it's dead"
while (running $zombie); do
kill -9 $pid 2>/dev/null
done
done
reap
;;
debug)
parse "$@"
assertnoargs
gdbname=$(debugname $name)
run -e$existing watch $gdbname
;;
attach)
parse "$@"
assertnoargs
# Attach an out-of-process gdbserver, then connect a local gdb to it.
gdbfile=/tmp/attach
pid=$(getpid $name)
sudo gdbserver :20000 --attach $pid &
echo "target remote :20000" > $gdbfile
gdb -x $gdbfile $cmd
rm $gdbfile
;;
trap)
parse "$@"
assertnoargs
pid=$(getpid $name)
# Signal 5 = SIGTRAP; per the usage text, only safe with gdb attached.
kill -5 $pid
;;
int)
parse "$@"
assertnoargs
pid=$(getpid $name)
kill -2 $pid
;;
kill)
parse "$@"
flags="${@:2}"
pid=$(getpid $name)
if [ -n "$pid" ]; then kill $flags $pid; fi
;;
pipe)
parse "$@"
assertnoargs
pid=$(getpid $name)
if [ -z $existing ]; then
existing=$name
fi
screen -S "run.$existing" -p "$name" -X stuff "$2
"
;;
perf)
parse "$@"
assertnoargs
pid=$(getpid $name)
sudo perf top -p $pid
;;
pid)
parse "$@"
assertnoargs
pid=$(getpid $name)
echo $pid
;;
forget)
parse "$@"
assertnoargs
cleanup $name
;;
stop)
parse "$@"
assertnoargs
gdbname=$(debugname $name)
if [ ! -f "$file" ]; then
echo "$name is not running!"
exit 1
fi
pid=$(getpid $name)
# Escalating shutdown: SIGQUIT, then SIGTERM, then repeated SIGKILL.
echo "Sending SIGQUIT..."
kill -SIGQUIT $pid 2>/dev/null
if running $name; then
sleep 1
echo "Sending SIGTERM..."
kill -SIGTERM $pid 2>/dev/null
fi
if running $name; then
sleep 1
echo "Senging SIGKILL..."
while (running $name); do kill -SIGKILL $pid 2>/dev/null; sleep $sleepinterval; done
fi
run -c stop $gdbname >/dev/null
cleanup $name
;;
*)
usage ;;
esac
| true
|
938e471db1a1ec0d31ced7e7ba8728636fbf6b21
|
Shell
|
ig0r/anax
|
/agent-install/agent-install.sh
|
UTF-8
| 47,908
| 3.6875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# The script installs Horizon agent on an edge node
set -e
SCRIPT_VERSION="1.1.0"
# Platforms, distros, releases, and CPU architectures this installer targets.
SUPPORTED_OS=( "macos" "linux" )
SUPPORTED_LINUX_DISTRO=( "ubuntu" "raspbian" "debian" )
SUPPORTED_LINUX_VERSION=( "bionic" "buster" "xenial" "stretch" )
SUPPORTED_ARCH=( "amd64" "arm64" "armhf" )
# Defaults
PKG_PATH="."
PKG_TREE_IGNORE=false
SKIP_REGISTRATION=false
CFG="agent-install.cfg"
OVERWRITE=false
HZN_NODE_POLICY=""
AGENT_INSTALL_ZIP="agent-install-files.tar.gz"
NODE_ID_MAPPING_FILE="node-id-mapping.csv"
CERTIFICATE_DEFAULT="agent-install.crt"
BATCH_INSTALL=0
VERBOSITY=3 # Default logging verbosity
# required parameters and their defaults
REQUIRED_PARAMS=( "HZN_EXCHANGE_URL" "HZN_FSS_CSSURL" "HZN_ORG_ID" "HZN_EXCHANGE_USER_AUTH" )
REQUIRED_VALUE_FLAG="REQUIRED_FROM_USER"
# DEFAULTS is parallel to REQUIRED_PARAMS; the sentinel value means "must be
# supplied by the user" and makes get_variable() abort if never resolved.
DEFAULTS=( "${REQUIRED_VALUE_FLAG}" "${REQUIRED_VALUE_FLAG}" "${REQUIRED_VALUE_FLAG}" "${REQUIRED_VALUE_FLAG}" )
# certificate for the CLI package on MacOS
MAC_PACKAGE_CERT="horizon-cli.crt"
# Script help
function help() {
# Print the usage text to stdout, then exit non-zero via quit 1.
cat << EndOfMessage
$(basename "$0") <options> -- installing Horizon software
where:
\$HZN_EXCHANGE_URL, \$HZN_FSS_CSSURL, \$HZN_ORG_ID, \$HZN_EXCHANGE_USER_AUTH variables must be defined either in a config file or environment,
-c - path to a certificate file
-k - path to a configuration file (if not specified, uses agent-install.cfg in current directory, if present)
-p - pattern name to register with (if not specified, registers node w/o pattern)
-i - installation packages location (if not specified, uses current directory). if the argument begins with 'http' or 'https', will use as an apt repository
-j - file location for the public key for an apt repository specified with '-i'
-t - set a branch to use in the apt repo specified with -i. default is 'updates'
-n - path to a node policy file
-s - skip registration
-v - show version
-l - logging verbosity level (0-5, 5 is verbose)
-u - exchange user authorization credentials
-d - the id to register this node with
-f - install older version without prompt. overwrite configured node without prompt.
-w - wait for the named service to start executing on this node
-o - specify an org id for the service specified with '-w'
Example: ./$(basename "$0") -i <path_to_package(s)>
EndOfMessage
quit 1
}
# Print the script's name and version, then exit successfully.
function version() {
    printf '%s version: %s\n' "$(basename "$0")" "${SCRIPT_VERSION}"
    exit 0
}

# Exit handling: translate a numeric reason code into a message and status.
#   1 -> generic fatal, 2 -> bad user input, anything else -> plain exit.
function quit(){
    if [[ "$1" == "1" ]]; then
        echo "Exiting..."
        exit 1
    elif [[ "$1" == "2" ]]; then
        echo "Input error, exiting..."
        exit 2
    else
        exit
    fi
}
# Timestamp prefix for log lines (YYYY-MM-DD HH:MM:SS).
function now() {
    date '+%Y-%m-%d %H:%M:%S'
}
# Logging
# Numeric verbosity levels; log() compares the requested level against the
# configured $VERBOSITY.
VERB_SILENT=0
VERB_CRITICAL=1
VERB_ERROR=2
VERB_WARNING=3
VERB_INFO=4
VERB_DEBUG=5
# Per-level wrappers around log(); each takes the message as $1.
function log_notify() {
    log $VERB_SILENT "$1"
}
function log_critical() {
    log $VERB_CRITICAL "CRITICAL: $1"
}
function log_error() {
    log $VERB_ERROR "ERROR: $1"
}
function log_warning() {
    log $VERB_WARNING "WARNING: $1"
}
function log_info() {
    log $VERB_INFO "INFO: $1"
}
function log_debug() {
    log $VERB_DEBUG "DEBUG: $1"
}
# BUG FIX: the original re-defined an identical now() here; the redundant
# duplicate definition has been removed.
# Emit "$2" prefixed with a timestamp and wrapped at 80 columns, but only
# when the requested level $1 is within the configured $VERBOSITY.
function log() {
    if [ $VERBOSITY -ge $1 ]; then
        echo `now` "$2" | fold -w80 -s
    fi
}
# get variables for the script
# if the env variable is defined uses it, if not checks it in the config file
function get_variable() {
log_debug "get_variable() begin"
# $1 - name of the variable to resolve; $2 - config file to fall back to.
# Resolution order: environment/command line, then the config file, then
# the DEFAULTS entry for required params (aborting when only the
# REQUIRED_VALUE_FLAG sentinel remains). The resolved value is assigned to
# the variable named by $1 via 'read -r' indirection.
if ! [ -z "${!1}" ]; then
# if env/command line variable is defined, using it
# AUTH values are deliberately never echoed back into the log.
if [[ $1 == *"AUTH"* ]]; then
log_notify "Using variable from environment/command line, ${1}"
else
log_notify "Using variable from environment/command line, ${1} is ${!1}"
fi
else
log_notify "The ${1} is missed in environment/not specified with command line, looking for it in the config file ${2} ..."
# the env/command line variable not defined, using config file
# check if it exists
log_info "Checking if the config file ${2} exists..."
if [[ -f "$2" ]] ; then
log_info "The config file ${2} exists"
# Accept the config line only if it is not commented out and has a
# non-empty value after KEY="value" parsing.
if [ -z "$(grep ${1} ${2} | grep "^#")" ] && ! [ -z "$(grep ${1} ${2} | cut -d'=' -f2 | cut -d'"' -f2)" ]; then
# found variable in the config file
ref=${1}
IFS= read -r "$ref" <<<"$(grep ${1} ${2} | cut -d'=' -f2 | cut -d'"' -f2)"
if [[ $1 == *"AUTH"* ]]; then
log_notify "Using variable from the config file ${2}, ${1}"
else
log_notify "Using variable from the config file ${2}, ${1} is ${!1}"
fi
else
# found neither in env nor in config file. check if the missed var is in required parameters
if [[ " ${REQUIRED_PARAMS[*]} " == *" ${1} "* ]]; then
# if found neither in the env nor in the env, try to use its default value, if any
log_info "The required variable ${1} found neither in environment nor in the config file ${2}, checking if it has defaults..."
for i in "${!REQUIRED_PARAMS[@]}"; do
if [[ "${REQUIRED_PARAMS[$i]}" = "${1}" ]]; then
log_info "Found ${1} in required params with index ${i}, using it for looking up its default value...";
log_info "Found ${1} default, it is ${DEFAULTS[i]}"
ref=${1}
IFS= read -r "$ref" <<<"${DEFAULTS[i]}"
fi
done
# Still only the sentinel: the user never supplied the required value.
if [ ${!1} = "$REQUIRED_VALUE_FLAG" ]; then
log_notify "The ${1} is required and needs to be set either in the config file or environment, exiting..."
exit 1
fi
else
log_info "The variable ${1} found neither in environment nor in the config file ${2}, but it's not required, continuing..."
fi
fi
else
log_notify "The config file ${2} doesn't exist, exiting..."
exit 1
fi
fi
log_debug "get_variable() end"
}
# validates if mutually exclusive arguments are mutually exclusive
# Abort when the two mutually exclusive variables named by $1 and $2 are
# both non-empty.
function validate_mutual_ex() {
    log_debug "validate_mutual_ex() begin"
    if [[ -n "${!1}" && -n "${!2}" ]]; then
        echo "Both ${1}=${!1} and ${2}=${!2} mutually exlusive parameters are defined, exiting..."
        exit 1
    fi
    log_debug "validate_mutual_ex() end"
}

# Ensure $1 is an integer within [VERB_SILENT, VERB_DEBUG]; quits with input
# error (2) otherwise.
function validate_number_int() {
    log_debug "validate_number_int() begin"
    re='^[0-9]+$'
    if [[ ! $1 =~ $re ]]; then
        # not an integer at all
        echo `now` "The provided verbosity value ${1} is not a number" >&2; quit 2
    elif ! (($1 >= VERB_SILENT && $1 <= VERB_DEBUG)); then
        # integer, but outside the accepted verbosity range
        echo `now` "The verbosity number is not in range [${VERB_SILENT}; ${VERB_DEBUG}]."
        quit 2
    fi
    log_debug "validate_number_int() end"
}
# set HZN_EXCHANGE_PATTERN to a pattern set in the exchange
function set_pattern_from_exchange(){
log_debug "set_pattern_from_exchange() begin"
# Requires NODE_ID plus the exchange URL/org/auth globals; queries the
# exchange for this node and copies its pattern into HZN_EXCHANGE_PATTERN.
if [[ "$NODE_ID" != "" ]]; then
if [[ "${HZN_EXCHANGE_URL: -1}" == "/" ]]; then
HZN_EXCHANGE_URL=$(echo "$HZN_EXCHANGE_URL" | sed 's/\/$//')
fi
if [[ $CERTIFICATE != "" ]]; then
EXCH_OUTPUT=$(curl -fs --cacert $CERTIFICATE $HZN_EXCHANGE_URL/orgs/$HZN_ORG_ID/nodes/$NODE_ID -u $HZN_ORG_ID/$HZN_EXCHANGE_USER_AUTH ) || true
else
EXCH_OUTPUT=$(curl -fs $HZN_EXCHANGE_URL/orgs/$HZN_ORG_ID/nodes/$NODE_ID -u $HZN_ORG_ID/$HZN_EXCHANGE_USER_AUTH) || true
fi
if [[ "$EXCH_OUTPUT" != "" ]]; then
EXCH_PATTERN=$(echo $EXCH_OUTPUT | jq -e '.nodes | .[].pattern')
# jq prints an empty pattern as a literal "" — treat that as unset.
if [[ "$EXCH_PATTERN" != "\"\"" ]]; then
HZN_EXCHANGE_PATTERN=$(echo "$EXCH_PATTERN" | sed 's/"//g' )
fi
fi
else
log_notify "Node id not set. Skipping finding node pattern in the exchange."
fi
log_debug "set_pattern_from_exchange() end"
}
# create a file for HZN_NODE_POLICY to point to containing the node policy found in the exchange
function set_policy_from_exchange(){
log_debug "set_policy_from_exchange() begin"
if [[ "$NODE_ID" != "" ]]; then
if [[ "${HZN_EXCHANGE_URL: -1}" == "/" ]]; then
HZN_EXCHANGE_URL=$(echo "$HZN_EXCHANGE_URL" | sed 's/\/$//')
fi
if [[ $CERTIFICATE != "" ]]; then
EXCH_POLICY=$(curl -fs --cacert $CERTIFICATE $HZN_EXCHANGE_URL/orgs/$HZN_ORG_ID/nodes/$NODE_ID/policy -u $HZN_ORG_ID/$HZN_EXCHANGE_USER_AUTH) || true
else
EXCH_POLICY=$(curl -fs $HZN_EXCHANGE_URL/orgs/$HZN_ORG_ID/nodes/$NODE_ID/policy -u $HZN_ORG_ID/$HZN_EXCHANGE_USER_AUTH) || true
fi
if [[ $EXCH_POLICY != "" ]]; then
# Persist the policy locally so registration can reference a file path.
echo $EXCH_POLICY > exchange-node-policy.json
HZN_NODE_POLICY="exchange-node-policy.json"
fi
else
log_notify "Node id not set. Skipping finding node policy in the exchange."
fi
log_debug "set_policy_from_exchange() end"
}
# validate that the found credentials, org id, certificate, and exchange url will work to view the org in the exchange
function validate_exchange(){
log_debug "validate_exchange() begin"
if [[ "$CERTIFICATE" != "" ]]; then
OUTPUT=$(curl -fs --cacert $CERTIFICATE $HZN_EXCHANGE_URL/orgs/$HZN_ORG_ID -u $HZN_ORG_ID/$HZN_EXCHANGE_USER_AUTH) || true
else
# NOTE(review): $CERTIFICATE is empty in this branch, so the bare
# expansion below is a no-op — harmless but likely a leftover; confirm.
OUTPUT=$(curl -fs $CERTIFICATE $HZN_EXCHANGE_URL/orgs/$HZN_ORG_ID -u $HZN_ORG_ID/$HZN_EXCHANGE_USER_AUTH) || true
fi
if [[ "$OUTPUT" == "" ]]; then
log_error "Failed to reach exchange using CERTIFICATE=$CERTIFICATE HZN_EXCHANGE_URL=$HZN_EXCHANGE_URL HZN_ORG_ID=$HZN_ORG_ID and HZN_EXCHANGE_USER_AUTH=<specified>"
exit 1
fi
log_debug "validate_exchange() end"
}
# checks input arguments and env variables specified
function validate_args(){
log_debug "validate_args() begin"
log_info "Checking script arguments..."
# preliminary check for script arguments
check_empty "$PKG_PATH" "path to installation packages"
# An http(s) package path means "treat it as an apt repository" rather than
# a local directory of packages.
if [[ ${PKG_PATH:0:4} == "http" ]]; then
PKG_APT_REPO="$PKG_PATH"
if [[ "${PKG_APT_REPO: -1}" == "/" ]]; then
PKG_APT_REPO=$(echo "$PKG_APT_REPO" | sed 's/\/$//')
fi
PKG_PATH="."
else
PKG_PATH=$(echo "$PKG_PATH" | sed 's/\/$//')
check_exist d "$PKG_PATH" "The package installation"
fi
check_empty "$SKIP_REGISTRATION" "registration flag"
log_info "Check finished successfully"
log_info "Checking configuration..."
# read and validate configuration
get_variable HZN_EXCHANGE_URL $CFG
check_empty HZN_EXCHANGE_URL "Exchange URL"
get_variable HZN_FSS_CSSURL $CFG
check_empty HZN_FSS_CSSURL "FSS_CSS URL"
get_variable HZN_ORG_ID $CFG
check_empty HZN_ORG_ID "ORG ID"
get_variable HZN_EXCHANGE_USER_AUTH $CFG
check_empty HZN_EXCHANGE_USER_AUTH "Exchange User Auth"
get_variable NODE_ID $CFG
get_variable CERTIFICATE $CFG
get_variable HZN_MGMT_HUB_CERT_PATH $CFG
# Certificate resolution order: CERTIFICATE, then HZN_MGMT_HUB_CERT_PATH,
# then the default agent-install.crt if present in the current directory.
if [[ "$CERTIFICATE" == "" ]]; then
if [[ "$HZN_MGMT_HUB_CERT_PATH" != "" ]]; then
CERTIFICATE=$HZN_MGMT_HUB_CERT_PATH
elif [ -f "$CERTIFICATE_DEFAULT" ]; then
CERTIFICATE="$CERTIFICATE_DEFAULT"
fi
fi
validate_exchange
get_variable HZN_EXCHANGE_PATTERN $CFG
if [ -z "$HZN_EXCHANGE_PATTERN" ]; then
set_pattern_from_exchange
fi
get_variable HZN_NODE_POLICY $CFG
# check on mutual exclusive params (node policy and pattern name)
validate_mutual_ex "HZN_NODE_POLICY" "HZN_EXCHANGE_PATTERN"
# if a node policy is non-empty, check if the file exists
if [[ ! -z $HZN_NODE_POLICY ]]; then
check_exist f "$HZN_NODE_POLICY" "The node policy"
elif [[ "$HZN_EXCHANGE_PATTERN" == "" ]] ; then
set_policy_from_exchange
fi
# NOTE(review): this fires when ORG is empty but SERVICE is set, yet the
# message and the unset target the -o/ORG flag — condition looks inverted;
# confirm intended combination.
if [[ -z "$WAIT_FOR_SERVICE_ORG" ]] && [[ ! -z "$WAIT_FOR_SERVICE" ]]; then
log_error "Must specify service with -w to use with -o organization. Ignoring -o flag."
unset WAIT_FOR_SERVICE_ORG
fi
log_info "Check finished successfully"
log_debug "validate_args() end"
}
# Dump the effective configuration to stdout; the auth credential is
# redacted as "<specified>".
function show_config() {
    log_debug "show_config() begin"
    printf '%s\n' \
        "Current configuration:" \
        "Certification file: ${CERTIFICATE}" \
        "Configuration file: ${CFG}" \
        "Installation packages location: ${PKG_PATH}" \
        "Ignore package tree: ${PKG_TREE_IGNORE}" \
        "Pattern name: ${HZN_EXCHANGE_PATTERN}" \
        "Node policy: ${HZN_NODE_POLICY}" \
        "Skip registration: ${SKIP_REGISTRATION}" \
        "HZN_EXCHANGE_URL=${HZN_EXCHANGE_URL}" \
        "HZN_FSS_CSSURL=${HZN_FSS_CSSURL}" \
        "HZN_ORG_ID=${HZN_ORG_ID}" \
        "HZN_EXCHANGE_USER_AUTH=<specified>" \
        "Verbosity is ${VERBOSITY}"
    log_debug "show_config() end"
}
function check_installed() {
log_debug "check_installed() begin"
# $1 - command name probed with 'command -v'
# $2 - human-readable/package name
# $3 - optional package manager used to auto-install (e.g. "brew")
if command -v "$1" >/dev/null 2>&1; then
log_info "${2} is installed"
elif [[ $3 != "" ]]; then
if command -v "$3" >/dev/null 2>&1; then
log_notify "${2} not found. Attempting to install with ${3}"
set -x
$3 install "$2"
set +x
fi
if command -v "$1" >/dev/null 2>&1; then
log_info "${2} is now installed"
else
# Auto-install failed: reported, but not fatal in this branch.
log_info "Failed to install ${2} with ${3}. Please install ${2}"
fi
else
# No installer available — missing prerequisite is fatal.
log_notify "${2} not found, please install it"
quit 1
fi
log_debug "check_installed() end"
}
# compare versions
# Succeed (return 0) when version string $1 is strictly greater than $2
# under natural version ordering.
function version_gt() {
    local lowest
    lowest=$(printf '%s\n' "$@" | sort -V | head -n 1)
    [ "$lowest" != "$1" ]
}
function install_macos() {
log_debug "install_macos() begin"
# Mac install path: trust certificates, install the newest horizon-cli .pkg
# found under the current tree, write /etc/default/horizon and
# ~/.hzn/hzn.json, then start the service, create the node, and register.
log_notify "Installing agent on ${OS}..."
log_info "Checking ${OS} specific prerequisites..."
check_installed "socat" "socat"
check_installed "docker" "Docker"
check_installed "jq" "jq" "brew"
# Setting up a certificate
log_info "Importing the horizon-cli package certificate into Mac OS keychain..."
set -x
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ${PACKAGES}/${MAC_PACKAGE_CERT}
set +x
if [[ "$CERTIFICATE" != "" ]]; then
log_info "Configuring an edge node to trust the ICP certificate ..."
set -x
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain "$CERTIFICATE"
set +x
fi
# Pick the newest horizon-cli package under the current directory.
PKG_NAME=$(find . -name "horizon-cli*\.pkg" | sort -V | tail -n 1 | cut -d "/" -f 2)
log_info "Detecting packages version..."
# Version fields are parsed positionally out of the package file name.
PACKAGE_VERSION=$(echo ${PACKAGES}/$PKG_NAME | cut -d'-' -f3 | cut -d'.' -f1-3)
ICP_VERSION=$(echo ${PACKAGES}/$PKG_NAME | cut -d'-' -f4 | cut -d'.' -f1-3)
log_info "The packages version is ${PACKAGE_VERSION}"
log_info "The ICP version is ${ICP_VERSION}"
if [[ -z "$ICP_VERSION" ]]; then
export HC_DOCKER_TAG="$PACKAGE_VERSION"
else
export HC_DOCKER_TAG="${PACKAGE_VERSION}-${ICP_VERSION}"
fi
log_debug "Setting up the agent container tag on Mac..."
log_debug "HC_DOCKER_TAG is ${HC_DOCKER_TAG}"
log_info "Checking if hzn is installed..."
if command -v hzn >/dev/null 2>&1; then
# if hzn is installed, need to check the current setup
log_info "hzn found, checking setup..."
AGENT_VERSION=$(hzn version | grep "^Horizon Agent" | sed 's/^.*: //' | cut -d'-' -f1)
log_info "Found Agent version is ${AGENT_VERSION}"
re='^[0-9]+([.][0-9]+)+([.][0-9]+)'
if ! [[ $AGENT_VERSION =~ $re ]] ; then
log_info "Something's wrong. Can't get the agent verison, installing it..."
set -x
sudo installer -pkg ${PACKAGES}/$PKG_NAME -target /
set +x
else
# compare version for installing and what we have
log_info "Comparing agent and packages versions..."
if [ "$AGENT_VERSION" = "$PACKAGE_VERSION" ]; then
log_info "Versions are equal: agent is ${AGENT_VERSION} and packages are ${PACKAGE_VERSION}. Don't need to install"
else
if version_gt "$AGENT_VERSION" "$PACKAGE_VERSION"; then
log_info "Installed agent ${AGENT_VERSION} is newer than the packages ${PACKAGE_VERSION}"
# Downgrades need -f (OVERWRITE) or interactive consent; unattended
# batch installs abort instead of prompting.
if [ ! "$OVERWRITE" = true ] ; then
if [ $BATCH_INSTALL -eq 1 ]; then
exit 1
fi
echo "The installed agent is newer than one you're trying to install, continue?[y/N]:"
read RESPONSE
if [ ! "$RESPONSE" == 'y' ]; then
echo "Exiting at users request"
exit
fi
fi
log_notify "Installing older packages ${PACKAGE_VERSION}..."
set -x
sudo installer -pkg ${PACKAGES}/$PKG_NAME -target /
set +x
else
log_info "Installed agent is ${AGENT_VERSION}, package is ${PACKAGE_VERSION}"
log_notify "Installing newer package (${PACKAGE_VERSION}) ..."
set -x
sudo installer -pkg ${PACKAGES}/$PKG_NAME -target /
set +x
fi
fi
fi
else
log_notify "hzn not found, installing it..."
set -x
sudo installer -pkg ${PACKAGES}/$PKG_NAME -target /
set +x
fi
start_horizon_service
process_node
# configuring agent inside the container
HZN_CONFIG=/etc/default/horizon
log_info "Configuring ${HZN_CONFIG} file for the agent container..."
HZN_CONFIG_DIR=$(dirname "${HZN_CONFIG}")
if ! [[ -f "$HZN_CONFIG" ]] ; then
log_info "$HZN_CONFIG file doesn't exist, creating..."
# check if the directory exists
if ! [[ -d "$(dirname "${HZN_CONFIG}")" ]] ; then
log_info "The directory ${HZN_CONFIG_DIR} doesn't exist, creating..."
set -x
sudo mkdir -p "$HZN_CONFIG_DIR"
set +x
fi
log_info "Creating ${HZN_CONFIG} file..."
set -x
if [ -z "$CERTIFICATE" ]; then
printf "HZN_EXCHANGE_URL=${HZN_EXCHANGE_URL} \nHZN_FSS_CSSURL=${HZN_FSS_CSSURL} \
\nHZN_DEVICE_ID=${HOSTNAME}" | sudo tee "$HZN_CONFIG"
else
# Relative cert paths are made absolute against the current directory.
if [[ ${CERTIFICATE:0:1} != "/" ]]; then
ABS_CERTIFICATE=$(pwd)/${CERTIFICATE}
else
ABS_CERTIFICATE=${CERTIFICATE}
fi
printf "HZN_EXCHANGE_URL=${HZN_EXCHANGE_URL} \nHZN_FSS_CSSURL=${HZN_FSS_CSSURL} \
\nHZN_DEVICE_ID=${HOSTNAME} \nHZN_MGMT_HUB_CERT_PATH=${ABS_CERTIFICATE}" | sudo tee "$HZN_CONFIG"
fi
set +x
log_info "Config created"
else
if [[ ! -z "${HZN_EXCHANGE_URL}" ]] && [[ ! -z "${HZN_FSS_CSSURL}" ]]; then
log_info "Found environment variables HZN_EXCHANGE_URL and HZN_FSS_CSSURL, updating horizon config..."
set -x
if [ -z "$CERTIFICATE" ]; then
sudo sed -i.bak -e "s~^HZN_EXCHANGE_URL=[^ ]*~HZN_EXCHANGE_URL=${HZN_EXCHANGE_URL}~g" \
-e "s~^HZN_FSS_CSSURL=[^ ]*~HZN_FSS_CSSURL=${HZN_FSS_CSSURL}~g" "$HZN_CONFIG"
else
if [[ ${CERTIFICATE:0:1} != "/" ]]; then
ABS_CERTIFICATE=$(pwd)/${CERTIFICATE}
else
ABS_CERTIFICATE=${CERTIFICATE}
fi
sudo sed -i.bak -e "s~^HZN_EXCHANGE_URL=[^ ]*~HZN_EXCHANGE_URL=${HZN_EXCHANGE_URL}~g" \
-e "s~^HZN_FSS_CSSURL=[^ ]*~HZN_FSS_CSSURL=${HZN_FSS_CSSURL}~g" \
-e "s~^HZN_MGMT_HUB_CERT_PATH=[^ ]*~HZN_MGMT_HUB_CERT_PATH=${ABS_CERTIFICATE}~g" "$HZN_CONFIG"
fi
set +x
log_info "Config updated"
fi
fi
# hzn CLI config: create or update ~/.hzn/hzn.json with URLs and cert path.
CONFIG_MAC=~/.hzn/hzn.json
log_info "Configuring hzn..."
if [[ ! -z "${HZN_EXCHANGE_URL}" ]] && [[ ! -z "${HZN_FSS_CSSURL}" ]]; then
# NOTE(review): this computes ABS_CERTIFICATE only when CERTIFICATE is
# *empty* — the -z test looks inverted (-n expected), since later branches
# substitute ABS_CERTIFICATE only when CERTIFICATE is non-empty; confirm.
if [ -z "$CERTIFICATE" ]; then
if [[ ${CERTIFICATE:0:1} != "/" ]]; then
ABS_CERTIFICATE=$(pwd)/${CERTIFICATE}
else
ABS_CERTIFICATE=${CERTIFICATE}
fi
fi
if [[ -f "$CONFIG_MAC" ]]; then
log_info "${CONFIG_MAC} config file exists, updating..."
set -x
if [ -z "$CERTIFICATE" ]; then
sed -i.bak -e "s|\"HZN_EXCHANGE_URL\": \"[^ ]*\",|\"HZN_EXCHANGE_URL\": \""$HZN_EXCHANGE_URL"\",|" \
-e "s|\"HZN_FSS_CSSURL\": \"[^ ]*\"|\"HZN_FSS_CSSURL\": \""$HZN_FSS_CSSURL"\"|" "$CONFIG_MAC"
else
sed -i.bak -e "s|\"HZN_EXCHANGE_URL\": \"[^ ]*\",|\"HZN_EXCHANGE_URL\": \""$HZN_EXCHANGE_URL"\",|" \
-e "s|\"HZN_FSS_CSSURL\": \"[^ ]*\"|\"HZN_FSS_CSSURL\": \""$HZN_FSS_CSSURL"\"|" \
-e "s|\"HZN_MGMT_HUB_CERT_PATH\": \"[^ ]*\"|\"HZN_MGMT_HUB_CERT_PATH\": \""$ABS_CERTIFICATE"\"|" "$CONFIG_MAC"
fi
set +x
log_info "Config updated"
else
log_info "${CONFIG_MAC} file doesn't exist, creating..."
set -x
mkdir -p "$(dirname "$CONFIG_MAC")"
if [ -z "$CERTIFICATE" ]; then
printf "{\n \"HZN_EXCHANGE_URL\": \""$HZN_EXCHANGE_URL"\",\n \"HZN_FSS_CSSURL\": \""$HZN_FSS_CSSURL"\"\n}" > "$CONFIG_MAC"
else
printf "{\n \"HZN_EXCHANGE_URL\": \""$HZN_EXCHANGE_URL"\",\n \"HZN_FSS_CSSURL\": \""$HZN_FSS_CSSURL"\",\n \"HZN_MGMT_HUB_CERT_PATH\": \""$ABS_CERTIFICATE"\"\n}" > "$CONFIG_MAC"
fi
set +x
log_info "Config created"
fi
fi
start_horizon_service
create_node
registration "$SKIP_REGISTRATION" "$HZN_EXCHANGE_PATTERN" "$HZN_NODE_POLICY"
log_debug "install_macos() end"
}
#######################################
# Install and configure the Horizon agent on a Debian-family Linux host:
# determine/verify the agent port, install prerequisites (curl, jq), install
# the horizon deb packages (either from an APT repository or from the local
# package tree, comparing versions against any installed agent), update
# /etc/default/horizon, restart the service, wait for the agent API, then
# create and register the node.
# Globals read: OS, DISTRO, CODENAME, ARCH, PKG_APT_REPO, PKG_APT_KEY,
#   APT_REPO_BRANCH, PACKAGES, OVERWRITE, BATCH_INSTALL, CERTIFICATE,
#   HZN_EXCHANGE_URL, HZN_FSS_CSSURL, SKIP_REGISTRATION,
#   HZN_EXCHANGE_PATTERN, HZN_NODE_POLICY
# Globals written: ANAX_PORT, AGENT_VERSION, PACKAGE_VERSION, ABS_CERTIFICATE
#######################################
function install_linux(){
    log_debug "install_linux() begin"
    log_notify "Installing agent on ${DISTRO}, version ${CODENAME}, architecture ${ARCH}"

    # Default agent listen port; may be overridden by /etc/default/horizon
    # (HZN_AGENT_PORT) and later by /etc/horizon/anax.json (APIListen).
    ANAX_PORT=8510
    if [[ "$OS" == "linux" ]]; then
        if [ -f /etc/default/horizon ]; then
            log_info "Getting agent port from /etc/default/horizon file..."
            anaxPort=$(grep HZN_AGENT_PORT /etc/default/horizon |cut -d'=' -f2)
            if [[ "$anaxPort" == "" ]]; then
                log_info "Cannot detect agent port as /etc/default/horizon does not contain HZN_AGENT_PORT, using ${ANAX_PORT} instead"
            else
                ANAX_PORT=$anaxPort
            fi
        else
            log_info "Cannot detect agent port as /etc/default/horizon cannot be found, using ${ANAX_PORT} instead"
        fi
    fi

    # The port may only be occupied by an already-running anax; anything else
    # listening there is a hard error.
    log_info "Checking if the agent port ${ANAX_PORT} is free..."
    if [ ! -z "$(netstat -nlp | grep \":$ANAX_PORT \")" ]; then
        log_info "Something is running on ${ANAX_PORT}..."
        if [ -z "$(netstat -nlp | grep \":$ANAX_PORT \" | grep anax)" ]; then
            log_notify "It's not anax, please free the port in order to install horizon, exiting..."
            netstat -nlp | grep \":$ANAX_PORT \"
            exit 1
        else
            log_info "It's anax, continuing..."
            netstat -nlp | grep \":$ANAX_PORT \"
        fi
    else
        log_info "Anax port ${ANAX_PORT} is free, continuing..."
    fi

    log_info "Updating OS..."
    set -x
    apt update
    set +x

    # Prerequisites: curl (downloads/status checks) and jq (JSON parsing).
    log_info "Checking if curl is installed..."
    if command -v curl >/dev/null 2>&1; then
        log_info "curl found"
    else
        log_info "curl not found, installing it..."
        set -x
        apt install -y curl
        set +x
        log_info "curl installed"
    fi
    if command -v jq >/dev/null 2>&1; then
        log_info "jq found"
    else
        log_info "jq not found, installing it..."
        set -x
        apt install -y jq
        set +x
        log_info "jq installed"
    fi

    if [[ ! -z "$PKG_APT_REPO" ]]; then
        # Install from an APT repository (optionally importing its signing key).
        if [[ ! -z "$PKG_APT_KEY" ]]; then
            log_info "Adding key $PKG_APT_KEY"
            set -x
            apt-key add "$PKG_APT_KEY"
            set +x
        fi
        if [[ -z "$APT_REPO_BRANCH" ]]; then
            APT_REPO_BRANCH="updates"
        fi
        log_info "Adding $PKG_APT_REPO to /etc/sources to install with apt"
        set -x
        add-apt-repository "deb $PKG_APT_REPO ${CODENAME}-$APT_REPO_BRANCH main"
        apt-get install bluehorizon -y -f
        set +x
    else
        # Install from the local package tree, comparing the bundled package
        # version with any agent already installed on the host.
        log_info "Checking if hzn is installed..."
        if command -v hzn >/dev/null 2>&1; then
            # if hzn is installed, need to check the current setup
            log_info "hzn found, checking setup..."
            AGENT_VERSION=$(hzn version | grep "^Horizon Agent" | sed 's/^.*: //' | cut -d'-' -f1)
            log_info "Found Agent version is ${AGENT_VERSION}"
            # Sanity-check the version string looks like x.y.z before comparing.
            re='^[0-9]+([.][0-9]+)+([.][0-9]+)'
            if ! [[ $AGENT_VERSION =~ $re ]] ; then
                log_notify "Something's wrong. Can't get the agent verison, installing it..."
                set -x
                set +e
                dpkg -i ${PACKAGES}/*horizon*${DISTRO}.${CODENAME}*.deb
                set -e
                set +x
                log_notify "Resolving any dependency errors..."
                set -x
                apt update && apt-get install -y -f
                set +x
            else
                # compare version for installing and what we have
                PACKAGE_VERSION=$(ls ${PACKAGES} | grep horizon-cli | cut -d'_' -f2 | cut -d'~' -f1)
                log_info "The packages version is ${PACKAGE_VERSION}"
                log_info "Comparing agent and packages versions..."
                if [ "$AGENT_VERSION" = "$PACKAGE_VERSION" ]; then
                    log_notify "Versions are equal: agent is ${AGENT_VERSION} and packages are ${PACKAGE_VERSION}. Don't need to install"
                else
                    if version_gt "$AGENT_VERSION" "$PACKAGE_VERSION" ; then
                        # Downgrade: requires confirmation unless -f/batch mode.
                        log_notify "Installed agent ${AGENT_VERSION} is newer than the packages ${PACKAGE_VERSION}"
                        if [ ! "$OVERWRITE" = true ] ; then
                            if [ $BATCH_INSTALL -eq 1 ]; then
                                exit 1
                            fi
                            echo "The installed agent is newer than one you're trying to install, continue?[y/N]:"
                            read RESPONSE
                            if [ ! "$RESPONSE" == 'y' ]; then
                                echo "Exiting at users request"
                                exit
                            fi
                        fi
                        log_notify "Installing older packages ${PACKAGE_VERSION}..."
                        set -x
                        set +e
                        dpkg -i ${PACKAGES}/*horizon*${DISTRO}.${CODENAME}*.deb
                        set -e
                        set +x
                        log_notify "Resolving any dependency errors..."
                        set -x
                        apt update && apt-get install -y -f
                        set +x
                    else
                        # Upgrade path.
                        log_info "Installed agent is ${AGENT_VERSION}, package is ${PACKAGE_VERSION}"
                        log_notify "Installing newer package (${PACKAGE_VERSION}) ..."
                        set -x
                        set +e
                        dpkg -i ${PACKAGES}/*horizon*${DISTRO}.${CODENAME}*.deb
                        set -e
                        set +x
                        log_notify "Resolving any dependency errors..."
                        set -x
                        apt update && apt-get install -y -f
                        set +x
                    fi
                fi
            fi
        else
            # Fresh install: no hzn on the host yet.
            log_notify "hzn not found, installing it..."
            set -x
            set +e
            dpkg -i ${PACKAGES}/*horizon*${DISTRO}.${CODENAME}*.deb
            set -e
            set +x
            log_notify "Resolving any dependency errors..."
            set -x
            apt update && apt-get install -y -f
            set +x
        fi
    fi

    # If anax.json overrides the API listen port, prefer that value.
    if [[ -f "/etc/horizon/anax.json" ]]; then
        while read line; do
            if [[ $(echo $line | grep "APIListen") != "" ]]; then
                if [[ $(echo $line | cut -d ":" -f 3 | cut -d "\"" -f 1 ) != "$ANAX_PORT" ]]; then
                    ANAX_PORT=$(echo $line | cut -d ":" -f 3 | cut -d "\"" -f 1 )
                    log_info "Using anax port $ANAX_PORT"
                fi
                break
            fi
        done </etc/horizon/anax.json
    fi

    process_node

    check_exist f "/etc/default/horizon" "horizon configuration"
    # The /etc/default/horizon creates upon horizon deb packages installation
    if [[ ! -z "${HZN_EXCHANGE_URL}" ]] && [[ ! -z "${HZN_FSS_CSSURL}" ]]; then
        log_info "Found variables HZN_EXCHANGE_URL and HZN_FSS_CSSURL, updating horizon config..."
        set -x
        if [ -z "$CERTIFICATE" ]; then
            sed -i.bak -e "s~^HZN_EXCHANGE_URL=[^ ]*~HZN_EXCHANGE_URL=${HZN_EXCHANGE_URL}~g" \
                -e "s~^HZN_FSS_CSSURL=[^ ]*~HZN_FSS_CSSURL=${HZN_FSS_CSSURL}~g" /etc/default/horizon
        else
            # Resolve the certificate to an absolute path before writing it
            # into the config, since the service won't share our cwd.
            if [[ ${CERTIFICATE:0:1} != "/" ]]; then
                ABS_CERTIFICATE=$(pwd)/${CERTIFICATE}
            else
                ABS_CERTIFICATE=${CERTIFICATE}
            fi
            sed -i.bak -e "s~^HZN_EXCHANGE_URL=[^ ]*~HZN_EXCHANGE_URL=${HZN_EXCHANGE_URL}~g" \
                -e "s~^HZN_FSS_CSSURL=[^ ]*~HZN_FSS_CSSURL=${HZN_FSS_CSSURL}~g" \
                -e "s~^HZN_MGMT_HUB_CERT_PATH=[^ ]*~HZN_MGMT_HUB_CERT_PATH=${ABS_CERTIFICATE}~g" /etc/default/horizon
        fi
        set +x
        log_info "Config updated"
    fi

    log_info "Restarting the service..."
    set -x
    systemctl restart horizon.service
    set +x

    # Poll the local agent API (1s interval, 60s limit) until it reports an
    # exchange version, i.e. the agent is up and configured.
    start_anax_service_check=`date +%s`
    while [ -z "$(curl -sm 10 http://localhost:$ANAX_PORT/status | jq -r .configuration.exchange_version)" ] ; do
        current_anax_service_check=`date +%s`
        log_notify "the service is not ready, will retry in 1 second"
        if (( current_anax_service_check - start_anax_service_check > 60 )); then
            log_notify "anax service timeout of 60 seconds occured"
            exit 1
        fi
        sleep 1
    done
    log_notify "The service is ready"

    create_node

    registration "$SKIP_REGISTRATION" "$HZN_EXCHANGE_PATTERN" "$HZN_NODE_POLICY"

    log_debug "install_linux() end"
}
# start horizon service container on mac
function start_horizon_service(){
log_debug "start_horizon_service() begin"
if command -v horizon-container >/dev/null 2>&1; then
if [[ -z $(docker ps -q --filter name=horizon1) ]]; then
# horizn services container is not running
if [[ -z $(docker ps -aq --filter name=horizon1) ]]; then
# horizon services container doesn't exist
log_info "Starting horizon services..."
set -x
horizon-container start
set +x
else
# horizon services are shutdown but the container exists
docker start horizon1
fi
start_horizon_container_check=`date +%s`
while [ -z "$(hzn node list | jq -r .configuration.preferred_exchange_version 2>/dev/null)" ] ; do
current_horizon_container_check=`date +%s`
log_info "the horizon-container with anax is not ready, retry in 10 seconds"
if (( current_horizon_container_check - start_horizon_container_check > 300 )); then
echo `now` "horizon container timeout of 60 seconds occured"
exit 1
fi
sleep 10
done
log_info "The horizon-container is ready"
else
log_info "The horizon-container is running already..."
fi
else
log_notify "horizon-container not found, hzn is not installed or its installation is broken, exiting..."
exit 1
fi
log_debug "start_horizon_service() end"
}
# stops horizon service container on mac
function stop_horizon_service(){
log_debug "stop_horizon_service() begin"
# check if the horizon-container script exists
if command -v horizon-container >/dev/null 2>&1; then
# horizon-container script is installed
if ! [[ -z $(docker ps -q --filter name=horizon1) ]]; then
log_info "Stopping the Horizon services container...."
set -x
horizon-container stop
set +x
fi
else
log_notify "horizon-container not found, hzn is not installed or its installation is broken, exiting..."
exit 1
fi
log_debug "stop_horizon_service() end"
}
#######################################
# Examine the current node registration state and, if the node must be
# re-registered with a different pattern/policy (or without one), unregister
# it -- asking the user for confirmation unless -f/batch mode applies.
# On macOS the services container is stopped so it can be restarted cleanly.
# Globals read: OVERWRITE, OVERWRITE_NODE, BATCH_INSTALL, OS
# Globals written: OVERWRITE_NODE, NODE_ID, HZN_EXCHANGE_PATTERN,
#   HZN_NODE_POLICY, NODE_STATE, WORKLOADS
#######################################
function process_node(){
    log_debug "process_node() begin"
    if [ -z "$OVERWRITE_NODE" ]; then
        OVERWRITE_NODE=$OVERWRITE
    fi

    # Checking node state
    NODE_STATE=$(hzn node list | jq -r .configstate.state)
    WORKLOADS=$(hzn agreement list | jq -r .[])

    # When not overwriting, reuse the node's existing id and its existing
    # pattern or policy so re-running the installer is non-destructive.
    if [[ "$NODE_ID" == "" ]] && [[ ! $OVERWRITE_NODE == "true" ]]; then
        NODE_ID=$(hzn node list | jq -r .id)
        log_notify "Registering node with existing id $NODE_ID"
    fi
    if [[ "$HZN_EXCHANGE_PATTERN" == "" ]] && [[ "$HZN_NODE_POLICY" == "" ]] && [[ ! "$OVERWRITE_NODE" == "true" ]]; then
        LOCAL_PATTERN=$(hzn node list | jq -r .pattern)
        if [[ "$LOCAL_PATTERN" != "null" ]] && [[ "$LOCAL_PATTERN" != "" ]]; then
            HZN_EXCHANGE_PATTERN=$LOCAL_PATTERN
        fi
        if [[ "$HZN_EXCHANGE_PATTERN" = "" ]]; then
            hzn policy list > local-node-policy.json
            HZN_NODE_POLICY="local-node-policy.json"
            log_info "Registering node with existing policy $(hzn policy list)"
        else
            log_info "Registering node with existing pattern $HZN_EXCHANGE_PATTERN"
        fi
    fi

    if [ "$NODE_STATE" = "configured" ]; then
        # node is registered
        log_info "Node is registered, state is ${NODE_STATE}"
        if [ -z "$WORKLOADS" ]; then
            # w/o pattern currently
            if [[ -z "$HZN_EXCHANGE_PATTERN" ]] && [[ -z "$HZN_NODE_POLICY" ]]; then
                # Fixed message: dropped the stray "not" (double negative),
                # matching the wording used further down.
                log_info "Neither a pattern nor node policy has been specified, skipping registration..."
            else
                if [[ ! -z "$HZN_EXCHANGE_PATTERN" ]]; then
                    log_info "There's no workloads running, but ${HZN_EXCHANGE_PATTERN} pattern has been specified"
                    log_info "Unregistering the node and register it again with the new ${HZN_EXCHANGE_PATTERN} pattern..."
                fi
                if [[ ! -z "$HZN_NODE_POLICY" ]]; then
                    log_info "There's no workloads running, but ${HZN_NODE_POLICY} node policy has been specified"
                    log_info "Unregistering the node and register it again with the new ${HZN_NODE_POLICY} node policy..."
                fi
                set -x
                hzn unregister -rf
                set +x
                # if mac, need to stop the horizon services container
                if [[ "$OS" == "macos" ]]; then
                    stop_horizon_service
                fi
            fi
        else
            # with a pattern currently
            log_notify "The node currently has workload(s) (check them with hzn agreement list)"
            if [[ -z "$HZN_EXCHANGE_PATTERN" ]] && [[ -z "$HZN_NODE_POLICY" ]]; then
                log_info "Neither a pattern nor node policy has been specified"
                if [[ ! "$OVERWRITE_NODE" = "true" ]] && [ $BATCH_INSTALL -eq 0 ] ; then
                    echo "Do you want to unregister node and register it without pattern or node policy, continue?[y/N]:"
                    read RESPONSE
                    if [ ! "$RESPONSE" == 'y' ]; then
                        echo "Exiting at users request"
                        exit
                    fi
                fi
                log_notify "Unregistering the node and register it again without pattern or node policy..."
            else
                if [[ ! -z "$HZN_EXCHANGE_PATTERN" ]]; then
                    log_notify "${HZN_EXCHANGE_PATTERN} pattern has been specified"
                fi
                if [[ ! -z "$HZN_NODE_POLICY" ]]; then
                    log_notify "${HZN_NODE_POLICY} node policy has been specified"
                fi
                if [[ "$OVERWRITE_NODE" != "true" ]] && [ $BATCH_INSTALL -eq 0 ] ; then
                    if [[ ! -z "$HZN_EXCHANGE_PATTERN" ]]; then
                        echo "Do you want to unregister and register it with a new ${HZN_EXCHANGE_PATTERN} pattern, continue?[y/N]:"
                    fi
                    if [[ ! -z "$HZN_NODE_POLICY" ]]; then
                        echo "Do you want to unregister and register it with a new ${HZN_NODE_POLICY} node policy, continue?[y/N]:"
                    fi
                    read RESPONSE
                    if [ ! "$RESPONSE" == 'y' ]; then
                        echo "Exiting at users request"
                        exit
                    fi
                fi
                if [[ ! -z "$HZN_EXCHANGE_PATTERN" ]]; then
                    log_notify "Unregistering the node and register it again with the new ${HZN_EXCHANGE_PATTERN} pattern..."
                fi
                if [[ ! -z "$HZN_NODE_POLICY" ]]; then
                    log_notify "Unregistering the node and register it again with the new ${HZN_NODE_POLICY} node policy..."
                fi
            fi
            set -x
            hzn unregister -rf
            set +x
            # if mac, need to stop the horizon services container
            if [[ "$OS" == "macos" ]]; then
                stop_horizon_service
            fi
        fi
    else
        log_info "Node is not registered, state is ${NODE_STATE}"
        # if mac, need to stop the horizon services container
        if [[ "$OS" == "macos" ]]; then
            stop_horizon_service
        fi
    fi
    log_debug "process_node() end"
}
# creates node
function create_node(){
log_debug "create_node() begin"
NODE_NAME=$HOSTNAME
log_info "Node name is $NODE_NAME"
if [ -z "$HZN_EXCHANGE_NODE_AUTH" ]; then
log_info "HZN_EXCHANGE_NODE_AUTH is not defined, creating it..."
if [[ "$OS" == "linux" ]]; then
if [ -f /etc/default/horizon ]; then
if [[ "$NODE_ID" == "" ]]; then
log_info "Getting node id from /etc/default/horizon file..."
NODE_ID=$(grep HZN_DEVICE_ID /etc/default/horizon |cut -d'=' -f2)
if [[ "$NODE_ID" == "" ]]; then
NODE_ID=$HOSTNAME
fi
fi
else
log_info "Cannot detect node id as /etc/default/horizon cannot be found, using ${NODE_NAME} hostname instead"
NODE_ID=$NODE_NAME
fi
elif [[ "$OS" == "macos" ]]; then
log_info "Using hostname as node id..."
NODE_ID=$NODE_NAME
fi
log_info "Node id is $NODE_ID"
log_info "Generating node token..."
HZN_NODE_TOKEN=$(cat /dev/urandom | env LC_CTYPE=C tr -dc 'a-zA-Z0-9' | fold -w 45 | head -n 1)
log_notify "Generated node token is ${HZN_NODE_TOKEN}"
HZN_EXCHANGE_NODE_AUTH="${NODE_ID}:${HZN_NODE_TOKEN}"
log_info "HZN_EXCHANGE_NODE_AUTH for a node is ${HZN_EXCHANGE_NODE_AUTH}"
else
log_notify "Found HZN_EXCHANGE_NODE_AUTH variable, using it..."
fi
log_notify "Creating a node..."
set -x
hzn exchange node create -n "$HZN_EXCHANGE_NODE_AUTH" -m "$NODE_NAME" -o "$HZN_ORG_ID" -u "$HZN_EXCHANGE_USER_AUTH"
set +x
log_notify "Verifying a node..."
set -x
hzn exchange node confirm -n "$HZN_EXCHANGE_NODE_AUTH" -o "$HZN_ORG_ID"
set +x
log_debug "create_node() end"
}
# register node depending on if registration's requested and pattern name or policy file
function registration() {
log_debug "registration() begin"
NODE_STATE=$(hzn node list | jq -r .configstate.state)
if [ "$NODE_STATE" = "configured" ]; then
log_info "Node is registered already, skipping registration..."
return 0
fi
WAIT_FOR_SERVICE_ARG=""
if [[ "$WAIT_FOR_SERVICE" != "" ]]; then
if [[ "$WAIT_FOR_SERVICE_ORG" != "" ]]; then
WAIT_FOR_SERVICE_ARG=" -s $WAIT_FOR_SERVICE --serviceorg $WAIT_FOR_SERVICE_ORG "
else
WAIT_FOR_SERVICE_ARG=" -s $WAIT_FOR_SERVICE "
fi
fi
NODE_NAME=$HOSTNAME
log_info "Node name is $NODE_NAME"
if [ "$1" = true ] ; then
log_notify "Skipping registration as it was specified with -s"
else
log_notify "Registering node..."
if [[ -z "${2}" ]]; then
if [[ -z "${3}" ]]; then
log_info "Neither a pattern nor node policy were not specified, registering without it..."
set -x
hzn register -m "${NODE_NAME}" -o "$HZN_ORG_ID" -u "$HZN_EXCHANGE_USER_AUTH" -n "$HZN_EXCHANGE_NODE_AUTH" $WAIT_FOR_SERVICE_ARG
set +x
else
log_info "Node policy ${HZN_NODE_POLICY} was specified, registering..."
set -x
hzn register -m "${NODE_NAME}" -o "$HZN_ORG_ID" -u "$HZN_EXCHANGE_USER_AUTH" -n "$HZN_EXCHANGE_NODE_AUTH" --policy "$3" $WAIT_FOR_SERVICE_ARG
set +x
fi
else
if [[ -z "${3}" ]]; then
log_info "Registering node with ${2} pattern"
set -x
hzn register -p "$2" -m "${NODE_NAME}" -o "$HZN_ORG_ID" -u "$HZN_EXCHANGE_USER_AUTH" -n "$HZN_EXCHANGE_NODE_AUTH" $WAIT_FOR_SERVICE_ARG
set +x
else
log_info "Pattern ${2} and policy ${3} were specified. However, pattern registration will override the policy, registering..."
set -x
hzn register -p "$2" -m "${NODE_NAME}" -o "$HZN_ORG_ID" -u "$HZN_EXCHANGE_USER_AUTH" -n "$HZN_EXCHANGE_NODE_AUTH" --policy "$3" $WAIT_FOR_SERVICE_ARG
set +x
fi
fi
fi
log_debug "registration() end"
}
function check_empty() {
    # Abort the installer when a required value ($1, described by $2) is empty.
    log_debug "check_empty() begin"
    [ -n "$1" ] || { log_notify "The ${2} value is empty, exiting..."; exit 1; }
    log_debug "check_empty() end"
}
# checks if file or directory exists
function check_exist() {
log_debug "check_exist() begin"
case $1 in
f) if ! [[ -f "$2" ]] ; then
log_notify "${3} file ${2} doesn't exist"
exit 1
fi
;;
d) if ! [[ -d "$2" ]] ; then
log_notify "${3} directory ${2} doesn't exist"
exit 1
fi
;;
w) if ! ls ${2} 1> /dev/null 2>&1 ; then
log_notify "${3} files ${2} do not exist"
exit 1
fi
;;
*) echo "not supported"
exit 1
;;
esac
log_debug "check_exist() end"
}
# autocomplete support for CLI
function add_autocomplete() {
log_debug "add_autocomplete() begin"
log_info "Enabling autocomplete for the CLI commands..."
SHELL_FILE="${SHELL##*/}"
if [ -f "/etc/bash_completion.d/hzn_bash_autocomplete.sh" ]; then
AUTOCOMPLETE="/etc/bash_completion.d/hzn_bash_autocomplete.sh"
elif [ -f "/usr/local/share/horizon/hzn_bash_autocomplete.sh" ]; then
# backward compatibility support
AUTOCOMPLETE="/usr/local/share/horizon/hzn_bash_autocomplete.sh"
fi
if [[ ! -z "$AUTOCOMPLETE" ]]; then
if [ -f ~/.${SHELL_FILE}rc ]; then
grep -q "^source ${AUTOCOMPLETE}" ~/.${SHELL_FILE}rc || \
echo "source ${AUTOCOMPLETE}" >> ~/.${SHELL_FILE}rc
else
echo "source ${AUTOCOMPLETE}" > ~/.${SHELL_FILE}rc
fi
else
log_info "There's no an autocomplete script expected, skipping it..."
fi
log_debug "add_autocomplete() end"
}
# detects operating system.
function detect_os() {
log_debug "detect_os() begin"
if [[ "$OSTYPE" == "linux"* ]]; then
OS="linux"
elif [[ "$OSTYPE" == "darwin"* ]]; then
OS="macos"
else
OS="unknown"
fi
log_info "Detected OS is ${OS}"
log_debug "detect_os() end"
}
# detects linux distributive name, version, and codename
function detect_distro() {
log_debug "detect_distro() begin"
if [ -f /etc/os-release ]; then
. /etc/os-release
DISTRO=$ID
VER=$VERSION_ID
CODENAME=$VERSION_CODENAME
elif type lsb_release >/dev/null 2>&1; then
DISTRO=$(lsb_release -si)
VER=$(lsb_release -sr)
CODENAME=$(lsb_release -sc)
elif [ -f /etc/lsb-release ]; then
. /etc/lsb-release
DISTRO=$DISTRIB_ID
VER=$DISTRIB_RELEASE
CODENAME=$DISTRIB_CODENAME
else
log_notify "Cannot detect Linux version, exiting..."
exit 1
fi
# Raspbian has a codename embedded in a version
if [[ "$DISTRO" == "raspbian" ]]; then
CODENAME=$(echo ${VERSION} | sed -e 's/.*(\(.*\))/\1/')
fi
log_info "Detected distributive is ${DISTRO}, verison is ${VER}, codename is ${CODENAME}"
log_debug "detect_distro() end"
}
# detects hardware architecture on linux
function detect_arch() {
log_debug "detect_arch() begin"
# detecting architecture
uname="$(uname -m)"
if [[ "$uname" =~ "aarch64" ]]; then
ARCH="arm64"
elif [[ "$uname" =~ "arm" ]]; then
ARCH="armhf"
elif [[ "$uname" == "x86_64" ]]; then
ARCH="amd64"
elif [[ "$uname" == "ppc64le" ]]; then
ARCH="ppc64el"
else
(>&2 echo "Unknown architecture $uname")
exit 1
fi
log_info "Detected architecture is ${ARCH}"
log_debug "detect_arch() end"
}
# checks if OS/distributive/codename/arch is supported
function check_support() {
log_debug "check_support() begin"
# checks if OS, distro or arch is supported
if [[ ! "${1}" = *"${2}"* ]]; then
echo "Supported components are: "
for i in "${1}"; do echo -n "${i} "; done
echo ""
log_notify "The detected ${2} is not supported, exiting..."
exit 1
else
log_info "The detected ${2} is supported"
fi
log_debug "check_support() end"
}
# checks if requirements are met
function check_requirements() {
log_debug "check_requirements() begin"
detect_os
log_info "Checking support of detected OS..."
check_support "${SUPPORTED_OS[*]}" "$OS"
if [ "$OS" = "linux" ]; then
detect_distro
log_info "Checking support of detected Linux distributive..."
check_support "${SUPPORTED_LINUX_DISTRO[*]}" "$DISTRO"
log_info "Checking support of detected Linux version/codename..."
check_support "${SUPPORTED_LINUX_VERSION[*]}" "$CODENAME"
detect_arch
log_info "Checking support of detected architecture..."
check_support "${SUPPORTED_ARCH[*]}" "$ARCH"
if [[ -z "$PKG_APT_REPO" ]]; then
log_info "Checking the path with packages..."
if [ "$PKG_TREE_IGNORE" = true ] ; then
# ignoring the package tree, checking the current dir
PACKAGES="${PKG_PATH}"
else
# checking the package tree for linux
PACKAGES="${PKG_PATH}/${OS}/${DISTRO}/${CODENAME}/${ARCH}"
fi
log_info "Checking path with packages ${PACKAGES}"
check_exist w "${PACKAGES}/*horizon*${DISTRO}.${CODENAME}*.deb" "Linux installation"
fi
if [ $(id -u) -ne 0 ]; then
log_notify "Please run script with the root priveleges by running 'sudo -s' command first"
quit 1
fi
elif [ "$OS" = "macos" ]; then
if [[ -z "$PKG_APT_REPO" ]]; then
log_info "Checking the path with packages..."
if [ "$PKG_TREE_IGNORE" = true ] ; then
# ignoring the package tree, checking the current dir
PACKAGES="${PKG_PATH}"
else
# checking the package tree for macos
PACKAGES="${PKG_PATH}/${OS}"
fi
log_info "Checking path with packages ${PACKAGES}"
check_exist w "${PACKAGES}/horizon-cli-*.pkg" "MacOS installation"
check_exist f "${PACKAGES}/${MAC_PACKAGE_CERT}" "The CLI package certificate"
fi
fi
log_debug "check_requirements() end"
}
#######################################
# Inspect the current registration state of an already-installed agent and,
# in interactive mode, ask whether an existing registered configuration may
# be overwritten (setting OVERWRITE_NODE=true on confirmation).
# Globals read: BATCH_INSTALL, OVERWRITE
# Globals written: OVERWRITE_NODE
#######################################
function check_node_state() {
    log_debug "check_node_state() begin"
    if command -v hzn >/dev/null 2>&1; then
        local NODE_STATE=$(hzn node list | jq -r .configstate.state)
        log_info "Current node state is: ${NODE_STATE}"
        # Only prompt when interactive, registered, and -f was not given.
        if [ $BATCH_INSTALL -eq 0 ] && [[ "$NODE_STATE" = "configured" ]] && [[ ! $OVERWRITE = "true" ]]; then
            # node is configured need to ask what to do
            log_notify "Your node is registered"
            echo "Do you want to overwrite the current node configuration?[y/N]:"
            read RESPONSE
            if [ "$RESPONSE" == 'y' ]; then
                OVERWRITE_NODE=true
                log_notify "The configuration will be overwritten..."
            else
                log_notify "You might be asked for overwrite confirmations later..."
            fi
        elif [[ "$NODE_STATE" = "unconfigured" ]]; then
            # node is unconfigured
            log_info "The node is in unconfigured state, continuing..."
        fi
    else
        log_info "The hzn doesn't seem to be installed, continuing..."
    fi
    log_debug "check_node_state() end"
}
function unzip_install_files() {
    # Extract the bundled agent-install tarball ($AGENT_INSTALL_ZIP) into
    # the current directory; only logs an error when the file is missing.
    if [ ! -f $AGENT_INSTALL_ZIP ]; then
        log_error "Agent install tar file $AGENT_INSTALL_ZIP does not exist."
    else
        tar -zxf $AGENT_INSTALL_ZIP
    fi
}
#######################################
# Look this host up in the csv mapping file shipped with the agent-install
# bundle ($NODE_ID_MAPPING_FILE, lines of "<hostname-or-ip>,<node-id>").
# Tries the hostname first, then each local IP address.
# Globals written: BATCH_INSTALL, NODE_ID (and NODE_IP via
#   find_node_ip_address). No-op when the mapping file is absent;
# exits 1 when the file exists but contains no matching entry.
#######################################
function find_node_id() {
    log_debug "start find_node_id"
    if [ -f $NODE_ID_MAPPING_FILE ]; then
        BATCH_INSTALL=1
        log_debug "found id mapping file $NODE_ID_MAPPING_FILE"
        # `|| [[ $? == 1 ]]` keeps a no-match grep (exit 1) from being
        # treated as a failure. Fixed: the hostname and ID_LINE are now
        # quoted, so values containing whitespace can no longer break the
        # grep invocation or the -z test below.
        ID_LINE=$(grep "$(hostname)" "$NODE_ID_MAPPING_FILE" || [[ $? == 1 ]] )
        if [ -z "$ID_LINE" ]; then
            log_debug "Did not find node id with hostname. Trying with ip"
            find_node_ip_address
            for IP in $(echo $NODE_IP); do
                ID_LINE=$(grep "$IP" "$NODE_ID_MAPPING_FILE" || [[ $? == 1 ]] )
                if [[ ! "$ID_LINE" = "" ]];then break; fi
            done
            if [[ ! "$ID_LINE" = "" ]]; then
                NODE_ID=$(echo $ID_LINE | cut -d "," -f 2)
            else
                log_notify "Failed to find node id in mapping file $NODE_ID_MAPPING_FILE with $(hostname) or $NODE_IP"
                exit 1
            fi
        else
            NODE_ID=$(echo $ID_LINE | cut -d "," -f 2)
        fi
    fi
    log_debug "finished find_node_id"
}
function find_node_ip_address() {
    # Space-separated list of all IP addresses configured on this host.
    NODE_IP=$(hostname -I)
}
# Accept the parameters from command line
while getopts "c:i:j:p:k:u:d:z:hvl:n:sfw:o:t:" opt; do
case $opt in
c) CERTIFICATE="$OPTARG"
;;
i) PKG_PATH="$OPTARG" PKG_TREE_IGNORE=true
;;
j) PKG_APT_KEY="$OPTARG"
;;
p) HZN_EXCHANGE_PATTERN="$OPTARG"
;;
k) CFG="$OPTARG"
;;
u) HZN_EXCHANGE_USER_AUTH="$OPTARG"
;;
d) NODE_ID="$OPTARG"
;;
z) AGENT_INSTALL_ZIP="$OPTARG"
;;
h) help
;;
v) version
;;
l) validate_number_int "$OPTARG"; VERBOSITY="$OPTARG"
;;
n) HZN_NODE_POLICY="$OPTARG"
;;
s) SKIP_REGISTRATION=true
;;
f) OVERWRITE=true
;;
w) WAIT_FOR_SERVICE="$OPTARG"
;;
o) WAIT_FOR_SERVICE_ORG="$OPTARG"
;;
t) APT_REPO_BRANCH="$OPTARG"
;;
\?) echo "Invalid option: -$OPTARG"; help
;;
:) echo "Option -$OPTARG requires an argument"; help
;;
esac
done
# --- main flow ---
# If an agent-install bundle was supplied (-z), unpack it and try to derive
# the node id from its bundled mapping file.
if [ -f "$AGENT_INSTALL_ZIP" ]; then
    unzip_install_files
    find_node_id
    # trim surrounding whitespace from the id read out of the csv
    NODE_ID=$(echo "$NODE_ID" | sed -e 's/^[[:space:]]*//' | sed -e 's/[[:space:]]*$//' )
    if [[ $NODE_ID != "" ]]; then
        log_info "Found node id $NODE_ID"
    fi
fi

# checking the supplied arguments
validate_args "$*" "$#"
# showing current configuration
show_config
# checking if the requirements are met
check_requirements

check_node_state

# Dispatch to the per-OS installer (install_linux / install_macos).
if [[ "$OS" == "linux" ]]; then
    echo `now` "Detection results: OS is ${OS}, distributive is ${DISTRO}, release is ${CODENAME}, architecture is ${ARCH}"
    install_${OS} ${OS} ${DISTRO} ${CODENAME} ${ARCH}
elif [[ "$OS" == "macos" ]]; then
    echo `now` "Detection results: OS is ${OS}"
    install_${OS}
fi

add_autocomplete
| true
|
e797d74b661431a6a71fe855375d533146c6e757
|
Shell
|
myang32/basebox-slave
|
/bootstrap.sh
|
UTF-8
| 1,970
| 2.90625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/bash

# Stage provisioning resources from the host share
# (/vagrant/resources/basebox-slave) into the vApp sync folder (./resources).
# Each file is copied at most once: existing files in ./resources are never
# overwritten, and missing source files are silently skipped.

# Copy a single resource file, announcing it with the given message.
deploy_resource() {
    local name="$1"
    local message="$2"
    local src="/vagrant/resources/basebox-slave/${name}"
    local dst="resources/${name}"
    if [ ! -f "$dst" ] && [ -f "$src" ]; then
        echo "$message"
        cp "$src" "$dst"
    fi
}

# Make sure the destination folder exists.
[ -d resources/ ] || mkdir -p resources

deploy_resource "Vagrantfile-global" "Deploying Vagrantfile-global from Host to vApp sync folder"
deploy_resource "test-box-vcloud-credentials.bat" "Deploying test-box-vcloud-credentials.bat from Host to vApp sync folder"
deploy_resource "upload-vcloud-credentials.bat" "Deploying upload-vcloud-credentials.bat from Host to vApp sync folder"
deploy_resource "hosts" "Deploying additional hosts entries"
deploy_resource "license.lic" "Deploying Vagrant VMware Workstation license.lic"
deploy_resource "jenkins.plugins.publish_over_ssh.BapSshPublisherPlugin.xml" "Deploying Publish Over SSH Configuration jenkins.plugins.publish_over_ssh.BapSshPublisherPlugin.xml"
| true
|
d9b9507540d40517cf57ff8b14738b934872d8aa
|
Shell
|
harshitgupta412/OCDE
|
/api/new_user.sh
|
UTF-8
| 319
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build a sandboxed workspace for user $1:
#   ./files/<user>  - the user's writable data directory
#   ./files2/<user> - a chroot-style tree containing a read-only bind mount
#                     of the user's files plus read-only bind mounts of the
#                     host's /bin, /lib, /lib64 and /usr, and a copy of run.sh.
# NOTE(review): must run as root (mount). Also, a one-step
# "mount --bind -o ro" does not make the bind read-only on older kernels
# (a separate ro remount is traditionally needed) -- confirm target kernel.
mkdir -p "./files/${1}"
mkdir -p "./files2/${1}"
mkdir -p "./files2/${1}/${1}"
cp "./run.sh" "./files2/${1}/run.sh"
mount --bind -o ro "./files/${1}" "./files2/${1}/${1}"
for ch in "bin" "lib" "lib64" "usr"
do
    mkdir -p "./files2/${1}/${ch}"
    mount --bind -o ro "/${ch}" "./files2/${1}/${ch}"
done
| true
|
b084edf7b1cf69a0ccca4bd866d8ec1db3ced771
|
Shell
|
charlieporth1/ubuntu-scripts
|
/prog/heath-check.sh
|
UTF-8
| 9,151
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Health-check watchdog: probes DNS/Pi-hole related services and ports,
# records failures in $LOG_FILE and restarts misbehaving units.
echo "Date last open `date`"
# Shared exports/helpers for all scripts (expected to define LOG, PROG,
# CONCURRENT, IF_RESTART, ...).
source $PROG/all-scripts-exports.sh
# CONCURRENT -- presumably a single-instance guard provided by the sourced
# exports; TODO confirm.
CONCURRENT
LOG_FILE=$LOG/health_check.log
# Presence of this file pauses health checking (handled below).
LOCK_FILE=/tmp/health-checks.stop.lock
echo "Date last ran `date`"
systemctl is-active --quiet ctp-dns.service && echo Service is running
# Create the failure log on first run.
if ! [[ -f $LOG_FILE ]]; then
    echo "Created log file at $LOG_FILE"
    touch $LOG_FILE
fi
writeLog() {
    # Append a CSV record "<name>,<failcount>,<service>,[<date>]" to the
    # health-check log via `sudo tee -a` (which also echoes the record).
    local name="$1"
    local failures="${2:-0}"
    local service="${3:-N_A}"
    local record="$name,$failures,$service,[$(date)]"
    # NB: $record stays unquoted on purpose to match the original behavior
    # (runs of whitespace inside the date are collapsed by the shell).
    echo $record | sudo tee -a $LOG_FILE
}
getFailCount() {
    # Return the failure count from the most recent entry (within the last
    # 25 lines of $LOG_FILE) whose text matches $1; prints 0 when none match.
    local target="$1"
    local latest
    latest=$(tail -25 "$LOG_FILE" | grep "$target" | tail -n 1 | awk -F , '{print $2}')
    echo "${latest:-0}"
}
#######################################
# Escalate based on a failure count: $1 = subject (check name or lock file),
# $2 = current count (default 0), $3 = systemd unit (may be empty).
#   count >= 5 and subject is the lock file: sleep, then remove the lock
#   3 <= count < 5: send an alert email
#   count >= 5:     alert, stop/reset the failed unit, reset the log counter
# Always echoes the count first (callers capture/inspect stdout).
#######################################
COUNT_ACTION() {
    local FN="$1"
    local COUNT="${2:-0}"
    local SERVICE="$3"
    echo "$COUNT"
    local max=5
    if [[ $COUNT -ge $max ]] && [[ "$FN" == "$LOCK_FILE" ]]; then
        echo "Removing $LOCK_FILE file because $COUNT -ge $max sleeping 30s"
        sleep 30s
        echo "Removing $LOCK_FILE file because $COUNT -ge $max"
        sudo rm -rf $LOCK_FILE
    fi
    if [[ $COUNT -ge 3 ]] && [[ $COUNT -lt $max ]]; then
        # Fixed typo in the message: "GREATE" -> "GREATER".
        echo "SENDING EMAIL COUNT IS GREATER THAN OR EQUAL TO"
        bash $PROG/alert_user.sh "Failure Alert" "$FN Failed $COUNT times on $HOSTNAME; Service ${SERVICE}"
    elif [[ $COUNT -ge $max ]]; then
        echo "Reset failure count $COUNT"
        bash $PROG/alert_user.sh "Failure Alert" "$FN Failed $COUNT times on $HOSTNAME; Service ${SERVICE} $HOSTNAME"
        # Stop the failed unit and clear its failed state before the
        # counter restarts from zero.
        if [[ -n "$SERVICE" ]]; then
            systemctl daemon-reload
            systemctl stop $SERVICE
            systemctl reset-failed $SERVICE
        fi
        writeLog $FN 0 $SERVICE
    else
        echo "Not sig count $SERVICE"
    fi
}
# If a stop-lock exists, another process asked the health checks to pause:
# record the occurrence (COUNT_ACTION removes the lock after repeated hits)
# and bail out. Otherwise reset the lock-file failure counter to 0.
if [[ -f $LOCK_FILE ]]; then
    fn="$LOCK_FILE"
    echo $fn
    writeLog $fn $((1+$(getFailCount $fn)))
    COUNT_ACTION $fn $(getFailCount $fn)
    echo "LOCK FILE :: COUNT $(getFailCount $fn)"
    exit 1
else
    fn="$LOCK_FILE"
    writeLog $fn 0
fi

# Snapshot listener state for each port of interest; each variable is empty
# when nothing is listening (per the variable names: FTL API :4711, DNS :53,
# HTTPS :443, DoH backend 127.0.0.1:8053, unbound 127.0.0.1:5053, DoT :853,
# lighttpd :8443, and a WireGuard UDP port).
ftl_port=`netstat -tulpn | grep -o ':4711' | xargs`
dns_out_port=`netstat -tulpn | grep -o ":53"| xargs`
https_prt=`netstat -tulpn | grep -o ":443" | xargs`
dns_https_proxy=`netstat -tulpn | grep -o '127.0.0.1:8053' | xargs`
unbound_port=`netstat -tulpn | grep -o '127.0.0.1:5053' | xargs`
dot_port=`netstat -tulpn | grep -o ":853" | xargs`
lighttpd_port=`netstat -tulpn | grep -o ':8443' | xargs`
wg=`ss -lun 'sport = :54571'`

# Patterns for classifying `systemctl is-failed` / `pihole status` output;
# the *_status variables are non-empty when the service looks unhealthy.
FAILED_STR="fail\|FAILURE\|failed"
FULL_FAIL_STR="$FAILED_STR\|stop\|inactive\|dead\|stopped"
doh_proxy_status=`systemctl is-failed doh-server.service | grep -io "$FULL_FAIL_STR"`
fail_ftl_status=`systemctl is-failed pihole-FTL.service | grep -io "$FAILED_STR"`
ctp_status=`systemctl is-failed ctp-dns.service | grep -io "$FULL_FAIL_STR"`
lighttpd_status=`systemctl is-failed lighttpd.service | grep -io "$FULL_FAIL_STR"`
nginx_status=`systemctl is-failed nginx.service | grep -io "$FULL_FAIL_STR"`
pihole_status_web=`pihole status web`
pihole_status=`pihole status | grep -io 'not\|disabled\|[✗]'`
ftl_status=`pidof pihole-FTL`

# Grace period slept after each restart action below.
WAIT_TIME=8.5s
#######################################
# Recreate the dnsmasq cache/list files with the expected ownership,
# restart Pi-hole's DNS resolver, then invoke IF_RESTART three times
# (helper from the sourced exports -- presumably a conditional service
# restart; TODO confirm) and sleep $WAIT_TIME.
#######################################
function RESTART_PIHOLE() {
    mkdir -p /var/cache/dnsmasq/
    touch /var/cache/dnsmasq/dnsmasq_dnssec_timestamp
    touch /etc/pihole/local.list
    touch /etc/pihole/custom.list
    #chown pihole:pihole -R /var/cache/dnsmasq/
    sudo chown -R dnsmasq:pihole /var/cache/dnsmasq
    echo "RESTART_PIHOLE"
    pihole restartdns
    sleep 5s
    echo "RESTARTING DNS"
    IF_RESTART
    IF_RESTART
    IF_RESTART
    sleep $WAIT_TIME
}
# --- pihole DNS watchdog, pass 1 -----------------------------------------
# If pihole reports a bad status, or nothing is listening on the DNS port
# (:53), log the failure under the synthetic name "local_pihole_dns" and
# restart the pihole resolver.
fn='pihole-FTL.service'
if [[ `systemctl-exists $fn` = 'true' ]] && [[ `systemctl-inbetween-status $fn` == 'false' ]]; then
echo "systemd process $fn exists"
if { [[ -n "$pihole_status" ]] || [[ -z "$dns_out_port" ]]; }
then
echo "triggers pihole_status :$pihole_status: dns_out_port :$dns_out_port:"
fn="local_pihole_dns"
echo $fn
writeLog $fn $((1+$(getFailCount $fn)))
COUNT_ACTION $fn $(getFailCount $fn)
RESTART_PIHOLE
sleep $WAIT_TIME
fi
fi
# --- pihole DNS watchdog, pass 2 -----------------------------------------
# Restart the FTL unit itself when systemd reports a failure, the FTL
# process is gone, or its API port (:4711) is closed.
# BUGFIX: fn must be reset here -- the first pass may have overwritten it
# with "local_pihole_dns", which is not a systemd unit, so the check and the
# `systemctl restart` below would have operated on a bogus name.
fn='pihole-FTL.service'
if [[ `systemctl-exists $fn` = 'true' ]] && [[ `systemctl-inbetween-status $fn` == 'false' ]]; then
echo "systemd process $fn exists"
if [[ -n "$fail_ftl_status" ]] || [[ -z "$ftl_status" ]] || [[ -z "$ftl_port" ]]; then
echo "systemd process $fn failed restarting"
echo "FTL ftl_status $FTL $ftl_status"
echo $fn
sudo chown -R dnsmasq:pihole /var/cache/dnsmasq
systemctl restart $fn
writeLog $fn $((1+$(getFailCount $fn))) $fn
COUNT_ACTION $fn $(getFailCount $fn) $fn
sleep $WAIT_TIME
fi
fi
# DoH proxy: restart when its local listener (127.0.0.1:8053) is closed or
# systemd reports it failed/stopped.
fn="doh-server.service"
if [[ `systemctl-exists $fn` = 'true' ]] && [[ `systemctl-inbetween-status $fn` == 'false' ]]; then
if [[ -z "$dns_https_proxy" ]] || [[ -n "$doh_proxy_status" ]]; then
echo "systemd process $fn failed restarting"
echo $fn
systemctl restart $fn
writeLog $fn $((1+$(getFailCount $fn))) $fn
COUNT_ACTION $fn $(getFailCount $fn) $fn
sleep $WAIT_TIME
fi
fi
# nginx: restart when :443 is closed or the unit failed.  Stray workers are
# force-killed first so the restart gets a clean slate.
fn="nginx.service"
if [[ `systemctl-exists $fn` = 'true' ]] && [[ `systemctl-inbetween-status $fn` == 'false' ]]; then
if [[ -z "$https_prt" ]] || [[ -n "$nginx_status" ]]; then
echo "systemd process $fn failed restarting"
echo $fn
killall -9 $fn
systemctl restart $fn
writeLog $fn $((1+$(getFailCount $fn))) $fn
COUNT_ACTION $fn $(getFailCount $fn) $fn
sleep $WAIT_TIME
fi
fi
# DNS-over-TLS service: restart when :853 is closed or the unit failed.
# daemon-reload + reset-failed clears systemd's restart-rate limiting so a
# unit stuck in the "failed" state can actually be started again.
fn="ctp-dns.service"
if [[ `systemctl-exists $fn` = 'true' ]] && [[ `systemctl-inbetween-status $fn` == 'false' ]]; then
if { [[ -z "$dot_port" ]]; } || [[ -n "$ctp_status" ]] ; then
echo "systemd process $fn failed restarting"
echo $fn
systemctl daemon-reload
systemctl reset-failed $fn
systemctl restart $fn
writeLog $fn $((1+$(getFailCount $fn))) $fn
COUNT_ACTION $fn $(getFailCount $fn) $fn
sleep $WAIT_TIME
fi
fi
#######################################
# Generic systemd watchdog used by the simple-status services below: if the
# unit exists, is not mid-transition, and `systemctl is-failed` matches the
# failure pattern, restart it and record the failure.
# Arguments:
#   $1 - systemd unit name
#   $2 - optional; pass "reload" to run `systemctl daemon-reload` first
# Globals read: FULL_FAIL_STR, WAIT_TIME
# Side effects: restarts the unit, calls writeLog/COUNT_ACTION (defined
# earlier in this script).
#######################################
watchdog_restart() {
    local unit="$1"
    local do_reload="${2:-}"
    if [[ $(systemctl-exists "$unit") = 'true' ]] && [[ $(systemctl-inbetween-status "$unit") == 'false' ]]; then
        echo "systemd process $unit exists"
        local unit_status
        unit_status=$(systemctl is-failed "$unit" | grep -io "$FULL_FAIL_STR")
        if [[ -n "$unit_status" ]]; then
            echo "systemd process $unit failed restarting"
            echo "$unit"
            if [[ "$do_reload" == "reload" ]]; then
                systemctl daemon-reload
            fi
            systemctl restart "$unit"
            writeLog "$unit" $((1+$(getFailCount "$unit"))) "$unit"
            COUNT_ACTION "$unit" $(getFailCount "$unit") "$unit"
            sleep $WAIT_TIME
        fi
    fi
}
# unbound additionally requires its local listener (127.0.0.1:5053) to be
# open, so it keeps a dedicated check instead of using the generic helper.
fn='unbound.service'
if [[ `systemctl-exists $fn` = 'true' ]] && [[ `systemctl-inbetween-status $fn` == 'false' ]]; then
echo "systemd process $fn exists"
service_status=`systemctl is-failed $fn | grep -io "$FULL_FAIL_STR"`
if { [[ -z "$unbound_port" ]]; } || [[ -n "$service_status" ]] ; then
echo "systemd process $fn failed restarting"
echo $fn
systemctl restart $fn
writeLog $fn $((1+$(getFailCount $fn))) $fn
COUNT_ACTION $fn $(getFailCount $fn) $fn
sleep $WAIT_TIME
fi
fi
# Status-only services; the YouTube blocker also needs a daemon-reload
# before restart (as in the original per-service block).
watchdog_restart 'ctp-YouTube-Ad-Blocker.service' reload
watchdog_restart 'ads-catcher.service'
watchdog_restart 'wg-quick@wg0.service'
watchdog_restart 'lighttpd.service'
watchdog_restart 'php7.4-fpm.service'
# Final end-to-end checks: verify DNSSEC validation and general resolution.
bash $PROG/test_dnssec.sh -a
bash $PROG/test_dns.sh -a
echo "Done running at: `date`"
| true
|
e40a265d4e31ae42092f97db5997f9580d584088
|
Shell
|
msys2/MSYS2-packages
|
/file/PKGBUILD
|
UTF-8
| 1,496
| 2.921875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Maintainer: Alexey Pavlov <alexpux@gmail.com>
# MSYS2 PKGBUILD for file(1), the file type identification utility.
pkgname=file
pkgver=5.45
pkgrel=1
pkgdesc="File type identification utility"
arch=('i686' 'x86_64')
license=('custom')
url="https://www.darwinsys.com/file/"
# Compression libraries let file(1) look inside gz/bz2/xz/zstd archives.
depends=('gcc-libs' 'zlib' 'libbz2' 'liblzma' 'libzstd')
makedepends=('python' 'zlib-devel' 'libbz2-devel' 'liblzma-devel' 'libzstd-devel' 'autotools' 'gcc')
options=('!libtool')
# Tarball plus its detached GPG signature.
source=("https://astron.com/pub/$pkgname/$pkgname-$pkgver.tar.gz"{,.asc})
sha256sums=('fc97f51029bb0e2c9f4e3bffefdaf678f0e039ee872b9de5c002a6d09c784d82'
'SKIP')
validpgpkeys=('BE04995BA8F90ED0C0C176C471112AB16CB33B3A') # Christos Zoulas <christos@zoulas.com>
# Regenerate the autotools build system shipped in the tarball.
prepare() {
cd "${srcdir}/${pkgname}-${pkgver}"
autoreconf -fiv
}
# Configure and compile, mapping the msys host triplet to its Cygwin
# equivalent and enabling every supported compression backend.
build() {
cd "${srcdir}/${pkgname}-${pkgver}"
local CYGWIN_CHOST="${CHOST/-msys/-cygwin}"
./configure \
--prefix=/usr \
--build=${CYGWIN_CHOST} \
--host=${CYGWIN_CHOST} \
--target=${CYGWIN_CHOST} \
--enable-fsect-man5 \
--enable-zlib \
--enable-bzlib \
--enable-xzlib \
--enable-zstdlib
make
}
# Install into $pkgdir, ship the license, and install the python 'magic'
# binding next to the C library.
package() {
cd "${srcdir}/${pkgname}-${pkgver}"
make DESTDIR=${pkgdir} install
install -Dm644 COPYING ${pkgdir}/usr/share/licenses/${pkgname}/COPYING
# Locate site-packages with the stdlib sysconfig module.  The previous
# `from distutils.sysconfig import *; get_python_lib(0,0)` invocation breaks
# on current interpreters: distutils was deprecated in Python 3.10 and
# removed in 3.12.
PYTHON_SITELIB=$(/usr/bin/python -c 'import sysconfig; print(sysconfig.get_path("purelib"))')
mkdir -p ${pkgdir}/${PYTHON_SITELIB}
cp -f ${srcdir}/${pkgname}-${pkgver}/python/magic.py "${pkgdir}/$PYTHON_SITELIB"
# Byte-compile with stripped, reproducible paths inside the packaging root.
python3 -m compileall -o 0 -o 1 -s "${pkgdir}" "${pkgdir}/$PYTHON_SITELIB"
}
| true
|
454be47fddf9d2faf617501e5bb9076b72aa51ab
|
Shell
|
ixfg9922/DP-403123
|
/2-deploy-iko.sh
|
UTF-8
| 706
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy the InterSystems Kubernetes Operator (IKO) into the current
# cluster.  Helper functions (msg, trace, exit_if_error,
# get_intersystems_credentials, deploy_isc_reg_secret) come from utils.sh.
source ./utils.sh
get_intersystems_credentials
deploy_isc_reg_secret
msg "\nAdding Smart Data Services helm repository to helm...\n"
helm repo add sds https://intersystems.github.io/sds-charts
# BUGFIX: error message previously read "Could no add".
exit_if_error "Could not add smart data services repository to helm."
helm repo update
exit_if_error "Helm update failed."
msg "\nInstalling IKO...\n"
helm install -f ./iko-sds-values.yaml -n "default" --version 2.1.66 intersystems-iris-operator sds/iris-operator
exit_if_error "Could not install IKO into your cluster using helm."
trace "\nWaiting IKO to be ready."
# Poll until the iris-operator pod reports 1/1 ready containers.
while [ -z "$(kubectl get pods | grep iris-operator | grep 1/1)" ]
do
printf "."
sleep 1
done
trace "\nIKO is ready!\n"
| true
|
4121e42eb460ee54c57914e98efe09a48fa8b828
|
Shell
|
jhu-sheridan-libraries/lutece-init
|
/entrypoint.sh
|
UTF-8
| 3,214
| 4.125
| 4
|
[] |
no_license
|
#! /bin/bash
# Result variable for get_file_value(); callers read this after each call.
fileValue=""
# Set fileValue to the contents of FILE when it exists, otherwise to
# DEFAULT_VALUE.  Used to support Docker-style *_FILE secret variables.
# Usage: get_file_value FILE DEFAULT_VALUE
get_file_value() {
    local file="$1"
    local fallback="$2"
    if [ -e "${file}" ]; then
        fileValue=$(cat "${file}")
    else
        fileValue="${fallback}"
    fi
}
# Initialise the MySQL database if it has not been set up yet.
# Waits for the server, then checks for the marker table 'core_datastore':
#   present -> nothing to do;
#   absent  -> load the SQL dump when one exists, otherwise run the ant init
#              target from the extracted war.
# Usage: init_db modified_war_dir
# Globals read: DB_HOST DB_USER DB_PASS DB_NAME sqlinitfile
init_db() {
modifiedwardir="$1"
echo "Waiting for MySQL server"
while ! mysqladmin ping -h${DB_HOST} --silent; do
sleep 1
done
echo "Found MySQL server"
TABLE="core_datastore"
echo "Checking if table <$TABLE> exists ..."
mysql -u ${DB_USER} -p${DB_PASS} -h ${DB_HOST} -e "desc $TABLE" ${DB_NAME} > /dev/null 2>&1
if [ $? -eq 0 ]
then
echo "Database already initialized"
else
echo "Database is empty"
if [ -f ${sqlinitfile} ]
then
echo "Loading database from dump"
mysql -u ${DB_USER} -p${DB_PASS} -h ${DB_HOST} ${DB_NAME} < ${sqlinitfile}
else
# BUGFIX: message previously read "Initiliazing".
echo "Initializing new site database"
cd ${modifiedwardir}/WEB-INF/sql && ant
fi
fi
}
# Grab configuration values possibly stored in files.  Each *_FILE variable
# takes precedence over its plain counterpart, supporting Docker secrets.
get_file_value "${MYSQL_DATABASE_FILE}" "${MYSQL_DATABASE}"
DB_NAME=${fileValue}
get_file_value "${MYSQL_USER_FILE}" "${MYSQL_USER}"
DB_USER=${fileValue}
get_file_value "${MYSQL_PASSWORD_FILE}" "${MYSQL_PASSWORD}"
DB_PASS=${fileValue}
get_file_value "${MAIL_HOST_FILE}" "${MAIL_HOST}"
MAIL_HOST=${fileValue}
get_file_value "${MAIL_PORT_FILE}" "${MAIL_PORT}"
MAIL_PORT=${fileValue}
get_file_value "${MAIL_USER_FILE}" "${MAIL_USER}"
MAIL_USER=${fileValue}
get_file_value "${MAIL_PASS_FILE}" "${MAIL_PASS}"
MAIL_PASS=${fileValue}
# Lutece war must be modified before being deployed with secret config values.
# Only modify and deploy war if needed.
sourcewar=/data/lutece.war
sqlinitfile=/data/lutece.sql
deploywar=/usr/local/tomcat/webapps/ROOT.war
deploywardir=/usr/local/tomcat/webapps/ROOT
# Scratch directory where the war is unpacked for templating.
extractdir=/lutece
dbconfigfile=${extractdir}/WEB-INF/conf/db.properties
configfile=${extractdir}/WEB-INF/conf/config.properties
# Replace strings in a given file (in place), quietly, via the rpl tool.
# Usage: rplfile KEY VALUE FILE
rplfile() {
# Set LANG to work around rpl bug
LANG=en_US.UTF-8 rpl -q "$1" "$2" "$3" > /dev/null 2>&1
}
# Abort early when no source war has been mounted into the container.
if [ ! -f ${sourcewar} ]
then
echo "Error: No source war ${sourcewar} found."
exit 1
fi
# Rebuild the deployment war when it is missing or older than the source.
# The templating replaces #PLACEHOLDER# tokens with the resolved secrets.
if [ ! -f ${deploywar} ] || [ ${sourcewar} -nt ${deploywar} ]
then
echo "Modifying source war to create deployment war"
rm -f ${deploywar}
unzip -q ${sourcewar} -d ${extractdir}
rplfile "#DB_NAME#" "${DB_NAME}" ${dbconfigfile}
rplfile "#DB_USER#" "${DB_USER}" ${dbconfigfile}
rplfile "#DB_PASS#" "${DB_PASS}" ${dbconfigfile}
rplfile "#DB_HOST#" "${DB_HOST}" ${dbconfigfile}
rplfile "#MAIL_HOST#" "${MAIL_HOST}" ${configfile}
rplfile "#MAIL_PORT#" "${MAIL_PORT}" ${configfile}
rplfile "#MAIL_USER#" "${MAIL_USER}" ${configfile}
rplfile "#MAIL_PASS#" "${MAIL_PASS}" ${configfile}
init_db ${extractdir}
# Repack the modified tree and move it into Tomcat's webapps atomically.
cd ${extractdir} && jar cf /tmp.war *
echo "Deploying modified war"
mv /tmp.war ${deploywar}
else
echo "No changes to deployed war needed."
init_db ${deploywardir}
fi
# Start tomcat
catalina.sh run
| true
|
0080d724797ea991c5f6e87b207408c6f4927015
|
Shell
|
smangul1/imrep.GTEx
|
/validation/mixcr/reproduce.sh
|
UTF-8
| 362
| 2.546875
| 3
|
[] |
no_license
|
# Historical one-off renaming steps, kept for reference:
#cd intermediate.files
#while read line; do SRA=$(echo $line | awk '{print $1}'); new=$(echo $line | awk '{print $2}'); echo $SRA,$new; mv mixcr_${SRA}.txt mixcr_${new}.txt;done<../../sample_SRA_bio_new.txt
# Build the sample list from the *txt files in the current directory.
# Iterate the glob directly instead of parsing `ls` output (which breaks on
# unusual file names); strip from the first ".txt" onward, matching what
# the previous `awk -F ".txt" '{print $1}'` produced for normal names.
for f in *txt; do
    [ -e "$f" ] || continue   # unmatched glob -> skip the literal pattern
    printf '%s\n' "${f%%.txt*}"
done > samples.txt
# Extract clean CDR3 sequences for every sample.
while read line
do
    python ../../code/mixcr.extract.py "${line}.txt" "${line}.clean.cdr3"
done < samples.txt
| true
|
38a1ce560aae166e05e7b1999088300db9014ae1
|
Shell
|
andrewjbtw/dvcapture
|
/dvanalyze.sh
|
UTF-8
| 1,920
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# dvanalyzer analysis
# Usage: dvanalyze.sh CAPTURE_FILE LOG_DIR
# Runs DV Analyzer on a captured DV stream, summarises the XML report via
# XSLT, and plots the error columns with gnuplot into an SVG.
capture_file=$1
log_dir=$2
# todo: validate file and folder input
if [ ! -d "$log_dir" ]
then
echo "$log_dir" not found
exit 1
fi
if ( echo "$capture_file" | grep "\.m2t$") # checks for ".m2t" extension
then
echo "This is an .m2t file. Skipping dvanalyzer." # dvanalyzer doesn't run on .m2t
else
scriptdir=$(dirname "$0")
filename=$(basename "$capture_file")
if [ -f "$capture_file" ] ; then
# Per-capture analysis folder; skipped entirely if it already exists.
outputdir="$log_dir/${filename%.*}_analysis"
if [ ! -d "$outputdir" ] ; then
mkdir -p "$outputdir"
# plot graph
echo Analyzing DV stream...
dvanalyzer </dev/null --XML "$capture_file" > "$outputdir/${filename%.*}_dvanalyzer.xml"
# Flatten the XML report into a CSV-ish summary for plotting.
xsltproc "$scriptdir/dvanalyzer.xsl" "$outputdir/${filename%.*}_dvanalyzer.xml" > "$outputdir/${filename%.*}_dvanalyzer_summary.txt"
echo Plotting results...
# The gnuplot script below divides the frame counter by 29.97 (NTSC frame
# rate) to get seconds, and draws one panel per error column.
echo "set terminal svg size 1920, 1080 enhanced background rgb 'white'
set border 0
set datafile separator ','
set output '$outputdir/${filename%.*}_dvanalyzer.svg'
set multiplot layout 4, 1 title 'DV Analyzer Graphs of $filename'
set style fill solid border -1
set xrange [ 0: ]
set yrange [ 0:100 ]
set grid y
unset xtics
set xdata time
set timefmt '%S'
set xtics format '%H:%M:%S'
set xtics nomirror
plot '$outputdir/${filename%.*}_dvanalyzer_summary.txt' u (\$1/29.97):(\$2) title 'Video Error Concealment (percentage)' lt 1 with impulses
plot '' u (\$1/29.97):(\$3) title 'Channel 1 Audio Error (percentage)' lt 2 with impulses
plot '' u (\$1/29.97):(\$4) title 'Channel 2 Audio Error (percentage)' lt 3 with impulses
set yrange [ -100:100 ]
plot '' u (\$1/29.97):(\$5) title 'Audio Error Head Difference' lt 4 with impulses" | gnuplot
echo Done
fi
else
echo "ERROR - $capture_file is not a DV file"
fi
fi
| true
|
abf8b50a1537d2c2633f93e897aea18eaa89004d
|
Shell
|
DerekMaffett/dotfiles
|
/link-configs.sh
|
UTF-8
| 1,448
| 2.765625
| 3
|
[] |
no_license
|
# Symlink a file from the dotfiles configs directory into $HOME.
# $1 - source name under ~/dotfiles/configs; $2 - destination path under ~.
# BUGFIX: quote the expansions so paths containing spaces do not word-split;
# -f replaces any existing link, -v logs what was linked.
link () {
    ln -fvs "$HOME/dotfiles/configs/$1" "$HOME/$2"
}
# Pre-create every destination directory so the symlinks below land inside
# directories rather than becoming directory-links themselves.
mkdir -p ~/.config/projects/
mkdir -p ~/.config/brittany/
mkdir -p ~/.config/nvim/
mkdir -p ~/.config/nixpkgs/
mkdir -p ~/.config/nix/
mkdir -p ~/.config/terminator/
mkdir -p ~/.config/kitty/
mkdir -p ~/.config/qutebrowser/
mkdir -p ~/.config/qutebrowser/bookmarks/
mkdir -p ~/.config/home-manager/
mkdir -p ~/.xmonad/
mkdir -p ~/.stack/
mkdir -p ~/.ssh/
# Link each tracked config from ~/dotfiles/configs to its live location.
link .projects.json .config/projects/.projects.json
link brittany.yaml .config/brittany/config.yaml
link nix-config.nix .config/nixpkgs/config.nix
link terminator-config .config/terminator/config
link kitty.conf .config/kitty/kitty.conf
link kitty-mac-cmdline-options .config/kitty/macos-launch-services-cmdline
link qutebrowser-config.py .config/qutebrowser/config.py
link qutebrowser-bookmarks .config/qutebrowser/bookmarks/urls
link qutebrowser-quickmarks .config/qutebrowser/quickmarks
link xmonad.hs .xmonad/xmonad.hs
link stack.yaml .stack/config.yaml
link home.nix .config/home-manager/home.nix
link .Xresources .Xresources
link .agignore .agignore
link .gitconfig .gitconfig
link .prettierrc.js .prettierrc.js
link .tmux.conf .tmux.conf
link .zshrc .zshrc
link .bashrc .bashrc
link ssh-init.json .ssh/ssh-init.json
link .nix-channels .nix-channels
# On NixOS, the system configuration lives under /etc and needs root.
if test -f /etc/NIXOS; then
# Link nixos config to root
sudo ln -fvs $HOME/dotfiles/configs/preferences-configuration.nix /etc/nixos/preferences-configuration.nix
fi
|
915fa1ae9f03d673607aba6d466f6908b40b6b76
|
Shell
|
tmzt/androix-util-modular
|
/release.sh
|
UTF-8
| 5,585
| 3.96875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# X.Org module release helper: uploads dist tarballs, pushes the release
# tag, and generates an announcement mail template.
set -e
# Mailing lists the announcement is addressed to, per module section.
announce_list="xorg-announce@lists.freedesktop.org"
xorg_list="xorg@lists.freedesktop.org"
dri_list="dri-devel@lists.sourceforge.net"
xkb_list="xkb@listserv.bat.ru"
# Upload/download hosts for the published tarballs.
host_people=annarchy.freedesktop.org
host_xorg=xorg.freedesktop.org
host_dri=dri.freedesktop.org
# Filled in by --user / --remote options below.
user=
remote=origin
# Print the command-line help to stdout.  Expands $host_people, so call it
# after the host variables above are defined.
usage()
{
cat <<HELP
Usage: $(basename "$0") [options] <section> <tag_previous> <tag_current>
Options:
--force force overwriting an existing release
--user <name> username on $host_people
--help this help message
--ignore-local-changes don't abort on uncommitted local changes
--remote git remote where the change should be pushed (default "origin")
HELP
}
# Tell the user the working tree is dirty and abort the release.
abort_for_changes()
{
    printf '%s\n' \
        'Uncommitted changes found. Did you forget to commit? Aborting.' \
        'Use --ignore-local-changes to skip this check.'
    exit 1
}
# Emit the announcement mail template to stdout.
# Globals read: tag_previous tag_current section module version section_path
#               tarball_dir tarbz2 targz + the list/host variables above.
gen_announce_mail()
{
# "initial" means there is no previous tag, so the shortlog covers the
# whole history up to the current tag.
case "$tag_previous" in
initial)
range="$tag_current"
;;
*)
range="$tag_previous".."$tag_current"
;;
esac
# Fall back to the GNU-prefixed tools (gmd5sum/gsha1sum) on BSD-ish hosts.
MD5SUM=`which md5sum || which gmd5sum`
SHA1SUM=`which sha1sum || which gsha1sum`
# Pick the download host and CC list for the module's section.
if [ "$section" = "libdrm" ]; then
host=$host_dri
list=$dri_list
elif [ "$section" = "xkeyboard-config" ]; then
host=$host_xorg
list=$xkb_list
else
host=$host_xorg
list=$xorg_list
fi
cat <<RELEASE
Subject: [ANNOUNCE] $module $version
To: $announce_list
CC: $list
`git log --no-merges "$range" | git shortlog`
git tag: $tag_current
http://$host/$section_path/$tarbz2
MD5: `cd $tarball_dir && $MD5SUM $tarbz2`
SHA1: `cd $tarball_dir && $SHA1SUM $tarbz2`
http://$host/$section_path/$targz
MD5: `cd $tarball_dir && $MD5SUM $targz`
SHA1: `cd $tarball_dir && $SHA1SUM $targz`
RELEASE
}
# Byte-wise, locale-independent behavior for sort/grep/etc.
export LC_ALL=C
# Manual option parsing: long options first, then the three positional
# arguments (section, previous tag, current tag).
while [ $# != 0 ]; do
case "$1" in
--force)
force="yes"
shift
;;
--help)
usage
exit 0
;;
--user)
shift
user=$1@
shift
;;
--ignore-local-changes)
ignorechanges=1
shift
;;
--remote)
shift
remote=$1
shift
;;
--*)
echo "error: unknown option"
usage
exit 1
;;
*)
# Positional arguments; anything left over afterwards is an error.
section="$1"
tag_previous="$2"
tag_current="$3"
shift 3
if [ $# != 0 ]; then
echo "error: unknown parameter"
usage
exit 1
fi
;;
esac
done
# Check for uncommitted/queued changes.
if [ "x$ignorechanges" != "x1" ]; then
set +e
git diff --quiet HEAD > /dev/null 2>&1
if [ $? -ne 0 ]; then
abort_for_changes
fi
set -e
fi
# Check if the object has been pushed. To do so:
# 1. Check if the current branch has the object. If not, abort.
# 2. Check if the object is on $remote/branchname. If not, abort.
local_sha=`git rev-list -1 $tag_current`
current_branch=`git branch | grep "\*" | sed -e "s/\* //"`
set +e
git rev-list $current_branch | grep $local_sha > /dev/null
if [ $? -eq 1 ]; then
echo "Cannot find tag '$tag_current' on current branch. Aborting."
echo "Switch to the correct branch and re-run the script."
exit 1
fi
# Any revision that is ahead of the remote but contains the tagged object
# means the tag has not been pushed yet.
revs=`git rev-list $remote/$current_branch..$current_branch | wc -l`
if [ $revs -ne 0 ]; then
git rev-list $remote/$current_branch..$current_branch | grep $local_sha > /dev/null
if [ $? -ne 1 ]; then
echo "$remote/$current_branch doesn't have object $local_sha"
echo "for tag '$tag_current'. Did you push branch first? Aborting."
exit 1
fi
fi
set -e
# The dist tarballs live next to config.status (the build directory).
tarball_dir="$(dirname $(find . -name config.status))"
# Derive module name and version from the tag.
module="${tag_current%-*}"
if [ "x$module" = "x$tag_current" ]; then
# version-number-only tag.
pwd=`pwd`
module=`basename $pwd`
version="$tag_current"
else
# module-and-version style tag
version="${tag_current##*-}"
fi
# Prefer the PACKAGE name autoconf recorded, when a matching tarball exists.
detected_module=`grep 'PACKAGE = ' $tarball_dir/Makefile | sed 's|PACKAGE = ||'`
if [ -f $detected_module-$version.tar.bz2 ]; then
module=$detected_module
fi
modulever=$module-$version
tarbz2="$modulever.tar.bz2"
targz="$modulever.tar.gz"
# The announcement mail template is written next to the tarballs.
announce="$tarball_dir/$modulever.announce"
echo "checking parameters"
# Both dist tarballs must exist before anything is uploaded.
if ! [ -f "$tarball_dir/$tarbz2" ] ||
! [ -f "$tarball_dir/$targz" ]; then
echo "error: tarballs not found. Did you run make dist?"
usage
exit 1
fi
if [ -z "$tag_previous" ] ||
[ -z "$section" ]; then
echo "error: previous tag or section not found."
usage
exit 1
fi
# Map the section to its archive path and the server-side directory.
if [ "$section" = "libdrm" ]; then
section_path="libdrm"
srv_path="/srv/$host_dri/www/$section_path"
elif [ "$section" = "xkeyboard-config" ]; then
section_path="archive/individual/data"
srv_path="/srv/$host_xorg/$section_path"
else
section_path="archive/individual/$section"
srv_path="/srv/$host_xorg/$section_path"
fi
echo "checking for proper current dir"
if ! [ -d .git ]; then
echo "error: do this from your git dir, weenie"
exit 1
fi
echo "checking for an existing tag"
if ! git tag -l $tag_current >/dev/null; then
echo "error: you must tag your release first!"
exit 1
fi
# Refuse to clobber an already-published release unless --force was given.
echo "checking for an existing release"
if ssh $user$host_people ls $srv_path/$targz >/dev/null 2>&1 ||
ssh $user$host_people ls $srv_path/$tarbz2 >/dev/null 2>&1; then
if [ "x$force" = "xyes" ]; then
echo "warning: overriding released file ... here be dragons."
else
echo "error: file already exists!"
exit 1
fi
fi
echo "generating announce mail template, remember to sign it"
gen_announce_mail >$announce
echo "  at: $announce"
# Publish: upload both tarballs, then push the tag to the remote.
echo "installing release into server"
scp $tarball_dir/$targz $tarball_dir/$tarbz2 $user$host_people:$srv_path
echo "pushing tag upstream"
git push $remote $tag_current
| true
|
0f38956deb476813780adcacc49edeafd82c5222
|
Shell
|
whatsondoc/Linux
|
/data_transfer/rsync_data_transfer--upload.sh
|
UTF-8
| 17,497
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
#_______________________________________________________________#
# Project: Remote file transfer using CPU affinity #
# Author: Ben Watson - @WhatsOnDoc #
# Date: December 2018 #
# LEGEND: #
# ## = Explanation #
# # = Commenting line for exclusion (as per usual) #
#_______________________________________________________________#
#-----------------#
# VARIABLES #
#-----------------#
# NOTE(review): USER shadows the login-name environment variable most shells
# export; it must be populated here (or via the prompt) before running.
USER="" ## The username required to establish a secure shell (ssh) connection to the remote host
#read -p "Username: " USER ## User prompts for interactive invocation. Commented out by default
REMOTE_HOST="" ## The remote host name or IP address
#read -p "Remote host: " REMOTE_HOST ## User prompts for interactive invocation. Commented out by default
REMOTE_DIR="" ## The directory on the remote server that the data will be transferred into
#read -p "Remote directory: " REMOTE_DIR ## User prompts for interactive invocation. Commented out by default
CHECKSUMS="" ## Enter 'YES' to enable checksum hash calculation and comparison from source => destination
#-------------------------------------------------------------------------------#
## \ / ##
### { –––-–––– YOU SHOULD NOT NEED TO CHANGE ANYTHING FROM HERE ON –––-–––– } ###
## / \ ##
#-------------------------------------------------------------------------------#
#-----------------#
# FUNCTIONS #
#-----------------#
#######################################
# Sanity-check configuration, arguments and tool availability before any
# transfer starts.  Sets TERMINATE="true" on each fatal problem and exits
# via help() at the end if any check failed.
# Globals read: USER REMOTE_HOST REMOTE_DIR LOCAL_DIR PROCS THREADING NUM_CPUS
# Globals written: LOCAL_DIR (trailing slash stripped), DIR_SLASH, TERMINATE
#######################################
validation_checks() {
## Checking that the variables in this script (above) have been populated:
if [[ -z ${USER} ]] || [[ -z ${REMOTE_HOST} ]] || [[ -z ${REMOTE_DIR} ]]
then
echo -e "\nERROR:\t\tThe following variables have not been defined within the script:\n\tUser:\t\t${USER}\n\tRemote Host:\t${REMOTE_HOST}\n\tRemote Directory:\t${REMOTE_DIR}\n"
echo -e "\nPlease use your favourite text editor to edit the script and populate the above variables."
TERMINATE="true"
fi
if ! ssh ${USER}@${REMOTE_HOST} "[[ -d ${REMOTE_DIR} ]]"
then
echo -e "\nERROR:\t\tThe remote directory specified in the REMOTE_DIR variable does not exist.\n"
TERMINATE="true"
fi
## Checking that all mandatory parameters - local directory, number of processors & thread value - have been provided as arguments:
if [[ -z ${LOCAL_DIR} ]] || [[ -z ${PROCS} ]] || [[ -z ${THREADING} ]]
then
echo -e "\nERROR:\t\tMandatory arguments have not been specified:\n\t\t\tDirectory:\t\t${LOCAL_DIR}\n\t\t\tNumber of CPUs:\t\t${PROCS}\n\t\t\tThread value:\t\t${THREADING}\n"
TERMINATE="true"
fi
## Validating the integer provided for the number of processors:
if [[ ${PROCS} -gt $(nproc) ]]
then
echo -e "\nERROR:\t\tThe number of processors specified is greater than the number of CPU cores on the local server.\n"
TERMINATE="true"
fi
## Checking for the existence of a trailing slash on the provided directory path:
DIR_SLASH=$(echo ${LOCAL_DIR: -1})
## If there is a trailing slash, let's remove it from the path as the rsync syntax below includes the slash (and we don't want duplicate slashes):
if [[ ${DIR_SLASH} == '/' ]]
then
LOCAL_DIR=$(echo ${LOCAL_DIR} | sed s'/.$//')
fi
## Validation that passwordless authentication is enabled between local and destination servers (e.g. using ssh keys):
ssh -o PasswordAuthentication=no -o BatchMode=yes ${USER}@${REMOTE_HOST} exit > /dev/null
## An unsuccessful attempt will return a non-zero error code, which will fail the following check:
if [[ $? == 0 ]]
then
echo -e "VALIDATED:\tPasswordless authentication to the remote server is in place.\n"
else
echo -e "\nERROR:\t\tCannot connect to the remote server without the use of a password.\n"
TERMINATE="true"
fi
## Checking that rsync is installed on the local server:
if [[ -x $(command -v rsync) ]]
then
echo -e "VALIDATED:\trsync is present on the local server.\n"
else
echo -e "\nERROR:\t\trsync is not present on the local server (or, at least, not included in '$PATH').\n"
TERMINATE="true"
fi
## Looking for pre-existing rsync processes on the local server:
if [[ $(ps -e -o cmd | awk '$1=="rsync"') ]]
then
echo -e "\nADVISORY:\tThere are running rsync processes on the local server:"
ps -e -o psr,cmd,pid | awk '$2=="rsync"'
fi
## Checking rsync is installed on the remote server:
ssh ${USER}@${REMOTE_HOST} 'command -v rsync' > /dev/null
## An unsuccessful attempt will return a non-zero error code, which will fail the following check:
if [[ $? == 0 ]]
then
echo -e "VALIDATED:\trsync is present on the remote server.\n"
else
echo -e "\nERROR:\t\trsync is not present on the remote server (or, at least, not included in '$PATH').\n"
TERMINATE="true"
fi
## Looking for pre-existing rsync processes on the remote server:
REMOTE_PROCESSES=$(ssh ${USER}@${REMOTE_HOST} ps -e -o cmd | awk '$1=="rsync"')
if [[ -n ${REMOTE_PROCESSES} ]]
then
echo -e "\nADVISORY:\tThere are running rsync processes on the remote server:"
ssh ${USER}@${REMOTE_HOST} ps -e -o psr,cmd,pid | awk '$2=="rsync"'
fi
## Checking the taskset command exists on the local server (as this is used to bind processes to CPUs):
if ! [[ -x $(command -v taskset) ]]
then
echo -e "\nERROR:\t\ttaskset is not present on this server (or, at least, not included in '$PATH'). It is typically available in the util-linux package in Linux.\n"
TERMINATE="true"
fi
## Validating that the variable containing the number of processors is populated correctly. If 'nproc' isn't available, the variable value will be -1 and this will cause problems...
if [[ ${NUM_CPUS} == "-1" ]]
then
# BUGFIX: this echo was missing -e, so the \n/\t escapes printed literally
# (every sibling message in this function uses echo -e).
echo -e "\nERROR:\t\tUnable to accurately determine number of processors using 'nproc'. Make this program available (and in '$PATH') or manually amend the NUM_CPUS variable to proceed.\n"
TERMINATE="true"
fi
## If any of the prior validation checks fail, then the help() function will be called and the script will exit:
if [[ ${TERMINATE} == "true" ]]
then
help
exit 1
fi
}
## Defining the help function to be invoked if no arguments provided at runtime, or the validation checks fail:
## Print the usage/help statement to stdout; shown on bad arguments or when
## a validation check fails.  printf '%b\n' interprets the backslash escapes
## exactly as `echo -e` did.
help() {
    printf '%b\n' "\nHELP STATEMENT\nPlease execute the script specifying the parameters for local directory '-d', the number of processors '-p' as either 'all' or an integer, and the number of parallel threads '-t', also as an integer (i.e. not a floating point number)."
    printf '%b\n' "\nExample usage:\v\t$ /path/to/script.sh -d /local/directory/path -p ALL -t 16\n\t\t$ script.sh -d /local/directory/path -p 4 -t 8\n"
    printf '%b\n' "\nPackages & commands required:\tssh; nproc; ps; awk; sed; rsync (on local server); rsync (on remote server); taskset; comm\n"
}
#--------------------#
# SCRIPT BLOCK #
#--------------------#
echo -e "\n"
# Parse -d DIRECTORY, -t THREADS, -p PROCESSORS (or 'all'); -h prints help.
while getopts "hd:t:p:" OPTION
do
case "$OPTION"
in
d) LOCAL_DIR=${OPTARG} ## The directory specified by the user from which to transfer files, parsed from the input value in the script argument
if [[ -d ${LOCAL_DIR} ]] ## Checking that the directory provided by the user at script invocation exists
then
echo -e "VALIDATED:\tLocal directory provided exists."
else
echo -e "\nERROR:\tPlease specify a valid directory in which the files exist. \n"
help
exit 1
fi
;;
t) THREADING=${OPTARG} ## The number of parallel transfer tasks that will be assigned to each processor used by the script
if ((${THREADING})) 2> /dev/null ## Checking that the thread value provided is an integer (not a string nor a float)
then
echo -e "VALIDATED:\tThread value provided is an integer."
else
echo -e "\nERROR:\tPlease specify an integer (whole number) for the number of parallel execution threads.\n"
help
exit 1
fi
;;
p) PROCS=${OPTARG}
if [[ ${PROCS} =~ ALL|All|all ]] ## Determining whether user input determines that all server CPUs will be used for data transfer
then
echo -e "VALIDATED:\tAll system processors selected."
PROCS=$(nproc)
NUM_CPUS=$(( ${PROCS} - 1 )) ## The number of CPUs to be used for transfers on the local server, less 1 as we number from 0
elif [[ ${PROCS} != 0 ]] && ((${PROCS})) 2> /dev/null ## Checking that the thread value provided is an integer (not a string nor a float)
then
echo -e "VALIDATED:\tProcessor value provided is a non-zero integer."
else
echo -e "\nERROR:\tPlease specify an integer (whole number) for the number of processors to be used for the data transfer, or 'all' to specify all processors.\n"
help
exit 1
fi
;;
h | *) help && exit 1 ## Capturing all other input; providing the help() statement for non-ratified inputs
;;
esac
done
validation_checks ## Calling the validation_checks function
## Creating the runtime variables:
TOTAL_TASKS=$(find ${LOCAL_DIR} -type f | wc -l) ## The total number of files in the supplied directory path to be transferred
# NOTE(review): FILE_QUEUE is built by word-splitting `ls` output, so file
# names containing whitespace will corrupt the queue -- confirm inputs.
FILE_QUEUE=( $(ls ${LOCAL_DIR}) ) ## Creating a variable array that contains the file names that are to be transferred
FILE_INDEX="0" ## A simple file counter used to measure the number of tasks being undertaken
DATA_TRANSFER_COUNT="0" ## Enabling the capture of data volumes that pass through the transfer loops
echo -e "
Local directory:\t\t${LOCAL_DIR}
Remote directory:\t\t${REMOTE_DIR}
Remote user@server:\t\t${USER}@${REMOTE_HOST}
Number of tasks:\t\t${TOTAL_TASKS}
Number of processors:\t\t${PROCS}
Thread count per CPU:\t\t${THREADING}\n" ## Printing the defined variables to stdout to create a record of the conditions
## If checksums are enabled, calculate the file checksums at the source:
if [[ ${CHECKSUMS} == "YES" ]]
then
echo -e "\nChecksum validation enabled - computing checksums on files in the source directory..."
FILE_CHECKSUM_INDEX="0"
# One "<name> <sha1>" line per file, written to tmpfs for later comparison.
for FILE_CHECKSUM in ${FILE_QUEUE[*]}
do
echo "${FILE_CHECKSUM} `sha1sum ${LOCAL_DIR}/${FILE_QUEUE[${FILE_CHECKSUM_INDEX}]} | awk '{print $1}'`" >> /dev/shm/data-transfer-file-checksum.local
((FILE_CHECKSUM_INDEX++))
done
echo -e "Complete.\n"
fi
## Capturing the starting second count to be used to calculate the wall time:
TIMER_START=$(date +%s)
## Sending table headings to stdout for transfer information:
echo -e "\nHOSTNAME\t\t\t\tCPU\t\tTASK\t\tTHREAD\t\tFILE"
while true
do
## Cycling the available CPUs on the local server:
## Scheduler pass: walk each CPU core; when a core has no rsync bound to it,
## dispatch up to ${THREADING} queued transfers onto that core via taskset.
## Once every queued file has been submitted, the else-branch waits for the
## remaining rsync processes, verifies the transfer and reports, then exits.
for CPU in $(seq 0 ${NUM_CPUS})
do
	## Tracking that we still have outstanding tasks to complete:
	if [ ${FILE_INDEX} -lt ${TOTAL_TASKS} ]
	then
		## Running a check to see whether any rsync processes are running on the specific processor:
		CHECK=$(ps -e -o psr,cmd | awk -v aCPU=${CPU} '$1==aCPU' | awk '$2=="rsync"')
		## If the variable is empty (and thus no process running), bind an rsync operation to the specific processor for the next file in the FILE_QUEUE:
		if [[ -z ${CHECK} ]]
		then
			## A loop to specify the number of tasks that should be bound to each processor during distribution:
			for THREAD in $(seq 1 ${THREADING})
			do
				## Checking the FILE_INDEX against the TOTAL_TASKS again to make sure we don't create empty tasks:
				if [ ${FILE_INDEX} -lt ${TOTAL_TASKS} ]
				then
					## Defining CPU affinity for the transfer tasks (preventing the Linux scheduler from moving tasks between processors):
					## NOTE(review): ${FILE_QUEUE[...]} is unquoted, so file names containing whitespace/globs would word-split — confirm queue entries are always "safe" names.
					taskset -c ${CPU} rsync -a -e ssh ${LOCAL_DIR}/${FILE_QUEUE[${FILE_INDEX}]} ${USER}@${REMOTE_HOST}:${REMOTE_DIR} &
					## Adding a slight pause to allow for large creation of parallel tasks:
					sleep 0.1s
					## Binding the most recently started task on the remote server to a processor:
					## TBC ##
					## Echo the current operation performed to stdout:
					echo -e "${HOSTNAME}\t\t\t\t${CPU}\t\t${FILE_INDEX}\t\t${THREAD}\t\t${FILE_QUEUE[$FILE_INDEX]}"
					## Capturing file size and incrementing the file size counter:
					DATA_TRANSFER_COUNT=$(( ${DATA_TRANSFER_COUNT} + $(stat -c %s ${LOCAL_DIR}/${FILE_QUEUE[${FILE_INDEX}]} | cut -f1) ))
					## Increment the file counter:
					((FILE_INDEX++))
				else
					## No work left for this thread slot; deliberately do nothing.
					:
				fi
			done
		fi
	## The exit path, for when the FILE_INDEX counter exceeds the value in TOTAL_TASKS:
	else
		echo -e "\nAll transfer tasks have been assigned to CPU cores.\n"
		## Tracking the outstanding number of running processes:
		until [[ $(pidof rsync | wc -w) == 0 ]]
		do
			## Overwriting the same line with updated output to prevent explosion to stdout:
			echo -n "Remaining processes: `pidof rsync | wc -w`"
			echo -n -e "\e[0K\r"
		done
		echo -e "All processes complete."
		TIMER_END=$(date +%s) ## Capturing the end second count
		## Checking for differences between local target directories:
		if [[ -x $(command -v comm) ]]
		then
			echo -e "\v\vChecking for the differences between local & remote directories..."
			FILE_LISTS="/dev/shm/data-transfer-file-list" ## Storing the file lists in memory on the local server (should be pretty small)
			ls ${LOCAL_DIR} | sort > ${FILE_LISTS}.local ## Capturing the contents of the local directory and storing in a temp file on local memory
			ssh ${USER}@${REMOTE_HOST} "ls ${REMOTE_DIR} | sort" > ${FILE_LISTS}.remote ## Capturing the contents of the remote directory and storing in a temp file on local memory
			DIR_COMPARISON=( $(comm -23 ${FILE_LISTS}.local ${FILE_LISTS}.remote) ) ## Comparing the local & remote directories from the temp files just created, and storing any differences in a variable array
			## NOTE(review): [[ -n ${DIR_COMPARISON} ]] inspects only element 0 of the array; sufficient here since comm output fills the array from index 0.
			if [[ -n ${DIR_COMPARISON} ]] ## A query on the variable with '-n' sees whether there is a value set. If there is, follow the loop...
			then
				if [[ $(ls ${LOCAL_DIR} | wc -l) == ${TOTAL_TASKS} ]] ## Checking to see whether the current number of files in the local directory matches $TOTAL_TASKS, generated earlier in the script
				then
					echo -e "\nNot all files have been transferred during this operation."
				else
					echo -e "\nThere is a difference in the number of files present than when the transfer was initiated."
				fi
				echo -e "\nThe following files exist on the source but not on the destination:"
				for DIFF_FILE in ${DIR_COMPARISON[*]} ## Looping through the variable array and printing the contents to stdout
				do
					echo -e "\t${DIFF_FILE}"
				done
				echo -e "\nYou can re-run the script and rsync will send only those files that do not exist on the remote directory."
			else ## The alternative, assuming there is no value stored in $DIR_COMPARISON
				echo -e "\nThe local and remote directories are in sync - all files were successfully transferred."
			fi
			rm ${FILE_LISTS}.local ${FILE_LISTS}.remote ## Being good citizens and tidying up after ourselves
		else
			echo -e "The 'comm' comparison program is not available - skipping post-transfer directory comparison...\n"
		fi
		## If checksums are enabled, calculate the file checksums at the destination:
		if [[ ${CHECKSUMS} == "YES" ]]
		then
			echo -e "\nComputing checksums on files in the destination directory..."
			FILE_CHECKSUM_INDEX="0"
			for FILE_CHECKSUM in ${FILE_QUEUE[*]}
			do
				ssh ${USER}@${REMOTE_HOST} sha1sum ${REMOTE_DIR}/${FILE_QUEUE[${FILE_CHECKSUM_INDEX}]} | awk '{print $1}' >> /dev/shm/data-transfer-file-checksum.remote
				((FILE_CHECKSUM_INDEX++))
			done
			echo -e "Complete.\n"
			## NOTE(review): the .local checksum file is assumed to have been produced earlier in the script (outside this excerpt) — confirm.
			CHECKSUM_COMPARISON=( $(comm -23 /dev/shm/data-transfer-file-checksum.local /dev/shm/data-transfer-file-checksum.remote) ) ## Comparing the local & remote directories from the temp files just created, and storing any differences in a variable array
			if [[ -n ${CHECKSUM_COMPARISON} ]]
			then
				echo -e "\nERROR:\t\tChecksum mismatches have been detected:\n"
				for DIFF_CHECKSUM in ${CHECKSUM_COMPARISON[*]} ## Looping through the variable array and printing the contents to stdout
				do
					echo -e "\t${DIFF_CHECKSUM}"
				done
			else
				echo -e "\nVALIDATED: SHA1 checksums computed, compared and validated.\n"
			fi
		fi
		if [[ -x $(command -v bc) ]]
		then
			DATA_TRANSFER_COUNT="$(echo "scale=2; ${DATA_TRANSFER_COUNT} / 1024 / 1024 / 1024 / 1024" | bc -l)TB" ## Deriving the TB transfer figure from the accumulated file size counts
		else
			echo -e "\nThe 'bc' program is not available, so the amount of data transferred will not be displayed..."
		fi
		echo -e "\vOPERATION COMPLETE: Submitted ${FILE_INDEX} files `if [[ -n ${DATA_TRANSFER_COUNT} ]]; then echo "at ${DATA_TRANSFER_COUNT} "; fi`for transfer from ${LOCAL_DIR} to ${REMOTE_HOST}:${REMOTE_DIR}\v"
		TIMER_DIFF_SECONDS=$(( ${TIMER_END} - ${TIMER_START} )) ## Calculating the difference between start & end second values
		TIMER_READABLE=$(date +%H:%M:%S -ud @${TIMER_DIFF_SECONDS}) ## Converting the second delta into a human readable time format (HH:MM:SS)...
		echo -e "Date:\t\t\t`date "+%a %d %b %Y"`\nTransfer wall time:\t${TIMER_READABLE}\n" ## ...And printing it to stdout with the date
		exit 0
	fi
done
done
| true
|
cfee8d075f7467f45ea8f5aeec3bf92c80ea4152
|
Shell
|
AudiusProject/audius-protocol
|
/monitoring/grafana/bin/save-dashboards.sh
|
UTF-8
| 4,640
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Export Grafana state (folders, library panels, alerting config, dashboards)
# to version-controllable JSON files under grafana/.

set -e

# Env file providing BEARER_TOKEN; path overridable by the caller.
: "${BEARER_PATH:=grafana/bearer.env}"

set -o allexport
source ${BEARER_PATH}
set +o allexport

# Grafana connection defaults; each may be overridden from the environment.
: "${GRAFANA_USER:=admin}"
: "${GRAFANA_PASS:=admin}"
: "${GRAFANA_API_URL:=localhost}"
: "${GRAFANA_API_PORT:=80}"

# Basic-auth URL (credentials embedded) and plain URL (for bearer-token calls).
PASS_URL=http://${GRAFANA_USER}:${GRAFANA_PASS}@${GRAFANA_API_URL}:${GRAFANA_API_PORT}
BASE_URL=http://${GRAFANA_API_URL}:${GRAFANA_API_PORT}

# The variables below are jq filter fragments applied to each dashboard
# before it is written to disk.

# REMOVE METADATA
# filter out environment-sensitive keys
CLEAR_METADATA='.dashboard'
CLEAR_ITERATION='del(.iteration)'

# CLEAR VERSION TO AVOID CONFLICTS
# reset .versions to null
CLEAR_VERSION='.version = null'
: "${CLEAR_DASHBOARD_ID:=.id = null}"

# CLEAR PROMETHEUS UID
# clears prometheus uid since each deployment is unique
CLEAR_PROM_TARGET_UID='del(.panels[].targets[]?.datasource.uid)'
CLEAR_PROM_PANEL_UID='del(.panels[].datasource)'

# RESET TIME WINDOW AND REFRESH TIMES
# restrict time windows to avoid Prometheus pressure
SET_TIME_WINDOW_FROM='.time.from = "now-2h"'
SET_TIME_WINDOW_TO='.time.to = "now"'

# restrict auto-refresh time to avoid Prometheus pressure
SET_REFRESH_INTERVAL='.refresh = "30m"'

# set a time delay since graphs don't fall sharply down at the tail end
SET_TIME_DELAY='.timepicker.nowDelay = "1m"'

# RESET TEMPLATING
# clear current selection
RESET_TEMPLATE_SELECTION='del(.templating.list?[].current)'

# SANITIZE LIBRARY PANELS
# when a panel is a library panel, only keep the libraryPanel and gridPos keys
# since everything else is ignored at upload time
# also trim the created/updated fields since they generate plenty of commit noise
SANITIZE_LIBRARY_PANELS='.panels |= map(if .libraryPanel != null then {libraryPanel, id, gridPos} else . end)'
CLEAR_LIBRARY_PANEL_CREATED='del(.panels[].libraryPanel.meta.created)'
CLEAR_LIBRARY_PANEL_UPDATED='del(.panels[].libraryPanel.meta.updated)'

# REQUIRED FOR PUSHING JSON-BACKED DASHBOARDS VIA THE API
# wrap the final output in a different format and use overwrite: true, to avoid .id and .version collisions
PUSH_FORMATTING='{dashboard: ., overwrite: true}'

# FOLDERS
# ids have to be unique, so strip them before saving
CLEAR_FOLDER_IDS='del(.[].id)'

path=grafana/metadata/folders.json
curl \
    -s \
    -H "Authorization: Bearer ${BEARER_TOKEN}" \
    -H 'Content-Type: application/json' \
    -H 'Accept: application/json' \
    ${BASE_URL}/api/folders \
    | jq "${CLEAR_FOLDER_IDS}" \
    > "${path}"
echo "Saved to: ${path}"
# Helper: fetch a Grafana API endpoint via the basic-auth URL and persist the
# jq-filtered JSON response to a local file.  Replaces five copy-pasted
# curl|jq blocks with one definition.
# Arguments:
#   $1 - API path under ${PASS_URL} (may include a query string)
#   $2 - destination file path
#   $3 - jq filter to apply (defaults to the identity filter '.')
save_endpoint() {
    curl -s "${PASS_URL}/$1" \
        | jq "${3:-.}" \
        > "$2"
    echo "Saved to: $2"
}

# save all library panels into a single file
save_endpoint "api/library-elements?perPage=100" grafana/metadata/library.json .result.elements

# save all contact points into a single file
save_endpoint "api/v1/provisioning/contact-points" grafana/metadata/contact-points.json

# save all notification policies into a single file
save_endpoint "api/v1/provisioning/policies" grafana/metadata/policies.json

# save all mute timings into a single file
save_endpoint "api/v1/provisioning/mute-timings" grafana/metadata/mute-timings.json

# save all alert templates into a single file
save_endpoint "api/v1/provisioning/templates" grafana/metadata/templates.json
# save dashboards into separate json files
for uid in $(curl -s ${PASS_URL}/api/search | jq -rc '.[] | select(.uri != "db/prometheus-stats") | select(.type != "dash-folder") | .uid')
do
    response=$(curl \
        -s \
        -H "Authorization: Bearer ${BEARER_TOKEN}" \
        -H 'Content-Type: application/json' \
        -H 'Accept: application/json' \
        "${BASE_URL}/api/dashboards/uid/${uid}")

    # create local filepath using the .meta key
    slug=$(echo "${response}" | jq -r '.meta.slug')
    path=grafana/dashboards
    mkdir -p "${path}"
    path=${path}/${slug}.json

    # Apply every sanitization filter in a single jq invocation (one process
    # instead of sixteen).  Piping the filters *inside* jq is equivalent to
    # piping between separate jq processes for a single JSON document.
    echo "${response}" \
        | jq "${CLEAR_METADATA}
            | ${CLEAR_ITERATION}
            | ${CLEAR_VERSION}
            | ${CLEAR_DASHBOARD_ID}
            | ${CLEAR_PROM_TARGET_UID}
            | ${CLEAR_PROM_PANEL_UID}
            | ${SET_TIME_WINDOW_FROM}
            | ${SET_TIME_WINDOW_TO}
            | ${SET_REFRESH_INTERVAL}
            | ${SET_TIME_DELAY}
            | ${RESET_TEMPLATE_SELECTION}
            | ${SANITIZE_LIBRARY_PANELS}
            | ${CLEAR_LIBRARY_PANEL_CREATED}
            | ${CLEAR_LIBRARY_PANEL_UPDATED}
            | ${PUSH_FORMATTING}" \
        > "${path}"

    echo "Saved to: ${path}"
done
| true
|
e32577ce28b9ac838124068b48fa52cfb46d0b18
|
Shell
|
console-haishin-live/setup-client
|
/tmux/setup.sh
|
UTF-8
| 1,294
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Build tmux from source into ~/local, install the bundled .tmux.conf and
# add ~/local/bin to PATH via ~/.bashrc.
set -ue

SCRIPT_DIR=$(cd "$(dirname "$0")"; pwd)
SRC_DIR="${HOME}/local/src"
LOCAL_DIR="${HOME}/local"
SERIAL="$(date +%Y%m%d%H%M%S)"   # timestamp used to back up an existing config
TMUX_VERSION="3.1b"

# Install build dependencies, then download and unpack the tmux source tarball
# into ${SRC_DIR} (leaves the cwd inside ${SRC_DIR}).
function download(){
    echo "依存パッケージをインストールする。"
    sudo apt install -y build-essential \
                        libncurses5-dev \
                        libevent-dev

    echo "ソースをダンロードして、展開する。"
    mkdir -p "${SRC_DIR}"
    cd "${SRC_DIR}"
    curl -OL \
        "https://github.com/tmux/tmux/releases/download/${TMUX_VERSION}/tmux-${TMUX_VERSION}.tar.gz"
    tar -zxvf "tmux-${TMUX_VERSION}.tar.gz"
}

# Configure, compile and install tmux under ${LOCAL_DIR}.
# Renamed from `install` so the coreutils `install` command is not shadowed.
function install_tmux(){
    echo "コンパイル、インストールする。"
    cd "./tmux-${TMUX_VERSION}"   # relies on download() having left cwd in ${SRC_DIR}
    ./configure --prefix="${LOCAL_DIR}"
    make
    make install
}

# Back up any existing ~/.tmux.conf (suffixing the timestamp), then copy the
# bundled config into place.
function config(){
    echo "既存のコンフィグをバックアップする。"
    if [ -f "${HOME}/.tmux.conf" ]; then
        mv "${HOME}/.tmux.conf" "${HOME}/.tmux.conf.${SERIAL}"
    fi

    echo "コンフィグをコピーする。"
    cd "${SCRIPT_DIR}"
    cp -i ./.tmux.conf "${HOME}/"
}

# Append ~/local/bin to PATH in ~/.bashrc.
# Renamed from `env` so the standard `env` command is not shadowed.
function setup_path(){
    echo "${HOME}/local/bin にパスを通す。"
    cat <<EOF >> "${HOME}/.bashrc"
# tmux
PATH="\${HOME}/local/bin:\${PATH}"
EOF
}

download
install_tmux
config
setup_path
| true
|
84eda3003910f89e5161d3cb8ee94531475f304f
|
Shell
|
petriborg/uvloop
|
/.ci/build-manylinux-wheels.sh
|
UTF-8
| 1,000
| 3
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash

set -e -x

# Module under build/test.  The original script referenced ${PYMODULE}
# without ever defining it, which made the final `pip install` fail for
# lack of a requirement.  Default to uvloop (this repository) but allow
# the CI environment to override it.
: "${PYMODULE:=uvloop}"

yum update -y
yum install -y libtool autoconf automake

PYTHON_VERSIONS="cp35-cp35m"

# Compile wheels
for PYTHON_VERSION in ${PYTHON_VERSIONS}; do
    PYTHON="/opt/python/${PYTHON_VERSION}/bin/python"
    PIP="/opt/python/${PYTHON_VERSION}/bin/pip"
    "${PIP}" install --upgrade pip wheel
    "${PIP}" install --upgrade setuptools
    "${PIP}" install -r /io/.ci/requirements.txt
    make -C /io/ PYTHON="${PYTHON}" distclean
    make -C /io/ PYTHON="${PYTHON}"
    "${PIP}" wheel /io/ -w /io/dist/
done

# Bundle external shared libraries into the wheels, then drop the original
# (non-manylinux) linux_* wheels.  The removal happens AFTER the loop so a
# second iteration never tries to repair a wheel that was already deleted.
for whl in /io/dist/*.whl; do
    auditwheel repair "$whl" -w /io/dist/
done
rm -f /io/dist/*-linux_*.whl

# Install each repaired wheel from the local dist dir and run the test suite.
for PYTHON_VERSION in ${PYTHON_VERSIONS}; do
    PYTHON="/opt/python/${PYTHON_VERSION}/bin/python"
    PIP="/opt/python/${PYTHON_VERSION}/bin/pip"
    "${PIP}" install "${PYMODULE}" --no-index -f file:///io/dist
    rm -rf /io/tests/__pycache__
    make -C /io/ PYTHON="${PYTHON}" test
    rm -rf /io/tests/__pycache__
done
| true
|
b56c3377e3168f4da66deb0ed374f54a81fe9328
|
Shell
|
joshviki/mindtree_mine
|
/create_GKE/delete.sh
|
UTF-8
| 1,727
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactively delete a GKE cluster: list all projects, let the user pick
# one, list its clusters, then delete the selected cluster.

echo "Please wait... Collecting the GKE cluster list"
gcloud projects list --format="value(PROJECT_ID)" > temp_total.txt
nl -s ")" temp_total.txt > temp_ID.txt

# Show the clusters present in every project so the user can choose.
while read -r line
do
	for i in $line
	do
		gcloud config set project "$i"
		gcloud container clusters list --format="value(name)" > ins_total.txt
		echo '*****************************************'
		echo "Clusters in $i project"
		cat ins_total.txt
		echo '*****************************************'
	done
done<temp_total.txt

cat temp_ID.txt
read -p "Please choose the project in which the GKE cluster has to be deleted :" option
line_num=$(wc -l < temp_ID.txt)

# Validate the selection: it must be a number within 1..line_num.  (The
# previous `grep -w $option` lookup could false-match digits occurring
# inside project IDs, and accepted 0/negative/non-numeric input.)
if ! [[ "$option" =~ ^[0-9]+$ ]] || [ "$option" -lt 1 ] || [ "$option" -gt "$line_num" ]
then
	echo "Wrong option... exiting the deletion process"
else
	# The option-th line of temp_total.txt is exactly the chosen project ID.
	ID=$(sed -n "${option}p" temp_total.txt)
	echo "The project ID which you have chosen is $ID"
	echo "Please wait while we switch to the desired project"
	gcloud config set project "$ID"
	proj_id=$(gcloud config get-value project)
	if [[ "$proj_id" == "$ID" ]]
	then
		echo "Switched to $proj_id"
		gcloud container clusters list --format="value(name)" > ins_total.txt
		nl -s ")" ins_total.txt > ins_list.txt
		cat ins_list.txt
		read -p "Please choose the GKE cluster which has to be deleted :" ins_opt
		ins_line_num=$(wc -l < ins_list.txt)
		if ! [[ "$ins_opt" =~ ^[0-9]+$ ]] || [ "$ins_opt" -lt 1 ] || [ "$ins_opt" -gt "$ins_line_num" ]
		then
			echo "Wrong option... exiting the creation process"
		else
			ins=$(sed -n "${ins_opt}p" ins_total.txt)
			zone=$(gcloud container clusters list --filter="name=$ins" --format="value(location)")
			gcloud container clusters delete "$ins" --zone="$zone"
			echo "$ins deleted successfully in zone $zone"
		fi
	else
		echo "Switching to $ID failed"
	fi
fi
# Clean up the temp listing files created above.
rm ./*.txt
| true
|
1a6e805c51b3d1da4e8b78a4612f9af6aa53c138
|
Shell
|
rsau/lagoon
|
/.docker/images/govcms/scripts/govcms-deploy
|
UTF-8
| 4,474
| 3.375
| 3
|
[] |
permissive
|
#!/usr/bin/env bash
# Lagoon deploy hook for GovCMS: optionally backs up / syncs the database,
# runs update hooks, imports configuration and reports progress.

IFS=$'\n\t'
set -euo pipefail

# Ensure lagoon environment is set with the least destructive default.
LAGOON_ENVIRONMENT_TYPE=${LAGOON_ENVIRONMENT_TYPE:-production}

# @todo This strategy will be injected from .env or .lagoon.env Currently set to replicate the existing process.
# Determine the config strategy `import` vs `retain`.
GOVCMS_DEPLOY_WORKFLOW_CONFIG=${GOVCMS_DEPLOY_WORKFLOW_CONFIG:-import}

# Determine the content strategy `import` vs `retain`.
GOVCMS_DEPLOY_WORKFLOW_CONTENT=${GOVCMS_DEPLOY_WORKFLOW_CONTENT:-retain}

# Space-separated list of Db replica hosts.
MARIADB_READREPLICA_HOSTS="${MARIADB_READREPLICA_HOSTS:-}"

# The location of the application directory.
APP="${APP:-/app}"

# Check for presence of config files.  `ls` of a non-matching glob returns
# non-zero, so -e is suspended while the counts are taken.
set +e # Prevent script failure when assigning 0.
# shellcheck disable=SC2012,SC2086
config_count=$(ls -1 $APP/config/default/*.yml 2>/dev/null | wc -l | tr -d ' ')
# shellcheck disable=SC2012,SC2086
dev_config_count=$(ls -1 $APP/config/dev/*.yml 2>/dev/null | wc -l | tr -d ' ')
set -e

echo "Running govcms-deploy"
echo "Environment type: $LAGOON_ENVIRONMENT_TYPE"
echo "Config strategy: $GOVCMS_DEPLOY_WORKFLOW_CONFIG"
echo "Content strategy: $GOVCMS_DEPLOY_WORKFLOW_CONTENT"
echo "There are ${config_count} config yaml files, and ${dev_config_count} dev yaml files."

# Ensure tmp folder always exists.
mkdir -p "$APP/web/sites/default/files/private/tmp"

drush core:status
# Database options to configure the remote to use the read replica when
# performing read operations.
dump_opts=""

# Probe the configured read replica; if it is reachable and populated,
# switch subsequent sql dumps to the `read` database connection.
read_replica_enabled () {
  # No replica hosts configured -> keep the default (primary) connection.
  if [[ -n "$MARIADB_READREPLICA_HOSTS" ]]; then
    local replica_tables
    replica_tables=$(drush sqlq 'show tables;' --database=read 2> /dev/null) || return 0
    if [[ -n "$replica_tables" ]]; then
      dump_opts="--database=read"
    fi
  fi
}
# Database updates, cache rebuild, optional config imports.
# Shared deploy steps run for every environment type:
#   1. optionally pull a fresh DB from production (dev + content=import only)
#   2. rebuild caches and run update hooks
#   3. optionally import configuration (plus dev-only overrides)
#   4. enable stage_file_proxy outside production
common_deploy () {
  if [[ "$LAGOON_ENVIRONMENT_TYPE" = "development" && "$GOVCMS_DEPLOY_WORKFLOW_CONTENT" = "import" ]]; then
    echo "Performing content import."
    # Dump the production DB remotely, pull the dump over, load it locally.
    # shellcheck disable=SC2086
    drush --alias-path=/app/drush/sites @govcms.prod sql:dump --gzip --extra-dump=--no-tablespaces --result-file=/tmp/sync.sql -y
    drush rsync --alias-path=/app/drush/sites @govcms.prod:/tmp/sync.sql.gz /tmp/ -y
    gunzip < /tmp/sync.sql.gz | drush sqlc
    rm /tmp/sync.sql.gz
  fi

  # Refresh the theme registry before cache rebuild / DB updates.
  drush eval "\Drupal::service('extension.list.theme')->reset()->getList();"
  drush cache:rebuild
  drush updatedb -y

  # Base configuration import with development environment overrides.
  if [[ "$GOVCMS_DEPLOY_WORKFLOW_CONFIG" = "import" && "$config_count" -gt 0 ]]; then
    echo "Performing config import."
    drush config:import -y sync
    if [[ "$LAGOON_ENVIRONMENT_TYPE" != "production" && "$dev_config_count" -gt 0 ]]; then
      echo "Performing development config import on non-production site."
      drush config:import -y --partial --source=../config/dev
    fi
  fi

  if [[ "$LAGOON_ENVIRONMENT_TYPE" != "production" ]]; then
    echo "Enable stage_file_proxy in non-prod environments."
    drush pm:enable stage_file_proxy -y
  fi
}
read_replica_enabled

if [[ "$LAGOON_ENVIRONMENT_TYPE" = "production" ]]; then
  # Production: only act on a bootstrapped site, and always snapshot the DB
  # (through the read replica when available) before the shared steps.
  if drush status --fields=bootstrap | grep -q "Successful"; then
    echo "Making a database backup."
    # shellcheck disable=SC2086
    mkdir -p "$APP/web/sites/default/files/private/backups/" && drush sql:dump $dump_opts --gzip --extra-dump=--no-tablespaces --result-file="$APP/web/sites/default/files/private/backups/pre-deploy-dump.sql"
    common_deploy
  else
    echo "Drupal is not installed or not operational."
  fi
else
  # Non-production: a broken (non-bootstrapping) site is re-seeded from the
  # production database first, except on local where the user is told to
  # install instead.
  if ! drush status --fields=bootstrap | grep -q "Successful"; then
    echo "Drupal is not installed or not operational."
    if [[ "$LAGOON_ENVIRONMENT_TYPE" = "local" ]]; then
      echo "Drupal is not installed locally, try ahoy install"
    else
      # In a non-functioning development site, import the database from production before running import.
      # Note, this would destroy data in a dev site that was just broken temporarily. Review?
      # shellcheck disable=SC2086
      drush --alias-path=/app/drush/sites @govcms.prod sql:dump --gzip --extra-dump=--no-tablespaces --result-file=/tmp/sync.sql -y
      drush rsync --alias-path=/app/drush/sites @govcms.prod:/tmp/sync.sql.gz /tmp/ -y
      gunzip < /tmp/sync.sql.gz | drush sqlc
      rm /tmp/sync.sql.gz
      common_deploy
    fi
  else
    common_deploy
  fi
fi

echo "Finished running govcms-deploy."
|
f0069927bd348ec56994f97fd521cf4436651bd8
|
Shell
|
itsabdessalam/devops-php-website
|
/start.sh
|
UTF-8
| 502
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Check out the v3 branch, (re)start the Docker stack and open the app
# in a browser.

# Retrieve all branches on local machine
git fetch

# Switch to v3 branch
git checkout v3

# Stop all running containers to prevent conflicts.
# `docker stop` errors out when given no arguments, so only invoke it when
# at least one container id was returned.
containers=$(docker ps -aq)
if [ -n "$containers" ]; then
    # shellcheck disable=SC2086 -- word-splitting of the id list is intended
    docker stop $containers
fi

# Run containers
docker-compose up -d

# Open browser to check app
if hash xdg-open &> /dev/null
then
    xdg-open http://localhost:8080 2>/dev/null
elif hash open &> /dev/null
then
    open http://localhost:8080 2>/dev/null
else
    echo 'Couldn'\''t open the project on the browser, go to http://localhost:8080'
fi
| true
|
aa6d39ebf6836a98fab7a66969fac0a9e4a93fd2
|
Shell
|
readytowork/S29sPLMPWAtbN49RV
|
/workshop/readline.sh
|
UTF-8
| 796
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Parse a simulation log ("QpskQpsk.txt"): drop formatting tokens, convert
# PASS markers to " 1", flag result blocks that ended without a PASS with
# " 0", and append the reshaped text to output2.txt.

a=0                                  # total input-line counter
substring="Saving results to result" # marker line separating result blocks
fileprint=""                         # accumulated output text
space=" "
pass=" 1"                            # emitted when a PASS token is seen
notpass=" 0"                         # emitted when a block ended without PASS
nline=$'\n'
ispassp=0                            # 1 while the most recent token was PASS

# NOTE(review): `read` without -r mangles backslashes — presumably fine for
# these result logs; confirm.
while read line
	do a=$(($a+1));
	# Marker line detected (substring removal changed the line): if the
	# previous block never produced a PASS, record a " 0" for it.
	if ! [ "${line/$substring}" = "$line" ]
	then
		if [ "$ispassp" == "0" ]; then
			fileprint=$fileprint$notpass$nline
		fi
		continue
	fi
	# Token filtering: skip "=", "h1" and "h2"; map PASS to " 1 " plus a
	# newline; copy every other word through followed by a space.
	for word in $line
	do
		if [ "$word" == "=" ]; then
			continue
		fi
		if [ "$word" == "h1" ]; then
			continue
		fi
		if [ "$word" == "h2" ]; then
			continue
		fi
		if [ "$word" == "PASS" ]; then
			fileprint=$fileprint$space$pass$space$nline
			ispassp=1
		else
			fileprint=$fileprint$word$space
			ispassp=0
		fi
	done
done < "QpskQpsk.txt"

echo "$fileprint" >> "output2.txt"
echo "Final line count is: $a";
# Other input files this was run against:
#Q16Q16.txt qamqpsk.txt QpskQpsk.txt
| true
|
3fc4a1160f4b2a79606a7a6f7ab6795036857d65
|
Shell
|
kaumiller-lab/qbb2019-answers
|
/day2-lunch/doRNA-seq.sh
|
UTF-8
| 498
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# RNA-seq pipeline: QC, align, sort/index and quantify four samples against
# the BDGP6 (D. melanogaster) reference.

GENOME=../day2-morning/BDGP6            # hisat2 index prefix
ANNOTATION=../genomes/BDGP6.Ensembl.81.gtf
THREADS=4

for sample in SRR072893 SRR072903 SRR072905 SRR072915; do
    echo "*** Processing $sample"
    cp "../rawdata/$sample.fastq" .
    fastqc -t "$THREADS" "$sample.fastq"
    hisat2 -p "$THREADS" -x "$GENOME" -U "$sample.fastq" -S "$sample.sam"
    samtools sort -@ "$THREADS" -O BAM "$sample.sam" -o "$sample.bam"
    samtools index -@ "$THREADS" -b "$sample.bam"
    stringtie "$sample.bam" -e -B -p "$THREADS" -G "$ANNOTATION" -o "$sample.gtf"
done
| true
|
3ed841b37643f7331a4a34b2548372203b84f6ca
|
Shell
|
gwinevia/2017development
|
/C_programming/c_compile.sh
|
UTF-8
| 364
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Compile a C source file with gcc (linking libm) and run the resulting
# binary.  The original had every variable reference replaced by the
# non-existent command substitution $(unknown); restore $filename.
basename=$1              # take the file name from the first argument
filename=${basename%.*}  # name with the extension stripped
if [ -f "${filename}.c" ]; then
    gcc -o "${filename}" "${filename}.c" -lm
    if [ -f "${filename}" ]; then
        ./"${filename}"
    else
        echo "** ${filename} do not made **"
    fi
else
    echo "** ${filename}.c does not exist **"
fi
| true
|
273ee22867028cdec80548d4208ec9f06fe075a9
|
Shell
|
amithjkamath/codesamples
|
/cpp/opencv/CVbuild.sh
|
UTF-8
| 413
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/sh
# Compile a single C source file against OpenCV using pkg-config.
# Usage: CVbuild.sh <file.c> <output-binary-name>
if [ ! -n "$2" ]
then
   echo "Usage: Arg#1: .c filename, Arg#2: output name"
   # NOTE(review): E_BADARGS is never defined in this script, so this is a
   # bare `exit` that propagates the previous command's status (0 after the
   # echo above).  Presumably it should be set, e.g. E_BADARGS=65 — confirm.
   exit $E_BADARGS
fi
echo "compiling $1"
gcc -ggdb `pkg-config --cflags opencv` -o $2 $1 `pkg-config --libs opencv`;

# for C++ files, do this.
# for i in *.cpp; do
#  echo "compiling $i"
#  g++ -ggdb `pkg-config --cflags opencv` -o `basename $i .cpp` $i `pkg-config --libs opencv`;
# done
| true
|
bf20915c79209c01211088926db0d7501d8c7b6c
|
Shell
|
marcopeg/humble-cli
|
/bin/inc/index-of.sh
|
UTF-8
| 169
| 3.5625
| 4
|
[] |
no_license
|
# Return (echo) the zero-based index of the first occurrence of string $2
# inside string $1, or -1 when $2 does not occur.
# The needle is quoted inside the parameter expansion so glob characters
# (*, ?, [...]) in $2 are matched literally; the original left it unquoted
# and therefore treated the needle as a pattern.
indexOf() {
  local haystack=$1 needle=$2 prefix
  # Strip the shortest suffix starting at the needle; what remains is the
  # text before the first match (or the whole string when there is none).
  prefix=${haystack%%"$needle"*}
  if [[ "$prefix" == "$haystack" ]]; then
    echo -1
  else
    echo "${#prefix}"
  fi
}
| true
|
f24b368f4b74d45846b3c88682d8668b1dc1b066
|
Shell
|
Zniper1/Sor1TP2
|
/MegaMenu.sh
|
UTF-8
| 4,921
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive network-administration menu (Spanish UI): show host network
# data, scan the LAN, open SSH sessions and copy files to/from a remote
# host via scp.
#------------------------------------------------------
#	COLOR PALETTE
#------------------------------------------------------
# setaf sets the text (foreground) color / setab sets the background color
red=`tput setaf 1`;
green=`tput setaf 2`;
blue=`tput setaf 4`;
bg_blue=`tput setab 4`;
reset=`tput sgr0`;
# NOTE(review): "bold" is not a numeric color argument for setaf — `tput bold`
# was presumably intended; confirm.
bold=`tput setaf bold`;
#------------------------------------------------------
#	GLOBAL VARIABLES
#------------------------------------------------------
proyectoActual="$(pwd)";
proyectos="";
#------------------------------------------------------
#	DISPLAY MENU
#------------------------------------------------------
# Print the main menu: current project path plus options a-e and q.
imprimir_menu () {
	imprimir_encabezado "\t S U P E R - M E N U ";
	echo -e "\t\t El proyecto actual es:";
	echo -e "\t\t $proyectoActual";
	echo -e "\t\t";
	echo -e "\t\t Opciones:";
	echo "";
	echo -e "\t\t\t a. Datos de Red";
	echo -e "\t\t\t b. Escaneo de Red";
	echo -e "\t\t\t c. Loguearse a dispositivo";
	echo -e "\t\t\t d. Copiar archivo de host remoto a servidor";
	echo -e "\t\t\t e. Copiar archivo de servidor a host remoto";
	echo -e "\t\t\t q. Salir";
	echo "";
	echo -e "Escriba la opción y presione ENTER";
}
#------------------------------------------------------
#	HELPER FUNCTIONS
#------------------------------------------------------
# Clear the screen and print a banner with date, user and the given title ($1).
imprimir_encabezado () {
	clear;
	# Formatted date plus the user currently running the script.
	echo -e "`date +"%d-%m-%Y %T" `\t\t\t\t\t USERNAME:$USER";
	echo "";
	# Colored banner lines.
	echo -e "\t\t ${bg_blue} ${red} ${bold}-------------------------------------------------------------\t${reset}";
	echo -e "\t\t ${bold}${bg_blue}${red}$1\t\t${reset}";
	echo -e "\t\t ${bg_blue} ${red} ${bold}-------------------------------------------------------------\t${reset}";
	echo "";
}

# Pause until the user presses ENTER.
esperar () {
	echo "";
	echo -e "Presione enter para continuar";
	read ENTER ;
}

# Feedback for an invalid menu selection.
malaEleccion () {
	echo -e "Selección Inválida ..." ;
}
#------------------------------------------------------
#	MENU FUNCTIONS
#------------------------------------------------------
# Option a: show public IP, local network address and default gateway.
a_funcion () {
	imprimir_encabezado "\tOpción a. Datos de Red";
	echo "Datos de su Red: "
	echo ""
	DireccionRed=$(ifconfig | grep -i "inet" -m 1|awk 'N=2 {print $N}')
	IPRouter=$(ip route show | grep -i "via " -m 1|awk 'N=3 {print $N}')
	Ip=$(dig +short myip.opendns.com @resolver1.opendns.com)
	# dig queries OpenDNS to discover this host's public IP address.
	echo Mi Direccion ip publica es: "$Ip"
	echo Mi Direccion de red es: "$DireccionRed"
	echo La Direccion del router es: "$IPRouter"
}

# Option b: ping-sweep the local subnet with nmap to list connected devices.
b_funcion() {
	imprimir_encabezado "\t0pción b. Escaneo de Red";
	echo "Ingrese su contraseña para ver los dispositivos conectados a su Red"
	echo ""
	IPBroadcast=$(ip route show | grep -i "via " -m 1|awk 'N=3 {print $N}')
	# Replace the trailing octet (3, 2 or 1 digits) of the gateway IP with a
	# literal '*' so nmap scans the whole range.
	IPBroadcast=${IPBroadcast/%[0-9][0-9][0-9]/*}
	IPBroadcast=${IPBroadcast/%[0-9][0-9]/*}
	IPBroadcast=${IPBroadcast/%[0-9]/*}
	sudo nmap -sP $IPBroadcast | grep -B 1 "for "
}

# Option c: prompt for connection details and open an interactive SSH session.
c_funcion() {
	imprimir_encabezado "\t0pción c. Loguearse a dispositivo";
	echo "Ingrese los datos necesarios para logearse en el servidor"
	echo ""
	read -p "Ingrese el puerto: " puerto
	read -p "Ingrese el usuario: " usuario
	read -p "Ingrese la ip del servidor: " ip
	echo ""
	echo "Para salir del servidor escriba 'exit'"
	echo ""
	ssh -X -p $puerto $usuario@$ip
}

# Option d: scp a local file up to the remote server.
d_funcion() {
	imprimir_encabezado "\t0pción d. Copiar archivo de host remoto a servidor";
	echo "Ingrese los datos necesarios para copiar un archivo en el servidor"
	echo ""
	read -p "Ingrese el path y nombre del archivo: " archivo
	read -p "Ingrese el path donde desea guardar el archivo: " ubicacion
	read -p "Ingrese el usuario: " usuario
	read -p "Ingrese la ip del servidor: " ip
	echo ""
	scp $archivo $usuario@$ip:$ubicacion
}

# Option e: scp a file from the remote server down to this host.
e_funcion() {
	imprimir_encabezado "\t0pción e. Copiar archivo de servidor a host remoto";
	echo "Ingrese los datos encesarios para copiar un archivo desde el servidor"
	echo ""
	read -p "Ingrese el path y nombre del archivo: " archivo
	read -p "Ingrese el path donde desea guardar el archivo: " ubicacion
	read -p "Ingrese el usuario: " usuario
	read -p "Ingrese la ip del servidor: " ip
	echo ""
	scp $usuario@$ip:$archivo $ubicacion
}
#------------------------------------------------------
#	MAIN LOGIC
#------------------------------------------------------
while true
do
	# 1. show the menu
	imprimir_menu;
	# 2. read the user's choice
	read opcion;
	case $opcion in
		a|A) a_funcion;;
		b|B) b_funcion;;
		c|C) c_funcion;;
		d|D) d_funcion;;
		e|E) e_funcion;;
		q|Q) break;;
		*) malaEleccion;;
	esac
	esperar;
done
| true
|
8edee94e7b2e351c8f5d1812a635e4b0fd1c3968
|
Shell
|
hk59775634/luci-app-mycpe
|
/files/etc/init.d/mycpe
|
UTF-8
| 201
| 2.625
| 3
|
[] |
no_license
|
#!/bin/sh /etc/rc.common
# /etc/init.d/mycpe
# OpenWrt rc.common init script for the mycpe service.

START=95            # boot-sequence start priority
PID=/tmp/mycpe.pid  # pidfile read by stop(); presumably written by /usr/sbin/mycpe itself — TODO confirm

start(){
	/etc/init.d/mycpe enable
	/usr/sbin/mycpe start >/dev/null 2>&1 &
}
stop(){
	# NOTE(review): sends SIGKILL immediately, backgrounds the kill, and
	# fails silently when the pidfile is missing.
	kill -9 `cat $PID` >/dev/null 2>&1 &
}
| true
|
b51bf812306767ec398f03caa2242c00c6501d1d
|
Shell
|
uc-cdis/cloud-automation
|
/gen3/bin/kube-setup-fenceshib.sh
|
UTF-8
| 1,377
| 3.375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Deploy fenceshib into existing commons - assume configs are already configured
# for fenceshib to re-use the userapi db.
# This fragment is pasted into kube-services.sh by kube.tf.
#

source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/lib/kube-setup-init"

[[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets

# One-time fence database bootstrap.  The original test used `-d`
# (directory) on creds.json, which can never be true for a credentials
# *file*, so this branch was silently skipped; `-f` is the correct check.
if [[ -f "$(gen3_secrets_folder)/creds.json" ]]; then # create database
  # Initialize fence database and user list
  cd "$(gen3_secrets_folder)"
  if [[ ! -f .rendered_fence_db ]]; then
    gen3 job run fencedb-create
    echo "Waiting 10 seconds for fencedb-create job"
    sleep 10
    gen3 job logs fencedb-create || true
    echo "Leaving setup jobs running in background"
    cd "$(gen3_secrets_folder)"
  fi
  # avoid doing the previous block more than once or when not necessary ...
  touch "$(gen3_secrets_folder)/.rendered_fence_db"
fi

# setup configmap
if ! g3kubectl get configmaps/fenceshib-config > /dev/null 2>&1; then
  g3kubectl apply -f "${GEN3_HOME}/kube/services/fenceshib/fenceshib-configmap.yaml"
fi

# deploy fenceshib
gen3 roll fenceshib
g3kubectl apply -f "${GEN3_HOME}/kube/services/fenceshib/fenceshib-service.yaml"
gen3 roll fenceshib-canary || true
g3kubectl apply -f "${GEN3_HOME}/kube/services/fenceshib/fenceshib-canary-service.yaml"

cat <<EOM
The fenceshib service has been deployed onto the k8s cluster.
EOM
| true
|
d80db4d6c8756b01733c823c330846937f5ec2b9
|
Shell
|
Blazemeter/blazemeter-openshift
|
/.openshift/action_hooks/deploy
|
UTF-8
| 4,255
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# This deploy hook gets executed after dependencies are resolved and the
# build hook has been run but before the application has been started back
# up again. This script gets executed directly, so it could be python, php,
# ruby, etc.
set -e

# Key paths inside the OpenShift gear.
DRUPAL_SITE_DIR=${OPENSHIFT_DATA_DIR}sites
DRUPAL_PRIVATE_DIR=${OPENSHIFT_DATA_DIR}private
DRUPAL_SETTINGS=${DRUPAL_SITE_DIR}/default/settings.php
DRUSH_SCRIPT=${OPENSHIFT_DATA_DIR}bin/drush
DEFAULT_PASSWORD=openshift_changeme  # NOTE(review): defined but never read below; the literal is repeated in the drush call — confirm they stay in sync

if [ ! -d "${DRUPAL_SITE_DIR}/default" ]
then
    echo "No sites directory has been created, Drupal cannot be deployed"
    exit 9
fi

#
# Create a new Drupal site. You can delete your ${OPENSHIFT_DATA_DIR}sites
# folder to run this step again.
#
if [ ! -f "${DRUPAL_SITE_DIR}/default/settings.php" ]
then
    #
    # Automatic installation only works with mysql.
    #
    if [ -z "$OPENSHIFT_MYSQL_DB_HOST" ]
    then
        echo 1>&2
        echo "Could not find mysql database. Please run:" 1>&2
        echo "rhc cartridge add mysql-5.1 -a $OPENSHIFT_APP_NAME" 1>&2
        echo "then make a sample commit (add whitespace somewhere) and re-push" 1>&2
        echo 1>&2
    fi
    # Poll for up to 60 seconds until the MySQL cartridge's environment
    # variables appear in ~/.env.
    RETRY=60
    while [ $RETRY -gt 0 ] ; do
        #reload env variables
        for env_var in $OPENSHIFT_HOMEDIR/.env/*
        do
            . $env_var
        done
        #check for db
        if [ -n "$OPENSHIFT_MYSQL_DB_HOST" ]
        then
            echo "Database server found at $OPENSHIFT_MYSQL_DB_HOST. initializing..."
            sleep 5
            break
        fi
        sleep 1
        RETRY=$(( $RETRY - 1 ))
    done
    # Give up if the database cartridge never materialized.
    if [ -z "$OPENSHIFT_MYSQL_DB_HOST" ]
    then
        exit 5
    fi
    if [ -z "$OPENSHIFT_MYSQL_DB_PORT" ]
    then
        exit 6
    fi
    mkdir -p ${DRUPAL_SITE_DIR}/default
    mkdir -p ${DRUPAL_PRIVATE_DIR}
    pushd ${OPENSHIFT_REPO_DIR}php
    echo
    echo "Creating a new Drupal site at ${DRUPAL_SITE_DIR}/default"
    echo
    if ! $DRUSH_SCRIPT site-install blazemeter_profile --site-name=${OPENSHIFT_APP_NAME} --account-pass=openshift_changeme --db-url=mysql://$OPENSHIFT_MYSQL_DB_USERNAME:$OPENSHIFT_MYSQL_DB_PASSWORD@$OPENSHIFT_MYSQL_DB_HOST:$OPENSHIFT_MYSQL_DB_PORT/$OPENSHIFT_APP_NAME --yes
    then
        echo "Unable to configure your Drupal installation"
        echo
        exit 10
    fi
    #
    # Tweak settings.php to use the OpenShift environment variables instead of
    # the values passed to Drush.
    #
    chmod u+w ${DRUPAL_SETTINGS} ${DRUPAL_SITE_DIR}/default
    # Strip the $databases array drush generated; it is re-added below,
    # reading from the OpenShift environment instead.
    cat ${DRUPAL_SETTINGS} | ruby -e "puts STDIN.read.gsub(/\\\$databases\s*=\s*array.*?\)\;/m, '# Replaced by OpenShift')" > ${OPENSHIFT_TMP_DIR}/settings.php
    cat << "END" >> ${OPENSHIFT_TMP_DIR}/settings.php
/**
 * For maximum portability, use the OpenShift environment variables.
 */
// When run from Drush, only $_ENV is available. Might be a bug
if (array_key_exists('OPENSHIFT_APP_NAME', $_SERVER)) {
  $src = $_SERVER;
} else {
  $src = $_ENV;
}
$databases = array (
  'default' =>
  array (
    'default' =>
    array (
      'database' => $src['OPENSHIFT_APP_NAME'],
      'username' => $src['OPENSHIFT_MYSQL_DB_USERNAME'],
      'password' => $src['OPENSHIFT_MYSQL_DB_PASSWORD'],
      'host' => $src['OPENSHIFT_MYSQL_DB_HOST'],
      'port' => $src['OPENSHIFT_MYSQL_DB_PORT'],
      'driver' => 'mysql',
      'prefix' => '',
    ),
  ),
);
END
    cat ${OPENSHIFT_TMP_DIR}/settings.php > ${DRUPAL_SETTINGS}
    #
    # Use the temporary directories that OpenShift provides, and set
    # the private path to be inside the data dir
    #
    echo "\$conf['file_private_path'] = \$src['OPENSHIFT_DATA_DIR'] . 'private/';" >> ${DRUPAL_SETTINGS}
    echo "\$conf['file_temporary_path'] = \$src['OPENSHIFT_TMP_DIR'] . 'drupal/';" >> ${DRUPAL_SETTINGS}
    chmod u-w ${DRUPAL_SETTINGS} ${DRUPAL_SITE_DIR}/default
    popd
    echo
    echo "Drupal is now configured"
    echo
    echo "===================================================="
    echo " Drupal-Admin login: admin"
    echo " Drupal-Admin password: openshift_changeme"
    echo " Don't forget to change your drupal admin password!"
    echo "===================================================="
else
    echo "Drupal is already configured. Delete settings.php to rerun setup"
fi
echo
| true
|
ece21e0589c49618aacc779148c3ea23dbaf583b
|
Shell
|
shiguangwang/vim-setting
|
/prepare_common.sh
|
UTF-8
| 326
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Install the powerline fonts used by the vim/status-line setup.
# Fonts are cloned into $HOME/tmp/powerline_fonts on first run; the
# install step is skipped entirely when that directory already exists.

# BUGFIX: message typo ("Prepareing" -> "Preparing").
echo "Preparing powerline fonts ..."
POWERLINE_FONTS="$HOME/tmp/powerline_fonts"
if [ ! -d "$POWERLINE_FONTS" ]; then
    echo "Installing..."
    mkdir -p "$POWERLINE_FONTS"
    # Abort on a failed clone or cd so install.sh never runs in the
    # wrong directory.
    git clone https://github.com/powerline/fonts.git "$POWERLINE_FONTS" || exit 1
    cd "$POWERLINE_FONTS" || exit 1
    ./install.sh
fi
echo "Done!"
| true
|
2335d87050168c255713fc6a1463f08e1a084429
|
Shell
|
cabralrobert/Programacao_Script2019.2
|
/atividades/atividade05/isfile.sh
|
UTF-8
| 308
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Reports whether $1 is a directory or a regular file, then whether the
# current user has read and write permission on it. Messages are kept in
# Portuguese, matching the original course exercise.
alvo="$1"

if [ -d "$alvo" ]; then
    echo "É um diretorio"
elif [ -f "$alvo" ]; then
    echo "É um arquivo"
fi

if [ -r "$alvo" ]; then
    echo "Tem permissão de leitura"
else
    echo "Não tem permissão de leitura"
fi

if [ -w "$alvo" ]; then
    echo "Tem permissão de escrita"
else
    echo "Não tem permissão de escrita"
fi
| true
|
5fbc8670cde44a1c8b5c4b12706b2c6873038833
|
Shell
|
jotfs/jot
|
/download_jotfs_binary.sh
|
UTF-8
| 368
| 2.84375
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
# Download the latest released JotFS server binary for linux/amd64 and
# unpack it to ./bin/jotfs. Requires curl and jq.
# Abort on any failed step instead of silently unpacking garbage.
set -e

mkdir -p ./bin

# Resolve the most recent release tag via the GitHub API.
tag_name=$(curl -fsS https://api.github.com/repos/jotfs/jotfs/releases/latest | jq -r '.tag_name')
# BUGFIX: jq emits the literal string "null" when the key is missing;
# previously that produced a bogus download URL.
if [ -z "$tag_name" ] || [ "$tag_name" = "null" ]; then
    echo "Could not determine the latest jotfs release tag" >&2
    exit 1
fi

url="https://github.com/jotfs/jotfs/releases/download/$tag_name/jotfs_linux_amd64.gz"
# -f: fail on HTTP errors instead of saving the error page as the .gz;
# -L: follow the release-asset redirect.
curl -fsS -L --output ./bin/jotfs_linux_amd64.gz "$url"
gzip -dc ./bin/jotfs_linux_amd64.gz > ./bin/jotfs
chmod u+x ./bin/jotfs
echo "JotFS server binary saved to ./bin/jotfs"
| true
|
e78a3265b01981ed4492abc8844637d5f4a8ea83
|
Shell
|
kingvuplus/EG-base
|
/files/scripts/httpd_script.sh
|
UTF-8
| 1,108
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
# SysV-style init script for the Busybox HTTP daemon.
#
# Commands:
#   start   - start httpd, but only when HTTP_ON is enabled below
#   start2  - start httpd unconditionally
#   stop    - stop httpd
#   restart - stop then start
#   reload  - send SIGHUP so httpd re-reads its configuration
NAME=httpd
DESC="Busybox HTTP Daemon"
# Gate for the regular "start" command; set to 1 to start httpd at boot.
HTTP_ON=0
HTTPROOT=/usr/www
HTTPPORT=8047
HTTPCONF=/var/etc/httpd.conf
# Intentionally unquoted at the call sites so it splits into options.
ARGS="-h $HTTPROOT -p $HTTPPORT -c $HTTPCONF"
# Prefer the /usr/sbin binary when both locations exist.
if [ -f /usr/sbin/httpd ]
then
	DAEMON=/usr/sbin/httpd
else
	DAEMON=/sbin/httpd
fi
test -f "$DAEMON" || exit 0

set -e

case "$1" in
start)
	if [ ! -d "$HTTPROOT" ]; then
		echo "$HTTPROOT is missing."
		exit 1
	fi
	if [ "$HTTP_ON" -ne 0 ]; then
		echo -n "starting $DESC: $NAME... "
		start-stop-daemon -S -b -n $NAME -a "$DAEMON" -- $ARGS
		echo "done."
	fi
	;;
start2)
	if [ ! -d "$HTTPROOT" ]; then
		echo "$HTTPROOT is missing."
		exit 1
	fi
	echo -n "starting $DESC: $NAME... "
	start-stop-daemon -S -b -n $NAME -a "$DAEMON" -- $ARGS
	echo "done."
	;;
stop)
	echo -n "stopping $DESC: $NAME... "
	start-stop-daemon -K -n $NAME
	echo "done."
	;;
restart)
	echo "restarting $DESC: $NAME... "
	$0 stop
	$0 start
	echo "done."
	;;
reload)
	# SIGHUP makes busybox httpd re-read $HTTPCONF without restarting.
	echo -n "reloading $DESC: $NAME... "
	killall -HUP "$(basename "$DAEMON")"
	echo "done."
	;;
*)
	# BUGFIX: the usage line previously omitted the start2 command.
	echo "Usage: $0 {start|start2|stop|restart|reload}"
	exit 1
	;;
esac

exit 0
| true
|
c9d5e2578d262d27e647794762485d3edc45f2bb
|
Shell
|
hossx/backcore
|
/coinex/shell/export.sh
|
UTF-8
| 1,279
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Copyright 2014 Coinport Inc. All Rights Reserved.
#
# Incrementally export coinex event snapshots to JSON files.
#   $1 - processor name (reads collections p_$1_metadata / p_$1_events)
# The height of the last exported snapshot is persisted under
# /data/export/lastsnapshot_$1 so reruns only export newer snapshots.

echo ================= Coinport Data Exporter =================
echo export $1 events

# data path
path=/data/export

# Helper script: mongoexport's -q argument needs shell quoting that is
# easiest to get right by writing the full command to a file and running
# it. SECURITY: use mktemp instead of the old predictable /tmp name.
tempFile=$(mktemp /tmp/export_cmd.XXXXXX) || exit 1
chmod +x "$tempFile"

# query recent snapshot
# BUGFIX: check that the state file exists before reading it; the old
# code ran `cat` first and printed an error on every first run.
snapshotFile=${path}/lastsnapshot_${1}
if [ -f "$snapshotFile" ]; then
    lastSnapshot=$(cat "$snapshotFile")
else
    lastSnapshot=0
fi
echo from last snapshot $lastSnapshot

query="'{\"metadata.height\": {\$gt: $lastSnapshot}}'"
echo mongoexport -d coinex_events -c p_${1}_metadata -f metadata.height -q $query --csv -o /tmp/snapshot > "$tempFile"
"$tempFile"

# for each snapshot height listed after the CSV header line
for i in $(sed -n '2,$p' /tmp/snapshot); do
    snapshot=$((i - 1))
    echo current snapshot $i, prepare to export snapshot $snapshot ...
    file=${path}/coinport_${1}_snapshot_${snapshot}.json
    query="'{snapshot: $snapshot}'"
    if [ ! -f "$file" ]; then
        echo mongoexport -d coinex_events -c p_${1}_events -q $query --jsonArray -o $file > "$tempFile"
        "$tempFile"
        # remove empty files
        if [ ! -s "$file" ]; then
            rm "$file"
        fi
    fi
done

rm -f "$tempFile"

# BUGFIX: only persist progress when at least one snapshot was seen; the
# old code unconditionally wrote $snapshot (empty when the loop never
# ran), which corrupted the $gt query on the next invocation.
if [ -n "${snapshot:-}" ]; then
    echo "$snapshot" > "$snapshotFile"
    echo last snapshot is $snapshot
fi
echo ==========================================================
| true
|
77cd0b773b06132383b27bf5bc9b0b521db09047
|
Shell
|
exoclim/socrates_tools
|
/gen_spec_files/sp_sw_jmdsa/mk_sp_sw_6_jm2dsa
|
UTF-8
| 19,500
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Builds the 6-band GA7 shortwave spectral file for the Socrates
# radiation code: generates skeleton spectral files, computes
# correlated-k coefficients per absorber, averages cloud optical
# properties, and assembles the final file with prep_spec.
NBANDS="6"
SP_ID="jm2dsa"
SP_FLDR="ga7"
SOLAR_SPEC="sun"
PT_FILE="pt663"
# Provides HITRAN_DIR, RAD_DATA, K_COEFF_DIR, SW_DATA, CONT_DIR,
# CLD_COEFF_DIR, SOLAR_SPEC_DIR, SPEC_FILE_DIR, RAD_BIN, ...
. ../set_paths_sw
################################################################################
# Calculate k-coefficients
################################################################################
echo "Calculating k-coefficients."
# Set paths to files
HITRAN_H2O="$HITRAN_DIR/01_hit12.par"
HITRAN_CO2="$HITRAN_DIR/02_hit12.par"
HITRAN_O3="$HITRAN_DIR/03_hit12.par"
HITRAN_N2O="$HITRAN_DIR/04_hit08.par"
HITRAN_UV_N2O="$HITRAN_DIR/N2O-UV00.xsc"
HITRAN_CH4="$HITRAN_DIR/06_hit12.par"
HITRAN_O2="$HITRAN_DIR/07_hit12.par"
HITRAN_UV_O2="$HITRAN_DIR/07_UV06.par"
HITRAN_SO2="$HITRAN_DIR/09_hit12.par"
HITRAN_UV_SO2="$HITRAN_DIR/SO2_UV08.xsc"
HITRAN_OCS="$HITRAN_DIR/19_hit12.par"
HITRAN_UV_O3="$RAD_DATA/gases/ser_bdm_o3.xsc"
JPL_UV_O2="$RAD_DATA/gases/jpl_o2.xsc"
o3_pt_file="$RAD_DATA/gases/pt_o3_ser"
o2_pt_file="$RAD_DATA/gases/pt_o2_jpl"
n2o_pt_file="$RAD_DATA/gases/pt_n2o_uv"
so2_pt_file="$RAD_DATA/gases/pt_so2_uv"
ref_pt_file="$RAD_DATA/gases/ref_pt"
# Non-empty test: abort unless set_paths_sw exported a tools location.
if [ $RAD_BIN ] ; then
  echo "Using code compiled in "$RAD_BIN
else
  echo "Path to code not set."
  exit 1
fi
# Create skeleton spectral file
SPECFILE="sp_sw_${NBANDS}_${SP_ID}_${SOLAR_SPEC}_skel"
rm -f ${SPECFILE}
. mk_sp_sw_${NBANDS}_${SP_ID}_skel ${SOLAR_SPEC} > /dev/null
# Create 12 band skeleton spectral file for pseudo-bands
specfile12="sp_sw_12_${SP_ID}_${SOLAR_SPEC}_skel"
# NOTE(review): this removes "${SPECFILE}12", not "${specfile12}" defined
# just above -- presumably the sourced skeleton script uses that naming;
# verify, since specfile12 is otherwise never used.
rm -f ${SPECFILE}12
. mk_sp_sw_12_${SP_ID}_skel ${SOLAR_SPEC} > /dev/null
# Create directory for k-coefficients
mkdir -p ${K_COEFF_DIR}
# Construct weight argument
WGT="+S ${SOLAR_SPEC_DIR}/${SOLAR_SPEC}"
echo "Jobs running in background:"
# Each block below runs one correlated-k fit (Ccorr_k) in the background,
# skipping it when its output already exists. -R selects the band range,
# -l the gas id and column amount, -t the tolerance, and -L the
# precomputed line-by-line absorption table.
if [ ! -s ${K_COEFF_DIR}/co2_s${NBANDS}_5l ] ; then
  echo "CO2 band 5"
  rm -f ${K_COEFF_DIR}/co2_s${NBANDS}_5l*
  Ccorr_k -F ../${PT_FILE} -D $HITRAN_CO2 \
    -R 5 5 -c 2500.0 -i 1.0 -l 2 1.0e1 -t 4.0e-3 \
    -s ${SPECFILE} ${WGT} -lk \
    -o ${K_COEFF_DIR}/co2_s${NBANDS}_5l -m ${K_COEFF_DIR}/co2_s${NBANDS}_5lm \
    -L $SW_DATA/co2_lbl_sw_${PT_FILE}.nc > ${K_COEFF_DIR}/co2_s${NBANDS}_5log \
    && echo "CO2 band 5 done" &
fi
if [ ! -s ${K_COEFF_DIR}/co2_s${NBANDS}_6l ] ; then
  echo "CO2 band 6"
  rm -f ${K_COEFF_DIR}/co2_s${NBANDS}_6l*
  Ccorr_k -F ../${PT_FILE} -D $HITRAN_CO2 \
    -R 6 6 -c 2500.0 -i 1.0 -l 2 1.0e1 -t 1.0e-2 \
    -s ${SPECFILE} ${WGT} -lk \
    -o ${K_COEFF_DIR}/co2_s${NBANDS}_6l -m ${K_COEFF_DIR}/co2_s${NBANDS}_6lm \
    -L $SW_DATA/co2_lbl_sw_${PT_FILE}.nc > ${K_COEFF_DIR}/co2_s${NBANDS}_6log \
    && echo "CO2 band 6 done" &
fi
if [ ! -s ${K_COEFF_DIR}/o3_u6_2l ] ; then
  echo "O3 bands 3-4"
  rm -f ${K_COEFF_DIR}/o3_u6_2l*
  Ccorr_k -F $o3_pt_file -X $HITRAN_UV_O3 \
    -R 3 4 -c 2500.0 -i 1.0 -l 3 1.0e-2 -t 1.0e-2 \
    -s ${SPECFILE} ${WGT} -q -r $ref_pt_file \
    -o ${K_COEFF_DIR}/o3_u6_2l -m ${K_COEFF_DIR}/o3_u6_2lm \
    -L $SW_DATA/o3_lbl_uv_${PT_FILE}.nc > ${K_COEFF_DIR}/o3_u6_2log \
    && echo "O3 bands 3-4 done" &
fi
if [ ! -s ${K_COEFF_DIR}/o3_s${NBANDS}_2l ] ; then
  echo "O3 band 6"
  rm -f ${K_COEFF_DIR}/o3_s${NBANDS}_2l*
  Ccorr_k -F ../${PT_FILE} -D $HITRAN_O3 \
    -R 6 6 -c 2500.0 -i 1.0 -l 3 1.0e-2 -t 1.0e-2 \
    -s ${SPECFILE} ${WGT} -lk \
    -o ${K_COEFF_DIR}/o3_s${NBANDS}_2l -m ${K_COEFF_DIR}/o3_s${NBANDS}_2lm \
    -L $SW_DATA/o3_lbl_sw_${PT_FILE}.nc > ${K_COEFF_DIR}/o3_s${NBANDS}_2log \
    && echo "O3 band 6 done" &
fi
if [ ! -s ${K_COEFF_DIR}/n2o_s${NBANDS}_6l ] ; then
  echo "N2O band 6"
  rm -f ${K_COEFF_DIR}/n2o_s${NBANDS}_6l*
  Ccorr_k -F ../${PT_FILE} -D $HITRAN_N2O \
    -R 6 6 -c 2500.0 -i 1.0 -l 4 5.0e-3 -t 6.0e-3 \
    -s ${SPECFILE} ${WGT} -lk \
    -o ${K_COEFF_DIR}/n2o_s${NBANDS}_6l -m ${K_COEFF_DIR}/n2o_s${NBANDS}_6lm \
    -L $SW_DATA/n2o_lbl_sw_${PT_FILE}.nc > ${K_COEFF_DIR}/n2o_s${NBANDS}_6log \
    && echo "N2O band 6 done" &
fi
if [ ! -s ${K_COEFF_DIR}/ch4_s${NBANDS}_5l ] ; then
  echo "CH4 band 5"
  rm -f ${K_COEFF_DIR}/ch4_s${NBANDS}_5l*
  Ccorr_k -F ../${PT_FILE} -D $HITRAN_CH4 \
    -R 5 5 -c 2500.0 -i 1.0 -l 6 1.0e-2 -t 6.0e-3 \
    -s ${SPECFILE} ${WGT} -lk \
    -o ${K_COEFF_DIR}/ch4_s${NBANDS}_5l -m ${K_COEFF_DIR}/ch4_s${NBANDS}_5lm \
    -L $SW_DATA/ch4_lbl_sw_${PT_FILE}.nc > ${K_COEFF_DIR}/ch4_s${NBANDS}_5log \
    && echo "CH4 band 5 done" &
fi
if [ ! -s ${K_COEFF_DIR}/ch4_s${NBANDS}_6l ] ; then
  echo "CH4 band 6"
  rm -f ${K_COEFF_DIR}/ch4_s${NBANDS}_6l*
  Ccorr_k -F ../${PT_FILE} -D $HITRAN_CH4 \
    -R 6 6 -c 2500.0 -i 1.0 -l 6 1.0e-2 -t 6.0e-3 \
    -s ${SPECFILE} ${WGT} -lk \
    -o ${K_COEFF_DIR}/ch4_s${NBANDS}_6l -m ${K_COEFF_DIR}/ch4_s${NBANDS}_6lm \
    -L $SW_DATA/ch4_lbl_sw_${PT_FILE}.nc > ${K_COEFF_DIR}/ch4_s${NBANDS}_6log \
    && echo "CH4 band 6 done" &
fi
if [ ! -s ${K_COEFF_DIR}/o2_u6_1l ] ; then
echo "O2 band 1"
rm -f ${K_COEFF_DIR}/o2_u6_1l*
Ccorr_k -F ../${PT_FILE} -D $HITRAN_UV_O2 \
-R 1 1 -c 2500.0 -i 1.0 -l 7 2.3e3 -t 1.0e-2 \
-s ${SPECFILE} ${WGT} -lk \
-o ${K_COEFF_DIR}/o2_u6_1l -m ${K_COEFF_DIR}/o2_u6_1lm \
-L $SW_DATA/o2_lbl_uv_${PT_FILE}.nc > ${K_COEFF_DIR}/o2_u6_1log \
&& echo "CO2 band 5 done" &
fi
if [ ! -s ${K_COEFF_DIR}/o2_s${NBANDS}_3l ] ; then
echo "O2 band 3"
rm -f ${K_COEFF_DIR}/o2_s${NBANDS}_3l*
Ccorr_k -F ../${PT_FILE} -D $HITRAN_O2 \
-R 3 3 -c 2500.0 -i 1.0 -l 7 2.3e3 -t 1.0e-2 \
-s ${SPECFILE} ${WGT} -lk \
-o ${K_COEFF_DIR}/o2_s${NBANDS}_3l -m ${K_COEFF_DIR}/o2_s${NBANDS}_3lm \
-L $SW_DATA/o2_lbl_sw_${PT_FILE}.nc > ${K_COEFF_DIR}/o2_s${NBANDS}_3log \
&& echo "O2 band 3 done" &
fi
if [ ! -s ${K_COEFF_DIR}/o2_s${NBANDS}_4l ] ; then
echo "O2 band 4"
rm -f ${K_COEFF_DIR}/o2_s${NBANDS}_4l*
Ccorr_k -F ../${PT_FILE} -D $HITRAN_O2 \
-R 4 4 -c 2500.0 -i 1.0 -l 7 2.3e3 -t 2.2e-3 \
-s ${SPECFILE} ${WGT} -lk \
-o ${K_COEFF_DIR}/o2_s${NBANDS}_4l -m ${K_COEFF_DIR}/o2_s${NBANDS}_4lm \
-L $SW_DATA/o2_lbl_sw_${PT_FILE}.nc > ${K_COEFF_DIR}/o2_s${NBANDS}_4log \
&& echo "O2 band 4 done" &
fi
if [ ! -s ${K_COEFF_DIR}/o2_s${NBANDS}_5l ] ; then
echo "O2 band 5"
rm -f ${K_COEFF_DIR}/o2_s${NBANDS}_5l*
Ccorr_k -F ../${PT_FILE} -D $HITRAN_O2 \
-R 5 5 -c 2500.0 -i 1.0 -l 7 2.3e3 -t 1.0e-2 \
-s ${SPECFILE} ${WGT} -lk \
-o ${K_COEFF_DIR}/o2_s${NBANDS}_5l -m ${K_COEFF_DIR}/o2_s${NBANDS}_5lm \
-L $SW_DATA/o2_lbl_sw_${PT_FILE}.nc > ${K_COEFF_DIR}/o2_s${NBANDS}_5log \
&& echo "O2 band 5 done" &
fi
if [ ! -s ${K_COEFF_DIR}/so2_s${NBANDS}_l ] ; then
echo "SO2 band 6"
rm -f ${K_COEFF_DIR}/so2_s${NBANDS}_l*
Ccorr_k -F ../${PT_FILE} -D $HITRAN_SO2 \
-R 6 6 -c 2500.0 -i 1.0 -l 9 3.0e-4 -t 1.0e-3 \
-s ${SPECFILE} ${WGT} -lk \
-o ${K_COEFF_DIR}/so2_s${NBANDS}_l -m ${K_COEFF_DIR}/so2_s${NBANDS}_lm \
-L $SW_DATA/so2_lbl_sw_${PT_FILE}.nc > ${K_COEFF_DIR}/so2_s${NBANDS}_log \
&& echo "SO2 band 6 done" &
fi
if [ ! -s ${K_COEFF_DIR}/so2_u6_l ] ; then
echo "SO2 band 1-2"
rm -f ${K_COEFF_DIR}/so2_u6_l*
Ccorr_k -F $so2_pt_file -X $HITRAN_UV_SO2 \
-R 1 2 -c 2500.0 -i 1.0 -l 9 3.0e-4 -t 1.0e-3 \
-s ${SPECFILE} ${WGT} -q -r $ref_pt_file \
-o ${K_COEFF_DIR}/so2_u6_l -m ${K_COEFF_DIR}/so2_u6_lm \
-L $SW_DATA/so2_lbl_uv_${PT_FILE}.nc > ${K_COEFF_DIR}/so2_u6_log \
&& echo "SO2 band 1-2 done" &
fi
if [ ! -s ${K_COEFF_DIR}/ocs_s${NBANDS}_l ] ; then
echo "OCS band 6"
rm -f ${K_COEFF_DIR}/ocs_s${NBANDS}_l*
Ccorr_k -F ../${PT_FILE} -D $HITRAN_OCS \
-R 6 6 -c 2500.0 -i 1.0 -l 25 1.0e-3 -t 1.0e-3 \
-s ${SPECFILE} ${WGT} -lk \
-o ${K_COEFF_DIR}/ocs_s${NBANDS}_l -m ${K_COEFF_DIR}/ocs_s${NBANDS}_lm \
-L $SW_DATA/ocs_lbl_sw_${PT_FILE}.nc > ${K_COEFF_DIR}/ocs_s${NBANDS}_log \
&& echo "OCS band 6 done" &
fi
if [ ! -s ${K_COEFF_DIR}/h2o_s${NBANDS}_3l ] ; then
echo "H2O lines band 3"
rm -f ${K_COEFF_DIR}/h2o_s${NBANDS}_3l*
Ccorr_k -F ../${PT_FILE} -D $HITRAN_H2O \
-R 3 3 -c 2500.0 -i 1.0 -l 1 1.0e2 -n 2 \
-s ${SPECFILE} ${WGT} -lk \
-k -x ${CONT_DIR}/mt_ckd_v3.0_frn \
-o ${K_COEFF_DIR}/h2o_s${NBANDS}_3l -m ${K_COEFF_DIR}/h2o_s${NBANDS}_3lm \
-L $SW_DATA/h2o_lbl_swf_${PT_FILE}.nc > ${K_COEFF_DIR}/h2o_s${NBANDS}_3log \
&& echo "H2O lines band 3 done" &
fi
if [ ! -s ${K_COEFF_DIR}/h2o_s${NBANDS}_4l ] ; then
echo "H2O lines band 4"
rm -f ${K_COEFF_DIR}/h2o_s${NBANDS}_4l*
Ccorr_k -F ../${PT_FILE} -D $HITRAN_H2O \
-R 4 4 -c 2500.0 -i 1.0 -l 1 1.0e2 -t 1.0e-2 \
-s ${SPECFILE} ${WGT} -lk \
-k -x ${CONT_DIR}/mt_ckd_v3.0_frn \
-o ${K_COEFF_DIR}/h2o_s${NBANDS}_4l -m ${K_COEFF_DIR}/h2o_s${NBANDS}_4lm \
-L $SW_DATA/h2o_lbl_swf_${PT_FILE}.nc > ${K_COEFF_DIR}/h2o_s${NBANDS}_4log \
&& echo "H2O lines band 4 done" &
fi
if [ ! -s ${K_COEFF_DIR}/h2o_s${NBANDS}_5l ] ; then
echo "H2O lines band 5"
rm -f ${K_COEFF_DIR}/h2o_s${NBANDS}_5l*
Ccorr_k -F ../${PT_FILE} -D $HITRAN_H2O \
-R 5 5 -c 2500.0 -i 1.0 -l 1 1.0e2 -t 1.0e-2 \
-s ${SPECFILE} ${WGT} -lk \
-k -x ${CONT_DIR}/mt_ckd_v3.0_frn \
-o ${K_COEFF_DIR}/h2o_s${NBANDS}_5l -m ${K_COEFF_DIR}/h2o_s${NBANDS}_5lm \
-L $SW_DATA/h2o_lbl_swf_${PT_FILE}.nc > ${K_COEFF_DIR}/h2o_s${NBANDS}_5log \
&& echo "H2O lines band 5 done" &
fi
if [ ! -s ${K_COEFF_DIR}/h2o_s${NBANDS}_6l ] ; then
echo "H2O lines band 6"
rm -f ${K_COEFF_DIR}/h2o_s${NBANDS}_6l*
Ccorr_k -F ../${PT_FILE} -D $HITRAN_H2O \
-R 6 6 -c 2500.0 -i 1.0 -l 1 1.0e2 -t 1.2e-2 \
-s ${SPECFILE} ${WGT} -lk \
-k -x ${CONT_DIR}/mt_ckd_v3.0_frn \
-o ${K_COEFF_DIR}/h2o_s${NBANDS}_6l -m ${K_COEFF_DIR}/h2o_s${NBANDS}_6lm \
-L $SW_DATA/h2o_lbl_swf_${PT_FILE}.nc > ${K_COEFF_DIR}/h2o_s${NBANDS}_6log \
&& echo "H2O lines band 6 done" &
fi
if [ ! -s ${K_COEFF_DIR}/h2o_s${NBANDS}_s ] ; then
echo "H2O self-broadened continuum"
rm -f ${K_COEFF_DIR}/h2o_s${NBANDS}_s*
Ccorr_k -C 33 1.0e-4 1.0e4 -F ../${PT_FILE} -D $HITRAN_H2O \
-P 7 -R 3 6 -c 2500.0 -i 1.0 \
-e ${CONT_DIR}/mt_ckd_v3.0_s296 ${CONT_DIR}/mt_ckd_v3.0_s260 \
-k -s ${SPECFILE} ${WGT} -q -r $ref_pt_file \
-o ${K_COEFF_DIR}/h2o_s${NBANDS}_s -m ${K_COEFF_DIR}/h2o_s${NBANDS}_sm \
-L $SW_DATA/h2o_lbl_swf_${PT_FILE}.nc > ${K_COEFF_DIR}/h2o_s${NBANDS}_slog \
&& echo "H2O self-broadened continuum done" &
fi
echo "Jobs running in the foreground:"
# O3 UV cross-sections fitted on the 12-band pseudo-band skeleton; the
# result is collapsed to the 6-band layout by the sed edits that follow.
if [ ! -s ${K_COEFF_DIR}/o3_u12_l ] ; then
  echo "O3 bands 1-8 (of 12)"
  rm -f ${K_COEFF_DIR}/o3_u12_l*
  Ccorr_k -F $o3_pt_file -X $HITRAN_UV_O3 \
    -R 1 8 -c 2500.0 -i 1.0 -l 3 1.0e-2 -n 1 \
    -s ${SPECFILE}12 ${WGT} -q -r $ref_pt_file \
    -o ${K_COEFF_DIR}/o3_u12_l -m ${K_COEFF_DIR}/o3_u12_lm \
    -L $SW_DATA/o3_lbl_uv_${PT_FILE}.nc > ${K_COEFF_DIR}/o3_u12_log \
    && echo "O3 bands 1-8 (of 12) done"
fi
# Edit 12 band O3 file to give 6 band file:
# Adjust weights to normalised solar spectrum weights from sp_sw_12_jm2dsa_skel
# (hardwired here: should be changed if solar spectrum is changed)
# Each branch rewrites the 12-band k-coefficient file with sed: line 15
# sets the sub-band count, each "NNs/1.000000000E+00/..." edit replaces a
# unit weight with the normalised solar weight for the chosen spectrum,
# and each "NN,NNd" edit deletes the pseudo-bands that were merged.
if [ ${SOLAR_SPEC} = "sun" ] ; then
  sed -e "15s/1/6/" \
      -e "18s/1.000000000E+00/2.121528934E-02/" -e "20,38d" \
      -e "39s/1.000000000E+00/3.472014963E-02/" -e "41,59d" \
      -e "60s/1.000000000E+00/4.818967733E-02/" -e "62,80d" \
      -e "81s/1.000000000E+00/1.412245476E-01/" -e "83,101d" \
      -e "102s/1.000000000E+00/2.941486590E-01/" -e "104,122d" \
      -e "123s/1.000000000E+00/4.605016771E-01/" \
      -e "130s/7/2/" \
      -e "141s/1/2/" \
      -e "144s/1.000000000E+00/2.900632239E-01/" -e "146,164d" \
      -e "165s/1.000000000E+00/7.099367761E-01/" \
      ${K_COEFF_DIR}/o3_u12_l > ${K_COEFF_DIR}/o3_u6_l
elif [ ${SOLAR_SPEC} = "pc" ] ; then
  sed -e "15s/1/6/" \
      -e "18s/1.000000000E+00/6.901997742E-01/" -e "20,38d" \
      -e "39s/1.000000000E+00/5.293133998E-02/" -e "41,59d" \
      -e "60s/1.000000000E+00/3.327782674E-02/" -e "62,80d" \
      -e "81s/1.000000000E+00/7.190050499E-02/" -e "83,101d" \
      -e "102s/1.000000000E+00/4.394576332E-02/" -e "104,122d" \
      -e "123s/1.000000000E+00/1.077447907E-01/" \
      -e "130s/7/2/" \
      -e "141s/1/2/" \
      -e "144s/1.000000000E+00/1.804974896E-01/" -e "146,164d" \
      -e "165s/1.000000000E+00/8.195025106E-01/" \
      ${K_COEFF_DIR}/o3_u12_l > ${K_COEFF_DIR}/o3_u6_l
elif [ ${SOLAR_SPEC} = "gj876" ] ; then
  sed -e "15s/1/6/" \
      -e "18s/1.000000000E+00/5.775872224E-01/" -e "20,38d" \
      -e "39s/1.000000000E+00/3.405126467E-02/" -e "41,59d" \
      -e "60s/1.000000000E+00/2.904063550E-02/" -e "62,80d" \
      -e "81s/1.000000000E+00/1.033824522E-01/" -e "83,101d" \
      -e "102s/1.000000000E+00/1.414090007E-01/" -e "104,122d" \
      -e "123s/1.000000000E+00/1.145294246E-01/" \
      -e "130s/7/2/" \
      -e "141s/1/2/" \
      -e "144s/1.000000000E+00/1.308343729E-01/" -e "146,164d" \
      -e "165s/1.000000000E+00/8.691656268E-01/" \
      ${K_COEFF_DIR}/o3_u12_l > ${K_COEFF_DIR}/o3_u6_l
elif [ ${SOLAR_SPEC} = "k186" ] ; then
  sed -e "15s/1/6/" \
      -e "18s/1.000000000E+00/2.091296926E-04/" -e "20,38d" \
      -e "39s/1.000000000E+00/1.384036529E-03/" -e "41,59d" \
      -e "60s/1.000000000E+00/8.923413110E-03/" -e "62,80d" \
      -e "81s/1.000000000E+00/5.035518055E-02/" -e "83,101d" \
      -e "102s/1.000000000E+00/1.794202293E-01/" -e "104,122d" \
      -e "123s/1.000000000E+00/7.597080108E-01/" \
      -e "130s/7/2/" \
      -e "141s/1/2/" \
      -e "144s/1.000000000E+00/1.306757696E-01/" -e "146,164d" \
      -e "165s/1.000000000E+00/8.693242305E-01/" \
      ${K_COEFF_DIR}/o3_u12_l > ${K_COEFF_DIR}/o3_u6_l
elif [ ${SOLAR_SPEC} = "sun_0.715gya" ] ; then
  sed -e "15s/1/6/" \
      -e "18s/1.000000000E+00/2.166362164E-02/" -e "20,38d" \
      -e "39s/1.000000000E+00/3.465688541E-02/" -e "41,59d" \
      -e "60s/1.000000000E+00/4.849478939E-02/" -e "62,80d" \
      -e "81s/1.000000000E+00/1.425375069E-01/" -e "83,101d" \
      -e "102s/1.000000000E+00/2.965934810E-01/" -e "104,122d" \
      -e "123s/1.000000000E+00/4.560537157E-01/" \
      -e "130s/7/2/" \
      -e "141s/1/2/" \
      -e "144s/1.000000000E+00/2.903952229E-01/" -e "146,164d" \
      -e "165s/1.000000000E+00/7.096047774E-01/" \
      ${K_COEFF_DIR}/o3_u12_l > ${K_COEFF_DIR}/o3_u6_l
elif [ ${SOLAR_SPEC} = "sun_2.9gya" ] ; then
  sed -e "15s/1/6/" \
      -e "18s/1.000000000E+00/2.038003079E-02/" -e "20,38d" \
      -e "39s/1.000000000E+00/3.070740297E-02/" -e "41,59d" \
      -e "60s/1.000000000E+00/4.502093415E-02/" -e "62,80d" \
      -e "81s/1.000000000E+00/1.402717281E-01/" -e "83,101d" \
      -e "102s/1.000000000E+00/2.924388629E-01/" -e "104,122d" \
      -e "123s/1.000000000E+00/4.711810409E-01/" \
      -e "130s/7/2/" \
      -e "141s/1/2/" \
      -e "144s/1.000000000E+00/2.834020970E-01/" -e "146,164d" \
      -e "165s/1.000000000E+00/7.165979031E-01/" \
      ${K_COEFF_DIR}/o3_u12_l > ${K_COEFF_DIR}/o3_u6_l
elif [ ${SOLAR_SPEC} = "hd22049" ] ; then
  sed -e "15s/1/6/" \
      -e "18s/1.000000000E+00/1.166662245E-02/" -e "20,38d" \
      -e "39s/1.000000000E+00/1.907442736E-02/" -e "41,59d" \
      -e "60s/1.000000000E+00/3.717517612E-02/" -e "62,80d" \
      -e "81s/1.000000000E+00/1.216750121E-01/" -e "83,101d" \
      -e "102s/1.000000000E+00/2.777548694E-01/" -e "104,122d" \
      -e "123s/1.000000000E+00/5.326538924E-01/" \
      -e "130s/7/2/" \
      -e "141s/1/2/" \
      -e "144s/1.000000000E+00/2.080770319E-01/" -e "146,164d" \
      -e "165s/1.000000000E+00/7.919229679E-01/" \
      ${K_COEFF_DIR}/o3_u12_l > ${K_COEFF_DIR}/o3_u6_l
else
  echo "Error, no weight conversion for spectrum."
  # BUGFIX: the original used "stop", which is not a shell command -- it
  # failed with status 127 and let the script continue with a missing
  # o3_u6_l file. Abort explicitly instead.
  exit 1
fi
# Barrier: wait for all background Ccorr_k jobs before assembling files.
wait
echo "Calculation of k-coefficients completed."
################################################################################
# Calculate optical properties
################################################################################
echo "Calculating cloud optical properties."
# Scatter-averaging expects the weight flag with a leading "-", not "+".
WGT_SCTAVG="-${WGT:1}"
mkdir -p ${CLD_COEFF_DIR}
if [ ! -s ${CLD_COEFF_DIR}/fit_sw_drop5_${NBANDS} ] ; then
  echo 'Type 5 droplets'
  rm -f ${CLD_COEFF_DIR}/mon_sw_drop5_${NBANDS}
  Cscatter_average -s ${SPECFILE} -P 1 -t ${WGT_SCTAVG} \
    -f 5 ${CLD_COEFF_DIR}/fit_sw_drop5_${NBANDS} \
    ${CLD_COEFF_DIR}/mon_sw_drop5_${NBANDS} 1.e3 \
    ${RAD_DATA}/cloud/scatter_drop_type5
fi
if [ ! -s ${CLD_COEFF_DIR}/fit_sw_ice8_${NBANDS} ] ; then
  echo 'Type 8 ice'
  rm -f ${CLD_COEFF_DIR}/mon_sw_ice8_${NBANDS}
  Cscatter_average -s ${SPECFILE} -P 1 -w ${WGT_SCTAVG} \
    -f 12 ${CLD_COEFF_DIR}/fit_sw_ice8_${NBANDS} \
    ${CLD_COEFF_DIR}/mon_sw_ice8_${NBANDS} 920 \
    ${RAD_DATA}/cloud/scatter_ice_sw_type8
fi
echo "Calculation of cloud optical properties completed."
################################################################################
# Create spectral file
################################################################################
echo "Creating spectral file."
# Create full path to spectral file
SPEC_FILE="${SPEC_FILE_DIR}/sp_sw_ga7_dsa_${SOLAR_SPEC}"
mkdir -p ${SPEC_FILE_DIR}
rm -f ${SPEC_FILE}
rm -f ${SPEC_FILE}_k
# Replace old k-coefficients by new k-coefficients in spectral file
# (prep_spec is interactive; the heredoc feeds its menu: "5" adds line
# absorption, "9" the continuum, "y" appends to the same gas, "-1" quits.
# Do not reformat the heredoc contents -- they are prep_spec input.)
prep_spec << EOF > mk_sp_sw_ga7_dsa_${SOLAR_SPEC}_out
sp_sw_${NBANDS}_${SP_ID}_${SOLAR_SPEC}_skel
n
${SPEC_FILE}
5
${K_COEFF_DIR}/h2o_s${NBANDS}_4l
5
y
${K_COEFF_DIR}/h2o_s${NBANDS}_5l
5
y
${K_COEFF_DIR}/h2o_s${NBANDS}_6l
9
${K_COEFF_DIR}/h2o_s${NBANDS}_s
5
y
${K_COEFF_DIR}/co2_s${NBANDS}_5l
5
y
${K_COEFF_DIR}/co2_s${NBANDS}_6l
5
y
${K_COEFF_DIR}/o3_u6_2l
5
y
${K_COEFF_DIR}/o3_s${NBANDS}_2l
5
y
${K_COEFF_DIR}/o3_u6_l
5
y
${K_COEFF_DIR}/h2o_s${NBANDS}_3l
5
y
${K_COEFF_DIR}/n2o_s${NBANDS}_6l
5
y
${K_COEFF_DIR}/ch4_s${NBANDS}_5l
5
y
${K_COEFF_DIR}/ch4_s${NBANDS}_6l
5
y
${K_COEFF_DIR}/o2_u6_1l
5
y
${K_COEFF_DIR}/o2_s${NBANDS}_3l
5
y
${K_COEFF_DIR}/o2_s${NBANDS}_4l
5
y
${K_COEFF_DIR}/o2_s${NBANDS}_5l
5
y
${K_COEFF_DIR}/so2_s${NBANDS}_l
5
y
${K_COEFF_DIR}/so2_u6_l
5
y
${K_COEFF_DIR}/ocs_s${NBANDS}_l
-1
EOF
# Add cloud fits to spectral file
# (menu "10" = droplet parametrisation, "12" = ice; the number pairs are
# the valid effective-radius/dimension ranges in metres)
prep_spec << EOF >> mk_sp_sw_ga7_dsa_${SOLAR_SPEC}_out
${SPEC_FILE}
a
10
5
${CLD_COEFF_DIR}/fit_sw_drop5_${NBANDS}
1.50000E-06 5.00000E-05
12
8
${CLD_COEFF_DIR}/fit_sw_ice8_${NBANDS}
7.00000E-06 3.20000E-04
-1
EOF
# Add solar spectrum and Rayleigh scattering coefficients
prep_spec << EOF >> mk_sp_sw_ga7_dsa_${SOLAR_SPEC}_out
${SPEC_FILE}
a
2
y
n
${SOLAR_SPEC_DIR}/${SOLAR_SPEC}
y
3
a
-1
EOF
. ../set_permissions
echo "Spectral file sp_sw_ga7_dsa_${SOLAR_SPEC} created. All done."
echo "Please check mk_sp_sw_ga7_dsa_${SOLAR_SPEC}_out for errors."
exit 0
| true
|
9f250e057f35e8f7bc24a0b7c1a28e42d113f542
|
Shell
|
dschexna/fedemo
|
/vm/files/mapr-warden-302-patched
|
UTF-8
| 8,992
| 3.21875
| 3
|
[] |
no_license
|
#! /bin/bash
###
#
# chkconfig: 35 20 40
# description: Enables MapR warden services
#
# LSB compliant service control script
#
### BEGIN INIT INFO
# Provides: mapr-warden
# Required-Start: $network
# Required-Stop: $network
# Should-Start:
# Should-Stop:
# Default-Start: 3 5
# Default-Stop: 0 1 2 6
# Short-Description: Start MapR warden service
### END INIT INFO
# Source function library.
. /etc/init.d/functions
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.0-6) to ensure that this file is present.
. /lib/lsb/init-functions
BASEMAPR=${MAPR_HOME:-/opt/mapr}
PATH=$PATH:/sbin:/usr/sbin:/bin:/usr/bin:$BASEMAPR/warden/
PATH=$PATH:$BASEMAPR/lib
PATH=$PATH:$BASEMAPR/server
# Optional site overrides (JAVA_HOME, tunables) from the MapR env file.
env=${BASEMAPR}/conf/env.sh
[ -f $env ] && . $env
if [ ! -z $JAVA_HOME ]; then
export PATH=$JAVA_HOME/bin:$PATH
fi
# Bug6901: set default OS limits
# max processes
ulimit -u ${MAPR_ULIMIT_U:-64000}
# max file descriptors
ulimit -n ${MAPR_ULIMIT_N:-64000}
# max socket connections
sysctl -q -w net.core.somaxconn=${MAPR_SYSCTL_SOMAXCONN:-20000}
# Bug8992: use root's default umask 022 instead of non-root 002
umask ${MAPR_UMASK:-022}
DESC="warden daemon"
NAME=warden
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/mapr-$NAME
#Exit if the package is not installed
[ -x "$SCRIPTNAME" ] || exit 0
# Read configuration variable file if it is present.
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
. $BASEMAPR/server/initscripts-common.sh
. $BASEMAPR/server/configure-common.sh
# MFS server port, used on stop to clear its SysV shared-memory segment.
MFS_PORT=$(awk -F = '$1 == "mfs.server.port" { print $2 }' ${BASEMAPR}/conf/mfs.conf)
# Variables
RC_LOCK_DIR=/var/lock/subsys
WARDEN_LOCK_FILE=${RC_LOCK_DIR}/mapr-warden
WARDEN_HOME=$BASEMAPR
WARDEN_LOG_DIR=${WARDEN_HOME}/logs
WARDEN_LOG_FILE=${WARDEN_LOG_DIR}/warden.log
WARDEN_PID_DIR=${WARDEN_HOME}/logs
WARDEN_OPTS="-Dmapr.library.flatclass -Dcom.sun.management.jmxremote"
# Classpath: conf dir plus every jar under $WARDEN_HOME/lib.
WARDEN_CLASSPATH="${WARDEN_HOME}:${WARDEN_HOME}/conf"
for i in ${WARDEN_HOME}/lib/*.jar; do
WARDEN_CLASSPATH=${WARDEN_CLASSPATH}:$i;
done
WARDEN_CONF=${WARDEN_HOME}/conf/warden.conf
# Command used to stop any leftover JobTracker before (re)starting.
JT_STOP_COMMAND=$(grep "service.command.jt.stop" ${WARDEN_CONF} | sed 's/service.command.jt.stop=//')
# java.library.path for rpc in c++
WARDEN_JAVA_LIBRARY_PATH="${WARDEN_HOME}/lib"
WARDEN_OPTS="${WARDEN_OPTS} -Dpid=$$ -Dpname=warden -Dmapr.home.dir=${WARDEN_HOME}"
# stop these guys at the end (even if it wan't started by warden)
STOP_SCRIPTS="/etc/init.d/mapr-hoststats /etc/init.d/mapr-nfsserver \
/etc/init.d/mapr-cldb /etc/init.d/mapr-mfs"
command="WARDEN"
log=${WARDEN_LOG_FILE}
pid=${WARDEN_PID_DIR}/warden.pid
logFile=${WARDEN_LOG_DIR}/wardeninit.log
# Optional run-as configuration: which unix user/group owns the daemon.
DAEMON_CONF="$BASEMAPR/conf/daemon.conf"
if [ -e $DAEMON_CONF ]; then
MAPR_USER=$(awk -F = '$1 == "mapr.daemon.user" { print $2 }' $DAEMON_CONF)
MAPR_GROUP=$(awk -F = '$1 == "mapr.daemon.group" { print $2 }' $DAEMON_CONF)
RUN_AS_MAPR_USER=$(awk -F = '$1 == "mapr.daemon.runuser.warden" { print $2 }' $DAEMON_CONF)
fi
# Rotate $1 through numbered backups: $1 -> $1.1 -> ... -> $1.N where
# N is $2 (default 10). The oldest backup, $1.N, is overwritten.
# No-op when $1 does not exist.
rotate_log()
{
log=$1;
num=${2:-10}
if [ -f "$log" ]; then # rotate logs
# Shift every existing backup up by one, oldest first.
while [ "$num" -gt 1 ]; do
prev=$((num - 1))
[ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
num=$prev
done
mv "$log" "$log.1";
fi
}
# MapR Demo VM support: run the DemoVMSupport helper (which maps the
# machine IP to the hostname in /etc/hosts) when it is installed.
# Output is appended to the warden log; always returns 0.
SetupEtcHosts()
{
local helper="$BASEMAPR/demovm/DemoVMSupport.pl"
if [ -x "$helper" ]; then
"$helper" >> "${WARDEN_LOG_FILE}" 2>&1
fi
}
# Start the warden JVM: verify it is not already running, check Java and
# the FQDN, rotate logs, optionally drop privileges to the MapR user,
# launch WardenMain in the background recording its pid, then kick off
# system-volume creation once the process is confirmed alive.
do_start()
{
date >> $logFile
echo "Start warden" >> $logFile
if [ -f $pid ]; then
if kill -0 `cat $pid` >> $logFile 2>&1; then
# check if it is really warden or some other process
WPID=$(cat $pid)
if cat /proc/$WPID/cmdline | grep Warden >> $logFile 2>&1; then
echo $command running as process `cat $pid`. Stop it | tee -a ${WARDEN_LOG_FILE}
exit 0
fi
fi
fi
JAVA=$(CheckForJava)
if [ $? != 0 ]; then
echo $JAVA | tee -a ${WARDEN_LOG_FILE}
exit 1
fi
# setup hostname
# Write to a temp file first so a failing `hostname --fqdn` (broken DNS)
# does not clobber the last known-good hostname file.
HOSTNAME_FILE="$BASEMAPR/hostname"
echo `/bin/hostname --fqdn` > ${HOSTNAME_FILE}.$$
if [ $? -ne 0 ]; then
echo "INFO: cmd echo `/bin/hostname --fqdn` failed" | tee -a ${WARDEN_LOG_FILE}
echo " Please check your DNS settings" | tee -a ${WARDEN_LOG_FILE}
echo " Using previous version of ${HOSTNAME_FILE}" | tee -a ${WARDEN_LOG_FILE}
else
cp ${HOSTNAME_FILE}.$$ ${HOSTNAME_FILE}
fi
rm ${HOSTNAME_FILE}.$$
if [[ ! -f ${HOSTNAME_FILE} ]] ; then
echo "ERROR: ${HOSTNAME_FILE} not present. Exiting !" | tee -a ${WARDEN_LOG_FILE}
exit 1
fi
if [[ ! -s ${HOSTNAME_FILE} ]] ; then
echo "ERROR: Empty ${HOSTNAME_FILE}. Exiting !" | tee -a ${WARDEN_LOG_FILE}
exit 1
fi
#create cpu_mem_disk file
CreateCpuMemDiskFile;
# setup core generation
mkdir -p /opt/cores
chmod 777 /opt/cores/
echo "/opt/cores/%e.core.%p.%h" > /proc/sys/kernel/core_pattern
rotate_log $log
# MapR Demo VM support: By default the VM images have 127.0.0.1 -> hostname.
# This causes create*volumes.sh to hang. Set up the ip -> host in /etc/hosts.
SetupEtcHosts;
# Stop JT if it is running for some reason - just in case it is a residue from previous crash
${JT_STOP_COMMAND} >> $logFile 2>&1
# Decide which user runs warden; when mapr.daemon.runuser.warden=1 the
# JVM is launched via "su" as the configured MapR user.
CURR_USER=`id -nu`
CURR_GROUP=`id -ng`
if [ "xxx$MAPR_USER" != "xxx" -a "$MAPR_USER" != "root" ]; then
chown $MAPR_USER $HOSTNAME_FILE
ChownRootFiles $BASEMAPR/logs $MAPR_USER $MAPR_GROUP
if [ "xxx$RUN_AS_MAPR_USER" = "xxx1" ]; then
MAPR_SHELL="/bin/sh"
RUN_AS_CMD="su -s $MAPR_SHELL -p $MAPR_USER -c"
CURR_USER=$MAPR_USER
CURR_GROUP=$MAPR_GROUP
fi
fi
ConfigureRunUserForHadoop $CURR_USER
ConfigureRunUserTTLocalDir $CURR_USER $CURR_GROUP
SetPermissionOnLinuxTaskController $CURR_GROUP
WARDEN_CMD="$JAVA \
-XX:ErrorFile='/opt/cores/hs_err_pid%p.log' \
-XX:-HeapDumpOnOutOfMemoryError \
-XX:HeapDumpPath='/opt/cores' \
-XX:+UseConcMarkSweepGC \
-Dlog.file=${WARDEN_LOG_FILE} \
-Djava.library.path=${WARDEN_JAVA_LIBRARY_PATH} -classpath \
${WARDEN_CLASSPATH} ${WARDEN_OPTS} \
com.mapr.warden.WardenMain ${WARDEN_CONF}"
if [ "xxx$RUN_AS_CMD" != "xxx" ]; then
rm -f $pid
$RUN_AS_CMD "$WARDEN_CMD & echo \$! > $pid " >> $logFile 2>&1 < /dev/null &
else
$WARDEN_CMD >> $logFile 2>&1 < /dev/null &
echo $! > $pid
fi
# Wait up to 30s for the pid file to appear before probing the process.
wait_count=30
echo "Starting $command, logging to $log."
while ! cat $pid > /dev/null 2>&1 && [ $wait_count -gt 1 ] ; do
wait_count=`expr $wait_count - 1`
echo -n "."
sleep 1
done
echo
if kill -0 `cat $pid` >> $logFile 2>&1; then
echo "For diagnostics look at ${BASEMAPR}/logs/ for createsystemvolumes.log, warden.log and configured services log files"
if [ "xxx$RUN_AS_CMD" != "xxx" ]; then
$RUN_AS_CMD "nohup bash ${BASEMAPR}/server/createsystemvolumes.sh" >> $logFile 2>&1 &
else
nohup bash ${BASEMAPR}/server/createsystemvolumes.sh >> $logFile 2>&1 &
fi
[ -d $RC_LOCK_DIR ] && touch ${WARDEN_LOCK_FILE}
echo "Warden started" >> $logFile
else
echo "Warden start failed" >> $logFile
echo "Error: warden can not be started. See ${WARDEN_LOG_FILE} for details" | tee -a ${WARDEN_LOG_FILE}
exit 1
fi
}
# Stop warden and every MapR service it supervises: signal the recorded
# pid and wait for it to exit, clear the MFS shared-memory segment, kill
# a still-running createsystemvolumes.sh, then run each service's own
# init-script stop (in case some were started outside warden).
# Returns 0 when warden was running, 1 otherwise.
do_stop()
{
date >> $logFile
echo "Stop warden" >> $logFile
status=0
if [ -f $pid ]; then
if kill -0 `cat $pid` >> $logFile 2>&1; then
echo stopping $command
kill `cat $pid`
# Block until the process is really gone (kill -0 probes liveness).
while kill -0 `cat $pid` >> $logFile 2>&1; do
sleep 1;
done
# clean up ipcrm
ipcrm -M $MFS_PORT >> $logFile 2>&1
status=0
else
echo $command not running. | tee -a ${WARDEN_LOG_FILE}
status=1
fi
else
echo $command not running. | tee -a ${WARDEN_LOG_FILE}
status=1
fi
volScriptPid="${BASEMAPR}/logs/createsystemvolumes.sh.pid"
#kill createsystemvolumes.sh if its still running
if [ -f $volScriptPid ]; then
kill -9 `cat $volScriptPid` >> $logFile 2>&1
fi
# Now make sure we dont have any of our processes running
echo looking to stop mapr-core processes not started by warden | tee -a ${WARDEN_LOG_FILE}
for stop_script in ${STOP_SCRIPTS}
do
if [ -x ${stop_script} ]; then
${stop_script} stop >> $logFile 2>&1
fi
done
rm -f ${WARDEN_LOCK_FILE} # ignore errors
return ${status}
}
# Report whether the warden process recorded in $pid is alive.
# Returns 0 when running, 1 otherwise; status text goes to stdout.
do_status() {
if [ ! -f "$pid" ]; then
echo $command not running.
return 1
fi
if kill -0 $(cat "$pid") >> "$logFile" 2>&1; then
echo $command running as process $(cat "$pid").
return 0
fi
echo "$pid" exists with pid $(cat "$pid") but no $command.
return 1
}
# All control commands require root: warden adjusts ulimits, sysctl
# settings, core patterns and system services.
if [ $(id -u) -ne 0 ]; then
echo "Must be root to run warden start/stop/restart commands" | tee -a ${WARDEN_LOG_FILE}
exit 1
fi

case "$1" in
start)
do_start
;;
stop)
do_stop
;;
status)
do_status
;;
restart)
do_stop
do_start
;;
*)
# BUGFIX: removed the stray trailing "|" from the usage string.
echo "Usage: $SCRIPTNAME {start|stop|status|restart}" >&2
exit 3
;;
esac

# Propagate the dispatched command's exit status.
exit $?
| true
|
50ad05a0cfd6c6e6e612b73ab0ee6ddc74264460
|
Shell
|
jsaura27/Medical-Records
|
/certificatesGeneration
|
UTF-8
| 2,781
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Copyright (C) 2017 by Pablo Correa Gomez
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# ( http://www.fsf.org/licenses/gpl.txt )
#####################################################################
# Generate a self-signed root CA, then per-machine keypairs (client and
# server) signed by that CA, storing everything in PKCS12 key/trust stores.
#DIR=/Users/oscarodestal/Skola/datasakerhet/digital_certificates/certFiles
DIR=/home/pablo/data/Informatica/medicalRecords/certFiles
LANG="-J-Duser.language=en" #Needed to fix a bug with Spanish and Swedish translations
# NOTE(review): store/key passwords are hard-coded demo credentials —
# fine for a lab exercise, never for production.
ST_PASSWD="-storepass password"
KEY_PASSWD="-keypass password"
echo "Creating X.509 CA self signed. Still will be prompted to include CN
"
openssl req -x509 -newkey rsa:4096 -keyout ${DIR}/rootCAkey.pem -nodes -out ${DIR}/rootCA.pem
echo "Creating keystore to host the CA, aka truststore
"
keytool $LANG -importcert -alias rootCA -file ${DIR}/rootCA.pem $KEY_PASSWD -trustcacerts -storetype pkcs12 -keystore ${DIR}/clienttruststore $ST_PASSWD
# One pass per endpoint: the client gets the group members as CN,
# the server a fixed CN.
for machine in client.client server.server
do
if [ $machine = client.client ]
then
KEYSTORE=clientkeystore
KEY_CN="Jaime Saura Bastida (940718T313) /\
Pablo Correa Gomez (960924T154) /\
Anna Palmqvist Sjovall (dat15asj) /\
Oscar Odestal (dat15ood) "
else
KEYSTORE=serverkeystore
KEY_CN=MyServer
fi
echo "Generating $machine keypair
"
keytool $LANG -genkeypair -alias client_cert -keyalg rsa -keysize 4096 -dname \
CN="$KEY_CN" $KEY_PASSWD -keystore ${DIR}/$KEYSTORE -storetype pkcs12 $ST_PASSWD
echo "Generating $machine cert sign request
"
keytool $LANG -certreq -alias client_cert -keyalg rsa -keysize 4096 -file ${DIR}/clientkeyCSR.pem -keystore ${DIR}/$KEYSTORE -storetype pkcs12 $ST_PASSWD
echo "Signing $machine cert sign request
"
openssl x509 -req -in ${DIR}/clientkeyCSR.pem -out ${DIR}/clientCertSigned.pem -CA ${DIR}/rootCA.pem -CAkey ${DIR}/rootCAkey.pem -CAcreateserial
echo "Importing $machine certs to keystore
"
# CA cert must be imported before the signed leaf so the chain validates.
keytool $LANG -importcert -alias CA_cert -file ${DIR}/rootCA.pem -trustcacerts -keystore ${DIR}/$KEYSTORE -storetype pkcs12 $ST_PASSWD
keytool $LANG -importcert -alias client_cert -file ${DIR}/clientCertSigned.pem -keystore ${DIR}/$KEYSTORE -storetype pkcs12 $ST_PASSWD
done
echo "Creating server.server truststore
"
keytool $LANG -importcert -alias rootCA -file ${DIR}/rootCA.pem $KEY_PASSWD -trustcacerts -storetype pkcs12 -keystore ${DIR}/servertruststore $ST_PASSWD
| true
|
1fac52d9de5dc84a6013e08a65b1199f18754793
|
Shell
|
veltzer/demos-bash
|
/src/examples/core/variables/long_variable.bash
|
UTF-8
| 544
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -u
# This is an example of how to do long variables in bash
#
# References:
# - https://unix.stackexchange.com/questions/340718/how-do-i-bring-heredoc-text-into-a-shell-script-variable
read_it() {
	# Capture all of stdin in the global VAR1.  Command substitution
	# strips any trailing newlines, exactly like the plain $(cat) form.
	VAR1=$(cat -)
}
# Technique 1: feed a here-doc to a helper that captures stdin.
read_it <<EOF
first line
second line
third line
EOF
echo "$VAR1"
# Technique 2: read up to an empty delimiter (i.e. EOF) in one go.
# Note: read returns non-zero at EOF since the '' delimiter is never
# seen, but VAR2 is still populated.
IFS='' read -d '' -r VAR2 <<EOF
first line
second line
third line
EOF
echo "$VAR2"
# A variable holding a literal newline, used as separator by read_heredoc.
nl="
"
# Technique 3: accumulate stdin line by line into the global VAR3,
# re-appending the newline stripped by read.  Depends on the file-global
# $nl (a single newline) defined just above.
read_heredoc(){
VAR3=""
while IFS="$nl" read -r line; do
VAR3="$VAR3$line$nl"
done
}
# Exercise technique 3; unlike $(cat), this variant keeps the trailing newline.
read_heredoc <<EOF
first line
second line
third line
EOF
echo "$VAR3"
| true
|
e6eb851082e5d2b83a22b35758ed8940645ae68a
|
Shell
|
jnewblanc/awsclitools
|
/ec2/add_default_volume_tags.sh
|
UTF-8
| 877
| 3.953125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# Get the list of untagged volumes for this instance and then slap some default
# tags on them.

# Resolve the directory this script lives in so the helper script can be
# located next to it, regardless of the caller's working directory.
scriptdir=`(cd $(dirname $0); pwd)`

if [ ! -x "${scriptdir}/get_my_aws_info.sh" ]; then
  echo "Can not find ${scriptdir}/get_my_aws_info.sh. Aborting"
  exit 0
fi

role=`${scriptdir}/get_my_aws_info.sh Role`
if [ "${role}" = "" ]; then
  echo "Could not determine role for this instance. Aborting"
  exit 0
fi

# Use the same helper that was existence-checked above.  (The original
# invoked a hard-coded /opt/serviceNow/chefInstalled/bin copy here, which
# defeated the check and broke on hosts without that install path.)
untaggedVols=`${scriptdir}/get_my_aws_info.sh UntaggedAttachedVolumes`
if [ "${untaggedVols}" = "" ]; then
  echo "Did not detect any untagged volumes for this instance. Aborting"
  exit 0
fi

for vol in ${untaggedVols} ; do
  # $$ (this script's PID) keeps Name tags unique across runs.
  nametag="${role}.$$"
  echo "Tagging ${vol} with Name ${nametag}"
  /usr/local/bin/aws ec2 create-tags --profile autoprov --resources ${vol} --tags "Key=Name,Value=${nametag} Key=Role,Value=${role}"
done
| true
|
3f36cff6f218f2ab840a8ff7563fd9a9920ed73c
|
Shell
|
souchtal/fti
|
/testing/itf/engine
|
UTF-8
| 9,809
| 4.21875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Include all ITF variables
source '@itf_dir@/variables'
# Include all ITF Fixture-public functions
source '@itf_dir@/api'
# -------------------------- Test Runner Internals ----------------------------
itf_load_fixture() {
    # Loads the function definitions in a fixture to the current context
    #
    # Parameters:
    #   $1: The fixture name without the .fixture extension
    #
    # Side effects: sets the globals itf_loaded_fixture and
    # itf_fixture_had_errors, and wipes any logs from a previous fixture.
    itf_loaded_fixture="$(basename "$1")"
    itf_fixture_had_errors='false'
    source "$1"
    if [ $? -ne 0 ]; then
        # fail (defined elsewhere) aborts the whole run.
        fail 'Fixture file not found'
    fi
    itf_clear_logs
}
itf_unload_fixture() {
    # Drop every per-fixture definition so the next fixture loads into a
    # clean context.  Must be called before loading another fixture.
    local name
    for name in setup prepare_fti runtest teardown; do
        unset "$name"
    done
    # Give the fixture one last chance to clean up, then drop the hook too.
    type on_fixture_teardown &>/dev/null && on_fixture_teardown
    unset on_fixture_teardown
}
test_case_dry() {
    # Prepare a test for execution and print its info but do not execute it
    #
    # Parameters:
    #   $@ The parameters to be passed to the fixture setup
    #
    # Relies on the fixture's setup() having been loaded; setup registers
    # the parameter names that itf_param_parse then binds to variables.
    itf_param_clear     # Clear parameters from previous test case
    setup $@            # Call setup to declare arguments and other data
    itf_param_parse $@  # Parse the test case arguments an ddefine variables
    # Print feedback
    echo "$(itf_describe_test_case)"
}
test_case() {
    # Run a test defined in a fixture with a set of input data
    #
    # Parameters:
    #   $@ The parameters to be passed to the fixture setup
    #
    # Returns:
    #   Non-zero values if the test fails
    #
    # Detailed:
    #   Run a test case until completion and checks if it failed.
    #   If it fails, append the log into a file named after the test suite.

    # Create the FTI configfile based on the mock
    cat "$itf_cfg_template" >$itf_cfgfile
    test_case_dry $@
    # Prepare the FTI config file
    type prepare_fti &>/dev/null && prepare_fti
    # Execute the test fixture.  The subshell isolates the fixture's
    # environment changes and lets fail() abort only the test, not ITF.
    (
        # Test if ITF has any error stored
        if [ ! -z "$itf_error" ]; then
            itf_log_test_case $itf_tstdout
            echo "$itf_error" >>$itf_tstdout
            fail "$itf_error"
        fi
        # Setup the config file with standarized variable names
        itf_set_default_variables_to_config
        # Run the test
        runtest
    )
    # Capture the subshell's status immediately, before any other command.
    local __exitcode=$?
    # Save logs if needed
    itf_append_logs $__exitcode
    # Call the test case teardown
    type teardown &>/dev/null && teardown
    # Remove the global, local and meta directories
    if [ $itf_maintain_ckpt -eq 0 ]; then
        local _locdir="$(fti_config_get 'ckpt_dir')"
        local _globaldir="$(fti_config_get 'glbl_dir')"
        local _metadir="$(fti_config_get 'meta_dir')"
        rm -rf $_locdir $_globaldir $_metadir
    fi
    # Reset the ranks into the default rank count
    itf_nranks=$itf_nranks_default
    # Remove the config files and test stdout
    rm -rf $itf_cfgfile $itf_tstdout
    # Return the test exit code
    return $__exitcode
}
# ---------------------------- Engine Log handling ----------------------------
itf_log() {
    # Logs an ITF message; the message is always appended to the test
    # stdout file, and also echoed to the console when the configured
    # verbosity exceeds the message's level.
    #
    # Parameters:
    #   $1: The message log level
    #   $@: The message
    local threshold=$(( $1 - 1 ))
    shift
    if [ $itf_verbose -gt $threshold ]; then
        echo $@ | tee -a $itf_tstdout
    else
        echo $@ >> $itf_tstdout
    fi
}
itf_get_fixture_failure_log() {
    # Name of the log that collects stdout of failed test cases for the
    # currently loaded fixture (global itf_loaded_fixture).
    printf '%s-failed.log\n' "$itf_loaded_fixture"
}
itf_get_fixture_full_log() {
    # Name of the log that collects stdout of every test case for the
    # currently loaded fixture (global itf_loaded_fixture).
    printf '%s-all.log\n' "$itf_loaded_fixture"
}
itf_clear_logs() {
    # Delete all log files managed by ITF: the per-fixture failure and
    # full logs plus the global report ($itf_report_log).
    rm -rf "$(itf_get_fixture_failure_log)"
    rm -rf "$(itf_get_fixture_full_log)"
    rm -rf "$itf_report_log"
}
itf_log_test_case() {
    # Log the test case into a log file
    #
    # Parameters:
    #   $1: The log file
    #
    # Writes one "--name value" pair per registered parameter, followed
    # by a blank line.
    local _test_name=""
    for p in "${itf_paramv[@]}"; do
        # ${!p} is indirect expansion: the value of the variable named $p.
        _test_name="${_test_name}--$p ${!p} "
    done
    echo $_test_name >>$1
    echo "" >>$1 # new line feed
}
itf_append_logs() {
    # Append test case to the different logs based on the test status
    #
    # Parameters:
    #   $1: The test status
    #
    # Temporarily enables errexit so a failed file operation aborts;
    # restored with set +e at the end.
    set -e
    # A log containing stdout of failed tests
    local faillog="$(itf_get_fixture_failure_log)"
    # A log containing stdout of successfull tests
    local normallog="$(itf_get_fixture_full_log)"
    # Check if the test has failed
    if [ $1 -ne 0 ]; then
        # Always push the test stdout into the log
        itf_log_test_case $itf_tstdout
        cat $itf_tstdout >>$faillog
        echo "" >>$faillog # line feed
        # Check if this is the first failure in the fixture
        if [ $itf_fixture_had_errors == 'false' ]; then
            itf_fixture_had_errors='true'
            # Push the fixture name into the ITF report
            echo $itf_loaded_fixture >>$itf_report_log
        fi
        # Log the test arguments into ITF report
        itf_log_test_case $itf_report_log
    fi
    # Check if ITF is configured to save all logs
    if [ $itf_maintain_app_logs -eq 1 ]; then
        itf_log_test_case $itf_tstdout
        cat $itf_tstdout >>$normallog
        echo "" >>$faillog # line feed
    fi
    set +e
}
# --------------------------- ITF Interal functions ---------------------------
itf_describe_test_case() {
    # Describe the current test case in terms of its arguments and values
    #
    # Prints colored "name=value" pairs via the print_color helper
    # (defined elsewhere) for every registered parameter.
    for p in "${itf_paramv[@]}"; do
        print_color $COLOR_BLUEBOLD "$p="
        # IOLib has a direct string representation
        if [ $p == 'iolib' ]; then
            print_color $COLOR_WHITEBOLD "$(iolib_id_to_name $iolib) "
        else
            # ${!p}: value of the variable whose name is stored in p.
            print_color $COLOR_WHITEBOLD "${!p} "
        fi
    done
}
itf_find_fti_objects() {
    # Find checkpoint objects from last ITF-managed execution (app_run*)
    #
    # Parameters:
    #
    # $1: Which checkpoint object to find
    #    values: [ 'global', 'ckpt', 'partner', 'rs-encoding', 'node' ]
    # $@: which nodes to delete, for local, if IO is not MPIIO or SIONLIB
    #    values: node names
    #
    # Usage Details:
    #
    # When the object is 'global' returns:
    # - The L4 directory in the FTI Global archive
    # When the object is 'ckpt' returns:
    # - First local checkpoint file, in a given level, for every supplied node
    # When the object is 'partner' returns:
    # - First local partner file, in a given level, for every supplied node
    # When the object is 'rs-encoding' returns:
    # - First local RS-encoding, in a given level, for every supplied node
    # When the object is 'node' returns:
    # - Local node directory for every supplied node
    #
    # Requires the caller-scope variable $level except for 'global'.

    if [ ! $1 == 'global' ] && [ -z $level ]; then
        fail '$level variable must be set for ITF to delete checkpoint files'
    fi
    local _id=$(fti_config_get 'exec_id')
    local _erase_all='false'
    # Switch action depending on the checkpoint object suppllied.
    # The case sets _what to the filename tag used to grep the object.
    case $1 in
    global)
        # When deleting from global, delete everything
        echo "$(fti_config_get 'glbl_dir')/$_id/l4"
        return 0
        ;;
    ckpt)
        local _what='Rank'
        ;;
    partner)
        local _what='Pcof'
        ;;
    rs-encoding)
        local _what='RSed'
        ;;
    node)
        # When erasing the node, we delete all local information
        _erase_all='true'
        ;;
    *)
        fail "checkpoint object type malformed $1, try one of the following: 'global', 'ckpt', 'partner' or 'rs-encoding', 'node'"
        ;;
    esac
    shift
    local _where="$(fti_config_get 'ckpt_dir')"
    local _files=""
    # Collect one path per remaining argument (node name).
    for i in $@; do
        if [ $_erase_all == 'true' ]; then
            _files="$_files $_where/$i"
        else
            _files="$_files $(find $_where/$i/$_id/l$level | grep "$_what" | head -n 1)"
        fi
    done
    echo "$_files"
}
itf_param_clear() {
    # Unset every variable registered as a test parameter, then reset
    # the parameter registry and any stored ITF error message.
    local param
    for param in "${itf_paramv[@]}"; do
        unset "$param"
    done
    itf_error=""
    itf_paramv=()
}
itf_set_default_variables_to_config() {
    # Set up the config file using ITF standard variables names
    #
    # This method is always called after the test case 'setup'.
    # Each standard variable, when set, is pushed into the FTI config
    # file through the external fti_config_set helper.
    if [ ! -z $iolib ]; then
        fti_config_set 'ckpt_io' "$iolib"
    fi
    if [ ! -z $head ]; then
        fti_config_set 'head' "$head"
    fi
    if [ ! -z $keep ]; then
        fti_config_set 'keep_last_ckpt' "$keep"
    fi
    if [ ! -z $keepl4 ]; then
        fti_config_set 'keep_l4_ckpt' "$keepl4"
    fi
}
itf_param_parse() {
    # Parse the parameters passed against the ones registered in ITF
    #
    # Parametes:
    #   $@ The parameters to be parsed
    #
    # Returns:
    #   0 on success; 1 with itf_error set for an unknown or missing argument
    #
    # Usage:
    #   Use this function after registering the parameters with:
    #   - param_register
    #
    #   This function will parse "--name value" pairs and set up a
    #   variable for each registered name.
    #
    # Example:
    #   param_register 'iolib' 'head'
    #   itf_param_parse --iolib 1 --head 0
    #   echo $iolib # should output 1
    #   echo $head  # should output 0

    # Define all variables from input parameters
    while [ $# -gt 1 ]; do
        local _found='false'
        for p in "${itf_paramv[@]}"; do
            if [ "$1" == "--$p" ]; then
                # printf -v assigns to the named variable without the
                # code-injection risk of the previous 'eval $p=$2'.
                printf -v "$p" '%s' "$2"
                _found='true'
            fi
        done
        if [ "$_found" == 'false' ]; then
            itf_error="Invalid argument $1"
            return 1
        fi
        shift
        shift
    done
    # Check if all required arguments were found
    for p in "${itf_paramv[@]}"; do
        if [ -z "${!p}" ]; then
            itf_error="Argument '$p' not found"
            return 1
        fi
    done
}
| true
|
9c491a13898b3ad2cf4a877e3a70c1d8fe66bed0
|
Shell
|
DougPaTo/kingit
|
/Backup/envio_67.sh
|
UTF-8
| 3,622
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Record date and time of the backup start
COMECO=`date +%F" as "%X`
# Where the backup log will be stored
LOG=/bkp_sql/logs/ENVIO_67_`date +%H%M-%F`.log
# Backup date, used in the e-mail subject
DATA=`date +%F`
# Backup description
TIPOBKP="Banco para 10.0.99.67"
DESCR="Copia do Banco de Dados SQL do RM para o servidor de testes e homologação 10.0.99.67"
# Copy source and destination.
ORIGEM="/bkp_sql/SQL_RM/"
# NOTE(review): the glob is inside double quotes, so it is passed
# literally to ls and will not expand — verify this actually matches.
ARQUIVO=$(ls -t "/bkp_sql/SQL_RM/RMPRODUCAO*" | sed -n "1p")
DESTINO=/media/sql67/
# CIFS share that will be mounted as the destination
COMPARTILHAMENTO="//10.0.99.67/BKP_ONTEM"
# NOTE(review): plaintext credentials in the script — consider a
# credentials file with mount.cifs's credentials= option.
USUARIO="Administrador"
SENHA="InfoMatriz193122"
# E-mail recipients
DESTINATARIOS="contato@kingit.com.br,renan.petrucci@engeform.com.br,yudy@kingit.com.br,willyam.neves@engeform.com.br"
# Mount the network share
mount.cifs $COMPARTILHAMENTO $DESTINO -o user=$USUARIO,pass=$SENHA
## Remove files from the destination
rm /media/sql67/*
# E-mail headers (the log doubles as the message body for ssmtp)
echo To: $DESTINATARIOS >> $LOG
echo From: backup.servidor >> $LOG
echo Subject: $TIPOBKP $DATA >> $LOG
# End of e-mail configuration.
echo Backup Iniciado, e seguindo... Aguarde
echo " " >> $LOG
echo " " >> $LOG
echo "|##############################################" >> $LOG
echo $DESCR >> $LOG
echo " " >> $LOG
#echo "Quantidade ocupada na Origem:  " `du -sh $ORIGEM` >> $LOG
echo " " >> $LOG
echo "Backup iniciado em: $COMECO" >> $LOG
echo " " >> $LOG
# Backup with versioning (disabled)
#rsync -britzvhl --progress --compress-level=9 --backup-dir=/bkp_sql/bkp_dif/bkp_rm_`date +%F"_AS_"%X` --suffix=.old $ORIGEM $DESTINO >> $LOG
# Backup without versioning
rsync -avzh --progress $ARQUIVO $DESTINO >> $LOG
TERMINO=`date +%F" as "%X`
echo " " >> $LOG
echo "Backup iniciado em: $COMECO" >> $LOG
echo "Backup terminado em: $TERMINO" >> $LOG
#echo "Espaço Contratado: 150Gb" >> $LOG
echo "Espaço utilizado no Destino: " `df -h $DESTINO` >> $LOG
echo "|##############################################" >> $LOG
echo " " >> $LOG
echo " " >> $LOG
# Daily rotation: (intended to) prune last month's backups, then archive
# today's dump under /bkp_sql/Historico/<date>.
function BackupNormal(){
# Disabled age-based pruning, kept as a quoted here-doc (never executed).
: <<'CORTE'
#Remove backups antigos
if [ $((`ls /bkp_sql/Historico/ | wc -l`)) -ge 30 ]; then
	echo "Existem Backups com mais de 30 dias" >> $LOG
	echo "Serão removidos os seguintes backups antigos: " >> $LOG
	for i in `find /bkp_sql/Historico/ -name "*" -type d -mtime +30`; do
		echo "$i" >> $LOG
		rm -R $i
	done
fi
CORTE
PathBackup="/bkp_sql/Historico/"
MesAtual=`date +%m`
# NOTE(review): this month arithmetic looks broken — `let` on a
# zero-padded value like 08/09 fails (octal), month 01 yields 00, and
# months 10-12 yield 09/010/011.  Confirm intent before relying on it.
let MesAtual=MesAtual-1
MesAtual=0$MesAtual
# NOTE(review): the second sed range pattern contains a bare `$-` and
# the actual rm is commented out, so this loop only prints candidates.
for delete in $(ls -t $PathBackup | tac | sed -n "/[0-9][0-9][0-9][0-9]-$MesAtual-[0-9][0-9]/,/[0-9][0-9][0-9][0-9]-$-[0-9][0-9]/!p"); do
	echo "Removi este backup: $delete"
	#rm -R $PathBackup$delete
done
# Archive the previous backup in a per-date folder
mkdir -p /bkp_sql/Historico/$DATA
mv $ARQUIVO /bkp_sql/Historico/$DATA/
# Remove the files copied from the source (disabled)
#rm -R /media//*
}
# Monthly rotation: keep a copy of today's dump under /bkp_sql/Mensal
# as well as the usual /bkp_sql/Historico per-date archive.
function BackupMensal(){
mkdir -p /bkp_sql/Historico/$DATA
mkdir -p /bkp_sql/Mensal/$DATA
cp $ARQUIVO /bkp_sql/Mensal/$DATA/
mv $ARQUIVO /bkp_sql/Historico/$DATA/
# Remove the files copied from the source (disabled)
#rm -R /media/SGBD/*
}
# Decide whether today's run is the monthly or the daily backup: day 01
# of the month triggers the monthly archive copy.
function VerificaAntigos(){
# Extract the day-of-month from $DATA (YYYY-MM-DD).  Force base-10 with
# the 10# prefix: bash arithmetic otherwise treats zero-padded values as
# octal, so days 08 and 09 would abort with "value too great for base".
local dia
dia=$(echo $DATA | cut -d- -f3)
if [ $((10#$dia)) -eq 1 ]; then
	echo "Realização de backup Mensal" >> $LOG
	echo $DATA
	BackupMensal
else
	echo "Backup Diário" >> $LOG
	echo $DATA
	BackupNormal
fi
}
# Check the backups and move them to the correct location
VerificaAntigos
# Send the log by e-mail once the backup finishes
ssmtp $DESTINATARIOS < $LOG
# Unmount the network share
umount $DESTINO
| true
|
20c857332c8057e2a44f134369b7a23760c09639
|
Shell
|
latifkabir/Computation_using_Fortran90
|
/f90/double_complex.sh
|
UTF-8
| 510
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Compile, link, run and clean up the double_complex Fortran test problem.
#
gfortran -c -g double_complex.f90 >& compiler.txt
if [ $? -ne 0 ]; then
  # Bare 'exit' propagates the status of the last command ($?).
  echo "Errors compiling double_complex.f90"
  exit
fi
rm compiler.txt
#
gfortran double_complex.o
if [ $? -ne 0 ]; then
  echo "Errors linking and loading double_complex.o"
  exit
fi
rm double_complex.o
#
mv a.out double_complex
./double_complex > double_complex_output.txt
if [ $? -ne 0 ]; then
  echo "Errors running double_complex"
  exit
fi
rm double_complex
#
echo "The double_complex test problem has been executed."
| true
|
5d8e26e9c3e91449467c23da77db08e6e77e1293
|
Shell
|
liucougar/quickstart
|
/modules/bootloader_x86.sh
|
UTF-8
| 2,716
| 3.359375
| 3
|
[] |
no_license
|
# $Id$
sanity_check_config_bootloader() {
	# Nothing to do when the user configured a bootloader explicitly.
	[ -n "${bootloader}" ] && return 0
	# Otherwise warn and fall back to grub on the first device.
	warn "bootloader not set...assuming grub"
	bootloader="grub:0"
}
# Install GRUB2 into the chroot and generate its config.
# Requires the global bootloader_install_device; uses the external
# spawn_chroot/error/die helpers.  Returns 1 on install failure.
configure_bootloader_grub_2() {
	#local boot_root="$(get_boot_and_root)"
	#local boot="$(echo ${boot_root} | cut -d '|' -f1)"
	[ -z "${bootloader_install_device}" ] && die "no bootloader_install_device is specified" #bootloader_install_device="$(get_device_and_partition_from_devnode ${boot} | cut -d '|' -f1)"
	if ! spawn_chroot "grub2-install ${bootloader_install_device}"; then
		error "could not install grub to ${bootloader_install_device}"
		return 1
	fi
	spawn_chroot "grub2-mkconfig -o /boot/grub/grub.cfg" || die "failed to generate grub.cfg file"
}
# Write a legacy GRUB grub.conf inside the chroot (one entry per
# installed kernel/initrd pair) and install grub to the boot device.
# Uses external helpers: get_boot_and_root, get_kernel_and_initrd,
# get_device_and_partition_from_devnode, map_device_to_grub_device,
# spawn_chroot, error.  Returns 1 on any failure.
configure_bootloader_grub() {
	echo -e "default 0\ntimeout 30\n" > ${chroot_dir}/boot/grub/grub.conf
	# Helpers return '|'-separated fields; split them with cut.
	local boot_root="$(get_boot_and_root)"
	local boot="$(echo ${boot_root} | cut -d '|' -f1)"
	local boot_device="$(get_device_and_partition_from_devnode ${boot} | cut -d '|' -f1)"
	local boot_minor="$(get_device_and_partition_from_devnode ${boot} | cut -d '|' -f2)"
	local root="$(echo ${boot_root} | cut -d '|' -f2)"
	local kernel_initrd="$(get_kernel_and_initrd)"
	for k in ${kernel_initrd}; do
		local kernel="$(echo ${k} | cut -d '|' -f1)"
		local initrd="$(echo ${k} | cut -d '|' -f2)"
		# Kernel version string, e.g. kernel-genkernel-x86-3.2.1 -> 3.2.1
		local kv="$(echo ${kernel} | sed -e 's:^kernel-genkernel-[^-]\+-::')"
		echo "title=Gentoo Linux ${kv}" >> ${chroot_dir}/boot/grub/grub.conf
		local grub_device="$(map_device_to_grub_device ${boot_device})"
		if [ -z "${grub_device}" ]; then
			error "could not map boot device ${boot_device} to grub device"
			return 1
		fi
		# GRUB partitions are zero-based, hence the "- 1".
		echo -en "root (${grub_device},$(expr ${boot_minor} - 1))\nkernel /boot/${kernel} " >> ${chroot_dir}/boot/grub/grub.conf
		#[ -z "${grub_kernel_root}" ] && grub_kernel_root=${root}
		local grub_kernel_root="$(echo ${boot_root} | cut -d '|' -f3)"
		if [ -z "${initrd}" ]; then
			echo "root=${grub_kernel_root}" >> ${chroot_dir}/boot/grub/grub.conf
		else
			# genkernel initrd boot: real_root points at the actual rootfs.
			echo "root=/dev/ram0 init=/linuxrc ramdisk=8192 real_root=${grub_kernel_root} ${bootloader_kernel_args}" >> ${chroot_dir}/boot/grub/grub.conf
			echo -e "initrd /boot/${initrd}\n" >> ${chroot_dir}/boot/grub/grub.conf
		fi
	done
	# grub-install needs a sane /etc/mtab inside the chroot.
	if ! spawn_chroot "grep -v rootfs /proc/mounts > /etc/mtab"; then
		error "could not copy /proc/mounts to /etc/mtab"
		return 1
	fi
	[ -z "${bootloader_install_device}" ] && bootloader_install_device="$(get_device_and_partition_from_devnode ${boot} | cut -d '|' -f1)"
	if ! spawn_chroot "grub-install ${bootloader_install_device}"; then
		error "could not install grub to ${bootloader_install_device}"
		return 1
	fi
}
| true
|
7d0c32c2e19597110877b37235bb9fd14089fbff
|
Shell
|
kdwinter/vfio-setup
|
/check_iommu.sh
|
UTF-8
| 229
| 2.859375
| 3
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
# List every PCI device grouped by its IOMMU group (useful for VFIO
# passthrough planning).
# nullglob: if there are no IOMMU groups, the loop simply does not run.
shopt -s nullglob
for d in /sys/kernel/iommu_groups/*/devices/*; do
    # Extract the group number from .../iommu_groups/<n>/devices/<addr>
    n=${d#*/iommu_groups/*}; n=${n%%/*}
    printf 'IOMMU Group %s ' "$n"
    # ${d##*/} is the PCI address; -nns shows numeric vendor:device IDs.
    lspci -nns "${d##*/}"
done;

# vim:ts=4 sw=4 sts=4 expandtab
| true
|
210c370ed47ca096c54ab9ba782ff5c71ed0001b
|
Shell
|
parthg13/kube-assignment
|
/assignment4/file-creater-service/script.sh
|
UTF-8
| 345
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh

# Cron-driven demo: on even epoch seconds write a.html, on odd seconds
# write b.html, each stamped with the creation date.
DATE=`date`
TIMESTAMP=`date +%s`

# POSIX arithmetic expansion instead of forking `expr`; the result is
# always 0 or 1, compared numerically with -eq.
if [ $(( TIMESTAMP % 2 )) -eq 0 ]
then
    echo 'File A content.File created date: '+$DATE > /app/a.html
else
    echo 'File B content.File created date: '+$DATE > /app/b.html
fi

# echo 'File created date: '+$DATE+ > a.txt
# echo `date +%s`
| true
|
a7d3ea6d46e8e2290d7120877c087ab38da440a8
|
Shell
|
SergioBertolinSG/performance_tests_oc_env
|
/run_performance_tests.sh
|
UTF-8
| 933
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the WebDAV benchmark against a local ownCloud checkout and store
# the results as JSON, tagged with the core commit under test.
if [ $# -ne 1 ];
then
  echo "This script needs the owncloud core commit which tests are running against"
  exit
fi
COMMIT=$1
PORT=8080
echo $PORT
# Serve the checkout with PHP's built-in web server in the background.
sudo php -S localhost:$PORT -t /var/www/html/oc_server &
PHPPID=$!
echo "Process of the owncloud server: $PHPPID"
# Give the server a moment to come up.
sleep 2
if [ ! -d /srv/performance_tests ]; then
  mkdir /srv/performance_tests
fi
currentTime=$(date +%Y-%m-%d.%H-%M-%S)
# NOTE(review): hard-coded admin/admin credentials — acceptable only on
# a disposable test instance.
export DAV_USER=admin
export DAV_PASS=admin
echo "$DAV_USER : $DAV_PASS will run tests"
/opt/administration/performance-tests-c++/webdav-benchmark http://localhost:$PORT/remote.php/webdav/ -csv > /srv/performance_tests/"$currentTime"_"$COMMIT".csv
# Convert the raw CSV into the merged JSON format, then drop the CSV.
php /srv/tools/createFileMergingTestData.php $COMMIT $currentTime /srv/performance_tests/"$currentTime"_"$COMMIT".csv /srv/tools/stats_fake.json > /srv/performance_tests/"$currentTime"_"$COMMIT".json
rm /srv/performance_tests/"$currentTime"_"$COMMIT".csv
# Tear down the background PHP server.
sudo kill $PHPPID
| true
|
35a7565d3f7897258f8bff95879912176aefbc71
|
Shell
|
deadcrew/deadfiles
|
/bin/headset
|
UTF-8
| 356
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Switch the given PulseAudio source (default: onboard analog input) to
# the headset mic and crank its recording volume; $VOLUME overrides the
# default of 65536 (PulseAudio's 100%).
input=${1:-alsa_input.pci-0000_00_1f.3.analog-stereo}
# disable noise
amixer -q -c PCH cset 'name=Headphone Mic Boost Volume' 1
# use headset mic for recording
pactl set-source-port "$input" analog-input-headset-mic
# max recording volume
pactl set-source-volume "$input" ${VOLUME:-65536}
notify-send " " "headset: volume ${VOLUME:-65536}"
| true
|
80eb992ded8ffb4cf25e1e1b83df7bd5deea438d
|
Shell
|
joren/my-zsh
|
/custom/projects.zsh
|
UTF-8
| 573
| 3.265625
| 3
|
[] |
no_license
|
# Quick project navigation for zsh: `p NAME` cds into the first matching
# project directory, `p open NAME` opens it in $EDITOR instead.
PROJECT_PATHS=(~/Projects/Openminds ~/Projects/Openminds/iwt ~/Projects/PotatoFactory ~/Projects/Fileflambe ~/Projects/ArrrrCamp)
function project () {
  cmd="cd"
  file=$1
  if [[ "open" == "$file" ]] then
    file=$2
    # zsh ${(s: :)...} splits $EDITOR on spaces so flags survive as words.
    cmd=(${(s: :)EDITOR})
  fi
  for project in $PROJECT_PATHS; do
    if [[ -d $project/$file ]] then
      $cmd "$project/$file"
      unset project # Unset project var
      return
    fi
  done
  echo "No such project $1"
}
alias p="project"
# Tab-completion: offer every directory found under the project paths.
function _project () {
  compadd `/bin/ls -l $PROJECT_PATHS | awk '{ print $9 }'`
}
compdef _project project
# NOTE(review): this export looks unrelated to project navigation —
# confirm it belongs in this file.
export CC=gcc
| true
|
9c378cee7b0d3cb01065b5a5b9e13672fadd056a
|
Shell
|
viking17/OS-Lab
|
/os1.sh
|
UTF-8
| 66
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Print this script's arguments in reverse order, one per line.
len=$#
for(( i=$len;i>0;i-- ))do
	# ${!i} is bash indirect expansion: the value of positional
	# parameter number $i — no eval (and its injection risk) needed.
	printf '%s\n' "${!i}"
done
| true
|
f0f1e6bf2e08fb94e5999dc0fcea4f5d285cc454
|
Shell
|
theallen77/plugin.video.censoredtv
|
/src/make.sh
|
UTF-8
| 4,158
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
# Zip up the code to distribute to Kodi. Don't use Github's download zips, they
# don't work at all!!

# First remove any remaining data
rm -f Censored.zip

# Copy the license file
cp ../LICENSE ./plugin.video.censoredtv/LICENSE.md

# Rename the screenshot files -- their names clash in the "DOS 8.3" zipping
mv ./plugin.video.censoredtv/resources/media/screenshot-01.jpg ./plugin.video.censoredtv/resources/media/01ss.jpg
mv ./plugin.video.censoredtv/resources/media/screenshot-02.jpg ./plugin.video.censoredtv/resources/media/02ss.jpg
mv ./plugin.video.censoredtv/resources/media/screenshot-03.jpg ./plugin.video.censoredtv/resources/media/03ss.jpg

# Do the zipping. In Windows, the following command will probably be okay:
#
#     zip -rq ./Censored.zip ./plugin.video.censoredtv
#
# (and the renaming pass below can be omitted). On Linux we need to
# use the -k option, which stores FAT-style "DOS 8.3" names.
zip -rqk ./Censored.zip ./plugin.video.censoredtv

# Done with the license file and screenshots
rm -f ./plugin.video.censoredtv/LICENSE.md
mv ./plugin.video.censoredtv/resources/media/01ss.jpg ./plugin.video.censoredtv/resources/media/screenshot-01.jpg
mv ./plugin.video.censoredtv/resources/media/02ss.jpg ./plugin.video.censoredtv/resources/media/screenshot-02.jpg
mv ./plugin.video.censoredtv/resources/media/03ss.jpg ./plugin.video.censoredtv/resources/media/screenshot-03.jpg

# Hack to get zip working in Linux. See:
#
#   https://superuser.com/questions/898481/how-to-create-a-zip-file-with-files-in-fat-format-on-linux
#
# Zipping with "-k" produced FAT 8.3 names, so restore every real
# filename inside the archive with "7z rn".  Each data line below is
# "<name-as-zipped> <real-name>"; the order matters (a directory is
# renamed before its children are addressed) and no path contains spaces.
while read -r zipped real; do
	7z rn Censored.zip "$zipped" "$real" 1> /dev/null
done <<'EOF'
PLUGIN.VID plugin.video.censoredtv
plugin.video.censoredtv/LICENSE.MD plugin.video.censoredtv/LICENSE.md
plugin.video.censoredtv/MAIN.PY plugin.video.censoredtv/main.py
plugin.video.censoredtv/ADDON.XML plugin.video.censoredtv/addon.xml
plugin.video.censoredtv/RESOURCE plugin.video.censoredtv/resources
plugin.video.censoredtv/resources/SETTINGS.XML plugin.video.censoredtv/resources/settings.xml
plugin.video.censoredtv/resources/__INIT__.PY plugin.video.censoredtv/resources/__init__.py
plugin.video.censoredtv/resources/MEDIA plugin.video.censoredtv/resources/media
plugin.video.censoredtv/resources/LIB plugin.video.censoredtv/resources/lib
plugin.video.censoredtv/resources/LANGUAGE plugin.video.censoredtv/resources/language
plugin.video.censoredtv/resources/media/01SS.JPG plugin.video.censoredtv/resources/media/screenshot-01.jpg
plugin.video.censoredtv/resources/media/02SS.JPG plugin.video.censoredtv/resources/media/screenshot-02.jpg
plugin.video.censoredtv/resources/media/03SS.JPG plugin.video.censoredtv/resources/media/screenshot-03.jpg
plugin.video.censoredtv/resources/media/FANART.JPG plugin.video.censoredtv/resources/media/fanart.jpg
plugin.video.censoredtv/resources/media/ICON.PNG plugin.video.censoredtv/resources/media/icon.png
plugin.video.censoredtv/resources/lib/RUN_ADDO.PY plugin.video.censoredtv/resources/lib/run_addon.py
plugin.video.censoredtv/resources/lib/__INIT__.PY plugin.video.censoredtv/resources/lib/__init__.py
plugin.video.censoredtv/resources/language/RESOURCE.LAN plugin.video.censoredtv/resources/language/resource.language.en_gb
plugin.video.censoredtv/resources/language/resource.language.en_gb/STRINGS.PO plugin.video.censoredtv/resources/language/resource.language.en_gb/strings.po
EOF

# The zip is ready! Rename it manually and copy to ../zips/ directory.
| true
|
9d2a908b4098d09e0d6159bd1acdb992c0d9067d
|
Shell
|
YuuK10/women-in-tech-c
|
/scripts/editcode.sh
|
UTF-8
| 257
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
# Open the player's solution for the current level in vim, seeding it
# from the language-specific sample on first use.
# __GAME_PATH__ is a template placeholder replaced at install time.
__GAME_PATH__
cd $game_path
lvl=$(cat data/current_level)
lang=$(cat data/config/language)
# Non-empty when the player already has a working copy.
check_file="$(ls src | grep player_function.c)"
# Quote and use -n: the previous bare [ $check_file ] broke when grep
# matched more than one word.
if [ -n "$check_file" ]
then
vim src/player_function.c
else
cp samples/$lang/$lvl.c src/player_function.c
fi
| true
|
efbc50757bba25e9dbcb1a69fcd421a4156b2f08
|
Shell
|
lydianblues/jstree-rails
|
/INSTALLER.sh
|
UTF-8
| 2,259
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copy the images, assets, and javascripts into the rails-jquery gem.
# This is the path to the original JsTree GitHub checkout:
# git://github.com/vakata/jstree.git
SRC_DIR=/opt/gems/jstree
# Everything that we copy from the original jstree distribution goes here.
ASSET_DIR=./vendor/assets
# Additional plugins that are not part of the 'official' JsTree distribution.
PLUGINS_DIR=./plugins
# Start from a clean asset tree every run.
rm -rf $ASSET_DIR
mkdir -p $ASSET_DIR/stylesheets/jstree/themes/default
mkdir -p $ASSET_DIR/stylesheets/jstree/themes/default-rtl
mkdir -p $ASSET_DIR/images/jstree/themes/default
mkdir -p $ASSET_DIR/images/jstree/themes/default-rtl
mkdir -p $ASSET_DIR/javascripts/jstree
#
# By design, plugins modified by Quosap overwrite those
# in the original distribution. We can't simply create
# a new contextmenu plugin with a different name, for example,
# because the name "contextmenu" is referred to in vakata.
#
cp $SRC_DIR/src/*.js $ASSET_DIR/javascripts/jstree
cp $PLUGINS_DIR/* $ASSET_DIR/javascripts/jstree
# Sprockets manifest for the asset pipeline.
# Caution, the order of these matters.
cat > $ASSET_DIR/javascripts/jstree/index.js <<__EOF__
//= require jstree/vakata.js
//= require jstree/jstree.js
//= require jstree/jstree.checkbox.js
//= require jstree/jstree.contextmenu.js
//= require jstree/jstree.dnd.js
//= require jstree/jstree.hotkeys.js
//= require jstree/jstree.html.js
//= require jstree/jstree.json.js
//= require jstree/jstree.rules.js
//= require jstree/jstree.sort.js
//= require jstree/jstree.state.js
//= require jstree/jstree.themes.js
//= require jstree/jstree.ui.js
//= require jstree/jstree.unique.js
//= require jstree/jstree.xml.js
//= require jstree/jstree.helpers.js
__EOF__
# You will need this somewhere in your javascript to
# select a theme.
# $.jstree.THEMES_DIR = '/assets/jstree/themes/';
# Copy theme images.
cp $SRC_DIR/src/themes/default/{*.gif,*.png} \
$ASSET_DIR/images/jstree/themes/default
cp $SRC_DIR/src/themes/default-rtl/{*.gif,*.png} \
$ASSET_DIR/images/jstree/themes/default-rtl
# Copy theme stylesheets.
cp $SRC_DIR/src/themes/default/*.css \
$ASSET_DIR/stylesheets/jstree/themes/default
cp $SRC_DIR/src/themes/default-rtl/*.css \
$ASSET_DIR/stylesheets/jstree/themes/default-rtl
# Mirror the docs tree into the current directory via a tar pipe.
(cd $SRC_DIR && tar cf - docs ) | tar xf -
| true
|
c5254556ad39501682a564e64503c12e326b8344
|
Shell
|
chadchabot/dotfiles
|
/bootstrap.sh
|
UTF-8
| 3,637
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# inspired by everyone else who's ever done such a thing, but mostly Paul Vilchez
# who turned me on to this whole dotfiles thing, unbeknownst to him
source ./lib.sh
source ./osx/homebrew.sh
set -e
set -u
bot "So what are we doing today?"
echo "1) New system setup - install everything!"
echo "2) Update dotfiles (terminal, vim, bash, etc)"
echo "3) Update OS X system prefs"
echo "4) Update OS X applications (via Homebrew)"
echo "5) Nuke it all from orbit"
echo "6) Exit"
read option
#does bash have named hashes? I'll check later
#for now, this will work.
#Tasks are, in order:
#0 - dotfiles
#1 - OS X system prefs
#2 - homebrew stuff
declare -a tasks=(0 0 0)
# TODO: set up a named array for each installation step and
# have each trigger the associated dotfile update setp
case $option in
1)
echo "you chose new system setup"
tasks[0]=1
tasks[1]=1
tasks[2]=1
;;
2)
echo "dotfiles only"
tasks[0]=1
;;
3)
echo "OS X system prefs"
tasks[1]=1
;;
4)
echo "OS X applications"
tasks[2]=1
;;
5)
echo "Nuke it from orbit"
echo "This hasn't been implemented yet."
exit 0
;;
6)
echo "Exit"
exit 0
;;
*)
echo "Unrecognized option"
exit 1
;;
esac
# Execute whichever tasks were selected above.
# NOTE(review): bot/ok/action, install_homebrew, install_brews and
# install_casks are helper functions assumed to be defined earlier in
# this file — confirm.
#bash and command line setup
# vim setup
if [ ${tasks[0]} = 1 ]; then
	./vim-setup.sh
	./term-setup.sh
fi
# os x setup
if [ ${tasks[1]} = 1 ]; then
	bot "Setting up sensible OS X defaults"
	pushd osx >/dev/null 2>&1
	sudo ./set_defaults.sh
	popd >/dev/null 2>&1
	#xcode-select --install install command line tools as part of dotfiles setup?
	ok "OS X defaults are finished\n\tYou will definitely need to restart for most of these to take effect."
fi
# application download and homebrew stuff
if [ ${tasks[2]} = 1 ]; then
	# homebrew or apt-get (in a far off universe where I may possibly choose a Ubuntu machine again)
	PLATFORM="$(uname -s)"
	case "$(uname -s)" in
	"Darwin")
		echo "You're running OS X. This is good."
		bot "Setting up Homebrew and those bits"
		# I know, I know. installing arbitrary stuff pulled via curl is bad news bears
		if test $(which brew)
		then
			ok "Homebrew already installed."
			action "Updating homebrew"
			brew update
		else
			action "Installing Homebrew now"
			echo "Soon this will install homebrew for you"
			install_homebrew
		fi
		action "Installing homebrew packages"
		#TODO: Ask if this is a full install (include personal apps) or a "work"
		# install, and I only want to install the essentials
		# Would be nice to save this as a preference (envvar?) and rely on that
		# as a default for runs of this command at a later time.
		install_brews
		install_casks
		ok "Finished with Homebrew and brews"
		;;
	"Linux")
		echo "You're running linux… good for you I guess?"
		# this should use apt-get instead, but we'll worry about that later, if ever
		;;
	esac
fi
#TODO: install good ruby gems/tools like rvm or rbenv (I forget which one is en vouge and the current golden child.
# look at: http://zanshin.net/2012/08/03/adding-sublime-text-2-settings-themes-plugins-to-dotfiles/
#sublime text prefs
#bot "Setting up Sublime Text prefs, if you're in to that kind of thing"
warn "You have one job to do manually.\nAdd \"[[ -r ~/.bashrc ]] && . ~/.bashrc\" to your ~/.bash_profile file in order to get all the alias and function goodies."
# FIX: only append each line once, so re-running this script doesn't
# fill ~/.bash_profile with duplicate entries (the original appended
# unconditionally on every run).
grep -qF '[[ -r ~/.bashrc ]] && . ~/.bashrc' ~/.bash_profile 2>/dev/null \
	|| echo '[[ -r ~/.bashrc ]] && . ~/.bashrc' >> ~/.bash_profile
grep -qF 'eval "$(rbenv init -)"' ~/.bash_profile 2>/dev/null \
	|| echo 'eval "$(rbenv init -)"' >> ~/.bash_profile
bot "Everything is done! Congrats"
| true
|
5da51108f475cc27a1434d4d55fda704fe82c117
|
Shell
|
seifudd/scripts
|
/01-rnaseq/fastqc-trimmomatic-hisat2-stringtie-featurecounts.sh
|
UTF-8
| 18,778
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
set -e # stop the script if a command fails
# Print a failure message to stderr and abort the whole script with
# exit status 1. All arguments are joined into the message.
fail() {
	printf 'FAIL: %s\n' "$*" >&2
	exit 1
}
# Positional arguments:
#   $1 SAMPLE   - sample name/prefix
#   $2 DATAPATH - directory holding the raw fastq files
#   $3 READ1 / $4 READ2 - fastq file names (relative to DATAPATH)
#   $5 REF      - HISAT2 index directory (expects genome_tran index)
#   $6 outdir   - output root directory
#   $7 numcpus  - thread count passed to the tools
SAMPLE=$1
DATAPATH=$2
READ1=$3
READ2=$4
REF=$5
outdir=$6
numcpus=$7
# Load the tool versions via environment modules (HPC cluster setup).
module load hisat
module load samtools
module load trimmomatic
module load fastqc
module load stringtie
module load subread
# Run FastQC on the trimmed read pair of $SAMPLE and write the reports
# under $outdir/$SAMPLE/fastqc_post_trimming. Reads the _1P/_2P files
# produced by do_trimmomatic from node-local scratch.
function do_fastqc () {
	date
	########################################################################################################################
	# if trimming, change DATAPATH
	DATAPATH="/lscratch/${SLURM_JOBID}"
	# if trimming, change $READ1 and $READ2
	READ1="${SAMPLE}_1P.fastq.gz"
	READ2="${SAMPLE}_2P.fastq.gz"
	# if trimming, change $out_dir to something like "fastqc_post_trimming" if you prefer
	# out_dir="fastqc"
	out_dir="fastqc_post_trimming"
	# fastqc -o output_dir [-f fastq|bam|sam] -c contaminant_file seqfile1 .. seqfileN
	# FIX: quote the expansion so sample/output names containing spaces
	# or glob characters cannot split into multiple mkdir arguments.
	mkdir -p "$outdir/$SAMPLE/$out_dir"
	fastqc -o "$outdir/$SAMPLE/$out_dir" \
	--nogroup \
	"$DATAPATH/$READ1" \
	"$DATAPATH/$READ2" \
	|| fail "fastqc failed"
	echo "fastqc done"
	########################################################################################################################
	date
}
# Adapter and quality trimming with Trimmomatic in paired-end mode.
# Output goes to node-local scratch (/lscratch/$SLURM_JOBID); the paired
# outputs ${SAMPLE}_1P/_2P.fastq.gz are what do_fastqc and do_hisat2
# read back. Reads shorter than 50 bp after trimming are dropped.
function do_trimmomatic () {
	date
	########################################################################################################################
	java -jar $TRIMMOJAR PE \
	-threads $numcpus \
	"$DATAPATH/$READ1" \
	"$DATAPATH/$READ2" \
	-baseout "/lscratch/${SLURM_JOBID}/${SAMPLE}.fastq.gz" \
	ILLUMINACLIP:"/usr/local/apps/trimmomatic/Trimmomatic-0.36/adapters/TruSeq3-PE-2.fa":2:30:10 \
	MINLEN:50
	echo "trimmomatic done"
	########################################################################################################################
	date
}
# Align the trimmed read pair with HISAT2, keep only properly-paired
# reads (samtools -f 3) that aligned uniquely (the perl filter keeps
# SAM header lines and records tagged NH:i:1), then coordinate-sort
# and index the resulting BAM as $SAMPLE.unique.bam.
function do_hisat2 () {
	date
	########################################################################################################################
	# if trimming, please change DATAPATH
	DATAPATH="/lscratch/${SLURM_JOBID}"
	# if trimming, please change $READ1 and $READ2
	READ1="${SAMPLE}_1P.fastq.gz"
	READ2="${SAMPLE}_2P.fastq.gz"
	out_dir="hisat2"
	mkdir -p $outdir/$SAMPLE/$out_dir
	# --downstream-transcriptome-assembly tailors reporting for the
	# transcript assembly step (do_stringtie).
	hisat2 -p $numcpus \
	-x $REF/genome_tran \
	--downstream-transcriptome-assembly \
	-1 "$DATAPATH/$READ1" \
	-2 "$DATAPATH/$READ2" \
	--rg-id $SAMPLE --rg SM:$SAMPLE \
	| samtools view -h -f 3 -O SAM - \
	| perl -nle 'print if m/^@(?:[A-Z]{2})\s|\bNH:i:1\b/' \
	| samtools sort -@ $numcpus \
	-o "$outdir/$SAMPLE/$out_dir/$SAMPLE.unique.bam" \
	-T /lscratch/${SLURM_JOB_ID}/${SAMPLE}_chunk -
	samtools index "$outdir/$SAMPLE/$out_dir/$SAMPLE.unique.bam"
	echo "hisat2 done"
	########################################################################################################################
	date
}
# Estimate transcript abundances for $SAMPLE with StringTie against the
# GENCODE v28 annotation (-e: quantify reference transcripts only,
# -B: also emit Ballgown tables) using the unique-alignment BAM from
# do_hisat2.
function do_stringtie () {
	date
	########################################################################################################################
	GTF="/data/NHLBI_BCB/bin/HISAT2-reference-genomes/GENCODE_human_v28/gencode.v28.annotation.gtf"
	bamfile="$outdir/$SAMPLE/hisat2/$SAMPLE.unique.bam"
	out_dir="stringtie"
	# FIX: quote the path so it cannot word-split or glob-expand.
	mkdir -p "$outdir/$SAMPLE/$out_dir"
	# only reference
	stringtie -p 4 \
	-o $outdir/$SAMPLE/$out_dir/$SAMPLE.gtf \
	-e -G $GTF \
	-l $SAMPLE \
	-v -B -c 10 -j 5 -f 0.1 \
	$bamfile
	# novel
	# stringtie -p 4 \
	# -o $outdir/$SAMPLE/$out_dir/$SAMPLE.gtf \
	# -G $GTF \
	# -l $SAMPLE \
	# -v -B -c 10 -j 5 -f 0.1 \
	# $bamfile
	# https://github.com/gpertea/stringtie
	# FIX: the original embedded StringTie's entire usage text here in an
	# 'echo -e "..." > /dev/null' statement — dead code executed and
	# discarded on every call. Removed; run 'stringtie -h' for the
	# option reference.
	echo "stringtie done"
	########################################################################################################################
	date
}
# Count reads per gene for $SAMPLE with featureCounts: exon features
# grouped by gene_id against the GENCODE v28 annotation, using the
# unique-alignment BAM from do_hisat2. Paired-end by default
# (-p fragments, -M multi-mappers, -O overlapping features); set
# end_num="single" for single-end data.
function do_featurecounts () {
	date
	########################################################################################################################
	GTF="/data/NHLBI_BCB/bin/HISAT2-reference-genomes/GENCODE_human_v28/gencode.v28.annotation.gtf"
	bamfile="$outdir/$SAMPLE/hisat2/$SAMPLE.unique.bam"
	out_dir="featurecounts"
	# FIX: quote the path so it cannot word-split or glob-expand.
	mkdir -p "$outdir/$SAMPLE/$out_dir"
	s=0 # -s strand-specific : 0 (unstranded), 1 (stranded) 2 (reversely stranded). 0 default.
	# -f -Q -M -O
	end_num="" # if single end sequencing, please change to "single"
	if [[ $end_num == "single" ]]
	then
		featureCounts -T $numcpus \
		-t exon \
		-g gene_id \
		-a $GTF \
		-s $s \
		-o $outdir/$SAMPLE/$out_dir/$SAMPLE.genefeatureCounts.txt \
		$bamfile
	else
		featureCounts -T $numcpus \
		-t exon \
		-g gene_id \
		-a $GTF \
		-s $s -p -M -O \
		-o $outdir/$SAMPLE/$out_dir/$SAMPLE.genefeatureCounts.txt \
		$bamfile
	fi
	# FIX: the original dumped featureCounts' full usage text here in an
	# 'echo -e "..." > /dev/null' statement — dead code executed and
	# discarded on every call. Removed; run 'featureCounts' with no
	# arguments for the option reference.
	echo "featurecounts done"
	########################################################################################################################
	date
}
# Merge the per-sample featureCounts outputs (column 7 of each
# *.genefeatureCounts.txt) into one gene x sample count matrix,
# gene.featurecount.txt, using tmp1/tmp2/tmp3 scratch files in the
# current directory.
function do_featurecounts_merge () {
	date
	########################################################################################################################
	sids="/data/NHLBI_BCB/Levine_Stew_Lab/06_Will_RNA-seq/sids.txt"
	out_dir="featurecounts"
	# First sample: gene IDs + its counts form the initial two-column table.
	for i in `cat $sids | cut -f1 | head -1`; do
		SAMPLE=$i
		echo -e "Gene\t$i" > tmp1
		cat $SAMPLE/$out_dir/$i.genefeatureCounts.txt | sed '1,2d' | sort -k1,1 | cut -f 1,7 >> tmp1
	done
	# Remaining samples: paste each count column onto the growing table.
	# NOTE(review): this assumes every file lists the same genes in the
	# same sorted order — confirm before trusting the merged matrix.
	for i in `cat $sids | cut -f1 | sed '1,1d' `; do
		SAMPLE=$i
		echo -e "$i" > tmp2
		cat $SAMPLE/$out_dir/$i.genefeatureCounts.txt | sed '1,2d' | sort -k1,1 | cut -f 7 >> tmp2
		paste tmp1 tmp2 > tmp3
		mv -f tmp3 tmp1
	done
	mv -f tmp1 gene.featurecount.txt
	rm -f tmp2
	rm -f tmp3
	########################################################################################################################
	date
}
# Collect HISAT2 alignment statistics from every sample's slurm stderr
# log into a single tab-separated table, then append a column-average
# row ("Average").
function do_get_hisat2_stats () {
	date
	########################################################################################################################
	# change log files directory to absolute path of logfiles
	logfiles="/data/NHLBI_BCB/Levine_Stew_Lab/06_Will_RNA-seq/02-fastqc-trimmomatic-hisat2-featurecounts/02-logfiles-trimmomatic-fastqc-hisat2-featurecounts"
	cd $logfiles
	# One scratch column per statistic: sample name, total reads,
	# overall alignment rate, unique alignments, unique alignment %.
	ls *.slurm.err.txt | cut -f 1 -d'.' > tmp1
	grep 'reads; of these' *.slurm.err.txt | awk '{print $1}' | cut -f 2 -d':' | awk '{ printf("%'"'"'d\n",$1); }' > tmp2
	grep 'overall alignment rate' *.slurm.err.txt | awk '{print $1}' | cut -f 2 -d':' > tmp3
	grep 'aligned concordantly exactly 1 time' *.slurm.err.txt | awk '{print $2}' | awk '{ printf("%'"'"'d\n",$1); }' > tmp4
	grep 'aligned concordantly exactly 1 time' *.slurm.err.txt | awk '{print $3}' | sed 's/(//g' | sed 's/)//g' > tmp5
	# BUG FIX: the original wrote the header and per-sample rows to
	# "alignmetn.stats" (typo) but appended the Average row to
	# "alignment.stats", splitting the summary across two files. A single
	# consistent file name is now used throughout.
	echo -e "Sample\tTotal_Reads\tOverall_Alignment_Rate\tUniq_alignment\tUniq_alignment_%" > alignment.stats
	paste tmp1 tmp2 tmp3 tmp4 tmp5 >> alignment.stats
	rm -f tmp*
	a1=`cat alignment.stats | sed 's/,//g' | sed '1,1d' | awk '{ sum += $2; n++ } END { print int(sum / n); }' | awk '{ printf("%'"'"'d\n",$1); }'`
	a2=`cat alignment.stats | sed 's/,//g' | sed '1,1d' | awk '{ sum += $3; n++ } END { print sum / n; }' | awk -F. '{print $1"."substr($2,1,2)"%"}'`
	a3=`cat alignment.stats | sed 's/,//g' | sed '1,1d' | awk '{ sum += $4; n++ } END { print int(sum / n); }' | awk '{ printf("%'"'"'d\n",$1); }'`
	a4=`cat alignment.stats | sed 's/,//g' | sed '1,1d' | awk '{ sum += $5; n++ } END { print sum / n; }' | awk -F. '{print $1"."substr($2,1,2)"%"}' `
	echo -e "Average\t$a1\t$a2\t$a3\t$a4" >> alignment.stats
	########################################################################################################################
	date
}
# Summarize the featureCounts log output (one *.slurm.err.txt per
# sample) into a single tab-separated table,
# gene.featurecount.summary.txt.
function do_get_featurecount_stats () {
	date
	########################################################################################################################
	sids="/data/NHLBI_BCB/Levine_Stew_Lab/06_Will_RNA-seq/sids.txt"
	# change log files directory to absolute path of logfiles
	logfiles="/data/NHLBI_BCB/Levine_Stew_Lab/06_Will_RNA-seq/02-fastqc-trimmomatic-hisat2-featurecounts/02-logfiles-trimmomatic-fastqc-hisat2-featurecounts"
	cd $logfiles
	summary="gene.featurecount.summary.txt"
	echo -e "Sample\tGene_Count\tTotal_fragments\tSuccessfully_assigned\t%" > $summary
	# One summary row per sample id (first column of the sids file).
	for sample in $(cut -f1 $sids); do
		log="$sample.slurm.err.txt"
		gene=$(grep 'Meta-features : ' $log | awk '{print $4}')
		total=$(grep 'Total fragments : ' $log | awk '{print $5}')
		assigned=$(grep 'Successfully assigned fragments : ' $log | awk '{print $6"\t"$7}')
		echo -e "$sample\t$gene\t$total\t$assigned" >> $summary
	done
	########################################################################################################################
	date
}
# Pipeline driver: comment/uncomment steps to choose what runs.
# Currently only the HISAT2 alignment-stats summary is executed.
# do_fastqc
# do_trimmomatic
# do_fastqc
# do_hisat2
# do_featurecounts
# do_featurecounts_merge
# do_stringtie
do_get_hisat2_stats
# do_get_featurecount_stats
| true
|
bbe6fad56369137f33da59fd73461b42e907e6ce
|
Shell
|
Sirherobrine23/Action-Debian_Package
|
/src/post_js.sh
|
UTF-8
| 321
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copy the built .deb package(s) into the target directory inside the
# checked-out repo (/tmp/repo/$INPUT_PATH) before upload.
# Env vars: INPUT_DEBUG ('true' dumps the environment), INPUT_PATH
# (directory relative to /tmp/repo), DEB_PATH (package path/glob).
# FIX: quote the variable and use '=' so an unset INPUT_DEBUG does not
# break the test ('[ == true ]' is a syntax error).
if [ "${INPUT_DEBUG:-}" = 'true' ];then
	env
fi
echo "Preparing to upload the file"
cd /tmp/repo
if cd "$INPUT_PATH";then
	echo "$PWD"
	echo 'inside the directory'
else
	echo 'Error entering the directory'
	find .
	exit 23
fi
# BUG FIX: the original had a stray 'fi' after this cp line, which made
# the whole script a bash syntax error so it could never run at all.
# $DEB_PATH is left unquoted on purpose so a glob pattern can expand.
cp -rfv $DEB_PATH ./
#
#
exit 0
| true
|
6fbff906edeb06b8102bfd48b902c2b03bc61a45
|
Shell
|
etsi-cti-admin/titan-docker
|
/from-sources/scripts/build_titan.bash
|
UTF-8
| 1,821
| 3.984375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Clone (or update) all TITAN repositories under ${TOP}/.. and build
# titan.core, installing into ${TTCN3_DIR}. Requires TOP and TTCN3_DIR
# in the environment.
set -e
#set -vx
clear
if [ -z "${TOP}" ]
then
	echo "Failed, TOP variable not defined, exit"
	exit 1
fi
CURDIR=`pwd`
TITAN_DIR=${TOP}/..
# Move to the right directory
if [ ! -d ${TITAN_DIR} ]
then
    echo "Titan directory does not exist, create it"
    # Create TITAN directories
    mkdir -p ${TITAN_DIR}
    # NOTE(review): with 'set -e' above, a failing mkdir exits the script
    # before this $? check can run — the check is effectively dead code.
    if [ ! "$?" -eq "0" ]
    then
	echo "Failed, TOP variable not defined, exit"
	exit 2
    fi
    cd ${TITAN_DIR}
    # Clone all TITAN repositories
    if [ ! -f ${CURDIR}/titan_repos.txt ]
    then
	echo "${HOME_BIN}/titan_repos.txt file does not exist, exit"
	rm -fr ${TOP}
	rm -fr ${TOP}/..
	exit 3
    fi
    TITAN_REPOS=`cat ${CURDIR}/titan_repos.txt`
    for i in ${TITAN_REPOS};
    do
	git clone $i
	# NOTE(review): also unreachable under 'set -e' — a failed clone
	# exits before this check.
	if [ ! "$?" -eq "0" ]
	then
	    echo "Failed to clone $i, exit"
	    exit 4
	fi
    done
else
    cd ${TITAN_DIR}
    # Update github folders
    DIRS=`find . -type d -name ".git" -exec dirname {} \;`
    for i in ${DIRS};
    do
	echo "Processing $i..."
	cd $i
	git fetch
	if [ ! "$?" -eq "0" ]
	then
	    echo "Failed to fetch $i, continue"
	else
	    git pull
	    if [ ! "$?" -eq "0" ]
	    then
		echo "Failed to pull $i, continue"
	    fi
	fi
	cd -
    done
fi
# Build TITAN core
export JNI=no
export GUI=no
export DEBUG=no
export GEN_PDF=no
if [ -d ${TTCN3_DIR} ]
then
    rm -fr ${TTCN3_DIR}
fi
mkdir ${TTCN3_DIR}
cd ./titan.core
# Persist the build options in Makefile.personal as well.
/bin/cat <<EOF > Makefile.personal
JNI:=no
GUI:=no
DEBUG:=no
GEN_PDF:=no
EOF
echo "Starting build..."
make clean
if [ "${OSTYPE}" == "cygwin" ]
then
    make -j
else
    make
fi
make install
echo "Build done"
# Go back to initial directory
cd ${CURDIR}
exit 0
| true
|
ff56f217d13268db910c964140c20e8cae6bea7d
|
Shell
|
SouthAfricaDigitalScience/boost-deploy
|
/deploy.sh
|
UTF-8
| 3,033
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# Build and install Boost with GCC + OpenMPI + Python + ICU support,
# write an environment-modules file for it, then smoke-test with a
# small C++ compile/run. Expects the *_VERSION / *_DIR variables to be
# provided by the CI job and the loaded modules.
. /etc/profile.d/modules.sh
module add deploy
# add the dependency modules for the deploy
module add bzip2
module add readline
module add gcc/${GCC_VERSION}
module add openmpi/${OPENMPI_VERSION}-gcc-${GCC_VERSION}
module add python/2.7.13-gcc-${GCC_VERSION}
module add icu/59_1-gcc-${GCC_VERSION}
# Boost source dirs use underscores (1_62_0) where VERSION uses dots.
REMOTE_VERSION=`echo ${VERSION} | sed "s/\\./\_/g"`
cd ${WORKSPACE}/${NAME}_${REMOTE_VERSION}/
echo "Cleaning"
./b2 --clean
echo "reststarting bootstrap"
./bootstrap.sh \
  --prefix=$SOFT_DIR/${NAME}-${VERSION}-mpi-${OPENMPI_VERSION}-gcc-${GCC_VERSION} \
  --with-toolset=gcc \
  --with-python-root=$PYTHON_DIR \
  --with-python=${PYTHON_DIR}/bin/python2.7 \
  --with-icu=${ICU_DIR} \
  --with-libraries=all
echo "Making mpi bindings"
echo "using mpi ;" >> project-config.jam
echo "Starting deploy build"
./b2 -d+2 install \
threading=multi \
link=static,shared runtime-link=shared,shared \
runtime-link=shared \
--debug-configuration \
-sMPI_PATH=${OPENMPI_DIR} \
-sBZIP2_BINARY=bz2 -sBZLIB_INCLUDE=${BZLIB_DIR}/include -sBZLIB_LIBDIR=${BZLIB_DIR}/lib \
-sPYTHON_PATH=${PYTHONHOME} -sPYTHON_INCLUDE=${PYTHON_DIR}/include -sPYTHON_LIBDIR=${PYTHON_DIR}/lib \
-sICU_PATH=${ICU_DIR} \
--prefix=$SOFT_DIR-gcc-${GCC_VERSION}-mpi-${OPENMPI_VERSION} \
--with-iostreams \
--with-python \
--with-mpi \
--with-atomic \
--with-chrono \
--with-container \
--with-context \
--with-coroutine \
--with-coroutine2 \
--with-filesystem \
--with-date_time \
--with-exception \
--with-graph \
--with-graph_parallel \
--with-log \
--with-locale \
--with-system \
--with-math \
--with-program_options \
--with-test --with-thread \
--with-timer \
--with-type_erasure \
--with-wave \
--with-random \
--with-regex \
--with-signals \
--with-serialization
echo "Creating module"
mkdir -p ${LIBRARIES}/${NAME}
# Now, create the module file for deployment
# NOTE(review): the help text below hard-codes "1.62.0." while the rest
# of the file uses $VERSION — confirm these stay in sync.
(
cat <<MODULE_FILE
#%Module1.0
## boost modulefile
##
proc ModulesHelp { } {
    puts stderr "\tAdds boost (1.62.0.) to your environment."
}
module-whatis "Sets the environment for using boost (1.62.0.) Built with GCC $GCC_VERSION and OpenMPI Version $OPENMPI_VERSION"
module add bzip2
module add readline
module add gcc/$GCC_VERSION
module add openmpi/$OPENMPI_VERSION-gcc-$GCC_VERSION
module add python/$PYTHON_VERSION-gcc-$GCC_VERSION
module add icu/59_1-gcc-$GCC_VERSION
setenv BOOST_VERSION $VERSION
setenv BOOST_DIR $::env(CVMFS_DIR)/$::env(SITE)/$::env(OS)/$::env(ARCH)/${NAME}/$VERSION-gcc-$GCC_VERSION-mpi-$OPENMPI_VERSION
setenv BOOST_ROOT $::env(BOOST_DIR)
setenv CFLAGS "$CFLAGS -I$::env(BOOST_DIR)/include -L$::env(BOOST_DIR)/lib"
prepend-path LD_LIBRARY_PATH $::env(BOOST_DIR)
MODULE_FILE
) > ${LIBRARIES}/${NAME}/${VERSION}-gcc-${GCC_VERSION}-mpi-${OPENMPI_VERSION}
module avail ${NAME}
module add ${NAME}/${VERSION}-gcc-${GCC_VERSION}-mpi-${OPENMPI_VERSION}
echo "LD_LIBRARY_PATH is : ${LD_LIBRARY_PATH}"
which g++
# Smoke test: compile and run a trivial program against the new install.
cd ${WORKSPACE}
echo "BOOST DIR is ${BOOST_DIR} ; SOFT_DIR is ${SOFT_DIR}"
ls -lht ${BOOST_DIR}
c++ -I${BOOST_DIR}/include -L${BOOST_DIR}/lib hello-world.cpp
./a.out
| true
|
0605ea2305ebdefed5eab934e4f794bb45e88930
|
Shell
|
delong45/Tantan-test-go
|
/stop.sh
|
UTF-8
| 152
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Stop the running Tantan server by looking up its PID(s) and sending
# SIGTERM.
pid=`ps --no-headers -FC tantan | awk '{print $2}'`
# FIX: fail early when no tantan process exists; otherwise 'kill' is
# invoked with no arguments and prints a usage error instead of the
# intended message.
if [ -z "$pid" ]
then
	echo "Failed to stop Tantan server"
	exit 1
fi
# $pid is intentionally unquoted: it may hold several PIDs.
kill $pid
if [ $? -ne 0 ]
then
	echo "Failed to stop Tantan server"
	exit 1
fi
| true
|
1f184a269e6e4c05a7e8c19d394100487b437d4c
|
Shell
|
grafi-tt/portage-overlay
|
/app-emulation/WALinuxAgent/files/openssl-wrapper
|
UTF-8
| 153
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
# Thin dispatcher around openssl: the 'cms' subcommand is routed to a
# sibling 'cms' helper that lives next to this script; every other
# invocation is forwarded to the real openssl binary unchanged.
if [ "$#" -eq 0 ]; then
	echo "no argument?" >&2
	exit 1
fi
case "$1" in
	cms)
		shift
		"$(dirname "$0")"/cms "$@"
		;;
	*)
		openssl "$@"
		;;
esac
| true
|
5218107b7fd44e5ce448150e016f8cf08379e9b5
|
Shell
|
SURFfplo/stack-idp
|
/startup.sh
|
UTF-8
| 1,534
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint for the SimpleSAMLphp IdP: template the nginx and
# SimpleSAMLphp configs from environment variables, (re)create the SAML
# certificate, then exec the container command.
set -e
# ### CONFIGURE NIGINX ###
# replace variables in default.conf
NGINX_CONF="/data/conf/default.conf"
sed -i "s~%PHP_SERVICE%~${PHP_STACK_SERVICE}_php~g" "$NGINX_CONF"
# ### CONFIGURE SIMPLESAMLPHP ###
# get admin password
# Default is "admin"; overridden by the contents of the file named by
# $SSP_ADMIN_PASS (e.g. a mounted secret) when it exists.
MY_PASSWORD=admin
if [ -f "$SSP_ADMIN_PASS" ]
then
	MY_PASSWORD=`cat $SSP_ADMIN_PASS`
fi
# replace variables in authsources.php
SSP_AUTH="/data/src/simplesamlphp/config/authsources.php"
sed -i "s~%SSP_LDAP_HOST%~$SSP_LDAP_HOST~g" "$SSP_AUTH"
sed -i "s~%SSP_LDAP_PORT%~$SSP_LDAP_PORT~g" "$SSP_AUTH"
sed -i "s~%SSP_LDAP_DOMAIN%~$SSP_LDAP_DOMAIN~g" "$SSP_AUTH"
# replace variables in config.php
SSP_CONF="/data/src/simplesamlphp/config/config.php"
sed -i "s~%SSP_BASEURL%~$SSP_BASEURL~g" "$SSP_CONF"
sed -i "s~%SSP_ADMIN_PASS%~$MY_PASSWORD~g" "$SSP_CONF"
sed -i "s~%SSP_CONTACT_NAME%~$SSP_CONTACT_NAME~g" "$SSP_CONF"
sed -i "s~%SSP_CONTACT_EMAIL%~$SSP_CONTACT_EMAIL~g" "$SSP_CONF"
sed -i "s~%SSP_THEME%~$SSP_THEME~g" "$SSP_CONF"
# ### CREATE CERTIFICATES ###
# Generate a fresh self-signed cert/key, then prefer a previously saved
# pair from /data/save when one exists (and save ours otherwise) so the
# IdP keeps a stable identity across container restarts.
cd /data/src/simplesamlphp/cert
openssl req -new -x509 -days 3652 -nodes -out server.crt -keyout server.pem -subj "/C=NL/ST=Utrecht/L=Utrecht/O=SURF/CN=www.surf.nl"
chmod 644 server.pem
if [ -f "/data/save/server.crt" ]
then
	cp -a /data/save/server.crt /data/src/simplesamlphp/cert
else
	cp -a /data/src/simplesamlphp/cert/server.crt /data/save
fi
if [ -f "/data/save/server.pem" ]
then
	cp -a /data/save/server.pem /data/src/simplesamlphp/cert
else
	cp -a /data/src/simplesamlphp/cert/server.pem /data/save
fi
# Hand off to the container command passed as arguments.
exec "$@"
| true
|
99a17ee36de4cfd9b927b47352efdb7168561396
|
Shell
|
luckyzwei/UnityProject
|
/sh/personal_public.sh
|
UTF-8
| 530
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy a personal dev server: choose IP / shared build dir / server id
# by owner name ($1), rebuild in that directory, then push the update
# via mgectl.
ARGS=$1
echo "args:1 ${ARGS}"
# FIX: quote "$ARGS" so an empty or whitespace argument cannot break
# the test, and reject unknown names instead of continuing with unset
# variables (the original would 'cd' to $HOME and call mgectl with
# empty arguments).
if [ "$ARGS" == "大熊" ]; then
	IP="127.0.0.1";
	SERVER_DIR="/mnt/daxiong_share/";
	SERVER_ID=101;
elif [ "$ARGS" == "昭齐" ]; then
	IP="127.0.0.1";
	SERVER_DIR="/mnt/zhaoqi_share/";
	SERVER_ID=102;
elif [ "$ARGS" == "陈萍" ]; then
	IP="127.0.0.1";
	SERVER_DIR="/mnt/chenping_share/";
	SERVER_ID=103;
else
	echo "unknown user: ${ARGS}"
	exit 1
fi
cd "$SERVER_DIR" || exit 1
make all
# FIX: plain assignment instead of the original ret=`echo $?`.
ret=$?
echo "ret: ${ret}"
if [ ${ret} -ne 0 ]; then
	echo "compile fail";
	exit 1
fi
echo "$IP"
echo "$SERVER_ID"
sh mgectl "update_server" "$IP" "$SERVER_ID"
| true
|
45a6a522d167bacad4a062e5388c9899e43d6f95
|
Shell
|
khdevnet/git-help
|
/hooks/commit-msg
|
UTF-8
| 307
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
# Git commit-msg hook: reject messages that do not start with an issue
# reference (issue-N / ISSUE-N) or a standard Merge/Revert prefix.
MSG="$1"
# FIX: test the file directly with grep -q instead of the original
# 'cat | grep | wc -c' pipeline, and quote "$MSG" so a missing argument
# fails loudly instead of making 'cat' silently read stdin.
if ! grep -qE '^(issue-[0-9]|ISSUE-[0-9]+|Merge|merge|Revert|revert)' "$MSG"; then
	cat "$MSG"
	echo "Your commit message must starts with the next words 'ISSUE-[digits]|issue-[digits]|Merge|merge|Revert|revert'"
	exit 1
fi
| true
|
06ff70787c6f5f5aa88f54acd72b4ee29fa90b95
|
Shell
|
nvzard/OS-LAB
|
/LAB_1/variables.sh
|
UTF-8
| 262
| 3.046875
| 3
|
[] |
no_license
|
# Shell-variables demo: predefined, positional, and user-defined vars.
#Pre-defined variables
echo $HOME
echo $PWD
#Positional Variables
echo $1 $2
#change value of positional variables
set -- 99 $2
echo $1 $2
# BUG FIX: the original line '$1=69' is not a valid assignment — it
# expands to a command like '99=69' and fails with "command not found".
# Positional parameters can only be changed through 'set'.
set -- 69 $2
echo $1 $2
#User Defined Variables
echo 'Enter two values:'
read a b
echo $a $b
echo 'Bonus variable:'
c=10
echo $c
| true
|
d73f88064b0558fb6d5756315a160cc1f7df18f6
|
Shell
|
gabrielott/dotfiles
|
/.local/bin/tohevc
|
UTF-8
| 2,247
| 3.953125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Bash is required because exporting functions isn't in the POSIX spec.
# Transcodes files to HEVC in order to save space. The quality should
# be noticeably worse, but acceptable. Encoding is done using nvenc
# and up to 3 videos will be transcoded concurrently.
# File sizes by resolution for a 35 minute video:
# - UHD: 2.23GB
# - 4K: 2.1GB
# - FHD: 918MB
# - HD: 393MB
# - 480p: 131MB
# - Worse than 480p: 52MB
# - Anything that doesn't fit the resolutions above exactly: 918MB
# Pick a target HEVC bitrate for one video from its width, then
# transcode it with nvenc (audio copied) — or, when the source bitrate
# is already at/below the target, just rename it to *.conf.<ext> via
# the user's ext/rex helpers.
transcode() {
	# Bitrates for each resolution:
	# BITRATE_TRASH is used for resolutions worse than 480p
	# and BITRATE_DEFAULT is used for anything that doesn't
	# match the other ones.
	BITRATE_DEFAULT=3500000
	BITRATE_UHD=8500000
	BITRATE_4K=8000000
	BITRATE_FHD=3500000
	BITRATE_HD=1500000
	BITRATE_SD=500000
	BITRATE_TRASH=200000
	# If the bitrate of the video multiplied by TRANSCODE_THRESHOLD
	# is less than the bitrate we would use, we don't actually
	# transcode it.
	TRANSCODE_THRESHOLD=1.1
	file="$1"
	out="$2"
	# ffprobe reports "width=...\nbit_rate=..."; strip the keys and join
	# the two values with '/'.
	info="$(ffprobe \
		-v error \
		-select_streams v:0 \
		-show_entries stream=width,bit_rate \
		-of default=noprint_wrappers=1 \
		"$file" \
		| sed 's/.*=//' \
		| tr '\n' '/')"
	width="$(echo "$info" | cut -d'/' -f1)"
	bitrate="$(echo "$info" | cut -d'/' -f2)"
	new_bitrate="$BITRATE_DEFAULT"
	case "$width" in
		4096) new_bitrate="$BITRATE_UHD" ;;
		2160) new_bitrate="$BITRATE_4K" ;;
		1920) new_bitrate="$BITRATE_FHD" ;;
		1280) new_bitrate="$BITRATE_HD" ;;
		854) new_bitrate="$BITRATE_SD" ;;
		# BUG FIX: the original tested "$bitrate" -lt 854 here, comparing
		# a bits-per-second value against the 480p pixel width; the width
		# is what must be tested.
		*) [ "$width" -lt 854 ] && new_bitrate="$BITRATE_TRASH" ;;
	esac
	# If the bitrate is already low enough, we just change
	# the extension to conf.ext, no need to transcode.
	# BUG FIX: effective_bitrate was computed but never used; per the
	# TRANSCODE_THRESHOLD comment above, the skip test must use it.
	effective_bitrate="$(awk "BEGIN {print int($TRANSCODE_THRESHOLD * $bitrate)}")"
	if [ "$effective_bitrate" -le "$new_bitrate" ]; then
		echo "Won't transcode $file, bitrate already low enough"
		ext="$(ext "$file")"
		rex "$file" "conf.$ext"
		return
	fi
	echo "Starting file $file with bitrate $new_bitrate"
	ffmpeg \
		-hide_banner \
		-y \
		-v error \
		-i "$file" \
		-c:v hevc_nvenc \
		-b:v "$new_bitrate" \
		-c:a copy \
		"$out"
	echo "File $file done"
}
# Export the function so the subshells GNU parallel spawns can call it.
export -f transcode
# Transcode up to 3 files concurrently; output name is <stem>.conv.mp4.
parallel -j3 --bar transcode '{}' '{.}.conv.mp4' ::: "$@"
| true
|
333b55323c047d4ed460a29e7fb536c1800c07cd
|
Shell
|
christophert/skitter
|
/cache/build.sh
|
UTF-8
| 1,288
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Obtain a TLS certificate for the cache service from an internal CFSSL
# CA, assemble the nginx cert bundle in /etc/ssl, then run nginx in the
# foreground.
SSL_HOSTNAME="${SSL_HOSTNAME:-cache}"
# get CA Certificate
curl -XPOST -H "Content-Type: application/json" -d '{"label": "primary"}' http://ca:8888/api/v1/cfssl/info > ca.json
# get the SSL hostname from ENV
sed -i "s/cache.skitter.app/$SSL_HOSTNAME/g" csr.json
# get cert from CA
curl -X POST -H "Content-Type: application/json" -d @csr.json \
	http://ca:8888/api/v1/cfssl/newcert > full_cert.json
# my jank method of extracting json content b/c it takes way too long to install
# modules (~5 minutes is to long) -- this is native to unix
# extract_json_data <json-file> <key> <out-file>
# Pulls the string value of "<key>" out of <json-file> with grep/sed and
# writes it (with \n escapes expanded) to <out-file>.
# NOTE(review): this hand-rolled JSON parser assumes the value is a
# simple string; prefer jq if it becomes available in the image.
extract_json_data () {
	cat $1 | \
	grep -Po "\"$2\""':.*?[^\\]\",' | \
	sed 's/"'"$2"'":"//g' | \
	sed ':a;N;$!ba;s/\n/ /g' | \
	sed 's/",.*//' | \
	sed 's/\\n/\n/g' > $3
}
# make the private key and certificate request
extract_json_data ca.json certificate ca.crt
extract_json_data full_cert.json certificate /etc/ssl/cache.pem
extract_json_data full_cert.json private_key certificate.key
cat ca.crt certificate.key >> /etc/ssl/cache.pem
# generate dhparam
if [ ! -f /etc/ssl/dhparam.pem ]; then
	openssl dhparam -out /etc/ssl/dhparam.pem 2048 2> /dev/null
fi
# clean unnecessary files
rm ca.json full_cert.json ca.crt certificate.key
# run nginx
/usr/sbin/nginx -g "daemon off;"
| true
|
d0fea8f91292307e3667769a7a88e4e0f0a73e8f
|
Shell
|
adamfeuer/ArtOfGratitude_app
|
/bin/gunicorn-django-test.sh
|
UTF-8
| 705
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash -x
# Launch the "gratitude" Django app's test instance under gunicorn via
# manage.py run_gunicorn, inside its dedicated virtualenv.
APP=gratitude
VIRTUALENV=/opt/gratitude-test
PORT=9100
LOGDIR=/var/log/gunicorn/
SETTINGS=conf.test.settings
PATH=/bin:/usr/bin
USER=www-data
GROUP=www-data
IP=127.0.0.1
WORKERS=5
NAME=django-gunicorn-test
DESC=$NAME
LOGFILE="$LOGDIR$NAME.log"
PIDFILE="$VIRTUALENV/run/$NAME.pid"
# Full command line; the commented variant below omits the user/group
# and worker settings.
COMMAND="$VIRTUALENV/$APP/bin/manage.py run_gunicorn --user=$USER --group=$GROUP --workers=$WORKERS --bind=$IP:$PORT --pid=$PIDFILE --name=$NAME --log-file=$LOGFILE --log-level=info --settings=$SETTINGS"
#COMMAND="$VIRTUALENV/$APP/bin/manage.py run_gunicorn --bind=$IP:$PORT --pid=$PIDFILE --name=$NAME --log-file=$LOGFILE --log-level=info --settings=$SETTINGS"
# Activate the virtualenv, then replace this shell with the server
# process so signals reach gunicorn directly.
source $VIRTUALENV/bin/activate
exec $COMMAND
| true
|
ff903bb5b8fb444543f91aec3bf0ff5a33e738c9
|
Shell
|
Ghardo/GTK-App-Theme-Changer
|
/cth
|
UTF-8
| 840
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# cth - run a GTK application under a chosen GTK theme.
#
# Options:
#   -p <theme dir>   path to a theme directory (gtk-2.0/gtkrc is appended)
#   -n <theme name>  theme name, looked up in /usr/share/themes or ~/.themes
# With no options the ManjaroFlatLight theme is used.
# Any remaining arguments form the command to run.
DEFAULT_RCF="/usr/share/themes/ManjaroFlatLight/gtk-2.0/gtkrc"
DEFAULT_GTH="ManjaroFlatLight"

rcf=""
gth=""

# NOTE: legacy getopt cannot cope with option arguments containing
# whitespace; kept for compatibility with the original interface.
set -- $(getopt p:n: "$@")
while [ $# -gt 0 ]
do
    case "$1" in
        -p)
            rcf="${2}/gtk-2.0/gtkrc"; shift 2;;
        -n)
            gth=$2; shift 2;;
        --) shift; break;;
    esac
done

# Neither option given: fall back to the default theme.
if [ -z "${rcf}" ] && [ -z "${gth}" ]; then
    rcf=$DEFAULT_RCF
    gth=$DEFAULT_GTH
fi

# Derive the theme name from the rc file path (.../<name>/gtk-2.0/gtkrc).
if [ -z "${gth}" ]; then
    gth="$(basename "$(realpath "$(dirname "${rcf}")/..")")"
fi

# Derive the rc file path from the theme name.
# FIX: the original tested "~/.themes/..." inside quotes (tilde never
# expands there, so that branch was dead) and assigned the bare theme
# directory instead of its gtk-2.0/gtkrc file.
if [ -z "${rcf}" ]; then
    if [ -d "/usr/share/themes/${gth}/" ]; then
        rcf="/usr/share/themes/${gth}/gtk-2.0/gtkrc"
    fi
    if [ -d "${HOME}/.themes/${gth}/" ]; then
        rcf="${HOME}/.themes/${gth}/gtk-2.0/gtkrc"
    fi
fi

if [ -n "$(readlink "${0}")" ]; then
    # Invoked through a symlink such as "gimp-": run the command named by
    # our own basename minus the trailing dash, under the chosen theme.
    GTK2_RC_FILES="${rcf}" GTK_THEME="${gth}" $(echo "${0##*/}" | sed 's/-$//') "$@"
else
    # "$@" (not $*) preserves argument word boundaries.
    GTK2_RC_FILES="${rcf}" GTK_THEME="${gth}" "$@"
fi
| true
|
b80953e27afa6c4c673845e9ff68dfb36caebebf
|
Shell
|
haniokasai/netwalker-rootfs
|
/var/lib/dpkg/info/bc.postrm
|
UTF-8
| 566
| 3.421875
| 3
|
[] |
no_license
|
#! /bin/sh
#
# This is the postrm (post-removal) maintainer script for the Debian
# GNU/Linux bc package; dpkg invokes it with the removal phase as $1.
#
# Written by Dirk Eddelbuettel <edd@debian.org>
# Previous versions written by Bill Mitchell, Austin Donnelly and James Troup

# Abort on any unhandled command failure, as required for maintainer scripts.
set -e

# Automatically added by dh_installmenu
if [ -x "`which update-menus 2>/dev/null`" ]; then update-menus ; fi
# End automatically added section

case "$1" in
    purge|remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
        # Nothing package-specific to do; the menu refresh above covers it.
#	if [ -x /usr/bin/update-menus ]
#	then
#	    update-menus
#	fi
	;;
    *)
        # Unknown phase: warn on stderr but exit 0 so dpkg can proceed.
	echo "postrm called with unknown argument \`$1'" >&2
	;;
esac
| true
|
44a6c6b1272259621d5548a5f40a2b2d61defb70
|
Shell
|
daniapm/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/7-clock
|
UTF-8
| 250
| 3.546875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Prints "Hour: H" for each hour 0 through 12, each followed by the
# minutes 1 through 59 on their own lines.
hours=0
while [ "$hours" -lt 13 ]
do
    echo "Hour: $hours"
    minutes=1
    while [ "$minutes" -le 59 ]
    do
        echo "$minutes"
        minutes=$((minutes + 1))
    done
    hours=$((hours + 1))
done
| true
|
194f2f23c857fb33d0517ac50fa2655c2ee31d36
|
Shell
|
kurron/docker-clion
|
/clion.sh
|
UTF-8
| 859
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Launches CLion inside a Docker container.
#
# Usage: clion.sh [image]
#   image - Docker image to run (default: kurron/docker-clion:latest)

IMAGE=${1:-kurron/docker-clion:latest}

# Numeric gid of the host docker group, so the container user may talk to
# the docker socket mounted below.
DOCKER_GROUP_ID=$(cut -d: -f3 < <(getent group docker))
USER_ID=$(id -u)
GROUP_ID=$(id -g)

# Need to give the container access to your windowing system.
# SECURITY NOTE: "xhost +" disables X access control for every host;
# consider the narrower "xhost +local:docker" instead.
xhost +

# FIX: build the command as an array rather than a whitespace-split string
# so values containing spaces (e.g. $HOME) survive intact.
CMD=(docker run --group-add "${DOCKER_GROUP_ID}"
     --env HOME=/home/powerless
     --env DISPLAY=unix"${DISPLAY}"
     --interactive
     --name CLion
     --net host
     --rm
     --tty
     --user="${USER_ID}:${GROUP_ID}"
     --volume "${HOME}:/home/powerless"
     --volume /tmp/.X11-unix:/tmp/.X11-unix
     --volume /var/run/docker.sock:/var/run/docker.sock
     --workdir /tmp
     "${IMAGE}")

# Echo the command for the user, then run it.
echo "${CMD[@]}"
"${CMD[@]}"
| true
|
b970a5d3df4211bce41fb30e0958df9ae1b09027
|
Shell
|
rene525456/bash
|
/tarea6.sh
|
UTF-8
| 139
| 2.546875
| 3
|
[] |
no_license
|
#! /bin/bash
# Prints the number of lines in each of the two files given as arguments.
# Usage: tarea6.sh <file1> <file2>

# Line count of a single file. The redirection form keeps the file name
# out of wc's output and avoids a useless use of cat.
count_lines() {
    wc -l < "$1"
}

file1=$(count_lines "$1")
echo "El archivo $1 tiene $file1 lineas"
file2=$(count_lines "$2")
echo "El archivo $2 tiene $file2 lineas"
| true
|
c24053c5148768b689a8bfec8d4f3fe324649a11
|
Shell
|
robashton/nixpkgs-nixerl
|
/update/release
|
UTF-8
| 824
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Cut a new release: bump the patch number of the most recent git tag,
# update the version string in README.md, commit, and create a signed tag.
# Usage: release [commit message]

set -euo pipefail
set -x

# Work from the repository root (the parent of this script's directory).
cd "$(dirname "${BASH_SOURCE[0]}")/.."

# Optional commit message supplied on the command line.
declare commit_message=${1:-}

main() {
  # Split the latest tag (e.g. "v1.2.3-label") into its parts;
  # "cut -c 2-" drops the leading "v", awk splits on "." and "-".
  read -r major minor patch label <<< "$(git describe HEAD --abbrev=0 | cut -c 2- | awk 'BEGIN{FS="[.-]"} {print $1,$2,$3,$4}')"

  new_patch=$((patch + 1))
  new_version=v${major}.${minor}.${new_patch}-${label}

  # Replace every old version string in the README with the new one.
  sed -i "s|v[0-9]*\.[0-9]*\.[0-9]*-[-a-z]*|${new_version}|g" ./README.md

  # Default commit message: the versions newly added to the manifest,
  # scraped from the "+" lines of the diff and joined with ", ".
  if [[ -z "${commit_message}" ]]
  then
    new_versions=$(git diff -U0 erlang-manifest.json | sed -n 's|^+ *"version": "\([^"]*\)".*|\1|p' | awk '{printf (NR>1?", ":"") $1}')
    commit_message="Added ${new_versions}."
  fi

  git add \
    README.md \
    erlang-manifest.json \
    update/version-metadata/
  git commit -m "${commit_message}"
  # -s creates a GPG-signed annotated tag.
  git tag -s -m "${commit_message}" "${new_version}"
}

main
| true
|
7319f727d60817ac20afb0b098ee836802606a00
|
Shell
|
dnuffer/setup
|
/remove_cuda.sh
|
UTF-8
| 781
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Removes the NVIDIA CUDA packages and repository that were installed from
# developer.download.nvidia.com on Ubuntu 14.04.

set -x
set -e
set -u

# Root is required for the apt-get operations below.
if [ $UID != "0" ]; then
	echo "This script must be run as root" >&2
	exit 1
fi

# Collect every package name listed in the cached NVIDIA repo indexes.
# $packages is deliberately left unquoted below so the list word-splits
# into individual package arguments.
packages=`grep ^Package: /var/lib/apt/lists/developer.download.nvidia.com_compute_cuda_repos_ubuntu1404_*_Packages | cut -f 2 -d ' '`
apt-get -y remove $packages cuda-repo-ubuntu1404
# Drop the repo definition, refresh the package lists, repair leftovers.
rm -f /etc/apt/sources.list.d/cuda.list
apt-get -y update
apt-get --fix-broken -y install

## The nvidia driver sucks for desktop use :-(
#if ! [ -e /etc/apt/sources.list.d/cuda.list ]; then
#wget -O /tmp/cuda.deb http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1404/x86_64/cuda-repo-ubuntu1404_6.5-14_amd64.deb
#dpkg -i /tmp/cuda.deb
#apt-get -y update
#fi
#apt-get -y install --install-recommends --fix-broken --ignore-hold --auto-remove \
#cuda \
#nvidia-prime
| true
|
6c65fdaee21c3726aca39417ce01b6121fc2bbc0
|
Shell
|
swift-lang/swift-t
|
/dev/conda/clean.sh
|
UTF-8
| 332
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/zsh -f
set -eu
# CLEAN SH
# Clean generated files (settings.sed, meta.yaml) from every recipe
# directory next to this script. Uses zsh-specific features throughout.

rm0()
# File removal, ok with empty argument list
# Safer than rm -f
# Accepts -v to pass verbose through to rm (parsed with zparseopts;
# -D removes parsed options from $*, -E skips unknown words).
{
  local F V
  zparseopts -D -E v=V
  for F in ${*}
  do
    # zsh short form: "if [[ cond ]] cmd" runs cmd only when cond holds.
    if [[ -f $F ]] rm ${V} $F
  done
}

# Get this directory (zsh modifiers: :h = dirname, :A = absolute path):
THIS=${0:h:A}
cd $THIS

# "*(/)" is a zsh glob qualifier that matches directories only.
for D in *(/)
do
  rm0 -v $D/settings.sed $D/meta.yaml
done
| true
|
63e5e8364c813ffba8b603f1a9c7d94db1c7213b
|
Shell
|
alshaboti/dam
|
/setup.sh
|
UTF-8
| 6,132
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
#################################
# Start by sourceing the script #
# source zeek-test.sh #
#################################
#create faucet ovs network
#Install ovs-docker as here
#http://containertutorials.com/network/ovs_docker.html
echo ">>>>>>>>>>>>>>>Pre requisits<<<<<<<<<<<<<<<<<<<<<"
# echo "git_zeek-netcontrol"
# function git_zeek-netcontrol(){
# git clone https://github.com/zeek/zeek-netcontrol.git
# }
# echo "git_faucetagent"
# function git_faucetagent()
# {
# git clone https://github.com/faucetsdn/faucetagent.git
# # remove sudo, as faucet container will run as root
# # cd faucetagent
# # sed -i "s/sudo//g" dependencies.sh
# }
echo "generate_gNMI_certs"
# Create self-signed TLS material for the gNMI link under ./tls_cert_key:
# a server cert/key for faucet and a client cert/key for zeek. The server
# certificate is copied into the client dir as its CA cert.
function generate_gNMI_certs(){
    mkdir -p tls_cert_key
    mkdir -p tls_cert_key/server
    openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout tls_cert_key/server/server.key -out tls_cert_key/server/server.crt -subj '/CN=faucet.localhost'
    mkdir -p tls_cert_key/client
    openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout tls_cert_key/client/client.key -out tls_cert_key/client/client.crt -subj '/CN=zeek.localhost'
    cp tls_cert_key/server/server.crt tls_cert_key/client/ca.crt
}
echo "############## First: Create and attach to each container ##################### "
#echo "1- Either use tmux and create and attach to each container using the following functions"
#echo "cr_faucet-cont"
#function cr_faucet-cont(){
# docker run \
# --rm --name faucet \
# -v /var/log/faucet/:/var/log/faucet/ \
# -v $PWD/etc/faucet/:/etc/faucet/ \
# -p 6653:6653 -p 9302:9302 \
# shaboti/faucet-ssh
#}
#echo "cr_server-cont"
#function cr_server-cont(){
# docker run \
# --rm -it --name server \
# --network=none python /bin/bash
#}
#echo "cr_host-cont"
#function cr_host-cont(){
# docker run \
# --rm -it --name host \
# --network=none python /bin/bash
#}
#echo "cr_zeek-cont"
#function cr_zeek-cont(){
# docker run \
# --rm -it --name zeek \
# -v $PWD:/pegler \
# -v /etc/faucet/:/etc/faucet/ shaboti/zeek-ids /bin/bash
#}
#echo "2- OR create and attach to all container at once using xterm"
echo "cr_all_conts_with_xterms"
# Start all four containers (faucet, host, server, zeek), each in its own
# background xterm so every console stays visible side by side.
function cr_all_conts_with_xterms(){
    # faucet controller, with config, TLS material and ports 6653/9302 exposed
    xterm -T faucet -e \
    docker run \
    --rm --name faucet \
    -v /var/log/faucet/:/var/log/faucet/ \
    -v $PWD/etc/faucet/faucet.yaml:/etc/faucet/faucet.yaml \
    -v $PWD/tls_cert_key/:/pegler/tls_cert_key/ \
    -p 6653:6653 -p 9302:9302 \
    shaboti/faucet-agent &
    # client host (network attached later via ovs-docker)
    xterm -bg MediumPurple4 -T host -e \
    docker run \
    --rm --name host \
    -it \
    python bash -c "echo 'RUN: wget http://192.168.0.1:8000' && bash" &
    # HTTP server container
    xterm -bg NavyBlue -T server -e \
    docker run \
    --rm --name server \
    -it \
    python bash -c "echo 'RUN: python -m http.server 8000' && bash" &
    # zeek IDS, with sources, faucet config and TLS material mounted
    xterm -bg Maroon -T zeek -e \
    docker run \
    --rm --name zeek \
    -it \
    -v $PWD/src/:/pegler/src/ \
    -v $PWD/etc/faucet/faucet.zeek.yaml:/pegler/etc/faucet/faucet.zeek.yaml \
    -v $PWD/tls_cert_key/:/pegler/tls_cert_key/ \
    -w /pegler/src \
    shaboti/zeek-ids /bin/bash &
}
#docker pull ubuntu
#then install zeek on it, save that container as an image for later use.
#export PATH=/usr/local/zeek/bin:$PATH
#export PREFIX=/usr/local/zeek
#https://github.com/zeek/zeek-netcontrol
#export PYTHONPATH=$PREFIX/lib/zeekctl:/pegler/zeek-netcontrol
echo "###################### Second: configure and build the network connections ####################"
echo "create_ovs_net"
# Build the network fabric: an OVS bridge (datapath id 1, secure fail mode)
# pointed at the faucet controller, a docker bridge linking zeek and faucet
# for the gNMI channel, and OVS ports wiring server/host/zeek into the
# 192.168.0.0/24 data plane.
function create_ovs_net(){
    ovs-vsctl add-br ovs-br0 \
    -- set bridge ovs-br0 other-config:datapath-id=0000000000000001 \
    -- set bridge ovs-br0 other-config:disable-in-band=true \
    -- set bridge ovs-br0 fail_mode=secure \
    -- set-controller ovs-br0 tcp:127.0.0.1:6653 tcp:127.0.0.1:6654
    # create bridge btween zeek and faucet
    docker network create --subnet 192.168.100.0/24 --driver bridge zeek_faucet_nw 1>/dev/null
    docker network connect --ip 192.168.100.2 zeek_faucet_nw zeek
    docker network connect --ip 192.168.100.3 zeek_faucet_nw faucet
    # connect the rest to ovs-br0
    ip addr add dev ovs-br0 192.168.0.254/24
    ovs-docker add-port ovs-br0 eth1 server --ipaddress=192.168.0.1/24
    ovs-docker add-port ovs-br0 eth1 host --ipaddress=192.168.0.2/24
    ovs-docker add-port ovs-br0 eth2 zeek --ipaddress=192.168.0.100/24
}
echo "######################### Third (optinal): you may use other commands #########################"
echo "check_ovs_net"
# Print the OVS topology, the bridge's OpenFlow ports, and the running
# containers for a quick sanity check of the setup.
function check_ovs_net(){
    ovs-vsctl show
    ovs-ofctl show ovs-br0
    docker ps
}
echo "get_X-bash-xterm"
# Open an interactive bash shell in the container named by $1, in a new
# background xterm.
function get_X-bash-xterm(){
    xterm -T $1 -bg Grey15 -e docker exec -it $1 /bin/bash &
}
# echo "get_x-bash"
# function get_x-bash(){
# docker exec -it $1 /bin/bash
# }
echo "get_faucet-gNMI-agent-xterm"
# Start the faucet gNMI agent (with the server TLS cert/key and the faucet
# config) inside the faucet container, in its own background xterm.
function get_faucet-gNMI-agent-xterm(){
    xterm -T faucetXterm -e docker exec faucet ./faucetagent/faucetagent.py --cert /pegler/tls_cert_key/server/server.crt --key /pegler/tls_cert_key/server/server.key --configfile /etc/faucet/faucet.yaml &
}
# echo "get_faucet-bash-xterm"
# function get_faucet-bash-xterm(){
# xterm -T faucetXterm -e docker exec -it faucet /bin/bash &
#}
# faucet reload
#echo "faucet_relaod_config"
#function faucet_reload_config(){
# docker kill --signal=HUP faucet
#}
echo "################### Remove everything ########################"
# to REMOVE everything
echo "clear_ovs_net_all"
# Tear everything down: stop and remove all containers, delete the OVS
# bridge and the zeek/faucet docker network. stderr is discarded so the
# function can be re-run even when some pieces are already gone.
function clear_ovs_net_all(){
    docker stop server host zeek faucet 2>/dev/null
    ovs-vsctl del-br ovs-br0 2>/dev/null
    docker rm host server zeek faucet 2>/dev/null
    docker network rm zeek_faucet_nw 2>/dev/null
}
echo "Done!"
echo ""
| true
|
0da5ee739a7b51a114e5a4c86fac3f59d13a3a05
|
Shell
|
warpme/minimyth2
|
/script/meta/minimyth/files/source/rootfs/etc/rc.d/init.d/extras
|
UTF-8
| 2,131
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/sh
################################################################################
# extras
#
# This script (downloads and) mounts the extras directory tree on /usr/local.
#
# MM_EXTRAS_URL selects the source:
#   "auto"   - symlink to the per-host dir on the NFS boot dir (extras/<host>)
#   "global" - symlink to the shared extras dir on the NFS boot dir
#   <other>  - any other non-empty value is mounted via mm_url_mount
################################################################################
. /etc/rc.d/functions

start() {
	if [ -n "${MM_EXTRAS_URL}" ] ; then
		# Explicit URL: mount it on /usr/local, refresh the linker cache.
		if [ ! "x${MM_EXTRAS_URL}" = "xauto" ] && [ ! "x${MM_EXTRAS_URL}" = "xglobal" ] ; then
			/usr/bin/logger -s -t minimyth -p "local0.info" "[init.d/extras] Mounting Extras dir from ${MM_EXTRAS_URL}"
			mm_url_mount "${MM_EXTRAS_URL}" "/usr/local"
			/sbin/ldconfig
		fi
		# Per-host extras: requires the NFS boot dir to be mounted already.
		if [ "x${MM_EXTRAS_URL}" = "xauto" ] ; then
			mm_message_output info "mounting extras ...(auto)"
			if [ -f "/var/lib/minimyth.bootdir.nfs_mounted" ] ; then
				/bin/rm -rf /usr/local
				/bin/ln -sf /var/minimyth.bootdir/extras/${HOST_NAME} "/usr/local"
				/usr/bin/logger -s -t minimyth -p "local0.info" "[init.d/media] Extras dir automounted...(auto)"
				/sbin/ldconfig
			else
				/usr/bin/logger -s -t minimyth -p "local0.info" "[init.d/media] ERROR: Extras not avaliale because NFS base dir not mounted...(auto)"
				return 1
			fi
		fi
		# Shared extras: same NFS requirement as "auto".
		if [ "x${MM_EXTRAS_URL}" = "xglobal" ] ; then
			mm_message_output info "mounting extras ...(global)"
			if [ -f "/var/lib/minimyth.bootdir.nfs_mounted" ] ; then
				/bin/rm -rf /usr/local
				/bin/ln -sf /var/minimyth.bootdir/extras "/usr/local"
				/usr/bin/logger -s -t minimyth -p "local0.info" "[init.d/media] extras dir automounted...(global)"
				/sbin/ldconfig
			else
				/usr/bin/logger -s -t minimyth -p "local0.info" "[init.d/media] ERROR: Extras not avaliale because NFS base dir not mounted...(global)"
				return 1
			fi
		fi
	fi
	this_script_done
}

stop() {
	# NOTE(review): only meaningful for the mounted-URL case; in the
	# auto/global symlink cases nothing is mounted to unmount - confirm.
	/bin/umount "${MM_EXTRAS_URL}"
	return 0
}

case $1 in
	start) start ;;
	stop) stop ;;
esac
| true
|
6bc257fb25e093fb458e29449fcf82738411683b
|
Shell
|
MilesQLi/iBinHunt
|
/doc/install-vine.sh
|
UTF-8
| 2,850
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash

# Instructions for installing VinE on Linux
# In the form of a shell script that runs on a fresh Ubuntu 8.04
# installation. Some adaptation may be needed for other systems.
# Things that require root access are preceded with "sudo".
# See the "INSTALL" file in the VinE source for more discussion on
# what we're doing here.

# Last tested 2008-09-11

# This script will build VinE in a "$HOME/bitblaze" directory
cd ~
mkdir bitblaze
cd bitblaze

# Prerequisite: Valgrind VEX r1749
# VinE uses the VEX library that comes with Valgrind to interpret the
# semantics of instructions. (Note we don't even bother to compile
# Valgrind itself). Because of changes in the VEX interface, earlier
# or later revisions will probably not work without some changes,
# though we'll probably update to work with a more recent version at
# some point in the future.

# Packages needed to build Valgrind:
sudo apt-get build-dep valgrind
# Extra packages needed to build the SVN version:
sudo apt-get install subversion automake

# Check out Valgrind r6697, pin VEX to r1749, and build only the VEX
# static libraries (libvex_x86_linux.a / libvex.a).
svn co -r6697 svn://svn.valgrind.org/valgrind/trunk valgrind
(cd valgrind/VEX && svn up -r1749)
(cd valgrind/VEX && make version && make libvex_x86_linux.a && make libvex.a)

# Other prerequisite packages:
# For C++ support:
sudo apt-get install g++
# For OCaml support:
sudo apt-get install ocaml ocaml-findlib libgdome2-ocaml-dev camlidl \
    libextlib-ocaml-dev
# For the BFD library:
sudo apt-get install binutils-dev
# For the Boost Graph library:
sudo apt-get install libboost-dev libboost-graph-dev
# For the SQLite database:
sudo apt-get install libsqlite3-dev sqlite3 libsqlite3-0 libsqlite3-ocaml-dev
# For building documentation:
sudo apt-get install texlive

# Ocamlgraph >= 0.99c is required. Ocamlgraph is packaged by Debian
# and Ubuntu as libocamlgraph-ocaml-dev, but the latest version in
# Ubuntu is 0.98. The following process for building a package from
# the Debian repository is a bit of a hack.
sudo apt-get install libocamlgraph-ocaml-dev
sudo apt-get build-dep libocamlgraph-ocaml-dev
sudo apt-get install liblablgtk2-ocaml-dev liblablgtk2-gnome-ocaml-dev \
    docbook-xsl po4a
sudo apt-get install fakeroot
svn co svn://svn.debian.org/svn/pkg-ocaml-maint/trunk/packages/ocamlgraph \
    -r5983
tar xvzf ocamlgraph/upstream/ocamlgraph_0.99c.orig.tar.gz
mv ocamlgraph/trunk/debian ocamlgraph-0.99c
# Relax the ocaml-nox build dependency so the Debian packaging builds
# against the ocaml version shipped with Ubuntu 8.04.
perl -pi -e 's[ocaml-nox \(>= 3.10.0-9\)] #\
             [ocaml-nox (>= 3.10.0-8)]' ocamlgraph-0.99c/debian/control
(cd ocamlgraph-0.99c && dpkg-buildpackage -us -uc -rfakeroot)
sudo dpkg -i libocamlgraph-ocaml-dev_0.99c-2_i386.deb

# VinE itself:
# Trunk:
svn co https://bullseye.cs.berkeley.edu/svn/vine/trunk vine
(cd vine && ./autogen.sh)
(cd vine && ./configure --with-vex=$HOME/bitblaze/valgrind/VEX)
(cd vine && make)
(cd vine/doc && make doc)
| true
|
75ef9927f1217b5872ccccea57d492fe4be9c719
|
Shell
|
AlexisMaximo/epa-com
|
/week6/instance.check.sh
|
UTF-8
| 215
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Reports whether the EC2 instance whose id is given as $1 is running.
# Usage: instance.check.sh <instance-id>
aws ec2 describe-instance-status --instance-ids "$1" > lab6.txt
# Count matching lines directly (grep -c) instead of grep | wc -l.
output=$(grep -c running lab6.txt)
# FIX: the original tested [ output -gt 0 ] without the "$", which always
# errored ("integer expression expected") and fell through to the else.
if [ "$output" -gt 0 ]; then
    echo "The instance is running"
else
    echo "No such instance running"
fi
| true
|
5a3237863621b18ed24a1250861b19c7601c0137
|
Shell
|
WishCow/scripts
|
/blocklets/active_window.sh
|
UTF-8
| 333
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash -eu
# Blocklet: stream the focused window's title prefixed with an icon glyph.
# "xtitle -s" emits a new title line each time window focus changes.
# NOTE: the -eu shebang options only apply when executed directly.
# NOTE(review): "read" without -r mangles backslashes in titles - confirm
# whether that matters for this bar.
while read window; do
    # Icons are private-use font glyphs chosen by application name;
    # the default is an empty icon.
    icon=""
    case "$window" in
        *WeeChat*)
            icon=""
            ;;
        *Firefox*)
            icon=""
            ;;
        *VIM*)
            icon=""
            ;;
        *VLC*)
            icon=""
            ;;
    esac
    echo "$icon $window"
done < <(xtitle -s)
| true
|
dcebd8f9b8ce7f019f7e40e9122342026ab784e1
|
Shell
|
Robert-96/tox-docker
|
/install-pypy.sh
|
UTF-8
| 1,402
| 4.25
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# Validate that $1 is a CPython version in major.minor form (e.g. 2.7, 3.4).
# Prints an explanation and exits 1 on malformed input.
validate_python_version() {
    # FIX: declare locals so the helper no longer clobbers global
    # VERSION/VERSION_REGEX variables.
    local VERSION=$1
    local VERSION_REGEX='^([0-9]+\.)([0-9]+)$'

    if [[ ! $VERSION =~ $VERSION_REGEX ]]; then
        echo "Invalid CPython version '$VERSION'."
        echo "The version must use a major.minor pattern (e.g 2.7, 3.4)."
        exit 1
    fi
}
# Validate that $1 is a PyPy version in major.minor.patch form (e.g. 7.3.5).
# Prints an explanation and exits 1 on malformed input.
validate_pypy_version() {
    # FIX: declare locals so the helper no longer clobbers global
    # VERSION/VERSION_REGEX variables.
    local VERSION=$1
    local VERSION_REGEX='^([0-9]+\.){2}([0-9]+)$'

    if [[ ! $VERSION =~ $VERSION_REGEX ]]; then
        echo "Invalid PyPy version '$VERSION'."
        echo "The version must use a major.minor.patch pattern (e.g 2.7.18, 3.4.9)."
        exit 1
    fi
}
# Download a PyPy release tarball, unpack it under /opt, and expose the
# interpreter on PATH via /usr/local/bin.
#   $1 - CPython version implemented (major.minor, e.g. 3.7)
#   $2 - PyPy release version (major.minor.patch, e.g. 7.3.5)
# The 2.7 build is linked as "pypy"; Python 3 builds as "pypy<major.minor>".
install_pypy() {
    PYTHON_VERSION=$1
    PYPY_VERSION=$2

    BASE_URL="https://downloads.python.org/pypy"
    FILE="pypy$PYTHON_VERSION-v$PYPY_VERSION-linux64"
    ARCHIVE="$FILE.tar.bz2"
    URL="$BASE_URL/$ARCHIVE"

    echo "Installing PyPy $PYPY_VERSION - Python $PYTHON_VERSION"

    # Fetch, unpack into /opt/pypy<python-version>, and drop the tarball.
    wget -q -P /tmp $URL
    tar -x -C /opt -f "/tmp/$ARCHIVE"
    rm "/tmp/$ARCHIVE"
    mv "/opt/$FILE" "/opt/pypy$PYTHON_VERSION"

    if [[ "$PYTHON_VERSION" == "2.7" ]]; then
        ln -s "/opt/pypy$PYTHON_VERSION/bin/pypy" "/usr/local/bin/pypy"
        # smoke-test the freshly linked interpreter
        pypy --version &> /dev/null
    else
        ln -s "/opt/pypy$PYTHON_VERSION/bin/pypy3" "/usr/local/bin/pypy$PYTHON_VERSION"
        # eval needed because the command name is built from a variable
        eval "pypy$PYTHON_VERSION --version &> /dev/null"
    fi
}
# Entry point: $1 = CPython major.minor version, $2 = PyPy release version.
validate_python_version $1
validate_pypy_version $2
install_pypy $1 $2
| true
|
6482f18eddec8d95b5e9ea1b75b2003350eee408
|
Shell
|
dougyouch/shell-settings
|
/bash/.bash_aliases
|
UTF-8
| 669
| 3.03125
| 3
|
[] |
no_license
|
# Shared ls aliases with graceful degradation:
# GNU ls (--color) -> ls supporting -o -> plain POSIX ls.
LS='/bin/ls'
if ($LS --color >& /dev/null); then
    alias dir="$LS --color -algF"
    alias l="$LS --color -olgF"
    alias la="$LS --color -a"
    alias ls="$LS --color -lF"
    alias lt="la --color -rt"
elif ($LS -o >& /dev/null); then
    alias dir="$LS -oalgF"
    alias l="$LS -olgF"
    alias la="$LS -a"
    alias ls="$LS -lF"
    alias lt="la -rt"
else
    alias dir="$LS -algF"
    alias l="$LS -lgF"
    alias la="$LS -a"
    alias ls="$LS -F"
    alias lt="la -rt"
fi
# pick xemacs if we have a choice (and here I assume it is XEmacs
if (which xemacs >& /dev/null); then
    alias emacs='xemacs'
fi
# human-readable sizes by default
alias du="du -h"
alias df="df -h"
# store symlinks as links instead of following them
alias zip='zip -y'
# quick MX record lookup
alias mx='nslookup -query=mx'
| true
|
a13fbe5f49007e1635802f7669d3972c097a7649
|
Shell
|
kylinsystems/idempiere-installation-script
|
/utils/chuboe_fdw_example_read_install.sh
|
UTF-8
| 3,021
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
#The purpose of this script is to help you create a foreign data wrapper (FDW)
#connection to another postgresql server for read purposes.
#Here are the details:
#always test execution on a test server first (not production)
#this script assumes the remote server is running iDempiere (not required)
#could also be a webstore running on mysql
#Assumes you created the following view on the remote server:
#create view chuboe_ordertest_v as select o.documentno from c_order o;
#See 'changeme' below for variables that you should change for your environment.

# ---- connection settings ----
#create remote server
remote_server_ip='172.30.1.202' #changeme
remote_server_name='remote1'
remote_server_port='5432'
remote_server_db='idempiere'
remote_schema_name='adempiere'
#create user mapping
remote_user_name='adempiere'
remote_user_password='Silly' #changeme
#create foreign table - you will create as many table refs as you wish
#this is just one example.
#the materialized view (_mv) is optional
local_table_reference='chuboe_ordertest_remote_v'
local_table_reference_mv='chuboe_ordertest_remote_mv' #materialized view optional
remote_table_name='chuboe_ordertest_v'

# ---- FDW setup (run against the local idempiere database) ----
#create fdw extension
psql -d idempiere -U adempiere -c "CREATE EXTENSION postgres_fdw"
#create reference to remote server in local system
psql -d idempiere -U adempiere -c "CREATE SERVER $remote_server_name
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (host '$remote_server_ip', port '$remote_server_port', dbname '$remote_server_db')"
#map our local adempiere user to the remote adempiere user - Note: you should create a new remote user with limited power (not adempeire)
psql -d idempiere -U adempiere -c "CREATE USER MAPPING FOR $remote_user_name
SERVER $remote_server_name
OPTIONS (user '$remote_user_name', password '$remote_user_password')"
#create a local reference to a remote table
psql -d idempiere -U adempiere -c "CREATE FOREIGN TABLE $local_table_reference (
documentno character varying(30))
SERVER $remote_server_name
OPTIONS (schema_name '$remote_schema_name', table_name '$remote_table_name')"
echo
echo @@@***@@@
echo Test connectivity using the following command:
echo psql -d idempiere -U adempiere -c \"select \* from $local_table_reference\"
echo @@@***@@@
echo
#optionally create a materialized view (local cache) so that you can read
#from the table even when the connection between the servers is down.
psql -d idempiere -U adempiere -c "create materialized view $local_table_reference_mv as select * from $local_table_reference"
echo
echo @@@***@@@
echo "Test local cached copy (materialized view) using the following command:"
echo psql -d idempiere -U adempiere -c \"select \* from $local_table_reference_mv\"
echo
echo Note: you can shut the remote server down and still query this data.
echo @@@***@@@
echo
echo
echo @@@***@@@
echo To remove all above artifacts:
echo psql -d idempiere -U adempiere -c \"drop server $remote_server_name CASCADE\"
echo @@@***@@@
echo
| true
|
4a597681ae030511d524d5d168f40253ce20cf1b
|
Shell
|
ymeur/REMAP
|
/RUN/job_o
|
UTF-8
| 1,190
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
##################
## CURIE CEA ##
##################
#MSUB -r remap # nom de la requete
#MSUB -o remap.out # nom du fichier de sortie
#MSUB -e remap.err
#MSUB -n 256 # reservation des processeurs pour le job
##MSUB -c 1 # reservation des processeurs pour le job
#MSUB -x # exclusif
#MSUB -T 180 # Limite temps (en secondes)
#MSUB -p gen7357
#MSUB -Q test # QoS test
#MSUB -q standard # Choosing standard nodes
#MSUB -X # X11 forwarding
cd $BRIDGE_MSUB_PWD
ulimit -a
ulimit -aH
8
module load mpiprofile/bullxmpi/performance
module load ddt
mpirun hostname | sort | uniq > hosts.tmp
cat hosts.tmp
i=0
rm -rf hosts
for nodes in `cat hosts.tmp`
do
host[$i]=$nodes
echo "${host[$i]}" >> hosts
i=$((i+1))
done
rm -rf rankfile.txt
rank=0
i=0
for nodes in `cat hosts.tmp`
do
for j in `seq 0 63`;
do
echo "rank $rank=${host[$i]} slot=0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15" >> rankfile.txt
rank=$((rank+1))
done
i=$((i+1))
done
rm -rf hosts.tmp
mpirun -hostfile hosts -rankfile rankfile.txt -np 1024 /bin/bash -c '../test &> remap.out.$OMPI_COMM_WORLD_RANK'
| true
|
2bb32a9b34bf696370e775eee0bffb535fcf2c19
|
Shell
|
Danone89/install-zabbix
|
/scripts/install.sh
|
UTF-8
| 9,982
| 3.34375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
#
# Created on June 5, 2020
#
# @author: sgoldsmith
#
# Install dependencies, mysql, Zabbix Server 5.4.x and Zabbix Agent 2 on Ubuntu
# 20.04. This may work on other versions and Debian like distributions.
#
# Change variables below to suit your needs.
#
# Steven P. Goldsmith
# sgjava@gmail.com
#
# MySQL root password
dbroot="rootZaq!2wsx"
# Zabbix user MySQL password
dbzabbix="zabbixZaq!2wsx"
# MySQL database monitoring user
monzabbix="monzabbixZaq!2wsx"
# Zabbix Server URL
zabbixurl="https://cdn.zabbix.com/zabbix/sources/stable/5.4/zabbix-5.4.2.tar.gz"
# Just Zabbix server archive name
zabbixarchive=$(basename "$zabbixurl")
# Where to put Zabbix source
srcdir="/usr/local/src"
# PHP timezone
phptz="America/New_York"
# Zabbix server configuration
zabbixconf="/usr/local/etc/zabbix_server.conf"
# Get architecture
arch=$(uname -m)
# Temp dir for downloads, etc.
tmpdir="$HOME/temp"
# stdout and stderr for commands logged
logfile="$PWD/install.log"
rm -f $logfile
# Simple logger
log(){
timestamp=$(date +"%m-%d-%Y %k:%M:%S")
echo "$timestamp $1"
echo "$timestamp $1" >> $logfile 2>&1
}
log "Removing temp dir $tmpdir"
rm -rf "$tmpdir" >> $logfile 2>&1
mkdir -p "$tmpdir" >> $logfile 2>&1
log "Installing MySQL..."
sudo -E apt-get -y update >> $logfile 2>&1
sudo -E apt-get -y install mysql-server mysql-client >> $logfile 2>&1
# Secure MySQL, create zabbix DB, zabbix user and zbx_monitor user.
sudo -E mysql --user=root <<_EOF_
ALTER USER 'root'@'localhost' IDENTIFIED BY '${dbroot}';
DELETE FROM mysql.user WHERE User='';
DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1');
DROP DATABASE IF EXISTS test;
DELETE FROM mysql.db WHERE Db='test' OR Db='test\\_%';
CREATE DATABASE zabbix CHARACTER SET UTF8 COLLATE UTF8_BIN;
CREATE USER 'zabbix'@'%' IDENTIFIED BY '${dbzabbix}';
GRANT ALL PRIVILEGES ON zabbix.* TO 'zabbix'@'%';
CREATE USER 'zbx_monitor'@'%' IDENTIFIED BY '${monzabbix}';
GRANT USAGE,REPLICATION CLIENT,PROCESS,SHOW DATABASES,SHOW VIEW ON *.* TO 'zbx_monitor'@'%';
FLUSH PRIVILEGES;
_EOF_
# JDK stuff: install Azul Zulu JDK 11 (needed for Zabbix's --enable-java
# gateway build below) and register it with update-alternatives.
javahome=/usr/lib/jvm/jdk11
# Exported so the sudo'd "sh -c" below (run with -E) can expand $javahome.
export javahome
# ARM 32
if [ "$arch" = "armv7l" ]; then
	jdkurl="https://cdn.azul.com/zulu-embedded/bin/zulu11.48.21-ca-jdk11.0.11-linux_aarch32hf.tar.gz"
# ARM 64
elif [ "$arch" = "aarch64" ]; then
	jdkurl="https://cdn.azul.com/zulu-embedded/bin/zulu11.48.21-ca-jdk11.0.11-linux_aarch64.tar.gz"
# X86_32
elif [ "$arch" = "i586" ] || [ "$arch" = "i686" ]; then
	jdkurl="https://cdn.azul.com/zulu/bin/zulu11.48.21-ca-jdk11.0.11-linux_i686.tar.gz"
# X86_64
elif [ "$arch" = "x86_64" ]; then
	jdkurl="https://cdn.azul.com/zulu/bin/zulu11.48.21-ca-jdk11.0.11-linux_x64.tar.gz"
fi
# Just JDK archive name
jdkarchive=$(basename "$jdkurl")
# Install Zulu Java JDK
log "Downloading $jdkarchive to $tmpdir"
wget -q --directory-prefix=$tmpdir "$jdkurl" >> $logfile 2>&1
log "Extracting $jdkarchive to $tmpdir"
tar -xf "$tmpdir/$jdkarchive" -C "$tmpdir" >> $logfile 2>&1
log "Removing $javahome"
sudo -E rm -rf "$javahome" >> $logfile 2>&1
# Remove .gz
filename="${jdkarchive%.*}"
# Remove .tar (leaves the extracted directory name, e.g. zulu11.48.21-...)
filename="${filename%.*}"
sudo mkdir -p /usr/lib/jvm >> $logfile 2>&1
log "Moving $tmpdir/$filename to $javahome"
sudo -E mv "$tmpdir/$filename" "$javahome" >> $logfile 2>&1
sudo -E update-alternatives --install "/usr/bin/java" "java" "$javahome/bin/java" 1 >> $logfile 2>&1
sudo -E update-alternatives --install "/usr/bin/javac" "javac" "$javahome/bin/javac" 1 >> $logfile 2>&1
sudo -E update-alternatives --install "/usr/bin/jar" "jar" "$javahome/bin/jar" 1 >> $logfile 2>&1
sudo -E update-alternatives --install "/usr/bin/javadoc" "javadoc" "$javahome/bin/javadoc" 1 >> $logfile 2>&1
# See if JAVA_HOME exists and if not add it to /etc/environment
if grep -q "JAVA_HOME" /etc/environment; then
	log "JAVA_HOME already exists"
else
	# Add JAVA_HOME to /etc/environment
	# Single quotes are deliberate: $javahome is expanded by the root shell,
	# which sees it because of "export javahome" above plus sudo -E.
	log "Adding JAVA_HOME to /etc/environment"
	sudo -E sh -c 'echo "JAVA_HOME=$javahome" >> /etc/environment'
	. /etc/environment
	log "JAVA_HOME = $JAVA_HOME"
fi
# Download the Zabbix server source archive and unpack it into the temp dir.
log "Downloading $zabbixarchive to $tmpdir"
wget -q --directory-prefix="$tmpdir" "$zabbixurl" >> "$logfile" 2>&1
log "Extracting $zabbixarchive to $tmpdir"
tar -xf "$tmpdir/$zabbixarchive" -C "$tmpdir" >> "$logfile" 2>&1
# Strip .gz then .tar: leaves the extracted directory name (e.g. zabbix-5.4.2).
filename="${zabbixarchive%.*}"
filename="${filename%.*}"
sudo -E mv "$tmpdir/$filename" "${srcdir}" >> "$logfile" 2>&1
# Load schema, images and seed data into the zabbix database.
log "Importing Zabbix data..."
# BUGFIX: was "cd ${srcdir}/$(unknown)/..." which invoked a nonexistent
# command; the extracted source directory is ${filename} (set above).
cd "${srcdir}/${filename}/database/mysql" >> "$logfile" 2>&1 || exit 1
# Equivalent to the original "-p zabbix --password=..." invocation but
# unambiguous: authenticate as user zabbix, import into database zabbix.
sudo -E mysql -u zabbix --password="$dbzabbix" zabbix < schema.sql >> "$logfile" 2>&1
sudo -E mysql -u zabbix --password="$dbzabbix" zabbix < images.sql >> "$logfile" 2>&1
sudo -E mysql -u zabbix --password="$dbzabbix" zabbix < data.sql >> "$logfile" 2>&1
# Insert macro values to monitor 'Zabbix server' MySQL DB (just add 'Template DB MySQL by Zabbix agent 2')
sudo -E mysql --user=root <<_EOF_
USE zabbix;
INSERT INTO hostmacro SELECT (select max(hostmacroid)+1 from hostmacro), hostid, '{\$MYSQL.DSN}', '', 'MySQL Data Source Name', 0 FROM hosts WHERE host = 'Zabbix server';
INSERT INTO hostmacro SELECT (select max(hostmacroid)+1 from hostmacro), hostid, '{\$MYSQL.USER}', 'zbx_monitor', 'MySQL DB monitor password', 0 FROM hosts WHERE host = 'Zabbix server';
INSERT INTO hostmacro SELECT (select max(hostmacroid)+1 from hostmacro), hostid, '{\$MYSQL.PASSWORD}', 'monzabbixZaq!2wsx', 'MySQL DB monitor password', 0 FROM hosts WHERE host = 'Zabbix server';
_EOF_
# Install webserver stack (Apache + PHP modules required by the Zabbix UI).
log "Installing Apache and PHP..."
sudo -E apt-get -y install fping apache2 php libapache2-mod-php php-cli php-mysql php-mbstring php-gd php-xml php-bcmath php-ldap mlocate >> $logfile 2>&1
# Refresh the mlocate database so "locate php.ini" below can succeed.
sudo -E updatedb >> $logfile 2>&1
# Get php.ini file location (first match wins; assumes the Apache SAPI's
# php.ini sorts first — TODO confirm on multi-SAPI installs)
phpini=$(locate php.ini 2>&1 | head -n 1)
# Update settings in php.ini to the minimums the Zabbix frontend requires.
sudo -E sed -i 's/max_execution_time = 30/max_execution_time = 300/g' "$phpini" >> $logfile 2>&1
sudo -E sed -i 's/memory_limit = 128M/memory_limit = 256M/g' "$phpini" >> $logfile 2>&1
sudo -E sed -i 's/post_max_size = 8M/post_max_size = 32M/g' "$phpini" >> $logfile 2>&1
sudo -E sed -i 's/max_input_time = 60/max_input_time = 300/g' "$phpini" >> $logfile 2>&1
sudo -E sed -i "s|;date.timezone =|date.timezone = $phptz|g" "$phpini" >> $logfile 2>&1
sudo -E service apache2 restart >> $logfile 2>&1
# Use latest golang (needed to build Zabbix Agent 2)
log "Adding Go repository..."
sudo -E add-apt-repository ppa:longsleep/golang-backports -y >> $logfile 2>&1
sudo -E apt update >> $logfile 2>&1
# Build and install Zabbix server + Agent 2 from source.
log "Installing Zabbix Server..."
# Create the system group/user the daemons will run as.
sudo -E addgroup --system --quiet zabbix >> "$logfile" 2>&1
sudo -E adduser --quiet --system --disabled-login --ingroup zabbix --home /var/lib/zabbix --no-create-home zabbix >> "$logfile" 2>&1
# Create user home
sudo -E mkdir -m u=rwx,g=rwx,o= -p /var/lib/zabbix >> "$logfile" 2>&1
sudo -E chown zabbix:zabbix /var/lib/zabbix >> "$logfile" 2>&1
# Build dependencies for the configure options used below.
sudo -E apt-get -y install build-essential libmysqlclient-dev libssl-dev libsnmp-dev libevent-dev pkg-config golang-go >> "$logfile" 2>&1
sudo -E apt-get -y install libopenipmi-dev libcurl4-openssl-dev libxml2-dev libssh2-1-dev libpcre3-dev >> "$logfile" 2>&1
sudo -E apt-get -y install libldap2-dev libiksemel-dev libcurl4-openssl-dev libgnutls28-dev >> "$logfile" 2>&1
# BUGFIX: was "cd ${srcdir}/$(unknown)" which invoked a nonexistent command;
# the extracted source directory is ${filename} (set from the archive name).
cd "${srcdir}/${filename}" >> "$logfile" 2>&1 || exit 1
# Patch source to fix "plugins/proc/procfs_linux.go:248:6: constant 1099511627776 overflows int" on 32 bit systems
log "Patching source to work on 32 bit platforms..."
sed -i 's/strconv.Atoi(strings.TrimSpace(line\[:len(line)-2\]))/strconv.ParseInt(strings.TrimSpace(line[:len(line)-2]),10,64)/' src/go/plugins/proc/procfs_linux.go >> "$logfile" 2>&1
# Change configuration options here
sudo -E ./configure --enable-server --enable-agent2 --enable-ipv6 --with-mysql --with-openssl --with-net-snmp --with-openipmi --with-libcurl --with-libxml2 --with-ssh2 --with-ldap --enable-java --prefix=/usr/local >> "$logfile" 2>&1
sudo -E make install >> "$logfile" 2>&1
# Configure Zabbix server: setgid fping so the unprivileged zabbix user can
# ping, then point the config at Ubuntu's fping paths and raise the pingers.
sudo -E chmod ug+s /usr/bin/fping
sudo -E chmod ug+s /usr/bin/fping6
sudo -E sed -i "s/# DBPassword=/DBPassword=$dbzabbix/g" "$zabbixconf" >> "$logfile" 2>&1
sudo -E sed -i "s|# FpingLocation=/usr/sbin/fping|FpingLocation=/usr/bin/fping|g" "$zabbixconf" >> "$logfile" 2>&1
sudo -E sed -i "s|# Fping6Location=/usr/sbin/fping6|Fping6Location=/usr/bin/fping6|g" "$zabbixconf" >> "$logfile" 2>&1
sudo -E sed -i "s/# StartPingers=1/StartPingers=10/g" "$zabbixconf" >> "$logfile" 2>&1
# Install Zabbix server service
# NOTE(review): "tee -a" appends — re-running this script duplicates the unit
# file content; consider plain "tee" (overwrite) — confirm intent.
log "Installing Zabbix Server Service..."
sudo tee -a /etc/systemd/system/zabbix-server.service > /dev/null <<EOT
[Unit]
Description=Zabbix Server
After=syslog.target network.target mysql.service
[Service]
Type=simple
User=zabbix
ExecStart=/usr/local/sbin/zabbix_server
ExecReload=/usr/local/sbin/zabbix_server -R config_cache_reload
RemainAfterExit=yes
PIDFile=/tmp/zabbix_server.pid
[Install]
WantedBy=multi-user.target
EOT
sudo -E systemctl enable zabbix-server >> $logfile 2>&1
# Install Zabbix agent 2 service (same append caveat as above).
log "Installing Zabbix Agent 2 Service..."
sudo tee -a /etc/systemd/system/zabbix-agent2.service > /dev/null <<EOT
[Unit]
Description=Zabbix Agent 2
After=syslog.target network.target
[Service]
Type=simple
User=zabbix
ExecStart=/usr/local/sbin/zabbix_agent2 -c /usr/local/etc/zabbix_agent2.conf
RemainAfterExit=yes
PIDFile=/tmp/zabbix_agent2.pid
[Install]
WantedBy=multi-user.target
EOT
sudo -E systemctl enable zabbix-agent2 >> $logfile 2>&1
# Install the PHP front end shipped in the source tree and start the daemons.
log "Installing Zabbix PHP Front End..."
# BUGFIX: both paths used "$(unknown)", which invoked a nonexistent command;
# the extracted source directory is ${filename} (set from the archive name).
cd "${srcdir}/${filename}" >> "$logfile" 2>&1 || exit 1
sudo -E mv "${srcdir}/${filename}/ui" /var/www/html/zabbix >> "$logfile" 2>&1
sudo -E chown -R www-data:www-data /var/www/html/zabbix >> "$logfile" 2>&1
# Start up Zabbix
log "Starting Zabbix Server..."
sudo -E service zabbix-server start >> "$logfile" 2>&1
log "Starting Zabbix Agent 2..."
sudo -E service zabbix-agent2 start >> "$logfile" 2>&1
# Clean up the scratch area.
log "Removing temp dir $tmpdir"
rm -rf "$tmpdir" >> "$logfile" 2>&1
| true
|
cb54cec1bc96d7ac1d336279a7645c1cfeedd5ca
|
Shell
|
kinushu/dotfiles
|
/etc/init/init.sh
|
UTF-8
| 2,704
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# macOS workstation bootstrap: Homebrew, rbenv/asdf toolchains, git defaults,
# git-secrets, oh-my-zsh and assorted CLI utilities.
# Temporarily relax error checking: .bash_profile may reference unset vars.
set +eu
source $HOME/.bash_profile
set -eu
echo 'touch ~/.bashrc.local'
touch ~/.bashrc.local
# Do not create .DS_Store files on network/shared folders
defaults write com.apple.desktopservices DSDontWriteNetworkStores true
# brew — install Homebrew unless an Apple-Silicon or Intel install exists.
if [[ -f /opt/homebrew/bin/brew ]] || [[ -f /usr/local/bin/brew ]]; then
  echo 'brew already installed.'
else
  /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
fi
# Make /usr/local/{bin,lib} writable by the current user.
# Without this, command line tools such as RubyMine cannot be installed.
sudo mkdir -p /usr/local/bin/
sudo chown $(whoami):admin /usr/local/bin/
sudo mkdir -p /usr/local/lib/
sudo chown $(whoami):admin /usr/local/lib/
# brew using
brew install git tig gibo zlib
# Ruby — install rbenv + ruby-build unless ~/.rbenv already exists.
if [[ -d ~/.rbenv ]]; then
  echo 'Ruby already installed.'
else
  brew install ruby
  git clone https://github.com/rbenv/rbenv.git ~/.rbenv
  git clone https://github.com/sstephenson/ruby-build.git ~/.rbenv/plugins/ruby-build
  rbenv rehash
  which ruby
  ruby -v
  curl -fsSL https://github.com/rbenv/rbenv-installer/raw/master/bin/rbenv-doctor | bash
fi
## Go, etc.. — language runtimes managed through asdf.
brew install asdf
asdf plugin add python
asdf install python latest
asdf global python latest
asdf plugin add golang
asdf install golang latest
asdf global golang latest
asdf plugin add nodejs
asdf install nodejs latest
asdf global nodejs latest
# git — global defaults.
git config --global pull.rebase false
git config --global core.excludesfile ~/.gitignore_global
# git-secrets — register AWS patterns plus private-key patterns globally.
if [[ -f ~/.git-templates/git-secrets/hooks/commit-msg ]]; then
  echo 'git-secrets already installed.'
else
  brew install git-secrets
  git secrets --install ~/.git-templates/git-secrets
  git config --global init.templatedir '~/.git-templates/git-secrets'
  git secrets --register-aws --global
  git secrets --add 'private_key' --global
  git secrets --add 'private_key_id' --global
  # git secrets --install # for repository folder
  # less ~/.gitconfig # check settings
fi
brew install zsh
# oh-my-zsh
if [[ -d ~/.oh-my-zsh ]]; then
  echo 'oh-my-zsh already installed.'
else
  sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
fi
echo 'library install.'
# Best-effort from here on: individual brew failures should not abort.
set +eu
brew install vim less lesspipe
brew install trash tree
brew install mas
brew install google-cloud-sdk
# go get github.com/sonots/lltsv
## Go lib
ghq get https://github.com/rupa/z
brew install curl
brew install peco fzf jump
brew install yq jq ghq
brew install mountain-duck
mkdir -p ~/duck/Volumes
echo 'fin.'
# git config --global user.name "Name"
# git config --global user.email "EMail"
# git config -l
| true
|
1c76c3b79d3af2eafaf57c50104a6e6e074f08f9
|
Shell
|
zhouxiaokai/scripts
|
/include/composer/redis.sh
|
UTF-8
| 283
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Composer helper: install nrk/predis (pure-PHP Redis client) into a project.
# BUGFIX: shebang was /bin/sh although the body uses bashisms (local, pushd).
#
# Globals: expects print_color and require_insert to be provided by the
#          script that sources this file.
# Arguments: $1 - project working directory (must contain composer.json for
#                 "composer update" to run)
nrk_predis(){
	print_color "https://github.com/nrk/predis"
	local wdir="$1"
	pushd "$wdir" > /dev/null || exit 1
	require_insert "$wdir" "predis/predis" "1.1.*@dev"
	[ -f ./composer.json ] && {
		composer update
	}
	# BUGFIX: the original leaked the pushd and left the shell inside $wdir;
	# restore the caller's working directory.
	popd > /dev/null || exit 1
}
echo "Composer package:"
echo "	nrk_predis:	php redis"
| true
|
71d36fc7b01e2e65dd2787d90903e87f96379b4f
|
Shell
|
hopefulp/sandbox
|
/pypbs/prockill.sh
|
UTF-8
| 387
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Print (dry run) a "kill" command for every python process whose PID falls in
# the inclusive range [$1, $2]. Only echoes — pipe the output to sh (or drop
# the echo) to actually kill.
if [ $# -lt 2 ]; then
	echo "input two numbers: \$1 for start process \$2 for the last process"
	exit 1
fi
i=$1
f=$2
# BUGFIX: "ps aux | grep python" also matched the grep process itself (and any
# line merely containing "python"); pgrep -f matches command lines and
# excludes itself.
for ps in $(pgrep -f python); do
	if [ "$i" -le "$ps" ] && [ "$ps" -le "$f" ]; then
		echo "kill $ps"
	fi
done
#n=$(expr $2 + 1)
#while [ $i -lt $n ]; do
#	echo qdel $i
#	qdel $i
#	i=$(expr $i + 1)
# done
| true
|
36d4a0189ca77b75b4e4aeefd4636a39199ca266
|
Shell
|
iver/balance
|
/bash/install.sh
|
UTF-8
| 610
| 3.484375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Fail fast: abort on errors, unset variables and failed pipeline stages.
set -o errexit
set -o nounset
set -o pipefail

# Install the PostgreSQL client and Erlang/Elixir (via the Erlang Solutions
# repository) on a Debian/Ubuntu system. Needs privileges for apt-get/dpkg.
under_linux(){
  apt-get update
  apt-get -y install postgresql-client
  wget https://packages.erlang-solutions.com/erlang-solutions_1.0_all.deb \
    && sudo dpkg -i erlang-solutions_1.0_all.deb
  apt-get update
  # BUGFIX: added -y so unattended runs don't block on confirmation prompts
  # (matching the postgresql-client install above).
  apt-get -y install esl-erlang
  apt-get -y install elixir
}
# Install Elixir on macOS via Homebrew (brew must already be installed).
under_osx(){
  brew update
  brew install elixir
}
# Dispatch to the installer matching the current platform: Darwin gets the
# Homebrew path, everything else is treated as a Debian-like Linux.
install(){
  unamestr=$(uname)
  case "$unamestr" in
    Darwin) under_osx ;;
    *)      under_linux ;;
  esac
}
# Entry point: load shared project configuration, then run the platform
# specific installer.
# NOTE(review): relies on ACTUAL (repo root) being set by the caller; with
# "set -o nounset" above, an unset ACTUAL aborts here — confirm callers.
main(){
  # shellcheck source=bash/config.sh
  source "${ACTUAL}/bash/config.sh"
  install
}
main "$@"
| true
|
6127b3dafe4aedb97d64d565a681c0f0fa59d271
|
Shell
|
tom-celerity/poc-play-rest-backend
|
/scripts/heroku-deploy-sample.sh
|
UTF-8
| 520
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Sample script: deploy the current local git repo to Heroku.
# It uses the ClearDB (MySQL) and SendGrid (SMTP service) add-ons and assumes
# these add-ons are already provisioned for your app.
# db username value
heroku config:set PPRB_DB_USER=<db_username_value>
# db password value
heroku config:set PPRB_DB_PWD=<db_password_value>
# email recipient for technical emails sent by the app (onStart and onError Play events)
heroku config:set PPRB_TECH_EMAIL=<email_address>
git push heroku master
| true
|
6a62fad74bf0397da1099f9356a95e351ba5a43e
|
Shell
|
kaplanlior/debian-installer
|
/scripts/g-i/lst2rng.sh
|
UTF-8
| 1,431
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# -*-sh-*-
#
# Copyright (C) 2005 Davide Viti <zinosat@tiscali.it>
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# Convert a list of unicode coordinates into a set of unicode ranges
#
# 0000
# 000C
# 0010
# 0011
# 0012
#
# becomes: 0000 000C 0010:0012
#
RANGEFILE=$1

# Collapse a sorted list of hex code points (whitespace-separated, as in the
# example above) into ranges: a run of consecutive values is printed as
# "uSTART:uEND", an isolated value as "uVALUE"; output ends with a newline.
# Arguments: $1 - path of the code point list file
# BUGFIXES vs the original inline code: $RANGEFILE is quoted (an empty
# argument previously made "cat" read stdin and hang), and expr/backticks
# are replaced with shell arithmetic.
lst2rng() {
	local -a glyphs
	# Word-splitting of the unquoted substitution is intentional: one array
	# element per whitespace-separated token.
	glyphs=($(cat "$1" 2>/dev/null))
	local num_glyphs=${#glyphs[@]}
	# Sentinel that can never continue a run, so the final range is flushed.
	glyphs[$num_glyphs]=0
	local start=0 end=0 j
	for ((j = start + 1; j <= num_glyphs; j++)); do
		if (( 0x${glyphs[$j]} == 0x${glyphs[$end]} + 1 )); then
			end=$j
		else
			if (( end == start )); then
				printf 'u%s ' "${glyphs[$start]}"
			else
				printf 'u%s:u%s ' "${glyphs[$start]}" "${glyphs[$end]}"
			fi
			start=$j
			end=$j
		fi
	done
	printf '\n'
}

lst2rng "$RANGEFILE"
| true
|
3811b7e5942ec4800bb1411b0320291e18f16971
|
Shell
|
mauline/filestore
|
/install
|
UTF-8
| 3,131
| 3.578125
| 4
|
[
"Zlib"
] |
permissive
|
#!/bin/bash
# filestore installer: copies the web application into TARGET and sets
# ownership/permissions for the http daemon user.
# Target directory (must be filled in before running)
TARGET=
# User/group that owns the http daemon. Values below are for centos/RHEL.
HTTPD_USR=apache
HTTPD_GRP=apache
# Safety checks: require root, a configured existing TARGET, and an existing
# httpd user/group — abort with a message otherwise.
[ $(id -u) -eq 0 ] || {
	echo "This script must be run as root!" >&2
	exit 1
}
[ "${TARGET}" ] || {
	echo "You must define the target directory before running $0" >&2
	exit 1
}
[ -d "${TARGET}" ] || {
	echo "Target directory ${TARGET} is not a directory!" >&2
	exit 1
}
getent passwd "${HTTPD_USR}" > /dev/null || {
	echo "The user ${HTTPD_USR} does not exist on this system!" >&2
	exit 1
}
getent group "${HTTPD_GRP}" > /dev/null || {
	echo "The group ${HTTPD_GRP} does not exist on this system!" >&2
	exit 1
}
# Create directories. data/ is 02770 (setgid) so files created by the web
# server inherit the group; the rest are group-readable only.
install -d -m 02770 -o root -g ${HTTPD_GRP} ${TARGET}/data
install -d -m 0750 -o root -g ${HTTPD_GRP} ${TARGET}/images
install -d -m 0750 -o root -g ${HTTPD_GRP} ${TARGET}/po
install -d -m 0750 -o root -g ${HTTPD_GRP} ${TARGET}/po/de
install -d -m 0750 -o root -g ${HTTPD_GRP} ${TARGET}/po/de/LC_MESSAGES
install -d -m 0750 -o root -g ${HTTPD_GRP} ${TARGET}/upload-templates
# Copy files (0640: readable by the httpd group, never world-readable).
install -m 0640 -o root -g ${HTTPD_GRP} www/layout.inc ${TARGET}/layout.inc
install -m 0640 -o root -g ${HTTPD_GRP} www/receive-generate.php ${TARGET}/receive-generate.php
install -m 0640 -o root -g ${HTTPD_GRP} www/receive.php ${TARGET}/receive.php
install -m 0640 -o root -g ${HTTPD_GRP} images/arrow-left-red-32x16.png ${TARGET}/images/arrow-left-red-32x16.png
install -m 0640 -o root -g ${HTTPD_GRP} images/arrow-right-green-32x16.png ${TARGET}/images/arrow-right-green-32x16.png
install -m 0640 -o root -g ${HTTPD_GRP} images/filestore-128x128.png ${TARGET}/images/filestore-128x128.png
install -m 0640 -o root -g ${HTTPD_GRP} images/filestore-96x96.png ${TARGET}/images/filestore-96x96.png
install -m 0640 -o root -g ${HTTPD_GRP} images/filestore.ico ${TARGET}/images/filestore.ico
install -m 0640 -o root -g ${HTTPD_GRP} www/filestore.js ${TARGET}/filestore.js
install -m 0640 -o root -g ${HTTPD_GRP} www/send-upload.php ${TARGET}/send-upload.php
install -m 0640 -o root -g ${HTTPD_GRP} www/filestore.inc ${TARGET}/filestore.inc
install -m 0640 -o root -g ${HTTPD_GRP} www/send.php ${TARGET}/send.php
install -m 0640 -o root -g ${HTTPD_GRP} www/index.php ${TARGET}/index.php
install -m 0640 -o root -g ${HTTPD_GRP} www/upload-templates/index.php ${TARGET}/upload-templates/index.php
install -m 0640 -o root -g ${HTTPD_GRP} www/upload-templates/receive-upload.php ${TARGET}/upload-templates/receive-upload.php
install -m 0640 -o root -g ${HTTPD_GRP} www/upload-templates/remove.php ${TARGET}/upload-templates/remove.php
install -m 0640 -o root -g ${HTTPD_GRP} www/upload-templates/remove-action.php ${TARGET}/upload-templates/remove-action.php
# Copy each translation's .po source under $TARGET and compile it into the
# binary .mo catalog gettext actually loads at runtime.
for T in www/po/*/LC_MESSAGES/filestore.po; do
  # Map www/po/... to the corresponding path under $TARGET (parameter
  # expansion replaces the original echo|sed pipeline; same result).
  PO="${TARGET}${T#www}"
  MO=$(dirname "${PO}")/$(basename -s .po "$T").mo
  install -m 0640 -o root -g ${HTTPD_GRP} "${T}" "${PO}"
  msgfmt --output-file="${MO}" "${PO}"
  # BUGFIX: "root.group" is the deprecated/ambiguous chown separator; use
  # the standard "user:group" form.
  chown root:${HTTPD_GRP} "${MO}"
  chmod 0640 "${MO}"
done
| true
|
a340e83c818829e2c7558be49bcb670308b4797b
|
Shell
|
ibizaman/rpi
|
/fileserver/aria2.sh
|
UTF-8
| 5,784
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash

# Parse and validate positional arguments: [HOST] DOMAIN DOWNLOAD_PATH.
# HOST is consumed only if the global $host is empty; each value is validated
# against the entries available in the local pass(1) password store. Also
# fetches (or generates) the aria2 RPC secret for the host.
# Globals set: host, domain, aria2_default_download_path, aria2_secret_key,
#              aria2_secret, help_args
# NOTE(review): relies on a "contains" helper defined elsewhere — verify it
# is sourced before this function runs.
function arguments() {
    if [ -z "$host" ]; then
        help_args="$help_args HOST"
        host="$1"
        available_hosts="$(ls ~/.password-store/server-passwords)"
        if [ -z "$host" ] || ! contains "$available_hosts" "$host"; then
            echo "$help_args DOMAIN ARIA2_DEFAULT_DOWNLOAD_PATH"
            echo "HOST must be one of:"
            echo "$available_hosts"
            exit 1
        fi
        shift
    fi

    domain="$1"
    # Known Mailgun domains are derived from the pass entries mg.<domain>.
    available_domains="$(ls -d ~/.password-store/mailgun.com/mg.* | xargs -n1 basename | cut -d '.' -f 2-)"
    if [ -z "$domain" ] || ! contains "$available_domains" "$domain"; then
        echo "$help_args DOMAIN ARIA2_DEFAULT_DOWNLOAD_PATH"
        echo "DOMAIN must be one of:"
        echo "$available_domains"
        exit 1
    fi
    shift

    aria2_default_download_path="$1"
    if [ -z "$aria2_default_download_path" ]; then
        echo "$help_args DOMAIN ARIA2_DEFAULT_DOWNLOAD_PATH"
        echo "ARIA2_DEFAULT_DOWNLOAD_PATH cannot be empty"
        exit 1
    fi
    shift

    # Fetch the RPC secret from pass, generating it on first use.
    aria2_secret_key="aria2/$host.secret"
    aria2_secret="$(pass $aria2_secret_key)"
    if [ -z "$aria2_secret" ]; then
        aria2_secret="$(pass generate --no-symbols $aria2_secret_key)"
    fi
    # BUGFIX: the error message was missing "echo", so the message string
    # itself was executed as a command instead of being printed.
    [ -z "$aria2_secret" ] && echo "Could not find nor generate $aria2_secret_key secret" >&2 && exit 1
}
function install_remote() {
pacman -Syu --noconfirm --needed \
aria2 \
darkhttpd \
git \
unzip \
|| exit 1
if ! grep aria2 /etc/iproute2/rt_tables; then
echo -e "\n10 aria2" >> /etc/iproute2/rt_tables
fi
cd /opt/ || exit 1
curl -L -O https://github.com/mayswind/AriaNg/releases/download/1.1.1/AriaNg-1.1.1.zip
if ! [ -d AriaNg-1.1.1 ]; then
unzip AriaNg-1.1.1.zip -d AriaNg-1.1.1
fi
chown -R aria2: AriaNg-1.1.1
groupadd --system downloader
useradd --create-home --home-dir /var/lib/aria2 --groups downloader aria2
su - aria2 <<ARIA2
touch session.lock
ARIA2
mkdir -p /etc/aria2
mkdir -p "$aria2_default_download_path"
chown aria2:aria2 "$aria2_default_download_path"
cat > /etc/aria2/aria2.conf <<ARIA2CONF
dir=$aria2_default_download_path
rpc-secret=$aria2_secret
ARIA2CONF
cat > /etc/systemd/system/aria2.service <<ARIA2SERVICE
[Unit]
Description=Aria2 Service
After=openvpn-client@privateinternetaccess.service
[Service]
User=aria2
Group=aria2
ExecStart=/usr/bin/aria2c \\
--enable-rpc \\
--rpc-allow-origin-all \\
--rpc-listen-port=6801 \\
--async-dns=false \\
--interface=tun0 \\
--bt-lpd-interface wlan0 \\
--save-session /var/lib/aria2/session.lock \\
--input-file /var/lib/aria2/session.lock \\
--conf-path=/etc/aria2/aria2.conf \\
--continue
[Install]
WantedBy=default.target
ARIA2SERVICE
cat > /etc/systemd/system/aria2web.service <<ARIA2WEBSERVICE
[Unit]
Description=Aria2 Web Service
After=network.target
[Service]
User=aria2
Group=aria2
WorkingDirectory=/opt/AriaNg-1.1.1
ExecStart=/usr/bin/darkhttpd . --port 6810
[Install]
WantedBy=default.target
ARIA2WEBSERVICE
cat > /etc/systemd/system/aria2files.service <<ARIA2FILESSERVICE
[Unit]
Description=Aria2 Files Service
After=network.target
[Service]
User=aria2
Group=aria2
WorkingDirectory=$aria2_default_download_path
ExecStart=/usr/bin/darkhttpd . --port 6811
[Install]
WantedBy=default.target
ARIA2FILESSERVICE
chown -R aria2: /opt/webui-aria2
cat << ARIA2ROUTEUP > /etc/openvpn/client/privateinternetaccess/conf_up/aria2_up.sh
#! /bin/bash
set -x
# add the vpn device as default route for this routing table
ip route add default via \$route_vpn_gateway dev \$dev table aria2
# add rules that all traffic going to the gateway as well as
# all traffic comming from my local VPN is routed through the
# VPN's gateway
ip rule add from \$ifconfig_local/32 table aria2
ip rule add to \$route_vpn_gateway/32 table aria2
# and flush the cache to make sure that the changes were commited
ip route flush cache
iptables -A OUTPUT -o wlan0 -m owner --uid-owner \$(id -u aria2) -j DROP
iptables -A OUTPUT -o eth0 -m owner --uid-owner \$(id -u aria2) -j DROP
exit 0
ARIA2ROUTEUP
chmod a+x /etc/openvpn/client/privateinternetaccess/conf_up/aria2_up.sh
cat << ARIA2ROUTEDOWN > /etc/openvpn/client/privateinternetaccess/conf_down/aria2_down.sh
#! /bin/bash
set -x
# add the vpn device as default route for this routing table
ip route del default via \$route_vpn_gateway dev \$dev table aria2
# add rules that all traffic going to the gateway as well as
# all traffic comming from my local VPN is routed through the
# VPN's gateway
ip rule del from \$ifconfig_local/32 table aria2
ip rule del to \$route_vpn_gateway/32 table aria2
# and flush the cache to make sure that the changes were commited
ip route flush cache
iptables -D OUTPUT -o wlan0 -m owner --uid-owner \$(id -u aria2) -j DROP
iptables -D OUTPUT -o eth0 -m owner --uid-owner \$(id -u aria2) -j DROP
exit 0
ARIA2ROUTEDOWN
chmod a+x /etc/openvpn/client/privateinternetaccess/conf_down/aria2_down.sh
systemctl restart openvpn-client@privateinternetaccess
systemctl daemon-reload
systemctl restart aria2
systemctl restart aria2web
systemctl restart aria2files
systemctl enable aria2
systemctl enable aria2web
systemctl enable aria2files
# aria2web
haproxysubdomains add /etc/haproxy/haproxy.cfg https "$domain" aria2 8888
systemctl reload haproxy
# jsondispatch
upnpport configure /etc/upnpport/upnpport.yaml add 8850
systemctl reload upnpport
echo You can find the secret token in /opt/webui-aria2/src/js/services/configuration.js
}
# Local-machine setup placeholder; intentionally a no-op (":" is the shell
# null command).
function install_local() {
    :
}
| true
|
3f569a2fca54b909813507da7a8826e634663599
|
Shell
|
beanit/iec61850bean
|
/asn1/replace-berboolean.sh
|
UTF-8
| 296
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Replace ASN1bean's BerBoolean import with the IEC 61850 specific variant in
# every Java source file, so that "true" is coded as 0x01 instead of 0xff.
# Operates on the repository relative to this script's own location.
cd "$(dirname "$0")" || exit 1
# BUGFIX: NUL-delimited find/xargs handles file paths containing whitespace;
# the original newline-based pipeline silently mangled such paths.
find ../ -iname "*.java" -print0 | xargs -0 sed -i 's/import com\.beanit\.asn1bean\.ber\.types\.BerBoolean/import com\.beanit\.iec61850bean\.internal\.BerBoolean/g'
| true
|
b69d8df038a8e6dd05f6e03cf94dea0d0ac64a34
|
Shell
|
Shuang777/kaldi-2016
|
/egs/babel/s5c/mysteps/train_nnet.sh
|
UTF-8
| 21,860
| 2.703125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
# Copyright 2012/2013 Brno University of Technology (Author: Karel Vesely)
# Apache 2.0
{
set -o pipefail
# Begin configuration.
config= # config, which is also sent to all other scripts
# NETWORK INITIALIZATION
mlp_init= # select initialized MLP (override initialization)
mlp_proto= # select network prototype (initialize it)
proto_opts= # non-default options for 'make_nnet_proto.py'
feature_transform= # provide feature transform (=splice,rescaling,...) (don't build new one)
network_type=dnn # (dnn,cnn1d,cnn2d,lstm)
#
hid_layers=4 # nr. of hidden layers (prior to sotfmax or bottleneck)
hid_dim=1024 # select hidden dimension
bn_dim= # set a value to get a bottleneck network
dbn= # select DBN to prepend to the MLP initialization
#
init_opts= # options, passed to the initialization script
logistic=false # use logistic regression on top layer (a quick fix)
# FEATURE PROCESSING
# feature config (applies always)
cmvn_opts="--norm-vars=false"
delta_opts=
sliding_cmvn=false
# feature_transform:
splice=5 # temporal splicing
splice_step=1 # stepsize of the splicing (1 == no gap between frames)
splice_opts=
feat_type= # traps?
# feature config (applies to feat_type traps)
traps_dct_basis=11 # nr. od DCT basis (applies to `traps` feat_type, splice10 )
# feature config (applies to feat_type transf) (ie. LDA+MLLT, no fMLLR)
transf=
splice_after_transf=5
splice_trans=true
# feature config (applies to feat_type lda)
lda_dim=300 # LDA dimension (applies to `lda` feat_type)
trans_mat=
cmvn_opts=""
cmvn_type=channel # channel or sliding
# LABELS
labels= # use these labels to train (override deafault pdf alignments)
labels_cv=
num_tgt= # force to use number of outputs in the MLP (default is autodetect)
# TRAINING SCHEDULER
learn_rate=0.008 # initial learning rate
train_opts= # options, passed to the training script
train_tool= # optionally change the training tool
# OTHER
use_gpu_id= # manually select GPU id to run on, (-1 disables GPU)
seed=777 # seed value used for training data shuffling and initialization
cv_subset_factor=0.1
scp_cv=
cv_base=utt # utt, spk, or random
resume_anneal=false
transdir=
resave=true
clean_up=true
# semi-supervised training
supcopy=1
semidata=
semialidir=
semitransdir=
semi_layers=
semi_cv=false # also use semi data for cross-validation
min_iters=
max_iters=20
updatable_layers=
# mpi training
mpi_jobs=0
mpi_mode=
frames_per_reduce=
reduce_type=
reduce_content=
# precondition
precondition=
alpha=4
max_norm=10
rank_in=30
rank_out=60
update_period=4
max_change_per_sample=0.075
num_samples_history=2000
# ivector adaptation
utt2spk=
ivector_scp=
# End configuration.
echo "$0 $@" # Print the command line for logging
. path.sh || exit 1;
. parse_options.sh || exit 1;
if [ $# != 3 ]; then
echo "Usage: $0 <data-dir> <ali-dir> <exp-dir>"
echo " e.g.: $0 data/train exp/mono_ali exp/mono_nnet"
echo "main options (for others, see top of script file)"
echo " --config <config-file> # config containing options"
exit 1;
fi
data=$1
alidir=$2
dir=$3
[ -z "$transdir" ] && transdir=$alidir
for f in $alidir/final.mdl $data/feats.scp; do
[ ! -f $f ] && echo "$0: no such file $f" && exit 1;
done
if [ -z "$labels" ]; then
[ ! -f $alidir/ali.1.gz ] && echo "$0: no such file $alidir/ali.1.gz" && exit 1;
fi
echo
echo "# INFO"
echo "$0 : Training Neural Network"
printf "\t dir : $dir \n"
printf "\t Train-set : $data $alidir \n"
mkdir -p $dir/{log,nnet}
# skip when already trained
#[ -e $dir/final.nnet ] && printf "\nSKIPPING TRAINING... ($0)\nnnet already trained : $dir/final.nnet ($(readlink $dir/final.nnet))\n\n" && exit 0
###### PREPARE ALIGNMENTS ######
echo
echo "# PREPARING ALIGNMENTS"
if [ ! -z "$labels" ]; then
echo "Using targets '$labels' (by force)"
labels_tr="$labels"
if [ ! -z "$labels_cv" ]; then
labels_cv="$labels_cv"
else
labels_cv="$labels"
fi
else
echo "Using PDF targets from dirs '$alidir' '$alidir_cv'"
# define pdf-alignment rspecifiers
labels_tr_ali="ark:ali-to-pdf $alidir/final.mdl \"ark:gunzip -c $alidir/ali.*.gz |\" ark:- |" # for analyze-counts.
labels_tr="ark:ali-to-pdf $alidir/final.mdl \"ark:gunzip -c $alidir/ali.*.gz |\" ark,t:- | ali-to-post ark,t:- ark:- |"
labels_cv="$labels_tr"
if [ ! -z $semialidir ]; then
labels_tr_ali="ark:ali-to-pdf $alidir/final.mdl \"ark:gunzip -c $alidir/ali.*.gz $semialidir/ali.*.gz |\" ark:- |" # for analyze-counts.
labels_tr="ark:ali-to-pdf $alidir/final.mdl \"ark:gunzip -c $alidir/ali.*.gz $semialidir/ali.*.gz |\" ark,t:- | ali-to-post ark,t:- ark:- |"
fi
# get pdf-counts, used later to post-process DNN posteriors
analyze-counts --binary=false "$labels_tr_ali" $dir/ali_train_pdf.counts || exit 1
# copy the old transition model, will be needed by decoder
copy-transition-model --binary=false $alidir/final.mdl $dir/final.mdl || exit 1
# copy the tree
cp $alidir/tree $dir/tree || exit 1
fi
# shuffle the list
echo "Preparing train/cv lists :"
if [ -z $scp_cv ]; then
num_utts_all=$(wc $data/feats.scp | awk '{print $1}')
num_utts_subset=$(awk "BEGIN {print(int( $num_utts_all * $cv_subset_factor))}")
echo "Split out cv feats from training data using cv_base $cv_base"
if [ $cv_base == spk ]; then
cat $data/spk2utt | utils/shuffle_list.pl --srand ${seed:-777} |\
awk -v num_utts_subset=$num_utts_subset '
BEGIN{count=0;}
{
count += NF-1;
if (count > num_utts_subset)
exit;
for(i=2; i<=NF; i++)
print $i;
}' > $dir/cv.utt
cat $data/feats.scp | utils/filter_scp.pl --exclude $dir/cv.utt | \
utils/shuffle_list.pl --srand ${seed:-777} > $dir/shuffle.train.scp
cat $data/feats.scp | utils/filter_scp.pl $dir/cv.utt | \
utils/shuffle_list.pl --srand ${seed:-777} > $dir/shuffle.cv.scp
elif [ $cv_base == utt ]; then
# chose last num_utts_subset utterance
tail -$num_utts_subset $data/feats.scp > $dir/shuffle.cv.scp
cat $data/feats.scp | utils/filter_scp.pl --exclude $dir/shuffle.cv.scp | \
utils/shuffle_list.pl --srand ${seed:-777} > $dir/shuffle.train.scp
else
cat $data/feats.scp | utils/shuffle_list.pl --srand ${seed:-777} > $dir/shuffle.scp
head -$num_utts_subset $dir/shuffle.scp > $dir/shuffle.cv.scp
cat $dir/shuffle.scp | utils/filter_scp.pl --exclude $dir/shuffle.cv.scp > $dir/shuffle.train.scp
fi
else
echo "Using cv feats from argument"
cat $data/feats.scp | utils/shuffle_list.pl > $dir/shuffle.train.scp
cat $scp_cv | utils/shuffle_list.pl > $dir/shuffle.cv.scp
fi
if [ ! -z "$semidata" ]; then
echo "Preparing semi-supervised lists"
[ -f $dir/sup.train.copy.scp ] && rm -f $dir/sup.train.copy.scp
echo "Copy supervised data for $supcopy times"
for ((c = 1; c <= $supcopy; c++))
do
cat $dir/shuffle.train.scp >> $dir/sup.train.copy.scp
done
if [ "$semi_cv" == true ]; then
num_semi_utts_all=$(wc $semidata/feats.scp | awk '{print $1}')
num_semi_utts_subset=$(awk "BEGIN {print(int( $num_semi_utts_all * $cv_subset_factor))}")
tail -$num_semi_utts_subset $semidata/feats.scp >> $dir/shuffle.cv.scp
cat $semidata/feats.scp | utils/filter_scp.pl --exclude $dir/shuffle.cv.scp > $dir/semi_feats_train.scp
else
cp $semidata/feats.scp $dir/semi_feats_train.scp
fi
cat $dir/semi_feats_train.scp $dir/sup.train.copy.scp | utils/shuffle_list.pl --srand ${seed:-777} > $dir/shuffle.semitrain.scp
cat $semidata/utt2spk $data/utt2spk | sort > $dir/semitrain.utt2spk
cat $semidata/cmvn.scp $data/cmvn.scp | sort > $dir/semitrain.cmvn.scp
(set -e;
cd $dir
if [ ! -f cmvn.scp ]; then
ln -s semitrain.cmvn.scp cmvn.scp
ln -s semitrain.utt2spk utt2spk;
mv shuffle.train.scp shuffle.train.scp.bak;
ln -s shuffle.semitrain.scp shuffle.train.scp
fi
)
data=$dir
fi
###### PREPARE FEATURES ######
echo
echo "# PREPARING FEATURES"
#read the features
# Auto-detect the feature type when not given: 'delta' by default, 'lda' if
# the transform dir has an LDA matrix, 'fmllr' if it also has fMLLR transforms.
if [ -z "$feat_type" ]; then
  feat_type=delta;
  if [ ! -z "$transdir" ] && [ -f $transdir/final.mat ]; then
    feat_type=lda;
    if [ -f $transdir/trans.1 ]; then
      feat_type=fmllr;
    fi
  fi
fi
# For LDA/fMLLR, copy splice options, LDA matrix and tree so the experiment
# dir is self-contained.
if [ $feat_type == lda ] || [ $feat_type == fmllr ]; then
  splice_opts=`cat $transdir/splice_opts 2>/dev/null`
  cp $transdir/splice_opts $dir 2>/dev/null
  cp $transdir/final.mat $dir 2>/dev/null # any LDA matrix...
  cp $transdir/tree $dir
fi
echo $cmvn_opts > $dir/cmvn_opts # keep track of options to CMVN.
echo $cmvn_type > $dir/cmvn_type # keep track of type of CMVN
# Build the CMVN stage of the feature pipeline: per-utterance sliding-window
# CMVN, or per-speaker/channel CMVN from precomputed stats.
if [ $cmvn_type == sliding ]; then
  cmvn_feats="apply-cmvn-sliding $cmvn_opts --center=true"
elif [ $cmvn_type == channel ]; then
  cmvn_feats="apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp"
else
  echo "Wrong cmvn_type $cmvn_type" && exit 1
fi
echo "$0: feature type is $feat_type"
# Assemble the train/cv feature rspecifiers for the chosen feature type.
case $feat_type in
  raw) feats_tr="scp:$dir/shuffle.train.scp"
    feats_cv="scp:$dir/shuffle.cv.scp"
    ;;
  cmvn|traps) feats_tr="ark,s,cs:$cmvn_feats scp:$dir/shuffle.train.scp ark:- |"
    feats_cv="ark,s,cs:$cmvn_feats scp:$dir/shuffle.cv.scp ark:- |"
    ;;
  delta) feats_tr="ark,s,cs:$cmvn_feats scp:$dir/shuffle.train.scp ark:- | add-deltas $delta_opts ark:- ark:- |"
    feats_cv="ark,s,cs:$cmvn_feats scp:$dir/shuffle.cv.scp ark:- | add-deltas $delta_opts ark:- ark:- |"
    ;;
  lda|fmllr) feats_tr="ark,s,cs:$cmvn_feats scp:$dir/shuffle.train.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |"
    feats_cv="ark,s,cs:$cmvn_feats scp:$dir/shuffle.cv.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |"
    cp $transdir/final.mat $dir
    ;;
  iveclda)
    # i-vector input: apply an externally trained transform, then length-normalize.
    # (Fixed message typo: 'iveclad' -> 'iveclda'.)
    [ -z $transmat ] && echo "please provide trans_mat for iveclda feature" && exit 1
    feats_tr="ark:ivector-transform $transmat scp:$dir/shuffle.train.scp ark:- | ivector-normalize-length ark:- ark:- |"
    feats_cv="ark:ivector-transform $transmat scp:$dir/shuffle.cv.scp ark:- | ivector-normalize-length ark:- ark:- |"
    ;;
  *) echo "$0: invalid feature type $feat_type" && exit 1;
esac
# For fMLLR features, append the speaker transforms to the pipeline
# (optionally also transforms estimated on the unlabelled data).
if [ -f $transdir/trans.1 ] && [ $feat_type == "fmllr" ]; then
  if [ -z $semitransdir ]; then
    echo "$0: using transforms from $transdir"
    feats_cv="$feats_cv transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $transdir/trans.*|' ark:- ark:- |"
    feats_tr="$feats_tr transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $transdir/trans.*|' ark:- ark:- |"
  else
    echo "$0: using transform from $transdir and $semitransdir"
    feats_cv="$feats_cv transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $transdir/trans.* $semitransdir/trans.* |' ark:- ark:- |"
    feats_tr="$feats_tr transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $transdir/trans.* $semitransdir/trans.* |' ark:- ark:- |"
  fi
fi
[ -z "$splice_opts" ] && splice_opts=`cat $transdir/splice_opts 2>/dev/null`
#get feature dim
# re-save the shuffled features, so they are stored sequentially on the disk in /tmp/
if [ $resave == true ]; then
  tmpdir=$dir/feature_shuffled; mkdir -p $tmpdir;
  # Materialize the full pipelines into sequential .ark files + new scp lists.
  copy-feats "$feats_tr" ark,scp:$tmpdir/feats.tr.ark,$dir/train.scp
  copy-feats "$feats_cv" ark,scp:$tmpdir/feats.cv.ark,$dir/cv.scp
  # remove data on exit...
  [ "$clean_up" == true ] && trap "echo \"Removing features tmpdir $tmpdir @ $(hostname)\"; rm -r $tmpdir" EXIT
else
  # No re-saving: just link the shuffled lists under the expected names.
  [ -f $dir/train.scp ] && rm -f $dir/train.scp
  [ -f $dir/cv.scp ] && rm -f $dir/cv.scp
  (cd $dir; ln -s shuffle.train.scp train.scp; ln -s shuffle.cv.scp cv.scp)
fi
# print the list sizes
wc -l $dir/train.scp $dir/cv.scp
###### PREPARE FEATURE PIPELINE ######
# filter the features
# Keep only utterances that actually have targets (posteriors/alignments):
# extract the utt-id column of each label archive and filter the scp lists.
copy-post "$labels_tr" "ark,t:|awk '{print \$1}'" > $dir/ali_train.txt
copy-post "$labels_cv" "ark,t:|awk '{print \$1}'" > $dir/ali_cv.txt
cat $dir/train.scp | utils/filter_scp.pl $dir/ali_train.txt > $dir/filtered.train.scp
cat $dir/cv.scp | utils/filter_scp.pl $dir/ali_cv.txt > $dir/filtered.cv.scp
# For MPI training, split the lists into $mpi_jobs shards, longest-first;
# distribute_scp.pl prints the minimum #frames over the shards.
if [ "$mpi_jobs" != 0 ]; then
  min_frames_tr=$(feat-to-len scp:$dir/filtered.train.scp ark,t:- | sort -k2 -n -r | myutils/distribute_scp.pl $mpi_jobs $dir/train_list)
  min_frames_cv=$(feat-to-len scp:$dir/filtered.cv.scp ark,t:- | sort -k2 -n -r | myutils/distribute_scp.pl $mpi_jobs $dir/cv_list)
  for n in $(seq $mpi_jobs); do
    cat $dir/filtered.train.scp | utils/filter_scp.pl $dir/train_list.$n.scp > $dir/train.$n.scp
    cat $dir/filtered.cv.scp | utils/filter_scp.pl $dir/cv_list.$n.scp > $dir/cv.$n.scp
  done
  # Number of all-reduce steps per iteration, bounded by the smallest shard.
  reduce_per_iter_tr=$(echo $min_frames_tr/$frames_per_reduce | bc)
  echo "reduce_per_iter_tr=$reduce_per_iter_tr"
  # MPI_RANK is substituted per-process by the MPI training tool.
  feats_tr_mpi="ark:copy-feats scp:$dir/train.MPI_RANK.scp ark:- |"
  feats_cv_mpi="ark:copy-feats scp:$dir/cv.MPI_RANK.scp ark:- |"
  if [[ `hostname` =~ stampede ]]; then
    train_tool="ibrun nnet-train-frmshuff-mpi"
  else
    train_tool="mpirun -n $mpi_jobs nnet-train-frmshuff-mpi"
  fi
  # 'simulation' mode: run single-process training on shard 1 only,
  # disabling all reduce-related options.
  if [ $mpi_mode == simulation ]; then
    train_tool="nnet-train-frmshuff"
    reduce_per_iter_tr=
    frames_per_reduce=
    reduce_type=
    reduce_content=
    feats_tr_mpi="ark:copy-feats scp:$dir/train.1.scp ark:- |"
    feats_cv_mpi="ark:copy-feats scp:$dir/cv.1.scp ark:- |"
  fi
fi
feats_tr="ark:copy-feats scp:$dir/filtered.train.scp ark:- |"
feats_cv="ark:copy-feats scp:$dir/filtered.cv.scp ark:- |"
echo substituting feats_tr with $feats_tr
echo substituting feats_cv with $feats_cv
#create a 10k utt subset for global cmvn estimates
head -n 10000 $dir/filtered.train.scp > $dir/filtered.train.scp.10k
# get feature dim
echo "Getting feature dim : "
# NOTE(review): the sed pattern targets 'scp:$dir/train.scp', but feats_tr now
# references filtered.train.scp, so this substitution is likely a no-op —
# feat-to-dim still works (it only reads one matrix), just less efficiently.
feats_tr1=$(echo $feats_tr | sed -e "s#scp:$dir/train.scp#\"scp:head -1 $dir/train.scp |\"#g")
feat_dim=$(feat-to-dim --print-args=false "$feats_tr1" -)
echo "Feature dim is : $feat_dim"
# Now we will start building complex feature_transform which will
# be forwarded in CUDA to have fast run-time.
#
# We will use 1GPU for both feature_transform and MLP training in one binary tool.
# This is against the kaldi spirit to have many independent small processing units,
# but it is necessary because of compute exclusive mode, where GPU cannot be shared
# by multiple processes.
if [ ! -z "$feature_transform" ]; then
  # A pre-computed transform was supplied: copy it into the experiment dir.
  echo "Using pre-computed feature-transform : '$feature_transform'"
  tmp=$dir/$(basename $feature_transform)
  cp $feature_transform $tmp; feature_transform=$tmp
elif [ "$splice_transform" == true ]; then
  # Generate the splice transform
  echo "Using splice +/- $splice , step $splice_step"
  feature_transform=$dir/tr_splice$splice-$splice_step.nnet
  utils/nnet/gen_splice.py --fea-dim=$feat_dim --splice=$splice --splice-step=$splice_step > $feature_transform
  # Choose further processing of spliced features
  echo "Feature type : $feat_type"
  case $feat_type in
    plain)
    ;;
    traps)
      #generate hamming+dct transform
      feature_transform_old=$feature_transform
      feature_transform=${feature_transform%.nnet}_hamm_dct${traps_dct_basis}.nnet
      echo "Preparing Hamming DCT transform into : $feature_transform"
      #prepare matrices with time-transposed hamming and dct
      utils/nnet/gen_hamm_mat.py --fea-dim=$feat_dim --splice=$splice > $dir/hamm.mat
      utils/nnet/gen_dct_mat.py --fea-dim=$feat_dim --splice=$splice --dct-basis=$traps_dct_basis > $dir/dct.mat
      #put everything together
      compose-transforms --binary=false $dir/dct.mat $dir/hamm.mat - | \
        transf-to-nnet - - | \
        nnet-concat --binary=false $feature_transform_old - $feature_transform || exit 1
    ;;
    transf)
      feature_transform_old=$feature_transform
      feature_transform=${feature_transform%.nnet}_transf_splice${splice_after_transf}.nnet
      # BUGFIX: the original '[ -z $transf ] && $transdir/final.mat' tried to
      # *execute* the matrix file; it must assign the default path instead
      # (the next line then checks that $transf exists).
      [ -z $transf ] && transf=$transdir/final.mat
      [ ! -f $transf ] && echo "Missing transf $transf" && exit 1
      # Re-measure the dim after applying the transform, then append
      # transform + post-transform splicing to the nnet.
      feat_dim=$(feat-to-dim "$feats_tr1 nnet-forward ${utt2spk:+ --utt2spk-rspecifier=ark:$utt2spk} ${ivector_scp:+ --ivector-rspecifier=scp:$ivector} 'nnet-concat $feature_transform_old \"transf-to-nnet $transf - |\" - |' ark:- ark:- |" -)
      nnet-concat --binary=false $feature_transform_old \
        "transf-to-nnet $transf - |" \
        "utils/nnet/gen_splice.py --fea-dim=$feat_dim --splice=$splice_after_transf |" \
        $feature_transform || exit 1
    ;;
    lda)
      echo "LDA transform applied already!";
    ;;
    fmllr)
      echo "Fmllr same as plain";
    ;;
    iveclda)
      echo "LDA transform already applied!";
    ;;
    *)
      echo "Unknown feature type $feat_type"
      exit 1;
    ;;
  esac
  # keep track of feat_type
  echo $feat_type > $dir/feat_type
  # Renormalize the MLP input to zero mean and unit variance
  feature_transform_old=$feature_transform
  feature_transform=${feature_transform%.nnet}_cmvn-g.nnet
  echo "Renormalizing MLP input features into $feature_transform"
  # Estimate global CMVN on the 10k-utterance subset forwarded through the
  # transform built so far, convert it to a nnet layer, and append it.
  $mpi_run nnet-forward --use-gpu=yes \
    $feature_transform_old "$(echo $feats_tr | sed 's|train.scp|train.scp.10k|')" \
    ark:- 2>$dir/log/nnet-forward-cmvn.log |\
    compute-cmvn-stats ark:- - | cmvn-to-nnet - - |\
    nnet-concat --binary=false $feature_transform_old - $feature_transform
else
  # raw input
  # No splicing requested: the transform is just a global CMVN layer.
  feature_transform=$dir/cmvn-g.nnet
  compute-cmvn-stats "$(echo $feats_tr | sed 's|train.scp|train.scp.10k|')" - |\
    cmvn-to-nnet --binary=false - $feature_transform
fi
###### MAKE LINK TO THE FINAL feature_transform, so the other scripts will find it ######
(cd $dir; [ ! -f final.feature_transform ] && ln -s $(basename $feature_transform) final.feature_transform )
###### INITIALIZE THE NNET ######
echo
echo "# NN-INITIALIZATION"
[ ! -z "$mlp_init" ] && echo "Using pre-initialized network '$mlp_init'";
# If a network prototype was supplied, initialize the net from it
# (the initialization log is shown only on failure).
if [ ! -z "$mlp_proto" ]; then
  echo "Initializing using network prototype '$mlp_proto'";
  mlp_init=$dir/nnet.init; log=$dir/log/nnet_initialize.log
  nnet-initialize $mlp_proto $mlp_init 2>$log || { cat $log; exit 1; }
fi
# Neither an initialized net nor a prototype given: derive input/output dims,
# generate a prototype, and initialize the network from it.
if [[ -z "$mlp_init" && -z "$mlp_proto" ]]; then
  echo "Getting input/output dims :"
  #initializing the MLP, get the i/o dims...
  #input-dim
  num_fea=$(feat-to-dim "$feats_tr1 nnet-forward $feature_transform ark:- ark:- |" - )
  { #optionally take output dim of DBN
    if [ ! -z $dbn ] ; then
      if [ ! -z $ivector_scp ]; then
        # If i-vectors are appended, widen the DBN's first component so it
        # accepts the extra i-vector dims as side information.
        num_ivec_dim=$(copy-vector "scp:head -1 $ivector_scp |" ark,t:- | awk '{print NF-3}')
        num_input=$(nnet-info $dbn | grep 'component 1 :' | tr ',' ' ' | awk '{print $6}')
        if [ $num_input == $num_fea ]; then
          nnet-copy --expand-first-component=$num_ivec_dim --expand-sideinfo=true $dbn $dir/dbn.expand
          dbn=$dir/dbn.expand
        fi
      fi
      num_fea=$(nnet-forward ${utt2spk:+ --utt2spk-rspecifier=ark:$utt2spk} ${ivector_scp:+ --ivector-rspecifier=scp:$ivector_scp} --feature-transform=$feature_transform $dbn "$feats_tr1" ark:- | feat-to-dim ark:- -)
    fi
    [ -z "$num_fea" ] && echo "Getting nnet input dimension failed!!" && exit 1
  }
  #output-dim
  [ -z $num_tgt ] && num_tgt=$(hmm-info --print-args=false $alidir/final.mdl | grep pdfs | awk '{ print $NF }')
  # make network prototype
  mlp_proto=$dir/nnet.proto
  # (Fixed message typo: 'Genrating' -> 'Generating'.)
  echo "Generating network prototype $mlp_proto"
  case "$network_type" in
    dnn)
      myutils/nnet/make_nnet_proto.py $proto_opts \
        ${bn_dim:+ --bottleneck-dim=$bn_dim} \
        $num_fea $num_tgt $hid_layers $hid_dim >$mlp_proto || exit 1
    ;;
    lstm)
      utils/nnet/make_lstm_proto.py $proto_opts \
        $num_fea $num_tgt >$mlp_proto || exit 1
    ;;
    *) echo "Unknown : --network-type $network_type" && exit 1
  esac
  if [ "$logistic" == true ]; then
    echo "fixing proto with logistic layer"
    myutils/logistic_regression_fix.pl $hid_layers $mlp_proto
  fi
  # initialize
  mlp_init=$dir/nnet.init; log=$dir/log/nnet_initialize.log
  echo "Initializing $mlp_proto -> $mlp_init"
  nnet-initialize $mlp_proto $mlp_init 2>$log || { cat $log; exit 1; }
  #optionally prepend dbn to the initialization
  if [ ! -z $dbn ]; then
    mlp_init_old=$mlp_init; mlp_init=$dir/nnet_$(basename $dbn)_dnn.init
    nnet-concat $dbn $mlp_init_old $mlp_init || exit 1
  fi
fi
# Optionally convert affine layers to preconditioned variants
# (presumably natural-gradient style preconditioning — confirm against nnet-copy).
if [ "$precondition" == simple ]; then
  mv $mlp_init $mlp_init.bak
  nnet-copy --affine-to-preconditioned=$precondition --alpha=$alpha --max-norm=$max_norm $mlp_init.bak $mlp_init
elif [ "$precondition" == online ]; then
  mv $mlp_init $mlp_init.bak
  nnet-copy --affine-to-preconditioned=$precondition --rank-in=$rank_in --rank-out=$rank_out --update-period=$update_period --max-change-per-sample=$max_change_per_sample --num-samples-history=$num_samples_history --alpha=$alpha $mlp_init.bak $mlp_init
elif [ ! -z "$precondition" ]; then
  # NOTE(review): only warns and continues on an unknown value; consider 'exit 1'.
  echo "unsupported precondition type $precondition"
fi
###### TRAIN ######
# In MPI mode, switch to the per-rank shard pipelines prepared earlier.
if [ $mpi_jobs != 0 ]; then
  feats_tr="$feats_tr_mpi"
  feats_cv="$feats_cv_mpi"
fi
echo
echo "# RUNNING THE NN-TRAINING SCHEDULER"
# Hand over to the training scheduler; optional arguments are forwarded
# only when the corresponding variable is non-empty (${var:+ ...} expansions).
mysteps/train_nnet_scheduler.sh \
  --feature-transform $feature_transform \
  --learn-rate $learn_rate \
  --randomizer-seed $seed \
  --resume-anneal $resume_anneal \
  --max-iters $max_iters \
  ${min_iters:+ --min-iters $min_iters} \
  ${utt2spk:+ --utt2spk $utt2spk} \
  ${ivector_scp:+ --ivector-scp $ivector_scp} \
  ${semi_layers:+ --semi-layers $semi_layers} \
  ${updatable_layers:+ --updatable-layers $updatable_layers} \
  ${frames_per_reduce:+ --frames-per-reduce $frames_per_reduce} \
  ${reduce_per_iter_tr:+ --reduce-per-iter-tr $reduce_per_iter_tr} \
  ${reduce_type:+ --reduce-type $reduce_type} \
  ${reduce_content:+ --reduce-content $reduce_content} \
  ${train_opts} \
  ${train_tool:+ --train-tool "$train_tool"} \
  ${config:+ --config $config} \
  $mlp_init "$feats_tr" "$feats_cv" "$labels_tr" "$labels_cv" $dir
echo "$0 successfuly finished.. $dir"
sleep 3
exit 0
}
| true
|
549198112fd07a0d77b00f533ec8513309f39672
|
Shell
|
lvlPrImOlvl/llamadas_al_sistema
|
/fizzbuzz_final.sh
|
UTF-8
| 278
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
#Torres Ortiz Luis Miguel
#Problema fizzbuzz
# Print the FizzBuzz sequence for 1..n (default 30):
#   multiples of 15 -> "fizzbuzz", of 3 -> "fizz", of 5 -> "buzz",
#   otherwise the number itself.
#
# Fixes over the original:
#  - '];;' after the combined test was a bash syntax error (script could not run)
#  - the %3&&%5 case was checked *after* %3 and %5, so it was unreachable
#  - the labels were scrambled (%3 printed "fizzbuzz", %15 printed "buzz")
fizzbuzz() {
  local n=${1:-30} i
  for ((i = 1; i <= n; i++)); do
    # The 15-case must be tested first, otherwise %3/%5 shadow it.
    if ((i % 15 == 0)); then
      echo "fizzbuzz"
    elif ((i % 3 == 0)); then
      echo "fizz"
    elif ((i % 5 == 0)); then
      echo "buzz"
    else
      echo "$i"
    fi
  done
}

# Preserve the original behavior: print 1..30 when run (n overridable via $1).
fizzbuzz "${1:-30}"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.