blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b7200b0ad875a44ae57bca148bd2cbc2b9d13881
|
Shell
|
saadulkh/scripts
|
/whitesur-theme-gtk.sh
|
UTF-8
| 1,474
| 2.71875
| 3
|
[] |
no_license
|
# A shell script to install & setup WhiteSur theme on GTK.
# WhiteSur-gtk-theme: https://github.com/vinceliuice/WhiteSur-gtk-theme
# WhiteSur-icon-theme: https://github.com/vinceliuice/WhiteSur-icon-theme
# Created by Saad Khan on April 01, 2021.
# Install Dash to Dock
# (opens the GNOME extensions page in a browser; the install itself is
# interactive — the script does not wait for it to finish)
xdg-open https://extensions.gnome.org/extension/307/dash-to-dock/
# Install WhiteSur-gtk-theme
git clone https://github.com/vinceliuice/WhiteSur-gtk-theme.git
# NOTE(review): $HOME is expanded by the invoking user's shell before
# sudo runs, so themes land in the user's ~/.themes but owned by root —
# confirm this is intended.
sudo ./WhiteSur-gtk-theme/install.sh --dest $HOME/.themes --opacity solid --icon ubuntu --gdm
# NOTE(review): the '~' here is NOT expanded by the shell (not a valid
# assignment prefix); flatpak receives it literally and resolves it
# itself — under sudo that may resolve to root's home. Verify.
sudo flatpak override --filesystem=~/.themes
# Apply WhiteSur-gtk-theme
gsettings set org.gnome.desktop.interface gtk-theme 'WhiteSur-dark-solid'
gnome-extensions enable user-theme@gnome-shell-extensions.gcampax.github.com
gsettings set org.gnome.shell.extensions.user-theme name 'WhiteSur-dark-solid'
# Install WhiteSur dash-to-dock theme
./WhiteSur-gtk-theme/src/other/dash-to-dock/install.sh
# Apply & Configure WhiteSur dash-to-dock theme
gnome-extensions disable ubuntu-dock@ubuntu.com
gsettings set org.gnome.shell.extensions.dash-to-dock apply-custom-theme true
gsettings set org.gnome.shell.extensions.dash-to-dock dock-position 'BOTTOM'
gsettings set org.gnome.shell.extensions.dash-to-dock show-trash false
# Install WhiteSur-icon-theme
git clone https://github.com/vinceliuice/WhiteSur-icon-theme.git
./WhiteSur-icon-theme/install.sh
# Apply WhiteSur-icon-theme
gsettings set org.gnome.desktop.interface icon-theme 'WhiteSur-dark'
# Remove Leftover
rm -rf WhiteSur-gtk-theme WhiteSur-icon-theme
| true
|
2719a30571bdd5251f9ee001fdc7dd2dc74afe2c
|
Shell
|
cschneid-the-elder/mapa
|
/srcCount
|
UTF-8
| 988
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Rough code-size report: counts ';' occurrences (a LOC / grammar-rule
# proxy) per source group and prints per-group and total sums.

# Sum grep --count's "file:count" lines read on stdin into one total.
sumCounts() {
  awk -F ":" '{tot = tot + $2} END {print tot}'
}

javaCbl=$(grep --exclude-from=cobol/src-exclude-list --count ";" cobol/src/*.java | sumCounts);
javaJcl=$(grep --exclude-from=jcl/src-exclude-list --count ";" jcl/src/*.java | sumCounts);
antlrCbl=$(grep --count ";" cobol/src/*.g4 | sumCounts);
antlrJcl=$(grep --count ";" jcl/src/*.g4 | sumCounts);
antlrDb2=$(grep --count ";" db2z/src/*.g4 | sumCounts);

javaTot=$((${javaCbl}+${javaJcl}));
antlrTot=$((${antlrCbl}+${antlrJcl}+${antlrDb2}));
grandTot=$((${javaTot}+${antlrTot}));

printf '%s\n' "java LOC cobol = ${javaCbl}"
printf '%s\n' "java LOC jcl = ${javaJcl}"
printf '%s\n' "antlr cobol rules = ${antlrCbl}"
printf '%s\n' "antlr jcl rules = ${antlrJcl}"
printf '%s\n' "antlr db2z rules = ${antlrDb2}"
printf '%s\n' "java LOC total = ${javaTot}"
printf '%s\n' "antlr rules total = ${antlrTot}"
printf '%s\n' "grand total FWIW = ${grandTot}"
| true
|
98751b76ee8c1ac198c8bb75dbf2ff0a72804192
|
Shell
|
rosyapril/TritonBot
|
/cogrob_ros/use_cogrob_workspace/link_from_bazel.sh
|
UTF-8
| 1,343
| 3.796875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Mirror the bazel workspace into src/use_cogrob_workspace/workspace as
# symlinks, create __init__.py package markers in every directory, and
# compile all .proto files for Python/gRPC.
BAZEL=bazel
# Get script path.
BASH_SCRIPT="${BASH_SOURCE[0]}"
# Follow symlinks until we reach the real script file, so relative
# paths below resolve against the script's true location.
while [ -h "$BASH_SCRIPT" ]; do
BASH_SCRIPT_PATH="$( cd -P "$( dirname "$BASH_SCRIPT" )" && pwd )"
BASH_SCRIPT="$(readlink "$BASH_SCRIPT")"
# Relative link targets are resolved against the link's directory.
[[ $BASH_SCRIPT != /* ]] && BASH_SCRIPT="$BASH_SCRIPT_PATH/$BASH_SCRIPT"
done
BASH_SCRIPT_PATH="$( cd -P "$( dirname "$BASH_SCRIPT" )" && pwd )"
# Source workspace; overridable via $BAZEL_WORKSPACE in the environment.
: ${BAZEL_WORKSPACE:=$BASH_SCRIPT_PATH/../../workspace}
DEST_WORKSPACE=$BASH_SCRIPT_PATH/src/use_cogrob_workspace/workspace
rm -rf $DEST_WORKSPACE
mkdir -p $DEST_WORKSPACE
# Restrict word-splitting to newline/backspace so paths containing
# spaces survive the unquoted for-loops below.
IFS=$(echo -en "\n\b")
ALL_FILES=`\
find $BAZEL_WORKSPACE -type f -not -path '*bazel*' -and -not -path '*.git*'`
printf "Making symlink for all files.\n"
for file in $ALL_FILES; do
SRC_FILE="$file"
# Rebase each source path under the destination workspace.
DEST_FILE="${file/$BAZEL_WORKSPACE/$DEST_WORKSPACE}"
DEST_BASEDIR=$(dirname "$DEST_FILE")
mkdir -p $DEST_BASEDIR
ln -s $SRC_FILE $DEST_FILE
done
printf "Creating __init__.py recursively.\n"
# Each directory becomes a Python package so the mirrored tree imports.
for CURRENT_DIR in `find $DEST_WORKSPACE -type d`; do
touch $CURRENT_DIR/__init__.py
done
printf "Compiling all proto files for Python.\n"
ALL_PROTO_FILES=`\find $DEST_WORKSPACE -name '*.proto' \
-not -path '*bazel*' -and -not -path '*.git*'`
python -m grpc_tools.protoc -I$DEST_WORKSPACE \
--python_out=$DEST_WORKSPACE --grpc_python_out=$DEST_WORKSPACE \
$ALL_PROTO_FILES
| true
|
eeac4f5c73ff2a742423eeab0ecffcfb2599cf4a
|
Shell
|
k0keoyo/nezha
|
/utils/build_helpers/build_xzutils_lf.sh
|
UTF-8
| 1,806
| 3.359375
| 3
|
[] |
no_license
|
# Build an ASAN/SanitizerCoverage-instrumented XZ-Utils from a pinned
# upstream commit, with a project patch applied, installed into BDIR.
# BUILD_LIBS, SRC_LIBS, XZUTILS and XZUTILS_ST are provided by the
# sourced include.sh — TODO confirm their exact values there.
CWD=`pwd`
INCL=${CWD}/utils/build_helpers/include.sh
source ${INCL}
PATCH=xzutils_5.2.2_xz.patch
# Pin the exact upstream commit so the patch always applies cleanly.
GIT_COMMIT_VER=faf302137e54d605b44ecf0373cb51a6403a2de1
BDIR=${BUILD_LIBS}/${XZUTILS}_lf
echo -e "\t * Building ASAN/SanitizerCoverage-instrumented XZUtils"
if [ ! -d ${BDIR} ]; then
mkdir -p ${BDIR}
fi
# Instrumentation flags: ASan plus edge/indirect-call/8bit-counter
# coverage (clang-3.8 era SanitizerCoverage syntax).
LC="-g -fsanitize=address -fsanitize-coverage=edge,indirect-calls,8bit-counters"
if ! [ -d ${SRC_LIBS}/${XZUTILS} ]; then
echo -e "\t\t - Downloading XZ-Utils in ${SRC_LIBS}/xzutils"
git clone ${XZUTILS_ST} ${SRC_LIBS}/xzutils 2>/dev/null
fi
pushd ${SRC_LIBS}/${XZUTILS} >/dev/null
echo -e "\t\t - Configuring"
# clean up just in case
make -j10 clean > /dev/null 2>&1
make -j10 distclean > /dev/null 2>&1
git checkout ${GIT_COMMIT_VER} > /dev/null 2>&1
# NOTE(review): fixed relative depth back to the repo root — verify it
# matches where SRC_LIBS actually lives.
cp ../../../../utils/patches/${PATCH} .
patch -p1 < ${PATCH} > /dev/null 2>&1
bash autogen.sh > /dev/null 2>&1
./configure --disable-shared --with-pic --prefix=${BDIR} \
--exec-prefix=${BDIR} CC="clang-3.8" CXX="clang++-3.8" CFLAGS="$LC" \
>/dev/null 2>&1
echo -e "\t\t - Adding dependencies"
echo -e "\t\t - Compiling"
make -j10 > /dev/null 2>&1
echo -e "\t\t - Installing"
make -j10 install > /dev/null 2>&1
# clean up for next install
make -j10 clean > /dev/null 2>&1
make -j10 distclean > /dev/null 2>&1
# Smoke-test: binary runs, links liblzma, and contains sanitizer symbols.
${BDIR}/bin/xz --version 1> /tmp/ttest
test=$(objdump -d ${BDIR}/bin/xz 2>/dev/null \
| grep "__sanitizer" | wc -l)
if [ -f ${BDIR}/bin/xz ] &&
[ -f ${BDIR}/lib/liblzma.a ] &&
[ $(grep -i liblzma /tmp/ttest | wc -l) -ne 0 ] &&
[ $test -ne 0 ]; then
echo -e "\t\t - Testing install..\033[0;32m OK\n"
else
echo -e "\t\t - Testing install..\033[0;31m FAILED\n"
fi
echo -en "\e[0m"
popd >/dev/null
| true
|
023c936a4e8876e64773e993833aad4f7cffd861
|
Shell
|
davidrenne/asdineStormRocks
|
/bin/kill_golang
|
UTF-8
| 433
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Kill the running asdineStormRocks server and its compile watcher.
# sudo is only used when running unprivileged on the privileged port 80.
whoami=$(whoami)
# Port of the local instance, as reported by the repo helper script.
port=$(bash "$GOPATH/src/github.com/davidrenne/asdineStormRocks/bin/get_port_local")
if [[ "$whoami" == "root" ]] || [[ "$port" != "80" ]]; then
  pkill compile &
  # BUG FIX: the bracket trick "[e]xe/..." keeps the grep process itself
  # out of the ps output, so we no longer try to kill our own pipeline.
  ps -ax | grep "[e]xe/asdineStormRocks" | awk '{print $1}' | xargs kill > /dev/null 2>&1 &
else
  sudo pkill compile &
  ps -ax | grep "[e]xe/asdineStormRocks" | awk '{print $1}' | xargs sudo kill > /dev/null 2>&1 &
fi
# 'say' is macOS text-to-speech — audible completion cue.
say "server dead"
| true
|
e068025ef35758bf9cc73c8424d65dda5408714b
|
Shell
|
xergioalex/nginxDocker
|
/docker/nginx/docker.sh
|
UTF-8
| 7,002
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
# Utils functions
. utils.sh
# Create envs vars if don't exist
ENV_FILES=("docker-compose.yaml" ".env" "nginx/site.template" "nginx/site.template.ssl")
utils.check_envs_files "${ENV_FILES[@]}"
# Load environment vars, to use from console, run follow command:
utils.load_environment
# Menu options — first CLI argument selects the action.
if [[ "$1" == "machine.create" ]]; then
  utils.printer "Cheking if remote machine exist..."
  # If machine doesn't exist, create a droplet and provision machine
  if [[ "$MACHINE_DRIVER" == "digitalocean" ]]; then
    if [[ "$MACHINE_NAME" != $(docker-machine ls -q | grep "^$MACHINE_NAME$") ]]; then
      # NOTE(review): starting a machine just determined not to exist
      # looks redundant — kept as-is, confirm intent.
      utils.printer "Starting machine if it's off..."
      docker-machine start $MACHINE_NAME
      utils.printer "Creating machine..."
      docker-machine create --driver digitalocean --digitalocean-access-token $DIGITAL_ACCESS_TOKEN --digitalocean-image $DIGITAL_IMAGE --digitalocean-size $DIGITAL_SIZE $MACHINE_NAME
      utils.printer "Machine created at: $(docker-machine ip $MACHINE_NAME)"
    else
      utils.printer "Starting machine if it's off..."
      docker-machine start $MACHINE_NAME
      utils.printer "Machine already exist at: $(docker-machine ip $MACHINE_NAME)"
    fi
  elif [[ "$MACHINE_DRIVER" == "virtualbox" ]]; then
    if [[ "$MACHINE_NAME" != $(docker-machine ls -q | grep "^$MACHINE_NAME$") ]]; then
      utils.printer "Creating machine..."
      docker-machine create -d virtualbox $MACHINE_NAME
      utils.printer "Machine created at: $(docker-machine ip $MACHINE_NAME)"
    else
      utils.printer "Starting machine if it's off..."
      docker-machine start $MACHINE_NAME
      utils.printer "Machine already exist at: $(docker-machine ip $MACHINE_NAME)"
    fi
  fi
elif [[ "$1" == "machine.details" ]]; then
  utils.printer "Searching for machine details..."
  if [[ "$MACHINE_NAME" != $(docker-machine ls -q | grep "^$MACHINE_NAME$") ]]; then
    utils.printer "Machine doesn't exist"
  else
    utils.printer "Machine driver: $MACHINE_DRIVER"
    utils.printer "Machine name: $MACHINE_NAME"
    utils.printer "Machine ip: $(docker-machine ip $MACHINE_NAME)"
  fi
elif [[ "$1" == "machine.start" ]]; then
  if [[ "$MACHINE_NAME" != $(docker-machine ls -q | grep "^$MACHINE_NAME$") ]]; then
    utils.printer "Machine doesn't exist"
  else
    utils.printer "Power on machine..."
    # BUG FIX: this branch previously ran "docker-machine rm", which
    # DESTROYED the machine instead of starting it.
    docker-machine start $MACHINE_NAME
  fi
elif [[ "$1" == "machine.restart" ]]; then
  if [[ "$MACHINE_NAME" != $(docker-machine ls -q | grep "^$MACHINE_NAME$") ]]; then
    utils.printer "Machine doesn't exist"
  else
    utils.printer "Restarting on machine..."
    docker-machine restart $MACHINE_NAME
  fi
elif [[ "$1" == "machine.stop" ]]; then
  if [[ "$MACHINE_NAME" != $(docker-machine ls -q | grep "^$MACHINE_NAME$") ]]; then
    utils.printer "Machine doesn't exist"
  else
    utils.printer "Power off machine..."
    docker-machine stop $MACHINE_NAME
  fi
elif [[ "$1" == "machine.rm" ]]; then
  if [[ "$MACHINE_NAME" != $(docker-machine ls -q | grep "^$MACHINE_NAME$") ]]; then
    utils.printer "Machine doesn't exist"
  else
    utils.printer "Power off machine..."
    docker-machine stop $MACHINE_NAME
    utils.printer "Removing machine..."
    docker-machine rm $MACHINE_NAME
  fi
elif [[ "$1" == "config" ]]; then
  utils.printer "Set nginx configuration..."
  docker-machine ssh $MACHINE_NAME mkdir -p /opt/nginx/config/
  # "secure" selects the SSL nginx template.
  if [[ "$2" == "secure" ]]; then
    docker-machine scp nginx/site.template.ssl $MACHINE_NAME:/opt/nginx/config/default.conf
  else
    docker-machine scp nginx/site.template $MACHINE_NAME:/opt/nginx/config/default.conf
  fi
  utils.printer "Creating logs files..."
  docker-machine ssh $MACHINE_NAME mkdir -p /opt/nginx/logs/
  docker-machine ssh $MACHINE_NAME touch /opt/nginx/logs/site.meteor.access
  docker-machine ssh $MACHINE_NAME touch /opt/nginx/logs/site.flask.access
  docker-machine ssh $MACHINE_NAME touch /opt/nginx/logs/site.meteor.error
  docker-machine ssh $MACHINE_NAME touch /opt/nginx/logs/site.flask.error
  if [[ "$2" == "secure" ]]; then
    utils.printer "Stopping nginx machine if it's running..."
    docker-compose $(docker-machine config $MACHINE_NAME) stop nginx
    utils.printer "Creating letsencrypt certifications files..."
    docker-compose $(docker-machine config $MACHINE_NAME) up certbot
  fi
elif [[ "$1" == "deploy" ]]; then
  utils.printer "Starting nginx machine..."
  docker-compose $(docker-machine config $MACHINE_NAME) up -d nginx
  docker-compose $(docker-machine config $MACHINE_NAME) restart nginx
elif [[ "$1" == "start" ]]; then
  utils.printer "Start services"
  docker-compose $(docker-machine config $MACHINE_NAME) start nginx
elif [[ "$1" == "restart" ]]; then
  utils.printer "Restart services"
  docker-compose $(docker-machine config $MACHINE_NAME) restart nginx
elif [[ "$1" == "stop" ]]; then
  utils.printer "Stop services"
  docker-compose $(docker-machine config $MACHINE_NAME) stop nginx
elif [[ "$1" == "rm" ]]; then
  # BUG FIX: the two branches below had swapped messages/commands
  # relative to the documented behavior ("rm" -> nginx only,
  # "rm all" -> every service).
  if [[ "$2" == "all" ]]; then
    utils.printer "Stop && remove all services"
    docker-compose $(docker-machine config $MACHINE_NAME) rm
  else
    utils.printer "Stop && remove nginx service"
    docker-compose $(docker-machine config $MACHINE_NAME) rm ${2:-nginx}
  fi
elif [[ "$1" == "bash" ]]; then
  utils.printer "Connect to nginx bash shell"
  docker-compose $(docker-machine config $MACHINE_NAME) exec nginx bash
elif [[ "$1" == "ps" ]]; then
  utils.printer "Show all running containers"
  docker-compose $(docker-machine config $MACHINE_NAME) ps
elif [[ "$1" == "logs" ]]; then
  utils.printer "Showing nginx logs..."
  # Optional second argument overrides the default 30-line tail.
  if [[ -z "$2" ]]; then
    docker-compose $(docker-machine config $MACHINE_NAME) logs -f --tail=30 nginx
  else
    docker-compose $(docker-machine config $MACHINE_NAME) logs -f --tail=$2 nginx
  fi
elif [[ "$1" == "up" ]]; then
  # Create machine
  bash docker.sh machine.create
  # Deploying services to remote host
  bash docker.sh config $2
  # Set server configuration
  bash docker.sh deploy $2
else
  utils.printer "Usage: docker.sh [build|up|start|restart|stop|mongo|bash|logs n_last_lines|rm|ps]"
  echo -e "up --> Build && restart nginx service"
  echo -e "start --> Start nginx service"
  echo -e "restart --> Restart nginx service"
  echo -e "stop --> Stop nginx service"
  echo -e "bash --> Connect to nginx service bash shell"
  echo -e "logs n_last_lines --> Show nginx server logs; n_last_lines parameter is optional"
  echo -e "rm --> Stop && remove nginx service"
  echo -e "rm all --> Stop && remove all services"
  echo -e "server.config --> Set nginx configuration service"
  echo -e "deploy --> Build, config && start services"
fi
| true
|
3b551e6360cb87eccf422697c1d067c59a896b29
|
Shell
|
jalkal/scripts
|
/processXml.sh
|
UTF-8
| 1,302
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/sh
##############################################
# Author: Jose Alcalde
# Descriptrion: script that given a file name pattern is able find those files with this pattern
# and do whatever for each file
###############################################
multiFile() {
  # Run singleFile on every file matching $PATTERN under $WORK_DIR.
  # Glob directly instead of parsing `ls` output (fragile with spaces
  # and special characters).
  for FILE in $WORK_DIR/$PATTERN
  do
    # Skip the literal pattern when nothing matches.
    [ -e "$FILE" ] || continue
    singleFile "$FILE"
  done
}
singleFile(){
  # Placeholder per-file action: report which file is being processed.
  printf '%s\n' "executing for $1"
}
usage(){
  # Print invocation help on stdout, then terminate unsuccessfully.
  echo "usage processXml.sh --file-pattern=<pattern> --mode=<single|multiple>"
  echo "                    --input-file=<inputFile>"
  exit 1
}
if [ $# = 0 ]; then
  usage
fi

WORK_DIR="files"

# Parse --key=value style arguments into globals.
for i in "$@"
do
  case $i in
    --input-file=*)
      INPUT_FILE="${i#*=}"
      shift # past argument=value
      ;;
    --mode=*)
      MODE="${i#*=}"
      shift # past argument=value
      ;;
    --file-pattern=*)
      PATTERN="${i#*=}"
      shift # past argument=value
      ;;
  esac
done

#echo "INPUT_FILE:$INPUT_FILE"
#echo "MODE:$MODE"
#echo "PATTERN:$PATTERN"

# BUG FIX: the shebang is /bin/sh, but the original used [[ ]] bashisms
# that fail under dash/POSIX sh. Converted to POSIX [ ] tests with
# quoting and ${var:-} defaults so unset variables cannot break them.
if [ -n "${INPUT_FILE:-}" ]; then
  singleFile "$INPUT_FILE"
elif [ -n "${PATTERN:-}" ]; then
  if [ "${MODE:-}" = "multiple" ]; then
    multiFile
  elif [ "${MODE:-}" = "single" ]; then
    # Keeps the filename of the LAST line of `ls -lt` (the oldest
    # matching file) — NOTE(review): confirm oldest vs newest is the
    # intended semantics of "LAST_FILE".
    LAST_FILE=$(ls -lt $WORK_DIR/$PATTERN | awk '/.*/ { f=$NF };END{ print f }')
    singleFile "$LAST_FILE"
  else
    usage
  fi
else
  usage
fi
| true
|
5b352eb217ec7d0221b7082f69a6d41dd1dc2186
|
Shell
|
lianabatalova/HSEDevOpsHW2
|
/HW2_task1.sh
|
UTF-8
| 526
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Download, in parallel batches, every URL found in one column of a
# ';'-separated CSV.
# Usage: HW2_task1.sh <workers> <column-name> <folder> <data.csv>
data_csv=$4
folder_name=$3
col_name=$2
# Resolve the column NAME to its 1-based index: split the header row on
# ';', number the fields, grep for the name, keep the field number.
col_name=$(head -1 $data_csv | tr -s ';' '\n' | nl -nln | grep "$col_name" | cut -f1)
echo $col_name
# All values of that column (assumed to be downloadable URLs).
links=$(awk -F ";" -v head=$col_name '{print $head}' $data_csv)
mkdir -p $folder_name
cd $folder_name
workers_count=$1
workers_pool=$workers_count
echo $workers_count
for link in $links
do
# When the pool is exhausted, wait for the whole batch of background
# wget jobs to finish before launching more.
if [ $workers_pool -eq 0 ]
then
wait
workers_pool=$workers_count
fi
workers_pool="$(($workers_pool-1))"
echo $workers_pool
echo $link
wget $link &
done
# Reap the final (possibly partial) batch.
wait
| true
|
0c8a5f3d8ab2a340c8487e439406984cbdd44c0e
|
Shell
|
durai23/testrep
|
/pennms_scripts_main/SkullStripCorrection/ThresholdJacobian_new.sh
|
UTF-8
| 16,326
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
################################################ VERSION INFO ################################################
# $Id: ThresholdJacobian.sh 71 2011-11-02 17:16:08Z doshijim@UPHS.PENNHEALTH.PRV $
#
version()
{
# Display the version number and date of modification extracted from
# the Id variable.
# Prints SVN keyword metadata (author/revision/date) and exits with
# status 5 so "-V" runs are distinguishable from normal completion.
SVNversion="$Id: ThresholdJacobian.sh 71 2011-11-02 17:16:08Z doshijim@UPHS.PENNHEALTH.PRV $"
Auth="Jimit Doshi"
ver="$Rev: 71 $"
mod="$LastChangedDate: 2011-11-02 13:16:08 -0400 (Wed, 02 Nov 2011) $"
echo -e "Author : $Auth"
echo -e "Revision $ver"
echo -e "Last Modification $mod"
# echo -e "$0 version \c"
# echo $SVNversion|cut -f3,4,5 -d" "
exit 5
}
################################################ FUNCTIONS ################################################
help()
{
# Print the usage text (here-doc below; $0, `which` and `dirname`
# expansions are evaluated at print time) and exit with status 1.
cat <<HELP
This script does the following:
##############################################
USAGE : $0 [OPTIONS]
OPTIONS:
Reqd: -in < file > absolute path of the input file to be skull-stripped and cerebellum removed
-jacRank < file > absolute path of the Jacobian Ranked mask
Opt: -dest < path > absolute path to the destination where the results are to be stored (default: same as input)
-tmp < path > absolute path to the temporary directory (default: \$SBIA_TMPDIR )
-perThresh < float > Percent Threshold for the aggresiveness of the skull-stripping and cerebellum removal. 0 < \$perThresh < 100 (default: 50)
-absThresh < float > Absolute Threshold for the aggresiveness of the skull-stripping and cerebellum removal. 0 < \$absThresh < max (no default)
If this argument is provided, it will override the -perThresh value.
-mask < pattern > Prefix of the output brain mask (default: input_cbq_mask)
Prefix of the output ventricle mask, if "-vnmask" is set (default: input_cbq_vnmask)
Provide the full filename without the extension or the path
-cbq < pattern > Prefix of the output skull-stripped, cerebellum removed image (default: input_str_cbq)
-kernel < int > Spherical dilation kernel size, in mm (default: 6mm)
-exe < path > absolute path to the directory containing the scripts (default: `dirname $0`)
-vnmask < 0/1 > flag to signify ventricle mask generation. If set, "-in" and "-cbq" options become invalid. (default: 0 - no VNmask)
-v verbose output (default: no output)
-V Version info
ERROR: Not enough arguments!!
##############################################
DEPENDENCIES:
3dcalc : `which 3dcalc`
3dBrickStat : `which 3dBrickStat`
nifti1_test : `which nifti1_test`
3dclust : `which 3dclust`
fslmaths : `which fslmaths`
HELP
exit 1
}
checkDependency()
{
# Verify that external tool $1 is on PATH; abort the run otherwise.
if ! pth=`which $1 2>&1`
then
echo -e "${1} not installed OR not found. Aborting operations ..."
cleanUpandExit
fi
}
checkExitCode()
{
# Abort with message $2 when the status code $1 is non-zero.
[ $1 != 0 ] || return 0
echo -e $2
cleanUpandExit
}
cleanUpandExit()
{
# Abort path: announce, remove whatever temporaries exist, report the
# elapsed time, and exit non-zero.
echo -e ":o:o:o:o:o Aborting Operations .... \n\n"
if [ -d "$TMP" ]
then
# A dedicated temp dir is removed wholesale; when TMP *is* the
# destination, delete only the intermediates this run created.
if [ "$TMP" != "$dest" ]
then
rm -rfv ${TMP}
else
rmV ${Thresholded}
rmV ${Sum_open}
rmV ${Filled}
rmV ${Clustered}
# Imported copies are deleted only if we made them (Cp* flags set
# by import()).
if [ "$CpInput" == "1" ]
then
rmV ${InbName}.nii.gz
fi
if [ "$CpJacob" == "1" ]
then
rmV ${JRbName}.nii.gz
fi
fi
fi
executionTime
exit 1
}
checkPath()
{
# Echo $1 with a guaranteed trailing slash: append one only when the
# last path component is non-empty (i.e. no trailing slash yet).
case `echo ${1##*/}` in
"")
echo $1
;;
*)
echo ${1}/
;;
esac
}
checkFile()
{
# Abort the run unless file $1 exists.
[ -f $1 ] && return 0
echo -e "\nERROR: Input file $1 does not exist! Aborting operations ..."
cleanUpandExit
}
FileAtt()
{
# Echo "<extension> <basename>" for image file $1, treating .nii.gz as
# one two-part suffix; abort on a missing file or unknown extension.
IP=$1;
if [ ! -f ${IP} ]
then
echo -e "\nERROR: Input file $IP does not exist!"
cleanUpandExit
fi
ext=$(echo ${IP##*.})
bName=$(basename ${IP%.${ext}})
# A bare .gz means the real suffix is the last TWO dot components.
if [ "$ext" == "gz" ]
then
ext=$(echo ${bName##*.}).${ext}
bName=$(basename ${IP%.${ext}})
fi
case "$ext" in
nii.gz|hdr|img|nii)
;;
*)
echo -e "\nERROR: Input file extension $ext not recognized! Please check ..."
cleanUpandExit
;;
esac
echo $ext $bName
}
executionTime()
{
# Pretty-print (via echoV) the wall time elapsed since the global
# $startTimeStamp, choosing s / m s / h m s / d h m s granularity.
# FIX: replaced the deprecated $[ ... ] arithmetic syntax (removed from
# modern bash documentation) with the standard $(( ... )) form.
endTimeStamp=`date +%s`
total=$(( endTimeStamp - startTimeStamp ))
if [ ${total} -gt 60 ]
then
if [ ${total} -gt 3600 ]
then
if [ ${total} -gt 86400 ]
then
echoV "\nExecution time: $(( total / 86400 ))d $(( total % 86400 / 3600 ))h $(( total % 86400 % 3600 / 60 ))m $(( total % 86400 % 3600 % 60 ))s"
else
echoV "\nExecution time: $(( total / 3600 ))h $(( total % 3600 / 60 ))m $(( total % 3600 % 60 ))s"
fi
else
echoV "\nExecution time: $(( total / 60 ))m $(( total % 60 ))s"
fi
else
echoV "\nExecution time: $(( total % 60 ))s"
fi
}
parse()
{
# Parse command-line options into the script's globals.
# NOTE: most inline comments below replace the original's copy-pasted
# "source path is set" comments, which were wrong for most options.
while [ -n "$1" ]; do
case $1 in
-h)
help;
shift 1;; # help is called
-in)
input=$2;
if [ ! -f $input ]
then
echo -e "\nERROR: Input file $input does not exist! Aborting operations ..."
exit 1
fi
temp=`FileAtt $input`
InExt=`echo $temp | awk '{ print $1 }'`
InbName=`echo $temp | awk '{ print $2 }'`
shift 2;; # input image is set
-dest)
dest=`checkPath $2`;
shift 2;; # destination path is set
-tmp)
tmp=`checkPath $2`;
shift 2;; # temporary directory is set
-perThresh)
perThresh=$2;
shift 2;; # percent threshold is set
-absThresh)
absThresh=$2;
shift 2;; # absolute threshold is set (overrides -perThresh)
-kernel)
kernel=$2;
shift 2;; # dilation kernel size is set
-mask)
mask=$2;
shift 2;; # output mask prefix is set
-cbq)
cbq=$2;
shift 2;; # output image prefix is set
-exe)
scripts=`checkPath $2`;
shift 2;; # helper-scripts directory is set
-jacRank)
jacRank=$2;
if [ ! -f $jacRank ]
then
echo -e "\nERROR: Input file $jacRank does not exist! Aborting operations ..."
exit 1
fi
temp=`FileAtt $jacRank`
JRExt=`echo $temp | awk '{ print $1 }'`
JRbName=`echo $temp | awk '{ print $2 }'`
shift 2;; # jacobian-ranked mask is set
-vnmask)
vnmask=$2;
shift 2;; # ventricle-mask flag is set
-v)
verbose=1;
shift 1;; # verbose output enabled
-V)
version
shift 1;; # unreachable: version() exits
-*)
echo "ERROR: no such option $1";
help;;
*)
break;;
esac
done
}
convertToNifti()
{
# Convert ANALYZE image $1 to compressed NIFTI (named $2, or in-place
# when $2 is omitted); on success remove the original .img/.hdr pair.
in=$1
if [ -z "$2" ]
then
out=$1
else
out=$2
fi
nifti1_test -zn1 $in $out
# Success is detected by the presence of the expected .nii.gz output.
if [ -f ${out%.img}.nii.gz ]
then
echoV "\nConverted to NIFTIGZ: $out"
rm -fv ${in} ${in%.img}.hdr
else
echoV "\nConversion to NIFTIGZ failed: $in"
fi
}
import()
{
# Copy/convert an input image into the work dir as NIFTI-GZ.
# $1: source extension, $2: source file, $3: output prefix (no ext).
# Echoes "1" when a copy was made — callers capture this as a
# "we own the temp copy" flag; echoes nothing if the output existed.
ext=$1
inFile=$2
outFile=$3
if [ ! -f ${outFile}.nii.gz ]
then
if [ "${ext}" == "nii.gz" ] || [ "${ext}" == "nii" ] || [ "${ext}" == "img" ]
then
echoV "nifti1_test -zn1 ${inFile} ${outFile}" 1>&2
nifti1_test -zn1 ${inFile} ${outFile}
echo 1 ### Returning a value indicating that the file was copied successfully
elif [ "${ext}" == "hdr" ]
then
# For .hdr inputs, convert the paired .img file.
echoV "nifti1_test -zn1 ${inFile%.hdr}.img ${outFile}" 1>&2
nifti1_test -zn1 ${inFile%.hdr}.img ${outFile}
echo 1 ### Returning a value indicating that the file was copied successfully
fi
fi
}
echoV()
{
# Print $1 (with escape-sequence interpretation) in verbose mode only.
case "$verbose" in
1)
echo -e $1
;;
esac
}
rmV()
{
# Remove regular file $1 if it exists; chatty removal when verbose=1.
[ -f $1 ] || return 0
if [ "$verbose" == "1" ]
then
rm -fv $1
else
rm -f $1
fi
}
################################################ END OF FUNCTIONS ################################################
################################################ MAIN BODY ################################################
if [ $# -lt 4 ]; then
help
fi
### Timestamps
startTime=`date +%F-%H:%M:%S`
startTimeStamp=`date +%s`
echo -e "\nRunning commands on : `hostname`"
echo -e "Start time : ${startTime}\n"
### Default Parameters
perThresh=50	# percent threshold used when -absThresh is not given
absThresh=''
kernel=6	# morphology kernel size, in mm
verbose=1
vnmask=0
scripts=`dirname $0`/
# BUG FIX: the original ran "export $FSLOUTPUTTYPE", which (after
# expansion) exported a variable named NIFTI_GZ rather than
# FSLOUTPUTTYPE itself, so child FSL tools never saw the setting.
FSLOUTPUTTYPE=NIFTI_GZ; export FSLOUTPUTTYPE
### Specifying the trap signal
trap "checkExitCode 1 '\nProgram Interrupted. Received SIGHUP signal'" SIGHUP
trap "checkExitCode 1 '\nProgram Interrupted. Received SIGINT signal'" SIGINT
trap "checkExitCode 1 '\nProgram Interrupted. Received SIGTERM signal'" SIGTERM
# NOTE(review): SIGKILL cannot be trapped; this line is kept for parity
# with the original but will never fire.
trap "checkExitCode 1 '\nProgram Interrupted. Received SIGKILL signal'" SIGKILL
### Reading the arguments
echo -e "Parsing arguments : $*"
# "$@" preserves arguments containing whitespace; the original's
# unquoted $* re-split them.
parse "$@"
### Checking for default parameters
if [ -z $dest ]
then
dest=`dirname $input`/
fi
if [ -z $mask ]
then
if [ "$vnmask" == "0" ]
then
mask=${InbName}_cbq_mask
else
mask=${InbName}_cbq_vnmask
fi
fi
if [ -z $cbq ]
then
cbq=${InbName}_str_cbq
fi
fillholes=${scripts}FillHoles_3D.sh
Morpho=${scripts}Morpho.sh
### Check if all dependenices are satisfied
checkDependency 3dcalc
checkDependency nifti1_test
checkDependency 3dBrickStat
checkDependency 3dclust
checkDependency fslmaths
### Forming FileNames
# TMP
PID=$$
if [ -n "$tmp" ]
then
if [ ! -d "$tmp" ]
then
mkdir -p $tmp
fi
# TMP=`mktemp -d -p ${tmp} ThresholdMask_${PID}.XXXXXXXXXX`/ || { echo -e "\nCreation of Temporary Directory failed."; exit 1; }
TMP=$tmp
elif [ -n "$SBIA_TMPDIR" ]
then
if [ ! -d "$SBIA_TMPDIR" ]
then
mkdir -p $SBIA_TMPDIR
fi
TMP=`mktemp -d -p ${SBIA_TMPDIR} ThresholdMask_${PID}.XXXXXXXXXX`/ || { echo -e "\nCreation of Temporary Directory failed."; exit 1; }
else
TMP=`mktemp -d -t ThresholdMask_${PID}.XXXXXXXXXX`/ || { echo -e "\nCreation of Temporary Directory failed."; exit 1; }
fi
echoV "\n-----> Temporary local directory created at $TMP ...\n"
# Output Images
Thresholded=${JRbName}_thresh.nii.gz
Sum_ero=${Thresholded%.nii.gz}_ero${kernel}mm.nii.gz
Sum_ero_clust=${Sum_ero%.nii.gz}_clust.nii.gz
Sum_ero_clust_dil=${Sum_ero_clust%.nii.gz}_dil${kernel}mm.nii.gz
Sum_open=${Sum_ero_clust_dil}
Filled=${Sum_open%.nii.gz}_filled.nii.gz
Clustered=${Filled%.nii.gz}_clustered.nii.gz
if [ "$verbose" == "1" ]
then
echo -e "\nINPUT FILES"
if [ "$vnmask" == "0" ]
then
echo -e "Input Image : ${input}"
fi
echo -e "Jacobian Ranked Mask : ${jacRank}"
echo -e "\nOUTPUT FILES"
if [ "$vnmask" == "0" ]
then
echo -e "Final Brain Mask : ${dest}${mask}.nii.gz"
echo -e "Final CBQ image : ${dest}${cbq}.nii.gz"
else
echo -e "Final Ventricle Mask : ${dest}${mask}.nii.gz"
fi
echo -e "\nPARAMETERS"
if [ -n "$absThresh" ]
then
echo -e "Absolute Threshold : $absThresh"
else
echo -e "Percent Threshold : $perThresh %"
fi
echo -e "Dilation Kernel Size : ${kernel}mm"
fi
### Importing data to the temporary directory
echoV "\n-----> Importing required files to the temporary local directory ...\n"
CpInput=0
CpJacob=0
if [ "$vnmask" == "0" ]
then
CpInput=`import ${InExt} ${input} ${TMP}${InbName}`
fi
CpJacob=`import ${JRExt} ${jacRank} ${TMP}${JRbName}`
cd $TMP
### Thresholding the Jacobian ranked reference masks
if [ -n "$absThresh" ]
then
thresh=$absThresh
else
max=`3dBrickStat -slow -max ${JRbName}.nii.gz`
thresh=`echo "scale=7; $perThresh / 100 * ${max}" | bc`
fi
echoV "\n-----> Thresholding the Jacobian ranked reference mask at ${thresh} ...\n"
echoV "\n3dcalc \n
-prefix ${Thresholded} \n
-a ${JRbName}.nii.gz \n
-expr 'step(a-$thresh)' \n
-nscale \n
-byte \n
-verbose;"
if [ "$verbose" == "1" ]
then
3dcalc \
-prefix ${Thresholded} \
-a ${JRbName}.nii.gz \
-expr "step(a-$thresh)" \
-nscale \
-byte \
-verbose;
else
3dcalc \
-prefix ${Thresholded} \
-a ${JRbName}.nii.gz \
-expr "step(a-$thresh)" \
-nscale \
-byte;
fi
checkExitCode $? "\nERROR: Thresholding of the Jacobian Rank Mask failed!!!"
### Opening the thresholded mask
if [ "$kernel" != 0 ]
then
echoV "\n-----> Eroding the thresholded mask ...\n"
echoV "${Morpho} \n
-in ${Thresholded} \n
-erode \n
-dest $TMP \n
-kernel $kernel \n
-v;"
if [ "$verbose" == "1" ]
then
${Morpho} \
-in ${Thresholded} \
-erode \
-dest $TMP \
-kernel $kernel \
-v;
else
${Morpho} \
-in ${Thresholded} \
-erode \
-dest $TMP \
-kernel $kernel;
fi
checkExitCode $? "\nERROR: Opening of the thresholded mask failed!!!"
echoV "\n-----> Clustering the eroded mask to remove small, isolated clusters ...\n"
thresh=$(( `3dBrickStat -slow -non-zero -count ${Sum_ero}` / 2 ))
echoV "--> 3dclust \n
-prefix ${Sum_ero_clust} \n
0 \n
${thresh} \n
${Sum_ero};"
if [ "$verbose" == "1" ]
then
3dclust \
-prefix ${Sum_ero_clust} \
0 \
${thresh} \
${Sum_ero};
else
3dclust \
-summarize \
-quiet \
-nosum \
-prefix ${Sum_ero_clust} \
0 \
${thresh} \
${Sum_ero};
fi
checkExitCode $? "\nERROR: Clustering of the processed ventricle mask failed!!!"
echoV "\n-----> Dilating the clustered mask ...\n"
echoV "${Morpho} \n
-in ${Sum_ero_clust} \n
-dilate \n
-dest $TMP \n
-kernel $kernel \n
-v;"
if [ "$verbose" == "1" ]
then
${Morpho} \
-in ${Sum_ero_clust} \
-dilate \
-dest $TMP \
-kernel $kernel \
-v;
else
${Morpho} \
-in ${Sum_ero_clust} \
-dilate \
-dest $TMP \
-kernel $kernel;
fi
checkExitCode $? "\nERROR: Opening of the thresholded mask failed!!!"
else
echoV "\n-----> No opening of the thresholded mask requested ...\n"
if [ "$verbose" == "1" ]
then
cp -v ${Thresholded} ${Sum_open}
else
cp ${Thresholded} ${Sum_open}
fi
fi
### Filling holes
echoV "\n-----> Filling holes in the final brain mask ...\n"
echoV "\n--> ${fillholes} \n
-in ${Sum_open} \n
-dest $TMP \n
-v;"
if [ "$verbose" == "1" ]
then
${fillholes} \
-in ${Sum_open} \
-dest $TMP \
-v;
else
${fillholes} \
-in ${Sum_open} \
-dest $TMP;
fi
checkExitCode $? "\nERROR: Hole Filling failed!!!"
### Clustering the final mask to exclude small, isolated clusters
echoV "\n-----> Clustering the final, threholded, eroded and dilated mask to remove small, isolated clusters ...\n"
thresh=$(( `3dBrickStat -slow -non-zero -count ${Filled}` / 2 ))
echoV "--> 3dclust \n
-prefix ${Clustered} \n
0 \n
${thresh} \n
${Filled};"
if [ "$verbose" == "1" ]
then
3dclust \
-prefix ${Clustered} \
0 \
${thresh} \
${Filled};
else
3dclust \
-summarize \
-quiet \
-nosum \
-prefix ${Clustered} \
0 \
${thresh} \
${Filled};
fi
checkExitCode $? "\nERROR: Clustering of the processed ventricle mask failed!!!"
### Renaming the final mask
echoV "\n-----> Converting the final CBQ mask to byte ...\n"
if [ "$verbose" == "1" ]
then
# mv -v ${Clustered} ${mask}.nii.gz
echoV "\n--> 3dcalc \n
-a ${Clustered} \n
-prefix ${mask}.nii.gz \n
-expr a \n
-verbose \n
-nscale \n
-byte;"
3dcalc \
-a ${Clustered} \
-prefix ${mask}.nii.gz \
-expr a \
-verbose \
-nscale \
-byte;
else
# mv ${Clustered} ${mask}.nii.gz
3dcalc \
-a ${Clustered} \
-prefix ${mask}.nii.gz \
-expr a \
-nscale \
-byte;
fi
### Removing the Skull and cerebellum
if [ "$vnmask" == "0" ]
then
echoV "\n-----> Removing the Skull and cerebellum ...\n"
echoV "\n--> 3dcalc \n
-prefix ${cbq}.nii.gz \n
-a ${InbName}.nii.gz \n
-b ${mask}.nii.gz \n
-expr 'a*b' \n
-nscale \n
-verbose;"
if [ "$verbose" == "1" ]
then
3dcalc \
-prefix ${cbq}.nii.gz \
-a ${InbName}.nii.gz \
-b ${mask}.nii.gz \
-expr 'a*b' \
-nscale \
-verbose;
else
3dcalc \
-prefix ${cbq}.nii.gz \
-a ${InbName}.nii.gz \
-b ${mask}.nii.gz \
-expr 'a*b' \
-nscale;
fi
checkExitCode $? "\nERROR: Masking out of skull and cerebellum failed!!!"
fi
### Transferring the results to the destination
echoV "\n-----> Transferring the results to the destination ...\n"
if [ "$dest" != "$TMP" ]
then
if [ ! -d $dest ]
then
if [ "$verbose" == "1" ]
then
mkdir -pv $dest
else
mkdir -p $dest
fi
fi
if [ "$verbose" == "1" ]
then
if [ "$vnmask" == "0" ]
then
mv -v ${TMP}${cbq}.nii.gz ${dest}${cbq}.nii.gz
fi
mv -v ${TMP}${mask}.nii.gz ${dest}${mask}.nii.gz
else
if [ "$vnmask" == "0" ]
then
mv ${TMP}${cbq}.nii.gz ${dest}${cbq}.nii.gz
fi
mv ${TMP}${mask}.nii.gz ${dest}${mask}.nii.gz
fi
### Removing temporary files from the destination
echoV "\n-----> Removing temporary files from the TMPDIR ...\n"
if [ "$verbose" == "1" ]
then
rm -fv ${TMP}*
rmdir -v ${TMP}
else
rm -f ${TMP}*
rmdir ${TMP}
fi
else
rmV ${Thresholded}
rmV ${Sum_open}
rmV ${Filled}
rmV ${Clustered}
if [ "$CpInput" == "1" ]
then
rmV ${InbName}.nii.gz
fi
if [ "$CpJacob" == "1" ]
then
rmV ${JRbName}.nii.gz
fi
fi
### Execution Time
executionTime
################################################ END ################################################
| true
|
75c2aa2f3679d72d0d0d1e21aa95a7742f072fb4
|
Shell
|
ModeladoFoundation/ocr-apps
|
/apps/hpcg/refactored/ocr/intel-Eager-Collective/scripts/driver.sh
|
UTF-8
| 1,208
| 3.515625
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
#
# Driver to build binaries, generate and run batch, show results
#
# Job settings (TARGET_NAME, NODE_SCALING, CORE_SCALING, nxyz, CPGN, …)
# come from the properties file sourced below.
export JOB_PROPERTIES=${JOB_PROPERTIES-scripts/job.properties}
. ${JOB_PROPERTIES}
# Build and install the application binary.
function build {
eval "TARGET=${TARGET_NAME} RUN_MODE=buildApp V=1 make install"
}
# Generate one batch entry per (nxyz, node-count) pair.
function gen {
# There's a nxyz entry per node entry we need to generate
nxyzArray=(`echo "${nxyz}"`)
nodeArray=(`echo "${NODE_SCALING}"`)
nbEntries=${#nxyzArray[*]}
# The two lists must pair up one-to-one.
if [[ $nbEntries != ${#nodeArray[*]} ]]; then
echo "error: NODE_SCALING and nxyz of different size"
exit 1
fi
echo "$nxyz"
echo "In gen ${nbEntries}"
let i=0
while (( $i < $nbEntries )); do
echo "${nxyzArray[$i]} => ${nodeArray[$i]}"
TPLARG_APP_NAME=${TARGET_NAME} n=${nodeArray[$i]} c="${CORE_SCALING}" nxyz=${nxyzArray[$i]} ./scripts/invoke.sh gen
let i=$i+1
done
}
# Submit/run the generated batch.
function run {
TARGET=${TARGET_NAME} ./scripts/invoke.sh run
}
# Remove campaign, build and install artifacts.
function clean {
rm -Rf ${CPGN} build install/x86-mpi/${TARGET_NAME}
}
# Collect and display results.
function res {
filenamePrefix="ws_hpcgEager" c=${CORE_SCALING} ./scripts/invoke.sh res
}
# Each CLI argument (optionally prefixed with '-') names one of the
# functions above; they are invoked in order, e.g.:
#   driver.sh -build -gen -run
while [[ $# -gt 0 ]]; do
cmd="${1#-}"
echo "Invoking $cmd"
eval ${cmd}
shift
done
| true
|
9ba58fd42ebdffd35e905cf0ba357a8d5bec5312
|
Shell
|
strivingman/zabbix
|
/zabbix-cpu-temperature/zabbix-temperature.sh
|
UTF-8
| 1,665
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Zabbix low-level discovery helper: prints a JSON "data" array describing
# the hwmon temperature sensors (or fans) found under /sys/devices/platform/.
version=0.2
# Optional site configuration; may define IgnoreSensors, an extended-regex
# alternation of sensor names/paths to skip.
if [[ -e /etc/zabbix/temperature.conf ]]; then
. /etc/zabbix/temperature.conf
fi
# Number of coretemp hwmon devices; used below to cap the sensor list.
n=`ls /sys/devices/platform/|grep coretemp|wc -l`
case "$1" in
"--temperature-discovery")
# Get the list of temperature devices
echo -en '{\n "data":\n ['
for SensorInput in $(/usr/bin/find /sys/devices/platform/ -type f -name temp*_input |sort -t '/' -k6 |grep --color 'temp[0-9]_'|head -$n)
do
# Sibling sysfs files share the tempN_ prefix: _label holds the sensor
# name, _max the alarm threshold.
SensorLabel=${SensorInput/_input/_label}
if [[ $IgnoreSensors ]]; then
# Check ignore list by sensor name first
if grep -qE '('${IgnoreSensors}')' $SensorLabel; then
continue
fi
# Check ignore list by path to sensor as well
if (echo $SensorInput | grep -qE '('${IgnoreSensors}')'); then
continue
fi
fi
SensorMax=${SensorInput/_input/_max}
# Delimiter is empty before the first element and "," afterwards, so the
# JSON array is comma-separated without a trailing comma.
echo -en "$Delimiter\n "
echo -en "{\"{#SENSORLABEL}\":\"$(cat ${SensorLabel})\",\"{#SENSORINPUT}\":\"${SensorInput}\",\"{#SENSORMAX}\":\"${SensorMax}\"}"
Delimiter=","
done
echo -e '\n ]\n}'
exit 0
;;
"--fan-discovery")
# Get the list of fan devices
typeset -i cntLines=0
echo -en '{\n "data":\n ['
for FanInput in $(/usr/bin/find /sys/devices/platform/ -type f -name fan*_input | sort)
do
# typeset -i makes this string assignment evaluate arithmetically.
cntLines=${cntLines}+1
echo -en "$Delimiter\n "
echo -en "{\"{#FANLABEL}\":\"Fan ${cntLines}\",\"{#FANINPUT}\":\"${FanInput}\"}"
Delimiter=","
done
echo -e '\n ]\n}'
exit 0
;;
*)
# This should not occur!
echo "ERROR on `hostname` in $0"
exit 1
;;
esac
| true
|
c8673d0a561b5746adcadbf57ca7ce904ce67b36
|
Shell
|
DarkStarSword/3d-fixes
|
/__profiles__/copy_profiles.sh
|
UTF-8
| 551
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh
# Abort on the first failing command.
# BUG FIX: the options previously lived in the shebang ("#!/bin/sh -e"),
# which is silently ignored when the script is run as "sh copy_profiles.sh";
# setting them here works in both invocation styles.
set -e

# Collect the NVIDIA driver-profile files for one driver release into a
# ./<version>/ directory, then export and sanitise the profiles.
#
# Arguments:
#   $1 - driver version number (used as directory name and path component)
version="$1"
if [ -z "$version" ]; then
	# Usage errors belong on stderr.
	echo "Specify version number" >&2
	exit 1
fi

# Plain mkdir (no -p) fails if the directory exists, which aborts the script
# via set -e and prevents clobbering a previous collection.
mkdir "$version"

cp -v "/cygdrive/c/NVIDIA/DisplayDriver/${version}/Win8_WinVista_Win7_64/International/Display.Driver/nvdrsdb.bi_" "$version/"
cp -v "/cygdrive/c/ProgramData/NVIDIA Corporation/Drs/"*.bin "$version/"
cp -v "/cygdrive/c/windows/SysWOW64/nvwgf2um.dll" "$version/"

echo Now export the profiles by hand...
./'Geforce 3D Profile Manager.exe'

echo Sanitising profiles encoding...
./sanitise_nv_profiles.py "${version}/NVIDIA Profiles.txt"
| true
|
edd78504dc12b202fd6ead5941ee00342dd8cea1
|
Shell
|
dgricci/stretch-base
|
/utilities.sh
|
UTF-8
| 1,239
| 4.125
| 4
|
[
"Apache-2.0"
] |
permissive
|
# usage: file_env VAR [DEFAULT]
# e.g.:  file_env 'XYZ_DB_PASSWORD' 'example'
#
# Exports VAR with a value resolved in order of precedence:
#   1. an already-set $VAR,
#   2. the contents of the file named by $VAR_FILE (Docker secrets support),
#   3. DEFAULT (empty string when omitted).
# Having both $VAR and $VAR_FILE set is an error and terminates the script.
# $VAR_FILE is unset afterwards so the file path does not leak downstream.
file_env() {
  local name="$1"
  local file_var="${name}_FILE"
  local fallback="${2:-}"
  # ${!name} is bash indirect expansion: it reads the variable whose *name*
  # is stored in $name (and likewise for $file_var).
  if [ "${!name:-}" ] && [ "${!file_var:-}" ]; then
    echo >&2 "error: both $name and $file_var are set (but are exclusive)"
    exit 1
  fi
  local resolved="$fallback"
  if [ "${!name:-}" ]; then
    resolved="${!name}"
  elif [ "${!file_var:-}" ]; then
    # $(< file) reads the file without forking cat (trailing newlines dropped).
    resolved="$(< "${!file_var}")"
  fi
  export "$name"="$resolved"
  unset "$file_var"
}
| true
|
7834288bd6c897b6a87aa559befc481499bb969e
|
Shell
|
colmm99/mongo-analytics
|
/reports/export_Data.sh
|
UTF-8
| 632
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Export MongoDB collections to a single CSV file for later analysis.
#
# Arguments (all optional):
#   $1 - mongod host        (default: localhost)
#   $2 - database name      (default: DB_1)
#   $3 - output CSV path    (default: export.csv)
#
# Reads collection names (one per line) from ./collections.txt and the field
# list from ./fields.txt.
MONGO_SERVER=${1-"localhost"}
MONGO_DB=${2-"DB_1"}
EXPORT_FILE=${3-export.csv}

# Delete previous export. The target is a regular file, so plain -f is
# enough (the original -rf was needlessly recursive).
if [ -f "${EXPORT_FILE}" ]; then
	rm -f -- "${EXPORT_FILE}"
fi

# Make Sure Mongo Client is Installed before running
if [ ! -f /usr/bin/mongoexport ]; then
	echo "Mongo Client not installed, it's required" >&2
	exit 1
fi

# Iterate through the collections one line at a time.
# BUG FIX: the original "for C in $(cat ...)" word-split and glob-expanded
# the names; a read loop handles them verbatim.
while IFS= read -r COLLECTION || [ -n "${COLLECTION}" ]; do
	# Skip blank lines.
	[ -n "${COLLECTION}" ] || continue
	# NOTE(review): "-csv" (single dash) looks suspect -- classic mongoexport
	# spells it "--csv" and newer releases "--type=csv"; confirm against the
	# installed client before changing.
	/usr/bin/mongoexport -h "${MONGO_SERVER}" --db "${MONGO_DB}" --collection "${COLLECTION}" -csv --fieldFile fields.txt --query '{ "Status": "OK" }' >> "${EXPORT_FILE}"
done < ./collections.txt
| true
|
26e6262656a3e328629f7b89451fc7ef1b221564
|
Shell
|
egan/scripts
|
/gitnew
|
UTF-8
| 749
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
##
# gitnew -- obtain fresh git commit logs and view them
#
# usage -- gitnew
#
# notes -- intended for use with aliases for specific repos
#
# written -- 25 July, 2012 by Egan McComb
#
# revised --
##

# File holding the last revision shown to the user.
nfile="./.git/revision.info"

if [[ ! -d ./.git ]]
then
	echo "Error: Invalid git repo '$PWD'" >&2
	# BUG FIX: ERR_VARGS was never defined here, so "exit $ERR_VARGS"
	# expanded to a bare "exit" and returned 0 on the error path.
	exit 1
elif [[ ! -s "$nfile" ]]
then
	echo "Error: Revision information unavailable: Creating" >&2
	echo -n "Enter an initial revision name: " >&2
	# -r keeps backslashes in the typed revision name literal.
	read -r response
	echo "$response" > "$nfile"
	# BUG FIX: $pwd (lowercase) is normally unset; $PWD is the cwd.
	echo "Created $PWD/$nfile" >&2
fi

if ! netbool.sh
then
	echo "Error: Internet connectivity poor" >&2
	exit 1
fi

# Show everything committed since the stored revision, oldest first, then
# record the newest commit hash for next time.
lastrev=$(cat "$nfile")
git log --reverse "$lastrev"..HEAD | less
git show | grep 'commit' | cut -d " " -f 2 > "$nfile"
exit 0
| true
|
6e9dcd2520eb72625b1a08ebca6019b1765eb0ab
|
Shell
|
vladimercury/sparklab
|
/3-run-spark.sh
|
UTF-8
| 1,795
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Deploy a single-node Docker Swarm stack with Spark master/worker services,
# wait for the containers to reach "running", execute the job inside the
# master (standalone or YARN mode, selected by "$1"), collect the results,
# and tear the stack down again.
STACK_NAME=sparklab
COMPOSE_FILE=docker/docker-compose.yml
JAR_FILE=docker/code/spark.jar
SCRIPTS=/usr/local/app/scripts
RESULT_DIR=/usr/local/app/result
SLEEP_DELAY=3
# Maximum number of seconds to wait for the containers to come up.
wait_sec_counter=60
master_ok=
# NOTE(review): slave_ok is initialised but the wait loop below tests
# worker_ok instead -- looks like a leftover from a rename; confirm.
slave_ok=
# Check for sudo/root
printf "Checking for root permissions..."
if [[ $(id -u) -ne 0 ]]; then
>&2 echo "Error: Root permissions required"
exit 1
else
echo "OK"
fi
# Remove the stack and leave swarm mode; called on both success and failure.
function do_cleanup () {
docker stack rm $STACK_NAME
docker swarm leave --force
}
if [[ "$1" = "yarn" ]]; then
echo "DOING YARN";
else
echo "DOING STANDALONE";
fi
sleep $SLEEP_DELAY
mkdir -p $RESULT_DIR
# Initialise swarm mode; if the plain init fails (e.g. multiple candidate
# interfaces), retry with an explicit 192.x advertise address.
docker swarm init || {
ADVERTISE_ADDR=$(ifconfig | grep -hoEe "inet addr:192[^ ]+" | grep -hoEe "192[^ ]+" | head -n 1)
docker swarm init --advertise-addr $ADVERTISE_ADDR
}
docker stack deploy --compose-file $COMPOSE_FILE $STACK_NAME && {
# Wait for nodes to run
echo "Waiting...$wait_sec_counter"
while [[ (-z "${master_ok}") && (-z "${worker_ok}") && ($wait_sec_counter -ge 0) ]]; do
master_ok=$(docker ps --filter "name=${STACK_NAME}_master" --filter status=running --format "{{.Names}}")
worker_ok=$(docker ps --filter "name=${STACK_NAME}_worker" --filter status=running --format "{{.Names}}")
let wait_sec_counter=$wait_sec_counter-$SLEEP_DELAY
# \e[1A moves the cursor up one line so the countdown overwrites itself.
echo -e "\e[1AWaiting...$wait_sec_counter"
sleep $SLEEP_DELAY
done
if [[ $wait_sec_counter -le 0 ]]; then
echo "Nodes are not started"
do_cleanup
exit 1
fi
} && {
# Run the job inside the master container and fetch the results.
master=$(docker ps --filter "name=${STACK_NAME}_master" --format "{{.Names}}")
docker exec -it $master $SCRIPTS/init.sh
docker cp $JAR_FILE $master:$SCRIPTS/
if [[ "$1" = "yarn" ]]; then
docker exec -it $master $SCRIPTS/yarn.sh
else
docker exec -it $master $SCRIPTS/standalone.sh
fi
docker exec -it $master $SCRIPTS/get_results.sh
}
do_cleanup
| true
|
2910a42ec75a41b3e39ab625e4088fa461e6f4c4
|
Shell
|
TeamCodeStream/codestream-server
|
/dev-env.sh
|
UTF-8
| 1,745
| 2.953125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
# This file is meant to be sourced into the shell environment
#
# Sets up a local development environment for the codestream-server repo:
# sanity-checks the cwd, loads an optional per-sandbox config, defaults the
# config URL to a local mongo, extends PATH/NODE_PATH with each service's
# bin/node_modules, and ensures a log directory exists.  Uses `return`
# (never `exit`) throughout because it runs in the caller's shell.
[ ! -d .git -o ! -d ./api_server ] && echo "change to the root of your codestream-server repo before sourcing in this file" && return 1
[ -n "$CSBE_SANDBOX" ] && echo "this env config is not compatble with your dev_tools sandbox" && return 1
[ -f .sandbox-config.sh ] && source .sandbox-config.sh && echo "loading .sandbox-config.sh"
[ -z "$CSSVC_BACKEND_ROOT" ] && export CSSVC_BACKEND_ROOT=$(pwd)
# [ -z "$CSSVC_ENV" ] && export CSSVC_ENV=local
[ -z "$CSSVC_CFG_URL" ] && export CSSVC_CFG_URL=mongodb://localhost/codestream
# update paths
export PATH=$CSSVC_BACKEND_ROOT/api_server/bin:$CSSVC_BACKEND_ROOT/broadcaster/bin:$CSSVC_BACKEND_ROOT/outbound_email/bin:$CSSVC_BACKEND_ROOT/inbound_email/bin:$PATH
export NODE_PATH=$CSSVC_BACKEND_ROOT/api_server/node_modules:$CSSVC_BACKEND_ROOT/broadcaster/node_modules:$CSSVC_BACKEND_ROOT/outbound_email/server/node_modules:$CSSVC_BACKEND_ROOT/inbound_email/node_modules:$NODE_PATH
[ ! -d "$CSSVC_BACKEND_ROOT/log" ] && { echo "creating $CSSVC_BACKEND_ROOT/log/ for run-time logs" && mkdir $CSSVC_BACKEND_ROOT/log || return 1; }
# temporary - these need to be removed from the default.json file
[ -z "$OPADM_LOGS" ] && export OPADM_LOGS=$CSSVC_BACKEND_ROOT/log
[ -z "$CS_API_LOGS" ] && export CS_API_LOGS=$CSSVC_BACKEND_ROOT/log
[ -z "$CS_BROADCASTER_SANDBOX" ] && export CS_BROADCASTER_SANDBOX=$CSSVC_BACKEND_ROOT
[ -z "$CS_MAILIN_SANDBOX" ] && export CS_MAILIN_SANDBOX=$CSSVC_BACKEND_ROOT
[ -z "$CS_OUTBOUND_EMAIL_LOGS" ] && export CS_OUTBOUND_EMAIL_LOGS=$CSSVC_BACKEND_ROOT/log
[ -z "$CS_OUTBOUND_EMAIL_TMP" ] && export CS_OUTBOUND_EMAIL_TMP=$CSSVC_BACKEND_ROOT/log
# zsh does not automatically rescan PATH; rebuild its command hash table.
[[ "$SHELL" == *zsh* ]] && rehash
# Show the resulting CSSVC_* settings to the user.
env | grep ^CSSVC_
return 0
| true
|
91fa4f5facea193eb27bc9b5f33b9cc99688714d
|
Shell
|
sexibytes/packer-sexigraf
|
/scripts/base.bak.sh
|
UTF-8
| 1,793
| 2.765625
| 3
|
[] |
no_license
|
# Packer provisioning step for a Debian-based VM image: install base
# packages, relax sshd root login, tune the PVSCSI driver, force classic
# eth0 NIC naming, enable ESX time sync, and (when a second disk /dev/sdb
# exists) format and mount it as a "wfs" data volume.

# Update the box
DEBIAN_FRONTEND=noninteractive apt-get -y update
# apt-get -y install linux-headers-$(uname -r) build-essential
# apt-get -y install zlib1g-dev libssl-dev libreadline-gplv2-dev
DEBIAN_FRONTEND=noninteractive apt-get -y install curl unzip resolvconf console-setup apt-transport-https vim wget htop parted traceroute
# Tweak sshd to prevent DNS resolution (speed up logins)
# echo 'UseDNS no' >> /etc/ssh/sshd_config
# Enable root login over ssh (handles both the default and commented forms).
sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
sed -i 's/#PermitRootLogin/PermitRootLogin/' /etc/ssh/sshd_config
# https://communities.vmware.com/thread/514376
# vmwgfx.enable_fbdev=1
# https://kb.vmware.com/s/article/2053145
echo "options vmw_pvscsi cmd_per_lun=254 ring_pages=32" > /etc/modprobe.d/pvscsi
# fixing eth0 naming
sed -i 's/GRUB_CMDLINE_LINUX=\"\"/GRUB_CMDLINE_LINUX=\"net.ifnames=0 biosdevname=0\"/g' /etc/default/grub
grub-mkconfig -o /boot/grub/grub.cfg
# Rewrite whichever predictable interface name the installer used back to eth0.
sed -i 's/ens160/eth0/g' /etc/network/interfaces
sed -i 's/ens192/eth0/g' /etc/network/interfaces
sed -i 's/ens224/eth0/g' /etc/network/interfaces
sed -i 's/ens256/eth0/g' /etc/network/interfaces
# Enable ESX timesync
vmware-toolbox-cmd timesync enable
if fdisk -l|grep -i "/dev/sdb" > /dev/null; then
# https://www.digitalocean.com/community/tutorials/how-to-partition-and-format-storage-devices-in-linux
# https://askubuntu.com/questions/384062/how-do-i-create-and-tune-an-ext4-partition-from-the-command-line
echo "mount sdb"
# GPT label, one ext4 partition spanning the disk, large inode count.
parted /dev/sdb mklabel gpt
parted -a opt /dev/sdb mkpart primary ext4 0% 100%
mkfs.ext4 -N 8388608 -L wfs /dev/sdb1
mkdir -p /mnt/wfs
echo "#" >> /etc/fstab
echo "LABEL=wfs /mnt/wfs ext4 noatime,nodiratime,barrier=0,nobh,errors=remount-ro 0 1" >> /etc/fstab
mount -a
mkdir -p /mnt/wfs/whisper
fi
| true
|
ab7dce682d952f8d6819d24bdfbdc3a95a94bbb5
|
Shell
|
naparuba/opsbro
|
/test/test_module_statsd.sh
|
UTF-8
| 769
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash

# Integration test for the opsbro STATSD module: enable the listener, start
# the agent, then verify a UDP socket shows up on port 8125.

# Pull in shared helpers (print_header, exit_if_no_crash, ...).
MYDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
. $MYDIR/common_shell_functions.sh

print_header "Starting to test STATSD module"

# Turn the statsd listener on, then bring the agent up and show its log.
opsbro agent parameters add groups statsd-listener
/etc/init.d/opsbro start
cat /var/log/opsbro/daemon.log

# The module needs numpy; block until its compliance rule reports done.
opsbro compliance wait-compliant "Install numpy if statsd module enabled" --timeout=60

# The grep output (the matching netstat line) is deliberately left visible.
echo "Look if the 8125 port is OPEN"
if ! netstat -laputen | grep '^udp' | grep 8125; then
    echo "The STATSD module did not open socket"
    opsbro agent info
    opsbro agent modules state
    exit 2
fi

exit_if_no_crash "opsbro Statsd module is OK"
| true
|
2e347d6a0774ae7e48b9384318cb3645b4663f8f
|
Shell
|
gaspo53/spring-boot-api
|
/src/main/resources/scripts/cluster-update.sh
|
UTF-8
| 539
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Roll the latest master branch out to every API cluster node and restart
# the service on each of them, one host at a time over ssh.
echo "################## CLUSTER UPDATE STARTED #######################"

readonly SSH_USER=root
readonly CLUSTER_NODES=(unboundly-api-cluster-01 unboundly-api-cluster-02 unboundly-api-cluster-03 unboundly-api-cluster-04)
# Everything the remote side has to do: update the checkout, then bounce
# the service.
readonly REMOTE_CMD="cd /root/git/unboundly-api; git checkout master; git pull; /etc/init.d/unboundly-api stop; /etc/init.d/unboundly-api start"

for node in "${CLUSTER_NODES[@]}"; do
    echo "Performing scripts in ${node}"
    ssh "${SSH_USER}@${node}" "${REMOTE_CMD}"
done

echo "################## CLUSTER UPDATE FINISHED #######################"
| true
|
3893761bf045d12ff7a9e9969799402236bb9849
|
Shell
|
sushanthdn/dna-apps
|
/spark_standalone/src/spark_standalone.sh
|
UTF-8
| 2,273
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
set -e -x -o pipefail
# spark_standalone 0.0.1
# Create a spark standalone cluster with a number of worker nodes
# and run your spark application
#
# See https://wiki.dnanexus.com/Developer-Portal for tutorials on how
# to modify this file.
main() {
echo "Value of application: '$application'"
echo "Value of app_input: '$app_args'"
echo "Value of executors: '$workers'"
echo "Value of cores: '$cores'"
echo "Value of executor_memory: '$executor_memory'"
echo "Value of class: '$class'"
echo "Value of in: '$in_files'"
echo "Value of jars : '$jars'"
# The following line(s) use the dx command-line tool to download your file
# inputs to the local file system using variable names for the filenames. To
# recover the original filenames, you can use the output of "dx describe
# "$variable" --name".
# Download all input files
dx-download-all-inputs --parallel
echo "Starting Apache Spark in Standalone Mode"
export SPARK_WORKER_INSTANCES=$workers
JARS=''
if [ -z "$jars" ]
then
JARS=''
else
JARS="--jars $jars"
fi
source /apps/resources/spark/spark.environment
/apps/resources/spark/setup/setup-standalone.sh
$SPARK_HOME/bin/spark-submit --class $class \
--executor-cores $cores \
--executor-memory $executor_memory \
$JARS \
--master $SPARK_MASTER_URL \
$application_path $app_args
# Fill in your application code here.
#
# To report any recognized errors in the correct format in
# $HOME/job_error.json and exit this script, you can use the
# dx-jobutil-report-error utility as follows:
#
# dx-jobutil-report-error "My error message"
#
# Note however that this entire bash script is executed with -e
# when running in the cloud, so any line which returns a nonzero
# exit code will prematurely exit the script; if no error was
# reported in the job_error.json file, then the failure reason
# will be AppInternalError with a generic error message.
# Copy the log files
sudo mkdir -p out/output_files/logs
sudo tar -cvzf out/output_files/logs/$DX_JOB_ID-spark-logs.tar.gz $SPARK_LOG_DIR
# Upload outputs
dx-upload-all-outputs --parallel
}
| true
|
0d6b403ca10b78ce15d89ed0ba3b3f117fb66845
|
Shell
|
shukob/MateriAppsInstaller
|
/77_dsqss/intel.sh
|
UTF-8
| 1,408
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
# Build and install DSQSS (forcing the Intel compiler) into the MateriApps
# prefix tree, then generate a dsqssvars environment file for users.
SCRIPT_DIR=$(cd "$(dirname $0)"; pwd)
. $SCRIPT_DIR/../util.sh
. $SCRIPT_DIR/version.sh
set_prefix
. $PREFIX_TOOL/env.sh
LOG=$BUILD_DIR/dsqss-$DSQSS_VERSION-$DSQSS_MA_REVISION.log
PREFIX="$PREFIX_APPS/dsqss/dsqss-$DSQSS_VERSION-$DSQSS_MA_REVISION"
# Refuse to clobber an existing installation of this version/revision.
if [ -d $PREFIX ]; then
echo "Error: $PREFIX exists"
exit 127
fi
sh $SCRIPT_DIR/setup.sh
rm -rf $LOG
cd $BUILD_DIR/dsqss-$DSQSS_VERSION
start_info | tee -a $LOG
echo "[runConfigure]" | tee -a $LOG
# Force COMPILER=INTEL by patching the stock runConfigure.sh in place
# (inserts the assignment just before its COMPILER case statement).
mv runConfigure.sh runConfigure.sh.org
awk '$0 ~ /case/ && $0 ~ /COMPILER/ {print "COMPILER=INTEL"} {print}' runConfigure.sh.org > runConfigure.sh
sh ./runConfigure.sh | tee -a $LOG
echo "[make]" | tee -a $LOG
make | tee -a $LOG
# Rewrite the WORM_HOME line of wormvars.sh to point at the install prefix.
echo "WORM_HOME=$PREFIX" > wv.sh
awk '$0 !~ /^WORM_HOME/ {print}' bin/wormvars.sh >> wv.sh
mv wv.sh bin/wormvars.sh
# Patch inpgene so its ODIR assignment comes from the $OD variable instead.
awk '$0 !~ /^ODIR=/ {print} $0 ~ /^ODIR=/ {print "ODIR=\$OD"}' bin/inpgene >> inpgene
mv inpgene bin/inpgene
chmod +x bin/inpgene
echo "[make install]" | tee -a $LOG
mkdir -p $PREFIX
cp -r bin $PREFIX
cp -r tool $PREFIX
cp -r samples $PREFIX
finish_info | tee -a $LOG
# Emit a small vars file that simply sources the installed wormvars.sh.
cat << EOF > $BUILD_DIR/dsqssvars.sh
# dsqss $(basename $0 .sh) $DSQSS_VERSION $DSQSS_MA_REVISION $(date +%Y%m%d-%H%M%S)
. $PREFIX/bin/wormvars.sh
EOF
DSQSSVARS_SH=$PREFIX_APPS/dsqss/dsqssvars-$DSQSS_VERSION-$DSQSS_MA_REVISION.sh
rm -f $DSQSSVARS_SH
cp -f $BUILD_DIR/dsqssvars.sh $DSQSSVARS_SH
cp -f $LOG $PREFIX_APPS/dsqss
| true
|
6843d46c0864273c3c5555b4ba5cf4e95a0c3d80
|
Shell
|
mlfcjob/shell
|
/20170726/redirect_in.sh
|
UTF-8
| 141
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# redirecting file input
#
# Demonstrates "exec 0<": permanently redirect the script's stdin so every
# subsequent read pulls from ./testfile instead of the terminal, then print
# each line with a running line number.
exec 0< testfile
count=1
# IFS= preserves leading/trailing whitespace; -r keeps backslashes literal
# (the original bare "read line" mangled both).
while IFS= read -r line
do
echo "Line #$count: $line"
# BUG FIX (idiom): $[ ... ] is deprecated bash arithmetic; use $(( ... )).
count=$((count + 1))
done
| true
|
777f1ff5dd569797304274676e378ca9c9081270
|
Shell
|
per1234/inolibbuglist
|
/scripts/arduino-ci-script-wrapper.sh
|
UTF-8
| 637
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script is necessary because arduino-ci-script's check_keywords_txt's reference link check feature requires environment variables set by set_application_folder and install_ide but when those functions are run they are in a separate bash session so those environment variables are lost. Therefore this kludge of hardcoding them in this wrapper script is necessary.
# Arguments:
#   $1 - path to arduino-ci-script.sh (sourced below)
#   $2 - Arduino application folder passed to set_application_folder
#   $3 - Arduino IDE version to report as the newest installed one
arduinoCIscriptPath="$1"
arduinoCIscriptApplicationFolder="$2"
arduinoCIscriptArduinoIDEversion="$3"
source "$arduinoCIscriptPath"
set_application_folder "$arduinoCIscriptApplicationFolder"
# Hardcode the variable install_ide would normally set (see note above).
NEWEST_INSTALLED_IDE_VERSION="$arduinoCIscriptArduinoIDEversion"
| true
|
f55df8633219bf000ceb18e1f264971e95bd9491
|
Shell
|
jagibson/ec2metadata-role-assumption
|
/setup.sh
|
UTF-8
| 1,369
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the ec2metadata role-assumption container on a dedicated Docker bridge
# network so it can own 169.254.169.254 without touching the host's loopback
# interface.
MYNAME=`whoami`@`hostname`
DOCKERNETNAME='ec2metadata'
# Using a /30 here only allows 169.254.169.254 available.
DOCKERNET='169.254.169.252/30'
DOCKERGATEWAY='169.254.169.253'
# Only prefix commands with sudo when not already running as root.
if [ "`id -u`" == "0" ]; then
sudo=""
else
sudo="sudo"
fi
# Docker Desktop on Windows exposes the client as docker.exe.
if which "docker.exe" > /dev/null 2>&1; then
dockercmd="docker.exe"
else
dockercmd="docker"
fi
# Do not use the lo interface in Linux. Any IPs assigned outside of the
# 127.0.0.0/8 network will be shared across all interfaces. This means your
# 169.254.169.254 IP and role assumption will be shared across the network!
# Why not use a docker net? Then we can be OS-agnostic.
# Create the network only if it does not already exist (inspect fails then).
$sudo $dockercmd network inspect $DOCKERNETNAME &> /dev/null
if [[ $? != 0 ]] ; then
$sudo $dockercmd network create \
--gateway $DOCKERGATEWAY \
--subnet $DOCKERNET \
-o com.docker.network.bridge.enable_icc=true \
-o com.docker.network.bridge.enable_ip_masquerade=true \
-o com.docker.network.bridge.host_binding_ipv4=0.0.0.0 \
-o com.docker.network.bridge.name=$DOCKERNETNAME \
-o com.docker.network.driver.mtu=1500 \
$DOCKERNETNAME
fi
# Mount the caller's AWS profile directory into the container and run it;
# $args and $image may be preset in the environment to override the
# defaults (detached + auto-remove, upstream image).
$sudo $dockercmd run \
--name ec2metadata \
-e RACK_ENV=${RACK_ENV:-production} \
--network $DOCKERNETNAME \
-p 80:80 \
-v `ls -d ${AWS_PROFILE_PATH:-~/.aws}`:/root/.aws \
-e MYNAME \
${args:---rm -d} \
${image:-farrellit/ec2metadata:latest}
| true
|
6db8b7cd9685a3903408f858ee6723e194425e21
|
Shell
|
wkoszek/me
|
/scripts/upload.sh
|
UTF-8
| 163
| 2.640625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Upload a file to the Google Drive folder named below (converting it to a
# native Google Docs format) using the `gdrive` CLI.
#
# Arguments:
#   $1 - path of the file to upload
PARNAME="11_BLOG"
DRIVE=gdrive

# Robustness: fail early with a usage message instead of letting gdrive
# receive an empty -f argument.
if [ -z "$1" ]; then
	echo "usage: $0 file" >&2
	exit 1
fi

# Look up the Drive ID of the parent folder by its title: skip the "Id ..."
# header line, keep the first (ID) column.
PARID=$($DRIVE list -q "title = \"${PARNAME}\"" | grep -v ^Id | cut -d " " -f 1)
$DRIVE upload --convert -p "$PARID" -f "$1"
| true
|
fbcfba51fef60bfaa79da5bec67fb879f94782fc
|
Shell
|
jeremyschlatter/protocol
|
/tools/slither
|
UTF-8
| 405
| 3.453125
| 3
|
[
"BlueOak-1.0.0"
] |
permissive
|
#!/usr/bin/env bash
# BUG FIX: "#!/usr/bin/env bash -euo pipefail" hands env the single argument
# "bash -euo pipefail" as a program name on Linux, so the options were
# silently dropped; set them explicitly instead.
set -euo pipefail

# Note: This script requires slither installed using solc 0.8.9

# Abort with a message if the given command is not available.
function require() {
    # BUG FIX: the original used a ( ... ) subshell, so "exit 1" only left
    # the subshell and the script carried on; { ...; } runs in this shell.
    hash "$1" 2> /dev/null || { echo >&2 "I require $1 but I cannot find it."; exit 1; }
}
require slither

# cd to project root (stop at the filesystem root as a safety net)
while [[ ! -d .git && "$(pwd)" != "/" ]]; do cd ..; done

# Run from inside `tools` so that the slither config files can live there
cd tools && slither ../ "$@"
| true
|
d86552e85d7b0a0b7f3b59ae7c5dbeec692ec2df
|
Shell
|
kevinmao/yelp-sna
|
/analysis/shell/review_per_year.sh
|
UTF-8
| 299
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Count Yelp reviews per year: extract the year from the review date column
# and emit a "<year><TAB><count>" TSV file under $YELP_DATA_STATS.
# global vars
source ../../config.sh
mkdir -p ${YELP_DATA_STATS}
LOGGER "START..."
output=${YELP_DATA_STATS}/review_per_year.tsv
# Column 4 of review.tsv is the date: drop the header line ("date"), keep
# the year (text before the first '-'), count occurrences per year, then
# swap awk's "count year" ordering into "year<TAB>count".
cat ${YELP_DATA_TSV}/review.tsv | cut -f4 | grep -v date | cut -d- -f1 | sort | uniq -c | sort -k2 | awk '{print $2"\t"$1}' > ${output}
LOGGER "Done."
| true
|
f8d154af0e8c380b83d26bf3a4a6c5804633eb39
|
Shell
|
MagdaKC/OPS_LARES
|
/GEODYN_II_ALL_IERS2010_RESXTND_P2
|
UTF-8
| 24,222
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/ksh
#
${SCR_DBG}set -xv
set -xv
# GEODYN_II_ALL_IERS2010_RESXTND
#
# Imports are: CASEDIR CASE STAGE SV YR DATA LOCALDSK
#
#if [ `expr $#` -lt 5 ]; then
# echo "Usage: GEODYN_II_ALL_IERS2010_RESXTND ARC LABEL SAT DATA tdir\007"
# exit 101
#fi
#echo " # "
#echo " # GEODYN_II_ALL_IERS2010_RESXTND $* "
#echo " # "
# date '+DATE: %m/%d/%y%nTIME: %H:%M:%S'
SCR_NAME=GEODYN_II_ALL_IERS2010_RESXTND
SCR_PARM=5
SCR_LIST="ARC LABEL SAT DATA tdir"
SCR_VERS=110624.0
if [ `expr $#` -lt $SCR_PARM ]; then
echo "Usage: $SCR_NAME ${SCR_LIST}\007"
exit 101
fi
DateTime=`date '+DATE: %m/%d/%y TIME: %H:%M:%S'`
echo " # "
echo " # $SCR_NAME $* "
echo " # "
echo " # Version $SCR_VERS "
echo " # "
echo " # Run Date $DateTime "
echo " # "
ARC=$1
LABEL=$2
SAT=$3
DATA=$4
tdir=$5
TMP=$LOCALDSK
# Defaults :
disk=$DISK
# diskC=space/MISSIONS
# usr=$USER
gdir=/${TMP}/gdyn$tdir
cd $gdir
####################################################
# Global Setup #
####################################################
#
#
# General cards
#
cat /${disk}/$SAT/d_base/comn/${SV}glb${CASE}.gen.2012.IERS10 > ftn05
#
# FLUX @ 1 A.U. card
#
grep "D${Date_NEW_ARC}" $MAIN_PATH/INFILES/FLUX_cmp_d25_07_0310_110512_RUNNING_MEAN >> ftn05
#
# POLDYN card
#
grep "D${Date_NEW_ARC}" $MAIN_PATH/INFILES/POLDYN_RECS_1975-2020_20x20 >> ftn05
#
# Lense-Thirring card
#
#cat /${disk}/${usr}/$SAT/d_base/comn/LNSTHR.0.${SV} >> ftn05
#cat /${disk}/${usr}/$SAT/d_base/comn/LNSTHR.1.${SV} >> ftn05
#cat /${disk}/${usr}/$SAT/d_base/comn/LNSTHR.-1.${SV} >> ftn05
#if [ "${CASE}" = "EMT" ]; then
cat /${disk}/$SAT/d_base/comn/LNSTHR.1.${SV} >> ftn05
#fi
#
# poleut cards
#
#if [ "${CASE}" = "DR" ]; then
# if [ "$SAT" = "GISL1" -o "$SAT" = "GISL2" ];then
# zcat /${disk}/$SAT/d_base/padj/${STAGE}/${ARC}.Z >> ftn05
# fi
#elif [ "${CASE}" = "EMT" ]; then
if [ "${CASE}" = "EMT" ]; then
#zcat /${disk}/$SAT/d_base/padj/${STAGE}/${ARC}.Z >> ftn05
zcat /${disk}/GISL1/d_base/padj/WEEKLY/${ARC}.Z >> ftn05
###zcat /${disk}/STARL/d_base/padj/WEEKLY/${ARC}.Z >> ftn05
echo PADJ
fi
#
# earth model
#
if [ "${CASE}" = "DR" ]; then
zcat /${disk}/d_base/grav/${GRAVITY_MDL}.fxd.Z >> ftn05
# TVG cards:
zcat /${disk}/d_base/grav/${GRVTIM}.${SV}.fxd.Z >> ftn05
elif [ "${CASE}" = "EMT" ]; then
zcat /${disk}/d_base/grav/${GRAVITY_MDL}.adj.Z >> ftn05 # Use for standard analyses
# TVG cards:
zcat /${disk}/d_base/grav/${GRVTIM}.${SV}.adj.Z >> ftn05
fi
#
# tide cards
#
if [ "${CASE}" = "DR" ]; then
zcat /${disk}/d_base/tide/ETIDES/${ETIDE_MDL}.gz >> ftn05
zcat /${disk}/d_base/tide/OTIDES/${OTIDE_MDL}.gz >> ftn05
elif [ "${CASE}" = "EMT" ]; then
zcat /${disk}/d_base/tide/ETIDES/${ETIDE_MDL}.gz >> ftn05
zcat /${disk}/d_base/tide/OTIDES/${OTIDE_MDL}.gz >> ftn05
fi
#
# tidal diurnal & semidiurnal COM and EOP cards
#
#
#
if [ "${CASE}" = "DR" ]; then
zcat /${disk}/d_base/comn/di+semidiurnal_tidal_COM_GOT4.7.fxd.Z >> ftn05 # Use for standard analyses
zcat /${disk}/d_base/comn/di+semidiurnal_tidal_EOP_2008.fxd.Z >> ftn05
fi
if [ "${CASE}" = "EMT" ]; then
zcat /${disk}/d_base/comn/di+semidiurnal_tidal_COM_GOT4.7.adj.Z >> ftn05 # Use for standard analyses
zcat /${disk}/d_base/comn/di+semidiurnal_tidal_EOP_2008.adj.Z >> ftn05 # Use for standard analyses
fi
#
#
# oload cards
#
#
zcat /${disk}/d_base/comn/SLR_OLOAD_080520_got4.7.gdyn.Z >> ftn05
#
# reference frame cards
#
if [ "$YR" -gt "50" -a "$YR" -le "99" ];then
arc_yy=`expr $YR + 1900`
elif [ "$YR" -ge "00" -a "$YR" -le "50" ];then
arc_yy=`expr $YR + 2000`
fi
arc_mmdd=`echo ${ARC}|cut -c3-6`
arc_date=$arc_yy$arc_mmdd
if [ "${CASE}" = "DR" ]; then
suffix=fxd
elif [ "${CASE}" = "EMT" ]; then
suffix=adj
fi
if [ "${STAGE}" = "WEEKLY" -o "${STAGE}" = "FORTNTLY" -o "${STAGE}" = "MONTHLY" -o "${STAGE}" = "QUARTERLY" -o "${STAGE}" = "DAILY" ]; then
#if [ "${STAGE}" = "WEEKLY" -o "${STAGE}" = "FORTNTLY" -o "${STAGE}" = "MONTHLY" -o "${STAGE}" = "QUARTERLY" ]; then
#if [ "${STAGE}" = "WEEKLY" ]; then
if [ "$SITEFILE" = "NONE" ]; then
# if [ "$arc_date" -lt "20010624" ];then
#
## pre-Arequipa quake positions:
#
# zcat /${disk}/d_base/site/ITRF2000@970101.${suffix}.Z >> ftn05
#
# elif [ "$arc_date" -ge "20010624" ];then
#
## post-Arequipa quake positions:
#
# zcat /${disk}/d_base/site/ITRF2000@970101q.${suffix}.Z >> ftn05
#
# fi
cat /${disk}/d_base/site/${STAGE}/STAPOS.${suffix} >> ftn05
if [ "$suffix" = "fxd" ];then
##/usr/local/bin/zgrep -v -e SIGVEL /${disk}/d_base/site/SLRF2008/WEEKLY/${ARC}.adj.Z > tftn05
# /usr/local/bin/zgrep -v -e SIGVEL /${disk}/d_base/site/SLRF2008_FINAL/DAILY/${ARC}.adj.Z > tftn05
# /usr/local/bin/zgrep -v -e SIGVEL /${disk}/d_base/site/$ITRF_APRIORI/DAILY/${ARC}.adj.Z > tftn05
zgrep -v -e SIGVEL /umbc/epavlis/data01/LOCAL/magdak/MISSIONS/d_base/site/DAILY/$ITRF_APRIORI/EPOCH_2010.0/${ARC}.adj.Z > tftn05
zgrep -v -e CONSTADJ tftn05 > cftn05
zgrep -v -e CONSTEND cftn05 >> ftn05
elif [ "$suffix" = "adj" ];then
# zcat /${disk}/d_base/site/$ITRF_APRIORI/${STAGE}/${ARC}.adj.Z >> ftn05
##zcat /${disk}/d_base/site/SLRF2008/WEEKLY/${ARC}.adj.Z >> ftn05
# zcat /${disk}/d_base/site/SLRF2008_FINAL/DAILY/${ARC}.adj.Z >> ftn05
# 171116 zcat /${disk}/d_base/site/$ITRF_APRIORI/DAILY/${ARC}.adj.Z >> ftn05
zcat /umbc/epavlis/data01/LOCAL/magdak/MISSIONS/d_base/site/DAILY/$ITRF_APRIORI/EPOCH_2010.0/${ARC}.adj.Z >> ftn05
fi
cat /${disk}/d_base/site/${STAGE}/STAPOS.end >> ftn05
else
# this is the case of stations referenced to mid-arc epoch:
cat /${disk}/d_base/site/${STAGE}/STAPOS.adj >> ftn05
# 171116 /usr/local/bin/zgrep -v -e SIGVEL /${disk}/d_base/site/${STAGE}/$ITRF_APRIORI/${ARC}.adj.Z >> ftn05
zgrep -v -e SIGVEL /umbc/epavlis/data01/LOCAL/magdak/MISSIONS/d_base/site/DAILY/$ITRF_APRIORI/MID-ARC_EPOCH/${ARC}.adj.Z >> ftn05
# zcat /${disk}/d_base/site/${STAGE}/${ARC}.adj.Z >> ftn05
cat /${disk}/d_base/site/${STAGE}/STAPOS.end >> ftn05
# zcat /${disk}/d_base/site/WEEKLY/TEMP/${ARC}.adj.Z >> ftn05
fi
elif [ "${STAGE}" = "MONTHLY/PP6" ]; then
if [ "${CASE}" = "DR" ]; then
zcat /${disk}/$SAT/d_base/site/MONTHLY/PP6/${ARC}.fxd.Z >> ftn05
elif [ "${CASE}" = "EMT" ]; then
zcat /${disk}/$SAT/d_base/site/MONTHLY/PP6/${ARC}.adj.Z >> ftn05
fi
elif [ "${STAGE}" = "ORBIT/PP6" ]; then
# PP6 positions:
zcat /${disk}/$SAT/d_base/site/ORBIT/PP6/${ARC}.Z >> ftn05
fi
#
# endglbl card
#
if [ "${CASE}" = "EMT" ]; then
cat /${disk}/d_base/comn/endglb11 >> ftn05
elif [ "${CASE}" = "DR" ]; then
#cat /${disk}/d_base/comn/endglb11 >> ftn05
cat /${disk}/d_base/comn/endglb33 >> ftn05 # Use for standard analyses
fi
#
#
####################################################
# Iterated Arc Setup #
####################################################
#
#
# Satellite arc setup cards
#
#cat /${disk}/$SAT/d_base/trnd/${CASE}/${STAGE}/$ITRF_APRIORI/${ARC} >> ftn05
cat /${disk}/$SAT/d_base/trnd/${CASE}/${STAGE}/${ARC} >> ftn05
#cat /${disk}/$SAT/d_base/trnd/${CASE}/${STAGE}/V070207/${ARC} >> ftn05
#cat /${disk}/$SAT/d_base/trnd/${CASE}/${STAGE}/${ARC} >> ftn05
#cat /${disk}/$SAT/d_base/trnd/${CASE}/${STAGE}/NEWSPNAX/${ARC} >> ftn05
#cat /${disk}/$SAT/d_base/trnd/${CASE}/${STAGE}/${ARC} >> ftn05
if [ "${CASE}" = "EMT" ]; then
sed "s/EDIT 3.5000000000000D+02 .................................../EDIT 3.5000000000000D+02 1.00000000D+03 0.000000D+00 0.0D+00/g" ftn05 > ftn05a
sed -e "s/REFSYS..../REFSYS1943/g" ftn05a > ftn05b
sed "s/SOLRAD 0 0 0 0 0120 6 1 1.1050000000000D+00 0.00000000D+00 0.000000D+00 0.0D+00/SOLRAD 0 0 0 0 0120 6 1 1.1050000000000D+00 0.00000000D+00 1.000000D-04 0.0D+00/g" ftn05b > ftn05c
#cp ftn05a ftn05
#cp ftn05b ftn05
cp ftn05c ftn05
fi
#
# Check for ORBFIL
#
grep ORBFIL ftn05 > ORBFIL
#
#--------------------------------------------------
#
# Ancillary data sets
#
#--------------------------------------------------
#
# Get the laser tracking data
#
zcat /${disk}/$SAT/d_base/data/TDF/${DATA} > ftn40
#
# Get the Geodyn IERS Tables
#
#ln -s /${disk}/d_base/tabs/iers.current ftn02
#ln -s /${disk}/d_base/tabs/iers5708.ntab0801 ftn02
ln -s /umbc/research/epavlis/MISSIONS/d_base/tabs/iers.current ftn02
#ln -s /umbc/research/epavlis/MISSIONS/d_base/tabs/iers5715.ntab1512 ftn02
#
# Get the Planetary Ephemeris
#
#ln -s /${disk}/d_base/ephm/ephem403.data ftn01
#ln -s /${disk}/d_base/ephm/ephem1403.data ftn01
#ln -s /${disk}/d_base/ephm/ephem1403.ext.data ftn01
#ln -s /${disk}/d_base/ephm/ephem1403.ext.data.intel_native ftn01
#ln -s /${disk}/d_base/ephm/ephem1421.ext.data_RM=8.130725 ftn01
#ln -s /${disk}/d_base/ephm/DE430/ephem1430.data2025.bin.i64_RM=8.210409 ftn01
if [ "${NYEAR}" -lt "2000" ];then
#### after 20130725 valid to 210407
ln -s /${disk}/d_base/ephm/ephem1421.ext.data_RM=8.130725 ftn01
else
#valid after 210408
ln -s /${disk}/d_base/ephm/DE430/ephem1430.data2025.bin.i64_RM=8.210409 ftn01
fi
#
# Get a Gravity Model to satisfy ftn12 on HP
#
zcat /${disk}/d_base/grav/pgs7337b.Z > ftn12
#
# Get the Atmospheric Loading file
#
#zcat /${disk}/d_base/data/APLO_GSFC/040517/${ARC}.Z > ftn24
cat /${disk}/d_base/data/APLO_GSFC/${CURRENT_APLO}/${ARC} > ftn24
#
# Get the Atmospheric Gravity file
#
#cat /${disk}/ATMOS/ATGRAV/MERGED/ATGRAV.TEST_50x50 > fort.18
#cat /${disk}/ATMOS/ATGRAV/MERGED/ATGRAV.TEST_4x4 > fort.18
#cat /${disk}/ATMOS/ATGRAV/MERGED/ATGRAV.TEST > ftn18
#zcat /${disk}/d_base/data/ATGRAV/MERGED/${ARC}.Z > ftn24
cp ftn05 $ARC.SAVEITu05
#cp ftn01 FTN01
#cp ftn02 FTN02
#cp ftn12 FTN12
#cp ftn40 FTN40
#cp ftn05 /${disk}/$SAT/unitX/iisset.$ARC.$LABEL
#compress -f /${disk}/$SAT/unitX/iisset.$ARC.$LABEL
#exit 99
###
#######################################################################
# #
# RUN IIS #
# #
#######################################################################
echo " # "
echo " # Run IIS"
echo " # "
#
#...Execute the IIs
#
ls -la /umbc/research/epavlis/EXECUTABLES/${GDN_2S}
/umbc/research/epavlis/EXECUTABLES/${GDN_2S} > iisout 2> iisout_err
ls -la ftn*
#exit 99
#
cat iisout_err iisout > iis
rm iisout_err iisout
grep "ABNORMAL TERMINATION" iis > err2S
sum err2S > out.err2S
read lerr2S b c < out.err2S
if [ "$lerr2S" = "0" ]; then
\rm err2S out.err2S
else
echo " "
echo " ** 2S ** ABNORMAL TERMINATION " > 2mail
echo " "
cat out.err2S >> 2mail
echo " "
echo " $SAT $ARC $LABEL " >> 2mail
echo " "
# mail -m URGENT_from_2S -t epavlis@umbc.edu < 2mail
\rm 2mail
fi
\rm err2S out.err2S
rm ftn12
mv ftn11 g2e.11
mv ftn41 g2e.12
rm ftn*
rm EXAT*
mv g2e.11 ftn11
mv g2e.12 ftn12
echo " # "
echo " # End of IIS"
echo " # "
#
echo " #"
echo " # Run IIE"
echo " #"
#exit 99
ls -la /umbc/research/epavlis/EXECUTABLES/${GDN_2E}
/umbc/research/epavlis/EXECUTABLES/${GDN_2E} > iie 2> iieerr
#/space/users/epavlis/EXECUTABLES/geodyn_2E_0407p5 > iie 2> iieerr
#/space/users/epavlis/EXECUTABLES/geodyn_2E_0407p3 > iie 2> iieerr
# /space/users/epavlis/EXECUTABLES/giie0511p0.x > iie 2> iieerr
# /space/users/epavlis/EXECUTABLES/ge0511p0.x > iie 2> iieerr
#exit 99
cat iis iie iieerr > gdnout
$SCRIPTS/STATUS gdnout "0NORMAL.END.OF.GEODYN.II-E.EXECUTION." $PROC_STEP > o.STATUS_gdnout 2>&1
#
mv gdnout /${disk}/$SAT/output/${CASE}/${STAGE}/$ARC.$LABEL
mv fort.71 /${disk}/$SAT/emat/${STAGE}/$ARC.$LABEL
#mv fort.71 /${disk}/$SAT/emat/${CASEDIR}/$ARC.$LABEL
mv ftn07 /${disk}/$SAT/punchdout/${CASE}/${STAGE}/$ARC.$LABEL
mv fort.8 /${disk}/$SAT/orbtvu/car/$ARC.$LABEL
mv fort.9 /${disk}/$SAT/summaries/${CASE}/${STAGE}/$ARC.$LABEL
mv fort.10 /${disk}/$SAT/orbtvu/kep/$ARC.$LABEL
mv fort.16 /${disk}/$SAT/corrections/${CASE}/${STAGE}/$ARC.$LABEL
mv fort.20 /${disk}/$SAT/simdat/$ARC.$LABEL
mv fort.94 /${disk}/$SAT/products/arc/${STAGE}/$ARC.$LABEL
mv fort.95 /${disk}/$SAT/products/glb/${STAGE}/$ARC.$LABEL
mv ftn37 /${disk}/$SAT/biaschk/$ARC.$LABEL
mv ftn97 /${disk}/$SAT/telem/$ARC.$LABEL
#
#
#######################################################################
# #
# Create DATACAT Files #
# #
#######################################################################
#
#if [ "${CASE}" = "EMT" ]; then
$SCRIPTS/MAKE_DATA_CATALOG $ARC.$LABEL $SAT ${CASE} $STAGE
#fi
#
#######################################################################
# #
# Process Corrections File #
# #
#######################################################################
#
if [ -s /${disk}/$SAT/corrections/${CASE}/${STAGE}/$ARC.$LABEL ]; then
$SCRIPTS/MAKE_COR_FILE $ARC $LABEL $SAT ${disk} $CASE $STAGE
fi
#mv ftn80 ftn10
# /space/users/epavlis/EXECUTABLES/read_vmat.x > /${disk}/$SAT/output/${CASE}/${STAGE}/R_V.$ARC.$LABEL
#mv ftn11 /${disk}/$SAT/vmat/$ARC.$LABEL.P182
gzip -f /${disk}/$SAT/output/${CASE}/${STAGE}/$ARC.$LABEL
gzip -f /${disk}/$SAT/punchdout/${CASE}/${STAGE}/$ARC.$LABEL
gzip -f /${disk}/$SAT/orbtvu/car/$ARC.$LABEL
gzip -f /${disk}/$SAT/summaries/${CASE}/${STAGE}/$ARC.$LABEL
gzip -f /${disk}/$SAT/orbtvu/kep/$ARC.$LABEL
gzip -f /${disk}/$SAT/corrections/${CASE}/${STAGE}/$ARC.$LABEL
gzip -f /${disk}/$SAT/biaschk/$ARC.$LABEL
gzip -f /${disk}/$SAT/simdat/$ARC.$LABEL
gzip -f /${disk}/$SAT/telem/$ARC.$LABEL
#gzip -f /${disk}/$SAT/emat/${STAGE}/$ARC.$LABEL
#gzip -f /${disk}/$SAT/vmat/$ARC.$LABEL
#
#--------------------------------------------------
# If this is an EMT case, then convert and save orbits, otherwise NOT !
if [ "${CASE}" = "EMT" ]; then
#--------------------------------------------------
ls -al
#
cp fort.30 /${disk}/$SAT/traj/${ARC}.${LABEL}.orbfil
gzip -f /${disk}/$SAT/traj/${ARC}.${LABEL}.orbfil
#
#cp fort.30 /${disk}/$SAT/traj/$STAGE/${ARC}.${LABEL}.orbfil
#gzip -f /${disk}/$SAT/traj/${STAGE}/${ARC}.${LABEL}.orbfil
#
#--------------------------------------------------
#
#######################################################################
# #
# Run Trajectory Conversion Program #
# #
#######################################################################
#
#
# Convert the trajectory file to RVG format, but only when a non-empty
# ORBFIL exists in the work directory.
if [ -s ORBFIL ]; then
\rm ftn05
#
# tj2rvg.x reads the master table through the gdntable.mst symlink
# (assumed to be the station/master table -- TODO confirm)
ln -s /umbc/research/epavlis/MISSIONS/d_base/tabs/mstr.current gdntable.mst
/umbc/research/epavlis/EXECUTABLES/tj2rvg.x > out6
#
cat out6
mv fort.40 /${disk}/$SAT/traj/${STAGE}/RVG/${ARC}.${LABEL}.rvg
# hand this arc off to the downstream rv2sp13 conversion step
echo $ARC.$LABEL > $MAIN_PATH/INFILES/rv2sp13_INFILE.$SAT
fi
fi
#######################################################################
# #
# Process the Residuals #
# #
#######################################################################
if [ -s fort.19 ]; then
cp fort.19 /${disk}/$SAT/residuals/${CASE}/${STAGE}/$ARC.$LABEL
chmod 775 /${disk}/$SAT/residuals/${CASE}/${STAGE}/$ARC.$LABEL
####
#### Add new XTND processing
####
###utc2gps=0
###infil=resids
###
###cp fort.19 resids
###
###echo $utc2gps >binres.in5
###echo $infil >>binres.in5
###
### /umbc/research/epavlis/EXECUTABLES/binres.x
###
###
#### /space/users/epavlis/EXECUTABLES/binres.x
###
###sed -n "1,1p" resids.ascii > t0
###sed -n "2,\$p" resids.ascii > t1
####cut -c1-84 t1 > t2
####cut -c85-150 t1 > t3
####paste -d" " t2 t3 > t4
###
###sort -b -n -k 4,4 -k 6,6 -k 9,9 -o t5 t1
###
####cat t0 t5 > resids.sort
###
####sort -n -k 5,5 -k 7,7 -k 8,8 -k 10,10 -o t6 t5
###
####sort -n -k 4,4 -k 7,7 -k 8,8 -o $infil.sort $infil.ascii
###
###
###cat t0 t5 > sort_by_config
###
###sort -b -n -k 9,9 resids.ascii -o resids.ascii
####
###mv resids.ascii /${disk}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.ascii
####mv resids.sort /${disk}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.sort
###mv sort_by_config /${disk}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.sort_by_config
###
####
###gzip -f /${disk}/$SAT/residuals/${CASE}/${STAGE}/${ARC}.${LABEL}
####
###gzip -f /${disk}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.ascii
####gzip -f /${disk}/${usr}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.sort
###gzip -f /${disk}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.sort_by_config
###
###zcat /${disk}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.ascii > ${ARC}.${LABEL}
###
###fi
#
# Add new XTND processing
#
utc2gps=0
infil=residsN
cp fort.19 residsN
echo $utc2gps >binres.in5
echo $infil >>binres.in5
#/umbc/research/epavlis/EXECUTABLES/binresXTND07c.x
### 180316 /umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/binresXTND07c_mkc.x
/umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/P2/binresXTND07c_p2.x
#/space/users/magda/EXECUTABLES/binresN3.x
cp residsN.ascii TTD
#cp /${disk}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.sort_by_config TTD
grep "Reference Time MJD:" TTD > TTA
#grep "Reference Time MJD:" residsN.ascii > TTA2
cut -c20-40 TTA > TTB
#/umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/TIME_RES.x
## 180316 /umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/TIME_RES_P.x
/umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/P2/TIME_RES_p2.x
read YRR MR DRR < TTC
if [ "$YRR" -gt "50" -a "$YRR" -le "99" ];then
YRa=`expr $YRR + 1900`
elif [ "$YRR" -ge "00" -a "$YRR" -le "50" ];then
YRa=`expr $YRR + 2000`
fi
printf "$YRa $MR $DRR" > TIMEb
#
chmod 755 res*
#
## Program split into part up and part down
#
## 180316 /umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/res_up_down.x
/umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/P2/res_up_down_p2.x
## Change geocentric coordinates to topocentric (azimuth and elevation) and put fi and lambda for two cases: up and down
#
#/space/users/epavlis/EXECUTABLES/ecf2tcf.x
cp residsN.ascii TTE
grep "Reference Time MJD:" TTE > rRT
grep -v "Reference Time MJD:" TTE > residsN.ascii
##180316 /umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/cord.x
/umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/P2/cord_p2.x
#
chmod 755 res*
## Format TIMEa: 2004 z40808
#
echo "${arc_yy} ${ARC}" > TIMEa
#
## Merit data to new file
#
#cat /${disk}/$SAT/d_base/data/MRT2/L1_07 > merit-data
#zcat /${disk}/$SAT/d_base/data/MRT2/${SAT}_${YR}.mrt2.Z > merit-data
zcat /${disk}/$SAT/d_base/data/MRT2/${YR}.${SAT}.mr2.Z > merit-data
#
#
##Joins files from binary from programs: cord and res_up_down to one file
#
#/space/users/epavlis/EXECUTABLES/addpos2res.x
## 180316 /umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/res_bias3.x
/umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/P2/res_bias3_p2.x
##Format TIMEb: 2004 08 08
#
infile2=TIMEb
#
printf "$YRa $MR $DRR" > TIMEb
read YY MM DD < TIMEb
###################read YY MM DD < TIMEb
#
##Format TIMEc: 04221
#
#
#/space/users/epavlis/scripts/ymd2ydoy $YY $MM $DD > TIMEc
/umbc/research/epavlis/scripts/ymd2ydoy $YY $MM $DD > TIMEc
#
sort -u -n -k 10,10 residsN.ascii_last2 > residsN.ascii_last2_c
#
##Program: (1) reads files TIME, TIMEc.
## (2) Make new file TIMEc
##Format TMIEd - two line-
##0422105268708 - beginning of arc
##0422784790508 - ending of arc
##(3) cuts file Merit data for the beginning and ending of arc.
##(4) joins data from residual files with data from MERIT II.
#
#
cp residsN.ascii_last2_c residsN.ascii_last2
#
##/space/users/epavlis/EXECUTABLES/addMRT2resXTND28.x
####_101007 /umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/addMRT2resXTND.x
#NEW program correct with pass from old to new year
##180316 /umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/addMRT2resXTND_08.x
#/space/users/epavlis/EXECUTABLES/addMRT2resXTND7.x
#/space/users/magda/EXECUTABLES/res_bias4.x
/umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/P2/addMRT2resXTND_p2.x
#
chmod 755 *.*
#
mv residsN.ascii_last /${disk}/$SAT/residuals_ascii_xtnd/${CASE}/${STAGE}/${ARC}.${LABEL}.obsresxtnd
mv residsN.ascii_last2_c /${disk}/$SAT/residuals_ascii_xtnd/${CASE}/${STAGE}/${ARC}.${LABEL}.resxtnd
#
sed -n "1,1p" residsN.ascii > t0
sed -n "2,\$p" residsN.ascii > t1
#cut -c1-84 t1 > t2
#cut -c85-150 t1 > t3
#paste -d" " t2 t3 > t4
sort -b -n -k 4,4 -k 6,6 -k 9,9 -o t5 t1
#cat t0 t5 > resids.sort
#sort -n -k 5,5 -k 7,7 -k 8,8 -k 10,10 -o t6 t5
#sort -n -k 4,4 -k 7,7 -k 8,8 -o $infil.sort $infil.ascii
cat t0 t5 > sort_by_config
sort -b -n -k 9,9 residsN.ascii -o residsN.ascii
#
cp rRT rRT2
cat residsN.ascii >> rRT2
cp rRT2 residsN.ascii
mv residsN.ascii /${disk}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.ascii
#mv resids.sort /${disk}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.sort
cat sort_by_config >> rRT
cp rRT sort_by_config
mv sort_by_config /${disk}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.sort_by_config
#
gzip -f /${disk}/$SAT/residuals/${CASE}/${STAGE}/${ARC}.${LABEL}
#
gzip -f /${disk}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.ascii
#gzip -f /${disk}/${usr}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.sort
gzip -f /${disk}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.sort_by_config
zcat /${disk}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.ascii > ${ARC}.${LABEL}
fi
#######################################################################
# #
# RESIDUAL to plot #
#######################################################################
cp /${disk}/$SAT/residuals_ascii_xtnd/${CASE}/${STAGE}/${ARC}.${LABEL}.resxtnd res-file_org
zcat /${disk}/$SAT/residuals_ascii/${CASE}/${STAGE}/${ARC}.${LABEL}.ascii.gz > rRT
/umbc/epavlis/data01/LOCAL/magdak/EXECUTABLES/resfile2plot.x
cp RES2PLOT /${disk}/$SAT/residuals_ascii_xtnd/${CASE}/${STAGE}/${Date_NEW_ARC}.${LABEL}.RES2PLOT
cp RES2PLOT /${disk}/$SAT/residuals_ascii_xtnd/${CASE}/${STAGE}/${ARC}.${LABEL}.RES2PLOT
rm res-file_org res-file2 res-file rRT RT
#######################################################################
# #
# Combine the Residuals and Corrections files into one #
# #
#######################################################################
if [ -s /${disk}/$SAT/corrections/${CASE}/${STAGE}/CORR.${ARC}.${LABEL}.gz ]; then
if [ -s ${ARC}.${LABEL} ]; then
zcat /${disk}/$SAT/corrections/${CASE}/${STAGE}/CORR.${ARC}.${LABEL}.gz > CORR.tmp
sort -n -k 1,1 CORR.tmp -o CORR.tmp.s
mv CORR.tmp.s CORR.tmp
sort -n -k 2,2 ${ARC}.${LABEL} -o ${ARC}.${LABEL}.s
mv ${ARC}.${LABEL}.s ${ARC}.${LABEL}
$SCRIPTS/MAKE_COR+RES_FILE ${ARC}.${LABEL} CORR.tmp
zcat /${disk}/$SAT/orbtvu/car/$ARC.${LABEL}.gz > tmp0
sed -n "58,\$p" tmp0 > tmp1
grep -v '[A-Z]' tmp1 > tmp2
echo " Latitude Longitude Height " > ORBT.tmp
cut -c98-133 tmp2 >> ORBT.tmp
paste -d" " ${ARC}.${LABEL}.cor ORBT.tmp > CORR+ORBT.tmp
gzip -f CORR+ORBT.tmp
mv CORR+ORBT.tmp.gz /${disk}/$SAT/corrections/${CASE}/${STAGE}/${ARC}.${LABEL}.corb.gz
cp ${ARC}.${LABEL}.cor /${disk}/$SAT/corrections/${CASE}/${STAGE}/${ARC}.${LABEL}.cor
gzip -f /${disk}/$SAT/corrections/${CASE}/${STAGE}/${ARC}.${LABEL}.cor
fi
fi
#
#
# -----------------------------------------------------------
#
ls -al
#
# End of IIE
#
cd ..
\rm -r gdyn$tdir
| true
|
5d504a1a58dbbec2f08c97daef2e750683178c54
|
Shell
|
kalman/rc
|
/profile
|
UTF-8
| 4,752
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Undo stuff before sourcing anything
#
# NOTE(review): `grep $a` searches the whole `alias -p` listing for the
# bare name, so any alias whose definition merely *contains* the string
# also triggers the unalias -- errors are muted, so this is best-effort.
for a in l ll la; do
if alias -p | grep $a &>/dev/null; then
unalias $a &>/dev/null
fi
done
#
# Platform specific (first because some stuff in here relies on it)
#
case `uname` in
Darwin) source $HOME/.rc/profile_Darwin ;;
Linux) source $HOME/.rc/profile_Linux ;;
esac
#
# Paths etc
#
# Blue is 34m, green is 32m, red is 31m. They used to be 01;3Xm, I can't remember what 01; is for.
export PS1='\[\033[34m\]\w \[\033[31m\]$(__git_ps1 "(%s)")\n\[\033[01;32m\]> \[\033[00m\]'
export EDITOR="vim"
export SVN_LOG_EDITOR="$EDITOR"
# Each line prepends, so later entries win: /opt/local (MacPorts) ends up
# first on PATH, then /usr/bin, then the ~/local tool directories.
export PATH="$HOME/local/bin:$PATH"
export PATH="$HOME/local/rc_scripts:$PATH"
export PATH="$HOME/local/depot_tools:$PATH"
export PATH="$HOME/local/npm-global/bin:$PATH"
export PATH="$HOME/goma:$PATH"
export PATH="/usr/bin:$PATH"
export PATH="/opt/local/bin:/opt/local/sbin:$PATH"
#
# General
#
# Small convenience wrappers around common commands.
fn() { find . -name "$@"; }                     # find by name from cwd
c() { cd -P "$@"; }                             # cd, resolving symlinks
ll() { l -l "$@"; }
la() { l -A "$@"; }
lla() { l -lA "$@"; }
v() { vim -p "$@"; }                            # open files in tabs
e() { vim -p $(echo $@ | sed 's/:/ +/'); }      # "file:line" -> "file +line"
wg() { wget --no-check-certificate -O- "$@"; }
grr() { grep -rn --color --exclude='.svn' "$@"; }
s() { screen -DR "$@"; }
# Prefix every input line with $1.  Fix: the original sed expression
# ("s|^|$1") was unterminated, and "$@" also forwarded the prefix itself
# as a file operand, so the function never worked.
prepend() { local p=$1; shift; sed "s|^|$p|" "$@"; }
sx() { ssh -Y "$@"; }
# vl file:line -- open the given file in vim, jumping to the given line.
# Mirrors the original cut(1) behavior: with no colon, both parts are the
# whole argument (vim treats "+name" as a command, as before).
vl() {
  file=${1%%:*}
  line=${1#*:}
  line=${line%%:*}
  v "$file" +"$line"
}
#
# Go
#
export GOPATH="$HOME/src/go"
export NOMS_VERSION_NEXT=1
# jump to the Go workspace source root (uses the `c` cd-wrapper above)
cdg() {
c "$GOPATH/src"
}
#
# Quip
#
# Android SDK tools are appended (not prepended), so system binaries with
# the same names still take precedence.
export PATH="$PATH:$HOME/android-sdk-macosx/emulator"
export PATH="$PATH:$HOME/android-sdk-macosx/tools"
export PATH="$PATH:$HOME/android-sdk-macosx/tools/bin"
export PATH="$PATH:$HOME/android-sdk-macosx/platform-tools"
export PATH="$PATH:$HOME/quip/android/tools"
export ANDROID_HOME="$HOME/android-sdk-macosx"
#
# Git
#
# Load git's bash completion helpers, then attach the appropriate
# completion function to each short alias defined below (branch-name
# completion for branch-ish commands, path completion for diff-ish ones).
source "$HOME/.rc/git_completion"
complete -o default -o nospace -F _git_branch changed
complete -o default -o nospace -F _git_branch cherry-pick
complete -o default -o nospace -F _git_branch gb
complete -o default -o nospace -F _git_branch gcb
complete -o default -o nospace -F _git_checkout gch
complete -o default -o nospace -F _git_checkout gchr
complete -o default -o nospace -F _git_diff gd
complete -o default -o nospace -F _git_diff gdt
complete -o default -o nospace -F _git_diff gdno
complete -o default -o nospace -F _git_diff gdns
complete -o default -o nospace -F _git_diff gds
complete -o default -o nospace -F _git_merge_base gmb
complete -o default -o nospace -F _git_log gl
complete -o default -o nospace -F _git_rebase gr
# One-liner git wrappers; each forwards its arguments verbatim.
g() { git "$@"; }
ga() { git add "$@"; }
gb() { git branch "$@"; }
# delete branch(es), but only after a single-keystroke y/N confirmation
gbD() {
read -p 'Are you sure? [y/N] ' -n1 READ;
if [ "$READ" == 'y' ]; then git branch -D "$@"; fi
}
gc() { git commit "$@"; }
gcaa() { gc -a --amend; }
gch() { git checkout "$@"; }
gcho() { git checkout origin/master "$@"; }
gcp() { git cherry-pick "$@"; }
gd() { git diff "$@"; }
gdno() { git diff --name-only "$@"; }
gdns() { git diff --name-status "$@"; }
gdo() { git diff origin/master "$@"; }
gds() { git diff --stat "$@"; }
gdt() { git difftool "$@"; }
gfa() { git fetch --all --verbose "$@"; }
gg() { git grep "$@"; }
gl() { git log "$@"; }
gls() { git ls-files "$@"; }
gm() { git merge "$@"; }
gmo() { git merge origin/master "$@"; }
gmb() { git merge-base "$@"; }
gp() { git pull "$@"; }
gpr() { git pull --rebase --autostash "$@"; }
gpu() { git push "$@"; }
gr() { git rebase "$@"; }
gro() { git rebase origin/master "$@"; }
gs() { git status "$@"; }
# push the current branch (gcb, defined below) to origin
gpgp() { git push origin "$(gcb)" "$@"; }
# List paths with unresolved merge conflicts (status codes AA/UU/DD etc.).
unmerged() {
git status -s | grep '^[AUD][AUD] ' | cut -f2 -d' '
}
# Commit with the current branch name as the message.
gC() {
gc -m "$(gcb)" "$@"
}
# Print the current branch name (the line `git branch` marks with '*').
gcb() {
git branch | grep '^*' | cut -f2- -d' '
}
# Merge-base of the current branch and origin/master.
gbase() {
gmb "$(gcb)" origin/master
}
# "Hide" branches by renaming them to __YYYY-MM-DD__<name>.
ghide() {
if [ -z "$1" ]; then
echo "ERROR: no branch(es) supplied"
return
fi
for branch in "$@"; do
gb "$branch" -m "__`date +%F`__$branch"
done
}
# Names of files changed relative to $1 (default: the branch's merge base).
changed() {
base="$1"
if [ -z "$base" ]; then
base=`gbase`
fi
gdno "$base"
}
# Check out $1, then rebase it onto the branch we started on.
gchr() {
oldBranch="$(gcb)"
branch="$1"
if [ -z "$branch" ]; then
echo "ERROR: no branch supplied"
return
fi
gch "$branch"
gr "$oldBranch"
}
# Find tracked files named $1 at the repo root or in any subdirectory.
gf() {
gls "$1" "*/$1"
}
# In-place search/replace across all tracked files matching the pattern.
# Usage: greplace FROM TO [extra git-grep args].
# NOTE(review): `sedi` and $SED_I_SUFFIX are not defined in this file --
# presumably provided by the platform profile sourced above; verify.
# NOTE(review): the unquoted `for f in \`gg -l ...\`` word-splits
# filenames containing spaces.
greplace() {
from="$1"
to="$2"
shift 2
for f in `gg -l "$@" "$from"`; do
echo "Replacing in $f"
sedi $SED_I_SUFFIX "s%$from%$to%g" "$f"
done
}
# Upload a Chromium-style CL against the branch's merge base.
gclu() {
g cl upload `gbase` "$@"
}
# List local branches ordered by last commit date (oldest first).
gh() {
git for-each-ref --sort=committerdate refs/heads --format='%(refname:short)'
}
# groot -- print the root of the enclosing git checkout (the nearest
# ancestor directory, including cwd, that contains a .git directory),
# leaving the caller's working directory unchanged.  Prints nothing if
# no .git is found before reaching /.
# Fix: the original's unquoted `pwd` command substitutions broke
# ("too many arguments") on paths containing spaces.
groot() {
  oldpwd=$PWD
  while [ "$PWD" != / ]; do
    if [ -d .git ]; then
      pwd
      break
    fi
    cd ..
  done
  cd "$oldpwd"
}
# Create a new branch $1 tracking origin/master and switch to it.
gnb() {
gch -b "$1" --track origin/master
}
# Pretty-print JSON read from stdin.
jsonp() {
python -m json.tool
}
| true
|
08bd71497ece0a3fe4c691208af64aa835adfcd4
|
Shell
|
iconara/heller
|
/bin/kafka
|
UTF-8
| 1,796
| 3.625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Poll localhost:$1 with nc until it accepts a connection, trying up to
# ten times one second apart.  Returns 0 on success; prints a diagnostic
# and returns 1 after the attempts are exhausted.
function awaitport() {
  local try
  for try in 1 2 3 4 5 6 7 8 9 10; do
    nc -z localhost $1 && return 0
    sleep 1
  done
  echo "Failed to connect to port $1"
  return 1
}
# Start a local ZooKeeper and Kafka broker for the test suite.
# The classpath is assembled from jars inside bundled gems; console logs
# go to ./tmp.  Sets RETVAL=1 and aborts if either service fails to come up.
function start() {
  base_dir=$(dirname $0)/..
  log_dir=$base_dir/tmp
  config_dir=$base_dir/spec/support/config
  classpath=$(ls -1 $(bundle show scala-library-jars)/lib/*.jar | paste -sd: -)
  classpath=$classpath:$(ls -1 $(bundle show slyphon-log4j)/lib/*.jar | paste -sd: -)
  classpath=$classpath:$(ls -1 $(bundle show slf4j-jars)/lib/slf4j-{api,simple}-*.jar | paste -sd: -)
  classpath=$classpath:$(ls -1 $(bundle show zookeeper-jars)/lib/*.jar | paste -sd: -)
  classpath=$classpath:$(ls -1 $(bundle show metrics-core-jars)/lib/*.jar | paste -sd: -)
  classpath=$classpath:$(ls -1 $(bundle show snappy-jars)/lib/*.jar | paste -sd: -)
  classpath=$classpath:$(ls -1 $(bundle show kafka-jars)/lib/*.jar | paste -sd: -)
  kafka_java_opts="-Xmx512M -server -Dlog4j.configuration=file:$config_dir/log4j.properties -cp $classpath"
  mkdir -p $log_dir
  echo "Starting zookeeper"
  java $kafka_java_opts org.apache.zookeeper.server.quorum.QuorumPeerMain $config_dir/zookeeper.properties < /dev/null >> $log_dir/zookeeper_console.log 2>&1 &
  # Fix: the original used `|| (RETVAL=1 && return)` -- the parentheses
  # spawn a subshell, so both the assignment and the `return` were no-ops
  # and the function carried on after a failed wait.  Braces run in the
  # current shell and really abort.
  awaitport 2181 || { RETVAL=1; return 1; }
  echo "Starting kafka"
  java $kafka_java_opts kafka.Kafka $config_dir/server.properties < /dev/null >> $log_dir/kafka_console.log 2>&1 &
  awaitport 9092 || { RETVAL=1; return 1; }
}
# Terminate the Kafka broker and ZooKeeper instance started by start(),
# silencing any "no such process" noise.
function stop() {
  kill $(jps -m | awk '/Kafka/ { print $1 }') > /dev/null 2>&1
  kill $(jps -m | awk '/zookeeper/ { print $1 }') > /dev/null 2>&1
}
# Dispatch on the first CLI argument; RETVAL carries the script's exit code.
RETVAL=0
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
# restart = stop followed by a fresh start
stop
start
;;
*)
# NOTE(review): $NAME is never assigned anywhere in this script, so the
# usage line prints an empty program name -- presumably $0 was intended.
echo "Usage: $NAME {start|stop|restart}" >&2
RETVAL=3
;;
esac
exit $RETVAL
| true
|
043c8ab5ebe80378943aee2d4ae54b9dab05a4f8
|
Shell
|
YordanGeorgiev/min-aws-cli
|
/src/bash/run/check-install-py-modules.func.sh
|
UTF-8
| 214
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install the project's python modules with poetry inside $TGT_DIR,
# falling back to $PRODUCT_DIR when TGT_DIR is empty or unset.
# Relies on do_check_install_poetry (defined elsewhere in this project).
do_check_install_py_modules(){
   # Quote the expansions: the unquoted originals word-split on paths
   # containing spaces.
   test -z "${TGT_DIR:-}" && TGT_DIR=$PRODUCT_DIR
   do_check_install_poetry "$TGT_DIR"
   # Abort instead of running poetry in the wrong directory if cd fails.
   cd "$TGT_DIR" || return 1
   poetry config virtualenvs.create true
   poetry install -v
   cd -
}
| true
|
ce45bf1b33dfd59291b5ac18cdb41a50e4563e58
|
Shell
|
dsc/scm-scripts
|
/svnurl.sh
|
UTF-8
| 264
| 3.71875
| 4
|
[] |
no_license
|
#! /bin/sh
# Extracts the repository URL from an svn or git-svn working copy.
# Usage: svnurl.sh [working-copy-dir]   (defaults to the current directory)
CWD=$PWD
URL="${1:-.}"
# Fix: quote the directory arguments -- the unquoted $URL/$CWD broke on
# paths with spaces, silently leaving the script querying the wrong
# directory.  Also bail out if the target directory cannot be entered.
cd "$URL" || exit 1
if test -e '.git'; then
    INFO=$(git svn info | fgrep 'URL: ')
else
    INFO=$(svn info . | fgrep 'URL: ')
fi
# second whitespace-delimited field of "URL: <url>" is the URL itself
echo $INFO | awk '{ print $2 }'
cd "$CWD"
| true
|
983c901bcebeeb43b1b34b1cb4fdc06cf654fc22
|
Shell
|
CDCgov/phoenix
|
/bin/sort_and_prep_dist.sh
|
UTF-8
| 4,382
| 3.609375
| 4
|
[
"CC0-1.0",
"LicenseRef-scancode-us-govt-public-domain",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash -l
#
# Description: Grabs the best species match based on %/read hits from the kraken tool run. Simplified for nextflow inclusion
#
# Usage: ./kraken_best_hit.sh -i path_to_.list_file
#
# Output location: same as input path_to_.list_file
#
# Modules required: None
#
# v1.0.3 (04/19/2022)
#
# Created by Nick Vlachos (nvx4@cdc.gov)
#
# Function to print out help blurb
# Print a short usage summary for this script.
show_help () {
    printf '%s\n' \
        "Usage is ./sort_and_prep_dists.sh -a assembly -x dists_file -d database_of_fastas_matching_dist_file_output" \
        "Output is saved to folder where .list file exists"
}
# Parse command line options
# NOTE(review): the getopts string declares -d, but there is no matching
# case arm, so any -d argument is silently discarded.
options_found=0
while getopts ":h?x:d:a:o:t:" option; do
options_found=$(( options_found + 1 ))
case "${option}" in
\?)
echo "Invalid option found: ${OPTARG}"
show_help
exit 0
;;
x)
# mash dist output file to process
echo "Option -x triggered, argument = ${OPTARG}"
dist_file=${OPTARG}
;;
a)
# assembly fasta for this sample
echo "Option -a triggered, argument = ${OPTARG}"
assembly_file=${OPTARG}
;;
o)
# directory holding (and receiving) reference fasta .gz files
echo "Option -o triggered, argument = ${OPTARG}"
outdir=${OPTARG}
;;
t)
# "terra" switches bc/wget to the conda-env copies below
echo "Option -t triggered"
terra=${OPTARG}
;;
:)
# NOTE(review): message reads "requires as argument" -- typo for "an"
echo "Option -${OPTARG} requires as argument";;
h)
show_help
exit 0
;;
esac
done
if [[ "${options_found}" -eq 0 ]]; then
echo "No argument supplied to best_hit_from_kraken_noconfig.sh, exiting"
show_help
exit 1
fi
# set the correct path for bc/wget - needed for terra
# On Terra the phoenix conda env supplies bc/wget and the NCBI FTP cert
# check must be skipped; elsewhere use the system binaries.
if [[ $terra = "terra" ]]; then
bc_path=/opt/conda/envs/phoenix/bin/bc
wget_path=/opt/conda/envs/phoenix/bin/wget
certificate_check="--no-check-certificate"
else
bc_path=bc
wget_path=/usr/bin/wget
certificate_check=""
fi
# Based upon standard naming protocols pulling last portion of path off should result in proper name
sample_name=$(basename "${dist_file}" .txt)
sorted_dists="${dist_file//.txt/_sorted.txt}"
# Sort the mash-dist output numerically on column 3 (the distance).
sort -k3 -n -o "${sorted_dists}" "${dist_file}"
# Distance of the 20th-closest hit becomes the inclusion cutoff below.
cutoff=$(head -n20 "${sorted_dists}" | tail -n1 | cut -d' ' -f3)
### Could add some logic here to prevent terrible isolats from continuing on
echo "Cutoff IS: ${cutoff}"
matches=0
# Needed a new variable to put a hard stop on fill-ins being 150% of orignal. Example - if max ani samples to use is 20, the 30th sample is the last one that could be used as filler
#maxplus_ani_samples=$(echo 'scale=0; 1.5 * '${max_ani_samples}' / 1' | bc -l)
#echo "${assembly_file}" > "${sample_name}_best_MASH_hits.txt"
# Walk the distance-sorted hits; for each hit within the cutoff, record
# the local reference fasta (downloading it from NCBI when absent).
# Because the input is sorted, the first hit past the cutoff ends the loop.
# NOTE(review): several "$(unknown)" tokens below look like redacted or
# corrupted command substitutions -- they are undefined here and will run
# a command literally named "unknown"; restore the original expressions.
while IFS= read -r var; do
echo "${var}"
# column 1 (tab-delimited) = reference name; cols 3/5 = distance, kmer hits
source=$(echo "${var}" | cut -d$'\t' -f1)
dist=$(echo ${var} | cut -d' ' -f3)
kmers=$(echo ${var} | cut -d' ' -f5 | cut -d'/' -f1)
echo "dist-${dist} - ${source}"
# keep the hit only if within the cutoff AND with at least one shared kmer
if ((( $(echo "$dist <= $cutoff" | $bc_path -l) )) && [ ${kmers} -gt 0 ]); then
if [[ -f "${outdir}/${source}.gz" ]]; then
echo "${outdir}/${source}.gz" >> "${sample_name}_best_MASH_hits.txt"
# if [[ -f "${GCF_name}.gz" ]]; then
# echo "${GCF_name}.gz" >> "${sample_name}_best_MASH_hits.txt"
matches=$(( matches + 1))
else
# derive the GCF accession digits used to build the NCBI FTP path
filename=$(echo ${source} | cut -d'_' -f3- | rev | cut -d'_' -f2,3,4 | rev)
GCF_name=$(echo "${source}" | cut -d'_' -f3-)
GCF_check=${GCF_name:0:4}
if [[ "${GCF_check}" = "GCF_" ]]; then
alpha=${filename:4:3}
beta=${filename:7:3}
charlie=${filename:10:3}
echo "Copying - $(unknown)"
echo "Trying - wget $certificate_check https://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/${alpha}/${beta}/${charlie}/$(unknown)/${GCF_name}.gz -O ${outdir}/${source}.gz"
$wget_path $certificate_check https://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/${alpha}/${beta}/${charlie}/$(unknown)/${GCF_name}.gz -O ${outdir}/${source}.gz
# echo "Trying - wget https://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/${alpha}/${beta}/${charlie}/$(unknown)/$(unknown)_genomic.fna.gz -O $(unknown)_genomic.fna.gz"
# wget https://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/${alpha}/${beta}/${charlie}/$(unknown)/$(unknown)_genomic.fna.gz -O $(unknown)_genomic.fna.gz
#curl --remote-name --remote-time "https://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/${alpha}/${beta}/${charlie}/$(unknown)/$(unknown)_genomic.fna.gz"
echo "${outdir}/${source}.gz" >> "${sample_name}_best_MASH_hits.txt"
# echo "${GCF_name}.gz" >> "${sample_name}_best_MASH_hits.txt"
matches=$(( matches + 1))
else
echo "GCF check did not pass, look into the differences of ${source}"
fi
fi
else
break
fi
# NOTE(review): counter is never initialized or read elsewhere in view
counter=$(( counter + 1 ))
done < ${sorted_dists}
| true
|
f6a12d78d5d27ec63008d768cb3cdad42d14eb46
|
Shell
|
debdutgoswami/pcc-cs592
|
/Shell Scripts/Day 3/pattern-1.sh
|
UTF-8
| 240
| 2.9375
| 3
|
[] |
no_license
|
# for loop does not work in sh
# to run bash scripts type "bash <program_name>.sh"
# Print a left-aligned star triangle: row r contains r asterisks.
read -p "enter number of rows: " num
row=1
while (( row <= num )); do
    col=1
    while (( col <= row )); do
        printf "*"
        col=$(( col + 1 ))
    done
    printf "\n"
    row=$(( row + 1 ))
done
| true
|
6e50c33b2e12ab9958f46623d97ca08be5be6d69
|
Shell
|
emf-developer/MessangerWithBash
|
/b.sh
|
UTF-8
| 708
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Toy file-based messenger: each user's inbox is a plain file named after
# the username; reading a message deletes it from the inbox file.
echo "username:"
read username
echo "loged in"
# inbox file for this user ("fielpos" = file position, kept as-is)
fielpos="$username"
# Drain any pending messages: print each line, then remove it from the
# inbox.  NOTE(review): `sed -i "1 d"` rewrites the file while the loop's
# redirection still reads the original -- works only because the deleted
# line was already consumed; fragile if another process writes meanwhile.
declare -i lineno=0
while read -r line; do
echo $line
let ++lineno
sed -i "1 d" "$fielpos"
done < "$fielpos"
# Interactive command loop:
#   exit                  -- quit
#   give me my messages   -- drain the inbox again
#   msg<text>             -- prompt for a recipient, append text to their inbox
while true;
do
read line
if [ "$line" = "exit" ]; then
echo "khoshal shodim (0_0)"
break
elif [[ "$line" == "give me my messages" ]]; then
declare -i lineno=0
while read -r line; do
echo $line
let ++lineno
sed -i "1 d" "$fielpos"
done < "$fielpos"
elif [[ "$line" == msg* ]]; then
echo "message recieved and will be sent to:"
read destUser
# everything after the "msg" prefix is the message body
echo "Msg From $username :" ${line:3} >> $destUser
else
echo "wrong format message"
fi
done
| true
|
ecdd91544eec82719f3ac9407afa3993f468cc11
|
Shell
|
IgorGramovich/my-ubuntu-setup
|
/slack.sh
|
UTF-8
| 861
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Download and install the Slack desktop client, then register it as a
# GNOME autostart application for the current user.
cd /tmp || exit 1
wget https://downloads.slack-edge.com/linux_releases/slack-desktop-3.0.2-amd64.deb
sudo dpkg -i slack-desktop-*.*.*-amd64.deb
sudo rm -rf slack-desktop-*.*.*-amd64.deb
# pull in any dependencies dpkg could not resolve
sudo apt-get -f install -y
APP_NAME=Slack
APP_EXEC="/usr/bin/slack --disable-gpu %U"
AUTOSTART_CONFIG=~/.config/autostart/$APP_NAME.desktop
# Fix: the original ran `sudo echo ... >> $AUTOSTART_CONFIG`.  The
# redirection is performed by the *user's* shell, so sudo bought nothing
# and every append failed against the root-owned file `sudo touch` had
# created.  The autostart dir lives in $HOME, so no privileges are needed:
# write the whole .desktop entry in one pass as the invoking user.
mkdir -p "$(dirname "$AUTOSTART_CONFIG")"
cat > "$AUTOSTART_CONFIG" <<EOF
[Desktop Entry]
Type=Application
Exec=$APP_EXEC
Hidden=false
NoDisplay=false
X-GNOME-Autostart-enabled=true
Name[en_US]=$APP_NAME
Name=$APP_NAME
Comment[en_US]=
Comment=
EOF
| true
|
1b8547ad36592becde5e9671ec3f2f98126aed3c
|
Shell
|
tstoltmann/pegasus
|
/share/pegasus/sh/pegasus-lite-common.sh
|
UTF-8
| 6,853
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
##
# Copyright 2007-2011 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
#
# This file contains a set of common bash funtions to be used by
# Pegasus Lite jobs
#
# Author: Mats Rynge <rynge@isi.edu>
#
# Write a timestamped message ("YYYY-MM-DD HH:MM:SS: msg") to stderr.
function pegasus_lite_log()
{
    TS=$(/bin/date +'%F %H:%M:%S')
    printf '%s: %s\n' "$TS" "$1" 1>&2
}
# Ensure a Pegasus worker package is available, trying each delivery
# mechanism in order of preference: shipped with the job, already present
# in the environment, or downloaded from pegasus.isi.edu.
# Returns 0 as soon as one mechanism succeeds, 1 if all fail.
function pegasus_lite_worker_package()
{
    pegasus_lite_internal_wp_shipped && return 0
    pegasus_lite_internal_wp_in_env && return 0
    pegasus_lite_internal_wp_download && return 0
    return 1
}
# If the job was shipped with a worker-package tarball in the start
# directory, unpack it into the work dir, delete the tarball, and put its
# bin/ directory first on PATH.  Returns 0 if a package was found.
function pegasus_lite_internal_wp_shipped()
{
# was the job shipped with a Pegasus worker package?
if ls $pegasus_lite_start_dir/pegasus-worker-*.tar.gz >/dev/null 2>&1; then
pegasus_lite_log "The job contained a Pegasus worker package"
tar xzf $pegasus_lite_start_dir/pegasus-worker-*.tar.gz
rm -f $pegasus_lite_start_dir/pegasus-worker-*.tar.gz
# unset PEGASUS_HOME so downstream tooling resolves via PATH only
unset PEGASUS_HOME
export PATH=${pegasus_lite_work_dir}/pegasus-${pegasus_lite_full_version}/bin:$PATH
return 0
fi
return 1
}
# Look for a usable Pegasus install already in the environment, trying
# $PEGASUS_HOME/bin first and then the existing PATH.  Accepts it only if
# pegasus-config reports the same major.minor version as this workflow;
# otherwise all environment changes are rolled back and 1 is returned.
function pegasus_lite_internal_wp_in_env()
{
old_path=$PATH
# use PEGASUS_HOME if set
if [ "x$PEGASUS_HOME" != "x" ]; then
PATH="$PEGASUS_HOME/bin:$PATH"
export PATH
fi
# is there already a pegasus install in our path?
detected_pegasus_bin=`which pegasus-config 2>/dev/null || /bin/true`
if [ "x$detected_pegasus_bin" != "x" ]; then
detected_pegasus_bin=`dirname $detected_pegasus_bin`
# does the version match?
if $detected_pegasus_bin/pegasus-config --version 2>/dev/null | grep -E "^${pegasus_lite_version_major}\.${pegasus_lite_version_minor}\." >/dev/null 2>/dev/null; then
pegasus_lite_log "Using existing Pegasus binaries in $detected_pegasus_bin"
return 0
else
pegasus_lite_log "Pegasus binaries in $detected_pegasus_bin do not match Pegasus version used for current workflow"
fi
fi
# back out env changes
unset PEGASUS_HOME
PATH=$old_path
export PATH
return 1
}
# Last resort: download a worker package matching this host's arch and OS
# (rhel or deb, detected from /etc release files; defaults to rhel 5)
# from download.pegasus.isi.edu, unpack it, and prepend its bin/ to PATH.
# NOTE(review): wget/tar failures are not checked -- a failed download
# still falls through with return status 0 (the final export).
function pegasus_lite_internal_wp_download()
{
# fall back - download a worker package from pegasus.isi.edu
os=rhel
major=5
arch=`uname -m`
if [ $arch != "x86_64" ]; then
arch="x86"
fi
if [ -e /etc/redhat-release ]; then
os=rhel
major=`cat /etc/redhat-release | sed 's/.*release //' | sed 's/[\. ].*//'`
else
if [ -e /etc/debian_version ]; then
os=deb
major=`cat /etc/debian_version | sed 's/\..*//'`
fi
fi
url="http://download.pegasus.isi.edu/pegasus/${pegasus_lite_version_major}"
url="${url}.${pegasus_lite_version_minor}.${pegasus_lite_version_patch}"
url="${url}/pegasus-worker"
url="${url}-${pegasus_lite_version_major}.${pegasus_lite_version_minor}.${pegasus_lite_version_patch}"
url="${url}-${arch}_${os}_${major}.tar.gz"
pegasus_lite_log "Downloading Pegasus worker package from $url"
wget -q -O pegasus-worker.tar.gz "$url"
tar xzf pegasus-worker.tar.gz
rm -f pegasus-worker.tar.gz
unset PEGASUS_HOME
export PATH="${pegasus_lite_work_dir}/pegasus-${pegasus_lite_full_version}/bin:$PATH"
}
# Pick and create a scratch work directory for the job.  Honors a
# pre-set $pegasus_lite_work_dir; otherwise tries a list of candidate
# scratch locations in priority order, requiring existence, at least
# $PEGASUS_WN_TMP_MIN_SPACE kB free, and write permission.  On success
# exports pegasus_lite_work_dir (a fresh mktemp subdir), sets
# pegasus_lite_work_dir_created=1 for cleanup, and cd's into it.
function pegasus_lite_setup_work_dir()
{
# remember where we started from
pegasus_lite_start_dir=`pwd`
if [ "x$pegasus_lite_work_dir" != "x" ]; then
pegasus_lite_log "Not creating a new work directory as it is already set to $pegasus_lite_work_dir"
return
fi
targets="$PEGASUS_WN_TMP $_CONDOR_SCRATCH_DIR $OSG_WN_TMP $TG_NODE_SCRATCH $TG_CLUSTER_SCRATCH $SCRATCH $TMPDIR $TMP /tmp"
# unset TMPDIR so child tools do not inherit a possibly-rejected location
unset TMPDIR
if [ "x$PEGASUS_WN_TMP_MIN_SPACE" = "x" ]; then
PEGASUS_WN_TMP_MIN_SPACE=1000000
fi
for d in $targets; do
pegasus_lite_log "Checking $d for potential use as work space... "
# does the target exist?
if [ ! -e $d ]; then
pegasus_lite_log " Workdir: $d does not exist"
continue
fi
# make sure there is enough available diskspace
cd $d
free=`df -kP . | awk '{if (NR==2) print $4}'`
if [ "x$free" == "x" -o $free -lt $PEGASUS_WN_TMP_MIN_SPACE ]; then
pegasus_lite_log " Workdir: not enough disk space available in $d"
continue
fi
# writability probe: create and remove a throwaway dotfile
if touch $d/.dirtest.$$ >/dev/null 2>&1; then
rm -f $d/.dirtest.$$ >/dev/null 2>&1
d=`mktemp -d $d/pegasus.XXXXXX`
export pegasus_lite_work_dir=$d
export pegasus_lite_work_dir_created=1
pegasus_lite_log " Work dir is $d - $free kB available"
cd $pegasus_lite_work_dir
return 0
fi
pegasus_lite_log " Workdir: not allowed to write to $d"
done
return 1
}
# One-time setup: compute the full Pegasus version string, announce it on
# stderr, then for every staged-credential env var (X509_USER_PROXY,
# S3CFG, SSH_PRIVATE_KEY, irodsEnvFile, case-insensitive prefix match)
# turn relative paths into absolute ones and clamp permissions to 0600.
function pegasus_lite_init()
{
pegasus_lite_full_version=${pegasus_lite_version_major}.${pegasus_lite_version_minor}.${pegasus_lite_version_patch}
# announce version - we do this so pegasus-exitcode and other tools
# can tell the job was a PegasusLite job
pegasus_lite_log "PegasusLite: version ${pegasus_lite_full_version}" 1>&2
# for staged credentials, expand the paths and set strict permissions
for base in X509_USER_PROXY S3CFG SSH_PRIVATE_KEY irodsEnvFile; do
for key in `(env | grep -i ^$base | sed 's/=.*//') 2>/dev/null`; do
# indirect read of the variable named by $key
eval val="\$$key"
# expand the path
if ! (echo $val | grep "^/") >/dev/null 2>&1; then
eval $key=`pwd`/"$val"
eval val="\$$key"
pegasus_lite_log "Expanded \$$key to $val"
fi
chmod 0600 $val
done
done
}
# Final exit path (typically installed as an EXIT trap): derive the exit
# code from the last command's status, override it with $job_ec when the
# wrapped job failed, remove the work dir if this script created it, and
# emit the machine-readable "PegasusLite: exitcode N" line on stderr.
function pegasus_lite_exit()
{
rc=$?
if [ "x$rc" = "x" ]; then
rc=0
fi
# job_ec, when set by the caller, records the wrapped user job's status
if [ "x$job_ec" != "x" ];then
if [ $job_ec != 0 ];then
pegasus_lite_log "Job failed with exitcode $job_ec"
rc=$job_ec
fi
fi
if [ $rc != 0 ]; then
pegasus_lite_log "FAILURE: Last command exited with $rc"
fi
# only remove the work dir if pegasus_lite_setup_work_dir created it
if [ "x$pegasus_lite_work_dir_created" = "x1" ]; then
cd /
rm -rf $pegasus_lite_work_dir
pegasus_lite_log "$pegasus_lite_work_dir cleaned up"
fi
echo "PegasusLite: exitcode $rc" 1>&2
exit $rc
}
| true
|
7c1aba4bf2edd43af7a943e11f2234f50536827c
|
Shell
|
dihuynh/dotfiles
|
/.bashrc
|
UTF-8
| 1,419
| 2.9375
| 3
|
[] |
no_license
|
# Load personal aliases, then git prompt/completion helpers if present.
source ~/.bash_aliases
if [ -f ~/.git-prompt.sh ]; then
source ~/.git-prompt.sh
export PS1="\[\e[35;1m\]\T \[\e[32;1m\]\W\[\e[1;33m\]\$(__git_ps1) > \[\e[0m\]"
fi
if [ -f ~/.git-completion.bash ]; then
. ~/.git-completion.bash
fi
export NVM_DIR="/Users/di.huynh/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
#fasd
eval "$(fasd --init auto)"
export PHANTOMJS_BIN=/usr/local/bin/phantomjs
# bash history
export HISTCONTROL=ignoredups:erasedups # Avoid duplicates
export HISTSIZE=100000 # big big history
export HISTFILESIZE=100000 # big big history
# After each command, append to history
# NOTE(review): a later line in this file reassigns PROMPT_COMMAND to
# enter_directory only, which discards this `history -a` hook.
export PROMPT_COMMAND="history -a; $PROMPT_COMMAND"
# Run `nvm use` automatically whenever the shell changes into a directory
# containing an .nvmrc file (invoked from PROMPT_COMMAND before each prompt).
enter_directory() {
  # no-op when the directory has not changed since the last prompt
  if [[ $PWD == $PREV_PWD ]]; then
    return
  fi
  PREV_PWD=$PWD
  [[ -f ".nvmrc" ]] && nvm use
}
# Fix: the original plain assignment (PROMPT_COMMAND=enter_directory)
# clobbered the "history -a" hook installed earlier in this file; chain
# onto the existing value instead so both run.
export PROMPT_COMMAND="enter_directory; $PROMPT_COMMAND"
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
export PATH="/Users/di.huynh/.rvm/gems/ruby-2.2.1/bin:$PATH"
export JAVA_HOME=$(/usr/libexec/java_home) PATH=$PATH:$JAVA_HOME/bin export JAVA_HOME
#THIS MUST BE AT THE END OF THE FILE FOR SDKMAN TO WORK!!!
export SDKMAN_DIR="/Users/di.huynh/.sdkman"
[[ -s "/Users/di.huynh/.sdkman/bin/sdkman-init.sh" ]] && source "/Users/di.huynh/.sdkman/bin/sdkman-init.sh"
| true
|
c79901bb76d21295f5ae6f9d3d0b8beaab90977c
|
Shell
|
mauilion/kind-workshop
|
/workshop/lib/demo.sh
|
UTF-8
| 873
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Scripted live demo (demo-magic): "types" and runs the docker commands
# below, building kind inside a containerized Go build environment.
########################
# include the magic
########################
. lib/demo-magic.sh
########################
# Configure the options
########################
#
# speed at which to simulate typing. bigger num = faster
#
TYPE_SPEED=200
#
# custom prompt
#
# see http://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/bash-prompt-escape-sequences.html for escape sequences
#
DEMO_PROMPT="${GREEN}➜ ${CYAN}\W "
# put your demo awesomeness here
#if [ ! -d "stuff" ]; then
# pe "mkdir stuff"
#fi
#pe "cd stuff"
# 'pe' (print-and-execute) is provided by demo-magic.sh.
pe "docker run -d --name=buildenv \
-v /tmp:/tmp -v $PWD:/go/ \
-v /var/run/docker.sock:/var/run/docker.sock \
-v $PWD/.kube:/go/.kube \
mauilion/kind-buildenv"
# NOTE(review): 'docker exec buildenv -- VAR=... cmd' does not set an env
# var; docker exec has no '--' separator and env vars need '-e'.  Likely
# intended: docker exec -e GO111MODULE=on buildenv go get ... — confirm.
pe "docker exec buildenv -- GO111MODULE=on go get sigs.k8s.io/kind@v0.5.1"
# show a prompt so as not to reveal our true nature after
# the demo has concluded
p ""
| true
|
85055e7b4caf063775fabeaf3d5a9262d59f7f7f
|
Shell
|
sourabbr/shell_scripting
|
/lesson_1/4_read_a_file_or_dir_name_and_do_something.sh
|
UTF-8
| 408
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# Prompt the user for a file or directory name, report its type, then list
# its contents one entry per line.
#
# Fixes over the original:
#  * every expansion is quoted, so names containing spaces work
#  * read -r, so backslashes in the typed name are taken literally
#  * entries are streamed line-by-line instead of word-splitting $(ls ...)
read -r -p "Enter file/dir name:" file
echo "File/Dir name: ${file}"
if [ -f "$file" ]
then
    echo "Regular file"
elif [ -d "$file" ]
then
    echo "Directory"
else
    echo "Might not be a file/dir"
fi
echo "List of files/directories in ${file}:"
# Read ls output line by line; when piped, ls prints one entry per line,
# so entries with spaces survive intact.
ls "$file" | while IFS= read -r item
do
    echo "$item"
done
| true
|
09b6fbdc3202690e9e1264bb7c0ab92457c1aba7
|
Shell
|
qais-yousef/qemu-imgs-manipulator
|
/chroot_img
|
UTF-8
| 780
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
set -eu

# Open a chroot shell inside the prebuilt QEMU disk image for the selected
# architecture.  Mounting and entering is delegated to bin/chroot_enter.sh.

function print_usage {
    echo "Usage: $(basename $0) [options]"
    echo " Options:"
    echo " --arch [arch]: x86_64(default)|i386|aarch64|arm"
    echo " -h|--help: print this help message"
}

ARCH=x86_64
SHORTOPTS="h"
LONGOPTS="arch:,help"

# BUG FIX: this line used to read 'ARGS==`getopt ...`' — the doubled '='
# prepended a literal '=' to the parsed argument list.  (Under 'set -e' a
# getopt failure already aborts the script via the command substitution.)
ARGS=$(getopt -n "$(basename $0)" --options $SHORTOPTS --longoptions $LONGOPTS -- "$@")
eval set -- "$ARGS"

while true;
do
    case "$1" in
        -h|--help)
            print_usage
            exit 0
            ;;
        --arch)
            shift
            ARCH="$1"
            ;;
        --)
            shift
            break
            ;;
    esac
    shift
done

# Information required by plumbing layer
IMG=$(dirname $(realpath $0))/images/qemu-image-$ARCH.img
MNT=$(dirname $(realpath $0))/mnt/$ARCH

echo "#"
echo "# When done chroot access, type 'exit'"
echo "#"
$(dirname $0)/bin/chroot_enter.sh $IMG $MNT
| true
|
818d33fadb8a666a755826c3884c9d0a6cabfc7d
|
Shell
|
rtimmons/dotfiles
|
/050-pyenv/env.zshrc
|
UTF-8
| 370
| 2.953125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Lazy pyenv wrapper: the (slow) 'pyenv init' is only evaluated when the
# function is actually called, instead of at shell startup.
pyenv() {
    # Initialise pyenv shims/completions on demand, if pyenv is installed.
    if which pyenv > /dev/null; then
        eval "$(command pyenv init -)"
        eval "$(command pyenv init --path)"
    fi
    # Build flags so Python builds can find Homebrew's OpenSSL and the
    # macOS SDK headers.
    _openssl=$(brew --prefix openssl)
    # NOTE(review): these prepend on every invocation, so repeated calls in
    # one shell accumulate duplicate -I/-L entries — confirm intended.
    export CPPFLAGS="-I${_openssl}/include -I$(xcrun -show-sdk-path)/usr/include ${CPPFLAGS:-}"
    export LDFLAGS="-L${_openssl}/lib ${LDFLAGS:-}"
    command pyenv "$@"
}
| true
|
957ddcc822c1bced53b670af12aae952fe35817b
|
Shell
|
ashikawa/scripts
|
/shell/getcdn.sh
|
UTF-8
| 388
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh
# Download one or more jQuery versions from the Google CDN into
# /ajax/libs/jquery/<version>/.
# Note: create the /ajax directory as root beforehand.
# @see https://developers.google.com/speed/libraries/devguide?hl=ja#jquery

# Iterate over the positional parameters directly; the old
# 'VERSIONS=$@' plus unquoted expansion was fragile.
for v in "$@"
do
    URL="http://ajax.googleapis.com/ajax/libs/jquery/$v/jquery.min.js"
    DIR="/ajax/libs/jquery/$v/"
    mkdir -p "$DIR"
    curl -o "$DIR/jquery.min.js" "$URL"
done
| true
|
ab35a06a41fa3cbe7c9af7b06a7a51ade8150821
|
Shell
|
moraisgabri/trybe-exercises
|
/intro_webDev/my-shell-scripts/exercicio8.sh
|
UTF-8
| 123
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrates a 'for' loop over the words of a string: the unquoted
# $FRASE expansion is word-split, so each word prints on its own line.
FRASE="shell script usando estrutura repetição for terminal"
for PALAVRA in $FRASE
do
    echo "$PALAVRA"
done
| true
|
efa0f5be389e6757e3acf742ea66a40c14466b2a
|
Shell
|
tiemei/lib
|
/shell/demos/printf.sh
|
UTF-8
| 1,003
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# printf demos: format reuse, escape handling (%s vs %b), numeric
# conversions, and a fixed-width table at the end.
printf "%s\n" "hello printf" # one line
printf "%s\n" "hello printf" "in" "bash script" # format is reused: three lines
printf "%s\t%s\n" "1" "2 3" "4" "5" # two lines, tab separated
printf "%s\n" "1" "2" "\n3" # %s does not interpret \n
printf "%b\n" "1" "2" "\n3" # %b does interpret \n
# %-10s at least 10 characters, left aligned
# %08d  zero-padded to 8 digits
# %11.2f precision 2
printf "%d\n" 255 0xff 0377 3.5 # integers; 3.5 cannot be parsed as %d
printf "%03d\n" 1 2 # at least three digits, zero padded
printf "%f\n" 255 0xff 0377 3.5 # floats, six fractional digits by default
printf "%.1f\n" 255 0xff 0377 3.5 # one fractional digit
# a table-printing demo ==================
divider===============================
divider=$divider$divider
header="\n %-10s %8s %10s %11s\n" # 10 + 8 + 10 + 11 = 39
format=" %-10s %08d %10s %11.2f\n"
width=43
printf "$header" "ITEM NAME" "ITEM ID" "COLOR" "PRICE"
printf "%$width.${width}s\n" "$divider" # exactly 43 characters wide
printf "$format" \
Triangle 13 red 20 \
Oval 204449 "dark blue" 65.656 \
Square 3145 orange .7
| true
|
805fd99c4ce5b071e97c11013db5762ca03cb284
|
Shell
|
CloudTestDrive/helidon-kubernetes
|
/setup/devops-labs/vault-secrets-destroy.sh
|
UTF-8
| 837
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash -f
# Destroy the vault secrets created for the devops labs (the OCIR host and
# storage namespace).  Requires the saved lab settings file.
export SETTINGS=$HOME/hk8sLabsSettings
if [ -f "$SETTINGS" ]
then
  echo "Loading existing settings information"
  source $SETTINGS
else
  echo "No existing settings cannot continue"
  exit 10
fi
SAVED_DIR=$(pwd)
cd ../common/vault
# Tracks the last non-zero helper exit code; zero means full success.
FINAL_RESP=0
# The two stanzas of the original were identical apart from the secret
# name, so they are folded into one loop (output is unchanged).
for secret_name in OCIR_HOST OCIR_STORAGE_NAMESPACE
do
  bash ./vault-individual-secret-destroy.sh "$secret_name"
  RESP=$?
  if [ $RESP -ne 0 ]
  then
    echo "Failure deleting the vault secret $secret_name, exit code is $RESP, cannot continue"
    echo "Please review the output and rerun the script"
    FINAL_RESP=$RESP
  fi
done
cd $SAVED_DIR
exit $FINAL_RESP
| true
|
2e6fd03754a033312fc98ee89bb4bb371941c511
|
Shell
|
cycle13/newProjectPy
|
/brams/manipula_concat_BRAMS.gs
|
UTF-8
| 854
| 2.796875
| 3
|
[] |
no_license
|
#! /bin/bash
# Convert BRAMS profile output (GrADS .ctl) to NetCDF with cdo for every
# day in [datai, dataf], at 3-hourly steps (00, 03, ..., 21).
inctime=/stornext/home/carlos.bastarz/bin/inctime   # date-arithmetic helper
datai=20180801
dataf=20180930
#dataf=20180802
#wrfout_d01_2018-08-24_23:00:00
data=${datai}
while [ ${data} -le ${dataf} ]
do
    # Day and month pieces of the YYYYMMDD loop date.
    dataday=` echo ${data} |cut -c7-8`
    datamonth=` echo ${data} |cut -c5-6`
    #echo "${dataday}"
    # NOTE(review): the year '2018' is hard-coded in the file names below,
    # so datai/dataf outside 2018 would build wrong names — confirm.
    for i in $(seq -w 0 3 23)
    do
        echo "${i}"
        #echo "cdo -f nc import_binary profile_2018093000G-A-2018-09-30-${i}0000-g1.ctl profile_2018093000G-A-2018-09-30-${i}0000-g1.nc"
        echo "cdo -f nc import_binary profile_${data}00G-A-2018-${datamonth}-${dataday}-${i}0000-g1.ctl profile_${data}00G-A-2018-${datamonth}-${dataday}-${i}0000-g1.nc"
        cdo -f nc import_binary profile_${data}00G-A-2018-${datamonth}-${dataday}-${i}0000-g1.ctl profile_${data}00G-A-2018-${datamonth}-${dataday}-${i}0000-g1.nc
    done
    # Advance the loop date by one day.
    data=$(${inctime} ${data} +1d %y4%m2%d2)
done
exit 0
| true
|
eadad804186c64b2f672e4d2d10c217e554a3c24
|
Shell
|
Bumsuk/ShellScript
|
/learn_adv/1601_exec_write.sh
|
UTF-8
| 1,532
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# https://wiki.kldp.org/HOWTO/html/Adv-Bash-Scr-HOWTO/x10307.html
# 16.1. Using exec
# 'exec <filename' redirects standard input to a file.  From then on, all
# input that would normally come from the keyboard comes from that file,
# which lets us read it line by line and parse each line with sed/awk.
#
# Example 16-1. Redirecting stdin with 'exec'
exec 6<&0 # Save stdin by linking it to file descriptor 6.
echo -e "1111\n2222\n3333\n4444\n5555" > data-file # Sample file created here.
exec < data-file # stdin now reads from "data-file".
read a1 # Read the first line of "data-file".
read a2 # Read the second line of "data-file".
echo
echo "다음은 파일에서 읽어 들인 것입니다."
echo "-----------------------------------"
echo $a1
echo $a2
echo; echo; echo
exec 0<&6 6<&-
# Restore stdin from file descriptor 6, then free fd 6 ( 6<&- )
# so other processes can use it.  '<&6 6<&-' would also work.
echo -n "데이타를 넣으세요 "
read b1 # 'read' now reads from its normal source, standard input.
echo "표준입력에서 읽은 값."
echo "---------------------"
echo "b1 = $b1"
echo
rm -rf data-file
exit 0
| true
|
b93beb1449ee5b973ddc312a97be7c05b305420f
|
Shell
|
cncf/devstats-docker-images
|
/example/affs.sh
|
UTF-8
| 749
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Re-run the affiliations import for every devstats project, flipping the
# per-database 'provisioned' flag off/on around the update.
if ( [ -z "$PG_PASS" ] || [ -z "$PG_HOST" ] || [ -z "$PG_PORT" ] )
then
  echo "$0: you need to set PG_PASS, PG_HOST and PG_PORT to run this script"
  exit 1
fi
export GHA2DB_PROJECTS_YAML="example/projects.yaml"
export LIST_FN_PREFIX="example/all_"
# GHA2DB_LOCAL=1 GHA2DB_PROCESS_REPOS=1 get_repos
# all_projs.sh defines $all, the list of projects iterated below
# (presumably derived from the projects YAML — confirm).
. ./devel/all_projs.sh || exit 2
for proj in $all
do
  # Two projects use historical database names that differ from the
  # project name.
  db=$proj
  if [ "$proj" = "kubernetes" ]
  then
    db="gha"
  elif [ "$proj" = "all" ]
  then
    db="allprj"
  fi
  # Refuse to touch a database that is mid-run, then re-provision it.
  ./devel/check_flag.sh "$db" devstats_running 0 || exit 3
  ./devel/clear_flag.sh "$db" provisioned || exit 4
  GHA2DB_PROJECT=$proj PG_DB=$db ./shared/all_affs.sh || exit 5
  ./devel/set_flag.sh "$db" provisioned || exit 6
done
echo 'All affiliations updated'
| true
|
06b667810a8940cffe1dc22c90b8993d16bdfb60
|
Shell
|
xanarin/scuba
|
/bash_completion/scuba
|
UTF-8
| 935
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!bash
#
# bash completion for scuba
# https://github.com/JonathonReinhart/scuba
#
# Copy this file to /etc/bash_completion.d
_scuba()
{
    COMPREPLY=()

    # Look at all previous words, skipping command itself.
    # FIX: 'word' was missing from the local list and leaked into the
    # caller's environment on every completion.
    local i word
    for ((i = 1; i < ${COMP_CWORD}; i++)); do
        word="${COMP_WORDS[i]}"
        if [[ $word != -* ]] ; then
            # User has already entered a non-option;
            # there's nothing we can suggest
            return 0
        fi
    done

    local cur="${COMP_WORDS[COMP_CWORD]}"

    # Is the user typing an option?
    if [[ ${cur} == -* ]] ; then
        local opts
        opts=$(scuba --list-available-options)
        COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
        return 0
    fi

    # The only thing we can suggest here is an alias
    local aliases
    aliases="$(scuba --list-aliases | tail -n+2 | awk -F'\t' '{print $1 }')"
    COMPREPLY=( $(compgen -W "${aliases}" -- "${cur}") )
}
complete -F _scuba scuba
| true
|
dad098c165171955f1101afa7bc8d60d6f5f1772
|
Shell
|
StudioEtrange/roracle-install
|
/pool/stella/nix/pool/feature-recipe/feature_ansible.sh
|
UTF-8
| 1,434
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Stella "feature recipe" describing how to fetch and install ansible.
# The include guard ensures the recipe is only sourced once per session.
if [ ! "$_ansible_INCLUDED_" = "1" ]; then
_ansible_INCLUDED_=1

# https://github.com/ansible/ansible
# https://stackoverflow.com/questions/41535915/python-pip-install-from-local-dir

# Top-level feature descriptor: available versions and defaults.
feature_ansible() {
    FEAT_NAME=ansible
    FEAT_LIST_SCHEMA="2_4_0_0:source"
    FEAT_DEFAULT_ARCH=
    FEAT_DEFAULT_FLAVOUR="source"
}

# Metadata for version 2.4.0.0: source URL, dependencies and the file
# whose existence proves a successful install.
feature_ansible_2_4_0_0() {
    FEAT_VERSION=2_4_0_0
    FEAT_SOURCE_DEPENDENCIES="miniconda"
    FEAT_BINARY_DEPENDENCIES=
    FEAT_SOURCE_URL=https://releases.ansible.com/ansible/ansible-2.4.0.0.tar.gz
    FEAT_SOURCE_URL_FILENAME=ansible-2.4.0.0.tar.gz
    FEAT_SOURCE_URL_PROTOCOL=HTTP_ZIP
    FEAT_BINARY_URL=
    FEAT_BINARY_URL_FILENAME=
    FEAT_BINARY_URL_PROTOCOL=
    FEAT_SOURCE_CALLBACK=
    FEAT_BINARY_CALLBACK=
    FEAT_ENV_CALLBACK=
    FEAT_INSTALL_TEST="$FEAT_INSTALL_ROOT"/bin/ansible
    FEAT_SEARCH_PATH="$FEAT_INSTALL_ROOT"/bin
}

# Fetch the source tarball into the install dir and install it with
# 'pip install -e' (editable install from the unpacked tree).
feature_ansible_install_source() {
    INSTALL_DIR="$FEAT_INSTALL_ROOT"
    #SRC_DIR="$STELLA_APP_FEATURE_ROOT/$FEAT_NAME-$FEAT_VERSION-src"
    __set_toolset "STANDARD"
    #__add_toolset "autotools"
    __get_resource "$FEAT_NAME" "$FEAT_SOURCE_URL" "$FEAT_SOURCE_URL_PROTOCOL" "$INSTALL_DIR" "DEST_ERASE STRIP"
    pip install -e "$INSTALL_DIR"
    # AUTO_INSTALL_CONF_FLAG_PREFIX=
    # AUTO_INSTALL_CONF_FLAG_POSTFIX=
    # AUTO_INSTALL_BUILD_FLAG_PREFIX=
    # AUTO_INSTALL_BUILD_FLAG_POSTFIX=
    __feature_callback
    #__auto_build "$FEAT_NAME" "$SRC_DIR" "$INSTALL_DIR" "NO_OUT_OF_TREE_BUILD AUTOTOOLS autogen"
}
fi
| true
|
b82b71a170b012df550e9e3014773e4965d69515
|
Shell
|
RanMax/SysProg
|
/2014-2015/IU4_93/Подорин Александр/homework_1/zad_51.sh
|
UTF-8
| 431
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# Grep-like search: for every regular file directly under a directory,
# print "<filename>:<line number> <line>" for each line containing a
# search string.
#
# Usage: zad_51.sh <directory> <search string>
if [ $# -lt 2 ];
then
    echo "Not enough parameters"
    exit 1
fi;
dir_path=$1
search_string=$2
if [ ! -d "$dir_path" ];
then
    echo "Directory not exist"
    exit 2
fi;
# Stream file names instead of word-splitting `find` output, so names
# containing spaces are handled correctly; read -r keeps backslashes.
find "$dir_path" -maxdepth 1 -type f | while IFS= read -r file
do
    line_num=0
    while IFS= read -r line ;
    do
        line_num=$(( line_num + 1 ))
        if [[ "$line" == *"$search_string"* ]];
        then
            filename=${file##*/}
            echo "$filename:$line_num $line"
        fi;
    done < "$file"
done
| true
|
45c451ca9633ac66566dcae7d6c0d7ccdafb55e3
|
Shell
|
dataart-telco/restcomm-perf-test
|
/start_test.sh
|
UTF-8
| 5,011
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Drive a SIPp load test against a Restcomm deployment: start the perf
# collectors, run the sipp container with the requested call profile, then
# gather logs, graphs and PerfCorder analyses into results/<timestamp>_...
source ./perfcorder.sh
source ./config.sh
usage() {
    echo "----------"
    echo "Usage: <max_calls_number> <simultaneous_calls> <calls_per_sec>"
    echo "----------"
}
if [ "$#" -ne 3 ]; then
    usage
    exit 1
fi
# Resolve the relevant IPs: locally everything runs on this host's eth0
# address; otherwise ask docker-machine via machine_util.sh helpers.
if [ "$TEST_ENGINE" = 'local' ]; then
    echo "Use local env"
    machine_ip=`ip addr show eth0 | grep -o 'inet [0-9]\+\.[0-9]\+\.[0-9]\+\.[0-9]\+' | cut -f2 -d' '`
    COLLECTD_SERVER_IP_PUBLIC=$machine_ip
    RESTCOMM_IP_PRIVATE=$machine_ip
    IVRAPP_IP_PUBLIC=$machine_ip
else
    source ./machine_util.sh
    COLLECTD_SERVER_IP_PUBLIC=`get_public_ip collectd-server`
    RESTCOMM_IP_PRIVATE=`get_private_ip restcomm`
    IVRAPP_IP_PUBLIC=`get_public_ip ivrapp`
fi
RESTCOMM_IP=$RESTCOMM_IP_PRIVATE
RESTCOMM_PORT=5080
PHONE_NUMBER=5555
########################################################################
### Init test env
########################################################################
#run perf collector
perfcorder_start restcomm
perfcorder_start mediaserver
#reset stat (the ivrapp exposes a small HTTP stats API on :7090)
curl -s http://${IVRAPP_IP_PUBLIC}:7090/start
#copy test scenario files to where the sipp container can mount them
if [ "$TEST_ENGINE" = 'local' ]; then
    #remove prev logs
    rm -rf /tmp/sipp-test
    mkdir -p /tmp/sipp-test
    cp -ar $PWD/sipp-test /tmp
    TEST_LOCAL_PATH=/tmp/sipp-test
else
    #remove prev data
    docker-machine ssh sipp-test sudo rm -rf /home/ubuntu/sipp-test
    docker-machine scp -r $PWD/sipp-test sipp-test:/home/ubuntu/sipp-test
    TEST_LOCAL_PATH='/home/ubuntu/sipp-test'
fi
########################################################################
### Start test container
########################################################################
# Runs in the foreground (-it, --rm); the script blocks until the test
# scenario in bootstrap.sh completes.
docker \
    $(get_docker_config sipp-test) \
    run \
    --rm \
    --net host \
    --privileged \
    --name sipp-test \
    -v $TEST_LOCAL_PATH:/opt/sipp-test \
    -e SIP_ADDRESS=$RESTCOMM_IP:$RESTCOMM_PORT \
    -e PHONE_NUMBER=$PHONE_NUMBER \
    -e MAXIMUM_CALLS=$1 \
    -e SIMULTANEOUS_CALLS=$2 \
    -e CALLS_PER_SEC=$3 \
    -it hamsterksu/sipp /opt/sipp-test/bootstrap.sh
########################################################################
### Collect results
########################################################################
echo "Copy results..."
# One results directory per run, keyed by timestamp and test parameters.
DATE=`date +%Y_%m_%d_%H_%M_%S`
RESULT_DIR=results/${DATE}_$1_$2_$3
mkdir -p $RESULT_DIR
if [ "$TEST_ENGINE" = 'local' ]; then
    mv $TEST_LOCAL_PATH/logs $RESULT_DIR
    mv $TEST_LOCAL_PATH/results $RESULT_DIR
else
    docker-machine scp -r sipp-test:/home/ubuntu/sipp-test/logs $RESULT_DIR
    docker-machine scp -r sipp-test:/home/ubuntu/sipp-test/results $RESULT_DIR
fi
#stop perf collector
perfcorder_stop restcomm
perfcorder_stop mediaserver
perfcorder_dump restcomm
perfcorder_dump mediaserver
echo "Rendering results..."
echo "Collectd mem: ${INSTANCES_MEM["$INSTANCE_TYPE"]}"
docker \
    $(get_docker_config collectd-server) \
    exec \
    collectd-server \
    bash /opt/collectd-server/render.sh ${INSTANCES_MEM["$INSTANCE_TYPE"]}
COLLECTD_URL="http://${COLLECTD_SERVER_IP_PUBLIC}"
# Locally only this host produced graphs; remotely one graph set per VM.
if [ "${TEST_ENGINE}" = "local" ]; then
    services=(
        "$(hostname)"
    )
else
    services=(
        'restcomm'
        'mediaserver'
        'ivrapp'
        'mysql'
    )
fi
for service in ${services[*]}; do
    wget -O $RESULT_DIR/${service}_cpu.png $COLLECTD_URL/${service}_cpu.png
    wget -O $RESULT_DIR/${service}_memory.png $COLLECTD_URL/${service}_memory.png
    wget -O $RESULT_DIR/${service}_network_eth0.png $COLLECTD_URL/${service}_network_eth0.png
done
RESULT_INCOMING=`curl -s http://${IVRAPP_IP_PUBLIC}:7090/stat/incoming`
RESULT_RECEIVED=`curl -s http://${IVRAPP_IP_PUBLIC}:7090/stat/received`
# Calls the IVR saw but never gathered digits from count as failed.
dif=`echo $RESULT_INCOMING - $RESULT_RECEIVED | bc`
perfcorder_install_local
# Package a perfcorder capture, run the analysis and the goal tests.
render_perfcorder_result(){
    folder=$1
    echo "folder: $folder"
    cp $RESULT_DIR/results/*_test.csv $folder/data/periodic/sip/sipp.csv
    cur=$PWD
    cd $folder
    zip -rq result.zip data
    cd $cur
    $PERFCORDER_LOCAL/pc_analyse.sh $folder/result.zip 1 > $folder/PerfCorderAnalysis.xml
    cat $folder/PerfCorderAnalysis.xml | $PERFCORDER_LOCAL/pc_test.sh ./xslt/mss-proxy-goals.xsl > $folder/TEST-PerfCorderAnalysisTest.xml
}
render_perfcorder_result $RESULT_DIR/perfcorder-restcomm
render_perfcorder_result $RESULT_DIR/perfcorder-mediaserver
########################################################################
### Print result
########################################################################
echo "
########################################################################
### Results
########################################################################
"
# NOTE(review): "forlder" typo in this user-facing message.
echo "Result forlder: $RESULT_DIR"
echo "
***Perfcorder data***
Restcomm: $RESULT_DIR/perfcorder-restcomm
Mediaserver: $RESULT_DIR/perfcorder-mediaserver"
echo "
***Collectd Stats***
Url: $COLLECTD_URL"
echo "
*** IVR server stats ***
Incoming calls: $RESULT_INCOMING
Gathered digits: $RESULT_RECEIVED
Failed calls: $dif" >> $RESULT_DIR/ivr_stat.txt
cat $RESULT_DIR/ivr_stat.txt
| true
|
25770e6eef2e6668f9f19e7978ac9179facbb884
|
Shell
|
Sanjit-Shelke/Operating_Systems
|
/Assignment-2/Assignment_2/prime.sh
|
UTF-8
| 189
| 3.375
| 3
|
[] |
no_license
|
# Primality test by trial division over 2..n-1.
#
# Fixes over the original:
#  * numbers below 2 (e.g. 0 and 1) used to fall through the loop and be
#    reported as prime; they are not.
#  * shell arithmetic $(( )) replaces the external 'expr' calls.
echo Enter your no.!
read -r no
# 0 and 1 (and negatives) are not prime by definition.
if [ "$no" -lt 2 ]
then
    echo "Not a Prime Number"
    exit
fi
i=2
while [ "$i" -lt "$no" ]
do
    if [ $(( no % i )) -eq 0 ]
    then
        echo "Not a Prime Number"
        exit
    fi
    i=$(( i + 1 ))
done
echo "Prime No."
| true
|
c8e8f3db42b9b1cc45f2fb843d68225c55b23cd7
|
Shell
|
sc-forks/synthetix
|
/hooks/circleci
|
UTF-8
| 1,716
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -euo pipefail

# Check .circleci/config.yml is up to date and valid, and that all changes are
# included together in this commit.

# Fail early if we accidentally used '.yaml' instead of '.yml'
if ! git diff --name-only --cached --exit-code -- '.circleci/***.yaml'; then
  echo "ERROR: File(s) with .yaml extension detected. Please rename them .yml instead."
  exit 1
fi

# Succeed early if no changes to yml files in .circleci/ are currently staged.
# make ci-verify is slow so we really don't want to run it unnecessarily.
if git diff --name-only --cached --exit-code -- '.circleci/***.yml'; then
  exit 0
fi

# Make sure to add no explicit output before this line, as it would just be noise
# for those making non-circleci changes.
echo "==> Verifying config changes in .circleci/"
echo "--> OK: All files are .yml not .yaml"

# Ensure commit includes _all_ files in .circleci/
# So not only are the files up to date, but we are also committing them in one go.
if ! git diff --name-only --exit-code -- '.circleci/***.yml'; then
  echo "ERROR: Some .yml diffs in .circleci/ are staged, others not."
  echo "Please commit the entire .circleci/ directory together, or omit it altogether."
  exit 1
fi

# Even untracked yml or yaml files will get baked into the output.
# This is a loose check on _any_ untracked files in .circleci/ for simplicity.
if [ -n "$(git ls-files --others --exclude-standard '.circleci/')" ]; then
  echo "ERROR: You have untracked files in .circleci/ please add or delete them."
  exit 1
fi
echo "--> OK: All .yml files in .circleci are staged."

# BUG FIX: the failure message used to say "make ci-verify failed" although
# the command actually run is 'npm run ci:build'.
if ! npm run ci:build; then
  echo "ERROR: npm run ci:build failed"
  exit 1
fi
echo "--> OK: npm run ci:build succeeded."
| true
|
b7aaaa82a6f0466ae1e76c9f98fe4fed8854ba8e
|
Shell
|
archey/pentesting
|
/snort_alert_output.sh
|
UTF-8
| 1,968
| 4.28125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Loops through a directory looking for pcaps and outputs and alert.log file for each pcap with a date and timestamp
# Runs snort in read-file mode over every .pcap under $1, collects all
# alerts into one log under $2, then splits that log by alert priority.
dir="$1"
output_dir="$2"
# NOTE(review): $output_dir is concatenated directly with the file names
# below ("${output_dir}$logoutput"), so it must end with '/' — confirm.
logoutput=snort-alerts.txt
priority1alert=priority1-alerts.txt
priority2alert=priority2-alerts.txt
priority3alert=priority3-alerts.txt
if [[ ! -d "$dir" ]] || [[ ! -d "$output_dir" ]]; then
    printf '%s %s %s %s\n' 'Usage:' "$(basename "$0")" 'location of pcaps' 'location to output logfile'
    printf '%s\n' 'Searches a pcap file for snort alerts'
    exit 0
fi
# Replay every pcap through snort and tee the combined output to the log.
# NOTE(review): these functions are called with arguments but actually read
# the globals $dir/$output_dir — the parameters are ignored.
function alert_output() {
    while read -r; do
        printf '%s %s %s %s\n' 'Reading network traffic from' "$REPLY" 'Time Started:' "$(date +'%D %T')"
        printf '%s\n\n' "################################"
        sudo /sbin/snort -A console -r "$REPLY" --pcap-reset -c /etc/snort/snort.conf -U --daq pcap --daq-mode read-file -q
        printf '%s\n\n' "################################"
        printf '%s %s\n' 'Time Finished:' "$(date +'%D %T')"
    done < <(find "$dir" -type f -iname '*.pcap') | tee -a "${output_dir}$logoutput"
    echo "You may view the alert log at ${output_dir}$logoutput"
}
# Each priority file keeps only one priority by deleting the other two.
function priority_one_alerts() {
    sed -e '/\[Priority: [23]\]/d' "${output_dir}$logoutput" > "${output_dir}$priority1alert"
    echo "Your may view the priority 1 alerts at ${output_dir}$priority1alert"
}
function priority_two_alerts() {
    sed -e '/\[Priority: [13]\]/d' "${output_dir}$logoutput" > "${output_dir}$priority2alert"
    echo "You may view the priority 2 alerts at ${output_dir}$priority2alert"
}
function priority_three_alerts() {
    sed -e '/\[Priority: [12]\]/d' "${output_dir}$logoutput" > "${output_dir}$priority3alert"
    echo "You may view the priority 3 alerts at ${output_dir}$priority3alert"
}
function main {
    alert_output "$dir" "$output_dir"
    priority_one_alerts "$output_dir"
    priority_two_alerts "$output_dir"
    priority_three_alerts "$output_dir"
}
main
| true
|
985f478728d3b5ac4c21e40a31bc403ffa9263d9
|
Shell
|
Lindenk/dotfiles
|
/dotbot_encrypt.sh
|
UTF-8
| 2,290
| 4.40625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# =======================================
# Encrypt and decrypt files based on gpg
# =======================================
#
# Howto:
# Create a file, for example test, inside the main directory.
# Now encrypt it using gpg (like 'gpg -o test.enc.gpg -r <IDENTITY> -e test')
# And run this script.
# The script will find all files ending with .enc.gpg and tries to find it's decrypted file counterpart.
# If the decrypted file is not in the .gitignore file it will be added.
# Further changes to the decrypted file will automatically be transfered to the encrypted file.
# In case there is no decrypted version, the script will also decrypt it itself.
# If the encrypted file is newer than the decrypted one, it will decrypt again.
set -e
shopt -s globstar

# If no GPG ID available in ENV, take the first private identity that we have
[[ "${GPG_ID}" == '' ]] && GPG_ID=$(gpg -K 2>/dev/null | grep -E -o "\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,6}\b" | head -n 1)

# If at this point we don't have a GPG identity, exit
[[ "${GPG_ID}" == '' ]] && exit 1

# Decrypt $1 into $2.
decFile(){
    gpg -q -d "${1}" 2>/dev/null > "${2}"
}

# Encrypt $1 into $2 for the selected identity.
encFile(){
    #echo "[INFO] Encrypting \"${1}\" to \"${2}\""
    gpg -q -o "${2}" -r "${GPG_ID}" -e "${1}" 2>/dev/null
}

# Convert a unix timestamp into the format expected by 'touch -t'.
ts2Str(){
    date -d @"${1}" +%Y%m%d%H%M.%S
}

for encName in **/*.enc.gpg; do
    [[ ! -f "${encName}" ]] && continue
    encModified=$(stat -c %Y "${encName}")
    decName="${encName%.enc.gpg}"
    if [[ ! -f "${decName}" ]]; then
        # BUG FIX: this log line used to print "${1}"/"${2}" — the *script's*
        # positional arguments (normally empty) — instead of the file names.
        echo "[INFO] Decrypting \"${encName}\" to \"${decName}\""
        decFile "${encName}" "${decName}"
        touch -mt $(ts2Str ${encModified}) "${decName}"
    else
        # Sync in whichever direction the newer mtime points; equal mtimes
        # mean the pair is already in sync.
        decModified=$(stat -c %Y "${decName}")
        if [[ "${decModified}" -gt "${encModified}" ]]; then
            echo "[INFO] Decrypted file \"${decName}\" has changed."
            encFile "${decName}" "${encName}"
            touch -mt $(ts2Str ${decModified}) "${encName}"
        elif [[ "${encModified}" -gt "${decModified}" ]]; then
            echo "[INFO] Encrypted file \"${encName}\" has changed."
            decFile "${encName}" "${decName}"
            touch -mt $(ts2Str ${encModified}) "${decName}"
        fi
    fi
    # Make sure the decrypted counterpart never gets committed.
    if [[ ! $(git check-ignore "${decName}") ]]; then
        echo "[INFO] Ignoring \"${decName}\""
        echo -en "\n${decName}" >> '.gitignore'
    fi
done
| true
|
cce87cbbd3b902b864b013408eb2d3f8b82b3224
|
Shell
|
OlegSchwann/MovieSearchService
|
/migration/main.sh
|
UTF-8
| 894
| 3.3125
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
set -o errexit -o nounset -o xtrace -o pipefail;

# Populate the RegisterMovies mongo collection from the open-data register,
# then create a text index over the searchable fields.

# https://opendata.mkrf.ru/opendata/7705851331-register_movies
if wget --no-verbose 'https://opendata.mkrf.ru/opendata/7705851331-register_movies/data-6-structure-3.json.zip'; then
    MIGRATION_DATA='data-6-structure-3.json.zip';
else
    # BUG FIX: this used to be "... edition.' 1 > '/dev/stderr'"; the space
    # made '1' an argument of echo rather than part of the redirection,
    # appending a stray " 1" to the message.
    echo 'Failed to download new data, fallback to 14.12.2020 edition.' 1>&2;
    MIGRATION_DATA='old.data-6-structure-3.json.zip';
fi

unzip "${MIGRATION_DATA}";

# Import every extracted JSON file into the same collection.
for FILE in *'.json'; do
    mongoimport \
        --host '[::]' \
        --port 27017 \
        --db RegisterMovies \
        --collection RegisterMovies \
        --jsonArray \
        --file "${FILE}";
done

# language=MongoDB
mongo --ipv6 'mongodb://[::]:27017/RegisterMovies' <<EOF
db.RegisterMovies.createIndex({
    "data.general.filmname": "text",
    "data.general.foreignName": "text",
    "data.general.annotation": "text"
});
EOF
| true
|
1e71f8823c3b18b8c28654f50ba22f40ade2aa6f
|
Shell
|
xek/puppet-tripleo
|
/files/certmonger-haproxy-refresh.sh
|
UTF-8
| 2,054
| 4.125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script is meant to reload HAProxy when certmonger triggers a certificate
# renewal. It'll concatenate the needed certificates for the PEM file that
# HAProxy reads.

# Print a message to stderr and abort.
die() { echo "$*" 1>&2 ; exit 1; }

[[ $# -eq 2 ]] || die "Invalid number of arguments"
[[ $1 == @(reload|restart) ]] || die "First argument must be one of 'reload' or 'restart'."

ACTION=$1
NETWORK=$2

# Deployment facts come from hiera; container_cli defaults to podman.
certmonger_ca=$(hiera -c /etc/puppet/hiera.yaml certmonger_ca)
container_cli=$(hiera -c /etc/puppet/hiera.yaml container_cli podman)
service_certificate="$(hiera -c /etc/puppet/hiera.yaml tripleo::certmonger::haproxy_dirs::certificate_dir)/overcloud-haproxy-$NETWORK.crt"
service_key="$(hiera -c /etc/puppet/hiera.yaml tripleo::certmonger::haproxy_dirs::key_dir)/overcloud-haproxy-$NETWORK.key"

# Pick the CA bundle matching the configured certmonger CA backend.
ca_path=""
if [ "$certmonger_ca" == "local" ]; then
    ca_path="/etc/pki/ca-trust/source/anchors/cm-local-ca.pem"
elif [ "$certmonger_ca" == "IPA" ]; then
    ca_path="/etc/ipa/ca.crt"
fi

# The external network uses a dedicated PEM path from hiera.
if [ "$NETWORK" != "external" ]; then
    service_pem="$(hiera -c /etc/puppet/hiera.yaml tripleo::certmonger::haproxy_dirs::certificate_dir)/overcloud-haproxy-$NETWORK.pem"
else
    service_pem="$(hiera -c /etc/puppet/hiera.yaml tripleo::haproxy::service_certificate)"
fi

# HAProxy wants cert + CA + key concatenated in a single PEM file.
cat "$service_certificate" "$ca_path" "$service_key" > "$service_pem"

haproxy_container_name=$($container_cli ps --format="{{.Names}}" | grep haproxy)

if [ "$ACTION" == "reload" ]; then
    # Copy the new cert from the mount-point to the real path
    $container_cli exec "$haproxy_container_name" cp "/var/lib/kolla/config_files/src-tls$service_pem" "$service_pem"
    # Set appropriate permissions
    $container_cli exec "$haproxy_container_name" chown haproxy:haproxy "$service_pem"
    # Trigger a reload for HAProxy to read the new certificates
    $container_cli kill --signal HUP "$haproxy_container_name"
elif [ "$ACTION" == "restart" ]; then
    # Copying the certificate and permissions will be handled by kolla's start
    # script.
    $container_cli restart "$haproxy_container_name"
fi
| true
|
8bef70b37bd68df50c7d26866b51eb37c1237abe
|
Shell
|
mesosphere/marathon-jenkins-stats
|
/lib/sql-common.sh
|
UTF-8
| 48
| 3.203125
| 3
|
[] |
no_license
|
# Derive a SQL-friendly table-name prefix from a path: keep only the last
# path component and turn every "-" into "_".
table-prefix() {
  local leaf
  leaf=$(basename "$1")
  printf '%s\n' "${leaf//-/_}"
}
| true
|
76cd08a48ba738c95dd8db41e000e88e56f1ca8c
|
Shell
|
Tech-XCorp/bilder
|
/packages/simyan.sh
|
UTF-8
| 2,441
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
######################################################################
#
# @file simyan.sh
#
# @brief Version and build information for simyan.
#
# @version $Rev$ $Date$
#
# Copyright © 2012-2017, Tech-X Corporation, Boulder, CO.
# See LICENSE file (EclipseLicense.txt) for conditions of use.
#
######################################################################
######################################################################
#
# Version
#
######################################################################
# Built from svn repo only
######################################################################
#
# Builds and deps
#
######################################################################
# Default to the 'pycsh' build unless the caller opted out with NONE.
if test -z "$SIMYAN_BUILDS" -o "$SIMYAN_BUILDS" != NONE; then
    SIMYAN_BUILDS="pycsh"
fi
SIMYAN_DEPS=numpy,dakota
# findBilderTopdir
addtopathvar PATH $BLDR_INSTALL_DIR/simyan/bin
######################################################################
#
# Launch simyan builds.
#
######################################################################
# Configure and build simyan via the bilder helpers (bilderPreconfig,
# bilderConfig, bilderBuild are provided by the surrounding framework).
buildSimyan() {
    # Set cmake options
    local SIMYAN_OTHER_ARGS="$SIMYAN_CMAKE_OTHER_ARGS"
    # Configure and build serial and parallel
    getVersion simyan
    if bilderPreconfig simyan; then
        SIMYAN_MACHINE_ARGS="-DMPIRUN:STRING=aprun -DNODE_DETECTION:STRING=manual -DCORES_PER_NODE:INTEGER=4 -DSOCKETS_PER_NODE:INTEGER=2 -DNODE_ALLOCATION_MODE:SHARED=shared"
        # Parallel build
        if bilderConfig -c simyan pycsh "$CMAKE_COMPILERS_PYC $SIMYAN_OTHER_ARGS $SIMYAN_MACHINE_ARGS $CMAKE_SUPRA_SP_ARG" simyan; then
            bilderBuild simyan pycsh "$SIMYAN_MAKEJ_ARGS"
        fi
    fi
}
######################################################################
#
# Test simyan
#
######################################################################
# Set umask to allow only group to modify
testSimyan() {
    techo "Not testing simyan."
}
######################################################################
#
# Install polyswift
#
######################################################################
# Install the pycsh build, temporarily tightening umask on shared hosts.
installSimyan() {
    # Set umask to allow only group to use on some machines
    um=`umask`
    case $FQMAILHOST in
        *.nersc.gov | *.alcf.anl.gov)
            umask 027
            ;;
        *)
            umask 002
            ;;
    esac
    # Install parallel first, then serial last to override utilities
    bilderInstall simyan pycsh simyan
    # Revert umask
    umask $um
}
| true
|
238c9ce6113c15b8327bde5fbfa185d49231e5d4
|
Shell
|
nak5124/build_env
|
/usr/local/bin/symcopy
|
UTF-8
| 302
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Cygwin helper: copy a file or directory tree with the native Windows
# 'xcopy' so that NTFS attributes and symlinks/junctions are preserved.
src=$(cygpath -w ${1})
# If both source and destination are directories, copy the source *into*
# the destination by appending the source's last path component.
if [ -d "${1}" -a -d "${2}" ] ; then
    dst=$(cygpath -w ${2})'\'${src##*\\}
else
    dst=$(cygpath -w ${2})
fi
xcp=$(cygpath -w /c/Windows/system32/xcopy)
# Feed a small batch script to cmd: switch the console codepage to UTF-8,
# then run xcopy with its recursive/quiet/no-prompt flags.
# NOTE(review): ${1}/${2} are unquoted in the cygpath calls, so paths
# containing spaces will break — confirm before relying on that.
echo '
CALL chcp 65001 > nul
'${xcp}' /E /I /Q /K /Y /B /J '${src}' '${dst}'
exit
' | cmd > /dev/null
| true
|
5db6aa683e717988efcf6706d1c4844fef7d6012
|
Shell
|
dragonmaus/work
|
/.old/cabal-world.sh
|
UTF-8
| 419
| 3.703125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# BUG FIX: the shebang was '#!/bin/sh', but 'read -u 3' below is a bash
# feature; under a POSIX /bin/sh (e.g. dash) the script breaks.  Declare
# bash explicitly.
#
# Reinstall every cabal package listed in the "world" file.  Entries:
#   --*    flag lines, skipped
#   */*    local source directories, cleaned and installed from path
#   other  package names, installed from Hackage
set -e
cd "$HOME"
world=$HOME/etc/cabal/world
test -f "$world" || exit 0
cmd=$1
shift
case $cmd in
(install|update)
    cabal v2-update
    # fd 3 carries the world file so stdin stays free for cabal.
    while read -u 3 name
    do
        case $name in
        (--*)
            continue
            ;;
        (*/*)
            test -d "$name" || continue
            (cd "$name" && cabal v2-clean && rm -f .ghc.environment.* && cabal v2-install .)
            ;;
        (*)
            cabal v2-install "$name"
            ;;
        esac
    done 3<"$world"
    ;;
esac
| true
|
67e2b89548551fd167a193e44f754ac659ac355b
|
Shell
|
dperique/kube-mini-cloud
|
/source/load_docker_image.sh
|
UTF-8
| 418
| 2.890625
| 3
|
[] |
no_license
|
# Use this script if you don't have a container registry and just
# want to get your image onto your k8s nodes directory.
#
# Exports the local docker image to a tarball, copies it to each node
# over scp, and loads it into the node's docker daemon.
myImage=kube-mc:0.1
docker save $myImage -o /tmp/o.tar
mv /tmp/o.tar .
tar czvf t.tgz ./o.tar
# 'junk.rsa' is the ssh private key used to reach the nodes; extend the
# list to cover every node in the cluster.
for i in kube-test-10 ; do
    scp -i junk.rsa t.tgz ubuntu@$i:/tmp
    ssh -i junk.rsa ubuntu@$i "cd /tmp ; tar xzvf /tmp/t.tgz"
    ssh -i junk.rsa ubuntu@$i "cd /tmp ; sudo docker load -i o.tar"
done
| true
|
0dde96a175ea197cb2a559275403b79645bd8e2f
|
Shell
|
habib-rangoonwala/oracle-ebs-monitoring
|
/bin/hGC.ksh
|
UTF-8
| 1,016
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/ksh
###############################################################
# Author: Habib Rangoonwala
# Creation Date: 03-May-2012
# Updation Date: 03-May-2012
###############################################################
# Samples JVM garbage-collection statistics (jstat) once a minute for each
# discovered JVM pid and appends them, timestamped, to gcstat.txt.<pid>.
hFile='gcstat.txt'
# Detect whether the JVM is JRockit or Oracle HotSpot; both the pid
# discovery method and the jstat flags differ between the two.
hJavaVendor=`java -version 2>&1 |grep -i jrockit`
if [ "$hJavaVendor" = "" ]; then
    hJavaVendor="ORACLE"
else
    hJavaVendor="JROCKIT"
fi
if [ "$hJavaVendor" = "ORACLE" ]; then
    hPID=`adopmnctl.sh status|grep oacore|cut -d "|" -f 3`
else
    hPID=`jps|grep Server|cut -d " " -f 1`
fi
# NOTE(review): infinite sampling loop — the 'exit 0' below is unreachable;
# the script is intended to be stopped externally.
while true
do
    for i in $hPID
    do
        hTimeStamp=`date`
        if [ "$hJavaVendor" = "ORACLE" ]; then
            hOutput=`jstat -gcutil $i 1s 1|tail -1`
        else
            hOutput=`jstat -gc $i 1s 1|tail -1`
        fi
        echo $hOutput
        echo "$hTimeStamp $hOutput" >> "$hFile.$i"
    done
    sleep 60
done
exit 0
| true
|
f64e13c4a100f13cf36d8c9c3eb2e47137552616
|
Shell
|
Ghost-VR/minecraft-gibberish
|
/start.sh
|
UTF-8
| 342
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
# Auto-restarting Minecraft server launcher: whenever the server process
# exits (crash or in-game /stop), count down five seconds and start it
# again.  The loop never ends on its own; interrupt with Ctrl+C during
# the countdown to stop for good.
while true
do
    java -Xmx3072M -Xms3072M -jar minecraft_server.1.14.1.jar nogui
    echo "If you want to completely stop the server process now, press Ctrl+C before the time is up!"
    echo "Rebooting in:"
    for i in 5 4 3 2 1
    do
        echo "$i..."
        sleep 1
    done
    echo "Rebooting now!"
done
| true
|
9cbbda22e9f30a2a538371603b45307d8f16c2d5
|
Shell
|
dsmid/kindle-pw2-l10n-cs
|
/sqlite2source
|
UTF-8
| 795
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Export translations from the kindle_loc.sqlite database back into the
# per-file source tree under translation_$VERSION/source.
. config
cd translation_$VERSION
# Refuse to overwrite a previous export.
if [ -d "source/com" ]
then
    echo "Delete translation_4.1/source/com first"
    exit
fi
# Touch file $1 under base directory $2, creating parent dirs first.
deeptouch ()
{
    dir="$2/$(dirname "$1")"
    [ -d "$dir" ] || mkdir -p "$dir"
    touch "$2/$1"
}
# Rows come out '^'-separated; the sed doubles single quotes and escapes
# backslashes so values survive until the inverse sed below undoes it.
sqlite3 -list -separator '^' ../kindle_loc.sqlite "select src,tran,file from trans where ver = 'strings_$VERSION' order by file" | sed "s/'/''/g;s|\\\\|\\\\\\\\|g" | \
{
    IFS="^"
    while read src tran file
    do
        deeptouch "$file" "source"
        echo "src: $src"
        echo "tran: $tran"
        echo "file: $file"
        echo "--------------------------------------------"
        # .properties files use key=value; everything else is tab-separated.
        # Lines get a trailing \r (CRLF output via echo -e).
        if [ "${file%.properties}" != "$file" ]
        then
            echo -e "$src=$tran\r" | sed "s/''/'/g" >> "source/$file"
        else
            echo -e "$src\t$tran\r" | sed "s/''/'/g" >> "source/$file"
        fi
    done
}
|
f750465c09172311a80bb830d48c50ced67b08fb
|
Shell
|
yanji84/ml-automa
|
/scripts/clustersetup.sh
|
UTF-8
| 7,058
| 2.84375
| 3
|
[] |
no_license
|
# Bootstrap a SaltStack master (v2015.5.3) that will manage the three
# worker nodes over salt-ssh.
curl -o /tmp/install_salt.sh -L https://bootstrap.saltstack.com && sh /tmp/install_salt.sh -Z -M git v2015.5.3
# reuse ssh key pair found in the repo
export PUBLIC_KEY=`cat ~/.ssh/id_rsa.pub | cut -d ' ' -f 2`
# salt-ssh roster: the managed nodes and the private key used to reach them.
cat > /etc/salt/roster <<EOF
node1:
  host: 158.85.79.185
  priv: /root/.ssh/id_rsa
node2:
  host: 158.85.79.186
  priv: /root/.ssh/id_rsa
node3:
  host: 158.85.79.184
  priv: /root/.ssh/id_rsa
EOF
# Replace the stock master config with a minimal file/pillar-roots setup.
mv /etc/salt/master /etc/salt/master~orig
cat > /etc/salt/master <<EOF
file_roots:
  base:
    - /srv/salt
fileserver_backend:
  - roots
pillar_roots:
  base:
    - /srv/pillar
EOF
mkdir -p /srv/{salt,pillar} && service salt-master restart
# Smoke test: run a trivial command on every roster entry.
salt-ssh -i '*' cmd.run 'uname -a'
# Top file: every minion gets the hosts entries plus root's ssh/bash states.
cat > /srv/salt/top.sls <<EOF
base:
  '*':
    - hosts
    - root.ssh
    - root.bash
EOF
# Static /etc/hosts entries: localhost plus FQDN and short name per node.
cat > /srv/salt/hosts.sls <<EOF
localhost-hosts-entry:
  host.present:
    - ip: 127.0.0.1
    - names:
      - localhost
node1-fqdn-hosts-entry:
  host.present:
    - ip: 158.85.79.185
    - names:
      - node1.projectx.net
node2-fqdn-hosts-entry:
  host.present:
    - ip: 158.85.79.186
    - names:
      - node2.projectx.net
node3-fqdn-hosts-entry:
  host.present:
    - ip: 158.85.79.184
    - names:
      - node3.projectx.net
node1-hosts-entry:
  host.present:
    - ip: 158.85.79.185
    - names:
      - node1
node2-hosts-entry:
  host.present:
    - ip: 158.85.79.186
    - names:
      - node2
node3-hosts-entry:
  host.present:
    - ip: 158.85.79.184
    - names:
      - node3
EOF
mkdir /srv/salt/root
# Authorize the master's public key (exported above) for root on each node.
cat > /srv/salt/root/ssh.sls <<EOF
$PUBLIC_KEY:
  ssh_auth.present:
    - user: root
    - enc: ssh-rsa
    - comment: root@node1
EOF
# Shell startup files pushed to every node.  The quoted 'EOF' delimiter
# keeps the $-expressions literal — they must expand on the minion.
cat > /srv/salt/root/bash_profile <<'EOF'
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
  . ~/.bashrc
fi
# User specific environment and startup programs
export PATH=$PATH:$HOME/bin
EOF
cat > /srv/salt/root/bashrc <<'EOF'
# .bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
  . /etc/bashrc
fi
# User specific aliases and functions
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
# Java
export JAVA_HOME="$(readlink -f $(which java) | grep -oP '.*(?=/bin)')"
# Spark
export SPARK_HOME="/usr/local/spark"
export PATH=$PATH:$SPARK_HOME/bin:$SPARK_HOME/sbin
# Hadoop
export HADOOP_HOME="/usr/local/hadoop"
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
# Spark (part 2, should come after hadoop setup)
export SPARK_DIST_CLASSPATH=$(hadoop classpath)
EOF
# State that installs the two startup files as root's dotfiles.
cat > /srv/salt/root/bash.sls <<EOF
/root/.bash_profile:
  file.managed:
    - source: salt://root/bash_profile
    - overwrite: true
/root/.bashrc:
  file.managed:
    - source: salt://root/bashrc
    - overwrite: true
EOF
# Apply the base states, then patch every node and install a JRE.
salt-ssh '*' state.highstate
salt-ssh '*' cmd.run 'yum install -y yum-utils'
salt-ssh '*' cmd.run 'yum install -y epel-release'
salt-ssh '*' cmd.run 'yum update -y'
salt-ssh '*' cmd.run 'yum install -y java-1.7.0-openjdk-headless'
mkdir /srv/salt/spark
# Spark worker roster, deployed to conf/slaves on every node.
cat > /srv/salt/spark/slaves <<EOF
node1.projectx.net
node2.projectx.net
node3.projectx.net
EOF
# Unpack Spark 1.5.1 into /usr/local/spark and install the worker roster.
cat > /srv/salt/spark.sls <<EOF
spark:
  archive.extracted:
    - name: /usr/local/
    - source: http://d3kbcqa49mib13.cloudfront.net/spark-1.5.1-bin-without-hadoop.tgz
    - source_hash: md5=5b2774df2eb6b9fd4d65835471f7387d
    - archive_format: tar
    - tar_options: -z --transform=s,/*[^/]*,spark,
    - if_missing: /usr/local/spark/
/usr/local/spark/conf/slaves:
  file.managed:
    - source: salt://spark/slaves
    - overwrite: true
EOF
# mount a filesystem onto the secondary disk
salt-ssh '*' cmd.run 'mkdir -m 777 /data'
salt-ssh '*' cmd.run 'mkfs.ext4 /dev/xvdc'
salt-ssh '*' cmd.run 'echo "/dev/xvdc /data ext4 defaults,noatime 0 0" >> /etc/fstab'
salt-ssh '*' cmd.run 'mount /data'
# Hadoop master/slave rosters, served to the minions as salt://hadoop/*.
mkdir /srv/salt/hadoop
cat > /srv/salt/hadoop/masters <<EOF
node1.projectx.net
EOF
cat > /srv/salt/hadoop/slaves <<EOF
node1.projectx.net
node2.projectx.net
node3.projectx.net
EOF
# Hadoop XML configuration templates, served to the minions via the
# salt://hadoop/ fileserver path that hadoop.sls sources from.
cat > /srv/salt/hadoop/core-site.xml <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>fs.default.name</name>
    <value>hdfs://node1.projectx.net:9000</value>
  </property>
</configuration>
EOF
# BUG FIX: write mapred-site.xml directly into /srv/salt/hadoop/ like the
# other templates.  The original targeted /srv/salt/hadoop/etc/hadoop/, a
# directory that is never created (so the cat failed), while hadoop.sls
# expects salt://hadoop/mapred-site.xml in the flat directory.
cat > /srv/salt/hadoop/mapred-site.xml <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
EOF
cat > /srv/salt/hadoop/hdfs-site.xml <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.data.dir</name>
    <value>/data/hdfs</value>
  </property>
</configuration>
EOF
# YARN config: shuffle service plus resource-manager endpoints on node1.
cat > /srv/salt/hadoop/yarn-site.xml <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>node1.projectx.net:8025</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>node1.projectx.net:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>node1.projectx.net:8050</value>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
    <description>Whether virtual memory limits will be enforced for containers</description>
  </property>
</configuration>
EOF
# Unpack Hadoop 2.7.1 into /usr/local/hadoop and deploy the rosters and
# XML configs prepared above (all sourced from salt://hadoop/).
cat > /srv/salt/hadoop.sls <<EOF
hadoop:
  archive.extracted:
    - name: /usr/local/
    - source: http://apache.claz.org/hadoop/core/hadoop-2.7.1/hadoop-2.7.1.tar.gz
    - source_hash: md5=203e5b4daf1c5658c3386a32c4be5531
    - archive_format: tar
    - tar_options: -z --transform=s,/*[^/]*,hadoop,
    - if_missing: /usr/local/hadoop/
/usr/local/hadoop/etc/hadoop/masters:
  file.managed:
    - source: salt://hadoop/masters
    - overwrite: true
/usr/local/hadoop/etc/hadoop/slaves:
  file.managed:
    - source: salt://hadoop/slaves
    - overwrite: true
/usr/local/hadoop/etc/hadoop/core-site.xml:
  file.managed:
    - source: salt://hadoop/core-site.xml
    - overwrite: true
/usr/local/hadoop/etc/hadoop/mapred-site.xml:
  file.managed:
    - source: salt://hadoop/mapred-site.xml
    - overwrite: true
/usr/local/hadoop/etc/hadoop/hdfs-site.xml:
  file.managed:
    - source: salt://hadoop/hdfs-site.xml
    - overwrite: true
/usr/local/hadoop/etc/hadoop/yarn-site.xml:
  file.managed:
    - source: salt://hadoop/yarn-site.xml
    - overwrite: true
/data/hdfs:
  file.directory
EOF
salt-ssh '*' state.apply spark
salt-ssh '*' state.apply hadoop
# Format the HDFS namenode on this (master) node.
hadoop namenode -format
| true
|
296e2efee3fba6c79693bab75787bb01e54b0c6c
|
Shell
|
DaHuMao/Commonly-used-configuration
|
/dotfiles/zsh/setup.sh
|
UTF-8
| 440
| 3.546875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Link this repository into $HOME as ~/.myzsh, mark its helper scripts
# executable, install gitconfig and zshrc, and make zsh the login shell.

# get the dir of the current script
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) && cd "$SCRIPT_DIR" || return 1

ln -sf "$SCRIPT_DIR" ~/.myzsh

# Mark every helper script executable.  Glob instead of parsing `ls`,
# which breaks on whitespace in filenames; skip cleanly when bin/ is empty.
for bin_file in ~/.myzsh/bin/*; do
  [ -e "$bin_file" ] || continue
  chmod 777 "$bin_file"
done

ln -sf "$SCRIPT_DIR/gitconfig" ~/.gitconfig

# (Re)generate ~/.zshrc from the repository copy.
[ ! -e ~/.zshrc ] && touch ~/.zshrc
cat ./zshrc > ~/.zshrc

# Switch the login shell to zsh unless it already is.
[[ "$SHELL" =~ "zsh" ]] || chsh -s "$(command -v zsh)"
source ~/.zshrc
| true
|
4c4975daaa86a4c697e972df98bad3a80d75e76e
|
Shell
|
open-switch/opx-sdi-sys-vm
|
/src/unit_test/tests/sdi_run_tests.sh
|
UTF-8
| 1,282
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Run the SDI virtual-machine unit tests against a throwaway SQLite
# database, removing the database file and its SysV semaphore afterwards.
SDI_CFG_DIR=/etc/opx/sdi
SQL_CREATE=sdi-db-create.sql
DN_SDI_DB_NAME=vm-test.db
DN_SDI_DB_INIT=sdi-db-test-init.sql
DN_SDI_DB_SEM_KEY=0x564d5554 # VMUT
BIN_DIR=$(dirname $0)
DN_SDI_DB_BASE_DIR=$(realpath $BIN_DIR/data/)
TEST_DB="$DN_SDI_DB_BASE_DIR/$DN_SDI_DB_NAME"
# Copy the create script over to the test data folder
cp $SDI_CFG_DIR/$SQL_CREATE $DN_SDI_DB_BASE_DIR
# Export the environment variable that tells SDI-DB to load the test database
export DN_SDI_DB_BASE_DIR
export DN_SDI_DB_NAME
export DN_SDI_DB_INIT
export DN_SDI_DB_SEM_KEY
# Cleanup the semaphore in case we have an old one lying around
ipcrm -S $DN_SDI_DB_SEM_KEY
# Wrapper function to run the tests and abort early if necessary
# run_test PROG: run $BIN_DIR/PROG; on failure, clean up and exit with the
# test's status so the first failing binary stops the whole suite.
run_test()
{
  local test_prog=$1
  # Run the test
  $BIN_DIR/$test_prog
  # Save the return value
  local retval=$?
  if [[ "$retval" != "0" ]]
  then
    cleanup
    exit $retval
  fi
}
# Cleanup function
# Remove the semaphore and test database; drop the exported overrides.
cleanup()
{
  ipcrm -S $DN_SDI_DB_SEM_KEY
  rm -f $TEST_DB
  unset DN_SDI_DB_BASE_DIR
  unset DN_SDI_DB_NAME
}
# Run the individual tests
run_test sdi_vm_entity_info_unittest
run_test sdi_vm_fan_unittest
run_test sdi_vm_led_unittest
run_test sdi_vm_media_unittest
run_test sdi_vm_thermal_unittest
# Cleanup and exit
cleanup
| true
|
aa0cabd2882e4c711a64733cc57f15d7f72a6d4b
|
Shell
|
Betterpath/EliIE
|
/download.sh
|
UTF-8
| 1,752
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
set -eu
# s3simple is a small, simple bash s3 client with minimal dependencies.
# See http://github.com/paulhammond/s3simple for documentation and licence.
# s3simple get|put s3://bucket/key [file]
# Signs the request with an AWS v2 signature built from
# AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY and issues it with curl.
s3simple() {
  local command="$1"
  local url="$2"
  local file="${3:--}"   # "-" means stdout for get
  # todo: nice error message if unsupported command?
  if [ "${url:0:5}" != "s3://" ]; then
    echo "Need an s3 url"
    return 1
  fi
  # Strip the "s3:/" prefix, leaving "/bucket/key" for the request path.
  local path="${url:4}"
  if [ -z "${AWS_ACCESS_KEY_ID-}" ]; then
    echo "Need AWS_ACCESS_KEY_ID to be set"
    return 1
  fi
  if [ -z "${AWS_SECRET_ACCESS_KEY-}" ]; then
    echo "Need AWS_SECRET_ACCESS_KEY to be set"
    return 1
  fi
  local method md5 args
  case "$command" in
  get)
    method="GET"
    md5=""
    args="-o $file"
    ;;
  put)
    method="PUT"
    if [ ! -f "$file" ]; then
      echo "file not found"
      exit 1
    fi
    # Content-MD5 header lets S3 verify upload integrity.
    md5="$(openssl md5 -binary $file | openssl base64)"
    args="-T $file -H Content-MD5:$md5"
    ;;
  *)
    echo "Unsupported command"
    return 1
  esac
  # AWS v2 string-to-sign: METHOD\nMD5\n(content-type empty)\nDATE\nPATH,
  # HMAC-SHA1'd with the secret key and base64-encoded.
  local date="$(date -u '+%a, %e %b %Y %H:%M:%S +0000')"
  local string_to_sign
  printf -v string_to_sign "%s\n%s\n\n%s\n%s" "$method" "$md5" "$date" "$path"
  local signature=$(echo -n "$string_to_sign" | openssl sha1 -binary -hmac "${AWS_SECRET_ACCESS_KEY}" | openssl base64)
  local authorization="AWS ${AWS_ACCESS_KEY_ID}:${signature}"
  curl $args -s -f -H Date:"${date}" -H Authorization:"${authorization}" https://s3.amazonaws.com"${path}"
}
# Fetch the MetaMap archive from the project bucket.
BUCKET="betterpath-code"
FILE="public_mm_linux_main_2016v2.tar.bz2"
s3simple get s3://$BUCKET/$FILE $FILE
| true
|
b346bb1b891fe21450fa781bb06c2f616e5ebf41
|
Shell
|
stickwarslit/dotfiles
|
/config/i3_ubuntu/mute_toggl.sh
|
UTF-8
| 282
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Toggle mute on the current audio device (ALSA card 1).
#
# After flipping Master, if the card ended up unmuted, also flip the
# matching output control: the Speaker when the Headphone control reports
# 0% volume, the Headphone otherwise.

amixer -c 1 set Master toggle

# Nothing more to do when Master is now muted.
if ! amixer -c 1 get Master | grep "\[on\]"; then
  exit 0
fi

if amixer -c 1 get Headphone | grep "\[0%\]"; then
  amixer -c 1 set Speaker toggle
else
  amixer -c 1 set Headphone toggle
fi
| true
|
60387e81835be764ef63e531e13ed3b26035ec19
|
Shell
|
matthope/eng-concourse-resource-pagerduty-incident
|
/src/out
|
UTF-8
| 9,627
| 3.03125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#! /usr/bin/env bash
set -euo pipefail
input_json="$(cat)"
rest_api=$( echo "$input_json" | jq -cM '.source.rest')
events_v2_api=$( echo "$input_json" | jq -cM '.source.events_v2')
if [[ "$rest_api" == 'null' ]] && [[ "$events_v2_api" == 'null' ]]; then
echo >&2 '[ERROR] You must define either "rest" or "events_v2" in the resource source - you cannot leave them both undefined!'
exit 1
fi
if [[ "$rest_api" != 'null' ]] && [[ "$events_v2_api" != 'null' ]]; then
echo >&2 '[ERROR] You must define either "rest" or "events_v2" in the resource source - you cannot define both of them at once!'
exit 1
fi
build_url="$ATC_EXTERNAL_URL/builds/$BUILD_ID"
if [[ "$rest_api" != 'null' ]]; then
  # REST API mode: create an incident via the PagerDuty REST v2 API.
  api_key=$(echo "$input_json" | jq -r '.source.rest.api_key')
  from_pagerduty_user=$(echo "$input_json" | jq -r '.source.rest.from_pagerduty_user')
  autogenerate_incident_key=$(echo "$input_json" | jq -r '.source.rest.autogenerate_incident_key? // true')
  include_build_link=$(echo "$input_json" | jq -r '.source.rest.include_build_link? // true')
  incident=$(echo "$input_json" | jq -c '.params')
  # Use the build URL as the incident key so retriggers of the same build
  # de-duplicate into one incident.
  if [[ "$autogenerate_incident_key" == 'true' ]]; then
    incident=$(echo "$incident" | jq -c \
      --arg build_url "$build_url" \
      '. * { "incident" : { "incident_key" : $build_url } }')
  fi
  # Append a link to the Concourse build to the incident body.
  # BUG FIX: "$build_url" inside a jq string literal is never expanded by
  # jq; string interpolation must use \($build_url), otherwise the literal
  # text $build_url ended up in the incident details.
  if [[ "$include_build_link" == 'true' ]]; then
    incident=$(echo "$incident" | jq -c \
      --arg build_url "$build_url" \
      '. * { "incident" : { "body" : { "details" : .incident.body.details + " <a href=\"\($build_url)\">Link to Build</a>" } } }')
  fi
  # note that --retry correctly retries only on timeouts and 429 (too many requests) not for unretriable errors
  num_retries=10 # arbitrary hardcode
  curl 1>&2 -XPOST \
    --retry "$num_retries" \
    --no-progress-meter \
    --show-error \
    --fail-with-body \
    --header "Authorization: Token token=$api_key" \
    --header "Accept: application/vnd.pagerduty+json;version=2" \
    --header "Content-Type: application/json" \
    --header "From: $from_pagerduty_user" \
    --data "$incident" \
    https://api.pagerduty.com/incidents
elif [[ "$events_v2_api" != 'null' ]]; then
  # Events v2 mode: connection settings come from the source, event fields
  # from the params; jq's // operator supplies the defaults.
  routing_key=$(              echo "$input_json" | jq -r '.source.events_v2.routing_key? // ""')
  client=$(                   echo "$input_json" | jq -r '.source.events_v2.client? // ""')
  client_url=$(               echo "$input_json" | jq -r '.source.events_v2.client_url? // ""')
  attach_build_url_to_links=$(echo "$input_json" | jq -r '.source.events_v2.attach_build_url_to_links? // true')
  attach_timestamp=$(         echo "$input_json" | jq -r '.source.events_v2.attach_timestamp? // true')
  event_type=$(               echo "$input_json" | jq -r '.params.event_type? // "alert"')
  event_action=$(             echo "$input_json" | jq -r '.params.event_action? // "trigger"')
  dedup_key=$(                echo "$input_json" | jq -r --arg dedup_key "$build_url" '.params.dedup_key? // $dedup_key')
  summary=$(                  echo "$input_json" | jq -r '.params.summary? // ""')
  source_=$(                  echo "$input_json" | jq -r '.params.source? // ""')
  severity=$(                 echo "$input_json" | jq -r '.params.severity? // ""')
  timestamp=$(                echo "$input_json" | jq -r '.params.timestamp? // ""')
  component=$(                echo "$input_json" | jq -r '.params.component? // ""')
  group=$(                    echo "$input_json" | jq -r '.params.group? // ""')
  class=$(                    echo "$input_json" | jq -r '.params.class? // ""')
  custom_details=$(           echo "$input_json" | jq -r '.params.custom_details? // {}')
  custom_details_file=$(      echo "$input_json" | jq -r '.params.custom_details_file? // ""')
  images=$(                   echo "$input_json" | jq -r '.params.images? // []')
  links=$(                    echo "$input_json" | jq -r '.params.links? // []')
  if [[ -z "$routing_key" ]]; then
    echo >&2 "[ERROR] You must define a routing_key!"
    exit 1
  fi
  if [[ "$event_type" != 'alert' ]] && [[ "$event_type" != 'change' ]]; then
    echo >&2 "[ERROR][event_type: $event_type] Unrecognized event type, must be either 'alert' or 'change'"
    exit 1
  fi
  # Enforce the PagerDuty API field-length limits up front.
  dedup_key_length=$(echo -n "$dedup_key" | wc -c)
  if [[ $dedup_key_length -gt 255 ]]; then
    echo >&2 "[ERROR][dedup_key: $dedup_key][length: $dedup_key_length] The dedup_key is longer than the PagerDuty API maximum length of 255 characters!"
    exit 1
  fi
  summary_length=$(echo -n "$summary" | wc -c)
  if [[ $summary_length -gt 1024 ]]; then
    echo >&2 "[ERROR][summary: $summary][length: $summary_length] The summary is longer than the PagerDuty API maximum length of 1024 characters!"
    exit 1
  fi
if [[ "$event_type" == 'alert' ]] && [[ "$event_action" == 'trigger' ]] ; then
if [[ -z "$summary" ]] ; then
echo >&2 "[ERROR][summary: $summary] You must define a summary, it is required by PagerDuty!"
exit 1
fi
if [[ -z "$source_" ]]; then
echo >&2 "[ERROR][source: $source_] You must define a source, it is required by PagerDuty!"
exit 1
fi
if [[ -z "$severity" ]]; then
echo >&2 "[ERROR][severity: $severity] You must define a severity, it is required by PagerDuty! You may choose from: critical, error, warning, info"
exit 1
fi
if [[ -n "$client" ]]; then
client='Concourse'
fi
if [[ -n "$client_url" ]]; then
client_url="$ATC_EXTERNAL_URL"
fi
if [[ -z "$timestamp" ]] && [[ "$attach_timestamp" == 'true' ]]; then
timestamp=$(date --iso-8601=ns)
fi
if [[ -n "$custom_details_file" ]]; then
custom_details=$(jq -nc \
--argjson custom_details "$custom_details" \
--slurpfile additional_custom_details "$custom_details_file" \
'$custom_details * $additional_custom_details')
fi
if [[ "$attach_build_url_to_links" == 'true' ]]; then
links=$(jq -nc \
--argjson links "$links" \
--arg build_url "$build_url" \
'$links + [{"text": "Link to Build", "href": $build_url}]')
fi
elif [[ "$event_type" == 'alert' ]]; then
client=''
client_url=''
elif [[ "$event_type" == 'change' ]]; then
if [[ -z "$summary" ]] ; then
echo >&2 "[ERROR][summary: $summary] You must define a summary, it is required by PagerDuty!"
exit 1
fi
client=''
client_url=''
if [[ -z "$timestamp" ]] && [[ "$attach_timestamp" == 'true' ]]; then
timestamp=$(date --iso-8601=ns)
fi
if [[ -n "$custom_details_file" ]]; then
custom_details=$(jq -nc \
--argjson custom_details "$custom_details" \
--slurpfile additional_custom_details "$custom_details_file" \
'$custom_details * $additional_custom_details')
fi
if [[ "$attach_build_url_to_links" == 'true' ]]; then
links=$(jq -nc \
--argjson links "$links" \
--arg build_url "$build_url" \
'$links + [{"text": "Link to Build", "href": $build_url}]')
fi
fi
  # required payload
  payload=$(jq -cn \
    --arg routing_key "$routing_key" \
    --arg event_action "$event_action" \
    '{"routing_key": $routing_key, "event_action": $event_action }')
  # Merge each optional field into the payload only when it is non-empty.
  if [[ -n "$dedup_key" ]]; then
    payload=$(echo "$payload" | jq -c --arg dedup_key "$dedup_key" '.* {"dedup_key": $dedup_key}')
  fi
  if [[ -n "$client" ]]; then
    payload=$(echo "$payload" | jq -c --arg client "$client" ' .* {"client": $client}')
  fi
  if [[ -n "$client_url" ]]; then
    payload=$(echo "$payload" | jq -c --arg client_url "$client_url" ' .* {"client_url": $client_url}')
  fi
  if [[ -n "$summary" ]]; then
    payload=$(echo "$payload" | jq -c --arg summary "$summary" '. * {"payload":{"summary": $summary}}')
  fi
  if [[ -n "$source_" ]]; then
    payload=$(echo "$payload" | jq -c --arg source_ "$source_" '. * {"payload":{"source": $source_}}')
  fi
  if [[ -n "$severity" ]]; then
    payload=$(echo "$payload" | jq -c --arg severity "$severity" '. * {"payload":{"severity": $severity}}')
  fi
  if [[ -n "$timestamp" ]]; then
    payload=$(echo "$payload" | jq -c --arg timestamp "$timestamp" '. * {"payload":{"timestamp": $timestamp}}')
  fi
  if [[ -n "$component" ]]; then
    payload=$(echo "$payload" | jq -c --arg component "$component" '. * {"payload":{"component": $component}}')
  fi
  if [[ -n "$group" ]]; then
    payload=$(echo "$payload" | jq -c --arg group "$group" '. * {"payload":{"group": $group}}')
  fi
  if [[ -n "$class" ]]; then
    payload=$(echo "$payload" | jq -c --arg class "$class" '. * {"payload":{"class": $class}}')
  fi
  if [[ "$custom_details" != '{}' ]]; then
    payload=$(echo "$payload" | jq -c --argjson custom_details "$custom_details" '. * {"payload":{"custom_details": $custom_details}}')
  fi
  if [[ "$images" != '[]' ]]; then
    payload=$(echo "$payload" | jq -c --argjson images "$images" '. * {"images": $images}')
  fi
  if [[ "$links" != '[]' ]]; then
    payload=$(echo "$payload" | jq -c --argjson links "$links" '. * {"links": $links}')
  fi
  # note that --retry correctly retries only on timeouts and 429 (too many requests) not for unretriable errors
  # Change events go to the dedicated change-events endpoint.
  pagerduty_endpoint='https://events.pagerduty.com/v2/enqueue'
  if [[ "$event_type" == 'change' ]]; then
    pagerduty_endpoint='https://events.pagerduty.com/v2/change/enqueue'
  fi
  num_retries=10 # arbitrary hardcode
  curl 1>&2 -XPOST \
    --retry "$num_retries" \
    --no-progress-meter \
    --show-error \
    --fail-with-body \
    --header "Content-Type: application/json" \
    --data "$payload" \
    "$pagerduty_endpoint"
fi
# Concourse requires a version on stdout; this resource has no real versions.
echo '{"version":{"hash":"none"},"metadata":[]}'
| true
|
e839f64439a624857980cbb699244e5d131fb29a
|
Shell
|
ahashisyuu/OpenSpider
|
/spider_sougou/download.sh
|
UTF-8
| 142
| 3.0625
| 3
|
[] |
no_license
|
# Download the Sogou lexicon URLs listed (one per line) in download.list.
# Usage: download.sh <start-line> <end-line>  (1-based, inclusive)
start=$1
end=$2
# -p: do not fail when the target directory already exists (re-runs).
mkdir -p ciku
for ((i = start; i <= end; i++)); do
  # Pick line $i out of the URL list; quote to survive odd characters.
  link=$(sed -n "${i}p" download.list)
  wget -P ciku/ "$link"
  #sleep 1
done
| true
|
53ad5ac47763da37731ea65a01d1ccb9ea7d325a
|
Shell
|
entn-at/kaldi-lattice-utils
|
/fstbin/fst-minimize
|
UTF-8
| 712
| 3.296875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# fst-minimize: read a Kaldi archive of text FSTs, run OpenFst's
# fstminimize on each entry, and write the minimized FSTs back out.
# Usage: fst-minimize [fstminimize-options] <fst-rspecifier> <fst-wspecifier>
set -e;
[ $# -lt 2 ] &&
  echo "Usage: ${0##*/} [options] <fst-rspecifier> <fst-wspecifier>" >&2 &&
  exit 1;
# Everything before the final two arguments is forwarded to fstminimize.
opts=();
while [ $# -gt 2 ]; do
  opts+=("$1");
  shift;
done;
# Pipeline applied to each archive entry (text FST in, text FST out).
cmd="fstcompile | fstminimize ${opts[@]} | fstprint";
# fstcopy emits the archive as text: a key line, the FST body, then a blank
# line.  The awk script buffers each body, pipes it through $cmd, and
# re-emits "key<newline>body<newline>" for the closing fstcopy to consume.
fstcopy --print-args=false "$1" "ark,t:-" |
  awk -v cmd="$cmd" 'BEGIN{
  key="";
  buffer="";
}{
  if (key == "" && NF == 1) {
    key = $1;
    buffer = "";
    next;
  }
  if (NF == 0) {
    print key;
    print buffer | cmd;
    close(cmd);
    print "";
    key = "";
    buffer = "";
    next;
  }
  buffer = buffer""$0"\n";
}END{
  if (key != "") {
    print key;
    print buffer | cmd;
    close(cmd);
    print "";
  }
}' | fstcopy --print-args=false ark:- "$2";
| true
|
31e8ea5224f3d6c5d8539d5fe901737fd1e4ca6c
|
Shell
|
jonaspm/sysadmin-monitor
|
/krystal.sh
|
UTF-8
| 1,855
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
#author: jonaspm
#Date: Tue Apr 11 12:30:19 MDT 2017
#Comment: monitoring program
# Reads host credentials from krystal.conf, probes each host over ssh for
# disk / users / load / RAM, mails an alert when a metric exceeds 70%, and
# uploads the per-host report to Dropbox.
clear
app_name="sysadmin-monitor"
folder="/etc/cron.$app_name"
config="krystal.conf"
# Config layout: "ip,user,password" records, then one line with the alert
# e-mail address and one line with the Dropbox access token at the end.
list=$(tail -n+3 <(tac $folder/$config))
correo=$(echo $(cat $folder/$config | tail -n2) | awk '{print $1}')
access_token=$(cat $folder/$config | tail -n1)
# Remote probe commands, index-aligned with the textos[] labels below:
# 0 disk usage %, 1 connected users, 2 load %, 3 RAM usage %.
commands[0]='df --total | grep total | awk '"'"'{print $5/1}'"'"
commands[1]='users | tr '"' ' '\n' "'| sort -u | wc -l'
commands[2]='uptime | awk '"'"'{print ($8+$9+$10)*100/3/$(getconf _NPROCESSORS_ONLN)}'"'"
commands[3]='free --total | grep Total | awk '"'"'{print $3/$2*100}'"'"
textos[0]="Porcentaje de disco: "
textos[1]="Usuarios conectados: "
textos[2]="Porcentaje de carga del sistema: "
textos[3]="Porcentaje de uso de RAM: "
# Generate a key pair non-interactively ("n" declines overwriting one).
echo "n" | ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ""
while IFS=, read -r ip usuario contrasena; do
  mkdir ~/$app_name/$ip 2> /dev/null
  conexion="$usuario@$ip"
  # Refresh the known_hosts entry for this host.
  ssh-keygen -R $ip > /dev/null
  ssh-keyscan $ip >> ~/.ssh/known_hosts
  # NOTE(review): presumably an expect wrapper around ssh-copy-id that
  # feeds the stored password — verify against exp_copy_id itself.
  $folder/exp_copy_id $conexion $contrasena 2> /dev/null
  # Keep the previous report around while the new one is written.
  mv ~/$app_name/$ip/$usuario ~/$app_name/$ip/$usuario.old 2> /dev/null
  for index in ${!commands[*]}; do
    resultado=$(ssh -n -T $conexion ${commands[$index]})
    mensaje=$(echo ${textos[$index]} $resultado)
    # Alert above 70% — skipped for the user count (index 1).
    if [[ $(bc <<< "$resultado > 70") > 0 ]] && [[ $index -ne 1 ]]; then
      printf $mensaje | mailx -v -s "Exceso de uso de recursos" $correo
    fi
    echo $mensaje >> ~/$app_name/$ip/$usuario
  done
  # Upload the fresh report to Dropbox.
  curl -X POST https://content.dropboxapi.com/2/files/upload --header "Authorization: Bearer $access_token" --header "Dropbox-API-Arg: {\"path\": \"/$ip/$usuario.txt\",\"mode\":\"add\",\"autorename\": true,\"mute\": false}" --header "Content-Type: application/octet-stream" --data-binary "@$(printf ~/$app_name/$ip/$usuario)"
  rm ~/$app_name/$ip/$usuario.old
done << EOF
$list
EOF
exit 0
| true
|
3ac87bf4de24ce32262d90440afa8e7564011f72
|
Shell
|
JosephBrendler/joetoo
|
/dev-util/joetoolkit/files/backup_utilities/tahawusbackup_mount
|
UTF-8
| 1,194
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Mount the tahawusbackup LVM logical volumes under /mnt/tahawusbackupROOT
# (ROOT first, then usr/home/var/opt/tmp) so the backup tree can be browsed.
# Helpers (checkroot, separator, message*, right_status) and the colour
# variables (LBon/Boff) are defined in the sourced header, not visible here.
source /usr/local/sbin/script_header_brendlefly
BUILD=0.0
VERBOSE=$TRUE
verbosity=2
#---[ main script ]---------------------------------------------------------
checkroot
separator "tahawusbackup_mount-${BUILD}"
message "creating mount point..."
mkdir -p /mnt/tahawusbackupROOT
message_n "Mounting /mnt/tahawusbackupROOT..."
mount /dev/mapper/vg_tahawusbackup-tahawusbackupROOT /mnt/tahawusbackupROOT
right_status $?; echo
message "Creating remaining mount points..."
for x in usr home var opt tmp
do
  message_n "Creating mount point /mnt/tahawusbackupROOT/${LBon}$x${Boff}..." && \
    mkdir -p /mnt/tahawusbackupROOT/$x
  right_status $?; echo
done
message "mounting remaining LVs on /mnt/tahawusbackupROOT..."
# usr lives in a differently named LV, so it is mounted separately.
message_n "Mounting /mnt/tahawusbackupROOT/${LBon}usr${Boff}..." && \
  mount /dev/mapper/vg_tahawusbackup-tahawusbackupUSR /mnt/tahawusbackupROOT/usr/
right_status $?; echo
for x in home var opt tmp
do
  message_n "Mounting /mnt/tahawusbackupROOT/${LBon}$x${Boff}..." && \
    mount /dev/mapper/vg_tahawusbackup-$x /mnt/tahawusbackupROOT/$x
  right_status $?; echo
done
message "mounted LVs on /mnt/tahawusbackupROOT as shown below:"
mount | grep tahawusbackupROOT
| true
|
58b736d9322beaf9f1c11a4ca5cf3861d26bb187
|
Shell
|
qomarullah/dsp-proxy
|
/start.sh
|
UTF-8
| 586
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Start the apidb Java application (unless already running) with a
# classpath assembled from ./classes plus every file under lib/.

cd /home/apps/apidb || exit 1

LIBDIR="lib/"
LIB="./classes"

# Build the classpath with a glob instead of parsing `ls`, which breaks on
# unusual filenames.
for jar in "$LIBDIR"*; do
  [ -e "$jar" ] || continue
  LIB=$LIB:$jar
  # Log each entry on its own line.  (The original `echo $i"\n"` printed a
  # literal backslash-n; printf emits the intended blank line.)
  printf '%s\n\n' "${jar#"$LIBDIR"}"
done

echo "${LIB}"

# app.pid acts as a crude lock: launch only when no pid file exists,
# detaching the JVM and recording its pid.
if [ ! -f app.pid ]; then
  ~/src/jdk1.8.0_65/bin/java -Xss128m -Xms256m -Xmn128m -Xmx512m -verbose:gc -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=9010 -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -cp "$LIB" org.api.db.App conf/mdb.conf > out.txt 2>&1 & echo $! > app.pid
  echo "done"
else
  # "already running?" (Indonesian)
  echo "udah jalan ? "
fi
| true
|
5752d5db18349dd1e36d98de05c8cd5cd74ba19b
|
Shell
|
linyows/wercker-step-ikachan
|
/run.sh
|
UTF-8
| 1,368
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
# Wercker "ikachan" IRC notification step: validate the step properties,
# derive build/deploy metadata, pick the outcome-specific message, and post
# the notification via the bundled Ruby client.

# server, port and channel are mandatory step properties.
if [ ! -n "$WERCKER_IKACHAN_SERVER" ]; then
  error 'Please specify server property'
  exit 1
fi
if [ ! -n "$WERCKER_IKACHAN_PORT" ]; then
  error 'Please specify port property'
  exit 1
fi
if [ ! -n "$WERCKER_IKACHAN_CHANNEL" ]; then
  error 'Please specify channel property'
  exit 1
fi

# Distinguish build runs from deploy runs and record target/url/type.
if [ ! -n "$DEPLOY" ]; then
  export WERCKER_JOB_TARGET=$WERCKER_GIT_BRANCH
  export WERCKER_JOB_URL=$WERCKER_BUILD_URL
  export WERCKER_JOB_TYPE='build'
else
  export WERCKER_JOB_TARGET=$WERCKER_DEPLOYTARGET_NAME
  export WERCKER_JOB_URL=$WERCKER_DEPLOY_URL
  export WERCKER_JOB_TYPE='deploy'
fi

# Use the outcome-specific message when one is configured.
# BUG FIX: the original tested `! -n` (i.e. empty), so a configured
# passed_message / failed_message was never copied into
# WERCKER_IKACHAN_MESSAGE; defaulting requires the `-n` (non-empty) test.
if [ "$WERCKER_RESULT" = "passed" ]; then
  if [ -n "$WERCKER_IKACHAN_PASSED_MESSAGE" ]; then
    export WERCKER_IKACHAN_MESSAGE="$WERCKER_IKACHAN_PASSED_MESSAGE"
  fi
else
  if [ -n "$WERCKER_IKACHAN_FAILED_MESSAGE" ]; then
    export WERCKER_IKACHAN_MESSAGE="$WERCKER_IKACHAN_FAILED_MESSAGE"
  fi
fi

# on=failed: stay silent for successful runs.
if [ "$WERCKER_IKACHAN_ON" = "failed" ]; then
  if [ "$WERCKER_RESULT" = "passed" ]; then
    echo "Skipping.."
    return 0
  fi
fi

ruby "$WERCKER_STEP_ROOT/main.rb" \
  -h "$WERCKER_IKACHAN_SERVER" \
  -p "$WERCKER_IKACHAN_PORT" \
  -c "$WERCKER_IKACHAN_CHANNEL" \
  -m "$WERCKER_IKACHAN_MESSAGE" \
  -r "$WERCKER_APPLICATION_NAME" \
  -u "$WERCKER_STARTED_BY" \
  -t "$WERCKER_JOB_TARGET" \
  -l "$WERCKER_JOB_URL" \
  -j "$WERCKER_JOB_TYPE" \
  -s "$WERCKER_RESULT"
|
62fd9e5f0f2855b0ef73a3d4b80700eeec4b3dda
|
Shell
|
bdekosky/NMED-NT69968
|
/NMED-NT69968_allelic_inclusion_finder.sh
|
UTF-8
| 3,813
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
#Usage command:
#bash NMED-NT69968_allelic_inclusion_finder.sh replicate1_unique_pairs_over1read_noalleles_file replicate2_unique_pairs_over1read_noalleles_file replicate1_complete_pairs_file replicate2_complete_pairs_file
#This script uses the file output from NMED-NT69968_VHVLIMGT_nucleotide_pairings.sh (specifically, complete_pairs.txt and unique_pairs_over1read_noalleles.txt) to detect heavy chains that paired with multiple light chains, where the allelically included pairings were detected in both technical replicates.
UNIQUE_FILE1=$1
UNIQUE_FILE2=$2
COMPLETE_FILE1=$3
COMPLETE_FILE2=$4
echo "$(date +%b-%d-%H:%M:%S) Preparing files..."
#Generate a temporary list of all HC's and LC's of interest from unique_files
awk '{print $2 "\t" $1}' "$UNIQUE_FILE1" | sort | uniq | sort > tempHC1.txt
awk '{print $2 "\t" $1}' "$UNIQUE_FILE2" | sort | uniq | sort > tempHC2.txt
# Light chains observed more than once in each replicate.
awk '{print $3}' "$UNIQUE_FILE1" | sort | uniq -c | sort -n -r | awk '{if($1>1)print}' > tempLC1.txt
awk '{print $3}' "$UNIQUE_FILE2" | sort | uniq -c | sort -n -r | awk '{if($1>1)print}' > tempLC2.txt
# Keep only complete pairs supported by more than one read.
awk '{if($1>1)print}' "$COMPLETE_FILE1" > temp_completefile1.txt
awk '{if($1>1)print}' "$COMPLETE_FILE2" > temp_completefile2.txt
# Heavy chains present in both replicates.
join -j 1 -o 1.1 tempHC1.txt tempHC2.txt > tempHC_1and2.txt
# NOTE(review): these `rm`s error (but do not abort) when the files do not
# exist, e.g. on a first run; `rm -f` would be quieter.
rm 1HC_multipleLC_resultsfile.txt
echo "$(date +%b-%d-%H:%M:%S) Searching for HC's with multiple LC's in both sets..."
rm counterfile.txt
while read line
do
  rm tempLClist1.txt
  rm tempLClist2.txt
  # LCs paired with this HC in each replicate (with read counts and, for
  # replicate 1, columns 8-10 which appear to be gene calls — verify
  # against the pairing script's output format).
  grep $line temp_completefile1.txt | awk '{print $3 "\t" $1 "\t" $8 "\t" $9 "\t" $10}' | sort | uniq > tempLClist1.txt
  grep $line temp_completefile2.txt | awk '{print $3 "\t" $1}' | sort | uniq > tempLClist2.txt
  join -j 1 -o 1.2 2.2 1.1 1.3 1.4 1.5 tempLClist1.txt tempLClist2.txt > temp_overlappingLCs.txt
  #Iterate if HC paired with more than 3 LC
  HCpaircount1=$(grep $line temp_completefile1.txt | awk '{print $3}' | sort | uniq | wc -l | awk '{print $1}')
  if [ $HCpaircount1 -gt 4 ]; then
    continue
  fi
  HCpaircount2=$(grep $line temp_completefile2.txt | awk '{print $3}' | sort | uniq | wc -l | awk '{print $1}')
  if [ $HCpaircount2 -gt 4 ]; then
    continue
  fi
  #Iterate if only one LC paired with that heavy
  overlappingLCcount=$(wc -l temp_overlappingLCs.txt | awk '{print $1}')
  if [ $overlappingLCcount == 1 ]; then
    continue
  fi
  #Iterate if more than 3 LC seqs paired with that heavy to prevent promiscuously paired HCs
  if [ $overlappingLCcount -gt 3 ]; then
    continue
  fi
  #Iterate if nothing in the file
  if [ $overlappingLCcount == 0 ]; then
    continue
  fi
  #Iterate if only one VL seq paired with that heavy
  numVLseqs=$(awk '{print $3}' temp_overlappingLCs.txt | sort | uniq | wc -l | awk '{print $1}')
  if [ $numVLseqs == 1 ]; then
    continue
  fi
  #Iterate if only one VL gene paired with that heavy
  numVLgenes=$(awk '{print $4}' temp_overlappingLCs.txt | sort | uniq | wc -l | awk '{print $1}')
  if [ $numVLgenes == 1 ]; then
    continue
  fi
  #Iterate if only one VJ gene paired with that heavy
  numVJgenes=$(awk '{print $5}' temp_overlappingLCs.txt | sort | uniq | wc -l | awk '{print $1}')
  if [ $numVJgenes == 1 ]; then
    continue
  fi
  # Record the HC with its per-replicate counts plus all overlapping LCs.
  hcseq=$(echo "$line" | awk '{print $1}')
  hccount1=$(echo "$line" | awk '{print $2}')
  hccount2=$(echo "$line" | awk '{print $3}')
  awk -v hcseq="$hcseq" -v count1="$hccount1" -v count2="$hccount2" 'BEGIN {print "HC: " hcseq "\t" count1 "\t" count2 "\nLCs:"}; {print $1 "\t" $2 "\t" $3 "\t" $4 "\t" $5 "\t" $6}; END {print "\n\n"}' temp_overlappingLCs.txt >> 1HC_multipleLC_resultsfile.txt
  # Progress counter: one line appended per accepted HC.
  echo >> counterfile.txt
  COUNTER=$(wc -l counterfile.txt | awk '{print $1}')
  echo "loop $COUNTER"
done < tempHC_1and2.txt
rm counterfile.txt
echo "$(date +%b-%d-%H:%M:%S) Job complete. 1HCmultipleLC seqs are located in 1HC_multipleLC_resultsfile.txt"
| true
|
bd5249859ebd5af9c2d7a133d2e60830785b3665
|
Shell
|
dipeshtripathi/artifactorydemo
|
/folder1/run.sh
|
UTF-8
| 595
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Supervise the acme telegraf agent for user ngdevx.  The presence of
# /tmp/bgl-acme-v01/disable.sh acts as a kill switch: when it exists, any
# running agent is stopped; otherwise the agent is started if not running.
MAILTO=
# e.g. "RHEL7.9" — lsb_release number appended to the "RHEL" prefix.
hostOs=$(/bin/echo RHEL$(/bin/lsb_release -r | awk '{print $2}'))
if [ -e /tmp/bgl-acme-v01/disable.sh ]; then
  # Kill switch present: stop the agent if one is running.
  if /usr/bin/pgrep -u ngdevx -x telegraf > /dev/null; then
    /usr/bin/pkill -u ngdevx -x telegraf > /dev/null
  fi
elif ! /usr/bin/pgrep -u ngdevx -x telegraf > /dev/null; then
  # No kill switch and no agent: make the bundle executable and launch it
  # detached in a subshell, fully disconnected from our stdio.
  /bin/chmod u=rwx -R /tmp/bgl-acme-v01
  (site=BGL dc=BGL11 hostType=VM-8 hostOs=$hostOs tool=acme /tmp/bgl-acme-v01/telegraf --config-directory /tmp/bgl-acme-v01 --config /tmp/bgl-acme-v01/telegraf.conf </dev/null >/dev/null &>/dev/null &)
fi
|
dce2cbe1d5463837ee7ed81e0356f9cefc909aed
|
Shell
|
farinajf/cas-4.1
|
/client/cas-get.sh
|
UTF-8
| 1,751
| 3.296875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#Usage: cas-get.sh {url} {username} {password}
# Fetch the CAS login form, submit the credentials, and print the resulting
# ticket-granting cookie (TGC) and service ticket (ST).
DEST="$1"
USERNAME="$2"
PASSWORD="$3"
# Percent-encode the service URL (keep dots readable, strip newlines).
ENCODED_DEST=`echo "$DEST" | perl -p -e 's/([^A-Za-z0-9])/sprintf("%%%02X", ord($1))/seg' | sed 's/%2E/./g' | sed 's/%0A//g'`
CAS_HOST_PORT=wso2:8443
COOKIE_JAR=.cookieJar
HEADER_DUMP_DEST=.headers
# -f: don't error when the files don't exist yet (first run).
rm -f $COOKIE_JAR
rm -f $HEADER_DUMP_DEST
#GET login form
curl -s -k -c $COOKIE_JAR https://$CAS_HOST_PORT/cas/login?service=$ENCODED_DEST > login.txt
# Scrape the hidden form fields CAS requires on submission.
H_LT=`cat login.txt | grep name=.lt | sed 's/.*value..//' | sed 's/\".*//'`
H_EXECUTION=`cat login.txt | grep name=.execution | sed 's/.*value..//' | sed 's/\".*//'`
H_EVENTID=`cat login.txt | grep name=._eventId | sed 's/.*value..//' | sed 's/\".*//'`
echo H_LT : $H_LT
echo H_EXECUTION: $H_EXECUTION
echo H_EVENTID : $H_EVENTID
#Submit the login form.
#We keep the headers from this request as the return value should be a 302 including a "ticket" param which we'll need in the next request
# BUG FIX: the login ticket must be sent as "&lt=...".  The original data
# string contained "$PASSWORD<=$H_LT" (an HTML-mangled "&lt="), so the lt
# parameter was never transmitted and CAS rejected the login.
curl -v -k -X POST --data "username=$USERNAME&password=$PASSWORD&lt=$H_LT&execution=$H_EXECUTION&_eventId=$H_EVENTID" -i -b $COOKIE_JAR -c $COOKIE_JAR \
  https://$CAS_HOST_PORT/cas/login?service=$ENCODED_DEST -D $HEADER_DUMP_DEST -o /dev/null
TGC=`cat $HEADER_DUMP_DEST | grep TGC= | sed 's/Set-Cookie: TGC=//' | sed 's/;Path.*//'`
TICKET=`cat $HEADER_DUMP_DEST | grep Location | grep ticket=ST | sed 's/.*ticket.//'`
echo "*-------------------------------------------------------*"
echo "* *"
echo \* TGC : $TGC
echo "* *"
echo \* TICKET: $TICKET
echo "* *"
echo "*-------------------------------------------------------*"
| true
|
a56b88eb24fd0d38f3ebbe4daf0dd309840fe909
|
Shell
|
talex5/mirage-clock
|
/.travis-ci.sh
|
UTF-8
| 490
| 2.625
| 3
|
[] |
no_license
|
# Select the Ubuntu PPA that matches the requested OCaml/OPAM combination.
case "$OCAML_VERSION,$OPAM_VERSION" in
  3.12.1,1.1.0) ppa=avsm/ocaml312+opam11 ;;
  4.00.1,1.1.0) ppa=avsm/ocaml40+opam11 ;;
  4.01.0,1.1.0) ppa=avsm/ocaml41+opam11 ;;
  *) echo Unknown $OCAML_VERSION,$OPAM_VERSION; exit 1 ;;
esac

# Install the toolchain from the chosen PPA.
echo "yes" | sudo add-apt-repository ppa:$ppa
sudo apt-get update -qq
sudo apt-get install -qq ocaml ocaml-native-compilers camlp4-extra opam

# Non-interactive, verbose OPAM; then build, install and test the project.
export OPAMYES=1
export OPAMVERBOSE=1
opam init
opam install ocamlfind
eval $(opam config env)
make
make install
make test
| true
|
1cd0b6de2e8abbcd5234aa8826b6ae82cb84297e
|
Shell
|
mhany90/affecthor
|
/utils/filter_lexinput.sh
|
UTF-8
| 697
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# this script applies a lexical filter using lexicons in a given directory
#SBATCH --time=00:15:00
#SBATCH --mem=40GB
# Arguments:
# $1 : input file
# $2 : output file
# $3 : directory containing lexicon evaluators
# $4 : language code (En, Ar, Es)
# join lexical filter input parameters
# NOTE(review): the exact meaning of -I/-U/-A/-B is defined by the Weka
# AffectiveTweets filter options -- confirm against its documentation.
lexparams="-I 1 -U"
# One -lexicon_evaluator option per lexicon file found in $3; the inner
# quotes are kept literal here and are re-parsed by the eval below.
for f in ${3}/*; do
lexparams="${lexparams} -lexicon_evaluator \"affective.core.ArffLexiconEvaluator -lexiconFile $f -A 1 -B ${4}\""
done
lexfilter="weka.filters.unsupervised.attribute.TweetToInputLexiconFeatureVector ${lexparams}"
# build and run weka filtering command
# eval is required so the embedded \"...\" groups split into single
# arguments for weka.Run instead of being word-split blindly.
run_filter="java -Xmx40G -cp weka.jar weka.Run ${lexfilter} -i ${1} -o ${2}"
eval $run_filter
| true
|
3a34dffdde0b268e3260810b10e31e70d1f6518b
|
Shell
|
wifido/UBS_Projects
|
/code/LicAudit/LicAudit2.ksh
|
UTF-8
| 1,673
| 3.25
| 3
|
[] |
no_license
|
#!/bin/ksh
# License audit: query the status of a list of Netcool PA servers and
# append the results to a per-environment data file.
# Flags: -d = dev server list, -p = prod server list.

# Load the Netcool environment if the profile exists.
if [ -f /sbclocal/netcool/UBS_ENVIRONMENT/omniprof ]; then
. /sbclocal/netcool/UBS_ENVIRONMENT/omniprof
fi
while getopts dp opt
do
case "$opt" in
d) # Development PA server list and log file
PA_LIST="LDN1052_PA LDN1054_PA STM5257_PA STM1953_PA STM1935_PA STM1979_PA STM9174_PA SNG1281_PA"
LOGFILE=LicAudit2Dev.data;;
p) # Production PA server list and log file
PA_LIST="LDN0064_PA LDN0150_PA LDN0628_PA LDN1014_PA LDN1017_PA LDN1018_PA LDN2376_PA LDN2795_PA LDN2892_PA LDN2893_PA OPF0100_PA SNG1001_PA SNG1209_PA SNG1282_PA SNG1283_PA SNG1284_PA SNG1285_PA SNG1299_PA STM1315_PA STM1318_PA STM1343_PA STM1394_PA STM1770_PA STM1771_PA STM5987_PA STM6143_PA STM8958_PA ZUR0565_PA ZUR0567_PA"
LOGFILE=LicAudit2Prod.data;;
*) # Unknown argument: audit nothing
PA_LIST="";;
esac
done
OMNIHOME=${MUSE_BASE}/omnibus
##############################################################################################################
##############################################################################################################
# Query one PA server (named by $PA_SERVER) and append its status to $LOGFILE.
# SECURITY NOTE(review): credentials are hardcoded and visible in `ps`
# output -- consider moving them to a protected config file.
Check_PA_AGENT() {
${OMNIHOME}/bin/nco_pa_status -server ${PA_SERVER} -user netcool -password orange18 >> ${LOGFILE}
}
##############################################################################################################
# Rotate the previous data file, then audit every server in the list.
if [ -f ${LOGFILE} ]; then
mv ${LOGFILE} ${LOGFILE}.old
fi
for PA_SERVER in ${PA_LIST}
do
# print "Now doing server ${PA_SERVER}\n"
print "================ Start of PA Server ${PA_SERVER}========================================" >> ${LOGFILE}
Check_PA_AGENT
print "================ End of PA Server ${PA_SERVER}========================================" >> ${LOGFILE}
print " " >> ${LOGFILE}
done
| true
|
347c972acb7c87956ffd4c299ca2720a805180a7
|
Shell
|
tklijnsma/geant4
|
/EEShashlikSimulation/H4OpticalSmall/rebuild.sh
|
UTF-8
| 761
| 2.734375
| 3
|
[] |
no_license
|
echo "Remember to set up the environment first with setup_geant4.sh"

# copy_support_files -- stage the helper scripts into the fresh cmake dir.
# (Previously duplicated verbatim in both branches.)
copy_support_files() {
  cp ../rebuild.sh rebuild.sh
  cp ../recompile.sh recompile.sh
  cp ../runThomas.mac runThomas.mac
}

# Quote $HOSTNAME so the test is safe even if the variable is empty, and
# abort if cd fails so 'rm -rf cmake' can never run in the wrong directory.
if [ "$HOSTNAME" = "t3ui17" ]; then
  echo "Rebuilding on PSI"
  cd /shome/tklijnsm/testbeam/geant4/EEShashlikSimulation/H4OpticalSmall || exit 1
  rm -rf cmake
  mkdir cmake
  cd cmake
  copy_support_files
  # PSI needs an explicit Geant4 install location.
  cmake -DGeant4_DIR=/shome/tklijnsm/geant4/geant4.10.01.p02-install/lib64/Geant4-10.1.2/ ..
  make
else
  echo "Rebuilding on lxplus"
  cd /afs/cern.ch/user/t/tklijnsm/Work/geant4/EEShashlikSimulation/H4OpticalSmall || exit 1
  rm -rf cmake
  mkdir cmake
  cd cmake
  copy_support_files
  cmake ..
  make
fi
| true
|
55c3e885d0ca7cedd8a63902082084dd0e97f344
|
Shell
|
bmaltais/primitive
|
/scripts/prim.sh
|
UTF-8
| 432
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
# Average several stochastic 'primitive' renderings of an image.
# Example: ./prim.sh ber.jpg 2000 1024 2048 80 6
#   $1 input image   $2 shape count   $3 resize px   $4 output size px
#   $5 mode/percent tag   $6 number of runs to average
filename=${1%.*}
# Record the parameters used for this run.
# Bug fix: output names used "$(unknown)", which tried to execute a
# nonexistent command; the computed (and previously unused) $filename
# prefix is what the names were meant to be built from.
echo "prim.sh ${1} ${2} ${3} ${4} ${5} ${6}" > "${filename}-${5}-parms.txt"
# seq replaces the fragile 'eval echo {1..N}' brace-expansion trick.
for i in $(seq 1 "${6}")
do
primitive -rep 10 -a 0 -i "${1}" -m 0 -n "${2}" -r "${3}" -s "${4}" -mp "${5}" -v -o "${filename}-${5}-mean${6}-tmp$i.png"
# Re-average after every run so partial progress is always visible.
convert "${filename}-${5}-mean${6}"-tmp*.png -average "${filename}-${5}-mean${6}.png"
done
# Drop the intermediate frames, keeping only the averaged result.
rm "${filename}-${5}-mean${6}"-tmp*.png
| true
|
ffd882b79c633051dd1125c5c44079f657b618f4
|
Shell
|
mellwa/cs246_project
|
/Makefile.sh
|
UTF-8
| 410
| 3.09375
| 3
|
[] |
no_license
|
CXX = g++                       # compiler
CXXFLAGS = -g -Wall -MMD        # compiler flags (-MMD emits .d dependency files)
OBJECTS = 1.o 2.o 3.o 4.o ...   # all .o files (replace with the real list)
DEPENDS = ${OBJECTS:.o=.d}      # get dependency
EXEC = soda                     # executable name

${EXEC} : ${OBJECTS}            # link step
	${CXX} ${OBJECTS} -o ${EXEC}

-include ${DEPENDS}             # pull in auto-generated header dependencies

.PHONY : clean
clean :                         # remove files that can be regenerated
	rm -f ${DEPENDS} ${OBJECTS} ${EXEC}
| true
|
0824dda0c2509c66ac0120caf1a74f5606e2e6d3
|
Shell
|
lkcbharath/Lab_Assignments
|
/3rd Semester/IT203 UNIX/Lab 4/3.sh
|
UTF-8
| 800
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Collect the contents of four text files from the user via gedit,
# then bundle them into a single zip archive.

# Start clean: remove any leftovers from a previous run.
for suffix in a b c d; do
  rm -f "3${suffix}.text"
done

# Prompt for and edit each file in turn.
n=1
for suffix in a b c d; do
  echo "Enter contents of file $n in editor about to open. Press enter to continue. Use Ctrl+Shift+S to save and Alt+F4 to close it."
  read garbage
  gedit "3${suffix}.text"
  n=$((n + 1))
done

zip 3zip 3a.text 3b.text 3c.text 3d.text
echo "Your files are now zipped into the zipfile 3zip."
| true
|
b29279fa30caf87720de3b42527a6b1881be7343
|
Shell
|
ted537/pt20-student
|
/test/generate-traces.sh
|
UTF-8
| 715
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# generate all required ssltraces for evaluation

# Each compiler phase is tested on its own dirs plus those of later phases.
coder_test_dirs="test/coder test/examples"
semantic_test_dirs="$coder_test_dirs test/semantic"
parser_test_dirs="$semantic_test_dirs test/parser"
scanner_test_dirs="$parser_test_dirs test/scanner"

# run_traces TRACE_SCRIPT DIR... -- run TRACE_SCRIPT on every test case
# discovered under the given directories.
run_traces() {
  local trace_script=$1
  shift
  local tc
  for tc in $(./test/find_test_cases.sh "$@"); do
    "$trace_script" "$tc"
  done
}

run_traces ./test/scanner-trace.sh $scanner_test_dirs
run_traces ./test/parser-trace.sh $parser_test_dirs
run_traces ./test/semantic-trace.sh $semantic_test_dirs
run_traces ./test/coder-trace.sh $coder_test_dirs
| true
|
b2f02bdcd8d41886368530821688d2b8fabe2660
|
Shell
|
carlbrown/SwiftServerComparison
|
/bin/utility/archive_core.sh
|
UTF-8
| 932
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# If a 'core' file exists in the current directory, archive its stack trace
# (via lldb) and the git state of the project and its Packages under
# $HOME/test_runs/<run-date>/, then remove the core file.
#
# Usage: archive_core.sh <executable_path> [port [override_date_in_seconds_since_epoch]]
RUN_DATE="$(date '+%s')"
if [ $# -lt 1 ] || [ $# -gt 3 ] ; then
  echo "Usage: $0 <executable_path> [port [override_date_in_seconds_since_epoch]]" >&2
  exit 2
elif [ $# -eq 3 ] ; then
  RUN_DATE="$3"
fi

# Default port is 3000 when not supplied.
PORT="${2:-3000}"
export PORT

if [ -f core ] ; then
  # Tag this core with port + capture time so repeated crashes don't collide.
  DATE="$PORT.$(date '+%s')"
  # Bug fix: the original tested/created $HOME/test_runs/$DATE *before* DATE
  # was assigned, so the real output directory was never created.
  mkdir -p "$HOME/test_runs/$RUN_DATE"
  # Dump all thread backtraces, then force lldb to exit non-interactively.
  lldb "$1" -c core -o "bt all" -o "script import os; os._exit(1)" > "$HOME/test_runs/$RUN_DATE/core.stacktrace.$DATE.out" 2>&1
  #mv core $HOME/test_runs/$RUN_DATE/core.$DATE.core
  rm -f core
  # Record the exact source state: top-level commit plus each package's
  # commit and uncommitted diff.
  git log -1 --pretty=oneline > "$HOME/test_runs/$RUN_DATE/git.$DATE.out"
  for i in Packages/*; do
    echo "$i: " >> "$HOME/test_runs/$RUN_DATE/git.$DATE.out"
    cd "$i" || continue
    git log -1 --pretty=oneline >> "$HOME/test_runs/$RUN_DATE/git.$DATE.out"
    git diff >> "$HOME/test_runs/$RUN_DATE/git.$DATE.out"
    cd ../..
  done
fi
| true
|
ce96c2a11fef9af1b35e29f22a920303c029ec79
|
Shell
|
UltraViolentLight/deployment_nginx_kubernetes
|
/scripts/upload_docker.sh
|
UTF-8
| 305
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Tag the locally built nginx deployment image and push it to Docker Hub.

# Repository path on the registry.
dockerpath="ultraviolentlight/deployment_nginx_kubernetes"
echo "Docker ID and Image: $dockerpath"

# Authenticate; only tag the local image if login succeeded.
docker login &&\
docker image tag deployment_nginx_kubernetes "$dockerpath"

# Push image to a docker repository
docker image push "$dockerpath"
| true
|
812bbb18c86141675a1a5474e86cc893b8bc88df
|
Shell
|
Willdiedog/MA5671-205
|
/usr/bin/wap.ssp.dproc
|
UTF-8
| 372
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/sh
# Dump debug information (backtrace and memory maps) for a task by PID.
# Usage: wap.ssp.dproc <task id>
if [ $# -ne 1 ] ; then
	echo "Error arg, please input task id. eg: wap.ssp.dproc 32" >&2
	# Bug fix: signal failure on bad usage (the original exited 0).
	exit 1
fi
# Point the proc debug interface at the requested task, then read it back.
echo "$1" > /proc/wap_proc/proc_dbg
cat /proc/wap_proc/proc_dbg
echo "========================== backtrace ========================="
#wap.ssp.procbt $1
shellcmdexe "$1" 0x2004
echo "========================== maps =============================="
cat "/proc/$1/maps"
| true
|
88d1f5d9dee14131804d6d593d5fd0744636ba2e
|
Shell
|
18307612949/core-bash
|
/winhong/mysql.sh
|
UTF-8
| 1,237
| 3.265625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Script to install and configure MySQL: set the root password, grant
# remote root access, create the 'winstore' database, force UTF-8 client
# encoding, disable SELinux, and open the MySQL firewall port.
# Usage: mysql.sh mysql_password
# author: Xiong Neng
# SECURITY NOTE(review): the password travels on the command line (visible
# in `ps` output) -- consider reading it from a file or prompt instead.
echo "====================change root password===================="
mysqladmin -u root password $1 2>/dev/null
echo "====================grant privilege to remote================="
hostnm=$(hostname)
# Allow root logins from any host and from this host's own name.
mysql -uroot -p$1 -e "grant all privileges on *.* to root@'%' identified by '$1'; flush privileges;" 2>/dev/null
mysql -uroot -p$1 -e "grant all privileges on *.* to root@'$hostnm' identified by '$1'; flush privileges;" 2>/dev/null
mysql -uroot -p$1 -e "CREATE DATABASE IF NOT EXISTS winstore default charset utf8 COLLATE utf8_general_ci;" 2>/dev/null
echo "====================mysql encoding utf8======================="
# Append a [mysql] utf8 section only if no default charset is set yet.
charset=$(cat /etc/my.cnf | grep 'default-character-set')
if [[ "$charset" == "" ]]; then
cat <<EOF >>/etc/my.cnf
[mysql]
default-character-set = utf8
EOF
fi
echo "====================disable selinux========================="
# Takes effect after a reboot; only patches the config when present.
if [[ -f /etc/selinux/config ]]; then
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
fi
echo "====================open 3306 port==========================="
firewall-cmd --zone=public --add-service=mysql --permanent 2>/dev/null
systemctl restart firewalld 2>/dev/null
| true
|
2aedfde8652527ba5e8d4c21ba9f5513aabc2e3b
|
Shell
|
simonpfish/timeflow
|
/setup
|
UTF-8
| 637
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/sh
# modeled after https://github.com/jasonrudolph/keyboard#installation
set -e

# Abort (via set -e) unless Homebrew is available.
which -s brew || (echo "Homebrew is required: http://brew.sh/" && exit 1)

# Install anything from the Brewfile that isn't already present.
brew bundle check || brew bundle

# Prepare custom settings for Hammerspoon
mkdir -p ~/.hammerspoon
grep -sq "require('timeflow')" ~/.hammerspoon/init.lua || \
  echo "require('timeflow')" >> ~/.hammerspoon/init.lua

# Link this checkout into the Hammerspoon config directory.
ln -sfn $PWD ~/.hammerspoon/timeflow

# If Hammerspoon is already running, kill it so we can pick up the new config
# when opening Hammerspoon below
killall Hammerspoon || true

# Open Hammerspoon
open /Applications/Hammerspoon.app
echo "Done!"
| true
|
82895e7ef9ffdef9503033ace4fc0a492630c867
|
Shell
|
Kuniwak/dotfiles
|
/setup-swift.sh
|
UTF-8
| 264
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
# Install the Swift toolchain helpers: swiftenv plus swiftlint.
set -u

# has NAME -- succeed if NAME is an available command.
has() {
	type "$1" > /dev/null 2>&1
}

setup_swiftenv() {
	# Bug fix: clone over HTTPS -- GitHub no longer serves the
	# unauthenticated git:// protocol.
	git clone https://github.com/kylef/swiftenv.git "$HOME/.swiftenv"
}

setup_lints() {
	# swiftenv is required first; install it only when missing.
	has swiftenv || setup_swiftenv
	brew install swiftlint
}

setup_swift() {
	setup_lints
}

setup_swift
| true
|
d9cfe2d919565cfebe95ef149516eb2eaf1934db
|
Shell
|
docker-suite/alpine-nginx
|
/Dockerfiles/rootfs/etc/entrypoint.d/401-logs.sh
|
UTF-8
| 833
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Ensure the nginx log directory and log files exist with the right
# ownership and permissions.
# set -e : Exit the script if any statement returns a non-true return value.
# set -u : Exit the script when using uninitialised variable.
set -eu

# Log locations
log_dir="/var/log/nginx"
log_access="access.log"
log_error="error.log"

# Create log folder if necessary
if [[ ! -d "${log_dir}" ]]; then
    DEBUG "Creating log folder: $log_dir"
    mkdir -p "${log_dir}"
fi

# Create each missing log file; entries are "label:filename" pairs so the
# DEBUG message can name the kind of log being created.
for entry in "access:${log_access}" "error:${log_error}"; do
    kind="${entry%%:*}"
    log_file="${entry#*:}"
    if [[ ! -f "${log_dir}/${log_file}" ]]; then
        DEBUG "Creating ${kind} log file: ${log_dir}/${log_file}"
        touch "${log_dir}/${log_file}"
    fi
done

# Update ownership and permissions
chown -R "${NGINX_USER}":"${NGINX_USER}" "${log_dir}"
chmod 0755 "${log_dir}"
| true
|
2fe999a1ebedadde6701da7d86bf2bc8160d6c3d
|
Shell
|
linuxfhy/auto_test
|
/cvt/tests/mn/tc_serviceip_pos001.sh
|
UTF-8
| 1,942
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/bash
#
# Copyright (c) 2017, Inspur. All rights reserved.
#
################################################################################
#
# __stc_assertion_start
#
# ID: mn/tc_serviceip_pos001
#
# DESCRIPTION:
#	1. verify service ip is changed as the cmd execute
#	2. verify the service ip is stored in midplane
#
# STRATEGY:
#	1. test mcs is running, if not prompt to start mcs
#	2. check service ip is the same between NIC and midplane
#	3. update serviceip using satask cmd
#	4. if the satask execute correct, check again service ip
#	   is the same between NIC and midplane
#
# __stc_assertion_end
#
################################################################################

NAME=$(basename $0)
CDIR=$(dirname $0)
TMPDIR=${TMPDIR:-"/tmp"}

TSROOT=$CDIR/../../
source $TSROOT/lib/libstr.sh
source $TSROOT/lib/libcommon.sh
source $TSROOT/config.vars
source $CDIR/mn_comm.sh
source $CDIR/sip_comm.sh

# Fixed test values used for the service-ip update.
SERVICEIP=100.2.45.55
SERVICEGW=100.2.45.1
SERVICEMASK=255.255.255.0

# upd_serviceip_test -- change the node service ip via satask.
# Returns 0 on success, 1 when mcs is not running or the update fails.
function upd_serviceip_test
{
	local is_run
	local ret
	local cmd

	is_run=$(is_running_mcs)
	[[ $? != 0 || $is_run != 1 ]] && {
		msg_fail "please first run mcs"
		return 1
	}

	cmd="satask chserviceip -serviceip ${SERVICEIP} \
	    -gw ${SERVICEGW} -mask ${SERVICEMASK}"
	msg_info $cmd
	RUN_POS satask chserviceip -serviceip ${SERVICEIP} \
	    -gw ${SERVICEGW} -mask ${SERVICEMASK}
	# Bug fix: capture the status before reporting it -- $ret was
	# previously declared but never assigned, so the failure message
	# always printed an empty value.
	ret=$?
	if (( ret != 0 )) ; then
		msg_fail "update service ip failed [$ret]"
		return 1
	fi
	return 0
}

tc_start $0
trap "tc_xres \$?" EXIT

msg_info "change service ip to default"
RUN_POS satask chserviceip -default || exit $STF_FAIL

msg_info "compare before update"
# NOTE(review): only this first comparison passes /tmp as the work dir;
# the post-update call below omits it -- confirm which form is intended.
cmp_sip_midplane_local /tmp || exit $STF_FAIL

#update service ip using given ip
upd_serviceip_test || exit $STF_FAIL

msg_info "compare after update"
cmp_sip_midplane_local || exit $STF_FAIL

exit $STF_PASS
| true
|
fb748abd765007ea31c40bbc301cbd0839acc7fa
|
Shell
|
HELLOWORLDxN/raspberryPi
|
/autofan/install.sh
|
UTF-8
| 334
| 3
| 3
|
[] |
no_license
|
# Resolve the absolute directory containing this script.
dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Pre-compile the Python sources in place.
python3 -m compileall "$dir"

# Install target for the autofan program files.
dst="/usr/bin/autofan"
echo 'dst' $dst
echo 'dir' $dir

# Create the install directory when it does not yet exist.
[ -d "$dst" ] || mkdir "$dst"

# Make sure the PID file exists alongside the sources.
[ -f "$dir/autofan.pid" ] || touch "$dir/autofan.pid"

# Copy the program files and register the systemd unit.
cp -r $dir/* /usr/bin/autofan
cp -r $dir/service/autofan.service /lib/systemd/system/
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.