blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
55e498e22a6790dbe2cf7000cafa2bce47864e8d | Shell | e-gov/Mesh-POC | /CreatePortainer.sh | UTF-8 | 403 | 2.640625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
# Loob uue konteineri Portainerile ja käivitab selle
#
# käivitamine: sudo bash RunPortainer.sh
#
# Portaineri UI ava sirvikus, port 9000 (http)
#
# Portaineri seistamine: docker stop portainer
#
# taaskäivitamine: docker start portainer
docker run \
--name portainer\
--detach \
--publish 9000:9000 \
--volume /var/run/docker.sock:/var/run/docker.sock \
portainer/portainer
| true |
d21987aafe3030fe9868840f5d9082956a14b495 | Shell | sfrenk/sTeloMap | /install.sh | UTF-8 | 394 | 3.359375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/bash
# Find snakefile directory
script_dir="$(pwd)/sTeloMap"
snakefile_dir="${script_dir}/snakemake"
echo "Snakefile directory: $snakefile_dir"
# Edit setup_dir.sh to contain correct snakefile directory
#snakefile_dir_sed="\"${snakefile_dir}\""
#echo $snakefile_dir_sed
sed -i -e "s|^snakefile_dir.*|snakefile_dir=${snakefile_dir}|g" "${snakefile_dir}/setup_dir.sh"
echo "Done!"
| true |
baa78b49febe8ce100057497cd70347cbcd6f1c1 | Shell | anshulverma/bashrc | /lib/app/emacs.bash | UTF-8 | 391 | 3.359375 | 3 | [] | no_license | #!/bin/bash
if [ ! -z "$(which emacs)" ] && [ $PLATFORM == 'OSX' ] && $(brew_installed "emacs")
then
# Make sure newer emacs is picked up
alias emacs="/Applications/Emacs.app/Contents/MacOS/Emacs -nw"
else
EXIT_CODE=1
return
fi
# make sure mural (a fast, fuzzy typeahead) binary is in PATH
MURAL_PATH="$HOME/.mural"
if [ -d "$MURAL_PATH" ]; then
export PATH=$PATH:$MURAL_PATH
fi
| true |
515fa51e6897bb68f17a45c96949314e25bd10f3 | Shell | wso2-incubator/big-data-analytics-smartenergy | /game-trunk/utilities/portal/gldq | UTF-8 | 1,657 | 3.703125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
. config
# setup accounting folder
if [ ! -f acct ]; then
echo "# accounting started $(date)" >acct
echo "# date:jobdir:pid:user:project:seconds" >>acct
fi
# setup jobs folder
if [ ! -d jobs ]; then
mkdir -p jobs
fi
# start server
if [ "$1" == "start" ]; then
# server is already running
if [ -f .pid ]; then
exit 0;
fi
# cleanup on exit
trap 'rm -f .status .pid;' EXIT
# server files
echo $$ >.pid
echo "starting" >.status
echo "$(date): queue started"
# server loop
while [ true ]; do
for job in $(ls jobs); do
if [ "$(cat jobs/$job/.status)" == "Pending" ]; then
echo "running job $job" >.status
echo "$(date): starting job $job" >>.log
# run job
#find jobs/$job -type d -exec chmod g+w \{\} \;
if ( ./gldjob jobs/$job 2>&1 | sed -e "s/^/$(date): /" >>.log ); then
echo "$(date): job $job completed ok"
else
echo "$(date): job $job failed"
fi
fi
done
echo "idle" >.status
sleep 1
done
# stop server
elif [ "$1" == "stop" ]; then
if [ -f .pid ]; then
kill $(cat .pid)
echo "$(date): queue stopped"
fi
# reset server
elif [ "$1" == "reset" ]; then
pkill -e apache gldq
rm -f .pid .status
echo "$(date): server reset"
# acct reset
elif [ "$1" == "save" ]; then
if [ -z "$MAILTO" ]; then
save="acct-$(date +'%Y%m%d%H%M%S')"
mv acct $save
echo "$(date): accounting data saved to $save"
else
mail "$MAILTO" -s "GLD Portal accounting" <acct
rm acct
echo "$(date): accounting data mailed to '$MAILTO'"
fi
echo "# accounting started $(date)" >acct
echo "# date:jobdir:pid:user:project:seconds" >>acct
# report server status
else
cat .status
fi
| true |
c9b3d3f1dabd8bacd32323953d008f53958ea5f9 | Shell | delkyd/alfheim_linux-PKGBUILDS | /crossvc/PKGBUILD | UTF-8 | 1,998 | 2.828125 | 3 | [] | no_license | # Contributor: Shinlun Hsieh <yngwiexx@yahoo.com.tw>
# Maintainer: kaptoxic
pkgname=crossvc
pkgver=1.5.2
pkgrel=5
pkgdesc="A graphical interface for the cvs client"
arch=('i686' 'x86_64')
url="http://www.lincvs.org/"
license=('GPL')
depends=('qt3' 'cvs')
source=(
http://crossvc.com/download/$pkgname-$pkgver-0-generic-src.tgz
crossvc
crossvc.desktop
lincvs_updated.pro
)
md5sums=('4fb196e4e5fb5b6c5d901601869308b2'
'55ec40cdf6cacbba591dd95541e50574'
'70506604810792b1740c94b67333073c'
'c01681ec367d82815dd42b7d86c77135')
build() {
cd $srcdir/CrossVC
#. /etc/profile.d/qt3.sh
export QTDIR=/usr/lib/qt3
#export PATH=/usr/lib/qt3/bin:$PATH
#export QMAKESPEC=/usr/share/qt3/mkspecs/linux-g++
cp -f $srcdir/lincvs_updated.pro lincvs.pro
qmake-qt3 -makefile -o Makefile lincvs.pro
#cp -f $srcdir/Makefile Makefile
make || return 1
}
package() {
cd $srcdir/CrossVC
export QTDIR=/usr/lib/qt3
export PREFIX=$pkgdir/usr
make install
install -Dm755 ${srcdir}/crossvc $pkgdir/usr/bin/crossvc
# Fix permission in the package
mkdir -p $pkgdir/usr/share/${pkgname}/{Messages,Tools}
mkdir -p $pkgdir/usr/share/doc/${pkgname}
install -m755 crossvc.bin $pkgdir/usr/share/${pkgname}
install -m755 CrossVC/AppRun $pkgdir/usr/share/${pkgname}
install -m755 CrossVC/Tools/* $pkgdir/usr/share/${pkgname}/Tools
install -m644 CrossVC/Messages/* $pkgdir/usr/share/${pkgname}/Messages
for fn in CrossVC/Help/*; do
if [ -f $fn ]; then
install -m644 $fn $pkgdir/usr/share/doc/${pkgname}/
fi
done
#install -D -m644 CrossVC/Help/* $pkgdir/usr/share/doc/${pkgname}/
#cp -f CrossVC/Help/* $pkgdir/usr/share/doc/${pkgname}/ || echo "Done"
#chmod -R 644 $pkgdir/usr/share/doc/${pkgname}
#chmod -R 644 $pkgdir/usr/share/doc/${pkgname}/Translations
# Desktop related
install -Dm644 ${srcdir}/crossvc.desktop $pkgdir/usr/share/applications/crossvc.desktop
install -Dm644 CrossVC/AppIcon.xpm $pkgdir/usr/share/pixmaps/crossvc.xpm
}
| true |
0a6165031b20a4ff6bef9288523832bb839504ec | Shell | ftd-u01/minimalPreprocessing | /Pipelines-3.4.0/DiffusionPreprocessing/scripts/run_eddy.sh | UTF-8 | 8,248 | 3.859375 | 4 | [] | no_license | #!/bin/bash
#~ND~FORMAT~MARKDOWN~
#~ND~START~
#
# # run_eddy.sh
#
# ## Copyright Notice
#
# Copyright (C) 2012-2014 The Human Connectome Project
#
# * Washington University in St. Louis
# * University of Minnesota
# * Oxford University
#
# ## Author(s)
#
# * Stamatios Sotiropoulos - Analysis Group, FMRIB Centre
# * Saad Jbabdi - Analysis Group, FMRIB Center
# * Jesper Andersson - Analysis Group, FMRIB Center
# * Matthew F. Glasser - Anatomy and Neurobiology, Washington University in St. Louis
# * Timothy B. Brown, Neuroinformatics Research Group, Washington University in St. Louis
#
# ## Product
#
# [Human Connectome Project][HCP] (HCP) Pipelines
#
# ## License
#
# See the [LICENSE](https://github.com/Washington-University/Pipelines/blob/master/LICENCE.md) file
#
# ## Description
#
# This script runs FSL's eddy command as part of the Human Connectome Project's
# Diffusion Preprocessing
#
# ## Prerequisite Installed Software
#
# * [FSL][FSL] - FMRIB's Software Library (version 5.0.6)
#
# FSL's environment setup script must also be sourced
#
# ## Prerequisite Environment Variables
#
# See output of usage function: e.g. <code>$ ./run_eddy.sh --help</code>
#
# <!-- References -->
#
# [HCP]: http://www.humanconnectome.org
# [FSL]: http://fsl.fmrib.ox.ac.uk
#
#~ND~END~
# Load Function Libraries
source ${HCPPIPEDIR}/global/scripts/log.shlib # log_ functions
#
# Function Description:
# Show usage information for this script
#
usage() {
local scriptName=$(basename ${0})
echo ""
echo " Usage: ${scriptName} <options>"
echo ""
echo " Options: [ ] = optional; < > = user supplied value"
echo ""
echo " [-h | --help] : show usage information and exit with non-zero return code"
echo ""
echo " [-g | --gpu] : attempt to use the GPU-enabled version of eddy"
echo " (eddy.gpu). If the GPU-enabled version is not"
echo " found or returns a non-zero exit code, then"
echo " this script \"falls back\" to using the standard"
echo " version of eddy."
echo ""
echo " -w <working-dir> | "
echo " -w=<working-dir> | "
echo " --workingdir <working-dir> | "
echo " --workingdir=<working-dir> : the working directory (REQUIRED)"
echo ""
echo " Return code:"
echo ""
echo " 0 if help was not requested, all parameters were properly formed, and processing succeeded"
echo " Non-zero otherwise - malformed parameters, help requested or processing failure was detected"
echo ""
echo " Required Environment Variables:"
echo ""
echo " FSLDIR"
echo ""
echo " The home directory for FSL"
echo ""
}
#
# Function Description:
# Get the command line options for this script.
#
# Global Ouput Variables
# ${useGpuVersion} - Set to "True" if use has requested an attempt to use
# the GPU-enabled version of eddy
# ${workingdir} - User specified working directory
#
get_options() {
local scriptName=$(basename ${0})
local arguments=($@)
# global output variables
useGpuVersion="False"
unset workingdir
# parse arguments
local index=0
local numArgs=${#arguments[@]}
local argument
while [ ${index} -lt ${numArgs} ]; do
argument=${arguments[index]}
case ${argument} in
-h | --help)
usage
exit 1
;;
-g | --gpu)
useGpuVersion="True"
index=$(( index + 1 ))
;;
-w | --workingdir)
workingdir=${arguments[$(( index + 1 ))]}
index=$(( index + 2 ))
;;
-w=* | --workingdir=*)
workingdir=${argument/*=/""}
index=$(( index + 1 ))
;;
*)
echo "Unrecognized Option: ${argument}"
usage
exit 1
;;
esac
done
# check required parameters
if [ -z ${workingdir} ]; then
usage
echo " Error: <working-dir> not specified - Exiting without running eddy"
exit 1
fi
# report options
echo "-- ${scriptName}: Specified Command-Line Options - Start --"
echo " workingdir: ${workingdir}"
echo " useGpuVersion: ${useGpuVersion}"
echo "-- ${scriptName}: Specified Command-Line Options - End --"
}
#
# Function Description
# Validate necessary environment variables
#
validate_environment_vars() {
local scriptName=$(basename ${0})
# validate
if [ -z ${FSLDIR} ]; then
usage
echo "ERROR: FSLDIR environment variable not set"
exit 1
fi
# report
echo "-- ${scriptName}: Environment Variables Used - Start --"
echo " FSLDIR: ${FSLDIR}"
echo "-- ${scriptName}: Environment Variables Used - End --"
}
#
# Function Description
# Main processing of script
#
# Gets user specified command line options, runs appropriate eddy
#
main() {
# Get Command Line Options
#
# Global Variables Set:
# ${useGpuVersion} - Set to "True" if use has requested an attempt to use
# the GPU-enabled version of eddy
# ${workingdir} - User specified working directory
get_options $@
# Validate environment variables
validate_environment_vars $@
# Establish tool name for logging
log_SetToolName "run_eddy.sh"
# Determine eddy executable to use
#
# If the user has asked us to try to use the GPU-enabled version of eddy,
# then we check to see if that GPU-enabled version exists. If it does,
# we'll try to use it. Otherwise, we'll fall back to using the standard
# (CPU) version of eddy.
#
# If the user has not requested us to try to use the GPU-enabled version,
# then we don't bother looking for it or trying to use it.
gpuEnabledEddy="${FSLDIR}/bin/eddy.gpu"
stdEddy="${FSLDIR}/bin/eddy"
if [ "${useGpuVersion}" = "True" ]; then
log_Msg "User requested GPU-enabled version of eddy"
if [ -e ${gpuEnabledEddy} ]; then
log_Msg "GPU-enabled version of eddy found"
eddyExec="${gpuEnabledEddy}"
else
log_Msg "GPU-enabled version of eddy NOT found"
eddyExec="${stdEddy}"
fi
else
log_Msg "User did not request GPU-enabled version of eddy"
eddyExec="${stdEddy}"
fi
log_Msg "eddy executable to use: ${eddyExec}"
# Main processing - Run eddy
topupdir=`dirname ${workingdir}`/topup
${FSLDIR}/bin/imcp ${topupdir}/nodif_brain_mask ${workingdir}/
${eddyExec} --imain=${workingdir}/Pos_Neg --mask=${workingdir}/nodif_brain_mask --index=${workingdir}/index.txt --acqp=${workingdir}/acqparams.txt --bvecs=${workingdir}/Pos_Neg.bvecs --bvals=${workingdir}/Pos_Neg.bvals --fwhm=0 --topup=${topupdir}/topup_Pos_Neg_b0 --out=${workingdir}/eddy_unwarped_images --flm=quadratic -v #--resamp=lsr #--session=${workingdir}/series_index.txt
eddyReturnValue=$?
# Another fallback.
#
# If we were trying to use the GPU-enabled version of eddy, but it
# returned a failure code, then report that the GPU-enabled eddy
# failed and use the standard version of eddy.
if [ "${eddyExec}" = "${gpuEnabledEddy}" ]; then
if [ ${eddyReturnValue} -ne 0 ]; then
log_Msg "Tried to run GPU-enabled eddy, ${eddyExec}, as requested."
log_Msg "That attempt failed with return code: ${eddyReturnValue}"
log_Msg "Running standard version of eddy, ${stdEddy}, instead."
${stdEddy} --imain=${workingdir}/Pos_Neg --mask=${workingdir}/nodif_brain_mask --index=${workingdir}/index.txt --acqp=${workingdir}/acqparams.txt --bvecs=${workingdir}/Pos_Neg.bvecs --bvals=${workingdir}/Pos_Neg.bvals --fwhm=0 --topup=${topupdir}/topup_Pos_Neg_b0 --out=${workingdir}/eddy_unwarped_images --flm=quadratic -v #--resamp=lsr #--session=${workingdir}/series_index.txt
eddyReturnValue=$?
fi
fi
log_Msg "Completed with return value: ${eddyReturnValue}"
exit ${eddyReturnValue}
}
#
# Invoke the main function to get things started
#
main $@
| true |
cd171eb43bc7e0192cd44457ab87fdd200a750cc | Shell | begrif/homeconfig | /dot/profile | UTF-8 | 1,561 | 3.421875 | 3 | [] | no_license |
export LANG=en_US.UTF-8
export LC_COLLATE=C
addpath () {
case ":$PATH:" in
*:$1:*) : already on path ;;
*) case ":$2:" in
:end:) PATH=$PATH:$1 ;;
*) PATH=$1:$PATH ;;
esac
;;
esac
}
for p_dir in $HOME/bin ; do
if [ -d $p_dir ] ; then
addpath $p_dir
fi
done
for dir in $HOME/usr/bin $HOME/.cargo/bin ; do
if [ -d $p_dir ] ; then
addpath $p_dir end
fi
done
unset p_dir
case "$-" in *i*) # only run on interactive shells
if [ "X$TERM" = Xlinux ] ; then
# on console!
echo X alias added
alias X='(date +"X starting at %c"; startx; date +"X end at %c" ) > $HOME/.X.out 2>&1'
eval `ssh-agent | tee $HOME/.ssh-agent.last`
echo "SSH agent started"
ssh-add $HOME/.ssh/*_rsa
fi
if whence screen >/dev/null; then
printf "screen: "
screen -ls
alias SRD="screen -R -D"
fi
if whence tmux >/dev/null; then
printf "tmux: "
tmux list-sessions 2>/dev/null || echo
alias TRD="tmux attach -d || tmux new-session "
fi
# Some GUI programs think I want to save junk
trash=$HOME/.local/share/Trash
if [ -d $trash ] ; then
printf "removing trash -- size "
du -sh $trash
rm -rf $trash
fi
export EDITOR=vim
export VISUAL=vim
export RC=". $HOME/.interactive" ; $RC
: e $HOME/.interactive
# control characters as alias left-hand-side! Make the terminal backspace
# the "real" backspace.
alias ="stty erase '^?'"
alias ="stty erase '^H'"
set -o vi
;; esac # interactive shell wrapper
| true |
d8da3b648e63bffb401e00dca14254cd28ad54dc | Shell | mccabe082/SimpleFlightpathSimulator | /build.sh | UTF-8 | 153 | 2.625 | 3 | [] | no_license | #!/bin/bash
cd "$(dirname "$0")"
rm -rf build
mkdir build
pushd build
cmake ..
cmake --build . --config Debug
cmake --install . --config Debug
popd
| true |
b16343d4498d591b447f902fa0df5034791b80c8 | Shell | melodybliss/dotfiles | /.cshrc | UTF-8 | 3,554 | 3.0625 | 3 | [] | no_license | ######################################################################
# Melody's generic .cshrc file
# File: .cshrc
# Author: Melody Bliss <melody@melodybliss.org>
# ID: $Id$
# Revision: $Revision$
######################################################################
set HISTORY_SIZE=100
set HOST=`hostname`
if ( -x /usr/bin/uname ) then
set UNAME=/usr/bin/uname
else if ( -x /bin/uname ) then
set UNAME=/bin/uname
endif
set OS=`${UNAME}`
setenv TZ PST8PDT
setenv EDITOR vi
set OPATH=/sbin:/usr/sbin:/bin:/usr/bin:
set GCSPATH=/usr/local/gcsis-svn/bin:
set LPATH=
set XPATH=
set DPATH=
set FPATH=/usr/local/bin:
set UPATH=
set SVNPATH=
set GOPATH=
set GITPATH=
set GOPATH=${HOME}/go
setenv GOPATH ${GOPATH}
# Google Go Compiler
if ( -d /usr/local/go/bin ) then
set GOBINPATH=${GOPATH}/bin:/usr/local/go/bin:
endif
if( -d /usr/local/git ) then
set GITPATH=/usr/local/git/bin:
endif
# SunOS
if ( "${OS}" == "SunOS" ) then
if ( -d /usr/openwin/bin ) then
set XPATH=/usr/openwin/bin:
endif
if ( -d /usr/ccs/bin ) then
if ( -f /usr/ccs/bin/make ) then
set DPATH=/usr/ccs/bin:
endif
endif
endif # SunOS
# FreeBSD
if ( "${OS}" == "FreeBSD" ) then
set OPATH=${OPATH}/usr/games:
endif # FreeBSD
# Linux
if ( "${OS}" == "Linux" ) then
set OPATH=${OPATH}/usr/games:
endif
# Darwin/MacOS X
if ( "${OS}" == "Darwin" ) then
# Need to add int /opt/local/bin if darwinports is included
if ( -d /opt/local/bin ) then
set LPATH=${LPATH}/opt/local/bin:/opt/local/sbin:
endif
if ( -d /usr/X11R6/bin ) then
set XPATH=/usr/X11R6/bin:
endif
# if ( -d /Developer/usr/bin ) then
# set DPATH=/Developer/usr/bin:
# endif
endif
# If ~/bin exists, set UPATH to it.
if ( -d ${HOME}/bin ) then
set UPATH=${HOME}/bin:
endif
# if ~/svn/melody/bin exists, set SVNPATH to it
if ( -d ${HOME}/svn/melody/bin ) then
set SVNPATH=${HOME}/svn/melody/bin:
endif
# Create the path
set tPATH=${GITPATH}${UPATH}${GCSPATH}${SVNPATH}${OPATH}${LPATH}${XPATH}${DPATH}${FPATH}${GOBINPATH}
setenv PATH `echo ${tPATH} | sed 's/:$//'`
# Man Path
setenv MANPATH /usr/share/man:/usr/local/share/man:/usr/X11/man:/opt/local/man
set history=${HISTORY_SIZE}
unset autologout
set prompt="\! % "
# Process specific host .cshrc files
if ( -f ${HOME}/.cshrc.${HOST} ) then
source ${HOME}/.cshrc.${HOST}
endif
# Process aliases, both global and system specific
if ( -f ${HOME}/.alias ) then
source ${HOME}/.alias
endif
if ( -f ${HOME}/.alias.${HOST} ) then
source ${HOME}/.alias.${HOST}
endif
##
# Your previous /Users/melody/.cshrc file was backed up as /Users/melody/.cshrc.macports-saved_2009-09-14_at_16:04:02
##
# MacPorts Installer addition on 2009-09-14_at_16:04:02: adding an appropriate PATH variable for use with MacPorts.
setenv PATH /opt/local/bin:/opt/local/sbin:$PATH
# Finished adapting your PATH environment variable for use with MacPorts.
##
# Your previous /Users/melody/.cshrc file was backed up as /Users/melody/.cshrc.macports-saved_2011-11-15_at_13:46:48
##
# MacPorts Installer addition on 2011-11-15_at_13:46:48: adding an appropriate PATH variable for use with MacPorts.
setenv PATH /opt/local/bin:/opt/local/sbin:$PATH
# Finished adapting your PATH environment variable for use with MacPorts.
set path=( "/Applications/microchip/mplabc30/v3.30c/bin" $path )
set path=( "/Applications/microchip/mplabc32/v2.02/bin" $path )
# Setup DOCKER_HOST
if ( "`uname -s`" == "Darwin" ) then
if ( -x /usr/local/bin/docker && -d /Applications/boot2docker.app ) then
setenv DOCKER_HOST tcp://:2375
endif
endif
| true |
415c9f6971248eb951e5fdee8d7c3dff543ba258 | Shell | ingelity/javascript-playground | /shell_provisioner.sh | UTF-8 | 896 | 3.0625 | 3 | [] | no_license | #!/usr/bin/env bash
# set ownership of shared folder to vagrant user
sudo chown vagrant:vagrant /vagrant
# install nodejs using PPA so that we get the latest release, because
# official ubuntu repository (apt-get) probably doesn't have the latest one.
curl --silent --location https://deb.nodesource.com/setup_4.x | sudo bash -
sudo apt-get install --yes nodejs
sudo npm install -g npm@latest
#sudo npm install -g n
#sudo n stable
# install arangodb (arangodb is automatically started after install)
sudo wget https://www.arangodb.com/repositories/arangodb2/xUbuntu_14.04/Release.key
sudo apt-key add - < Release.key
sudo echo 'deb https://www.arangodb.com/repositories/arangodb2/xUbuntu_14.04/ /' >> /etc/apt/sources.list.d/arangodb.list
sudo apt-get update
sudo apt-get -q -y install arangodb=2.6.8
sudo cp /vagrant/arangod.conf /etc/arangodb/arangod.conf
#sudo /etc/init.d/arangodb restart
| true |
0eeea114fdc8a3832a2b3f59456b3c73704ce36d | Shell | elauzier/GridEngineContribScripts | /loadsensor/basic_load_sensor.sh | UTF-8 | 490 | 3.203125 | 3 | [] | no_license | #!/bin/sh
#http://docs.oracle.com/cd/E19080-01/n1.grid.eng6/817-5677/chp8-1524/index.html
myhost=`uname -n`
while [ 1 ]; do
# wait for input
read input
result=$?
if [ $result != 0 ]; then
exit 1
fi
if [ $input = quit ]; then
exit 0
fi
#send users logged in
logins=`who | cut -f1 -d" " | sort | uniq | wc -l | sed "s/^ *//"`
echo begin
echo "$myhost:logins:$logins"
echo end
done
# we never get here
exit 0 | true |
3bf79cdda9c64c3e9f2953365c8ba38fe16874fd | Shell | marcelja/contao4-debian-install | /install.sh | UTF-8 | 4,135 | 3.1875 | 3 | [] | no_license | #!/bin/bash
password=$(openssl rand -base64 14)
echo "The contao/mysql user password: $password"
echo "The install script will continue in 10 seconds"
sleep 10
if [ $(id -u) -eq 0 ]; then
pass=$(perl -e 'print crypt($ARGV[0], "password")' $password)
useradd -m -p $pass contao -s /bin/bash
usermod -aG sudo contao
fi
cd /home/contao
mkdir -p .ssh
touch .ssh/authorized_keys
chown -R contao:contao .ssh
sudo apt update
sudo apt -y install apache2
sudo sed -i "s/Options Indexes FollowSymLinks/Options FollowSymLinks/" /etc/apache2/apache2.conf
sudo systemctl stop apache2.service
sudo systemctl start apache2.service
sudo systemctl enable apache2.service
sudo apt -y install mariadb-server mariadb-client
sudo systemctl stop mariadb.service
sudo systemctl start mariadb.service
sudo systemctl enable mariadb.service
# https://bertvv.github.io/notes-to-self/2015/11/16/automating-mysql_secure_installation/
sudo mysql --user=root <<_EOF_
UPDATE mysql.user SET Password=PASSWORD('${password}') WHERE User='root';
DELETE FROM mysql.user WHERE User='';
DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1');
DROP DATABASE IF EXISTS test;
DELETE FROM mysql.db WHERE Db='test' OR Db='test\\_%';
FLUSH PRIVILEGES;
_EOF_
sudo systemctl restart mysql.service
# https://linuxhostsupport.com/blog/how-to-install-php-7-2-on-debian-9/
sudo apt -y install software-properties-common wget
sudo apt -y install lsb-release apt-transport-https ca-certificates
sudo wget -O /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg
echo "deb https://packages.sury.org/php/ $(lsb_release -sc) main" | sudo tee /etc/apt/sources.list.d/php.list
sudo apt update
sudo apt -y install php7.2 libapache2-mod-php7.2 php7.2-common php7.2-mbstring php7.2-xmlrpc php7.2-soap php7.2-gd php7.2-xml php7.2-intl php7.2-mysql php7.2-cli php7.2-zip php7.2-curl
# https://stackoverflow.com/a/2464883/5203308
sudo sed -i "s/\(max_execution_time *= *\).*/\1180/" /etc/php/7.2/apache2/php.ini
sudo sed -i "s/\(memory_limit *= *\).*/\1512M/" /etc/php/7.2/apache2/php.ini
sudo sed -i "s/\(post_max_size *= *\).*/\120M/" /etc/php/7.2/apache2/php.ini
sudo sed -i "s/\(upload_max_filesize *= *\).*/\1100M/" /etc/php/7.2/apache2/php.ini
# https://websiteforstudents.com/install-contao-cms-on-ubuntu-16-04-lts-with-apache2-mariadb-and-php-7-1-support/
sudo mysql --user=root <<_EOF_
CREATE DATABASE contaodb;
CREATE USER 'contaouser'@'localhost' IDENTIFIED BY '${password}';
GRANT ALL ON contaodb.* TO 'contaouser'@'localhost' IDENTIFIED BY '${password}' WITH GRANT OPTION;
FLUSH PRIVILEGES;
_EOF_
if ! grep -q "innodb_large_prefix" /etc/mysql/mariadb.conf.d/50-server.cnf; then
sudo sed -i '/\[mysqld\]/a innodb_large_prefix = 1\ninnodb_file_format = Barracuda\ninnodb_file_per_table = 1\n' /etc/mysql/mariadb.conf.d/50-server.cnf
fi
sudo systemctl restart mysql.service
curl -sS https://getcomposer.org/installer | php
sudo mv composer.phar /usr/local/bin/composer
sudo apt -y install unzip
# https://github.com/composer/composer/issues/945#issuecomment-8552757
/bin/dd if=/dev/zero of=/var/swap.1 bs=1M count=1024
/sbin/mkswap /var/swap.1
/sbin/swapon /var/swap.1
su - contao -c "composer create-project --no-dev contao/managed-edition contaoproject"
# https://websiteforstudents.com/install-contao-cms-on-ubuntu-16-04-lts-with-apache2-mariadb-and-php-7-1-support/
ipaddress=$(curl ifconfig.me)
echo "<VirtualHost *:80>
ServerAdmin admin@example.com
DocumentRoot /home/contao/contaoproject/web
ServerName $ipaddress
ServerAlias $ipaddress
<Directory /home/contao/contaoproject/web/>
Options +FollowSymlinks
AllowOverride All
Require all granted
</Directory>
ErrorLog ${APACHE_LOG_DIR}/error.log
CustomLog ${APACHE_LOG_DIR}/access.log combined
</VirtualHost>" > /etc/apache2/sites-available/contao.conf
sudo chown -R www-data:www-data contaoproject/
sudo chmod -R 755 contaoproject
sudo a2ensite contao.conf
sudo a2enmod rewrite
sudo systemctl restart apache2.service
echo "Install script done, now open: http://$ipaddress/contao/install"
| true |
6467dbc491e4c4727bd6371748b40c45eb11633b | Shell | a427538/px4-sitl-docker | /entrypoint.sh | UTF-8 | 856 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Start virtual X server in the background
# - DISPLAY default is :99, set in dockerfile
# - Users can override with `-e DISPLAY=` in `docker run` command to avoid
# running Xvfb and attach their screen
if [[ -x "$(command -v Xvfb)" && "$DISPLAY" == ":99" ]]; then
echo "Starting Xvfb"
Xvfb :99 -screen 0 1600x1200x24+32 &
fi
# Starting PX4 SITL
# /bin/sh -c cd /opt/px4/firmware/build/px4_sitl_default/tmp && /opt/px4/firmware/Tools/sitl_run.sh /opt/px4/firmware/build/px4_sitl_default/bin/px4 none none iris /opt/px4/firmware /opt/px4/
# Use the LOCAL_USER_ID if passed in at runtime
if [ -n "${LOCAL_USER_ID}" ]; then
echo "Starting with UID : $LOCAL_USER_ID"
# modify existing user's id
usermod -u $LOCAL_USER_ID user
# run as user
exec gosu user "$@"
else
exec "$@"
fi
| true |
eb9fb435cd3876f9329e0fe4f4d99b5dbc6299fd | Shell | baidang201/eos-hackathon-2 | /backend/dev/fixture.sh | UTF-8 | 3,981 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env bash
#
# Loads test fixture (account, etc) to nodeos
#
set -eu
set -o pipefail
BIN_DIR="$(cd $(dirname $0) && pwd)"
. "$BIN_DIR/_local_chain.incl.sh"
if [[ $# -ge 1 ]]; then
export NODE_URL="$1"
fi
#if "$BIN_DIR/cleos" get account supplier &>/dev/null; then
# die 'Already loaded (supplier found)'
#fi
create_account() {
local NAME="$1"
local PRIVATE="$2"
local PUBLIC="$3"
"$BIN_DIR/cleos" create account eosio "$NAME" "$PUBLIC" &>/dev/null || true
"$BIN_DIR/cleos" wallet import "$PRIVATE" &>/dev/null || true
}
create_account supplier '5K8uyLerwF8nw6tnq9DDTK4GGUJDKyaWAPMrTBAy1xGrQvP1z49' 'EOS64wVjdJzSKEBKgt3Lg6TWr97ZG8cQB2BvecCUWyfRpQKK6MQmT'
create_account electricity '5JR2XT3nokVfe5TgDrH8RPdjSC9WnyB1eQSzzus8aWe1FvFsM2Q' 'EOS7oPdzdvbHcJ4k9iZaDuG4Foh9YsjQffTGniLP28FC8fbpCDgr5'
create_account rfidreader '5JERmsYigQaH3us3JkEWWPAxJmBF9cq32d3e6sSLSKVDpNGkhQH' 'EOS6yA6s7jshQPjtE3ir36rSYULmoxmXYVXy2XMnrBfRzuTgnCW2B'
create_account rfiduser1 '5JV8sTXJN9MER8pnYcJZ3WE1vbRQRjmibwvSraX6JFc7BLod8Y8' 'EOS5d5ay4r2Ah2kK2BpXYP3TayPGy95cBSB4CxFULzt7XLgczDdSY'
create_account rfiduser2 '5KenTrMhcrL4khxRb9XgAbSDeWfkoidnRzVJessEhfQGUAYNSQC' 'EOS4uBeFZBUxMcpE4bCXGZVBRDgQoFdoWfP5ChYaqTDegUaeVrEjj'
create_account billelectro '5K6FSD2BKpG6Tzxo48iJQQ6ZgLY8kDuGdjNfnA7niHbdiJDsASx' 'EOS8ZSpHAvVy5zxzp4z39gieesnARgu1yDRj45AxptjovNkMFW6xJ'
create_account billrfid '5J2w4621wvHFLUcNRv3wULHEDV3QeY4z8FV33xxzogD4KJRQDsZ' 'EOS8LFuvw7SN6A13gaPq1GYsZgFexmtTJGpm2xgJXxXg7NVhRMpkQ'
# ./cleos push action supplier adduser '["user1", "descr1", "meta1"]' -p supplier
# ./cleos push action supplier addrate '["simple electro", "billelectro", "100 10"]' -p supplier #billing_meta: <uint: watts/hour per measurement> <uint: payment per kWt/hour>
# ./cleos push action supplier adddevice '["electricity", "user1", 0, "electro counter"]' -p supplier
# ./cleos push action supplier addbalance '["user1", "100000"]' -p supplier
# rdif case:
#
# NODE_URL='https://api.eos-hackathon.smartz.io/' ./backend/dev/cleos push action billrfid addframe '["rfidreader"]' -p billrfid
# NODE_URL='https://api.eos-hackathon.smartz.io/' ./backend/dev/cleos push action billrfid addsku '[132353, 55]' -p billrfid
# NODE_URL='https://api.eos-hackathon.smartz.io/' ./backend/dev/cleos push action billrfid addsku '[13, 100]' -p billrfid
#
# NODE_URL='https://api.eos-hackathon.smartz.io/' ./backend/dev/cleos push action supplier addrate '["", "billrfid", ""]' -p supplier
# NODE_URL='https://api.eos-hackathon.smartz.io/' ./backend/dev/cleos push action supplier adduser '["rfiduser1", "", ""]' -p supplier
# NODE_URL='https://api.eos-hackathon.smartz.io/' ./backend/dev/cleos push action supplier adddevice '["rfiduser1", "rfiduser1", 0, ""]' -p supplier
#
# NODE_URL='https://api.eos-hackathon.smartz.io/' ./backend/dev/cleos push action eosio updateauth '{"account":"rfiduser1","permission":"active","parent":"owner","auth":{"keys":[{"key":"EOS5d5ay4r2Ah2kK2BpXYP3TayPGy95cBSB4CxFULzt7XLgczDdSY", "weight":1}],"threshold":1,"accounts":[{"permission":{"actor":"billrfid","permission":"eosio.code"},"weight":1},{"permission":{"actor":"supplier","permission":"eosio.code"},"weight":1}],"waits":[]}}' -p rfiduser1
# NODE_URL='https://api.eos-hackathon.smartz.io/' ./backend/dev/cleos push action eosio updateauth '{"account":"rfiduser2","permission":"active","parent":"owner","auth":{"keys":[{"key":"EOS4uBeFZBUxMcpE4bCXGZVBRDgQoFdoWfP5ChYaqTDegUaeVrEjj", "weight":1}],"threshold":1,"accounts":[{"permission":{"actor":"billrfid","permission":"eosio.code"},"weight":1},{"permission":{"actor":"supplier","permission":"eosio.code"},"weight":1}],"waits":[]}}' -p rfiduser2
# buy
# NODE_URL='https://api.eos-hackathon.smartz.io/' ./backend/dev/cleos push action supplier devicesignal '["rfiduser1", 132353]' -p rfiduser1
#
# it wont be billed so using rfidreader as user
# NODE_URL='https://api.eos-hackathon.smartz.io/' ./backend/dev/cleos push action supplier adduser '["rfidreader", "", ""]' -p supplier
# NODE_URL='https://api.eos-hackathon.smartz.io/' ./backend/dev/cleos push action supplier adddevice '["rfidreader", "rfidreader", 0, ""]' -p supplier
# NODE_URL='https://api.eos-hackathon.smartz.io/' ./backend/dev/cleos push action eosio updateauth '{"account":"rfidreader","permission":"active","parent":"owner","auth":{"keys":[{"key":"EOS6yA6s7jshQPjtE3ir36rSYULmoxmXYVXy2XMnrBfRzuTgnCW2B", "weight":1}],"threshold":1,"accounts":[{"permission":{"actor":"billrfid","permission":"eosio.code"},"weight":1},{"permission":{"actor":"supplier","permission":"eosio.code"},"weight":1}],"waits":[]}}' -p rfidreader
# frame at exit
# NODE_URL='https://api.eos-hackathon.smartz.io/' ./backend/dev/cleos push action supplier devicesignal '["rfidreader", 132353]' -p rfidreader
| true |
670c3b70a09ec7caf1d89e20e30fb834a95bcf45 | Shell | isabella232/fs-stress-test | /sharness/lib/test-lib.sh | UTF-8 | 1,051 | 3.46875 | 3 | [
"MIT"
] | permissive | # Test framework for go-ipfs
#
# Copyright (c) 2014 Christian Couder
# MIT Licensed; see the LICENSE file in this repository.
#
# We are using sharness (https://github.com/mlafeldt/sharness)
# which was extracted from the Git test framework.
# Add current directory to path, for multihash tool.
PATH=$(pwd)/bin:${PATH}
# Set sharness verbosity. we set the env var directly as
# it's too late to pass in --verbose, and --verbose is harder
# to pass through in some cases.
test "$TEST_VERBOSE" = 1 && verbose=t
# assert the `ipfs` we're using is the right one.
if test `which ipfs` != $(pwd)/bin/ipfs; then
echo >&2 "Cannot find the tests' local ipfs tool."
echo >&2 "Please check test and ipfs tool installation."
exit 1
fi
SHARNESS_LIB="lib/sharness/sharness.sh"
. "$SHARNESS_LIB" || {
echo >&2 "Cannot source: $SHARNESS_LIB"
echo >&2 "Please check Sharness installation."
exit 1
}
# ipfs-stress-fs basics
ipfs_init() {
export IPFS_PATH="$(pwd)/.ipfs"
ipfs init -b 1024 >/dev/null
ipfs bootstrap rm --all >/dev/null # clear bootstrap list.
}
| true |
92f32e15ad1baff80f9556636e9da85ecb368632 | Shell | Open-CAS/open-cas-linux | /configure.d/1_bd_part_count.conf | UTF-8 | 1,396 | 3.28125 | 3 | [
"BSD-3-Clause",
"MIT"
] | permissive | #!/bin/bash
#
# Copyright(c) 2012-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
. $(dirname $3)/conf_framework.sh
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "struct gendisk *disk = NULL; struct xarray xa; xa = disk->part_tbl;" "linux/blkdev.h" ||
compile_module $cur_name "struct gendisk *disk = NULL; struct xarray xa; xa = disk->part_tbl;" "linux/genhd.h"
then
echo $cur_name "1" >> $config_file_path
elif compile_module $cur_name "struct gendisk *disk = NULL; struct disk_part_tbl *ptbl; ptbl = disk->part_tbl;" "linux/genhd.h"
then
echo $cur_name "2" >> $config_file_path
else
echo $cur_name "X" >> $config_file_path
fi
}
# Emit the cas_blk_get_part_count() implementation matching the variant id
# recorded by check() ($1). The C bodies are passed verbatim to
# add_function(); an unknown id aborts the configure step.
apply() {
case "$1" in
# Variant 1: iterate the xarray-based partition table.
"1")
add_function "
static inline int cas_blk_get_part_count(struct block_device *bdev)
{
struct block_device *part;
unsigned long idx;
int count = 0;
xa_for_each(&bdev->bd_disk->part_tbl, idx, part) {
count++;
}
return count;
}" ;;
# Variant 2: walk the RCU-protected disk_part_tbl array.
"2")
add_function "
static inline int cas_blk_get_part_count(struct block_device *bdev)
{
struct disk_part_tbl *ptbl;
int i, count = 0;
rcu_read_lock();
ptbl = rcu_dereference(bdev->bd_disk->part_tbl);
for (i = 0; i < ptbl->len; ++i) {
if (rcu_access_pointer(ptbl->part[i]))
count++;
}
rcu_read_unlock();
return count;
}" ;;
*)
exit 1
esac
}
# Hand control to the shared config-framework driver. "$@" is quoted so
# arguments containing whitespace or glob characters are forwarded to
# check()/apply() intact (SC2068).
conf_run "$@"
| true |
332a2a43b56351d1325d51b6c58901176b69c857 | Shell | fordream/spritebuilder-lua | /sbluaexample/tools/make_res.sh | UTF-8 | 1,129 | 3.09375 | 3 | [
"MIT"
] | permissive | platform=$1
# $2: source file name (unused below); $3: destination root.
filename=$2
# NOTE(review): "$(unknown)" appears throughout this script where variable
# references presumably once stood (likely lost when the file was exported);
# as written it runs the nonexistent command `unknown` and substitutes empty
# output — confirm against the original repository before relying on this.
dest_dir=$3/$(unknown)
# Android: pack sprites into an RGBA8888 PNG atlas + cocos2d plist.
if [ "$platform" = "android" ]; then
echo buiding Resources Android start
mkdir -p ${dest_dir}
rm -rf $(unknown).png $(unknown).plist
texturepacker --data $(unknown).plist --allow-free-size --algorithm MaxRects --maxrects-heuristics best --shape-padding 2 --border-padding 0 --padding 0 --inner-padding 0 --disable-rotation --opt RGBA8888 --dither-none-nn --dpi 72 --format cocos2d ../$(unknown) --sheet $(unknown).png
mv $(unknown).plist $(unknown).png ${dest_dir}
echo @@@@ building common resource done!
fi
# iOS: same packing but into a compressed PVRTC4 .pvr.ccz atlas.
if [ "$platform" = "ios" ]; then
echo buiding Resources ios start
mkdir -p ${dest_dir}
rm -rf $(unknown).pvr.ccz $(unknown).plist
texturepacker --texture-format pvr2ccz --data $(unknown).plist --allow-free-size --algorithm MaxRects --maxrects-heuristics best --shape-padding 2 --border-padding 0 --padding 0 --inner-padding 0 --disable-rotation --opt PVRTC4 --dither-none-nn --dpi 72 --format cocos2d ../$(unknown) --sheet $(unknown).pvr.ccz
mv $(unknown).plist $(unknown).pvr.ccz ${dest_dir}
echo @@@@ building common resource done!
fi
| true |
1384c46598244653862b67394e8ef6a8925a56f3 | Shell | ofaaland/func | /scripts/func.hmsout.sh | UTF-8 | 1,288 | 3.40625 | 3 | [] | no_license | #!/bin/bash
####################
# hmsout <number> <units>
# Convert the number in hours, minutes or seconds into H:m:s format
####################
# Author - Robert E. Novak aka REN
# sailnfool@gmail.com
# skype:sailnfool.ren
#_____________________________________________________________________
# Rev.|Auth.| Date | Notes
#_____________________________________________________________________
# 1.0 | REN |01/15/2020| original version
#_____________________________________________________________________
####################
# Pull in errecho() used to report invalid units below.
source func.errecho
# Include guard: define hmsout only once per shell session. ${...:-} keeps
# the test safe under `set -u`.
if [ -z "${__func_hmsout:-}" ]
then
	export __func_hmsout=1

	####################
	# hmsout number units
	# Convert a non-negative integer duration expressed in seconds,
	# minutes or hours into a zero-padded "HH:MM:SS" string on stdout
	# (no trailing newline).
	# Arguments:
	#   $1 - integer quantity
	#   $2 - one of: seconds, minutes, hours
	# On an unrecognized unit, reports via errecho and exits 1.
	####################
	function hmsout() {
		number=$1
		units=$2

		# Plain assignments (x=$((...))) are used instead of ((x=...))
		# so a computed value of 0 does not yield a non-zero status,
		# which would abort callers running under `set -e`.
		case ${units} in
		seconds)
			hour=$((number / 3600))
			mins=$(((number - hour * 3600) / 60))
			sec=$((number - (hour * 3600 + mins * 60)))
			;;
		minutes)
			hour=$((number / 60))
			mins=$((number - hour * 60))
			sec=0
			;;
		hours)
			hour=$number
			mins=0
			sec=0
			;;
		*)
			# Bug fix: the original pattern was '\?', which only matches a
			# literal "?" — any other bad unit (e.g. "days") fell through
			# the case silently. '*' catches every unrecognized value.
			errecho ${LINENO} ${FUNCNAME} "Invalid units=${units}"
			exit 1
			;;
		esac

		printf "%02d:%02d:%02d" "${hour}" "${mins}" "${sec}"
	}
fi # if [ -z "${__func_hmsout}" ]
# vim: set syntax=bash, ts=2, sw=2, lines=55, columns=120,colorcolumn=78
| true |
dfd407d9ce020f4108df56123a26422273a5aa43 | Shell | ArtieReus/go-tester | /ci/prepare-release | UTF-8 | 569 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Abort on any error and trace each command.
set -e -x
mkdir -p release;
version="1.0.7"
# Concourse-style release metadata: name/tag files plus the commit to tag.
echo "v$version" > release/name
echo "v$version" > release/tag
git -C . rev-parse HEAD > release/commitish
cat > release/body <<EOF
A release candidate of the go-tester.
EOF
# Collect the prebuilt binaries and shrink them with upx before uploading.
mkdir -p release/artifacts
cp bin/go-tester_* release/artifacts/
upx release/artifacts/go-tester_windows_amd64.exe
upx release/artifacts/go-tester_linux_amd64
# Publish a GitHub release via hub, attaching both binaries.
hub release create -a release/artifacts/go-tester_windows_amd64.exe -a release/artifacts/go-tester_linux_amd64 -F release/body -t $(cat release/commitish) $(cat release/tag)
| true |
1f925a351d9f82167e76db7b3b227ad730d3c2fe | Shell | pjdiebold/OIL-PCR_Linking_plasmid-based_beta-lactamases | /16S_and_rarefaction/read_processing/MOTHUR_classify_filtered.sh | UTF-8 | 556 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Classify OTU sequences against a SILVA v132 reference using mothur.
WRK=/workdir/users/pd378/oilPCR/all_16s_for_paper #UPDATE THE WORKDIR
FASTAS_ALL=/workdir/users/pd378/oilPCR/all_16s_for_paper/s5_cluster_otus/all_otu.fa
# Basename of the SILVA alignment/taxonomy pair ($SILVA.align / $SILVA.tax).
SILVA=/workdir/users/pd378/DataBases/MOTHUR_SILVA/silva.nr_v132.V4_oil
OUT=$WRK/s5_cluster_otus
cd $OUT
# Batch-mode mothur command string; outputs land in $OUT (the cwd).
/programs/mothur/mothur "#classify.seqs(fasta=$FASTAS_ALL, template=$SILVA.align, taxonomy=$SILVA.tax, processors=10, cutoff=0)"
#/programs/mothur/mothur "#classify.seqs(fasta=ALL_OTUs.fasta, template=$SILVA/silva.nr_v132.align, taxonomy=$SILVA/silva.nr_v132.tax, processors=20)"
| true |
976e8a7f180eae5b2e5290f90bace865ec33e447 | Shell | zw963/asuswrt-merlin-v2ray | /deploy_tls | UTF-8 | 673 | 3.390625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Refuse to run until the placeholder domain in xray_server.json is replaced.
# The echoed strings contain raw ANSI color sequences (the escape bytes were
# lost in this copy) — presumably meant to highlight the placeholder.
if grep -qs -e 'set_your_domain_name_here' ./xray_server.json; then
echo "You must change \`[0m[33mset_your_domain_name_here[0m' in \`./xray_server.json' to a domain name which point to this host!"
echo "e.g. if your's domain name is: \`a.example.com', please set to \`example.com'."
exit
fi
# SECURITY NOTE(review): this fetches a remote script over HTTPS and evals it,
# trusting gitlab.com and that pinned tag completely. It supplies deploy_start,
# deploy_tls and export_variable used below.
self="$(\curl -sS https://gitlab.com/zw963/deployment_bash/-/raw/v0.8.3/deploy_start.sh)" && eval "$self"
export_variable target=$1
# Derive the domain from the certificateFile path (/etc/ssl/<domain>/fullchain.pem).
export_variable domain_name=$(cat ./xray_server.json |grep certificateFile | sed 's#.*/etc/ssl/\(.*\)/fullchain.pem.*#\1#g')
echo $domain_name
deploy_start
set -eu
# Issue/renew the certificate, then restart xray to pick it up.
deploy_tls "$domain_name" "systemctl restart xray"
| true |
8bf56f5ef0be9c56200a782345a505f1bf26743e | Shell | walshie4/Ultimate-Blocklist | /UpdateList.sh | UTF-8 | 4,265 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# A simple script that downloads all blacklists in the list, and saves them in one mega-list
# Written by: Adam Walsh
# Written on 2/24/14
#This has been tested on Ubuntu 12.04
#-----CONFIG-----
LIST="list.txt" #This is the name of the final list file
# -c DIR: override the Transmission config directory; -z: gzip the result;
# -h: usage. Leading ':' enables silent error handling via the ':' case.
while getopts ":c:zh" opt; do
case $opt in
c)
CONF_DIR=$OPTARG
;;
z)
zip=true
;;
h)
echo -ne "Usage: -c config dir\n\t-z gzip result file ( doesn't work with daemon 2.84 )\n"
exit 0
;;
\?)
echo "Invalid option: -$OPTARG" >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument." >&2
exit 1
;;
esac
done
# Resolve the Transmission config dir: explicit -c wins, then the macOS
# default, then the Linux default.
if [[ ! -z $CONF_DIR ]]; then
path_to_config=$CONF_DIR
elif [[ $OSTYPE =~ "darwin" ]]; then
path_to_config=$HOME/Library/Application\ Support/Transmission
else
path_to_config=$HOME/.config/transmission
fi
blocklist_path=$path_to_config/blocklists
# Human-readable names for the lists below; TITLEs[i] pairs with URLs[i].
TITLEs=("Bluetack LVL 1" "Bluetack LVL 2" "Bluetack LVL 3" "Bluetack edu" "Bluetack ads"
"Bluetack spyware" "Bluetack proxy" "Bluetack badpeers" "Bluetack Microsoft" "Bluetack spider"
"Bluetack hijacked" "Bluetack dshield" "Bluetack forumspam" "Bluetack webexploit" "TBG Primary Threats"
"TBG General Corporate Range" "TBG Buissness ISPs" "TBG Educational Institutions"
)
# iblocklist download endpoints, all in p2p format, gzip-compressed.
URLs=("http://list.iblocklist.com/?list=bt_level1&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=bt_level2&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=bt_level3&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=bt_edu&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=bt_ads&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=bt_spyware&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=bt_proxy&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=bt_templist&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=bt_microsoft&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=bt_spider&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=bt_hijacked&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=bt_dshield&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=ficutxiwawokxlcyoeye&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=ghlzqtqxnzctvvajwwag&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=ijfqtofzixtwayqovmxn&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=ecqbsykllnadihkdirsh&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=jcjfaxgyyshvdbceroxf&fileformat=p2p&archiveformat=gz"
"http://list.iblocklist.com/?list=lljggjrpmefcwqknpalp&fileformat=p2p&archiveformat=gz"
)
#---END CONFIG---
# info(): progress output that is silenced when not attached to a terminal
# (e.g. when run from cron).
if tty -s; then
info() {
echo "$@"
}
else # we're non-interactive, no output needed
info() {
true
}
fi
# die(): print a message and abort. NOTE(review): the message goes to
# stdout, not stderr — confirm whether that is intentional.
die() {
echo "$@"
exit 1
}
rm -f $LIST #delete the old list
# Pick a downloader: prefer wget, fall back to curl; the chosen binary path
# is captured in $wget / $curl as a side effect of the condition.
if wget=$(command -v wget); then
download() {
$wget -q -O "list.gz" "$1"
}
elif curl=$(command -v curl); then
download() {
$curl "$1" -L -o "list.gz"
}
else
die "$0: 'wget' or 'curl' required but not found. Aborting."
fi
# Download every list, decompress it, and append it to $LIST.
index=0
for url in "${URLs[@]}"; do
title="${TITLEs[$index]}"
info "Downloading list $title"
download "$url" || die "Cannot download from $url"
info "Adding IP's to list file..."
gunzip -c "list.gz" >> "$LIST" || die "Cannot append to list" #append to list file
rm "list.gz" || die "Cannot remove downloaded file"
info ""
index=$((index+=1))
done
# remove duplicate entries
LIST_UNIQUE="unique-$LIST"
sort -u $LIST > $LIST_UNIQUE
mv $LIST_UNIQUE $LIST
# $zip is only set when -z was passed; install either the gzipped or the
# plain list into Transmission's blocklists directory.
if [[ ! -z $zip ]]; then
info "Zipping..."
gzip -c $LIST > list.gz || die "Cannot gzip"
info "Copying zipped list to $blocklist_path"
cp list.gz "$blocklist_path/" || die "Cannot copy to $blocklist_path/"
else
info "Copying list to $blocklist_path"
cp list.txt "$blocklist_path/" || die "Cannot copy to $blocklist_path/"
fi
wc -l $LIST || die "Cannot count lines" #print out some list stats
rm -f list.* || die "Cannot cleanup"
info "Done!"
info "Restart transmission"
| true |
1fb81d8db42ed493f22817676c3c92e4d59558f2 | Shell | galaxy001/artools | /bin/pkg/pkg2yaml.in | UTF-8 | 4,691 | 3.484375 | 3 | [] | no_license | #!/bin/bash
#
# Copyright (C) 2018-19 artoo@artixlinux.org
# Copyright (C) 2018 Artix Linux Developers
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
m4_include(lib/base/message.sh)
m4_include(lib/base/yaml.sh)
#{{{ functions
#{{{ functions
# Append one PKGBUILD attribute to the global $Yaml buffer.
# $1/$2/$3: indent levels for list-form map key, list items, and scalar
# maps respectively; $4: attribute name; $5..: attribute values.
# Known single-valued attributes are written as scalar maps, everything
# else as a key followed by a sequence. Relies on write_yaml_map /
# write_yaml_seq from the m4-included yaml library.
srcyaml_write_attr(){
local ident1="$1" ident2="$2" ident3="$3"
local attrname=$4 attrvalues=("${@:5}")
# normalize whitespace, strip leading and trailing
attrvalues=("${attrvalues[@]//+([[:space:]])/ }")
attrvalues=("${attrvalues[@]#[[:space:]]}")
attrvalues=("${attrvalues[@]%[[:space:]]}")
case $attrname in
pkgver|pkgrel|epoch|url|install|changelog)
for v in "${attrvalues[@]}"; do
Yaml+=$(write_yaml_map "$ident3" "$attrname" "$v")
done
;;
*)
Yaml+=$(write_yaml_map "$ident1" "$attrname")
for v in "${attrvalues[@]}"; do
Yaml+=$(write_yaml_seq "$ident2" "$v")
done
;;
esac
}
# Look up attribute $2 (scalar when $3=0, array when $3=1) for package $1
# via get_pkgbuild_attribute and, when present, emit it with the indent set
# for the global section (empty $1) or a per-package section.
pkgbuild_extract_to_yaml() {
local pkgname=$1 attrname=$2 isarray=$3 outvalue=
if get_pkgbuild_attribute "$pkgname" "$attrname" "$isarray" 'outvalue'; then
[[ -z $pkgname ]] && srcyaml_write_attr 2 4 2 "$attrname" "${outvalue[@]}"
[[ -n $pkgname ]] && srcyaml_write_attr 4 6 2 "$attrname" "${outvalue[@]}"
fi
}
# Emit the detailed attribute set for package $1 (or the pkgbase section
# when $1 is empty). Reads the caller-scoped arrays $singlevalued and
# $multivalued, then adds the per-architecture variants (depends_x86_64,
# source_armv7h, ...) for every arch the PKGBUILD declares.
srcyaml_write_section_details() {
local attr package_arch a
local multivalued_arch_attrs=(source provides conflicts depends replaces
optdepends makedepends checkdepends)
# "${known_hash_algos[@]/%/sums}")
for attr in "${singlevalued[@]}"; do
pkgbuild_extract_to_yaml "$1" "$attr" 0
done
for attr in "${multivalued[@]}"; do
pkgbuild_extract_to_yaml "$1" "$attr" 1
done
get_pkgbuild_attribute "$1" 'arch' 1 'package_arch'
for a in "${package_arch[@]}"; do
# 'any' is special. there's no support for, e.g. depends_any.
[[ $a = any ]] && continue
for attr in "${multivalued_arch_attrs[@]}"; do
pkgbuild_extract_to_yaml "$1" "${attr}_$a" 1
done
done
}
# Write the top-level "pkgbase" section into $Yaml; details are included
# only when the -d flag has not cleared $details.
yaml_write_global() {
local singlevalued=(pkgver pkgrel epoch url install changelog) #pkgdesc
local multivalued=(arch groups license checkdepends makedepends
depends provides conflicts replaces)
#noextract options backup optdepends
#source validpgpkeys "${known_hash_algos[@]/%/sums}")
Yaml+=$(write_empty_line)
Yaml+=$(write_yaml_map 0 "pkgbase")
Yaml+=$(write_yaml_map 2 "name" "${pkgbase:-$pkgname}")
${details} && srcyaml_write_section_details ''
Yaml+=$(write_empty_line)
}
# Write the "packages" section: one sequence entry per name in the
# PKGBUILD's $pkgname array, with per-package details when enabled.
yaml_write_package() {
local singlevalued=(url install changelog) #pkgdesc
local multivalued=(arch groups license checkdepends depends
provides conflicts replaces) #options backup optdepends)
Yaml+=$(write_yaml_map 0 "packages")
for pkg in "${pkgname[@]}"; do
Yaml+=$(write_yaml_seq_map 2 "pkgname" "$pkg")
${details} && srcyaml_write_section_details "$pkg"
done
Yaml+=$(write_empty_line)
}
# Write the full version string and the list of package file basenames
# produced by makepkg (via print_all_package_names).
yaml_write_fileinfo(){
local version
version=$(get_full_version)
pkgbase=${pkgbase:-$pkgname}
Yaml+=$(write_yaml_map 0 "version" "${version:-0}")
Yaml+=$(write_empty_line)
local pkgfile
pkgfile=$(print_all_package_names)
Yaml+=$(write_yaml_map 0 "files")
for f in ${pkgfile}; do
Yaml+=$(write_yaml_seq 2 "${f##*/}")
done
Yaml+=$(write_empty_line)
}
# Assemble the whole document in $Yaml and print it to stdout.
write_srcyaml(){
Yaml=$(write_yaml_header)
yaml_write_global
yaml_write_package
yaml_write_fileinfo
printf '%s' "${Yaml}"
}
#}}}
# Print usage and exit with the status given as $1.
usage() {
echo "Usage: ${0##*/} [options]"
echo " -d Don't include details"
echo ' -h This help'
echo ''
exit "$1"
}
details=true
opts='dh'
while getopts "${opts}" arg; do
case "${arg}" in
d) details=false ;;
h|?) usage 0 ;;
*) echo "invalid argument '${arg}'"; usage 1 ;;
esac
done
shift $(( OPTIND - 1 ))
# First positional argument: directory containing a PKGBUILD.
srcpath=$(readlink -f "$1")
[[ -f "$srcpath"/PKGBUILD ]] || die "%s/PKGBUILD does not exist!" "$srcpath"
package="$srcpath"/PKGBUILD; shift
# shellcheck disable=1090
# Source the PKGBUILD so its variables (pkgname, pkgver, ...) are in scope,
# load makepkg's configuration, then emit the YAML document.
. "${package}"
load_makepkg_config
write_srcyaml
| true |
fbb4827eab173ae232ad8a67e2c0656b979074a1 | Shell | olsonanl/p3_data | /updateGenomes.sh | UTF-8 | 365 | 2.640625 | 3 | [] | no_license | for genome in `cat $1`;
do
echo ""
echo "Processing $genome"
echo ""
# Flip the per-genome delete script to dry-run mode before executing it.
perl -pi -e 's/commit=true/commit=false/' $genome.delete_pathway.sh
chmod 755 $genome.delete_pathway.sh
./$genome.delete_pathway.sh
# Push the genome's regenerated JSON documents to the respective Solr cores.
post.update.sh genome_feature $genome.genome_feature.json
post.update.sh pathway $genome.pathway.json
post.update.sh sp_gene $genome.sp_gene.json
done
| true |
cdb6ec220d457d63577b6dc1a37a6f85846d2fe9 | Shell | anilburakbilsel/Learn-Programming | /Linux/bash/bashbasics.sh | UTF-8 | 528 | 3.3125 | 3 | [] | no_license | env # displays all environment variables
# Quick-reference commands for inspecting the current shell environment.
echo $SHELL # displays the shell you're using
echo $BASH_VERSION # displays bash version
bash # if you want to use bash (type exit to go back to your previously opened shell)
whereis bash # locates the binary, source and manual-page for a command
which bash # finds out which program is executed as 'bash' (default: /bin/bash, can change across environments)
clear # clears content on window (hide displayed lines)
3ac989c2be7caa9dcf78d150b886900ddfa4c80f | Shell | bayvictor/distributed-polling-system | /bin/curdir_parse_code_goo_search_pages_and_svn_all.sh | UTF-8 | 676 | 2.921875 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | echo "\nWARNING!!! \n"
echo "in firefox, using repagnation for pack all code.google.com search in one page, save it here!"
echo "\n"
echo "^C to break, anykey to continue parsing all *.html -- in current dir! ";read readline
# Extract code.google.com project links ("/p/<name>/") from every saved
# search-results page into lastlist.txt, one URL per line.
cat *.html |sed 's|<a href=|\n|g'|grep -e "^\"" | cut -d'"' -f2 |grep -e "\/$"| grep -e "\/p\/" > lastlist.txt
# Reduce the URLs to bare project names.
cat lastlist.txt |sed -e 's|http://code.google.com/p/||g;s|/$||g' > test0.txt
# Build checkout target directories ("<name>-read-only") ...
cat test0.txt|sed -e "s|$|-read-only|g" > test01.txt
# ... and the matching "svn checkout http://<name>.googlecode.com/svn/trunk/" commands.
cat test0.txt|sed -e "s|$|.googlecode.com/svn/trunk/ |g;s|^|svn checkout http://|g" > test00.txt
# Join command + target dir into an executable batch script and run it.
paste test00.txt test01.txt >code_goo__downall.sh
chmod +x ./code_goo__downall.sh
./code_goo__downall.sh
| true |
dd48f481a4b39339f1486f957041aaedf7f48fa9 | Shell | mjkim610/cloudy | /fullvirtualization/helpers/provision_vm.sh | UTF-8 | 1,633 | 3.03125 | 3 | [
"MIT"
] | permissive | # Install guest additions
sudo apt-get install virtualbox-guest-dkms
sudo apt-get install git
# Change directory
cd ~/VirtualBox\ VMs/
# Set variables
VM_NAME="ubuntu16"
VM_HD_PATH="ubuntu16.vdi" # The path to VM hard disk (to be created).
HD_SIZE=10000
RAM_SIZE=4096
VRAM_SIZE=128
VM_ISO_PATH=~/ubuntu-16.04.3-server-amd64.iso # Change path as needed
# SHARED_PATH=~ # Share home directory with the VM
# Create and modify VM spec
vboxmanage createvm --name $VM_NAME --ostype Ubuntu_64 --register
vboxmanage createhd --filename $VM_NAME.vdi --size $HD_SIZE
vboxmanage storagectl $VM_NAME --name "SATA Controller" --add sata --controller IntelAHCI
vboxmanage storageattach $VM_NAME --storagectl "SATA Controller" --port 0 --device 0 --type hdd --medium $VM_HD_PATH
vboxmanage storagectl $VM_NAME --name "IDE Controller" --add ide
vboxmanage storageattach $VM_NAME --storagectl "IDE Controller" --port 0 --device 0 --type dvddrive --medium $VM_ISO_PATH
vboxmanage modifyvm $VM_NAME --ioapic on
vboxmanage modifyvm $VM_NAME --memory $RAM_SIZE --vram $VRAM_SIZE
# NAT networking with host port 2222 forwarded to guest port 22 for ssh.
vboxmanage modifyvm $VM_NAME --nic1 nat
vboxmanage modifyvm $VM_NAME --natpf1 "guestssh,tcp,,2222,,22"
vboxmanage modifyvm $VM_NAME --natdnshostresolver1 on
# vboxmanage sharedfolder add $VM_NAME --name shared --hostpath $SHARED_PATH --automount
# Go through Ubuntu installation in a GUI environment, because installing headless is impossible in our case
vboxmanage startvm $VM_NAME
# After initial setup and installation of openssh-server via GUI, start vm with command
vboxmanage startvm $VM_NAME --type headless
# Connect to VM via ssh
ssh -p 2222 capstone@localhost
| true |
356d28f3ad666373488575144982d79ff2023203 | Shell | gitter-badger/grimlock | /src/main/scala/au/com/cba/omnia/grimlock/test/run.sh | UTF-8 | 3,538 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Copyright 2014 Commonwealth Bank of Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Trace and echo every command while running the test/demo jobs.
set -vx
JAR=grimlock.jar
NUM_TEST=24
# Feature toggles: which phases run, and whether to wipe/seed work dirs.
DO_LOCAL=true
DO_DEMO=false
DO_CLUSTER=false
DO_CLEANUP=true
DO_INIT=false
# Phase 1: run all numbered tests with Scalding's --local runner.
if [ ${DO_LOCAL} = "true" ]
then
if [ ${DO_CLEANUP} = "true" ]
then
rm -rf tmp/*
fi
for i in $(seq 1 ${NUM_TEST})
do
export HADOOP_OPTS="-Dsun.io.serialization.extendedDebugInfo=true"; \
hadoop jar $JAR com.twitter.scalding.Tool grimlock.test.Test${i} --local --input "someInputfile3.txt"
done
fi
# Phase 2: run the example pipelines locally, and optionally on HDFS.
if [ ${DO_DEMO} = "true" ]
then
if [ ${DO_CLEANUP} = "true" ]
then
rm -rf demo/*
hadoop fs -rm -r -f 'demo/*'
fi
export HADOOP_OPTS="-Dsun.io.serialization.extendedDebugInfo=true"; \
hadoop jar $JAR com.twitter.scalding.Tool grimlock.examples.BasicOperations --local
export HADOOP_OPTS="-Dsun.io.serialization.extendedDebugInfo=true"; \
hadoop jar $JAR com.twitter.scalding.Tool grimlock.examples.DataSciencePipelineWithFiltering --local
export HADOOP_OPTS="-Dsun.io.serialization.extendedDebugInfo=true"; \
hadoop jar $JAR com.twitter.scalding.Tool grimlock.examples.Scoring --local
export HADOOP_OPTS="-Dsun.io.serialization.extendedDebugInfo=true"; \
hadoop jar $JAR com.twitter.scalding.Tool grimlock.examples.DataQualityAndAnalysis --local
if [ ${DO_INIT} = "true" ]
then
hadoop fs -mkdir -p demo
hadoop fs -put exampleInput.txt
hadoop fs -put exampleWeights.txt
fi
if [ ${DO_CLUSTER} = "true" ]
then
export HADOOP_OPTS="-Dsun.io.serialization.extendedDebugInfo=true"; \
hadoop jar $JAR com.twitter.scalding.Tool grimlock.examples.BasicOperations --hdfs
export HADOOP_OPTS="-Dsun.io.serialization.extendedDebugInfo=true"; \
hadoop jar $JAR com.twitter.scalding.Tool grimlock.examples.DataSciencePipelineWithFiltering --hdfs
export HADOOP_OPTS="-Dsun.io.serialization.extendedDebugInfo=true"; \
hadoop jar $JAR com.twitter.scalding.Tool grimlock.examples.Scoring --hdfs
export HADOOP_OPTS="-Dsun.io.serialization.extendedDebugInfo=true"; \
hadoop jar $JAR com.twitter.scalding.Tool grimlock.examples.DataQualityAndAnalysis --hdfs
fi
fi
# Phase 3: run the numbered tests on the cluster, seeding inputs if asked.
if [ ${DO_CLUSTER} = "true" ]
then
if [ ${DO_CLEANUP} = "true" ]
then
hadoop fs -rm -r -f 'tmp/*'
fi
if [ ${DO_INIT} = "true" ]
then
hadoop fs -mkdir -p tmp
hadoop fs -put dict.txt
hadoop fs -put ivoryInputfile1.txt
hadoop fs -put numericInputfile1.txt
hadoop fs -put smallInputfile.txt
hadoop fs -put someInputfile3.txt
hadoop fs -put somePairwise.txt
hadoop fs -put somePairwise2.txt
hadoop fs -put somePairwise3.txt
fi
for i in $(seq 1 ${NUM_TEST})
do
export HADOOP_OPTS="-Dsun.io.serialization.extendedDebugInfo=true"; \
hadoop jar $JAR com.twitter.scalding.Tool grimlock.test.Test${i} --hdfs --input "someInputfile3.txt"
# --tool.graph
#dot -Tps2 grimlock.Test${i}0.dot -o graph_${1}.ps
#dot -Tps2 grimlock.Test${i}0_steps.dot -o graph_${i}_steps.ps
done
fi
| true |
7ece3442f6df4642ef8581e9cafcc38f6377ae9d | Shell | Pyq2022/ImageSensor-ext | /Eyelock/data/Scripts/autoRestore.sh | UTF-8 | 3,027 | 3.265625 | 3 | [] | no_license | #!/bin/bash
echo "run autoRestore.sh ..."
# Marker files under /home record which upgrade stages were in flight; if
# any of them survive to this boot, the previous upgrade did not finish.
if [ -f /home/icmupdate.txt ]
then
touch /home/updateInProgress.txt
fi
if [ -f /home/firmwareUpdate.txt ]
then
touch /home/updateInProgress.txt
fi
if [ -f /home/untarpackage.txt ]
then
touch /home/updateInProgress.txt
fi
if [ -f /home/untarpackage.txt ]
then
touch /home/updateInProgress.txt
fi
if [[ -f /home/slaveUpdating.txt || -f /home/slaveUpdated.txt ]]
then
touch /home/updateInProgress.txt
fi
# Nothing pending: normal boot, no restore needed.
if [ ! -f /home/updateInProgress.txt ]
then
exit
fi
echo "Software upgrade failed ..."
# Failure while creating the restore point itself: nothing to roll back to.
if [ -f /home/createrestorepoint.txt ]
then
echo "create restorepoint failed ..."
rm /home/createrestorepoint.txt /home/updateInProgress.txt
exit
fi
echo "now restoring the previous release ..."
# Newest restore-point archive wins.
cd /home/firmware/nano/restorepoints
filename=$(ls -t * | head -1)
echo "start backup restore - $filename"
# stop master
cd /home/root
i2cset -y 3 0x2e 4 6
rm Eyelock.run
killall -KILL Eyelock
sleep 2
# install on master
#rm -rf default firmware root user www
#cd /home
#mv /home/root /home/upgradeTemp/root_old
#mv /home/upgradeTemp/root /home/root
#mv /home/upgradeTemp/root_old /home/upgradeTemp/root
#mv /home/www /home/upgradeTemp/www_old
#mv /home/upgradeTemp/www /home/www
#mv /home/upgradeTemp/www_old /home/upgradeTemp/www
#sleep 2
# install on master
#rm -rf default firmware root user www
# Unpack the restore-point tarball over /home on the master unit.
cd /home
cp /home/firmware/nano/restorepoints/$filename .
sleep 3
tar -xvf $filename
sleep 3
chmod 755 /home/root/* /home/root/scripts/*
sleep 2
# install on slave
if [[ -f /home/slaveUpdating.txt || -f /home/slaveUpdated.txt ]]
then
ping -q -c2 192.168.40.2 > /dev/null
if [ $? -eq 0 ]
then
echo "Slave is Pingable"
# stop slave
ssh root@192.168.40.2 "cd /home/root; killall -KILL Eyelock"
ssh root@192.168.40.2 "cd /home; rm *.tar *.tgz; cp /home/firmware/nano/restorepoints/$filename ."
#scp EyelockNxt_*_Slave.tar.gz root@192.168.40.2:/home
#ssh root@192.168.40.2 "cd /home; rm *tar; gunzip EyelockNxt_*_Slave.tar.gz"
#sleep 5
#ssh root@192.168.40.2 "cd /home; tar -xvf EyelockNxt_*_Slave.tar"
sleep 5
ssh root@192.168.40.2 "cd /home; tar -xvf $filename"
sleep 5
ssh root@192.168.40.2 "cd /home/root; chmod 755 *; chmod 755 /home/root/scripts/*"
sleep 1
rm /home/slaveUpdating.txt /home/slaveUpdated.txt
else
echo "Slave not pingable No point"
fi
fi
# Roll the ICM microcontroller firmware back as well, if its update started.
if [ -f /home/icmupdate.txt ]
then
echo "Restore ICM ..."
cd /home/root
./icm_communicator -p nanoNxt_ICM_*.cyacd
rm /home/icmupdate.txt
fi
sleep 5
# cleanup
rm /home/root_*.tgz
rm /home/createrestorepoint.txt /home/updateInProgress.txt
rm /home/firmwareUpdate.txt /home/untarpackage.txt
# log
touch /home/restoreSoftware.txt
NOW=$(date +"%Y-%m-%d, %T, 000")
echo "$NOW, INFO , [Eyelock], - SW Restore: $filename" > /home/root/nxtEvent.log
echo "$NOW, INFO , [Eyelock], - SW Restore: $filename" > /home/root/nxtLog.log
sleep 25
sync
sleep 5
i2cset -y 3 0x2e 4 7
sleep 5
i2cset -y 3 0x2e 4 8
reboot
| true |
3daa730bd659dd367c6b2e73f86f077214ad20f1 | Shell | andersfylling/stack-guru | /updater.sh | UTF-8 | 3,925 | 3.953125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# Variables (may be overriden by environment)
#
STACKGURU_ENV=${STACKGURU_ENV:-"production"}
WORKSPACE_DIR=${WORKSPACE_DIR:-"$(dirname "$0")"}
UPDATER_LOG=${UPDATER_LOG:-"$HOME/discord_autoupdate.log"}
LOCK_FILE=${LOCK_FILE:-"/tmp/discord_updater.lock"}
PHP_ERROR_LOG_FILE=${PHP_ERROR_LOG_FILE:-"/var/log/php_errors.log"}
SYSTEMD_SERVICE=${SYSTEMD_SERVICE:-"stackguru.service"}
GIT_REMOTE=${GIT_REMOTE:-"origin"}
GIT_BRANCH=${GIT_BRANCH:-""}
# Case-insensitive comparison for the environment name below.
shopt -s nocasematch
# Production runs against master (unless GIT_BRANCH was set via env);
# any other environment is dev mode. NOTE(review): in dev mode GIT_BRANCH
# can remain empty — see the git pull later in this script.
if [ "${STACKGURU_ENV,,}" == "production" ]; then
DEV_MODE=0
GIT_BRANCH=${GIT_BRANCH:-"master"}
else
DEV_MODE=1
fi
#
# Constants
#
# Everything below must exist before the update may proceed (checked later).
BINARY_DEPENDENCIES=( "sudo" "git" "composer" "systemctl" )
FILE_DEPENDENCIES=(
"$WORKSPACE_DIR"
"$WORKSPACE_DIR/run-bot.sh"
)
SERVICE_DEPENDENCIES=( "$SYSTEMD_SERVICE" )
#SERVICE_DEPENDENCIES=( "mysql.service" "$SYSTEMD_SERVICE" )
#
# Helper functions
#

# log_formatted MESSAGE... — prefix the message with a UTC timestamp in the
# form "YYYY-MM-DD HH:MM:SS | message".
function log_formatted {
	# Bug fix: the original format string was "%H:%I:%S" — %I is the
	# 12-hour clock, not minutes, so timestamps read hour:hour:second.
	# %M is the minutes specifier.
	NOW=$(date --utc +"%Y-%m-%d %H:%M:%S")
	echo "${NOW} | $@"
}
# Log an informational message: appended to $UPDATER_LOG in production,
# echoed to stdout in dev mode.
function log_echo {
	local message="$*"
	if [ "$DEV_MODE" -eq 0 ]; then
		# Quote the redirection target so a $HOME containing spaces does
		# not word-split the log path (SC2086).
		log_formatted "$message" >> "$UPDATER_LOG"
	else
		log_formatted "$message"
	fi
}

# Log an error message: appended to $UPDATER_LOG in production, echoed to
# stderr in dev mode.
function log_error {
	local message="ERROR: $*"
	if [ "$DEV_MODE" -eq 0 ]; then
		log_formatted "$message" >> "$UPDATER_LOG"
	else
		log_formatted "$message" >&2
	fi
}

# Log an error message and terminate the script with status 1.
function log_fatal {
	log_error "$@. Aborting."
	exit 1
}
# Abort (via log_fatal) unless "$1" resolves to an executable command.
function check_binary_dependency {
	if ! hash "$1" 2>/dev/null; then
		log_fatal "Dependency '$1' is not installed or not in PATH."
	fi
}

# Abort unless the path "$1" exists (file or directory).
function check_file_dependency {
	if ! [ -e "$1" ]; then
		log_fatal "File or folder '$1' does not exist."
	fi
}

# Abort unless systemd lists a unit file matching "$1".
function check_service_dependency {
	systemctl list-unit-files | grep "$1" >/dev/null || log_fatal "Service '$1' does not exist."
}
# Lock/Unlock workspace
# Acquire the update lock and stop the bot service. If another updater
# holds the lock, abort; if the service cannot be stopped, release the
# lock (which restarts the service) and exit 2.
function lock_workspace {
[ -f $LOCK_FILE ] && log_fatal "Workspace is locked already. Remove '${LOCK_FILE}' if necessary."
touch $LOCK_FILE
# Stop bot
log_echo "Stopping service '${SYSTEMD_SERVICE}'..."
if ! sudo systemctl stop "${SYSTEMD_SERVICE}"; then
log_error "Failed to stop service '${SYSTEMD_SERVICE}'."
unlock_workspace
exit 2
fi
}
# Release the update lock and bring the bot service back up; exits 2 when
# the restart fails.
function unlock_workspace {
# Unlock workspace
rm $LOCK_FILE
# Restart bot
log_echo "Restarting service '${SYSTEMD_SERVICE}'..."
if ! sudo systemctl restart "${SYSTEMD_SERVICE}"; then
log_error "Failed to restart service '${SYSTEMD_SERVICE}'."
exit 2
fi
log_echo "Service '${SYSTEMD_SERVICE}' restarted."
}
#
# Check dependencies
#
# Each check_* helper aborts via log_fatal on the first missing dependency.
# Binary dependencies
for dependency in "${BINARY_DEPENDENCIES[@]}"; do
check_binary_dependency "$dependency"
done
# File dependencies
for dependency in "${FILE_DEPENDENCIES[@]}"; do
check_file_dependency "$dependency"
done
# Service dependencies
for dependency in "${SERVICE_DEPENDENCIES[@]}"; do
check_service_dependency "$dependency"
done
#
# Update mechanism
#

# Lock workspace (stops the bot before touching the working tree).
lock_workspace

# Switch to workspace directory
[ ! -z "$WORKSPACE_DIR" ] && cd "$WORKSPACE_DIR"

# Update source code. In dev mode GIT_BRANCH may be empty; ${VAR:+...}
# drops the argument entirely in that case instead of passing git an
# empty branch name, which made the pull fail.
log_echo "Updating source code..."
git reset --hard >/dev/null
git pull "$GIT_REMOTE" ${GIT_BRANCH:+"$GIT_BRANCH"} >/dev/null

# Update composer dependencies
log_echo "Updating dependencies..."
if [ "$DEV_MODE" -eq 1 ]; then
	composer install
else
	composer install --no-dev --optimize-autoloader
fi
log_echo "Updated dependencies"

# Log update. Prefer the nearest tag from `git describe`; fall back to the
# abbreviated commit when the repository has no tags. $VERSION is quoted in
# the emptiness test so the check stays well-formed for any content.
COMMIT=$(git log -1 --oneline)
VERSION=$(git describe 2>/dev/null)
if [ -z "$VERSION" ]; then
	VERSION="${COMMIT}"
else
	VERSION="${VERSION} (${COMMIT})"
fi
log_echo "Updated source code! HEAD is now at: ${VERSION}"

# Unlock workspace, restart bot
unlock_workspace
exit 0
| true |
5192e5c7969795d0872f9e132d294580e8b8d9a2 | Shell | yszou/env | /docker/phabricator/build | UTF-8 | 746 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Build the phabricator Docker image. Each upstream Phacility repository is
# cloned, archived to <name>.tar.gz, and its working tree removed; an
# existing tarball is reused (delete it to force a re-fetch). The three
# copy-pasted clone/tar stanzas are factored into one helper.

# fetch_tarball NAME — create NAME.tar.gz from github.com/phacility/NAME
# unless it already exists. The && chain mirrors the original script: any
# failing step short-circuits the remaining steps for that repository.
fetch_tarball() {
	local name=$1
	if [ ! -f "${name}.tar.gz" ]; then
		git clone "https://github.com/phacility/${name}.git" && \
		cd "$name" && \
		git pull --rebase && \
		cd .. && \
		tar czf "${name}.tar.gz" "$name" && \
		rm -rf "$name"
	fi
}

fetch_tarball libphutil
fetch_tarball arcanist
fetch_tarball phabricator

docker build -t phabricator .
| true |
44b0f1bdbec91f1cf4e4ef5232593e7fe57d0fe6 | Shell | qq542vev/w3mplus | /source/.w3mplus/lib/abspath.sh | UTF-8 | 1,379 | 3.84375 | 4 | [
"CC-BY-4.0"
] | permissive | #!/usr/bin/env sh
### Script: abspath.sh
##
## 相対パスを絶対パスに変換する関数を定義する。
##
## Usage:
##
## ------ Text ------
## . 'abspath.sh'
## ------------------
##
## Metadata:
##
## id - 78fa36b1-64b6-4c15-a164-2b89c16b5c01
## author - <qq542vev at https://purl.org/meta/me/>
## version - 1.0.0
## date - 2022-09-02
## since - 2022-07-26
## copyright - Public Domain.
## license - <CC0 at https://creativecommons.org/publicdomain/zero/1.0/>
## package - w3mplus
##
## See Also:
##
## * <Project homepage at https://github.com/qq542vev/w3mplus>
## * <Bug report at https://github.com/qq542vev/w3mplus/issues>
### Function: abspath
##
## 相対パスを絶対パスに変換する。
##
## Parameters:
##
## $1 - 結果を代入する変数名。
## $2 - 相対パス。
## $3 - ベースとする絶対パス。
##
## See Also:
##
## * <シェルスクリプトで相対パスと絶対パスを相互に変換する関数 at https://qiita.com/ko1nksm/items/88d5b7ac3b1db8778452>
abspath() {
	# Anchor the input: absolute paths pass through, relative paths are
	# prefixed with the base directory ($3, defaulting to $PWD). A
	# trailing "/" is appended so the loop below can always peel one
	# segment per iteration. Only positional parameters are used, so no
	# caller variables are clobbered (POSIX sh has no `local`).
	if [ "${2#/}" != "${2}" ]; then
		set -- "${1}" "${2}/" ''
	else
		set -- "${1}" "${3:-$PWD}/${2}/" ''
	fi

	# Consume $2 segment by segment, accumulating the normalized path in
	# $3: empty segments and "." are dropped, ".." pops the last
	# component, everything else is appended.
	while [ -n "${2}" ]; do
		if [ "${2%%/*}" = '' ] || [ "${2%%/*}" = '.' ]; then
			set -- "${1}" "${2#*/}" "${3}"
		elif [ "${2%%/*}" = '..' ]; then
			set -- "${1}" "${2#*/}" "${3%/*}"
		else
			set -- "${1}" "${2#*/}" "${3}/${2%%/*}"
		fi
	done

	# Store the absolute result into the variable named by $1.
	eval "${1}=\"/\${3#/}\""
}
| true |
95a3db76ee23fb8b527d8debd373f5dfd4d1ee1e | Shell | hobbyquaker/BOOSTreveng | /Examples/bash/colorsensor.sh | UTF-8 | 1,377 | 3.84375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# coloredEcho TEXT COLOR — echo TEXT in the given terminal color via tput.
# COLOR is either a numeric tput color index or a name (black, red, green,
# yellow, blue, magenta, cyan, white); unknown names fall back to white.
# https://stackoverflow.com/questions/5947742/how-to-change-the-output-color-of-echo-in-linux
function coloredEcho(){
    local exp=$1;
    local color=$2;
    # Bug fix: the regex was quoted ('^[0-9]$'); bash's =~ treats a quoted
    # right-hand side as a literal string, so numeric arguments were never
    # recognized and e.g. "3" was remapped to 7 (white) by the name table.
    # Unquoted it matches real digits; [0-9]+ also accepts indices >= 10.
    if ! [[ $color =~ ^[0-9]+$ ]] ; then
        case $(echo $color | tr '[:upper:]' '[:lower:]') in
            black) color=0 ;;
            red) color=1 ;;
            green) color=2 ;;
            yellow) color=3 ;;
            blue) color=4 ;;
            magenta) color=5 ;;
            cyan) color=6 ;;
            white|*) color=7 ;; # white or invalid color
        esac
    fi
    tput setaf $color;
    echo $exp;
    tput sgr0;
}
# activate notifications
gatttool -b 00:16:53:A4:CD:7E --char-write-req --handle=0x0f --value=0100
# activate continuous color reading
gatttool -b 00:16:53:A4:CD:7E --char-write-req --handle=0x0e --value=0a004101080100000001 --listen |
# Parse each notification line: take the byte string after the last ":",
# split it on whitespace, and switch on the 5th byte (the color code).
while IFS= read -r line
do
output=${line##*:}
output2=($output)
case ${output2[4]} in
00)
coloredEcho "BLACK" black
;;
03)
coloredEcho "BLUE" blue
;;
05)
coloredEcho "GREEN" green
;;
07)
coloredEcho "YELLOW" yellow
;;
09)
coloredEcho "RED" red
;;
0a)
coloredEcho "WHITE" white
;;
ff)
echo "TOO FAR"
;;
*)
echo "???"
;;
esac
done
| true |
f13534ed592c4e08a7bd6260080988a04a5d3339 | Shell | StevenACoffman/dotfiles | /bin/ws/add_dmusser.sh | UTF-8 | 440 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# For every worldspace* checkout, register dmusser's Bitbucket fork as a
# remote, fetch it, and track origin's upgrade branch locally.
for dir in "${HOME}/Documents/git/worldspace"*
do
dir=${dir%*/}
#echo ${dir##*/}
# NOTE(review): cd uses only the basename, so this relies on the script
# being run from the parent of the checkouts (matching the `cd ..` below).
cd ${dir##*/}
#pwd
git remote add dmusser git@bitbucket.org:dmusser/${dir##*/}.git
git fetch dmusser
#git checkout -b upgrade dmusser/upgrade
#git branch --track upgrade origin/upgrade
git branch upgrade -u origin/upgrade
#git config user.email "steve.coffman@deque.com"
#git branch --track upgrade origin/upgrade
cd ..
done
| true |
1ab46cf676b7b7056d5989fc65a50f112b5b1cc7 | Shell | gb-archive/dotfiles-1 | /setup.sh | UTF-8 | 5,815 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Abort on errors and unset variables, fail pipelines on any failing stage,
# and restrict word-splitting to newlines and tabs.
set -euo pipefail
IFS=$'\n\t'
# Return success when the named command (binary, builtin, alias or shell
# function) is resolvable in the current shell.
command_exists () {
    local candidate=$1
    type "$candidate" > /dev/null 2>&1
}
# Install a Homebrew formula, or upgrade it when a version is already
# installed.
# $1 - formula name
# $@ - formula name plus any install options, forwarded to `brew install`
update_or_install() {
  local installed
  # `brew ls --versions` exits non-zero for an uninstalled formula; under
  # this script's `set -euo pipefail` that would abort, so handle it
  # explicitly instead of letting `local x=$(...)` silently mask the status.
  installed=$(brew ls --versions "$1" | wc -l) || installed=0
  if [[ "$installed" -gt 0 ]] ; then
    brew upgrade "$1"
  else
    # Quote "$@" so install options stay intact as separate arguments.
    brew install "$@"
  fi
}
# Move $1 to $2 when $1 is a regular file; silently succeed otherwise
# (callers run under `set -e`, so the no-op path must return 0).
mv_if_exists() {
    [ -f "$1" ] || return 0
    mv "$1" "$2"
}
# Poor man's input parsing: a single optional flag selects which phase to
# skip.
# BUG FIX: referencing "$1" aborts under `set -u` when no argument is
# given; default to an empty string instead.
SKIPINSTALL=0
if [[ "${1:-}" == "--skip-install" ]]; then
  SKIPINSTALL=1
fi
SKIPCOPY=0
if [[ "${1:-}" == "--skip-copy" ]]; then
  SKIPCOPY=1
fi
# Interactively collect the target user, home directory and editor unless
# the copy phase is skipped (-r keeps backslashes in the input literal).
if [[ "$SKIPCOPY" -eq 0 ]]; then
  echo -e "Enter username:"
  read -r NEWUSER
  echo -e "\nEnter home directory:"
  read -r NEWHOME
  echo -e "\nEnter editor:"
  read -r NEWEDITOR
fi
# Install/upgrade the full toolchain via Homebrew. macOS only; aborts when
# brew is missing. Many GNU tools use --with-default-names so they shadow
# the BSD versions under their plain names.
if [[ "$SKIPINSTALL" -eq 0 ]]; then
  UNAME=$(uname)
  if [[ "$UNAME" == "Darwin" ]]; then
    if command_exists brew ; then
      brew update
      update_or_install astyle
      update_or_install autoconf
      update_or_install automake
      update_or_install binutils
      update_or_install cdiff
      update_or_install coreutils
      update_or_install dateutils
      update_or_install diffutils
      update_or_install findutils --with-default-names
      update_or_install gawk
      update_or_install gcc
      update_or_install gdb
      update_or_install git
      update_or_install gnu-indent --with-default-names
      update_or_install gnu-sed --with-default-names
      update_or_install gnu-tar --with-default-names
      update_or_install gnu-which --with-default-names
      update_or_install gnutls
      update_or_install grep --with-default-names
      update_or_install gzip
      update_or_install less
      update_or_install lynx
      update_or_install m4
      update_or_install macvim
      update_or_install make
      update_or_install md5sha1sum
      # mutt requires sidebar patch
      # update_or_install mutt
      update_or_install perl
      update_or_install pidof
      update_or_install pkg-config
      update_or_install python
      update_or_install ranger
      update_or_install reattach-to-user-namespace
      update_or_install screen
      update_or_install the_silver_searcher
      update_or_install tig
      update_or_install tmux
      update_or_install tmux-mem-cpu-load
      update_or_install tree
      update_or_install valgrind
      update_or_install vim --override-system-vi
      update_or_install unzip
      update_or_install urlview
      update_or_install watch
      update_or_install wdiff --with-gettext
      update_or_install weechat
      update_or_install wget
      update_or_install zsh
      # NOTE(review): `brew linkapps` was removed from newer Homebrew
      # releases -- this line may need replacing.
      brew linkapps macvim
    else
      echo "brew is not installed."
      exit 1
    fi
  fi
fi
# Personalize the repo's .zshrc (substituting the ##PLACEHOLDER## tokens),
# back up the target's existing dotfiles into $NEWHOME/.dotbackup, then copy
# everything over and reset the templated .zshrc in the repo.
# NOTE(review): sed uses ',' as its delimiter, so values containing a comma
# would break the substitution; $NEWHOME is also expanded unquoted below,
# so paths with spaces are not supported -- confirm that is acceptable.
if [[ "$SKIPCOPY" -eq 0 ]]; then
  sed -e 's,##NEWHOME##,'"$NEWHOME"',g' .zshrc > .zshrc.tmp && mv .zshrc.tmp .zshrc
  sed -e 's,##NEWUSER##,'"$NEWUSER"',g' .zshrc > .zshrc.tmp && mv .zshrc.tmp .zshrc
  sed -e 's,##NEWEDITOR##,'"$NEWEDITOR"',g' .zshrc > .zshrc.tmp && mv .zshrc.tmp .zshrc
  rm -rf $NEWHOME/.dotbackup
  mkdir -p $NEWHOME/.dotbackup
  mv_if_exists $NEWHOME/.astylerc $NEWHOME/.dotbackup/
  # Do not "backup" .config, or a lot of stuff might be lost
  # mv_if_exists $NEWHOME/.config $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.gitconfig $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.jsbeautifyrc $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.jshintrc $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.lessfilter $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.lesskey $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.lldbinit $NEWHOME/.dotbackup/
  # Do not "backup" .mutt either, or mail cache is lost
  # mv_if_exists $NEWHOME/.mutt $NEWHOME/.dotbackup/
  # Do not "backup" .oh-my-zsh either, or the oh-my-zsh installation is lost
  # mv_if_exists $NEWHOME/.oh-my-zsh $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.passwords.sh $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.tigrc $NEWHOME/.dotbackup/
  # do not backup .tmux, or plugins are lost
  # mv_if_exists $NEWHOME/.tmux $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.tmux.conf $NEWHOME/.dotbackup/
  # Do not "backup" .vim directory either, or all plugins have to re-installed
  # mv_if_exists $NEWHOME/.vim $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.vimrc $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.xbindkeysrc $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.Xdefaults $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.xinitrc $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.Xmodmap $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.xsession $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.ycm_extra_conf.py $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.zsh $NEWHOME/.dotbackup/
  mv_if_exists $NEWHOME/.zshrc $NEWHOME/.dotbackup/
  # Copy all files over:
  cp .astylerc $NEWHOME/
  cp -r .config $NEWHOME/
  cp .gitconfig $NEWHOME/
  cp .jsbeautifyrc $NEWHOME/
  cp .jshintrc $NEWHOME/
  cp .lessfilter $NEWHOME/
  cp .lesskey $NEWHOME/
  cp .lldbinit $NEWHOME/
  cp -r .mutt $NEWHOME/
  cp -r .oh-my-zsh $NEWHOME/
  cp .passwords.sh $NEWHOME/
  cp .tigrc $NEWHOME/
  cp -r .tmux $NEWHOME/
  cp .tmux.conf $NEWHOME/
  cp -r .vim $NEWHOME/
  cp .vimrc $NEWHOME/
  cp .xbindkeysrc $NEWHOME/
  cp .Xdefaults $NEWHOME/
  cp .xinitrc $NEWHOME/
  cp .Xmodmap $NEWHOME/
  cp .xsession $NEWHOME/
  cp .ycm_extra_conf.py $NEWHOME/
  cp -r .zsh $NEWHOME/
  cp .zshrc $NEWHOME/
  # Reset .zshrc
  git checkout -- .zshrc
fi
| true |
f6903c48b73d2421b7c981b574921b4119963878 | Shell | kbrow1i/cygtexlive | /texlive/0p_texlive_prep.dash | UTF-8 | 1,785 | 3.328125 | 3 | [] | no_license | #! /bin/dash
# Cygwin TeX Live post-install step: process marker files dropped into
# $markerdir by package installation, rename each to *.done once handled.
# The repeated `for f in glob; do if [ -f $f ] ... break` idiom runs the
# body at most once and only tests whether the glob matched any real file.
markerdir=/var/lib/texmf/postinstall
# Update TL package database.
for f in ${markerdir}/*.tlp
do
    if [ -f ${f} ]
    then
        tlps=$(cat ${markerdir}/*.tlp)
    fi
    break
done
# NOTE(review): $tlps stays unset when no *.tlp marker exists; dash treats
# it as empty here, which skips the update -- intentional, but fragile if
# `set -u` is ever added.
if [ -n "${tlps}" ]
then
    [ -x /usr/libexec/update_tlpdb ] && /usr/libexec/update_tlpdb add ${tlps} \
        && for f in ${markerdir}/*.tlp ; do mv -f ${f} ${f}.done ; done
fi
# Recreate language files if necessary.
regenerate_language=0
for f in ${markerdir}/*.lang
do
    if [ -f ${f} ]
    then
        regenerate_language=1
    fi
    break
done
# Regenerate when a .lang marker exists or any language file is missing.
if [ ${regenerate_language} = 1 ] \
    || [ ! -f /var/lib/texmf/tex/generic/config/language.def ] \
    || [ ! -f /var/lib/texmf/tex/generic/config/language.dat ] \
    || [ ! -f /var/lib/texmf/tex/generic/config/language.dat.lua ]
then
    # Regenerated language files require all formats to be rebuilt later.
    /usr/bin/touch ${markerdir}/texlive.rebuild_all_fmts
    regen_status=0
    [ -x /usr/bin/tlmgr ] && /usr/bin/tlmgr generate language \
        || regen_status=1
    # Only mark the .lang markers done when regeneration succeeded.
    if [ ${regenerate_language} = 1 -a ${regen_status} = 0 ]
    then
        for g in ${markerdir}/*.lang
        do
            mv -f ${g} ${g}.done
        done
    fi
fi
# Rebuild the ls-R files if necessary.
for f in ${markerdir}/*.lsr
do
    if [ -f ${f} ]
    then
        [ -x /usr/bin/mktexlsr ] && /usr/bin/mktexlsr \
            && for g in ${markerdir}/*.lsr ; do mv -f ${g} ${g}.done; done
    fi
    break
done
# Refresh formats if necessary.
if [ -f ${markerdir}/texlive.refresh_fmts ]
then
    # Maybe all formats will be rebuilt in zp_texlive_finish.dash.
    if [ -f ${markerdir}/texlive.rebuild_all_fmts ]
    then
        mv -f ${markerdir}/texlive.refresh_fmts \
            ${markerdir}/texlive.refresh_fmts.done
    else
        [ -x /usr/bin/fmtutil-sys ] && /usr/bin/fmtutil-sys --refresh \
            && mv -f ${markerdir}/texlive.refresh_fmts \
                ${markerdir}/texlive.refresh_fmts.done
    fi
fi
| true |
e0665c5d8102b557dc6f469ee360ec0a0ccb9d5c | Shell | asppj/mkcert | /install.sh | UTF-8 | 296 | 3.375 | 3 | [] | no_license | #!/bin/bash
# installCA CERT_PATH
# Add the given CA certificate to the macOS system keychain as a trusted
# root (requires sudo).
installCA(){
    echo "准备安装..."
    # Quote the path so certificate files with spaces work.
    sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain "$1"
    echo "安装完成"
}
echo "判断命令格式"
# Require exactly one argument: the path of the CA certificate to install.
if [ "$#" != 1 ];
then
    echo "请指定ca证书路径"
else
    installCA "$1"
fi
| true |
d80561ec20aa9350b43d9154f9ccfda914a90ab4 | Shell | dnbabkov/intro-os | /lab12/test.sh | UTF-8 | 508 | 3.59375 | 4 | [
"CC-BY-4.0"
# Demonstrate getopts: -i/-o/-p take a value, -C/-n are boolean switches.
# BUG FIX: initialize every flag variable first -- previously a flag that
# was never passed left its variable unset, and `[ $iflag -eq 1 ]` then
# failed with "unary operator expected".
iflag=0 oflag=0 pflag=0 Cflag=0 nflag=0
ival= oval= pval=
while getopts i:o:p:Cn optletter; do
    case $optletter in
    i) iflag=1; ival=$OPTARG;;
    o) oflag=1; oval=$OPTARG;;
    p) pflag=1; pval=$OPTARG;;
    C) Cflag=1;;
    n) nflag=1;;
    *) echo "Illegal option $optletter";;
    esac
done
if [ "$iflag" -eq 1 ]; then
    echo "-i flag: $ival"
fi
if [ "$oflag" -eq 1 ]; then
    echo "-o flag: $oval"
fi
if [ "$pflag" -eq 1 ]; then
    echo "-p flag: $pval"
fi
if [ "$Cflag" -eq 1 ]; then
    echo "-C flag specified"
fi
if [ "$nflag" -eq 1 ]; then
    echo "-n flag specified"
fi
| true |
ee9e5ac855592baad3201ca2394c37837a7bb759 | Shell | wwood/guix-notes | /scripts/ruby-guix-env | UTF-8 | 968 | 3.5 | 4 | [
"MIT"
] | permissive | #! /bin/bash
#
# Set up a Ruby environment for Guix after installing ruby. Run as
#
# . ruby-guix-env
#
# This can also works with Nix, provided .nix-profile is in the path
# and a ruby is installed.
# Meant to be *sourced*: export GEM_PATH/GEM_HOME/GEM_SPEC_CACHE so gems are
# kept in a per-ruby-store-hash directory under ~/.gem, isolated per Guix
# (or Nix) ruby installation.
# Ensure the Guix profile bin dir is on PATH.
echo $PATH|grep guix-profile
if [ $? == 1 ]; then
  echo guix missing! Adding ~/guix-profile/bin to the PATH.
  export PATH=$HOME/.guix-profile/bin:$PATH
fi
# Resolve the store path of the current ruby; its grandparent directory name
# (the store item, including the hash) keys the per-installation gem dirs.
RUBYBIN=$(readlink -f `which ruby`)
RUBYHASH=$(basename $(dirname $(dirname $RUBYBIN)))
# Set GEM_PATH and GEM_HOME to point to a unique local dir
export GEM_PATH=$HOME/.gem/$RUBYHASH/2.1.0
mkdir -p $GEM_PATH
export GEM_HOME=$GEM_PATH
# Add the rspec cache
export GEM_SPEC_CACHE=$HOME/.gem/$RUBYHASH/specs
mkdir -p $GEM_SPEC_CACHE
# Now add GNU Guix local GEM path
export GEM_PATH=$GEM_PATH:$HOME/.guix-profile/lib/ruby/gems/2.2.0/
# Add the path for local gems built in-situ
GEM_BIN=$HOME/.gem/$RUBYHASH/2.1.0/bin/
echo $PATH|grep $GEM_BIN
if [ $? == 1 ]; then
  export PATH=$GEM_BIN:$PATH
fi
# Show the resulting gem environment for inspection.
gem env
| true |
581d18338857095cbd0f595ab43377f1d1b8c39e | Shell | bduffany/buildbuddy | /tools/metrics/run.sh | UTF-8 | 2,613 | 3.796875 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Run everything relative to this script's directory; remember the current
# git branch so dashboard auto-save can refuse to write after a branch
# switch. All settings below are overridable via the environment.
set -e -o pipefail
__file__=$(realpath "$0")
__dir__=$(dirname "$__file__")
cd "$__dir__"
START_BRANCH=$(git branch --show-current)
: ${GRAFANA_PORT:=4500}
: ${GRAFANA_ADMIN_PASSWORD:="admin"}
GRAFANA_STARTUP_URL="http://localhost:$GRAFANA_PORT?orgId=1&refresh=5s"
GRAFANA_DASHBOARD_URL="http://admin:$GRAFANA_ADMIN_PASSWORD@localhost:$GRAFANA_PORT/api/dashboards/db/buildbuddy-metrics"
GRAFANA_DASHBOARD_FILE_PATH="./grafana/dashboards/buildbuddy.json"
: ${KUBE_CONTEXT:=""}
: ${KUBE_NAMESPACE:="monitor-dev"}
: ${KUBE_PROM_SERVER_RESOURCE:="deployment/prometheus-server"}
: ${KUBE_PROM_SERVER_PORT:=9090}
# Open Grafana dashboard when the server is up and running.
# Background subshell: poll both Grafana and the Prometheus metrics port
# until both answer (up to 100 tries at 0.5s each), then launch the
# platform's opener (`open` on macOS, `xdg-open` elsewhere).
(
  open=$(which open &>/dev/null && echo "open" || echo "xdg-open")
  tries=100
  while ! (curl "$GRAFANA_STARTUP_URL" &>/dev/null && curl "http://localhost:9100/metrics" &>/dev/null); do
    sleep 0.5
    tries=$((tries - 1))
    if [[ $tries == 0 ]]; then
      exit 1
    fi
  done
  echo "Opening $GRAFANA_STARTUP_URL"
  "$open" "$GRAFANA_STARTUP_URL"
) &
# Download the current dashboard JSON from Grafana and persist it to
# GRAFANA_DASHBOARD_FILE_PATH when it changed. Skips the write when the git
# branch changed mid-session so edits are not saved onto the wrong branch.
function sync() {
  local json
  json=$(curl "$GRAFANA_DASHBOARD_URL" 2>/dev/null)
  if [[ -z "$json" ]]; then
    echo "$0: WARNING: Could not download dashboard from $GRAFANA_DASHBOARD_URL"
    return
  fi
  json=$(echo "$json" | jq -M -r '.dashboard | del(.version)')
  # Read the saved dashboard with jq directly instead of piping cat into it.
  current=$(jq -M -r 'del(.version)' "$GRAFANA_DASHBOARD_FILE_PATH")
  # If the dashboard hasn't changed, don't write a new JSON file, to avoid
  # updating the file timestamp (causing Grafana to show "someone else updated
  # this dashboard")
  if [ "$json" == "$current" ]; then return; fi
  local current_branch
  current_branch=$(git branch --show-current)
  if [[ "$current_branch" != "$START_BRANCH" ]]; then
    echo -e "$0: \033[33mWARNING: git branch has changed. Changes to the dashboard will not be auto-saved.\033[0m"
    return
  fi
  echo "$0: Detected change in Grafana dashboard. Saving to $GRAFANA_DASHBOARD_FILE_PATH"
  echo "$json" >"$GRAFANA_DASHBOARD_FILE_PATH"
}
# Poll for dashboard changes and update the local JSON files.
(
  while true; do
    sleep 3
    sync
  done
) &
docker_compose_args=("-f" "docker-compose.grafana.yml")
# NOTE(review): "$1" is referenced without a default; with no argument it
# expands empty (no `set -u` here), selecting the local-Prometheus branch.
if [[ "$1" == "kube" ]]; then
  # Start a thread to forward port 9100 locally to the Prometheus server on Kube.
  (
    kubectl --context="$KUBE_CONTEXT" --namespace="$KUBE_NAMESPACE" \
      port-forward "$KUBE_PROM_SERVER_RESOURCE" 9100:"$KUBE_PROM_SERVER_PORT"
  ) &
else
  # Run the Prometheus server locally.
  docker_compose_args+=("-f" "docker-compose.prometheus.yml")
fi
docker-compose "${docker_compose_args[@]}" up
| true |
b55808d6ec83a45c8f045c2596c6b2f609f59933 | Shell | mortie23/shell-param | /project/run.sh | UTF-8 | 656 | 3.875 | 4 | [] | no_license | #!/bin/bash
## Name: Shell script with paramters
## Author: Christopher Mortimer
## Date: 2018-07-19
## Desc:
## Notes:
## Usage: ./run -o -p test
## Param:
## -o : option
## -p : parameter with value
## Include script with common functions
## NOTE(review): parseArgs/echoLog and the ${option}/${param}/${help}
## variables all come from the sourced file -- confirm against
## common-functions.sh before relying on their semantics here.
. ../common/common-functions.sh
## Parse arguments passed
parseArgs "$@"
echoLog "INFO" "arguments: ${option} ${param} ${help}"
## When -h/--help was requested, print usage and exit non-zero.
if [[ ${help} ]]
then
    error_code=1
    echo "
This project code is used to demostrate simple argument passing to a shell script
Arguments
-o --option: An option to set a flag to Y
-p --param: Pass a parameter value using '-p value'
"
    exit ${error_code}
fi
6199c92a68c6c2ae763cf6cf039e7d2d27855cf7 | Shell | ippsio/dotfiles | /bin/rg_wrap | UTF-8 | 551 | 3.5 | 4 | [] | no_license | #!/bin/bash
# rg_wrap SEARCH_WORD [extra rg args/paths...]
# Fixed-string ripgrep search; every hit is prefixed with its index and the
# total hit count, e.g. "(  3/17:ripgrep) path:line:text".
SEARCH_WORD="$1"
shift
# BUG FIX: build the command as an argv array instead of eval'ing a string.
# With eval, a search word (or extra argument) containing quotes, spaces or
# other shell metacharacters was re-parsed by the shell and could break or
# even execute arbitrary text.
RG_ARGS=(
    --fixed-strings
    --field-match-separator '\t'
    --field-context-separator '\t'
    --no-context-separator
    --color=always
    --no-ignore
    --hidden
    --vimgrep
    --no-column
    --glob '!.git/'
    --glob '!.DS_Store'
)
RG_RESULT=$(rg "${RG_ARGS[@]}" -- "${SEARCH_WORD}" "$@")
RG_EXIT_CD=$?
if [[ "${RG_EXIT_CD}" != 0 ]]; then
    echo "Not found"
    exit ${RG_EXIT_CD}
else
    # Number each line as (index/total:ripgrep).
    echo "${RG_RESULT}"| awk -v GREP_RESULT_COUNT="$(echo "${RG_RESULT}"| wc -l| tr -d ' ')" '{ printf "(%3d/%d:ripgrep) %s\n", NR, GREP_RESULT_COUNT, $0 }'
    exit 0
fi
| true |
224af318b1bc7f85cfacf6d02333578d2e3a98fd | Shell | ifsmirnov/dotfiles | /bin/vpn | UTF-8 | 442 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# Toggle the OpenVPN client connection and report its state.
function status {
    # pgrep replaces the fragile `ps aux | grep ... | grep -v grep` chain;
    # -f matches against the full command line, like the original grep did.
    pgrep -f openvpn > /dev/null && echo "VPN is up" || echo "VPN is down"
}
case "${1:-}" in
    start|up)
        sudo openvpn --config /etc/openvpn/openvpn.conf
        sudo systemd-tty-ask-password-agent
        ;;
    stop|down)
        sudo killall -q openvpn
        sleep 0.1
        ;;
    -h|--help|help)
        echo "Usage: vpn [ up/start | down/stop ]"
        ;;
esac
# Always finish by reporting the current state.
status
| true |
1c6e1abcaad03947222a69f1cc6994bbf65d296f | Shell | liqiang76/tinyos_cxl | /apps/breakfast/bacon/testbed/src/rrb.2.sh | UTF-8 | 460 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# Flash the root and non-root testbed node images for the "rrb" test.
# $1 - build label passed through to install.sh; any further arguments
#      suppress the pre-flash blink step.
# NOTE(review): install.sh / blink.sh and the maps/options files are
# resolved relative to this script and the working directory -- run it from
# the project directory.
od=options
sd=$(dirname $0)
label=$1
shift 1
# With no extra arguments, blink the attached devices first.
if [ $# -eq 0 ]
then
    ./$sd/blink.sh
fi
source ./$sd/install.sh maps/map.root $label $od/root.options $od/rrb.options $od/static.options $od/rx.options $od/bw_2.options $od/radiostats.options $od/network.options
source ./$sd/install.sh maps/map.nonroot $label $od/slave.options $od/rrb.options $od/static.options $od/unicast_fast.options $od/bw_2.options $od/radiostats.options $od/network.options
| true |
d1424af1b9350b522cf9ceb69ca437352096de87 | Shell | idleuncle/bdpaas | /easy_deploy/scripts/cmd_docker.sh | UTF-8 | 956 | 3.96875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# -------- cmd_docker_install() --------
# Print the scp commands that would copy the docker-engine / docker-compose
# binaries to every node.
# NOTE(review): the scp commands are only echoed, never executed -- this
# reads like a dry-run/instruction printer; confirm that is intentional.
# Relies on $NODES and $APPUSER being set by the surrounding easy_deploy
# environment.
function cmd_docker_install() {
    DOCKER_ENGINE_DIR="./packages/docker-engine"
    for node in $NODES; do
        echo "-------- $node --------"
        echo "scp $DOCKER_ENGINE_DIR/docker-1.12.1/* $APPUSER@$node:/usr/local/bin/"
        echo "scp $DOCKER_ENGINE_DIR/docker-compose-1.8.0/* $APPUSER@$node:/usr/local/bin/"
    done
}
# -------- cmd_docker_usage() --------
# Print usage help for the `docker` sub-command, then exit successfully.
function cmd_docker_usage() {
    cat <<EOF

Usage: $SCRIPT_NAME docker <command>

  commands: install

Use $SCRIPT_NAME docker <command> --help to find how to use it.

EOF
    exit 0
}
# ======== cmd_docker() ========
# 'easy_deploy docker' command entry: dispatch to the handler for the given
# sub-command (currently only "install"); anything else prints usage.
function cmd_docker() {
    # Default to an empty command so a bare `cmd_docker` shows usage instead
    # of reading an unset $1.
    CMD=${1:-}
    case $CMD in
    install)
        FUNCTION=cmd_docker_install
        ;;
    *)
        FUNCTION=cmd_docker_usage
        ;;
    esac
    # Quote "$@" so arguments containing spaces reach the handler intact.
    ${FUNCTION} "$@"
}
| true |
1a9404065b98d2dbd5c0536c14dda2150504a25c | Shell | ZeldaZach/CS447 | /Parallel-k-NN/tests/test-truth.sh | UTF-8 | 998 | 3.0625 | 3 | [] | no_license | #!/bin/bash
# Exhaustive correctness sweep for the parallel k-NN binary: generate
# training/query fixtures with the helper tools, run ../k-nn with 1-4 cores,
# and check every result with ./verify-results.
# NOTE(review): `exit -1` is not a valid POSIX exit status; bash maps it to
# 255 -- consider `exit 1`. Paths from make_training_file are also expanded
# unquoted, so they must not contain whitespace.
for trainingSize in `seq 2 2 10`;
do
  for dimension in `seq 1 2 10`;
  do
    for k in `seq 2 2 $trainingSize`;
    do
      for numQueries in `seq 1 2 10`;
      do
        rel1=$(./make_training_file $trainingSize $dimension 1)
        abs1=$(realpath $rel1)
        rel2=$(./make_query_file $numQueries $dimension 1 $k)
        abs2=$(realpath $rel2)
        for numCores in `seq 1 4`;
        do
          ../k-nn $numCores $abs1 $abs2 results.dat
          if ! ./verify-results $abs1 $abs2 results.dat; then
            echo "FAILED WITH " $rel1 $rel2
            exit -1
          fi
        done
      done
    done
  done
done
# Larger single-query stress cases, fixed at 8 cores and k=5.
for trainingSize in `seq 10000 20000 100000`;
do
  for dimension in `seq 5 5 25`;
  do
    rel1=$(./make_training_file $trainingSize $dimension 1)
    abs1=$(realpath $rel1)
    rel2=$(./make_query_file 1 $dimension 1 5)
    abs2=$(realpath $rel2)
    ../k-nn 8 $abs1 $abs2 results.dat
    if ! ./verify-results $abs1 $abs2 results.dat; then
      echo "FAILED WITH " $rel1 $rel2
      exit -1
    fi
  done
done
echo "Passed all test cases"
| true |
88bfcc927b0a1792ad6e4aae61f6b340f81550b3 | Shell | brettviren/garpi | /python/garpi/source.sh | UTF-8 | 211 | 3.609375 | 4 | [] | no_license | #!/bin/sh
# Source the given file, print the delimiter line, then dump the full
# environment -- lets a caller diff the environment before/after sourcing.
# $1 - file to source (required)
# $2 - delimiter string printed between sourcing and the env dump
file=${1:-}
delim=${2:-}
if [ -z "$file" ] ; then
    echo "No file given to source"
    exit 1
fi
# Quote the path so files with spaces in their names work.
if [ ! -f "$file" ] ; then
    echo "No such file: $file"
    exit 1
fi
. "$file" && echo "$delim" && env
| true |
23e836c6799743becf9374ad02026cb9f8a1b11f | Shell | very-twi/vesta | /bin/v_list_sys_users | UTF-8 | 3,263 | 3.796875 | 4 | [] | no_license | #!/bin/bash
# info: listing system users
#----------------------------------------------------------#
# Variable&Function #
#----------------------------------------------------------#
# Argument definition: output format, defaulting to "shell" when omitted.
format=${1-shell}
# Importing variables: Vesta global config and shared helper functions
# ($V_USERS, $V_FUNC, check_args, ... come from these).
source $VESTA/conf/vars.conf
source $V_FUNC/shared.func
# Json function
# Emit all system users as one JSON-like object: a "$USER": { field: value }
# entry per directory under $V_USERS, using the global $fields list of
# '$NAME' references chosen by the caller.
# NOTE(review): each user.conf is applied with `eval`, and values are
# written without JSON escaping -- user.conf contents are fully trusted.
json_list_users() {
    echo '{'
    fileds_count=$(echo "$fields" | wc -w)
    # Starting main loop
    for USER in $(ls $V_USERS/); do
        # Reading user data
        user_data=$(cat $V_USERS/$USER/user.conf)
        # Assign key/value config
        for key in $user_data; do
            eval ${key%%=*}=${key#*=}
        done
        # Closing bracket if there already was output
        if [ -n "$data" ]; then
            echo -e ' },'
        fi
        i=1
        for field in $fields; do
            # Resolve the field reference (e.g. '$USER') to its value.
            eval value=$field
            if [ $i -eq 1 ]; then
                # Printing parrent
                (( ++i))
                echo -e "\t\"$value\": {"
            else
                # Printing child
                if [ $i -lt $fileds_count ]; then
                    (( ++i))
                    echo -e "\t\t\"${field//$/}\": \"${value//,/, }\","
                else
                    # Last field: no trailing comma; remember we had output.
                    echo -e "\t\t\"${field//$/}\": \"${value//,/, }\""
                    data=1
                fi
            fi
        done
    done
    # Closing bracket if there was output
    if [ -n "$data" ]; then
        echo -e ' }'
    fi
    # Printing bottom bracket
    echo -e '}'
}
# Shell function
# Print users in a plain columnar listing: an optional header (skipped when
# $nohead is non-empty), then one eval-expanded $fields line per user
# directory in $V_USERS.
shell_list_users() {
    if [ -z "$nohead" ]; then
        # Print brief info
        echo "${fields//$/}"
        for a in $fields; do
            echo -e "--------- \c"
        done
        echo # new line
    fi
    # Starting main loop
    for USER in $(ls $V_USERS/); do
        user_data=$(cat $V_USERS/$USER/user.conf)
        # Assign key/value config
        for key in $user_data; do
            eval ${key%%=*}=${key#*=}
        done
        # Expand the '$NAME' references against the just-assigned values.
        eval echo "$fields"
    done
}
#----------------------------------------------------------#
# Action #
#----------------------------------------------------------#
# Defining fileds to select
# Each entry is a literal '$NAME' reference that the list functions expand
# with eval after loading a user's config.
fields="\$USER \$FNAME \$LNAME \$PACKAGE \$WEB_DOMAINS \$WEB_SSL \$WEB_ALIASES"
fields="$fields \$DATABASES \$MAIL_DOMAINS \$MAIL_BOXES \$MAIL_FORWARDERS"
fields="$fields \$DNS_DOMAINS \$DISK_QUOTA \$BANDWIDTH \$NS \$SHELL \$BACKUPS"
fields="$fields \$WEB_TPL \$SUSPENDED \$CONTACT \$RKEY \$REPORTS \$IP_OWNED"
fields="$fields \$U_DIR_DISK \$U_DISK \$U_BANDWIDTH \$U_WEB_DOMAINS"
fields="$fields \$U_WEB_SSL \$U_DNS_DOMAINS \$U_DATABASES \$U_MAIL_DOMAINS"
fields="$fields \$DATE"
# Listing domains
# "shell" narrows the field list for a brief column view; anything else is
# rejected via check_args (from shared.func).
case $format in
    json) json_list_users ;;
    plain) nohead=1; shell_list_users ;;
    shell) fields='$USER $PACKAGE $U_DISK $U_BANDWIDTH $SUSPENDED $DATE';
           shell_list_users | column -t ;;
    *) check_args '1' '0' '[format]' ;;
esac
#----------------------------------------------------------#
#                           Vesta                           #
#----------------------------------------------------------#
exit
| true |
6034b52ffd08b33aff6aebad62c3ecc05a7ba747 | Shell | nathanjhaveri/vm-setup | /setup-debian-vm.sh | UTF-8 | 1,627 | 2.953125 | 3 | [] | no_license | # Basic setup of debian vm to use locally
# use vim rather than nano
update-alternatives --set editor /usr/bin/vim.basic
# Allow running sudo without password
echo "ALL ALL = (ALL) NOPASSWD: ALL" >> /etc/sudoers
#create user nathan
adduser --disabled-password nathan
# Allow ssh
mkdir /home/nathan/.ssh
echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDWZN8F5IzLRDcAiku79zRVBjJ6TB+jxmK84vJ+cJ5rbFI4hEbDmx28bwA6etuOJ4DvqRFhMfAZcItx+C4PBZDm0DYvtlYdcjwszGGSZjJ9MW1YqvpyRKnprNXzTAzo8zyQTHWSeu9N03V8uDDJ3l7F+yOswKasC/l0hLOlzUEOWkU5vd1ngLsNmSf++TthWfRQ8vewedzGSxIYB0rLiaU5mi24x72VF040L4xhfPnlL4jSokunXJsVbEq9juD752x1roiZZUsrpT+3QPSqi0bkescWWNHnIh4Gugsq2a6pWid9h8Yb/wm2D7rS/PaexY4WQyQcRD5Hyic1T20ltE79 jhaveri@Nathans-MacBook-Air.local" >> /home/nathan/.ssh/authorized_keys
chown -R nathan:nathan /home/nathan/.ssh
chmod 700 /home/nathan/.ssh
chmod 600 /home/nathan/.ssh/authorized_keys
echo "PubkeyAuthentication yes" >> /etc/ssh/sshd_config
## Can login as nathan here
# install basics
sudo apt update -y && sudo apt upgrade -y
# BUG FIX: inline comments after a backslash ended the line continuation
# early, so `pkg-config # ...` ran as its own command instead of being part
# of the install list. The package list must stay comment-free.
# libssl-dev: openssl libraries; pkg-config: so rust can find openssl.
sudo apt install -y \
    curl \
    git \
    build-essential \
    gdb \
    strace \
    htop \
    libssl-dev \
    pkg-config
# Setup github ssh key
ssh-keygen -t ed25519 -C "jhaveri@umich.edu"
eval "$(ssh-agent -s)"
ssh-add ~/.ssh/id_ed25519
echo "copy ssh pub key to github now"
cat ~/.ssh/id_ed25519.pub
# Setup git
git config --global user.email "jhaveri@umich.edu"
git config --global user.name "Nathan Jhaveri"
git config --global init.defaultBranch main
#install rust
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
source $HOME/.cargo/env
cargo install cargo-edit
| true |
4e4aa0f853dba1c2ece1cbd59a4d4e1761a3f3e7 | Shell | marianaplazas/holberton-system_engineering-devops | /0x0F-load_balancer/1-install_load_balancer | UTF-8 | 518 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env bash
# Ha Proxy
# Round-robin HTTP load balancer config appended to haproxy.cfg; the \n/\t
# escape sequences are expanded by sed's `a` (append) command below.
CONFIG="\\\nlisten appname 0.0.0.0:80\n\tmode http\n\tbalance roundrobin\n\toption httpclose\n\toption forwardfor\n\tserver 768-web-01 34.74.134.215 check\n\tserver 768-web-02 35.196.166.20 check\n"
sudo apt-get update
sudo apt-get -y install nginx
# Pin to the 1.5.x series (the \* keeps the glob for apt, not the shell).
sudo apt-get install -y haproxy=1.5\*
# Enable the haproxy init script, keep a backup, then append the listener
# config after the last line ($a) of haproxy.cfg.
sudo sed -i "s/ENABLED=0/ENABLED=1/" /etc/default/haproxy
sudo cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.backup
sudo sed -i "\$a $CONFIG" /etc/haproxy/haproxy.cfg
sudo service haproxy start
| true |
7b488d6be7ec2c731e259892e637809858b054ef | Shell | dfsilva/polling-module | /scripts/subscripts/locales.sh | UTF-8 | 427 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Append this repo's polling localization strings to the matching
# BigBlueButton client locale, skipping locales that already contain them.
for folder in ../../locales/*; do
    # Skip non-directories, including the literal glob pattern that is left
    # behind when ../../locales/ matches nothing.
    [ -d "$folder" ] || continue
    loc=${folder##*/}
    repoLocale="../../locales/$loc/bbbResources.properties"
    target=~/dev/bigbluebutton/bigbluebutton-client/locale/$loc/bbbResources.properties
    # Quote all paths so locale names/paths with spaces cannot break the
    # grep or clobber the wrong file.
    if grep -q "bbb.polling.createPoll" "$target"; then
        echo "Localization for $loc is already applied."
    else
        cat "$repoLocale" >> "$target"
    fi
done
| true |
88767399a6943f31f925243d4d293766563cd722 | Shell | bjzz/oh-my-tools | /shell/afb.sh | UTF-8 | 847 | 3.5 | 4 | [] | no_license | #!/bin/bash
#Android Fast reBuild script
################################# func ############################
# Write a stub Android.mk into $ANDROID_PRODUCT_OUT that pulls in the
# device board makefile and the Qualcomm build scripts. The `\$(...)`
# escape keeps the make-style reference literal in the generated file.
gen_mkfile ()
{
cat <<EOF > $ANDROID_PRODUCT_OUT/Android.mk
#common device
-include \$(TARGET_DEVICE_DIR)/AndroidBoard.mk
#msm8996
-include vendor/qcom/proprietary/common/scripts/Android.mk
EOF
}
####################################### main ################################################
# Job count for the build: one job per CPU core.
cpus=$( grep '^processor' /proc/cpuinfo | wc -l) #default
# BUG FIX: $(NAME) is *command substitution* in shell -- the original tried
# to execute commands called ANDROID_BUILD_TOP / ANDROID_PRODUCT_OUT / cpus.
# These make-style references must be ${NAME} variable expansions.
source "${ANDROID_BUILD_TOP}/build/envsetup.sh" > /dev/null
if [ ! -d "${ANDROID_PRODUCT_OUT}" ]; then
    mkdir -p "${ANDROID_PRODUCT_OUT}"
fi
gen_mkfile
cd "${ANDROID_PRODUCT_OUT}"
case "${1:-}" in
    aboot)
        mm aboot -j"${cpus}";;
    bootimage)
        mm bootimage -j"${cpus}";;
    *)
        echo -e "Android Fast Build What???\n"
        echo "afb.sh aboot"
        echo "afb.sh bootimage"
        ;;
esac
| true |
068bc98d9dab0b34a8323f69181397a6ca9efd89 | Shell | uzura8/server_setup_tool | /amazon_linux2/create_admin_user.sh | UTF-8 | 1,672 | 2.609375 | 3 | [] | no_license | #create_admin_user.sh
# Provision an Amazon Linux 2 admin account: base packages plus bash,
# screen, vim and git configuration written into the admin's home.
# NOTE(review): assumes ADMIN_USER, GIT_USER_EMAIL and GIT_USER_NAME are
# exported by the caller, and that this runs as root -- confirm.
### install basic ###
yum -y groupinstall "Base" "Development tools"
yum -y install screen
### bash setting ###
# Unquoted EOF: ${ADMIN_USER} in the redirection expands now; inside the
# heredoc `\\$` collapses to a literal `\$` in the written file.
cat >> /home/${ADMIN_USER}/.bash_profile <<EOF
export PS1="[\u@\h \W]\\$ "
export EDITOR=vim
alias V='vim -R -'
EOF
source ~/.bash_profile
### screen setting ###
cat > /home/${ADMIN_USER}/.screenrc <<EOF
escape ^Jj
hardstatus alwayslastline "[%02c] %-w%{=b bw}%n %t%{-}%+w"
startup_message off
vbell off
autodetach on
defscrollback 10000
termcapinfo xterm* ti@:te@
EOF
chown ${ADMIN_USER}. /home/${ADMIN_USER}/.screenrc
### vim setting ###
cat > /home/${ADMIN_USER}/.vimrc <<EOF
syntax on
"set number
set enc=utf-8
set fenc=utf-8
set fencs=iso-2022-jp,euc-jp,cp932
set backspace=2
set noswapfile
"set shiftwidth=4
"set tabstop=4
set shiftwidth=2
set tabstop=2
"set expandtab
set hlsearch
set backspace=indent,eol,start
"" for us-keybord
"nnoremap ; :
"nnoremap : ;
"" Remove comment out as you like
"hi Comment ctermfg=DarkGray
EOF
chown ${ADMIN_USER}. /home/${ADMIN_USER}/.vimrc
# Share the same vimrc with root via a symlink.
ln -s /home/${ADMIN_USER}/.vimrc /root/
### git setting
cat > /home/${ADMIN_USER}/.gitconfig <<EOF
[color]
  diff = auto
  status = auto
  branch = auto
  interactive = auto
[alias]
  co = checkout
  st = status
  ci = commit -v
  di = diff
  di-file = diff --name-only
  up = pull --rebase
  br = branch
  ll = log --graph --pretty=full --stat
  l = log --oneline
EOF
# The [user] section is appended separately so the env-supplied identity is
# expanded by the shell (not treated as heredoc text).
echo "[user]" >> /home/${ADMIN_USER}/.gitconfig
echo "  email = ${GIT_USER_EMAIL}" >> /home/${ADMIN_USER}/.gitconfig
echo "  name = ${GIT_USER_NAME}" >> /home/${ADMIN_USER}/.gitconfig
chown ${ADMIN_USER}. /home/${ADMIN_USER}/.gitconfig
ln -s /home/${ADMIN_USER}/.gitconfig /root/
| true |
80b3d67be8f2ce58a6919b2f9c26cf6b97deebb1 | Shell | jketterl/virtualradarserver-docker | /scripts/install-vrs.sh | UTF-8 | 478 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -euo pipefail
cd /tmp
apt-get update
apt-get install -y --no-install-recommends awscli
apt-get clean
# Download and unpack each Virtual Radar Server tarball into /opt/vrs.
# `--fail` makes curl exit non-zero on HTTP errors, so under `set -e` we
# stop with a clear error instead of feeding an HTML error page to tar.
for FILENAME in VirtualRadar.tar.gz VirtualRadar.WebAdminPlugin.tar.gz VirtualRadar.exe.config.tar.gz VirtualRadar.DatabaseWriterPlugin.tar.gz VirtualRadar.CustomContentPlugin.tar.gz; do
    echo "Downloading File: $FILENAME"
    curl --fail -o "$FILENAME" "http://www.virtualradarserver.co.uk/Files/$FILENAME" --silent
    tar -xf "$FILENAME" -C /opt/vrs/
done
| true |
d0d749a99b0e04467fe92e4ea73867e449daaa80 | Shell | aryonp/mongodb3-armhf | /install.sh | UTF-8 | 799 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
FOLDER=mongodb3-bin
#create mongodb user
# BUG FIX: `--gecos` requires an argument, so the original consumed
# `--no-create-home` as the GECOS string (and still created a home dir).
# Also use /bin/false -- an actual no-login shell -- instead of the
# non-existent /etc/false.
sudo adduser --ingroup nogroup --shell /bin/false --disabled-password --gecos "" --no-create-home mongodb
#create temporary folder
sudo mkdir "$FOLDER"
sudo unzip *zip -d "$FOLDER"/
sudo chown -R root:root "$FOLDER"/
sudo chmod -R 0755 "$FOLDER"/
sudo cp -p "$FOLDER"/* /usr/bin
#create log file directory with appropriate owner & permissions
sudo mkdir /var/log/mongodb
sudo chown mongodb:nogroup /var/log/mongodb
#create the DB data directory with convenient access perms
sudo mkdir /var/lib/mongodb
sudo chown mongodb:root /var/lib/mongodb
sudo chmod 0775 /var/lib/mongodb
#move necessary files
sudo mv mongodb.conf /etc/mongodb.conf
sudo mv mongodb.service /lib/systemd/system/
#check service
sudo service mongodb start
sudo service mongodb status
| true |
ad12c3718592ce5cf29c3cc27be5ddad21d1f9ab | Shell | input-output-hk/cardano-js-sdk | /packages/cardano-services/config/.github/scripts/download-all.sh | UTF-8 | 3,298 | 3.4375 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | #!/bin/bash
# Download configuration files from a nominated URL for a nominated network
# If p2p is enabled, create an explicit p2p version, keeping the default aligned with the production
# networks, at least until https://github.com/input-output-hk/ouroboros-network/pull/3844 has been
# included in a cardano-node release.
# Positional arguments: $1 = base URL of the config host, $2 = network name (e.g. mainnet).
CARDANO_CONFIG_URL=$1
CARDANO_NETWORK=$2
# Target layout: network/<net>/{cardano-node,genesis,cardano-db-sync}.
mkdir -p \
network/$CARDANO_NETWORK/cardano-node \
network/$CARDANO_NETWORK/genesis \
network/$CARDANO_NETWORK/cardano-db-sync
# Fetch topology as-is; rewrite the genesis file paths in the node config and
# the node-config path in the db-sync config so they are relative to this layout.
SOURCE_TOPOLOGY=$(wget -qO- $CARDANO_CONFIG_URL/$CARDANO_NETWORK/topology.json)
NODE_CONFIG=$(wget -qO- $CARDANO_CONFIG_URL/$CARDANO_NETWORK/config.json | jq '.ByronGenesisFile = "../genesis/byron.json" | .ShelleyGenesisFile = "../genesis/shelley.json" | .AlonzoGenesisFile = "../genesis/alonzo.json"')
DB_SYNC_CONFIG=$(wget -qO- $CARDANO_CONFIG_URL/$CARDANO_NETWORK/db-sync-config.json | jq '.NodeConfigFile = "../cardano-node/config.json"')
wget -q $CARDANO_CONFIG_URL/$CARDANO_NETWORK/byron-genesis.json -O network/$CARDANO_NETWORK/genesis/byron.json
wget -q $CARDANO_CONFIG_URL/$CARDANO_NETWORK/shelley-genesis.json -O network/$CARDANO_NETWORK/genesis/shelley.json
wget -q $CARDANO_CONFIG_URL/$CARDANO_NETWORK/alonzo-genesis.json -O network/$CARDANO_NETWORK/genesis/alonzo.json
# A "PublicRoots" key in the topology indicates the source config is p2p-enabled.
if [ $(echo $SOURCE_TOPOLOGY | jq 'has("PublicRoots")') = true ];
then
  # First access point is reused below to synthesise a non-p2p topology.
  ACCESS_POINT=$(echo $SOURCE_TOPOLOGY | jq '.PublicRoots[0].publicRoots.accessPoints[0]')
  # Add separate p2p config
  mkdir -p \
  network/${CARDANO_NETWORK}_p2p/cardano-node \
  network/${CARDANO_NETWORK}_p2p/genesis \
  network/${CARDANO_NETWORK}_p2p/cardano-db-sync
  wget -q $CARDANO_CONFIG_URL/$CARDANO_NETWORK/byron-genesis.json -O network/${CARDANO_NETWORK}_p2p/genesis/byron.json
  wget -q $CARDANO_CONFIG_URL/$CARDANO_NETWORK/shelley-genesis.json -O network/${CARDANO_NETWORK}_p2p/genesis/shelley.json
  wget -q $CARDANO_CONFIG_URL/$CARDANO_NETWORK/alonzo-genesis.json -O network/${CARDANO_NETWORK}_p2p/genesis/alonzo.json
  echo $SOURCE_TOPOLOGY | jq '.' > network/${CARDANO_NETWORK}_p2p/cardano-node/topology.json
  echo $NODE_CONFIG | jq '.' > network/${CARDANO_NETWORK}_p2p/cardano-node/config.json
  echo $DB_SYNC_CONFIG | jq '.' > network/${CARDANO_NETWORK}_p2p/cardano-db-sync/config.json
  # Transform defaults to disable p2p
  jq -nj --argjson address $(echo $ACCESS_POINT | jq '.address') --argjson port $(echo $ACCESS_POINT | jq '.port') '{"Producers": [{"addr": $address, "port": $port, "valency": 1 }]}' > network/$CARDANO_NETWORK/cardano-node/topology.json
  # See https://github.com/input-output-hk/cardano-node/blob/0681cdeb07d81b3b088a6c14e703d03751c3d25d/cardano-node/src/Cardano/Node/Tracing/Tracers/Startup.hs#L366
  echo $NODE_CONFIG | jq '.EnableP2P = false | del(.TestEnableDevelopmentNetworkProtocols)'> network/$CARDANO_NETWORK/cardano-node/config.json
  echo $DB_SYNC_CONFIG | jq '.' > network/$CARDANO_NETWORK/cardano-db-sync/config.json
else
  # Source config doesn't have p2p enabled, so no further transformation required
  echo $SOURCE_TOPOLOGY | jq '.' > network/$CARDANO_NETWORK/cardano-node/topology.json
  echo $NODE_CONFIG | jq '.' > network/$CARDANO_NETWORK/cardano-node/config.json
  echo $DB_SYNC_CONFIG | jq '.' > network/$CARDANO_NETWORK/cardano-db-sync/config.json
fi
| true |
0b18c7135c88ce6931ca130973d70fc9fe73fde5 | Shell | cpucortexm/python_IT | /python_interacting _with_os/bash/loop_command.sh | UTF-8 | 1,094 | 4.125 | 4 | [] | no_license | #!/bin/bash
# Retry wrapper: keep invoking the command supplied as $1 until it exits 0,
# sleeping between attempts with a growing delay (0s, 1s, 2s, ...), for a
# maximum of 6 invocations.
#
# Example: ./loop_command.sh ./random_exit.py — random_exit.py exits with a
# random value 0-3; only exit status 0 counts as success, any non-zero status
# triggers a retry. This simulates a command that sometimes succeeds and
# sometimes fails, analogous to:
#   python script.py && echo 'OK' || echo 'Not OK'
# where sys.exit(0) yields 'OK' and any non-zero sys.exit() yields 'Not OK'.
n=0
command=$1
# The command runs unquoted so a multi-word command line is word-split into
# command + arguments; the subshell "(! ...)" negates its exit status.
while (! $command) && (( n <= 5 )); do
  sleep "$n"
  n=$(( n + 1 ))
  echo "Retry #$n"
done
| true |
aef3c5c78e72fbfdd0f67ca169cfbf0af4316463 | Shell | MarkBorcherding/zsh-plugins | /themes/fooberry.zsh-theme | UTF-8 | 1,883 | 3.328125 | 3 | [] | no_license | #!/bin/zsh
# oh-my-zsh prompt theme: "user at host in cwd on branch [markers]" plus
# optional ruby/node version segments and a colourised return-code prompt char.
# Relies on oh-my-zsh helpers (git_prompt_info, git_remote_status) and the
# colour arrays $fg/$FG/$terminfo being initialised — assumed, confirm in host setup.
local user_prompt='%{$terminfo[bold]$fg[blue]%}%n%{$reset_color%}'
local host_prompt='%{$terminfo[bold]$fg[black]%}at %{$FG[208]%}%m%{$reset_color%}'
local pwd_prompt='%{$terminfo[bold]$fg[black]%}in %{$fg[green]%}%~%{$reset_color%}'
PROMPT="$user_prompt $host_prompt ${pwd_prompt}"
# Single-quoted so the command substitutions run at each prompt render
# (requires prompt_subst, presumably enabled by oh-my-zsh — verify).
local git_branch='$(git_prompt_info)$(git_remote_status)%{$reset_color%}'
ZSH_THEME_GIT_PROMPT_PREFIX=" %{$terminfo[bold]$fg[black]%}on %{$fg[magenta]%}"
ZSH_THEME_GIT_PROMPT_SUFFIX="%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_BEHIND_REMOTE="%{$FG[088]%}▼"
ZSH_THEME_GIT_PROMPT_AHEAD_REMOTE="%{$FG[214]%}▲"
ZSH_THEME_GIT_PROMPT_DIVERGED_REMOTE="%{$FG[226]%}⧓"
ZSH_THEME_GIT_PROMPT_DIRTY="%{$fg[red]%}x"
ZSH_THEME_GIT_PROMPT_CLEAN=""
# Emits a marker when the current git repo is mid-bisect/merge/rebase,
# detected via the state files inside the .git directory.
git_mode() {
  local repo_path=$(git rev-parse --git-dir 2>/dev/null)
  if [[ -e "$repo_path/BISECT_LOG" ]]; then
    echo -e " %{$fg[yellow]%}bisecting"
  elif [[ -e "$repo_path/MERGE_HEAD" ]]; then
    echo -e " %{$fg[yellow]%}mergeing"
  elif [[ -e "$repo_path/rebase" || -e "$repo_path/rebase-apply" || -e "$repo_path/rebase-merge" || -e "$repo_path/../.dotest" ]]; then
    echo -e " %{$fg[yellow]%}rebasing"
  fi
}
# NOTE(review): git_mode is a function, not a variable — ${git_mode} expands to
# empty here, so the bisect/merge/rebase marker never appears; presumably
# '$(git_mode)' (single-quoted, like git_branch) was intended. Confirm.
PROMPT+="${git_branch}${git_mode}"
# "r<version>" segment from `ruby -v`, patch level stripped.
ruby_prompt_info(){
  local ruby_version="r$(ruby -v | cut -f 2 -d ' ' | sed 's/p.*//')"
  echo -e "%{$terminfo[bold]$fg[black]%}w/ %{$fg[red]%}${ruby_version}%{$reset_color%}"
}
[[ "${ZSH_THEME_RUBY_VERSION:-Y}" = "Y" ]] && PROMPT+=' $(ruby_prompt_info)'
# "n<version>" segment from `node -v`.
node_prompt_info(){
  local node_version="$(node -v | sed s/v/n/)"
  echo -e "%{$fg[yellow]%}${node_version}%{$reset_color%}"
}
# NOTE(review): `which -s` is a BSD/macOS flag; on Linux this prints an error
# and may misreport node availability — `command -v node` would be portable.
which -s node > /dev/null
[[ "${ZSH_THEME_NODE_VERSION:-Y}" = "Y" && "$?" -eq 0 ]] && PROMPT+=' $(node_prompt_info)'
# Empty hook users can override in their own dotfiles for extra segments.
personal_prompt_things() {}
PROMPT+='$(personal_prompt_things)'
# Prompt char on a new line: black when the last command exited 0, red otherwise.
local return_code='%{$terminfo[bold]%}%(?,%{$fg[black]%},%{$fg[red]%})'
PROMPT+="
${return_code}$ %{$reset_color%}"
| true |
96ee90868982c4578c1580d39bab72529df3d8fe | Shell | blz-mus/HOT-Heat-Orchestration-Template-for-Openstack | /hot template_bash script from comtroller/script/2.2.network.sh | UTF-8 | 2,167 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# script for OpenStack Orchestration
# Interactive menu: create or delete a Heat network stack via the `heat` CLI.
echo -e "*** script for OpenStack Orchestration ***** \n"
echo -e "* 1: Add a network stack \n"
echo -e "* 2: Delete a network stack \n"
echo -e "* \n"
echo -e "**************************************************** \n"
read -p "Please select your choice :" choice
# NOTE(review): "[ A != 1 ] || [ A != 2 ]" is always true (no value equals both),
# so this menu loops forever — possibly intentional (re-show menu), but a quit
# option or "&&" was probably intended. Also, unquoted $choice makes the test
# fail with a syntax error when the user just presses Enter. Confirm intent.
while [ $choice != 1 ] || [ $choice != 2 ]
do
case $choice in
"1")
# Create a stack: prompt for stack name, network name and CIDR, then
# instantiate the template and list stacks after a short wait.
echo -e "Adding a network \n"
echo -e "stack list : \n"
heat stack-list
read -p "Please put the name of your stack :" p_stack_name
read -p "Please put the name of your network name :" p_network_name
read -p "Please put your network CIDR like 192.168.1.0/24 :" p_net_cidr
heat stack-create -f template/2.2.template_network.yaml -P network_name=$p_network_name -P network_cidr=$p_net_cidr $p_stack_name
sleep 6
heat stack-list
;;
"2")
# Delete a stack by name, then re-list.
echo -e "Deleting network stack \n"
echo -e "stack list : \n"
heat stack-list
read -p "Please put the name of your network stack :" p_stack_name
heat stack-delete $p_stack_name
sleep 3
heat stack-list
;;
*)
echo "other choice"
;;
esac
# Re-display the menu and prompt again for the next iteration.
echo -e "*** script for OpenStack Orchestration ***** \n"
echo -e "* 1: Add a network stack \n"
echo -e "* 2: Delete a network stack \n"
echo -e "* \n"
echo -e "**************************************************** \n"
read -p "Please select your choice :" choice
done
| true |
c38f9d2da879de991f9008e548b014480c684d11 | Shell | jmdjr/sdf-mud | / sdf-mud/splint.sh | UTF-8 | 108 | 2.78125 | 3 | [] | no_license | a=*.c
# NOTE(review): $a holds the literal string "*.c" (globs do not expand in
# assignments); this substitution therefore never matches " sha256.c " and
# b stays "*.c" — the unquoted echo below then globs it into the .c file list.
# Presumably the intent was to exclude sha256.c from linting; confirm.
b="${a/ sha256.c /i }"
echo $b
# Run splint on each .c file, pausing for a keypress between files.
for f in *.c
do
splint $f
echo "press key to continue"
read var1
done
| true |
3c4cb13178d90b82da2f26bca18fede9754cb0fb | Shell | Github1886/Raspbarry_Tensorflow_Robot | /run-tensorflow-service.sh | UTF-8 | 362 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# A script to run the TensorFlow service.

# Locate the Python 3 interpreter. `command -v` is the portable shell builtin
# (replaces the external `which`); empty output means python3 is not installed.
# The previous `PYTHON_BIN=\`which python3\`` + `$?` check worked only because an
# assignment's status is that of its command substitution — this is explicit.
PYTHON_BIN=$(command -v python3)
if [ -z "$PYTHON_BIN" ]; then
    echo "Python 3 not found, quit" >&2
    exit 1
fi

# Image used to warm up the model right after the service starts.
WARM_UP_IMAGE_FILE_PATH=/home/pi/tensorflow_model/cropped_panda.jpg

# start the service (blocks until the service process exits)
"$PYTHON_BIN" tensorflow_service.py --model_dir /home/pi/tensorflow_model --warm_up_image_file "$WARM_UP_IMAGE_FILE_PATH"
| true |
4f12c956edce2e14eeed3628c955791480fd3d56 | Shell | mef/pentaho-standardised-git-repo-setup | /initialise-repo.sh | UTF-8 | 26,161 | 3.953125 | 4 | [] | no_license | #!/bin/bash
## ~~~~~~~~~~~~~~~~~~~~~~~~~ DO NOT CHANGE ~~~~~~~~~~~~~~~~~~~~~~~~~~~##
## ______________________ ##
# Print full usage help and abort when the script is invoked without arguments.
if [ $# -eq 0 ] || [ -z "$1" ]
then
  echo "ERROR: Not all mandatory arguments supplied, please supply environment and/or job arguments"
  echo
  echo "Usage: initialise-repo.sh ..."
  echo "Creates a basic folder structure for a Pentaho code or config repository"
  echo
  echo "Mandatory arguments"
  echo
  echo "-a PSGRS_ACTION: Choose number"
  echo "   (1) Project Repo with Common Config and Modules"
  echo "   (2) Standalone Project and Config (No Common Artefacts)"
  echo "   pdi_module"
  echo "   pdi_module_repo"
  echo "   project_code"
  echo "   project_config"
  echo "   standalone_project_config"
  echo "   common_code"
  echo "   common_config"
  echo "   project_docu"
  echo "   common_docu"
  echo " "
  echo "Mandatory arguments:"
  echo " "
  echo "-g GROUP NAME: Full Project Name, e.g. world-wide-trading"
  echo "   Lower case, only letters allowed, no underscores, dashes etc."
  echo "   Minimum of 3 to a maximum of 20 letters."
  echo "-p PROJECT NAME: Project name abbreviation, e.g. wwt"
  echo "   Lower case, only letters allowed, no underscores, dashes etc."
  echo "   Minimum of 3 to a maximum of 20 letters."
  echo "-e ENVIRONMENT: Name of the environment: dev, test, prod or similiar. "
  echo "   Lower case, only letters allowed, no underscores, dashes etc"
  echo "   Minimum of 3 to a maximum of 10 letters."
  echo "-s STORAGE TYPE: Which type of PDI storage type to use."
  echo "   Possible values: file-based, file-repo. Not supported: db-repo, ee-repo"
  echo "-w WEB-SPOON: Optional. If you intend to run everything within a WebSpoon Docker container."
  echo "   For now only relevant if you use the file-based PDI repository."
  echo "   Possible values: yes"
  echo ""
  echo "Sample usage:"
  echo "initialise-repo.sh -a 1 -g mysampleproj -p mys -e dev -s file-repo -w yes"
  echo "initialise-repo.sh -a 2 -g mysampleproj -p msp -e dev -s file-repo"
  echo "initialise-repo.sh -a standalone_project_config -g mysampleproj -p msp -e dev -s file-based"
  echo ""
  echo "exiting ..."
  exit 1
fi
# Parse and validate the command-line options. Validated values are stored in
# PSGRS_* variables (group/project/env are exported for the functions below
# and for envsubst-based templating).
while getopts ":a:g:p:e:s:w:" opt; do
  case $opt in
    a) PSGRS_ACTION="$OPTARG"
       echo "Submitted PSGRS_ACTION value: ${PSGRS_ACTION}"
       ;;
    g) export PSGRS_GROUP_NAME="$OPTARG"
       echo "Submitted project name value: ${PSGRS_GROUP_NAME}"
       # lower-case letters and dashes only, 3-40 characters
       if [[ ! ${PSGRS_GROUP_NAME} =~ ^[a-z\-]{3,40}$ ]]; then
         echo "Unsupported group name!"
         echo "Lower case, only letters and dashes allowed, no underscores etc."
         echo "Minimum of 3 to a maximum of 40 characters."
         exit 1
       fi
       ;;
    p) export PSGRS_PROJECT_NAME="$OPTARG"
       echo "Submitted project name value: ${PSGRS_PROJECT_NAME}"
       # lower-case letters only, 3-20 characters
       if [[ ! ${PSGRS_PROJECT_NAME} =~ ^[a-z]{3,20}$ ]]; then
         echo "Unsupported project name!"
         echo "Lower case, only letters allowed, no underscores, dashes, spaces etc."
         echo "Minimum of 3 to a maximum of 20 characters."
         exit 1
       fi
       ;;
    e) export PSGRS_ENV="$OPTARG"
       echo "Submitted environment value: ${PSGRS_ENV}"
       # lower-case letters only, 3-10 characters
       if [[ ! ${PSGRS_ENV} =~ ^[a-z]{3,10}$ ]]; then
         echo "Unsupported environment name!"
         echo "Lower case, only letters allowed, no underscores, dashes, spaces etc."
         echo "Minimum of 3 to a maximum of 10 letters."
         exit 1
       fi
       ;;
    s) PSGRS_PDI_STORAGE_TYPE="$OPTARG"
       echo "Submitted environment value: ${PSGRS_PDI_STORAGE_TYPE}"
       # check that supplied value is in the list of possible values
       # validate() { echo "files file-repo ee-repo" | grep -F -q -w "${PSGRS_PDI_STORAGE_TYPE}"; }
       LIST_CHECK=$(echo "file-based file-repo ee-repo" | grep -F -q -w "${PSGRS_PDI_STORAGE_TYPE}" && echo "valid" || echo "invalid")
       echo "List check: ${LIST_CHECK}"
       if [ ${LIST_CHECK} = "invalid" ]; then
         echo "Unsupported storage type!"
         echo "Possible values: file-based, file-repo, ee-repo"
         exit 1
       fi
       ;;
    w) PSGRS_PDI_WEBSPOON_SUPPORT="$OPTARG"
       echo "Submitted WebSpoon Support value: ${PSGRS_PDI_WEBSPOON_SUPPORT}"
       ;;
    \?)
      echo "Invalid option -$OPTARG" >&2
      exit 1
      ;;
  esac
done
# Example Usage:
# /home/dsteiner/git/pentaho-standardised-git-repo-setup/initialise-repo.sh -a standalone_project_config -g mysampleproj -p mys -e dev -s file-based
# /home/dsteiner/git/pentaho-standardised-git-repo-setup/initialise-repo.sh -a 1 -g mysampleproj -p mys -e dev -s file-based
# /home/dsteiner/git/pentaho-standardised-git-repo-setup/initialise-repo.sh -a 1 -g mysampleproj -p mys -e dev -s file-repo
# /home/dsteiner/git/pentaho-standardised-git-repo-setup/initialise-repo.sh -a 1 -g mysampleproj -p mys -e dev -s file-repo -w yes
# Main Script
# Remember where we were invoked from and where this script (and its
# artefacts/ and config/ folders) lives.
PSGRS_WORKING_DIR=`pwd`
PSGRS_SHELL_DIR=$(dirname $0)
# create top level folder to not pollute any other folder
# make sure group name value is set
if [ -z ${PSGRS_GROUP_NAME} ]; then
  echo "Not all required arguments were supplied. Group Name value missing."
  echo "exiting ..."
  exit 1
fi
# check if directory already exists
# otherwise create it
if [ ! -d "${PSGRS_GROUP_NAME}" ]; then
  mkdir ${PSGRS_GROUP_NAME}
fi
cd ${PSGRS_GROUP_NAME}
# All repos created below live under <working dir>/<group name>.
export PSGRS_BASE_DIR=${PSGRS_WORKING_DIR}/${PSGRS_GROUP_NAME}
echo "=============="
echo "PSGRS SHELL DIR: " ${PSGRS_SHELL_DIR}
echo "PSGRS BASE DIR: " ${PSGRS_BASE_DIR}
# Source config settings (e.g. PSGRS_MODULES_GIT_REPO_URL used by project_code)
source ${PSGRS_SHELL_DIR}/config/settings.sh
# Initialise the shared PDI modules Git repository under
# ${PSGRS_BASE_DIR}/modules, seeding it with the module_1 sample module.
# Skipped if the folder already exists. Globals read: PSGRS_ACTION,
# PSGRS_BASE_DIR, PSGRS_SHELL_DIR. Side effects: creates a git repo, commits.
function pdi_module {
  # check if required parameter values are available
  if [ -z ${PSGRS_ACTION} ]; then
    echo "Not all required arguments were supplied. Required:"
    echo "-a <PSGRS_ACTION>"
    echo "exiting ..."
    exit 1
  fi
  echo "================PDI MODULES===================="
  PDI_MODULES_DIR=${PSGRS_BASE_DIR}/modules
  echo "PDI_MODULES_DIR: ${PDI_MODULES_DIR}"
  if [ ! -d "${PDI_MODULES_DIR}" ]; then
    echo "Creating and pointing to default git branch"
    git checkout -b dev
    echo "Creating PDI modules folder ..."
    mkdir ${PDI_MODULES_DIR}
    cd ${PDI_MODULES_DIR}
    echo "Initialising Git Repo ..."
    git init .
    # git hooks wont work here since the directory structure is different
    # echo "Adding Git hooks ..."
    # cp ${PSGRS_SHELL_DIR}/artefacts/git/hooks/* ${PDI_MODULES_DIR}/.git/hooks
    # we have to create a file so that the master branch is created
    echo "creating README file ..."
    touch readme.md
    echo "adding module_1 sample module ..."
    cp -r ${PSGRS_SHELL_DIR}/artefacts/pdi/repo/module_1 .
    git add --all
    git commit -am "initial commit"
  fi
}
# Initialise the project's code Git repository
# (${PSGRS_BASE_DIR}/<project>-code): folder skeleton, git hooks (pre-commit
# template placeholders filled via perl), sample master job, and the shared
# PDI modules repo added as a git submodule. Skipped if the folder exists.
# Globals read: PSGRS_ACTION, PSGRS_PROJECT_NAME, PSGRS_PDI_STORAGE_TYPE,
# PSGRS_BASE_DIR, PSGRS_SHELL_DIR, PSGRS_MODULES_GIT_REPO_URL (from settings.sh).
function project_code {
  # check if required parameter values are available
  if [ -z ${PSGRS_ACTION} ] || [ -z ${PSGRS_PROJECT_NAME} ] || [ -z ${PSGRS_PDI_STORAGE_TYPE} ]; then
    echo "Not all required arguments were supplied. Required:"
    echo "-a <PSGRS_ACTION>"
    echo "-p <Project Name>"
    echo "-s <PDI Storage Type>"
    echo "exiting ..."
    exit 1
  fi
  echo "================PROJECT CODE===================="
  PROJECT_CODE_DIR=${PSGRS_BASE_DIR}/${PSGRS_PROJECT_NAME}-code
  echo "PROJECT_CODE_DIR: ${PROJECT_CODE_DIR}"
  if [ ! -d "${PROJECT_CODE_DIR}" ]; then
    echo "Creating project code folder ..."
    echo "location: ${PROJECT_CODE_DIR}"
    mkdir ${PROJECT_CODE_DIR}
    cd ${PROJECT_CODE_DIR}
    echo "Initialising Git Repo ..."
    git init .
    echo "Adding Git hooks ..."
    cp ${PSGRS_SHELL_DIR}/artefacts/git/hooks/* ${PROJECT_CODE_DIR}/.git/hooks
    cp ${PSGRS_SHELL_DIR}/config/settings.sh ${PROJECT_CODE_DIR}/.git/hooks
    # fill the {{ ... }} placeholders in the pre-commit hook template in place
    perl -0777 \
      -pe "s@\{\{ IS_CONFIG \}\}@N@igs" \
      -i ${PROJECT_CODE_DIR}/.git/hooks/pre-commit
    if [ ${PSGRS_PDI_STORAGE_TYPE} = "file-based" ]; then
      perl -0777 \
        -pe "s@\{\{ IS_REPO_BASED \}\}@N@igs" \
        -i ${PROJECT_CODE_DIR}/.git/hooks/pre-commit
    else
      perl -0777 \
        -pe "s@\{\{ IS_REPO_BASED \}\}@Y@igs" \
        -i ${PROJECT_CODE_DIR}/.git/hooks/pre-commit
    fi
    echo "Creating and pointing to default git branch"
    git checkout -b dev
    echo "Creating basic folder structure ..."
    mkdir -p pdi/repo/${PSGRS_PROJECT_NAME}
    mkdir -p pdi/sql/ddl
    mkdir -p pentaho-server/repo
    mkdir -p pentaho-server/metadata
    mkdir -p pentaho-server/mondrian
    mkdir -p pentaho-server/prd
    mkdir -p shell-scripts
    # adding file so folders can be committed
    touch pdi/repo/${PSGRS_PROJECT_NAME}/.gitignore
    touch pdi/sql/ddl/.gitignore
    touch pentaho-server/repo/.gitignore
    touch pentaho-server/metadata/.gitignore
    touch pentaho-server/mondrian/.gitignore
    touch pentaho-server/prd/.gitignore
    touch shell-scripts/this-folder-contains-non-environment-specific-shell-files.md
    # seed the project with a renamed copy of the sample master job
    cp ${PSGRS_SHELL_DIR}/artefacts/pdi/repo/jb_master.kjb \
       ${PROJECT_CODE_DIR}/pdi/repo/${PSGRS_PROJECT_NAME}
    mv ${PROJECT_CODE_DIR}/pdi/repo/${PSGRS_PROJECT_NAME}/jb_master.kjb \
       ${PROJECT_CODE_DIR}/pdi/repo/${PSGRS_PROJECT_NAME}/jb_${PSGRS_PROJECT_NAME}_master.kjb
    echo "Creating basic README file ..."
    echo "Documentation can be found in the dedicated documentation Git repo called ${PSGRS_PROJECT_NAME}-documentation" > readme.md
    if [ ${PSGRS_PDI_STORAGE_TYPE} = "file-repo" ]; then
      echo "Adding kettle db connection files ..."
      cp -r ${PSGRS_SHELL_DIR}/artefacts/pdi/repo/*.kdb pdi/repo
      perl -0777 \
        -pe "s@\{\{ VAR_DB_CONNECTION_NAME \}\}@sample_db_connection@igs" \
        -i ${PROJECT_CODE_DIR}/pdi/repo/db_connection_template.kdb
      perl -0777 \
        -pe "s@\{\{ PSGRS_PROJECT_NAME \}\}@${PSGRS_PROJECT_NAME}@igs" \
        -i ${PROJECT_CODE_DIR}/pdi/repo/${PSGRS_PROJECT_NAME}/jb_${PSGRS_PROJECT_NAME}_master.kjb
    fi
    if [ ${PSGRS_PDI_STORAGE_TYPE} = "file-based" ]; then
      # nothing to do: shared.xml is part of .kettle, which lives in the config repo
      perl -0777 \
        -pe "s@\{\{ PSGRS_PROJECT_NAME \}\}@@igs" \
        -i ${PROJECT_CODE_DIR}/pdi/repo/${PSGRS_PROJECT_NAME}/jb_${PSGRS_PROJECT_NAME}_master.kjb
    fi
    echo "Adding pdi modules as a git submodule ..."
    git submodule add -b master ${PSGRS_MODULES_GIT_REPO_URL} pdi/repo/modules
    git submodule init
    git submodule update
    # echo "Setting branch for submodule ..."
    # cd pdi/repo/modules
    # git checkout master
    # committing new files
    git add --all
    git commit -am "initial commit"
    cd ${PROJECT_CODE_DIR}
    # enable pre-commit hook
    chmod 700 ${PROJECT_CODE_DIR}/.git/hooks/pre-commit
    chmod 700 ${PROJECT_CODE_DIR}/.git/hooks/settings.sh
  fi
}
# Initialise the environment-specific project config Git repository
# (${PSGRS_BASE_DIR}/<project>-config-<env>): git hooks, config folder
# skeleton, wrapper/run shell scripts and properties files (templated via
# perl and envsubst), plus RPM packaging utilities. Skipped if folder exists.
# Globals read: PSGRS_ACTION, PSGRS_PROJECT_NAME, PSGRS_ENV,
# PSGRS_PDI_STORAGE_TYPE, PSGRS_BASE_DIR, PSGRS_SHELL_DIR.
# Sets PROJECT_CONFIG_DIR (also used by standalone_project_config).
function project_config {
  # check if required parameter values are available
  if [ -z ${PSGRS_ACTION} ] || [ -z ${PSGRS_PROJECT_NAME} ] || [ -z ${PSGRS_ENV} ] || [ -z ${PSGRS_PDI_STORAGE_TYPE} ]; then
    echo "Not all required arguments were supplied. Required:"
    echo "-a <PSGRS_ACTION>"
    echo "-p <Project Name>"
    echo "-e <Environment>"
    echo "-s <PDI Storage Type>"
    echo "exiting ..."
    exit 1
  fi
  echo "================PROJECT CONFIG=================="
  PROJECT_CONFIG_DIR=${PSGRS_BASE_DIR}/${PSGRS_PROJECT_NAME}-config-${PSGRS_ENV}
  echo "PROJECT_CONFIG_DIR: ${PROJECT_CONFIG_DIR}"
  if [ ! -d "${PROJECT_CONFIG_DIR}" ]; then
    echo "Creating project config folder ..."
    echo "location: ${PROJECT_CONFIG_DIR}"
    mkdir ${PROJECT_CONFIG_DIR}
    cd ${PROJECT_CONFIG_DIR}
    echo "Initialising Git Repo ..."
    git init .
    echo "Creating and pointing to default git branch"
    git checkout -b master
    echo "Adding Git hooks ..."
    cp ${PSGRS_SHELL_DIR}/artefacts/git/hooks/* ${PROJECT_CONFIG_DIR}/.git/hooks
    cp ${PSGRS_SHELL_DIR}/config/settings.sh ${PROJECT_CONFIG_DIR}/.git/hooks
    # fill the {{ ... }} placeholders in the pre-commit hook template in place
    perl -0777 \
      -pe "s@\{\{ IS_CONFIG \}\}@Y@igs" \
      -i ${PROJECT_CONFIG_DIR}/.git/hooks/pre-commit
    perl -0777 \
      -pe "s@\{\{ IS_REPO_BASED \}\}@N@igs" \
      -i ${PROJECT_CONFIG_DIR}/.git/hooks/pre-commit
    echo "Creating basic folder structure ..."
    # mkdir -p pdi/.kettle -> standalone project only
    mkdir -p pdi/metadata
    mkdir -p pdi/properties
    mkdir -p pdi/schedules
    mkdir -p pdi/shell-scripts
    mkdir -p pdi/test-data
    mkdir -p pentaho-server/connections
    # adding file so that the folders can be commited
    touch pdi/metadata/.gitignore
    touch pdi/properties/.gitignore
    touch pdi/schedules/.gitignore
    touch pdi/shell-scripts/.gitignore
    touch pdi/test-data/.gitignore
    touch pentaho-server/connections/.gitignore
    echo "Adding essential shell files ..."
    cp ${PSGRS_SHELL_DIR}/artefacts/project-config/wrapper.sh \
       ${PROJECT_CONFIG_DIR}/pdi/shell-scripts
    perl -0777 \
      -pe "s@\{\{ PSGRS_PROJECT_NAME \}\}@${PSGRS_PROJECT_NAME}@igs" \
      -i ${PROJECT_CONFIG_DIR}/pdi/shell-scripts/wrapper.sh
    # copy the generic run script and specialise it for this project's master job
    cp ${PSGRS_SHELL_DIR}/artefacts/project-config/run_jb_name.sh \
       ${PROJECT_CONFIG_DIR}/pdi/shell-scripts
    mv ${PROJECT_CONFIG_DIR}/pdi/shell-scripts/run_jb_name.sh \
       ${PROJECT_CONFIG_DIR}/pdi/shell-scripts/run_jb_${PSGRS_PROJECT_NAME}_master.sh
    perl -0777 \
      -pe "s@your_project_name@${PSGRS_PROJECT_NAME}@igs" \
      -i ${PROJECT_CONFIG_DIR}/pdi/shell-scripts/run_jb_${PSGRS_PROJECT_NAME}_master.sh
    perl -0777 \
      -pe "s@jb_name@jb_${PSGRS_PROJECT_NAME}_master@igs" \
      -i ${PROJECT_CONFIG_DIR}/pdi/shell-scripts/run_jb_${PSGRS_PROJECT_NAME}_master.sh
    cp ${PSGRS_SHELL_DIR}/artefacts/utilities/start-webspoon.sh \
       ${PROJECT_CONFIG_DIR}/pdi/shell-scripts
    chmod 700 ${PROJECT_CONFIG_DIR}/pdi/shell-scripts/*.sh
    echo "Adding essential properties files ..."
    # envsubst expands the exported PSGRS_* variables inside the template
    envsubst \
      < ${PSGRS_SHELL_DIR}/artefacts/project-config/project.properties \
      > ${PROJECT_CONFIG_DIR}/pdi/properties/project.properties
    # rename project properies file
    mv ${PROJECT_CONFIG_DIR}/pdi/properties/project.properties \
       ${PROJECT_CONFIG_DIR}/pdi/properties/${PSGRS_PROJECT_NAME}.properties
    touch ${PROJECT_CONFIG_DIR}/pdi/properties/jb_${PSGRS_PROJECT_NAME}_master.properties
    # copy deployment scripts across
    # [OPEN]
    # rpm script
    mkdir -p utilities/build-rpm
    cp \
      ${PSGRS_SHELL_DIR}/artefacts/git/package-git-repo.sh \
      ${PROJECT_CONFIG_DIR}/utilities/build-rpm
    cp \
      ${PSGRS_SHELL_DIR}/config/settings.sh \
      ${PROJECT_CONFIG_DIR}/utilities/build-rpm
    envsubst \
      < ${PSGRS_SHELL_DIR}/artefacts/utilities/build-rpm/template.spec \
      > ${PROJECT_CONFIG_DIR}/utilities/build-rpm/template.spec
    echo "Creating basic README file ..."
    echo "Project specific configuration for ${PSGRS_ENV} environment." > ${PROJECT_CONFIG_DIR}/readme.md
    # commit new files
    git add --all
    git commit -am "initial commit"
    # enable pre-commit hook
    chmod 700 ${PROJECT_CONFIG_DIR}/.git/hooks/pre-commit
    chmod 700 ${PROJECT_CONFIG_DIR}/.git/hooks/settings.sh
  fi
}
# Build a project config repo for projects WITHOUT a common config repo:
# runs project_config first, then adds the .kettle artefacts (kettle.properties,
# repositories.xml or shared.xml depending on storage type, .spoonrc) and the
# environment-variable bootstrap script directly into the project config repo.
# Relies on PROJECT_CONFIG_DIR being set by project_config. Sources the
# generated set-env-variables.sh at the end so KETTLE_HOME is defined.
function standalone_project_config {
  # This caters for projects that do not need a common project or config
  # check if required parameter values are available
  if [ -z ${PSGRS_ACTION} ] || [ -z ${PSGRS_PROJECT_NAME} ] || [ -z ${PSGRS_ENV} ] || [ -z ${PSGRS_PDI_STORAGE_TYPE} ]; then
    echo "Not all required arguments were supplied. Required:"
    echo "-a <PSGRS_ACTION>"
    echo "-p <Project Name>"
    echo "-e <Environment>"
    echo "-s <PDI Storage Type>"
    echo "exiting ..."
    exit 1
  fi
  project_config
  mkdir -p pdi/.kettle
  cp ${PSGRS_SHELL_DIR}/artefacts/pdi/.kettle/.gitignore \
     ${PROJECT_CONFIG_DIR}/pdi/.kettle
  echo "Adding essential shell files ..."
  # PSGRS_KETTLE_HOME is consumed by envsubst inside the template below
  export PSGRS_KETTLE_HOME=${PROJECT_CONFIG_DIR}/pdi
  envsubst \
    < ${PSGRS_SHELL_DIR}/artefacts/common-config/set-env-variables.sh \
    > ${PROJECT_CONFIG_DIR}/pdi/shell-scripts/set-env-variables.sh
  # add_kettle_artefacts
  echo "Adding .kettle files for ${PSGRS_PDI_STORAGE_TYPE} ..."
  cp ${PSGRS_SHELL_DIR}/artefacts/pdi/.kettle/kettle.properties \
     ${PROJECT_CONFIG_DIR}/pdi/.kettle
  if [ ${PSGRS_PDI_STORAGE_TYPE} = 'file-repo' ]; then
    # file repository: generate repositories.xml pointing at the code repo
    export PSGRS_PDI_REPO_NAME=${PSGRS_PROJECT_NAME}
    export PSGRS_PDI_REPO_DESCRIPTION="This is the repo for the ${PSGRS_PROJECT_NAME} project"
    if [ "${PSGRS_PDI_WEBSPOON_SUPPORT}" = "yes" ]; then
      # we mount the project code repo into the Docker container under /root/my-project
      export PSGRS_PDI_REPO_PATH=/root/${PSGRS_GROUP_NAME}/${PSGRS_PROJECT_NAME}-code/pdi/repo
    else
      export PSGRS_PDI_REPO_PATH=${PSGRS_BASE_DIR}/${PSGRS_PROJECT_NAME}-code/pdi/repo
    fi
    envsubst \
      < ${PSGRS_SHELL_DIR}/artefacts/pdi/.kettle/repositories-file.xml \
      > ${PROJECT_CONFIG_DIR}/pdi/.kettle/repositories.xml
  fi
  if [ ${PSGRS_PDI_STORAGE_TYPE} = "file-based" ]; then
    # file-based storage keeps shared connections in shared.xml instead
    cp ${PSGRS_SHELL_DIR}/artefacts/pdi/.kettle/shared.xml \
       ${PROJECT_CONFIG_DIR}/pdi/.kettle
  fi
  cp ${PSGRS_SHELL_DIR}/artefacts/pdi/.kettle/.spoonrc \
     ${PROJECT_CONFIG_DIR}/pdi/.kettle
  # disable pre-commit hook
  chmod 400 ${PROJECT_CONFIG_DIR}/.git/hooks/pre-commit
  chmod 400 ${PROJECT_CONFIG_DIR}/.git/hooks/settings.sh
  # commit new files
  git add --all
  git commit -am "initial commit"
  # enable pre-commit hook
  chmod 700 ${PROJECT_CONFIG_DIR}/.git/hooks/pre-commit
  chmod 700 ${PROJECT_CONFIG_DIR}/.git/hooks/settings.sh
  echo ""
  echo "==============================="
  echo ""
  echo -e "\e[34m\e[47mIMPORTANT\e[0m"
  echo "Amend the following configuration file:"
  echo "${PROJECT_CONFIG_DIR}/pdi/shell-scripts/set-env-variables.sh"
  echo ""
  echo "Before using Spoon, source this file:"
  echo "source ${PROJECT_CONFIG_DIR}/pdi/shell-scripts/set-env-variables.sh"
  echo "==============================="
  echo ""
  # echo "Running set-env-variables.sh now so that at least KETTLE_HOME is defined."
  # echo "You can start PDI Spoon now if working on a dev machine."
  echo ""
  source ${PROJECT_CONFIG_DIR}/pdi/shell-scripts/set-env-variables.sh
}
# Initialise the environment-specific COMMON config Git repository
# (${PSGRS_BASE_DIR}/common-config-<env>) shared by all projects of the group:
# .kettle artefacts (kettle.properties, repositories.xml or shared.xml,
# .spoonrc), git hooks and the env-variable bootstrap script. Skipped if the
# folder exists. Sources the generated set-env-variables.sh at the end so
# KETTLE_HOME is defined in the current shell.
function common_config {
  # check if required parameter values are available
  if [ -z ${PSGRS_ACTION} ] || [ -z ${PSGRS_ENV} ] || [ -z ${PSGRS_PDI_STORAGE_TYPE} ]; then
    echo "Not all required arguments were supplied. Required:"
    echo "-a <PSGRS_ACTION>"
    echo "-e <Environment>"
    echo "-s <PDI Storage Type>"
    echo "exiting ..."
    exit 1
  fi
  echo "==========COMMON CONFIG=================="
  COMMON_CONFIG_DIR=${PSGRS_BASE_DIR}/common-config-${PSGRS_ENV}
  echo "COMMON_CONFIG_DIR: ${COMMON_CONFIG_DIR}"
  if [ ! -d "${COMMON_CONFIG_DIR}" ]; then
    echo "Creating common config folder ..."
    echo "location: ${COMMON_CONFIG_DIR}"
    mkdir ${COMMON_CONFIG_DIR}
    cd ${COMMON_CONFIG_DIR}
    echo "Initialising Git Repo ..."
    git init .
    echo "Creating and pointing to default git branch"
    git checkout -b master
    echo "Creating basic folder structure ..."
    mkdir -p pdi/.kettle
    cp ${PSGRS_SHELL_DIR}/artefacts/pdi/.kettle/.gitignore \
       ${COMMON_CONFIG_DIR}/pdi/.kettle
    mkdir -p pdi/shell-scripts
    echo "Adding Git hooks ..."
    cp ${PSGRS_SHELL_DIR}/artefacts/git/hooks/* ${COMMON_CONFIG_DIR}/.git/hooks
    cp ${PSGRS_SHELL_DIR}/config/settings.sh ${COMMON_CONFIG_DIR}/.git/hooks
    # fill the {{ ... }} placeholders in the pre-commit hook template in place
    perl -0777 \
      -pe "s@\{\{ IS_CONFIG \}\}@Y@igs" \
      -i ${COMMON_CONFIG_DIR}/.git/hooks/pre-commit
    perl -0777 \
      -pe "s@\{\{ IS_REPO_BASED \}\}@N@igs" \
      -i ${COMMON_CONFIG_DIR}/.git/hooks/pre-commit
    # add_kettle_artefacts
    echo "Adding .kettle files ..."
    cp ${PSGRS_SHELL_DIR}/artefacts/pdi/.kettle/kettle.properties \
       pdi/.kettle
    if [ ${PSGRS_PDI_STORAGE_TYPE} = "file-repo" ]; then
      # file repository: generate repositories.xml pointing at the code repo
      export PSGRS_PDI_REPO_NAME=${PSGRS_PROJECT_NAME}
      export PSGRS_PDI_REPO_DESCRIPTION="This is the repo for the ${PSGRS_PROJECT_NAME} project"
      if [ "${PSGRS_PDI_WEBSPOON_SUPPORT}" = "yes" ]; then
        # we mount the project code repo into the Docker container under /root/my-project
        export PSGRS_PDI_REPO_PATH=/root/${PSGRS_GROUP_NAME}/${PSGRS_PROJECT_NAME}-code/pdi/repo
      else
        export PSGRS_PDI_REPO_PATH=${PSGRS_BASE_DIR}/${PSGRS_PROJECT_NAME}-code/pdi/repo
      fi
      envsubst \
        < ${PSGRS_SHELL_DIR}/artefacts/pdi/.kettle/repositories-file.xml \
        > ${COMMON_CONFIG_DIR}/pdi/.kettle/repositories.xml
    fi
    if [ ${PSGRS_PDI_STORAGE_TYPE} = "file-based" ]; then
      # file-based storage keeps shared connections in shared.xml instead
      cp ${PSGRS_SHELL_DIR}/artefacts/pdi/.kettle/shared.xml \
         pdi/.kettle
    fi
    # ---
    echo "Adding essential shell files ..."
    # PSGRS_KETTLE_HOME is consumed by envsubst inside the template below
    export PSGRS_KETTLE_HOME=${COMMON_CONFIG_DIR}/pdi
    envsubst \
      < ${PSGRS_SHELL_DIR}/artefacts/common-config/set-env-variables.sh \
      > ${COMMON_CONFIG_DIR}/pdi/shell-scripts/set-env-variables.sh
    cp ${PSGRS_SHELL_DIR}/artefacts/pdi/.kettle/.spoonrc \
       ${COMMON_CONFIG_DIR}/pdi/.kettle
    # commit new files
    git add --all
    git commit -am "initial commit"
    # enable pre-commit hook
    chmod 700 ${COMMON_CONFIG_DIR}/.git/hooks/pre-commit
    chmod 700 ${COMMON_CONFIG_DIR}/.git/hooks/settings.sh
    echo "Creating basic README file ..."
    echo "Common configuration for ${PSGRS_ENV} environment." > ${COMMON_CONFIG_DIR}/readme.md
    echo ""
    echo "==============================="
    echo ""
    echo -e "\e[34m\e[47mIMPORTANT\e[0m"
    echo "Amend the following configuration file:"
    echo "${COMMON_CONFIG_DIR}/pdi/shell-scripts/set-env-variables.sh"
    echo ""
    echo ""
    echo "Before using Spoon, source this file:"
    echo "source ${COMMON_CONFIG_DIR}/pdi/shell-scripts/set-env-variables.sh"
    echo "==============================="
    echo ""
    # echo "Running set-env-variables.sh now so that at least KETTLE_HOME is defined."
    # echo "You can start PDI Spoon now if working on a dev machine."
    echo ""
    source ${COMMON_CONFIG_DIR}/pdi/shell-scripts/set-env-variables.sh
  fi
}
# Initialise the project documentation Git repository
# (${PSGRS_BASE_DIR}/<project>-documentation) with a README, on branch
# "master". Skipped if the folder already exists. Globals read: PSGRS_ACTION,
# PSGRS_PROJECT_NAME, PSGRS_BASE_DIR.
function project_docu {
  # check if required parameter values are available
  if [ -z ${PSGRS_ACTION} ] || [ -z ${PSGRS_PROJECT_NAME} ]; then
    echo "Not all required arguments were supplied. Required:"
    echo "-a <PSGRS_ACTION>"
    echo "-p <Project Name>"
    echo "exiting ..."
    exit 1
  fi
  echo "===========PROJECT DOCUMENTATION=================="
  PROJECT_DOCU_DIR=${PSGRS_BASE_DIR}/${PSGRS_PROJECT_NAME}-documentation
  echo "PROJECT_DOCU_DIR: ${PROJECT_DOCU_DIR}"
  if [ ! -d "${PROJECT_DOCU_DIR}" ]; then
    echo "Creating project documentation folder ..."
    echo "location: ${PROJECT_DOCU_DIR}"
    mkdir ${PROJECT_DOCU_DIR}
    cd ${PROJECT_DOCU_DIR}
    echo "Initialising Git Repo ..."
    git init .
    echo "Creating and pointing to default git branch"
    git checkout -b master
    echo "Creating basic README file ..."
    echo "# Documentation for ${PSGRS_PROJECT_NAME}" > ${PROJECT_DOCU_DIR}/readme.md
    # commit new files
    git add --all
    git commit -am "initial commit"
  fi
}
# Create and initialise the shared documentation Git repository at
# <PSGRS_BASE_DIR>/common-documentation. Idempotent: does nothing if the
# folder already exists. Requires PSGRS_ACTION to be set; exits the
# whole script otherwise.
function common_docu {
  # check if required parameter values are available (quoted so an
  # unset/empty value makes -z true instead of breaking the `[` syntax)
  if [ -z "${PSGRS_ACTION}" ]; then
    echo "Not all required arguments were supplied. Required:"
    echo "-a <PSGRS_ACTION>"
    echo "exiting ..."
    exit 1
  fi
  echo "===========COMMON DOCUMENTATION=================="
  COMMON_DOCU_DIR="${PSGRS_BASE_DIR}/common-documentation"
  echo "COMMON_DOCU_DIR: ${COMMON_DOCU_DIR}"
  if [ ! -d "${COMMON_DOCU_DIR}" ]; then
    echo "Creating project documentation folder ..."
    echo "location: ${COMMON_DOCU_DIR}"
    mkdir "${COMMON_DOCU_DIR}"
    # bail out if we could not enter the new folder -- otherwise the
    # git commands below would run against the current directory
    cd "${COMMON_DOCU_DIR}" || return 1
    echo "Initialising Git Repo ..."
    git init .
    echo "Creating and pointing to default git branch"
    git checkout -b master
    echo "Creating basic README file ..."
    echo "# Common Documentation" > "${COMMON_DOCU_DIR}/readme.md"
    # commit new files
    git add --all
    git commit -am "initial commit"
  fi
}
# Dispatch on the requested action. All PSGRS_ACTION comparisons are
# quoted: the original unquoted `[ ${PSGRS_ACTION} = "1" ]` is a shell
# syntax error ("unary operator expected") when the variable is empty.
# full setup with common config
if [ "${PSGRS_ACTION}" = "1" ]; then
  project_docu
  common_docu
  project_code
  project_config
  common_config
  # copy utility scripts
  cd "${PSGRS_BASE_DIR}"
  cp "${PSGRS_SHELL_DIR}/artefacts/git/update_all_git_repos.sh" .
  # NOTE: the heredoc delimiter is unquoted, so \<newline> sequences are
  # collapsed at generation time and the resulting start-webspoon.sh
  # contains the docker invocation as one long line.
  cat > "${PSGRS_BASE_DIR}/start-webspoon.sh" <<EOL
sudo docker run -it --rm \
-e JAVA_OPTS="-Xms1024m -Xmx2048m" \
-e KETTLE_HOME="/root/${PSGRS_GROUP_NAME}/common-config-${PSGRS_ENV}/pdi/" \
-p 8080:8080 \
-v ${PSGRS_BASE_DIR}:/root/${PSGRS_GROUP_NAME}/:z \
hiromuhota/webspoon:latest-full
EOL
  chmod 700 "${PSGRS_BASE_DIR}"/*.sh
fi
# full setup without common config
if [ "${PSGRS_ACTION}" = "2" ]; then
  project_code
  project_docu
  standalone_project_config
  # copy utility scripts
  cd "${PSGRS_BASE_DIR}"
  cp "${PSGRS_SHELL_DIR}/artefacts/git/update_all_git_repos.sh" .
  cat > "${PSGRS_BASE_DIR}/start-webspoon.sh" <<EOL
sudo docker run -it --rm \
-e JAVA_OPTS="-Xms1024m -Xmx2048m" \
-e KETTLE_HOME="/root/${PSGRS_GROUP_NAME}/${PSGRS_PROJECT_NAME}-config-${PSGRS_ENV}/pdi/" \
-p 8080:8080 \
-v ${PSGRS_BASE_DIR}:/root/${PSGRS_GROUP_NAME}/:z \
hiromuhota/webspoon:latest-full
EOL
  chmod 700 "${PSGRS_BASE_DIR}"/*.sh
fi
# single-purpose actions: run exactly one setup step
if [ "${PSGRS_ACTION}" = "pdi_module" ]; then
  pdi_module
fi
if [ "${PSGRS_ACTION}" = "project_code" ]; then
  project_code
fi
if [ "${PSGRS_ACTION}" = "project_config" ]; then
  project_config
fi
if [ "${PSGRS_ACTION}" = "standalone_project_config" ]; then
  standalone_project_config
fi
if [ "${PSGRS_ACTION}" = "common_config" ]; then
  common_config
fi
if [ "${PSGRS_ACTION}" = "project_docu" ]; then
  project_docu
fi
if [ "${PSGRS_ACTION}" = "common_docu" ]; then
  common_docu
fi
88ef40c55d098c237bbdecb0d5913d9fb3101d89 | Shell | michaeladam0/torrent-annex-testing-scripts | /cmdstep.sh | UTF-8 | 1,312 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# ************************************************************
# CMDSTEP.SH
# Runs the current test until all iterations are exhausted
# ************************************************************
# Get and increment iteration
current_iteration=$(($(grep -e current_iteration /vagrant/remote/settings | awk '{ print $2 }')+1))
# Get iterations cap
num_iterations=$(grep -e num_iterations /vagrant/remote/settings | awk '{ print $2 }')
# Get test descriptor
description=$(grep -e description /vagrant/remote/settings | awk '{ print $2 }')
# Get test id
test_id=$(grep -e test_id /vagrant/remote/settings | awk '{ print $2 }')
# Check if iterations have maxed out yet
if ((current_iteration > num_iterations)); then
# Iterations are maxed, run next test
echo "Iterations maxed. Moving to next test."
/vagrant/remote/scripts/next_test.sh
else
# Iterations still remaining, increment iterator in settings
echo "Iterations remaining. Iterating."
sed -i "s/.*current_iteration.*/current_iteration $current_iteration/" /vagrant/remote/settings
# Create dir for next iteration
mkdir -p /vagrant/remote/data/$description/$test_id/$current_iteration
# Set clientstatus file
echo "alternate_ran FALSE" > /vagrant/remote/clientstatus
# Run test
/vagrant/remote/current_test/runtest.sh
fi | true |
65381e30b778d3d0fc03a555532cf73ebe50a369 | Shell | dankernel/aws_ubuntu16.04_init | /2.anaconda/test.sh | UTF-8 | 137 | 2.609375 | 3 | [] | no_license | echo -e "Enter env name : \c"
read name
echo "env : $name"
conda create --name $name python=3.6
conda info --envs
source activate $name
| true |
8f64be85372c6f29784896bddda0506a406fbfeb | Shell | lupeordaz/csctoss | /denoss02/jobs/dbbackup.sh | UTF-8 | 2,169 | 3.828125 | 4 | [] | no_license | #!/bin/bash
# Script to periodically backup the csctmon database to standard postgres dumpfile.
#set -x
source /home/postgres/.bash_profile
BASEDIR=/home/postgres/dba
BACKDIR=/pgdata/backups
export BAKDATE=`date '+%Y%m%d%H%M%S'`
export BAKFILE=$BACKDIR/csctmon_backup.$BAKDATE
export BAKLOG=$BASEDIR/logs/csctmon_backup.$BAKDATE.log
export CRONLOG=$BASEDIR/logs/csctmon_backup.cronlog
touch $CRONLOG
echo "CSCT `hostname` CSCTOSS Database Backup Report" > $BAKLOG
echo "----------------------------------------------------" >> $BAKLOG
echo "Start Date: `date`" >> $BAKLOG
echo "" >> $BAKLOG
# make sure database is up, if not send down message
if [ `pg_ctl status | grep "server is running" | wc -l` -lt 1 ]; then
echo "CSCT `hostname` CSCTOSS Database is not running ... backup aborted" >> $BAKLOG
cat $BAKLOG $CRONLOG | mail -s "CSCT `hostname` DB Backup FAILED!" dba@cctus.com
exit
fi
# remove stale backups - retain rolling set of 12
if [ `ls $BACKDIR/csctmon_backup.* | wc -l` -gt 12 ]; then
kounter=`ls $BACKDIR/csctmon_backup.* | wc -l`
kounter=`expr $kounter - 12`
ls -tr $BACKDIR/csctmon_backup.* | head -$kounter | xargs -i rm {} >> $BAKLOG
fi
# remove stale logs - retail rolling set of 12
if [ `ls $BASEDIR/logs/csctmon_backup.*.log | wc -l` -gt 12 ]; then
kounter=`ls $BASEDIR/logs/csctmon_backup.*.log | wc -l`
kounter=`expr $kounter - 12`
ls -tr $BASEDIR/logs/csctmon_backup.*.log | head -$kounter | xargs -i rm {} >> $BAKLOG
fi
# run the pg_dump command with appropriate parameters
pg_dump -C -U postgres -v csctmon > $BAKFILE
gzip $BAKFILE
BAKFILE=$BAKFILE.gz
# check to see file exists and has size (success), otherwise send failure report
if [ -s "$BAKFILE" ]; then
# cat $BAKLOG $CRONLOG | mail -s "CSCT `hostname` DB Backup Report" dba@cctus.com
echo "Do not mail success"
else
cat $BAKLOG $CRONLOG | mail -s "CSCT `hostname` DB Backup FAILED!!!" dba@cctus.com
fi
# cleanup and exit
cat $BAKLOG $CRONLOG > $BAKLOG.new
mv $BAKLOG.new $BAKLOG
rm -f $CRONLOG
exit 0
| true |
cfe6eaf06495f9ef486b7b0b48bda6d9dd3ff8f6 | Shell | nacs-lab/nacs-pkgs | /github/setup-archlinux-arm.sh | UTF-8 | 310 | 2.625 | 3 | [] | no_license | #!/bin/bash
pacman -S --noconfirm arch-arm-git armv7l-linux-gnueabihf-gcc
mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc
for ((i = 0; i < 10; i++)); do
# Retry a few times since the ArchLinux ARM mirrors seem to be unreliable at times.
pacman-armv7 -Sy --noconfirm base-devel && break
done
| true |
f6baa6ad46771ba9e827bc3bac99486a3fc7fcf6 | Shell | Shakti-Prasad-Satapathy/Shell-Scripting | /Selection_Practice_Problems_with_if_and_else/flip_the_coin.sh | UTF-8 | 98 | 2.828125 | 3 | [] | no_license | #!/bin/bash -x
x=$(( (RANDOM % 100) +1 ))
if [[ $x -lt 50 ]]
then
echo TAILS
else
echo Heads
fi
| true |
d134c3092bb63e0f027747bade3e00feae3ea845 | Shell | Gasol/dotfiles | /misc/dot-pbuilderrc | UTF-8 | 3,199 | 3.21875 | 3 | [] | no_license | # vim: set ft=sh:
# ccache
sudo mkdir -p /var/cache/pbuilder/ccache
sudo chmod a+w /var/cache/pbuilder/ccache
export CCACHE_DIR="/var/cache/pbuilder/ccache"
export CCACHEDIR="$CCACHE_DIR"
export PATH="/usr/lib/ccache:$PATH"
EXTRAPACKAGES="$EXTRAPACKAGES ccache"
BINDMOUNTS="$BINDMOUNTS $CCACHE_DIR"
PDEBUILD_PBUILDER=cowbuilder
# Codenames for Debian suites according to their alias. Update these when
# needed.
UNSTABLE_CODENAME="sid"
TESTING_CODENAME="buster"
STABLE_CODENAME="stretch"
STABLE_BACKPORTS_SUITE="$STABLE_CODENAME-backports"
# List of Debian suites.
DEBIAN_SUITES=($UNSTABLE_CODENAME $TESTING_CODENAME $STABLE_CODENAME
"unstable" "testing" "stable")
# List of Ubuntu suites. Update these when needed.
UBUNTU_SUITES=("yakkety", "xenial", "trusty", "precise", "oneiric" "natty" "maverick" "lucid" "karmic" "jaunty" "intrepid" "hardy")
# Mirrors to use. Update these to your preferred mirror.
DEBIAN_MIRROR="free.nchc.org.tw"
UBUNTU_MIRROR="free.nchc.org.tw"
#DEBIAN_MIRROR="mirrors.linode.com"
#UBUNTU_MIRROR="mirrors.linode.com"
# Optionally use the changelog of a package to determine the suite to use if
# none set.
if [ -z "${DIST}" ] && [ -r "debian/changelog" ]; then
DIST=$(dpkg-parsechangelog | awk '/^Distribution: / {print $2}')
# Use the unstable suite for certain suite values.
if $(echo "experimental UNRELEASED" | grep -q $DIST); then
DIST="$UNSTABLE_CODENAME"
fi
# Use the stable suite for stable-backports.
if $(echo "$STABLE_BACKPORTS_SUITE" | grep -q $DIST); then
DIST="$STABLE"
fi
fi
# Optionally set a default distribution if none is used. Note that you can set
# your own default (i.e. ${DIST:="unstable"}).
: ${DIST:="$(lsb_release --short --codename)"}
# Optionally change Debian release states in $DIST to their names.
case "$DIST" in
unstable)
DIST="$UNSTABLE_CODENAME"
;;
testing)
DIST="$TESTING_CODENAME"
;;
stable)
DIST="$STABLE_CODENAME"
;;
esac
# Optionally set the architecture to the host architecture if none set. Note
# that you can set your own default (i.e. ${ARCH:="i386"}).
: ${ARCH:="$(dpkg --print-architecture)"}
NAME="$DIST"
if [ -n "${ARCH}" ]; then
NAME="$NAME-$ARCH"
DEBOOTSTRAPOPTS=("--arch" "$ARCH" "${DEBOOTSTRAPOPTS[@]}")
fi
BASETGZ="/var/cache/pbuilder/$NAME-base.tgz"
# Optionally, set BASEPATH (and not BASETGZ) if using cowbuilder
BASEPATH="/var/cache/pbuilder/base-${NAME}.cow/"
DISTRIBUTION="$DIST"
BUILDRESULT="/var/cache/pbuilder/$NAME/result/"
APTCACHE="/var/cache/pbuilder/$NAME/aptcache/"
BUILDPLACE="/var/cache/pbuilder/build/"
if $(echo ${DEBIAN_SUITES[@]} | grep -q $DIST); then
# Debian configuration
MIRRORSITE="http://$DEBIAN_MIRROR/debian/"
COMPONENTS="main contrib non-free"
DEBOOTSTRAPOPTS=("${DEBOOTSTRAPOPTS[@]}" "--keyring=/usr/share/keyrings/debian-archive-keyring.gpg")
OTHERMIRROR="deb http://$DEBIAN_MIRROR/debian/ $DIST-backports main"
elif $(echo ${UBUNTU_SUITES[@]} | grep -q $DIST); then
# Ubuntu configuration
MIRRORSITE="http://$UBUNTU_MIRROR/ubuntu/"
COMPONENTS="main restricted universe multiverse"
DEBOOTSTRAPOPTS=("${DEBOOTSTRAPOPTS[@]}" "--keyring=/usr/share/keyrings/ubuntu-archive-keyring.gpg")
else
echo "Unknown distribution: $DIST"
exit 1
fi
| true |
cc6510f34301f7580946db448fcf584cf1556a80 | Shell | fizyr/ros-pkgbuilds | /base/aprt/PKGBUILD | UTF-8 | 756 | 2.625 | 3 | [] | no_license | # Maintainer: Maarten de Vries
pkgname=aprt
pkgdesc='Arch Linux binary packare repository tools'
url='https://github.com/delftrobotics/python-aprt'
pkgver=0.1.7
pkgrel=1
arch=(any)
license=(BSD)
provides=(python-aprt)
depends=(python python-libarchive-c)
makedepends=(python-setuptools)
source=("python-aprt-$pkgver.tar.gz::https://github.com/delftrobotics/python-aprt/archive/$pkgver.tar.gz")
sha512sums=('9b9a61331244a0932b318d156415595c1e6cef13a846c01cd106580a3eb5b4c6c4bf58608ebe98a7352dcdaba84e12e17a604a827d2c1a5b8cc77f507c057849')
# makepkg build step: compile the Python sources inside the unpacked
# release tarball.
build() {
	local src_root="$srcdir/python-aprt-$pkgver"
	cd -- "$src_root"
	python ./setup.py build
}
# makepkg package step: install into the staging root and ship the BSD
# license file under /usr/share/licenses.
package() {
	local src_root="$srcdir/python-aprt-$pkgver"
	cd -- "$src_root"
	python ./setup.py install --root "$pkgdir"
	install -Dt "$pkgdir/usr/share/licenses/$pkgname" LICENSE
}
| true |
6e54f3c004788805cf6ca5a2960056d80ae4ffd7 | Shell | JuanZurak2020/tools | /virtual_hosts.sh | UTF-8 | 1,204 | 3.921875 | 4 | [] | no_license | #!/bin/bash
# Tool for admin virtual hosts
# Run as SU
# @TODO enable https (cerbot)
SCRIPT=$( basename "$0" )
version='1.1.0'
delete=false
while getopts "d" opt; do
case $opt in
d) delete=true
;;
esac
done
#Set vars
DNS=$1
HTTPS=$2
if [ "$delete" = true ]; then
DNS=$2
HTTPS=$3
fi
#Validate
if [ -z "$DNS" ]
then
echo "usage: ./virtual_host.sh example.com"
exit 1
fi
#VARS
DocumentRoot=/var/www/html/mds-tools/www/en/funnels/pages/
vhFile="000-$DNS.conf"
pathfileVH="/etc/apache2/sites-available/$vhFile"
if [ "$delete" = true ]; then
echo "delete $DNS"
cd /etc/apache2/sites-available/
a2dissite $vhFile
rm $vhFile
sed -i "/$DNS/d" /etc/hosts
service apache2 restart
exit 0
fi
#create file
echo "#Virtual Host $DNS
<VirtualHost *:80>
ServerName $DNS
SetEnv MDS_ENVIRONMENT dev
DocumentRoot $DocumentRoot
ErrorLog \${APACHE_LOG_DIR}/error.log
CustomLog \${APACHE_LOG_DIR}/access.log combined
</VirtualHost>
" >> $pathfileVH
#enable virtual host
cd /etc/apache2/sites-available/
a2ensite $vhFile
#add dns to hosts
echo "127.0.0.1 localhost $DNS" >> /etc/hosts
#restart apache
service apache2 restart
| true |
0cc648d4221e6a65a3b8641a16d564fedb9ec028 | Shell | luisrico/ocs-operator | /hack/build-registry-bundle.sh | UTF-8 | 928 | 3.609375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
IMAGE_REGISTRY="${IMAGE_REGISTRY:-quay.io}"
REGISTRY_NAMESPACE="${REGISTRY_NAMESPACE:-}"
IMAGE_NAME="ocs-registry"
IMAGE_TAG="${IMAGE_TAG:-latest}"
IMAGE_BUILD_CMD="${IMAGE_BUILD_CMD:-docker}"
FULL_IMAGE_NAME="${IMAGE_REGISTRY}/${REGISTRY_NAMESPACE}/${IMAGE_NAME}:${IMAGE_TAG}"
if [ -z "${REGISTRY_NAMESPACE}" ]; then
echo "Please set REGISTRY_NAMESPACE"
echo " REGISTRY_NAMESPACE=<your-quay-username> ./hack/build-registry-bundle.sh"
echo " make bundle-registry REGISTRY_NAMESPACE=<your-quay-username>"
exit 1
fi
TMP_ROOT="$(dirname "${BASH_SOURCE[@]}")/.."
REPO_ROOT=$(readlink -e "${TMP_ROOT}" 2> /dev/null || perl -MCwd -e 'print Cwd::abs_path shift' "${TMP_ROOT}")
pushd "${REPO_ROOT}/deploy"
$IMAGE_BUILD_CMD build --no-cache -t "$FULL_IMAGE_NAME" -f Dockerfile .
echo
echo "Run '${IMAGE_BUILD_CMD} push ${FULL_IMAGE_NAME}' to push built container image to the registry."
popd
| true |
dcda022a4129236ada2bbd064120fc192d8f86f8 | Shell | telmich/cinv | /examples/backends-v1/oldsexy/sexy-host-del-from-network | UTF-8 | 1,432 | 3.28125 | 3 | [] | no_license | #!/bin/sh
#
# 2009 Nico Schottelius (nico-sexy at schottelius.org)
#
# This file is part of sexy.
#
# sexy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# sexy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with sexy. If not, see <http://www.gnu.org/licenses/>.
#
#
# Remove a host from a network and record the free address
#
set -e
. "$(which sexy-config-load)"
sexy_args network fqdn -- "$@"
if ! sexy_object_exists networks "${network}"; then
sexy_errormsg "Network $network does not exist"
fi
if ! sexy_object_exists hosts "${fqdn}"; then
sexy_errormsg "Host $fqdn does not exist"
fi
address="$(sexy-attribute-get networks "${network}" "${fqdn}/ipv4a")"
hostinnetwork="$(sexy_object_path networks "${network}")/${fqdn}"
# remember unsued ip address
sexy_object_config_add networks "${network}" addresses_free "${address}"
# remove address from host
sexy-attribute-del hosts "${fqdn}" ipv4a
# remove host from network
sexy-attribute-del networks "${network}" "${fqdn}"
| true |
350f375a15d01dbcb4d4a6b8bb7947b9264701c9 | Shell | figgefred/crunchbang-setup | /tunnelSupermuc | UTF-8 | 480 | 3.015625 | 3 | [] | no_license | #!/bin/bash
middleman="ceder@halle.in.tum.de"
target="supermuc.lrz.de:22"
port=55555
echo "Setting up tunnel to '$target'"
ssh -4 -L 55555:$target -f -N $middleman
echo "$target is now reachable on <user>@localhost (on port $port)"
# echo "Mount a remote directory"
# sshfs -p $port h039vao@localhost:/home/hpc/h039v/h039vao/projects test1
# Sync file from local <source> to remote <user@localhost:destination> with rsync
# rsync -av -e "ssh -p55555" <source> <user@localhost:destination>
| true |
bc3ab6f48063874de6fe88c4709d135a15f27714 | Shell | NBISweden/Knox-ePouta | /experiments/lib/settings/common.rc | UTF-8 | 2,506 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env bash
[ ${BASH_VERSINFO[0]} -lt 4 ] && exit 1
# Find the absolute path to the folder one level up
ABOVE=$(cd $(dirname ${BASH_SOURCE[0]})/../.. && pwd -P)
export VERBOSE=yes
#################################################################
# Making these variables immutable
# Note: Can source this file several times
[ -n "$KE_HOME" ] || readonly KE_HOME=$ABOVE
[ -n "$KE_TMP" ] || readonly KE_TMP=${KE_HOME}/tmp
mkdir -p ${KE_TMP}
export KE_TMP
[ -n "$BIO_DATA" ] || readonly BIO_DATA=/home/fred/BioInfo/data
[ -n "$BIO_SW" ] || readonly BIO_SW=/home/fred/BioInfo/sw
#################################################################
# Declaring the machines
declare -a KNOX_MACHINES=('supernode' 'storage' 'knox1' 'knox2' 'knox3')
declare -a EPOUTA_MACHINES=('epouta1' 'epouta2' 'epouta3')
declare -a MACHINES=("${KNOX_MACHINES[@]}" "${EPOUTA_MACHINES[@]}")
declare -A MACHINE_IPs
MACHINE_IPs=(\
[supernode]=10.101.128.100 \
[knox1]=10.101.128.101 \
[knox2]=10.101.128.102 \
[knox3]=10.101.128.103 \
[storage]=10.101.128.104 \
[epouta1]=10.101.0.21 \
[epouta2]=10.101.0.22 \
[epouta3]=10.101.0.23 \
)
export MGMT_GATEWAY=10.101.0.1
export MGMT_CIDR=10.101.0.0/16
declare -A MACHINE_PROFILES
MACHINE_PROFILES=(\
[supernode]=supernode \
[storage]=storage \
[knox1]=compute \
[knox2]=compute \
[knox3]=compute \
[epouta1]=compute \
[epouta2]=compute \
[epouta3]=compute \
)
#################################################################
# SSH configuration
SSH_CONFIG=${KE_TMP}/ssh_config
SSH_KNOWN_HOSTS=${KE_TMP}/ssh_known_hosts
[ ! -r ${SSH_CONFIG} ] && cat > ${SSH_CONFIG} <<ENDSSHCFG
Host ${MGMT_CIDR%0.0/16}*.*
User centos
StrictHostKeyChecking no
UserKnownHostsFile ${SSH_KNOWN_HOSTS}
ENDSSHCFG
########################################
export NFS_ROOT=/mnt
export UU_PROXY="http://uu_proxy:3128/"
export KE_JAVA_OPTIONS='-Dhttp.proxyHost=uu_proxy -Dhttp.proxyPort=3128 -Djava.net.preferIPv4Stack=true'
# Settings for the CAW example
export CAW_DATA=/mnt/data
export KE_PROJECTS=/mnt/projects
export MANTA_VERSIONS="1.0.0" # previously 0.27.1
export STRELKA_VERSIONS="1.0.15"
#export SAMTOOLS_VERSIONS="1.3 0.1.19" # Not 1.3.1
export SAMTOOLS_VERSIONS="1.3"
export SAMTOOLS_VERSIONS_EXTRA="0.1.19"
export BWA_VERSIONS="0.7.13" # 0.7.8 in the README
export SNPEFF_VERSIONS="4.2"
export VCFTOOLS_VERSIONS="0.1.14"
export VEP_VERSIONS="84"
export BEDTOOLS_VERSIONS="2.26.0"
#export GCC_VERSION=4.9.2
| true |
a9a252af508e0137508130c0fcb80137e461c463 | Shell | sanvu88/ubuntu-lemp-stack | /menu/controller/domain/change_pass_sftp | UTF-8 | 2,068 | 3.40625 | 3 | [] | no_license | #!/bin/bash
######################################################################
# Auto Install & Optimize LEMP Stack on Ubuntu #
# #
# Author: Sanvv - HOSTVN Technical #
# Website: https://hostvn.vn #
# #
# Please do not remove copyright. Thank! #
# Please do not copy under any circumstance for commercial reason! #
######################################################################
source /var/hostvn/menu/validate/rule
source /var/hostvn/menu/helpers/function
_run(){
user=$(grep -w "username" "${USER_DIR}/.${domain}.conf" | cut -f2 -d'=')
while true; do
read -r -p "Nhap vao mat khau moi dai tu 8 ky tu tro len [0 = thoat]: " new_password
PASS_LEN=${#new_password}
if [[ ${PASS_LEN} -ge 8 || "${new_password}" == "0" ]]; then
break
else
printf "%s\n" "${RED}Ban chua nhap mat khau moi.${NC}"
fi
done
if [[ -z "${new_password}" || "${new_password}" == "0" ]]; then
clear
printf "%s\n" "${RED}Huy thao tac.${NC}"
else
passwd "${user}" << EOF
$new_password
$new_password
EOF
sed -i '/user_pass/d' "${USER_DIR}"/."${domain}".conf
echo "user_pass=${new_password}" >> "${USER_DIR}"/."${domain}".conf
clear
printf "%s\n" "${GREEN}Doi mat khau SFTP cho user${NC} ${RED}${user}${NC} ${GREEN}thanh cong.${NC}"
printf "%s\n" "${GREEN}Mat khau moi la:${NC} ${RED}${new_password}${NC}"
printf "%s\n" "${GREEN}Port dang nhap SFTP la:${NC} ${RED}${ssh_port}${NC}"
fi
}
ALERT=""
domain=""
_select_domain
if [[ -z "${domain}" && -z "${ALERT}" ]]; then
clear
printf "%s\n" "${RED}Ban da chon huy thao tac.${NC}"
else
if [ -z "${ALERT}" ]; then
_run
else
clear
printf "%s\n" "${ALERT}"
fi
fi
menu_domain
| true |
47ad7697933ff04c377bc1a9a846eceed20ce8d0 | Shell | hebinhao1993/nixos | /.config/polybar/launch.sh | UTF-8 | 323 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env sh
# Terminate already running bar instances
# killall -q polybar
# since we do not have killall
# todo: use built in `command`
ps -ef |grep polybar |awk '{print $2}'| xargs kill -9
# Wait until the processes have been shut down
while pgrep -x polybar >/dev/null; do sleep 1; done
# Launch
polybar i3bar & | true |
de6c1bba27c3ec8c609947aa56f4fb0c6ffeb850 | Shell | dib-lab/2020-ibd | /sandbox/02_filt_comp_all/prep_hmp.sh | UTF-8 | 309 | 2.671875 | 3 | [
"BSD-3-Clause"
] | permissive | for infile in *sig
do
j=$(basename $infile .scaled2k.sig)
sourmash signature intersect -o ${j}_filt.sig -A ${infile} -k 31 ${infile} ../../01_greater_than_one_filt_sigs/greater_than_one_count_hashes.sig
sourmash signature rename -o ${j}_filt_named.sig -k 31 ${j}_filt.sig ${j}_filt
done | true |
1a9b2b4d84b240c200a74d4ec8d44273e5204b6e | Shell | postmodern/dotfiles | /.bashrc | UTF-8 | 1,427 | 3.21875 | 3 | [] | no_license | # .bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
# User specific aliases and functions
# Shell ENV variables
export HISTFILE=/dev/null
export HISTFILESIZE=0
export PATH="$PATH:$HOME/bin"
export EDITOR="vim"
PACKAGE_MANAGERS=(synaptic gpk-application)
for program in ${PACKAGE_MANAGERS[*]}; do
if [[ -x /usr/bin/$program ]]; then
export PACKAGE_MANAGER="$program"
break
fi
done
# Shell ENV variables
export PYTHON_SHELL="python3"
export PHP_SHELL="php -a"
export JS_SHELL="js"
export RUBY_SHELL="irb"
# Aliases
alias forget='ssh-add -D; pkill -HUP gpg-agent'
alias stlink-openocd="openocd -f board/stm32f429discovery.cfg"
alias openocd-telnet="telnet localhost 4444"
alias openocd-gdb='arm-none-eabi-gdb -ex "tar ext :3333"'
alias st-gdb='arm-none-eabi-gdb -ex "tar ext :4242"'
if [[ -f /etc/fedora-release ]] || [[ -f /etc/redhat-version ]]; then
alias update='sudo dnf update --enablerepo=updates-testing --security'
elif [[ -f /etc/debian_version ]]; then
alias update='sudo apt update && apt list --upgradeable && sudo unattended-upgrade'
fi
# Ruby Aliases
alias be='bundle exec'
# Run the given command through `bundle exec` when a Gemfile exists in
# the current directory or any ancestor; otherwise run it directly.
# Usage: __bundle_exec <command> [args...]
function __bundle_exec()
{
  local dir="$PWD/"
  # strip one trailing path component per iteration; the walk ends at
  # the empty string after the filesystem root has been checked
  until [[ -z "$dir" ]]; do
    dir="${dir%/*}"
    if [[ -f "$dir/Gemfile" ]]; then
      # "$@" (quoted) keeps arguments containing spaces intact; the
      # original unquoted $@ re-split them into separate words
      bundle exec "$@"
      return $?
    fi
  done
  command "$@"
  return $?
}
# Route the common Ruby tooling through the Gemfile-aware wrapper above.
for wrapped_cmd in rake rspec yard yard-spellcheck dead_end; do
  alias "$wrapped_cmd"="__bundle_exec $wrapped_cmd"
done
| true |
506e23d4c54128b90420118349e34b0e6f1fb101 | Shell | MisterPresident/LinuxSetup | /screenlayout/screenlayout.sh | UTF-8 | 1,616 | 3.59375 | 4 | [] | no_license | #!/bin/bash
DEVICES=$(find /sys/class/drm/*/status)
if [ -n "$1" ]; then
{
while read l
do
dir=$(dirname $l);
status=$(cat $l);
dev=$(echo $dir | cut -d\- -f 2-);
if [ $(expr match $dev "HDMI") != "0" ]
then
dev=HDMI${dev#HDMI-?-}
else
dev=$(echo $dev | tr -d '-')
fi
if [ "connected" == "$status" ]
then
declare $dev="yes";
fi
done <<< "$DEVICES"
if [ ! -z "$HDMI1" ]
then
OK=2
else
OK=1
fi
COUNT=0
while [ $COUNT -ne $OK ] ; do
COUNT=$(($(hwinfo --monitor --short | wc -l) - 1))
sleep 1
done
$0
} >> /tmp/monitor.out & disown
exit 0
fi
cd $(dirname "$0")
displaynum=`ls /tmp/.X11-unix/* | sed s#/tmp/.X11-unix/X##`
display=":$displaynum.0"
export DISPLAY=":$displaynum.0"
export XAUTHORITY=$(ps -C Xorg -f --no-header | sed -n 's/.*-auth //; s/ -[^ ].*//; p')
MONITORS=$(hwinfo --monitor --short)
# echo "$MONITORS"
COUNT=$(($(echo "$MONITORS" | wc -l) - 1))
# echo $COUNT
echo $MONITORS | grep "LG ELECTRONICS L246WH" > /dev/null
ROHRBACH=$?
echo $MONITORS | grep "BenQ V2400W" > /dev/null
GRAZ=$?
if [ $COUNT -eq 2 ]; then
echo "External monitors are plugged in"
if [ $ROHRBACH -eq 0 ]; then
LAYOUT="Rohrbach"
FILE="./screenlayout_rohrbach.sh"
elif [ $GRAZ -eq 0 ]; then
LAYOUT="Graz"
FILE="./screenlayout_graz.sh"
else
LAYOUT="Other"
FILE="arandr"
fi
else
echo "No external monitors are plugged in"
LAYOUT="Laptop"
FILE="./screenlayout_basic.sh"
fi
echo "Use 'layout $LAYOUT', exec $FILE"
$FILE &
| true |
9b8a1af153a51d9cbdce3cd9e381b701d2e8f6ec | Shell | mklement0/whichpm | /test/Warns about duplicate INC entries | UTF-8 | 684 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# ---
# IMPORTANT: Use the following statement at the TOP OF EVERY TEST SCRIPT
# to ensure that this package's 'bin/' subfolder is added to the path so that
# this package's CLIs can be invoked by their mere filename in the rest
# of the script.
# ---
PATH=${PWD%%/test*}/bin:$PATH
# Helper function for error reporting.
die() { (( $# > 0 )) && echo "ERROR: $*" >&2; exit 1; }
# Duplicate the first entry in @INC, which should trigger a warning.
PERL5LIB="$(perl -e "print @INC[0]")" whichpm File::Spec 2>&1 1>/dev/null | grep -q 'duplicate entries' || die "Expected warning about duplicate @INC entries, but got none."
exit 0
| true |
209c35341f2100f6bdf1645233110d1d6ae94fdd | Shell | aaron-ai/rocketmq-client-cpp | /tools/coverage.sh | UTF-8 | 586 | 3.28125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
TOOLS_DIR=$(dirname "$0")
WORKSPACE_DIR=$(dirname "$TOOLS_DIR")
cd $WORKSPACE_DIR
if test "$#" -lt 1; then
echo "Use bazel cache for Development Environment"
bazel coverage --config=remote_cache //src/test/cpp/ut/...
elif test "$1" = "ci"; then
echo "Use bazel cache for CI"
bazel coverage --config=ci_remote_cache //src/test/cpp/ut/...
else
echo "Unknown argument $*. Use bazel cache for Development Environment"
bazel coverage --config=remote_cache //src/test/cpp/ut/...
fi
genhtml bazel-out/_coverage/_coverage_report.dat --output-directory coverage | true |
00b3e3c192d693a0d571c4dad3cbeb1f05e75ba2 | Shell | parimarjan/postgres_setup_scripts | /pg_setup.sh | UTF-8 | 1,451 | 3.109375 | 3 | [] | no_license | #!/usr/bin/env bash
## installs postgres, and sets up environment variables appropriately
echo "export PG_DATA_DIR=/pgfs/pg_data_dir" >> ~/.bashrc_exports
echo "export PGPORT=5432" >> ~/.bashrc_exports
echo "export PG_BUILD_DIR=/pgfs/build" >> ~/.bashrc_exports
source ~/.bashrc_exports
# also, set these for the current run
# clone postgres, and checkout appropriate branch
cd /pgfs/
git clone https://github.com/postgres/postgres.git
cd postgres
git checkout REL_12_STABLE
#git checkout REL_10_STABLE
sudo apt-get --assume-yes install libreadline-dev zlib1g-dev flex bison-devel \
zlib-devel openssl-devel wget
sudo apt-get --assume-yes install build-essential libreadline-dev zlib1g-dev \
flex bison libxml2-dev libxslt-dev libssl-dev
./configure --enable-cassert --enable-debug --prefix $PG_BUILD_DIR CFLAGS="-ggdb -Og -g3 -fno-omit-frame-pointer"
# now we can compile postgres (just need to do this first time we're linking
# with aqo)
cd /pgfs/postgres
make -j4 -s
make install -j4 -s
#echo "export PG_BUILD_DIR=/pgfs/build" >> ~/.bashrc_exports
echo "alias startpg=\"$PG_BUILD_DIR/bin/postgres -D $PG_DATA_DIR -p $PGPORT\"" >> ~/.bashrc_exports
echo "export PATH=$PG_BUILD_DIR/bin:$PATH" >> ~/.bashrc_exports
export PATH=$PG_BUILD_DIR/bin:$PATH
$PG_BUILD_DIR/bin/initdb -D $PG_DATA_DIR
cp ~/postgres_setup_scripts/postgresql.conf $PG_DATA_DIR/
source ~/.bashrc_exports
pg_ctl -D $PG_DATA_DIR -l logfile start
echo "started postgres"
| true |
0d61697d5bf7e035556bee571497c9b8f52d56f9 | Shell | viniciussbs/bash_config | /scripts/alias.sh | UTF-8 | 246 | 2.6875 | 3 | [] | no_license | alias ll="ls -Glahs"
alias ls="/bin/ls -G"
alias psgrep="ps aux | egrep -v egrep | egrep"
alias preview="open -a Preview"
alias md="open -a MacDown"
# alias to open repos
for repo in $(ls ~/Projects); do
alias "r-$repo"="repo open $repo"
done
| true |
b5401231f16fb248bba92655debdb3deedc000ba | Shell | dantiston/GrammarMatrix | /.svn/pristine/b5/b5401231f16fb248bba92655debdb3deedc000ba.svn-base | UTF-8 | 10,796 | 3.765625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Script for running all existing matrix regression tests.
# Be sure that $CUSTOMIZATIONROOT is set appropriately
# (i.e., to point to the matrix/gmcs directory you
# intend to test... the regression_tests directory with that
# gmcs/directory is the one that will be active).
# Much copied from logon/parse.
unset DISPLAY
unset LUI
###
### INITIALIZATION CHECKS
###
if [ -z "${LOGONROOT}" ]; then
echo "run-regression-tests: unable to determine \$LOGONROOT directory; exit."
exit 1
fi
if [ -z "${CUSTOMIZATIONROOT}" ]; then
echo "run-regression-tests: unable to determine \$CUSTOMIZATIONROOT directory; exit."
exit 1
fi
if ! hash ace 2>/dev/null; then
echo "run-regression-tests: no ace command in \$PATH, checking \$ACEROOT..."
if [ -z "${ACEROOT}" ]; then
echo "run-regression-tests: unable to determine \$ACEROOT directory; exit."
exit 1
else
ACECMD="$ACEROOT/ace"
fi
else
ACECMD="ace"
fi
logon32=''
if [ ! -z "${LOGON32}" ]; then
echo "The regression test with ACE is not currently possible on a 32-bit machine; exit."
exit 1
fi
# set the appropriate Python version
python_cmd='python'
if ! echo $( $python_cmd -V 2>&1 ) | grep -q "Python 2\.[5,6,7]"; then
echo "Default Python version incompatible. Attempting to find another..." >&2
if which python2.5 >/dev/null; then
python_cmd='python2.5'
elif which python2.6 >/dev/null; then
python_cmd='python2.6'
elif which python2.7 >/dev/null; then
python_cmd='python2.7'
else
echo "No compatible Python version found. Exiting."
exit 1
fi
echo " Found $( $python_cmd -V 2>&1 ). Continuing." >&2
fi
# for convenience
matrix_cmd="$python_cmd ${CUSTOMIZATIONROOT}/../matrix.py"
###
### COMMON VARIABLES AND SETTINGS
###
#
# include a shared set of shell functions and global parameters, including the
# architecture identifier .LOGONOS.
#
. ${LOGONROOT}/etc/library.bash
date=$(date "+%Y-%m-%d")
datetime=$(date)
count=1
limit=1000
best=1000
# Parameters which are the same for all regression test:
rtestdir="${CUSTOMIZATIONROOT}/regression_tests/"
skeletons="$rtestdir/skeletons/"
choices="$rtestdir/choices/"
grammars="$rtestdir/grammars"
tsdbhome="$rtestdir/home/"
logs="$rtestdir/logs/"
### LOG FILES
# Log file to look at tsdb output.
TSDBLOG="$logs/tsdb.${date}.log"
if [ -e ${TSDBLOG} ]; then
rm ${TSDBLOG}
fi
# We need to concatenate all TSDBLOGs together because they get overwritten
ALLTSDBLOG="$logs/alltsdb.${date}.log"
if [ -e ${ALLTSDBLOG} ]; then
rm ${ALLTSDBLOG}
fi
# Create one log file with results from all tests, appending on
# comments.
# 2008-08-22: By request not overwriting the log file
# but appending instead, with time stamps.
masterlog="$logs/regression-tests.$date"
echo "============ $datetime ============" >> $masterlog
###
### Tasks
###
while getopts "vcp" Option
do
case $Option in
v ) validate=true;;
c ) customize=true;;
p ) performance=true;;
esac
done
# if none were set, do all tasks
if ! [[ $validate || $customize || $performance ]]
then
validate=true
customize=true
performance=true
fi
# Now move the argument pointer to the first argument
shift $((OPTIND-1))
###
### TEST PREPARATION
###
# Get the list of regression tests from the regression-test-index:
# or from command-line input.
if [ -z $1 ]; then
lgnames=`$python_cmd ${CUSTOMIZATIONROOT}/regression_tests/regressiontestindex.py --lg-names`
if [ $? != 0 ]; then
echo "run-regression-tests: Problem with regression-test-index, cannot run regression tests."
exit 1
fi
else
lgnames=$@
fi
# Clear any existing regression test files that can cause conflicts
for lgname in $lgnames
do
rm -rf $grammars/$lgname
rm -f $logs/$lgname.$date
rm -rf $tsdbhome/current/$lgname
done
###
### TSDB GOLD STANDARD COMPARISONS
###
# Now do essentially the same things as one-regression-test for each one:
# comparison without restarting tsdb++
profiles=""
comparison=""
echo "== All grammars are being created. =="
for lgname in $lgnames
do
printf "%-70s " "$lgname..."
# Set skeleton, grammar, gold-standard for comparison, and
# target directory.
skeleton="$skeletons/$lgname"
gold="gold/$lgname"
choicesfile="$choices/$lgname"
grammardir="$grammars/$lgname"
target="current/$lgname"
log="$logs/$lgname.$date"
# Check for existence of choices file
if [ ! -e $choicesfile ]; then
echo "ERROR!"
echo "$lgname choices file does not exist: $choicesfile" >> $log
continue
fi
# Validate
if [[ $validate ]]; then
$matrix_cmd v $choicesfile >> $log
if [ $? != 0 ]; then
echo "INVALID!"
echo "$lgname choices file did not pass validation." >> $log
continue
fi
fi
# Customize (Performance needs a grammar, too, though)
if [[ $customize || $performance ]]; then
$matrix_cmd --cheap-hack cf $choicesfile $grammardir >> $log
if [[ $customize && $? != 0 ]]; then
echo "FAIL!"
echo "There was an error during the customization of the grammar." >> $log
continue
fi
fi
# Parsing Performance
if [[ $performance ]]; then
# Check for existence of gold profile
if [ ! -e $tsdbhome/$gold ]; then
echo "ERROR!"
echo "Gold profile does not exist: $tsdbhome/$gold" >> $log
continue
fi
fi
subdir=`ls -d $grammardir/*/`
dat_file=$subdir/${lgname}.dat
config_file=$subdir/ace/config.tdl
$ACECMD -G $dat_file -g $config_file 1>/dev/null 2>/dev/null
#echo "Running ACE with $lgname..."
mkdir -p $tsdbhome/$target
cp ${LOGONROOT}/lingo/lkb/src/tsdb/skeletons/english/Relations $tsdbhome/$target/relations
touch $tsdbhome/$target/item-set
touch $tsdbhome/$target/run
touch $tsdbhome/$target/parse
touch $tsdbhome/$target/result
touch $tsdbhome/$target/edge
touch $tsdbhome/$target/decision
touch $tsdbhome/$target/preference
touch $tsdbhome/$target/tree
cp $skeleton/item $tsdbhome/$target/item
cut -d@ -f7 $skeleton/item | ${CUSTOMIZATIONROOT}/regression_tests/art-static-prerelease -a "$ACECMD -g $dat_file 2>/dev/null" $tsdbhome/$target 1>/dev/null
echo "DONE"
#echo "Working on ACE is done!!!"
sed "s;@@;@0@;g" $tsdbhome/$target/parse > $tsdbhome/$target/tmp
mv -f $tsdbhome/$target/tmp $tsdbhome/$target/parse
sed "s;@@;@0@;g" $tsdbhome/$target/parse > $tsdbhome/$target/tmp
mv -f $tsdbhome/$target/tmp $tsdbhome/$target/parse
sed "s;@@;@0@;g" $tsdbhome/$target/result > $tsdbhome/$target/tmp
mv -f $tsdbhome/$target/tmp $tsdbhome/$target/result
sed "s;@@;@0@;g" $tsdbhome/$target/result > $tsdbhome/$target/tmp
mv -f $tsdbhome/$target/tmp $tsdbhome/$target/result
#$python_cmd ${CUSTOMIZATIONROOT}/regression_tests/cleanup_parse.py < $tsdbhome/$target/parse > $tsdbhome/$target/tmp
#mv -f $tsdbhome/$target/tmp $tsdbhome/$target/parse
echo "=== Readings-Compare ===" >> $log
profiles+="(tsdb:tsdb :create \"$target\" :skeleton \"$lgname\")"
comparison+="(tsdb::compare-in-detail \"$target\" \"$gold\" :format :ascii :compare '(:readings) :append \"$log\")"
done
echo "== All profiles are being compared to the gold standards. =="
{
options=":error :exit :wait 300"
echo "(setf (system:getenv \"DISPLAY\") nil)"
echo "(setf tsdb::*process-suppress-duplicates* nil)"
echo "(setf tsdb::*process-raw-print-trace-p* t)"
echo "(setf tsdb::*tsdb-home* \"$tsdbhome\")"
echo "(tsdb:tsdb :skeletons \"$skeletons\")"
echo "$profiles"
echo "$comparison"
} | ${LOGONROOT}/bin/logon -I base -locale no_NO.UTF-8 -qq 2> ${TSDBLOG} > ${TSDBLOG}
# The $TSDBLOG is overwritten each time, so copy it to $ALLTSDBLOG
echo "== BEGIN TSDB LOG for $lgname ==" >> $ALLTSDBLOG
cat $TSDBLOG >> $ALLTSDBLOG
# checking out the results of comparion
for lgname in $lgnames
do
# When the grammar fails to load, [incr tsdb()] is not creating
# the directory. So use existence of $tsdbhome/$target to check
# for grammar load problems.
printf "%-70s " "$lgname..."
log="$logs/$lgname.$date"
gold="${CUSTOMIZATIONROOT}/regression_tests/home/gold/$lgname"
target="${CUSTOMIZATIONROOT}/regression_tests/home/current/$lgname"
echo "" >> $log
echo "" >> $log
echo "=== MRS-Compare ===" >> $log
echo "" >> $log
cp $gold/result $gold/result.tmp
cp $gold/parse $gold/parse.tmp
cp $gold/item $gold/item.tmp
cp $target/result $target/result.tmp
cp $target/parse $target/parse.tmp
cp $target/item $target/item.tmp
$python_cmd ${CUSTOMIZATIONROOT}/regression_tests/multiple-mrs.py $gold
$python_cmd ${CUSTOMIZATIONROOT}/regression_tests/multiple-mrs.py $target
${CUSTOMIZATIONROOT}/regression_tests/mrs-compare $target $gold >> $log
mv $gold/result.tmp $gold/result
mv $gold/parse.tmp $gold/parse
mv $gold/item.tmp $gold/item
mv $target/result.tmp $target/result
mv $target/parse.tmp $target/parse
mv $target/item.tmp $target/item
target="current/$lgname"
if [ ! -e $tsdbhome/$target ]
then
echo "ERROR!"
echo "Probable tdl error; grammar failed to load." >> $log
continue
# newer versions of [incr tsdb()] write that there were 0 diffs, so
# the file is no longer empty for success
elif [ -n "$(grep -i "error" ${TSDBLOG} | grep -v "^([0-9]\+) \`\*")" ]
then
echo "ERROR!"
echo "TSDB error; check ${TSDBLOG}" >> $log
continue
elif [ -z "$(grep "compare-in-detail(): 0 differences" $log)" ]
then
echo "DIFFS!"
echo "Diffs were found in the current and gold profiles." >> $log
continue
elif [ "$(grep "^item [0-9]*:" $log)" ]
then
echo "DIFFS!"
echo "Diffs were found in the current and gold profiles." >> $log
continue
fi
# if we made it here, it's probably a success
echo "Success!"
done
# Check through tsdb log file for any errors, and report
# whether the results can be considered valid.
for lgname in $lgnames
do
log="$logs/$lgname.$date"
echo -ne "$lgname" >> $masterlog
if [ -s $log ]; then
echo -ne ": " >> $masterlog
$python_cmd ${CUSTOMIZATIONROOT}/regression_tests/regressiontestindex.py --comment $lgname | cat >> $masterlog
echo "" >> $masterlog
cat $log >> $masterlog
echo "" >> $masterlog
else
echo "... Success!" >> $masterlog
fi
done
# Notify user of results:
#if [[ $performance ]]; then
# echo "Grepping for 'error' in tsdb log:"
# echo ""
# Don't report errors for starred items
# grep -i "error" ${ALLTSDBLOG} | grep -v "^([0-9]\+) \`\*"
#fi
echo ""
echo "Results of the regression tests can be seen in"
echo "$masterlog"
| true |
14d49f3f93ed7a76d50707a06cca270a59a0cf59 | Shell | clarenceb/coles-shopping-list | /scripts/stop-local.sh | UTF-8 | 257 | 3.421875 | 3 | [] | no_license | #!/bin/sh -e
# Find any running Play framework server (NettyServer JVM) and ask it to
# shut down gracefully with SIGHUP.
# The escaped dots keep this grep from matching its own command line.
EXISTING_SERVER_PID=$(ps -ef | grep "play\.core\.server\.NettyServer" | awk '{ print $2 }')

if [ -n "${EXISTING_SERVER_PID}" ]; then
  echo "Terminating existing Play server (PID: ${EXISTING_SERVER_PID})"
  # Intentionally unquoted: ps may report several PIDs and kill accepts
  # them as separate arguments.
  kill -1 ${EXISTING_SERVER_PID}
fi
| true |
a23877ed0afba711e0eddb802b7c3e6c798cede9 | Shell | darencard/tacc-launcher-bio | /rad-pipeline/07-picard-to-bam/make-tobam-index.sh | UTF-8 | 726 | 3.90625 | 4 | [
"Unlicense"
] | permissive | #!/bin/bash
# Kyle Hernandez
# make-tobam-index.sh - create parameter file for converting sam to bam and indexing
#
# For every *.sam file in <in/dir>, appends one Picard SamFormatConverter
# command line to out.param; CREATE_INDEX=true also writes the .bai index.
if [[ -z $1 ]] || [[ -z $2 ]] || [[ -z $3 ]]; then
  echo "Usage: make-bam.sh <in/dir> <out/dir> out.param"
  exit 1;
fi

INDIR=$1
ODIR=$2
PARAM=$3
SCRIPT="/home1/01832/kmhernan/bin/picard-tools-1.92/SamFormatConverter.jar"
LOG="logs/"

if [ ! -d "$LOG" ]; then mkdir "$LOG"; fi
# Start from an empty parameter file on every run.
if [ -e "$PARAM" ]; then rm "$PARAM"; fi
touch "$PARAM"

for fil in "${INDIR}"*.sam; do
  # Skip the literal pattern when the input directory holds no .sam files.
  [ -e "$fil" ] || continue
  BASE=$(basename "$fil")
  NAME=${BASE%.*}
  OUT="${ODIR}${NAME}.bam"
  # Use the CREATE_INDEX=true to also make the idx file
  echo "java -Xms1G -Xmx2G -jar $SCRIPT CREATE_INDEX=true INPUT=$fil OUTPUT=$OUT" >> "$PARAM"
done
| true |
7f14b82529a889832a6baf3ddf92fa42770869f7 | Shell | maciek-slon/rapp-platform | /rapp_scripts/backup/dumpRAPPMysqlDatabase.sh | UTF-8 | 333 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
DATE=$(date +%Y-%m-%d:%H:%M:%S)

# /etc/db_credentials holds whitespace-separated tokens:
# the first is the username, the second the password.
i=0
for j in $(cat /etc/db_credentials)
do
  array[$i]=$j
  i=$((i + 1))
done

username=${array[0]}
password=${array[1]}

# Fixed: the old debug echo claimed to print the "third element" but in
# fact printed array[1] -- the password -- to stdout; removed to avoid
# leaking credentials.
# NOTE(review): -p<password> on the command line still exposes it in the
# process list; prefer ~/.my.cnf or MYSQL_PWD.
mysqldump -u "$username" -p"$password" RappStore > "$HOME/mysqlBckup_$DATE"
| true |
7fa53d75600d743b14d87ba7f6cec0066dec3551 | Shell | kadrivillem/skriptimine | /praktikum4/praks4yl1 | UTF-8 | 180 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Even or odd number ("paaris või paaritu arv"): read an integer from
# the user and report its parity.

# parity N - prints "N on paaris." when N is even, "N on paaritu." when odd.
# Empty/invalid input evaluates as 0 inside (( )), so the script no longer
# hits the arithmetic syntax error the old $(($arv%2)) produced on empty input.
parity() {
  local n="$1"
  if (( n % 2 == 0 )); then
    echo "$n on paaris."
  else
    echo "$n on paaritu."
  fi
}

echo "Sisesta suvaline täisarv"
read -r arv
parity "$arv"
| true |
157c679894d73e75bd4c18ef45db377b07ceebd6 | Shell | raosudha89/amr_emnlp | /scripts/preprocess_test_dataset.sh | UTF-8 | 1,382 | 3.109375 | 3 | [] | no_license | #!/bin/sh
# Usage: preprocess_test_dataset.sh <amr_aligned_file>
# Runs the AMR preprocessing pipeline for one dataset.  The dataset name
# is the aligned file's basename without its extension, and all inputs
# and outputs live under ../data/<name>/.  Each Python step writes its
# .p pickle into the current directory, which is then moved into the
# dataset directory.
amr_name="$(basename "$1")"
amr_name="${amr_name%.*}"
echo Creating graph structure...
#Create Networkx graph structure from amr aligned file. This will create the amr_nx_graphs.p file.
python amr_reader.py ../data/$amr_name/$amr_name.aligned > ../data/$amr_name/amr_nx_graphs
mv amr_nx_graphs.p ../data/$amr_name/
echo Done!
echo
echo Aggregating metadata...
#Aggregate all metadata (sentences, POS, NER, parses) for amr sentences. This will create the amr_aggregated_metadata.p file.
python aggregate_sentence_metadata.py ../data/$amr_name/$amr_name.aligned ../data/$amr_name/$amr_name.sentences ../data/$amr_name/$amr_name.pos ../data/$amr_name/$amr_name.ner ../data/$amr_name/$amr_name.parse
mv amr_aggregated_metadata.p ../data/$amr_name/
mv dep_parse.p dep_parse ../data/$amr_name/
echo Done!
echo
echo Creating concept dataset...
#Create concept dataset i.e. create concept_dataset.p
python create_concept_dataset.py ../data/$amr_name/amr_nx_graphs.p ../data/$amr_name/amr_aggregated_metadata.p > ../data/$amr_name/concept_dataset
mv concept_dataset.p ../data/$amr_name/
echo Done!
echo
echo Creating relation learning dataset
# Trailing argument 0 selects this script's mode -- TODO confirm its meaning
# against create_relation_dataset.py (the training script passes a different value).
python create_relation_dataset.py ../data/$amr_name/amr_nx_graphs.p ../data/$amr_name/concept_dataset.p 0 > ../data/$amr_name/relation_dataset
mv relation_dataset.p ../data/$amr_name/
echo Done!
echo
| true |
3d644f4e020b8b3262ce3680470f10d42636a576 | Shell | hhndez/blockchain-ait | /ubuntu/start.sh | UTF-8 | 307 | 2.640625 | 3 | [] | no_license | #!/bin/bash
echo "Starting..."
# `docker run -d` prints the new container's ID on stdout; capture it
# directly instead of re-querying with an ancestor filter, which could
# also match older, stopped containers.
containerId=$(docker run -d bitcoin_ubuntu sleep infinity)
echo "CID=$containerId"
# Fixed: exec into the container we just started.  The previous code
# ran `docker exec -it bitcoin_ubuntu`, i.e. used the *image* name as
# the target, which only works if a container happens to carry that name.
docker exec -it "$containerId" /bin/bash
| true |
1adb225ab8104939428bf3a88ceea421a941e3c4 | Shell | ethan42411/MediaPipeUnityPlugin | /docker/linux/x86_64/opencv3/PKGBUILD | UTF-8 | 1,684 | 2.859375 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | pkgbase=opencv3-opt
# Legacy OpenCV 3 built into /opt/opencv3 so it can coexist with the
# system OpenCV package.
pkgname=($pkgbase)
pkgver=3.4.12
pkgrel=1
pkgdesc="Open Source Computer Vision Library (Legacy Version & /opt directory version)"
arch=(x86_64)
license=(BSD)
url="http://opencv.org/"
makedepends=(cmake)
# Main sources plus the matching opencv_contrib modules tree.
source=(
  "opencv-$pkgver.tar.gz::https://github.com/opencv/opencv/archive/$pkgver.tar.gz"
  "opencv_contrib-$pkgver.tar.gz::https://github.com/opencv/opencv_contrib/archive/$pkgver.tar.gz"
)
sha256sums=('c8919dfb5ead6be67534bf794cb0925534311f1cd5c6680f8164ad1813c88d13'
            'b207024589674dd2efc7c25740ef192ee4f3e0783e773e2d49a198c37e3e7570')
# Out-of-tree build directory.
prepare() {
  mkdir -p build
}
# Configure a trimmed module list (BUILD_LIST), no tests/examples/python,
# installing under /opt/opencv3 to avoid clashing with /usr.
build() {
  cd build
  cmake ../opencv-$pkgver \
    -DCMAKE_BUILD_TYPE=Release \
    -DBUILD_LIST=calib3d,core,features2d,highgui,imgcodecs,imgproc,video,videoio \
    -DBUILD_EXAMPLES=OFF \
    -DBUILD_PERF_TESTS=OFF \
    -DBUILD_SHARED_LIBS=ON \
    -DBUILD_TESTS=OFF \
    -DBUILD_WITH_DEBUG_INFO=OFF \
    -DOPENCV_SKIP_VISIBILITY_HIDDEN=OFF \
    -DOPENCV_SKIP_PYTHON_LOADER=ON \
    -DBUILD_opencv_python=OFF \
    -DWITH_ITT=OFF \
    -DWITH_JASPER=OFF \
    -DWITH_WEBP=OFF \
    -DCMAKE_INSTALL_PREFIX=/opt/opencv3 \
    -DCMAKE_INSTALL_LIBDIR=lib \
    -DOPENCV_EXTRA_MODULES_PATH="$srcdir/opencv_contrib-$pkgver/modules"
  make
}
package_opencv3-opt() {
  options=(staticlibs)
  provides=(opencv3)
  cd build
  make DESTDIR="$pkgdir" install
  # install license file
  install -Dm644 "$srcdir"/opencv-$pkgver/LICENSE -t "$pkgdir"/usr/share/licenses/$pkgname
  cd "$pkgdir"/opt/opencv3/share
  # separate samples package
  mv OpenCV opencv3 # otherwise folder naming is inconsistent
  ln -sf /opt/opencv3/share/opencv3 OpenCV # fix some compatibility problems
}
| true |
c1538e65bb16c8c5e4a34ac2a9b8fa2cdea06e42 | Shell | Julia-Embedded/julia | /.architectures-lib | UTF-8 | 1,015 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Helper: run an awk program over the release-architectures table.
# Usage: _awkArch <version> <awk-body> [extra awk args...]
# Prefers "<version>/release-architectures" and falls back to the
# top-level "release-architectures"; comment and blank lines are skipped.
_awkArch() {
	local ver="$1"; shift
	local body="$1"; shift
	local table="$ver/release-architectures"
	if ! [ -f "$table" ]; then
		table='release-architectures'
	fi
	awk "$@" "/^#|^\$/ { next } $body" "$table"
}

# Print every dpkg architecture (column 2) known for the given version.
dpkgArches() {
	local ver="$1"; shift
	_awkArch "$ver" '{ print $2 }'
}

# Succeed (exit 0) iff the given bashbrew arch (column 1) is listed.
hasBashbrewArch() {
	local ver="$1"; shift
	local arch="$1"; shift
	_awkArch "$ver" 'BEGIN { exitCode = 1 } $1 == bashbrewArch { exitCode = 0 } END { exit exitCode }' -v bashbrewArch="$arch"
}

# Map a dpkg arch (column 2) to its bashbrew arch (column 1).
dpkgToBashbrewArch() {
	local ver="$1"; shift
	local arch="$1"; shift
	_awkArch "$ver" '$2 == dpkgArch { print $1; exit }' -v dpkgArch="$arch"
}

# Map a dpkg arch (column 2) to the Julia tarball arch (column 3).
dpkgToJuliaTarArch() {
	local ver="$1"; shift
	local arch="$1"; shift
	_awkArch "$ver" '$2 == dpkgArch { print $3; exit }' -v dpkgArch="$arch"
}

# Map a dpkg arch (column 2) to the Julia directory arch (column 4).
dpkgToJuliaDirArch() {
	local ver="$1"; shift
	local arch="$1"; shift
	_awkArch "$ver" '$2 == dpkgArch { print $4; exit }' -v dpkgArch="$arch"
}
| true |
4cdb68501030104f8200f6230406e5dad08e88e0 | Shell | tomohikoseven/DPlatform-ShellCore | /apps/Transmission.sh | UTF-8 | 685 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# DPlatform app recipe for the Transmission BitTorrent daemon.
# NOTE(review): this file appears to be sourced inside the installer's
# menu loop: $install/$remove/$CHOICE/$URL are defined by the caller and
# `break` exits the caller's loop -- confirm before running standalone.
[ "$1" = update ] && { $install transmission-daemon; whiptail --msgbox "Transmission updated!" 8 32; break; }
[ "$1" = remove ] && { $remove transmission-daemon; whiptail --msgbox "Transmission removed." 8 32; break; }
$install transmission-daemon
# Disable the RPC whitelist so the web UI is reachable from any host,
# not only 127.0.0.1.
#sed -i 's/"rpc-whitelist": "127.0.0.1"/"rpc-whitelist": "127.0.0.1"/' /etc/transmission-daemon/settings.json
sed -i 's/"rpc-whitelist-enabled": true/"rpc-whitelist-enabled": false/' /etc/transmission-daemon/settings.json
#~/.config/transmission-daemon/settings.json
whiptail --msgbox "$CHOICE installed!
Open http://$URL:9091 in your browser to access to the web UI
User Name: transmission | Password: transmission" 10 64
| true |
49cde99ce99bf16fd2abc3841c1eb9f27a84d0da | Shell | macarthur-lab/gnomad_browser | /deploy/scripts/takedown-serve.sh | UTF-8 | 1,322 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Tear down the gnomAD serving stack on GKE: delete the web-server and
# mongo deployments/services, then (after a second prompt) delete the
# whole serving cluster.  All *_NAME / disk / flag variables below are
# provided by config/config.sh.
# halt on any error
# set -e
python deploy/scripts/render.py
. "$(dirname "$0")"/../config/config.sh
gcloud config set project $GCLOUD_PROJECT
gcloud container clusters get-credentials $SERVING_CLUSTER_NAME --zone=$GCLOUD_ZONE
kubectl config set-context $SERVING_CLUSTER
kubectl config set-cluster $SERVING_CLUSTER
echo "Project name is ${PROJECT_NAME}"
echo "Project environment is ${DEPLOYMENT_ENV}"
echo "Environment is ${PROJECT_ENVIRONMENT}"
echo "Images will be rebuilt: ${REBUILD_IMAGES}"
echo "Mongo disk set to: ${MONGO_DISK}"
echo "Readviz disk set to: ${READVIZ_DISK}"
echo "Mongo will be restarted: ${RESTART_MONGO}"
echo "Monitor loading cluster? ${MONITOR_LOADING}"
read -p "Are you sure you want to bring down the server?" input
# NOTE(review): only a literal "n" aborts; any other answer proceeds.
if [[ $input = "n" ]]; then
  exit 0
fi
# Bring down previous replication controller
kubectl delete service $SERVER_REPLICATION_CONTROLLER_NAME
# kubectl delete hpa $SERVER_REPLICATION_CONTROLLER_NAME
kubectl delete deployment $SERVER_REPLICATION_CONTROLLER_NAME
kubectl delete service $MONGO_SERVICE_NAME
kubectl delete deployment $MONGO_REPLICATION_CONTROLLER
read -p "Are you sure you want to delete the cluster?" input
if [[ $input = "n" ]]; then
  exit 0
fi
# Delete the cluster
gcloud container clusters delete $SERVING_CLUSTER_NAME --zone $GCLOUD_ZONE
8f13e6a0ca30e4fca20d9d4387fb6a857c919185 | Shell | zengxiaoqing/DIVA | /DIVA3D/divastripped/divaintegral | UTF-8 | 9,018 | 3.140625 | 3 | [] | no_license | #!/bin/bash
export LC_ALL=C
echo //////////////////////////////////////////////////
echo integral and error on integrals
echo works with no coordinate change or -xscale
echo For icoordchange: to do
echo ASSUMES integrationpoints.dat are ALL in water ///
echo ASSUMES Bessel correlation function when ispec positive
echo //////////////////////////////////////////////////
echo ' '
Filepar=./input/param.par
# Read the analysis settings from param.par.  The file alternates one
# comment line with one value line, so every other `read` drops the
# comment into the scratch variable `linecomment`.
{
read linecomment
read lc            # length scale (used in the inflation factor below)
read linecomment
read icoordchange  # coordinate flag; 1/2 triggers degrees->meters conversion
read linecomment
read ispec         # error mode; >0 selects the hybrid-error branch
read linecomment
read ireg
read linecomment
read xori          # grid origin, x
read linecomment
read yori          # grid origin, y (treated as latitude when icoordchange is 1/2)
read linecomment
read dx            # grid step, x
read linecomment
read dy            # grid step, y
read linecomment
read nx            # number of grid points, x
read linecomment
read ny            # number of grid points, y
read linecomment
read valex         # exclusion (missing) value, compared against column 3 later
read linecomment
read snr           # signal-to-noise ratio (not used below in this script)
read linecomment
read varbak        # background variance; 0 disables the error-on-integral part
} < $Filepar
if [ -f ./input/integrationpoints.dat ]
then
echo Found a list of integration points :hope they are all wet
else
echo Did not find a list of integration points.
echo Will create a list based on the gridded output field
if [ -f ./output/fieldgher.anl ]
then
cp ./output/fieldgher.anl ./divawork/fort.20
cp ./output/ghertonetcdf/GridInfo.dat ./divawork/fort.21
cd divawork
##../../bin/gridpointlist.a
if [ -f ../output/diva.log ] ; then
cp -f ../output/diva.log .
fi
../../bin/gridpointlist.a >> diva.log
if [ $? -ne 0 ];then
echo ' '
echo --------------------------------------------
echo A problem was encountered during execution !
echo divaintegral gridpointlist.a
echo Check execution track
echo --------------------------------------------
echo ' ' >> diva.log
echo -------------------------------------------- >> diva.log
echo A problem was encountered during execution ! >> diva.log
echo divaintegral gridpointlist.a >> diva.log
echo Check execution track >> diva.log
echo -------------------------------------------- >> diva.log
fi
cp -f diva.log ../output/.
cd ..
mv ./divawork/fort.22 ./input/integrationpoints.dat
echo Now select only special points for the integral
dvintegral
else
echo Please run first an analysis or use an existing integrationpoints.dat
exit
fi
fi
# Inflation factor derived from the length scale lc and the grid-cell
# area; recomputed below once the cell size is rescaled to meters.
inflation=$(echo $lc $dx $dy | awk '{print $1*sqrt(4*3.141593)/sqrt($2*$3)}')
scale=$icoordchange
if [ "$icoordchange" == "1" -o "$icoordchange" == "2" ]
then
echo will assume latitude of analysis grid center
# cos(latitude) at the grid mid-point, stored negated; later formulas
# flip the sign back with (-$scale).
scale=$(echo $yori $dy $ny | awk '{print -cos(3.141593/180*($1+$2*$3/2))}')
echo Scale $scale
inflation=$(echo $lc $dx $dy $scale | awk '{print $1*sqrt(4*3.141593)/sqrt($2*$3*(-$4))}')
# Convert the grid steps from degrees to meters (Earth radius 6 370 km).
dxm=$(echo $dx $scale | awk '{print -$1*$2*6370000*3.141593/180.}')
echo dx in meters $dxm
dym=$(echo $dy | awk '{print $1*6370000*3.141593/180.}')
echo dy in meters $dym
dx=$dxm
dy=$dym
echo Integration will be done with surface units in meters
fi
echo Calculating integral
# integrationpoints.dat: column 3 is the field value, column 4 a per-point
# weight; points equal to the exclusion value valex are skipped.
INTEGRAL=$(awk -v val=$valex '{ if ($3!=val) {s=s+$3*$4}} END {print s}' ./input/integrationpoints.dat)
SURFACE=$(awk -v val=$valex '{if ($3!=val) {s=s+$4}} END {print s}' ./input/integrationpoints.dat)
# integral.dat columns: <integral> <surface> <mean value = integral/surface>
echo $INTEGRAL $SURFACE $dx $dy | awk '{print $1*$4*$3,$2*$4*$3,$1/$2}' > ./output/integral.dat
if [ "$varbak" == "0" -o "$ispec" == "0" ]
then
echo Sorry no error on integral will be calculated since error fields not requested
else
if [ "$ispec" -gt "0" ]
then
echo hybrid errors, also for the sum
cp ./input/integrationpoints.dat ./divawork/fort.10
cp ./input/data.dat ./divawork/fort.11
rm ./divawork/fort.12
datacol=$(head -n 1 ./input/data.dat | wc -w)
cd divawork
##echo $scale $lc $datacol | ../../bin/erroronintegrals.a
if [ -f ../output/diva.log ] ; then
cp -f ../output/diva.log .
fi
echo $scale $lc $datacol | ../../bin/erroronintegrals.a >> diva.log
if [ $? -ne 0 ];then
echo ' '
echo --------------------------------------------
echo A problem was encountered during execution !
echo divaintegral erroronintegrals.a
echo Check execution track
echo --------------------------------------------
echo ' ' >> diva.log
echo -------------------------------------------- >> diva.log
echo A problem was encountered during execution ! >> diva.log
echo divaintegral erroronintegrals.a >> diva.log
echo Check execution track >> diva.log
echo -------------------------------------------- >> diva.log
fi
cp -f diva.log ../output/.
cd ..
cp -v ./input/data.dat ./input/data.dat.nointegral
cp -v ./input/param.par ./input/param.par.nointegral
cp -v ./input/valatxy.coord ./input/valatxy.coord.nointegral
cp ./divawork/fort.12 ./input/data.dat
#cat ./input/data.dat
cp -v ./divawork/fort.14 ./output/Pfsum.dat
cp -v ./input/integrationpoints.dat ./input/valatxy.coord
head -5 ./input/param.par.nointegral > ./input/param.par
if [ "$1" == "-naive" ]
then
cp ./input/integrationpoints.dat ./input/valatxy.coord
echo 4 >> ./input/param.par
else
echo 0 >> ./input/param.par
fi
echo 0 >> ./input/param.par
echo 0 >> ./input/param.par
head -26 ./input/param.par.nointegral | tail -18 >> ./input/param.par
echo need to save original outputs...
mkdir -p ./output3
cp -r ./output/* ./output3
cp -v ./input/data.dat ./output3/Csum.dat
divamesh
divacalc
echo summing at discrete locations
head ./output/valatxyascii.anl
# need to add non-uniform grid here
paste ./output/valatxyascii.anl ./input/integrationpoints.dat > bidon
awk -v val=$valex '{if ($3!=val) {s=s+$3*$7}} END {print -s}' bidon >> ./output3/Pfsum.dat
cat ./output3/Pfsum.dat
PFSUM=$(awk '{s=s+$1} END {if (s>0) {print s} else {print 0}}' ./output3/Pfsum.dat)
echo For errors $PFSUM $varbak $dx $dy
echo $PFSUM $varbak $dx $dy > bidon
awk '{ print sqrt($1*$2*$3*$4*$3*$4)}' bidon > ./output3/erroronintegral.dat
#head ./input/valatxy.coord
#ls -l ./output
if [ "$1" == "-naive" ]
then
echo summing error at discrete locations
head ./output/erroratxyascii.anl
# need to add non-uniform grid here
paste ./output/erroratxyascii.anl ./input/integrationpoints.dat > bidon
sumsquare=$(awk -v val=$valex '{if ($3!=val) {s=s+$3*$3*$7*$7}} END {print s}' bidon)
echo $sumsquare $dx $dy $inflation> bidon
awk '{ print sqrt($1*$2*$3*$2*$3),$4,$4*sqrt($1*$2*$3*$2*$3)}' bidon > ./output3/erroronintegralnaive.dat
fi
mv -f ./input/data.dat.nointegral ./input/data.dat
mv -f ./input/valatxy.coord.nointegral ./input/valatxy.coord
mv -f ./input/param.par.nointegral ./input/param.par
mv -f ./input/integrationpoints.dat ./output3/integrationpoints.dat
echo Pushing back original output
rm -r ./output/*
mv -f ./output3/* ./output
rmdir ./output3
else
echo Full error calculation, also on sum
echo To include here
cp -v ./input/data.dat ./input/data.dat.nointegral
cp -v ./input/param.par ./input/param.par.nointegral
cp -v ./input/valatxy.coord ./input/valatxy.coord.nointegral
head -5 ./input/param.par.nointegral > ./input/param.par
echo -250 >> ./input/param.par # old version : ispec=-116
echo 0 >> ./input/param.par
echo 0 >> ./input/param.par
head -26 ./input/param.par.nointegral | tail -18 >> ./input/param.par
echo need to save original outputs...
mkdir -p ./output3
cp -r ./output/* ./output3
divamesh
divacalc -pfsum
awk '{print $1,$2}' ./input/data.dat.nointegral > bidon
paste bidon ./output/Csum.dat > ./input/data.dat
NCOL=$(head -1 ./input/data.dat.nointegral | wc -w)
if [ "$NCOL" -gt "3" ]
then
awk '{print $4}' ./input/data.dat.nointegral > bidon
cp ./input/data.dat bidon2
paste bidon2 bidon > ./input/data.dat
fi
cp -v ./input/data.dat ./output3/Csum.dat
head -5 ./input/param.par.nointegral > ./input/param.par
if [ "$1" == "-naive" ]
then
cp ./input/integrationpoints.dat ./input/valatxy.coord
echo -4 >> ./input/param.par
else
echo 0 >> ./input/param.par
fi
echo 0 >> ./input/param.par
echo 0 >> ./input/param.par
head -26 ./input/param.par.nointegral | tail -18 >> ./input/param.par
divacalc
cp ./output/Pfsumum.dat ./output3/Pfsum.dat
echo summing at discrete locations
head ./output/valatxyascii.anl
# need to add non-uniform grid here
paste ./output/valatxyascii.anl ./input/integrationpoints.dat > bidon
awk -v val=$valex '{if ($3!=val) {s=s+$3*$7}} END {print -s}' bidon >> ./output3/Pfsum.dat
PFSUM=$(awk '{s=s+$1} END {if (s>0) {print s} else {print 0}}' ./output3/Pfsum.dat)
echo $PFSUM $varbak $dx $dy > bidon
awk '{ print sqrt($1*$2*$3*$4*$3*$4)}' bidon > ./output3/erroronintegral.dat
if [ "$1" == "-naive" ]
then
echo summing error at discrete locations
head ./output/erroratxyascii.anl
# need to add non-uniform grid here
paste ./output/erroratxyascii.anl ./input/integrationpoints.dat > bidon
sumsquare=$(awk -v val=$valex '{if ($3!=val) {s=s+$3*$3*$7*$7}} END {print s}' bidon)
echo $sumsquare $dx $dy $inflation> bidon
awk '{ print sqrt($1*$2*$3*$2*$3),$4,$4*sqrt($1*$2*$3*$2*$3)}' bidon > ./output3/erroronintegralnaive.dat
fi
mv -f ./input/data.dat.nointegral ./input/data.dat
mv -f ./input/valatxy.coord.nointegral ./input/valatxy.coord
mv -f ./input/param.par.nointegral ./input/param.par
mv -f ./input/integrationpoints.dat ./output3/integrationpoints.dat
echo Pushing back original output
rm -r ./output/*
mv -f ./output3/* ./output
rmdir ./output3
fi
fi
echo ' '
echo --------------------------
echo
echo --------------------------
| true |
de67cf920bf50a8067819d24ca55c0205a3d2d05 | Shell | janztec/empc-cxplus-linux-drivers | /src/jtec_can | UTF-8 | 1,174 | 3.265625 | 3 | [] | no_license | #! /bin/bash
### BEGIN INIT INFO
# Provides:          jtec_can
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Configure jtec_can
### END INIT INFO
# Load (start) or unload (stop) the Janz Tec CAN kernel driver on
# emPC-CX+ / emPC-CXR industrial PCs, probing the PCI device for its
# IRQ and register base address.
# emPC-CX+ vendor id, device id
PCI=13c3:2600
if lspci -n | grep "13c3:2a00" > /dev/null; then
 # emPC-CXR vendor id, device id
 PCI=13c3:2a00
fi
do_start() {
	modprobe jhal
	# Extract the IRQ number and the 8K BAR base address from the
	# verbose lspci listing.
	# NOTE(review): depends on the exact `lspci -v` output format -- fragile.
	IRQ=$(lspci -d $PCI -v | grep IRQ | tr ' ' '\n' | tail -n1)
	ADDRESSCAN0=0x$(lspci -d $PCI -v | grep =8K | head -n 1 | grep -o '[0-9a-f]\{6\}00')
	# The second SJA1000 channel sits 0x200 above the first.
	ADDRESSCAN1=$(printf "0x%X\n" $(($ADDRESSCAN0 + 0x200)))
	modprobe can
	modprobe can-dev
	modprobe sja1000
	#modprobe sja1000_platform
	# always two CANs, even if only one port is externally available
	modprobe jtec_can "io=0,$ADDRESSCAN0,$IRQ,$ADDRESSCAN1,$IRQ"
}
case "$1" in
  start|"")
	do_start
	;;
  restart|reload|force-reload)
	echo "Error: argument '$1' not supported" >&2
	exit 3
	;;
  stop)
	rmmod jtec_can
	;;
  *)
	echo "Usage: jtec_can [start|stop]" >&2
	exit 3
	;;
esac
| true |
5c83597f3af83c732379e70f41c168ac5c3dac00 | Shell | iron-cirrus/ovhVpn | /ovhVpn | UTF-8 | 1,087 | 3.515625 | 4 | [] | no_license | #!/bin/sh
### BEGIN INIT INFO
# Provides:          OpenVPN
# Required-Start:
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Start and stop OpenVPN
# Description:       OpenVPN
### END INIT INFO
# Init script wrapping an OpenVPN connection defined by a .ovpn file.
fichName="ovhVpn"
path_to_ovpn_files="/home/android"
ovpn_file_to_use="oneplus6.ovpn"
# Do NOT change anything below this line unless you know what you are doing!
case "$1" in
  start)
	echo "Connecting to OpenVPN "
	# Abort instead of launching openvpn from the wrong directory.
	cd "$path_to_ovpn_files" || exit 1
	# All further output goes to the service log.
	exec 1>/var/log/"$fichName".service.log 2>&1
	/usr/sbin/openvpn --config "$ovpn_file_to_use" &
	;;
  install)
	"$0" stop
	echo installation service
	cp "$0" /etc/init.d
	update-rc.d "$fichName" defaults
	service "$fichName" start
	ps -ef | grep openvpn
	;;
  'status')
	# NOTE(review): status is hard-coded and never probes openvpn; also
	# log_action_msg comes from /lib/lsb/init-functions, which this
	# script does not source -- confirm it is available at runtime.
	log_action_msg "ovhVPN is stopped."
	exit 0
	;;
  stop)
	echo "Closing connection to OpenVPN "
	exec 1>/var/log/"$fichName".service.log 2>&1
	# NOTE: kills every openvpn instance on the host, not only ours.
	killall openvpn
	;;
  *)
	# Fixed: the old message advertised "/etc/init.d/vpn {start|stop}",
	# which is neither this script's name nor its full set of actions.
	echo "Usage: /etc/init.d/$fichName {start|stop|install|status}"
	exit 1
	;;
esac
exit 0
| true |
fc43b395bde486627e44ef1d29677ae265630a2d | Shell | nejcet/react-native-zip-archive | /windows/libs/miniz/amalgamate.sh | UTF-8 | 1,808 | 3.421875 | 3 | [
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | #!/bin/bash
# Build the single-file "amalgamation" of miniz: concatenate the split
# sources into amalgamation/miniz.{c,h}, smoke-test that the result
# compiles in several configurations, and package a release zip.
set -e
mkdir -p amalgamation
OUTPUT_PREFIX=amalgamation/miniz
# Concatenate headers into miniz.h and sources into miniz.c, in
# dependency order.
cat miniz.h > $OUTPUT_PREFIX.h
cat miniz.c > $OUTPUT_PREFIX.c
cat miniz_common.h >> $OUTPUT_PREFIX.h
cat miniz_tdef.c >> $OUTPUT_PREFIX.c
cat miniz_tdef.h >> $OUTPUT_PREFIX.h
cat miniz_tinfl.c >> $OUTPUT_PREFIX.c
cat miniz_tinfl.h >> $OUTPUT_PREFIX.h
cat miniz_zip.c >> $OUTPUT_PREFIX.c
cat miniz_zip.h >> $OUTPUT_PREFIX.h
# NOTE(review): this sed replaces the first '#include "miniz.h"' with
# itself, i.e. it is a no-op as written -- confirm the intended rewrite.
sed -i '0,/#include "miniz.h"/{s/#include "miniz.h"/#include "miniz.h"/}' $OUTPUT_PREFIX.c
# Strip the now-internal cross-includes from the combined files.
for i in miniz miniz_common miniz_tdef miniz_tinfl miniz_zip
do
sed -i "s/#include \"$i.h\"//g" $OUTPUT_PREFIX.h
sed -i "s/#include \"$i.h\"//g" $OUTPUT_PREFIX.c
done
# Compile smoke tests: plain GCC, strict ANSI, clang (when available),
# and every MINIZ_NO_* feature-removal macro.
echo "int main() { return 0; }" > main.c
echo "Test compile with GCC..."
gcc -pedantic -Wall main.c $OUTPUT_PREFIX.c -o test.out
echo "Test compile with GCC ANSI..."
gcc -ansi -pedantic -Wall main.c $OUTPUT_PREFIX.c -o test.out
if command -v clang
then
echo "Test compile with clang..."
clang -Wall -Wpedantic -fsanitize=unsigned-integer-overflow main.c $OUTPUT_PREFIX.c -o test.out
fi
for def in MINIZ_NO_STDIO MINIZ_NO_TIME MINIZ_NO_ARCHIVE_APIS MINIZ_NO_ARCHIVE_WRITING_APIS MINIZ_NO_ZLIB_APIS MINIZ_NO_ZLIB_COMPATIBLE_NAMES MINIZ_NO_MALLOC
do
echo "Test compile with GCC and define $def..."
gcc -ansi -pedantic -Wall main.c $OUTPUT_PREFIX.c -o test.out -D${def}
done
rm test.out
rm main.c
# Ship docs and examples next to the amalgamated sources.
cp ChangeLog.md amalgamation/
cp LICENSE amalgamation/
cp readme.md amalgamation/
mkdir -p amalgamation/examples
cp examples/* amalgamation/examples/
cd amalgamation
! test -e miniz.zip || rm miniz.zip
# zip -@ reads the file list from stdin; keep the here-doc body untouched.
cat << EOF | zip -@ miniz
miniz.c
miniz.h
ChangeLog.md
LICENSE
readme.md
examples/example1.c
examples/example2.c
examples/example3.c
examples/example4.c
examples/example5.c
examples/example6.c
EOF
cd ..
echo "Amalgamation created."
c528f717eec18e98ea5f7d1dee54a225fadddb7f | Shell | tchx84/sugarizer-electron | /make_electron.sh | UTF-8 | 1,950 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | date
# Rebuild the Sugarizer Electron distribution: refresh the bundled
# sugarizer/ tree, optionally minimize it with grunt, then run
# electron-builder either locally or inside a Docker container.
# Modes (any positional arg): "full" skips minimization, "docker"
# builds inside the sugarizer-electron:deploy image.
echo --- Deleting previous content...
cd sugarizer
rm -rf *
cd ..
rm -rf build
echo --- Copying content
rsync -av --exclude-from='exclude.electron' ../sugarizer/* sugarizer
cp package.json.snap sugarizer/package.json
#cp package.json.linux sugarizer/package.json
# Drop the TurtleBlocksJS and Jappy activities from the activity list.
sed -i -e "s/\({\"id\": \"org.sugarlabs.TurtleBlocksJS\",.*\},\)//" sugarizer/activities.json
sed -i -e "s/\({\"id\": \"org.somosazucar.JappyActivity\",.*\},\)//" sugarizer/activities.json
# NOTE(review): on BSD/macOS sed, `-i -e` treats "-e" as a backup suffix
# and leaves activities.json-e behind; this rm cleans that artifact.
rm sugarizer/activities.json-e
rm -rf dist/*
if [ "$1" != "full" -a "$2" != "full" -a "$3" != "full" ]; then
echo --- Minimize
cd ../sugarizer
grunt -v
cd ../sugarizer-electron
cp -r ../sugarizer/build/* sugarizer/
rm -rf sugarizer/activities/Jappy.activity
rm -rf sugarizer/activities/TurtleBlocksJS.activity
fi
# Etoys runs from its remote index in the Electron build.
cp etoys_remote.index.html sugarizer/activities/Etoys.activity/index.html
echo --- Create package
date
cd sugarizer
mkdir build
mkdir build/icons
cp res/icon/electron/icon-512.png build/icons/512x512.png
if [ "$1" != "docker" -a "$2" != "docker" -a "$3" != "docker" ]; then
# --- Build locally
npm install --save
npm install electron-builder --save-dev
#export DEBUG=electron-builder
npm run dist
cd ..
cp -r sugarizer/dist/* dist
else
# --- Build using docker: generate the in-container build script, copy
# the tree in, run it, and copy the build output back out.
echo "mkdir /project" > npm_run_dist.sh
echo "mkdir /project/sugarizer" >> npm_run_dist.sh
echo "cd /project/sugarizer" >> npm_run_dist.sh
echo "npm install" >> npm_run_dist.sh
echo "npm install electron-builder --save-dev" >> npm_run_dist.sh
echo "npm run dist" >> npm_run_dist.sh
docker run --privileged=true -d --name sugarizer-builder -it sugarizer-electron:deploy /bin/bash
docker cp sugarizer sugarizer-builder:/project
docker cp npm_run_dist.sh sugarizer-builder:/
docker exec -t sugarizer-builder /bin/bash /npm_run_dist.sh
docker cp sugarizer-builder:/project/sugarizer/build .
docker stop sugarizer-builder
#docker rm sugarizer-builder
fi
date
| true |
fd3c146ccd90401552771dcb614734613ef78641 | Shell | petronny/aur3-mirror | /cinnamon-settings-daemon-dev/PKGBUILD | UTF-8 | 1,176 | 2.703125 | 3 | [] | no_license | # $Id$
# Maintainer: Realex
# Based on cinnamon-settings-daemon PKGBUILD
# Development (git master) build of the Cinnamon settings daemon,
# packaged as cinnamon-settings-daemon-dev; conflicts with the stable
# cinnamon-settings-daemon package.
_pkgname=cinnamon-settings-daemon
pkgname=${_pkgname}-dev
pkgver=221.ec2ca3a
pkgrel=1
pkgdesc="The Cinnamon Settings daemon"
arch=('i686' 'x86_64')
license=('GPL')
depends=('cinnamon-desktop-dev' 'libibus' 'libcanberra-pulse' 'librsvg' 'nss' 'pulseaudio-alsa' 'upower' 'libnotify' 'libwacom' 'libgnomekbd')
makedepends=('intltool' 'docbook-xsl' 'gnome-common' 'cinnamon-desktop-dev' 'git')
options=('!emptydirs' '!libtool')
conflicts=("${_pkgname}")
install=${pkgname}.install
url="http://cinnamon.linuxmint.com/"
source=(${_pkgname}::git+https://github.com/linuxmint/cinnamon-settings-daemon.git)
sha256sums=('SKIP')
# Version string = <commit count>.<short hash> of the git master branch.
pkgver() {
  cd $_pkgname
  echo $(git rev-list --count master).$(git rev-parse --short master)
}
build() {
  cd ${srcdir}/${_pkgname}
  ./autogen.sh --prefix=/usr --sysconfdir=/etc --localstatedir=/var \
      --libexecdir=/usr/lib/${_pkgname} --disable-static --enable-systemd
  # Patch libtool to link shared objects with --as-needed; see
  #https://bugzilla.cinnamon.org/show_bug.cgi?id=656231
  sed -i -e 's/ -shared / -Wl,-O1,--as-needed\0/g' libtool
  make
}
package() {
  cd ${srcdir}/${_pkgname}
  make DESTDIR="$pkgdir" install
}
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.