blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
64b6dc9b5387ab7fcbf725b503f35f7eac32fb8b | Shell | LeelaPakanati/dotfiles | /.zshrc | UTF-8 | 4,012 | 2.765625 | 3 | [] | no_license | HISTFILE=~/.histfile
# History size kept in memory / persisted to $HISTFILE.
HISTSIZE=1000
SAVEHIST=1000
# appendhistory: append rather than overwrite; autocd: 'dir' implies 'cd dir';
# extendedglob: extra glob operators; notify: report background job status.
setopt appendhistory autocd extendedglob notify
# vi-style line editing.
bindkey -v
zstyle :compinstall filename '/home/lee/.zshrc'
export TERM=xterm-256color
# Load zsh color helpers so $fg[...] works in the prompt below.
autoload -U colors && colors
# Prompt: host|cwd$ with per-segment colors.
PS1="%{$fg[yellow]%}%m%{$fg[green]%}|%{$fg[cyan]%}%~%{$fg[blue]%}$%b "
# Completion system with a selectable menu.
autoload -Uz compinit
zstyle ':completion:*' menu select
zmodload zsh/complist
compinit
_comp_options+=(globdots) # Include hidden files.
# Ctrl-P / Ctrl-N search history for the typed prefix.
bindkey "^P" up-line-or-search
bindkey "^N" down-line-or-search
############ Useful commands ###########################
alias startdocker='systemctl start docker'
alias stopdocker='systemctl stop docker'
alias fxp='xdg-open .' # Open GUI file explorer
alias chrome='google-chrome-stable'
alias ka='killall'
alias ll='ls -al'
# Interactively pick a man page via dmenu and view it as a PDF in zathura.
# Requires: dmenu (with -c patch, apparently), man with -Tpdf, zathura.
whosmans(){
man -k . | dmenu -l 30 -p 'select man page' -c | awk '{print $1}' | xargs -r man -Tpdf | zathura -
}
# Full-screen rainbow clock: refresh the current time every second,
# rendered with figlet and colorized by toilet. Requires watch/figlet/toilet.
gaytime() {
watch -ct -n1 "date '+%T' | figlet | toilet -f term --gay"
}
# Alternative terminal clock (tty-clock based).
alias showtime='tty-clock -csBbtC 6'
# Repeat the given command indefinitely (interrupt with Ctrl-C).
rerun() {
	until false; do
		"$@"
	done
}
# Run command and then ring bell (can run just 'bell' after cmd already running).
# printf instead of echo: zsh's echo interprets backslash escapes and chokes
# on arguments that look like options (e.g. "-n"), printf '%s' does not.
bell() {
	printf '%s\n' "$*"
	"$@"
	printf '\a\n'
}
# Run command and send a desktop notification when it finishes.
# With no arguments: assumes a command just finished in this shell, flushes
# history (zsh 'fc -W') and notifies with the previous history entry.
# With arguments: echoes, runs, then notifies. Requires notify-send.
notify() {
if [ $# -eq 0 ];
then
fc -W
# tail -n2 | head -n1 picks the command *before* this 'notify' invocation.
notify-send "`tail -n2 ~/.histfile | head -n1` Finished Running"
else
echo "$@"
"$@";
notify-send "$@ Finished Running"
fi
}
################### General shortcuts #################################
alias v="vim"
alias vi="vim"
alias r="ranger"
# Older EDA tool locations kept for reference:
#alias vsim="/opt/intelFPGA_pro/18.1/modelsim_ae/bin/vsim"
#alias qsim="/opt/questa/questasim/bin/vsim"
#export PATH=/opt/questa/2020_4/questasim/bin/:$PATH
#alias vivado="/home/lpakanati/bin/vivado.sh"
alias vsim="/home/lpakanati/bin/vsim"
############### Overwrite regular commands w/ options#################555~C~#
# -P: resolve symlinks when changing directory.
alias cd="cd -P"
alias apt="sudo apt"
alias please="sudo"
#alias python="python3"
#alias pip="pip3"
alias grep="grep --color=auto"
# Human-readable sizes, no quoting of names, directories first.
alias ls="ls -hN --color=auto --group-directories-first"
alias sl="ls -hN --color=auto --group-directories-first"
alias ccat="highlight --out-format=ansi"
alias tree="tree -CF"
alias pdflatex="pdflatex -halt-on-error"
# NOTE: shadows the real find(1) with fd; use 'command find' for the original.
alias find="fdfind"
################## Git aliases #########################################
# Push the currently checked-out branch to origin.
function gpush {
	local currbranch
	currbranch=$(git name-rev --name-only HEAD)
	# Quote the branch name so unusual ref names survive word-splitting.
	git push origin "$currbranch"
}
# Force-push the currently checked-out branch to origin (rewrites remote history).
function gpushf {
	local currbranch
	currbranch=$(git name-rev --name-only HEAD)
	# Quote the branch name so unusual ref names survive word-splitting.
	git push origin -f "$currbranch"
}
alias gco="git checkout"
# NOTE: stages everything in the current directory, not just tracked files.
alias gadd="git add ."
alias gcom="git commit -m"
alias gundo="git reset HEAD~1"
alias gstat="git status"
alias glog="git log"
alias greset1="git reset HEAD~1"
alias gdiff="git diff"
alias gpull="git pull"
##########TMUX and VIM Stuff#################################
export VISUAL=vim
export EDITOR="$VISUAL"
# Conda initialization kept disabled; re-enable by uncommenting.
## >>> conda initialize >>>
## !! Contents within this block are managed by 'conda init' !!
#__conda_setup="$('/home/lee/anaconda3/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
#if [ $? -eq 0 ]; then
# eval "$__conda_setup"
#else
# if [ -f "/home/lee/anaconda3/etc/profile.d/conda.sh" ]; then
# . "/home/lee/anaconda3/etc/profile.d/conda.sh"
# else
# export PATH="/home/lee/anaconda3/bin:$PATH"
# fi
#fi
#unset __conda_setup
## <<< conda initialize <<<
# Load zsh-syntax-highlighting; should be last.
# NOTE(review): several exports/sources follow this line, contradicting the
# "should be last" advice above — consider moving it to the end of the file.
source /usr/share/zsh/plugins/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh 2>/dev/null
# Xilinx / license server configuration.
export XILINXD_LICENSE_FILE="2100@chhq-vuplynx01"
export LM_LICENSE_FILE="29000@ch2d-supdesql02:1717@chhq-vuplynx01"
source /tools/Xilinx/Vivado/2023.1/settings64.sh
# fzf keybindings + defaults (uses fdfind as the file lister).
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
export FZF_DEFAULT_COMMAND='fdfind --type file --hidden --no-ignore'
export FZF_DEFAULT_OPTS="--preview='~/.vim/bundle/fzf.vim/bin/preview.sh {}'"
alias vf='vim $(fzf)'
# Intel FPGA toolchain roots.
export INTELFPGAOCLSDKROOT="/root/intelFPGA_pro/21.4/hld"
export QSYS_ROOTDIR="/opt/intelFPGA/21.1/quartus/sopc_builder/bin"
| true |
5a01488f8133fecef8acdfc3c70e546d11ca23de | Shell | wilmardo/docker-platformio-core | /platformio.sh | UTF-8 | 636 | 3.28125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Wrapper that runs platformio-core inside Docker, sharing a volume container
# for the toolchain cache and passing through a USB upload device when present.
VOLUME_CONTAINER_NAME=vc_platformio
VOLUME_CONTAINER_IMAGE=sglahn/vc_platformio:latest
IMAGE_NAME=sglahn/platformio-core:3.4.0

# Create the volume container once.  Match the exact name: the original
# 'docker ps -a | grep $NAME' also matched substrings (e.g. vc_platformio2).
if ! docker ps -a --format '{{.Names}}' | grep -qx "$VOLUME_CONTAINER_NAME"; then
    docker run -u "$(id -u "$USER"):$(id -g "$USER")" --name "$VOLUME_CONTAINER_NAME" "$VOLUME_CONTAINER_IMAGE"
fi

# Upload device: prefer an explicit $UPLOAD_PORT, else the first USB serial port.
DEVICE=
if [ -e /dev/ttyUSB0 ]; then
    DEVICE="--device=/dev/ttyUSB0"
fi
if [ "$UPLOAD_PORT" ]; then
    DEVICE=$UPLOAD_PORT
fi
echo "Using upload port $DEVICE"

# $DEVICE is deliberately unquoted: when empty it must expand to no argument.
docker run --rm \
    -v "$(pwd)":/workspace \
    --volumes-from="$VOLUME_CONTAINER_NAME" \
    -u "$(id -u "$USER"):$(id -g "$USER")" \
    $DEVICE \
    "$IMAGE_NAME" \
    "$@"
| true |
54889a38da2c38684c36a3f9352956ae4afc52f4 | Shell | VsrsCif/ebMS-SVEV | /ebMS-SVEV-example/msh/config/gen-keytores.sh | UTF-8 | 4,606 | 2.640625 | 3 | [] | no_license | #/bin/sh
# Generate test key pairs for the two MSH endpoints, cross-import their
# certificates into each other's truststores, and emit the WSS4J Merlin
# property files used by the ebMS message handlers.
# NOTE(review): 'mkdir keystore' fails if the directory already exists;
# re-running this script therefore aborts here unless 'keystore' is removed.
mkdir keystore
# generate key
echo "generate key"
$JAVA_HOME/bin/keytool -genkey -alias msh.e-box-a.si -keypass key1234 -keystore keystore/msh.e-box-a-keystore.jks -storepass test1234 -dname "cn=msh.e-box-a.si,ou=test,ou=msh,ou=jrc,ou=si" -keyalg RSA
$JAVA_HOME/bin/keytool -genkey -alias msh.e-box-b.si -keypass key1234 -keystore keystore/msh.e-box-b-keystore.jks -storepass test1234 -dname "cn=msh.e-box-b.si,ou=test,ou=msh,ou=jrc,ou=si" -keyalg RSA
$JAVA_HOME/bin/keytool -genkey -alias client-user -keypass key1234 -keystore keystore/client-user.jks -storepass test1234 -dname "cn=Johan Pohan, ou=ebox,ou=jrc,ou=si" -keyalg RSA
# extract certs
echo "extract certs"
$JAVA_HOME/bin/keytool -exportcert -keystore keystore/msh.e-box-a-keystore.jks -storepass test1234 -alias msh.e-box-a.si -file keystore/msh.e-box-a.csr
$JAVA_HOME/bin/keytool -exportcert -keystore keystore/msh.e-box-b-keystore.jks -storepass test1234 -alias msh.e-box-b.si -file keystore/msh.e-box-b.csr
# import certs to truststores (each truststore trusts both endpoints)
echo "import certs to trustores"
$JAVA_HOME/bin/keytool -importcert -trustcacerts -noprompt -alias msh.e-box-a.si -keystore keystore/msh.e-box-a-truststore.jks -storepass test1234 -file keystore/msh.e-box-a.csr
$JAVA_HOME/bin/keytool -importcert -trustcacerts -noprompt -alias msh.e-box-b.si -keystore keystore/msh.e-box-a-truststore.jks -storepass test1234 -file keystore/msh.e-box-b.csr
$JAVA_HOME/bin/keytool -importcert -trustcacerts -noprompt -alias msh.e-box-a.si -keystore keystore/msh.e-box-b-truststore.jks -storepass test1234 -file keystore/msh.e-box-a.csr
$JAVA_HOME/bin/keytool -importcert -trustcacerts -noprompt -alias msh.e-box-b.si -keystore keystore/msh.e-box-b-truststore.jks -storepass test1234 -file keystore/msh.e-box-b.csr
# delete exported cert files; they now live inside the truststores
echo "cleanup"
rm keystore/msh.e-box-a.csr
rm keystore/msh.e-box-b.csr
echo "generate property files"
# generate server e-box-a sign properties (private key for signing)
echo "org.apache.ws.security.crypto.provider=org.apache.wss4j.common.crypto.Merlin" > msh_e-box-a_sign.properties
echo "org.apache.ws.security.crypto.merlin.keystore.type=jks" >> msh_e-box-a_sign.properties
echo "org.apache.ws.security.crypto.merlin.keystore.password=test1234" >> msh_e-box-a_sign.properties
echo "org.apache.ws.security.crypto.merlin.keystore.alias=msh.e-box-a.si" >> msh_e-box-a_sign.properties
echo "org.apache.ws.security.crypto.merlin.keystore.file=msh/config/keystore/msh.e-box-a-keystore.jks" >> msh_e-box-a_sign.properties
# generate server e-box-b sign properties
echo "org.apache.ws.security.crypto.provider=org.apache.wss4j.common.crypto.Merlin" > msh_e-box-b_sign.properties
echo "org.apache.ws.security.crypto.merlin.keystore.type=jks" >> msh_e-box-b_sign.properties
echo "org.apache.ws.security.crypto.merlin.keystore.password=test1234" >> msh_e-box-b_sign.properties
echo "org.apache.ws.security.crypto.merlin.keystore.alias=msh.e-box-b.si" >> msh_e-box-b_sign.properties
echo "org.apache.ws.security.crypto.merlin.keystore.file=msh/config/keystore/msh.e-box-b-keystore.jks" >> msh_e-box-b_sign.properties
# generate server e-box-a truststore properties (peer cert for verification)
echo "org.apache.ws.security.crypto.provider=org.apache.wss4j.common.crypto.Merlin" > msh_e-box-a_signVer.properties
echo "org.apache.ws.security.crypto.merlin.keystore.type=jks" >> msh_e-box-a_signVer.properties
echo "org.apache.ws.security.crypto.merlin.keystore.password=test1234" >> msh_e-box-a_signVer.properties
echo "org.apache.ws.security.crypto.merlin.keystore.alias=msh.e-box-b.si" >> msh_e-box-a_signVer.properties
echo "org.apache.ws.security.crypto.merlin.keystore.file=msh/config/keystore/msh.e-box-a-truststore.jks" >> msh_e-box-a_signVer.properties
# generate server e-box-b truststore properties
echo "org.apache.ws.security.crypto.provider=org.apache.wss4j.common.crypto.Merlin" > msh_e-box-b_signVer.properties
echo "org.apache.ws.security.crypto.merlin.keystore.type=jks" >> msh_e-box-b_signVer.properties
echo "org.apache.ws.security.crypto.merlin.keystore.password=test1234" >> msh_e-box-b_signVer.properties
echo "org.apache.ws.security.crypto.merlin.keystore.alias=msh.e-box-a.si" >> msh_e-box-b_signVer.properties
echo "org.apache.ws.security.crypto.merlin.keystore.file=msh/config/keystore/msh.e-box-b-truststore.jks" >> msh_e-box-b_signVer.properties
# generate keypasswords
# NOTE(review): the keys above were generated with -keypass "key1234" but this
# file records "1234" — looks like a mismatch; confirm which value is correct.
echo "msh.e-box-a.si=1234" > msh_key-passwords.properties
echo "msh.e-box-b.si=1234" >> msh_key-passwords.properties
| true |
8e6f4064a06982db8dcd202528256a7be614e8ee | Shell | ragusa87/symfony4-bootstrap | /bin/quality_control | UTF-8 | 757 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Quality gate for the Symfony project: syntax lint, coding standard,
# configuration validation, unit tests, and a security check.
ENV_PATH=".env"

# Pull in local environment settings (e.g. APP_ENV) when present.
if [ -f "${ENV_PATH}" ]; then
    source "${ENV_PATH}"
fi

# Run a quality gate and abort the whole script when it fails
# (replaces the duplicated res=$? / exit 1 boilerplate).
check_step() {
    "$@"
    local res=$?
    echo -e ""
    if [ "${res}" -gt 0 ]; then
        exit 1
    fi
}

# Syntax checks
# BUGFIX: 'php -l' lints a single file, not a directory, so lint each file.
find src/ -name '*.php' -print0 | xargs -0 -n1 -r php -l
bin/console lint:twig templates/
bin/console lint:xliff translations/
bin/console lint:yaml etc/

# Coding standard checks
echo -e "\n\e[33mRunning PHPCS...\e[0m\n"
check_step vendor/bin/phpcs -p --colors --standard=PSR2 --extensions=php src/

# Configuration checks
bin/console doctrine:schema:validate --skip-sync
if [ "${APP_ENV}" == "prod" ]; then
    bin/console doctrine:ensure-production-settings
fi

# Automated tests
echo -e "\n\e[33mRunning PHPUnit...\e[0m\n"
check_step vendor/bin/phpunit

# Security checks
bin/console security:check
d6517269e7abdd387340e2536440da0f1584078a | Shell | UCSF-Costello-Lab/LG3_Pipeline | /scripts/mutation_overlaps.sh | UTF-8 | 1,284 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Wrapper around scripts/mutation_overlaps.py: validates the environment,
# runs the overlap analysis for one patient, and summarizes the result column.
# Relies on helper functions (assert_*, error, source_lg3_conf) from utils.sh.
# shellcheck source=scripts/utils.sh
source "${LG3_HOME:?}/scripts/utils.sh"
source_lg3_conf

PROGRAM=${BASH_SOURCE[0]}
echo "[$(date +'%Y-%m-%d %H:%M:%S %Z')] BEGIN: $PROGRAM"
echo "Call: ${BASH_SOURCE[*]}"
echo "Script: $PROGRAM"
echo "Arguments: $*"

# Required/optional environment configuration (fails early via :? when unset).
LG3_HOME=${LG3_HOME:?}
LG3_OUTPUT_ROOT=${LG3_OUTPUT_ROOT:-output}
LG3_INPUT_ROOT=${LG3_INPUT_ROOT:-${LG3_OUTPUT_ROOT}}
PROJECT=${PROJECT:?}
LG3_DEBUG=${LG3_DEBUG:-true}

### Debug
if [[ $LG3_DEBUG ]]; then
  echo "Settings:"
  echo "- LG3_HOME=$LG3_HOME"
  echo "- LG3_INPUT_ROOT=${LG3_INPUT_ROOT:?}"
  echo "- LG3_OUTPUT_ROOT=$LG3_OUTPUT_ROOT"
  echo "- LG3_SCRATCH_ROOT=$LG3_SCRATCH_ROOT"
  echo "- PWD=$PWD"
  echo "- USER=$USER"
  echo "- PBS_NUM_PPN=$PBS_NUM_PPN"
fi

echo "Software:"
echo "- PYTHON=${PYTHON:?}"
assert_python "$PYTHON"
# ADHOC: In case it is set by user — a stray PYTHONPATH could shadow modules.
unset PYTHONPATH

## Input: mutation table, patient identifier, and output path.
MUTFILE=$1
PATIENT=$2
OUTFILE=$3
echo "Input:"
echo "- MUTFILE=${MUTFILE:?}"
echo "- PATIENT=${PATIENT:?}"
echo "- OUTFILE=${OUTFILE:?}"

$PYTHON "${LG3_HOME}/scripts/mutation_overlaps.py" "${MUTFILE}" "${PATIENT}" "${OUTFILE}" || error "mutation_overlaps.py failed"

assert_file_exists "${OUTFILE}"
# Summarize the last (classification) column of the output table.
awk -F'\t' '{print $NF}' "${OUTFILE}" | sort | uniq -c

echo "[$(date +'%Y-%m-%d %H:%M:%S %Z')] END: $PROGRAM"
| true |
1334af3690c184ff775e72f28c2b7f078004c934 | Shell | gcardy/ansible-laptop | /roles/dotfiles/files/bash/bash.d/auto-complete.sh | UTF-8 | 672 | 3.03125 | 3 | [] | no_license | # bash autocomplete
# aws cli bash autocomplete
complete -C aws_completer aws
# We assume that its only linux or mac
# so just 2 conditions here
if [[ "$OSTYPE" == "darwin"* ]]; then
# assumes you install bash_completion in brew
if [ -f `brew --prefix`/etc/bash_completion ]; then
. `brew --prefix`/etc/bash_completion
. /usr/local/etc/bash_completion.d/git-completion.bash
fi
else
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
| true |
35c3a39e2c728664706d22e2f1db491ae0d5eb8a | Shell | shirasagi/ss-vagrant | /packer/scripts/disable-ipv6.sh | UTF-8 | 547 | 3.21875 | 3 | [
"MIT"
] | permissive | os=${os:-"centos7"}
# Disable IPv6 for CentOS 6 (module blacklist) or CentOS 7 (sysctl),
# and comment out the ::1 entry in /etc/hosts.
if [ "$os" == "centos6" ]; then
  echo 'NETWORKING_IPV6=no' >> /etc/sysconfig/network
  echo "options ipv6 disable=1" > /etc/modprobe.d/disable-ipv6.conf
  /sbin/chkconfig ip6tables off
  # BUGFIX: '-ie' is parsed by GNU sed as -i with backup suffix "e",
  # silently creating a stray /etc/hostse file; use '-i -e' instead.
  sed -i -e 's/::1/# ::1/' /etc/hosts
fi
if [ "$os" == "centos7" ]; then
  sysctl -w net.ipv6.conf.all.disable_ipv6=1
  sysctl -w net.ipv6.conf.default.disable_ipv6=1
  # Persist the setting across reboots.
  cat << _EOT_ | tee /etc/sysctl.d/99-disableipv6.conf
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
_EOT_
  sed -i -e 's/::1/# ::1/' /etc/hosts
fi
| true |
7bdebd09912fa5b77523910bfe957aeb34bc5890 | Shell | anyWareSculpture/sculpture-client | /scripts/anyware | UTF-8 | 1,551 | 4.0625 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Resolve the directory this script lives in, following symlinks, so that
# sibling files (tools.sh) can be sourced from any working directory.
# NOTE(review): uses bashisms (BASH_SOURCE, [[ ]]) although the shebang is
# /bin/sh — works only where sh is bash; consider changing the shebang.
get_script_dir() {
  SOURCE="${BASH_SOURCE[0]}"
  # While $SOURCE is a symlink, resolve it
  while [ -h "$SOURCE" ]; do
    DIR=`cd -P "$( dirname "$SOURCE" )" && pwd`
    SOURCE=`readlink "$SOURCE"`
    # If $SOURCE was a relative symlink (so no "/" as prefix, need to resolve it relative to the symlink base directory
    [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
  done
  DIR=`cd -P "$(dirname "$SOURCE")" && pwd`
  echo "$DIR"
}

DIR=`get_script_dir`
# Fall back to the current directory if resolution failed.
if [[ ! -d "$DIR" ]]; then DIR="$PWD"; fi

. "$DIR/tools.sh"
# Print usage and exit when no command was given; otherwise dispatch to
# do_anyware (defined in tools.sh).
if [ $# -lt 1 ]; then
  echo "Usage: $0 <command> [<sculptureId> ...]"
  echo ""
  echo "  sculptureId          One or more of sculpture1, sculpture2 or sculpture3. Defaults to all sculptures"
  echo ""
  echo "Commands:"
  echo "  start                Start the sculpture(s)"
  echo "  stop                 Stop the sculpture(s)"
  echo "  restart              Restart the sculpture(s)"
  echo "  reboot               Reboot the Raspberry Pi(s)"
  echo "  halt                 Shut down the Raspberry Pi(s)"
  echo "  get config           Downloads the current config from the sculpture(s)"
  echo "  clear config         Clears any custom config on the sculpture(s)"
  echo "  config <config.js>   Upload and use the given config file"
  echo "  publish              Publish the local development build"
  echo "  fullpublish          Publish with all assets (takes more time)"
  echo "  version <build>      Select software version by name"
  exit 1
fi

# BUGFIX: quote "$@" so arguments survive word-splitting and globbing intact.
do_anyware "$@"
| true |
f46a5cb0ab47062eb5f1175e193e7ed55498ab14 | Shell | gilbertoamarcon/local-osm | /run.sh | UTF-8 | 601 | 3.765625 | 4 | [] | no_license | #!/bin/bash
# Tiles obtained from
# https://openmaptiles.com/downloads/planet/

# Example usage:
# ./run.sh /media/gil/storage/maps/2017-07-03_planet_z0_z14.mbtiles 8080

# Default Parameters
port=8080

# Input Args
if [ "$#" -lt 1 ]; then
	printf 'Invalid Arguments. Usage:\n./run.sh <path-to-map> <port-number>\n'
	# BUGFIX: exit non-zero on a usage error (the bare 'exit' returned 0).
	exit 1
else
	map_dir=$(dirname "$1")
	map_file=$(basename "$1")
	if [ "$#" -gt 1 ]; then
		port=$2
	fi
fi

# Ensure the directory for the container id exists, then launch detached.
# (The original expanded an unquoted $com string and assumed tmp/ existed.)
mkdir -p tmp
sudo docker run --rm -d -v "$map_dir":/data -p "$port":80 klokantech/tileserver-gl "$map_file" > tmp/docker_id
echo 'Server launched. Use ./kill.sh to finish server.'
| true |
84401bbe29f29fa4523d602a73ecbca6b8d94662 | Shell | jessie-morris/hindsight-aws | /deploy.sh | UTF-8 | 990 | 3.640625 | 4 | [] | no_license | #!/usr/bin/env bash
# Deploy the CloudFormation stack.  $1 = S3 data bucket name; any further
# arguments are forwarded to 'aws cloudformation deploy' as extra flags.
# Reads STACK_NAME from the caller's scope.
function deploy {
  local -r bucket="${1:?Bucket name required.}"
  shift

  aws cloudformation deploy \
    --stack-name=${STACK_NAME} \
    --template-file hindsight.yaml \
    --capabilities CAPABILITY_NAMED_IAM \
    --parameter-overrides DataBucketName=${bucket} \
    "${@}"
}

# Print the value of a single stack output ($1 = OutputKey) for STACK_NAME.
function get_output {
  local -r key="${1}"
  aws cloudformation describe-stacks \
    | jq -r ".Stacks[] | select(.StackName == \"${STACK_NAME}\") | .Outputs[] | select(.OutputKey == \"${key}\") | .OutputValue"
}

# Render the aws-auth IAM role mapping via helm and apply it to kube-system.
# NOTE(review): 'local -r var=$(cmd)' masks the command's exit status — a
# failed get_output would go unnoticed here.
function iam_mapping {
  local -r node=$(get_output NodeRole)
  local -r user=$(get_output UserRole)

  helm template aws ./helm --set aws.role.node="${node}",aws.role.user="${user}" | kubectl apply -n kube-system -f -
}
# Entry point: show help, then deploy the stack and apply the IAM mapping.
if [[ $1 == "-h" || $1 == "--help" ]]; then
  echo "Usage: ./deploy.sh [STACK_NAME] [BUCKET_NAME] [cf_flags]"
  exit 0
fi

declare -r STACK_NAME="${1:?Stack name required.}"
shift

# BUGFIX: quote "$@" so extra CloudFormation flags containing spaces are
# forwarded as single arguments instead of being word-split.
deploy "$@"
iam_mapping
| true |
86694ba612a51bb3e3697eae0c245a1a8119a931 | Shell | saradhathiagu/upload_files | /Commands_Ubuntu/Commands_1.sh | UTF-8 | 3,816 | 2.625 | 3 | [] | no_license | #! /bin/bash
# Absolute directory of this script, resilient to being run from elsewhere.
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Docker image tags used throughout the build functions below.
BASE_IMAGENAME=api:base;
IMAGENAME=solar_panel_2;
# Remove stopped containers, dangling images, and the two named service
# containers (force-removing them if still running).
function remove()
{
    # Word-splitting of the id list is intentional: one argument per id.
    if [ "$(docker ps -aq)" ]; then
        docker rm $(docker ps -aq)
    fi
    # BUGFIX: the original tested [ " $(...) " ] — the padding spaces made
    # the string always non-empty, so rmi ran even with no dangling images.
    if [ "$(docker images -f dangling=true -q)" ]; then
        docker rmi $(docker images -f dangling=true -q)
    fi
    if [ "$(docker ps -q -f name=api:base)" ]; then
        docker rm -fv api:base
        echo 'Container removed.'
    else
        if [ "$(docker ps -aq -f name=api:base)" ]; then
            docker rm api:base
            echo 'Container removed.'
        fi
    fi
    if [ "$(docker ps -q -f name=solar_panel_2)" ]; then
        docker rm -fv solar_panel_2
    else
        if [ "$(docker ps -aq -f name=solar_panel_2)" ]; then
            docker rm -fv solar_panel_2
        fi
    fi
}
# Full cleanup: remove containers, then force-remove both images.
function clean()
{
    remove
    # The original repeated each rmi block twice verbatim; a single
    # force-remove per image is sufficient.
    if [ "$(docker images -q "${BASE_IMAGENAME}")" ]; then
        docker rmi -f "$BASE_IMAGENAME"
    fi
    if [ "$(docker images -q "${IMAGENAME}")" ]; then
        docker rmi -f "$IMAGENAME"
    fi
}
# Rebuild the server-stub images from scratch and restart the compose stack.
function buildServerStub()
{
    if [ "$(docker images -q "${BASE_IMAGENAME}")" ]; then
        clean
    fi
    if [ "$(docker images -q "${IMAGENAME}")" ]; then
        clean
    fi
    # Guard the cd: building from the wrong directory would pick up a wrong
    # Dockerfile (the original continued silently if the cd failed).
    cd "$BASEDIR/1_Solar_Panel_Web_Portal/web-app/server-stub" || return 1
    docker build --no-cache -f Dockerfile.base -t "$BASE_IMAGENAME" .
    docker build -t "$IMAGENAME" .
    docker-compose down && docker-compose build --no-cache && docker-compose up -d
}
# Build and run the nginx front proxy via its own build script.
function buildnginx()
{
    # Guard the cd so we never run build.sh from the wrong directory.
    cd "$BASEDIR/1_Solar_Panel_Web_Portal/web-app/nginx" || return 1
    bash build.sh build
    bash build.sh run
}
# Build the cloud portal backend (base image first, then the service image).
function buildCloudPortalBackend()
{
    # Guard the cd so we never run build.sh from the wrong directory.
    cd "$BASEDIR/6_Cloud_Web_Portal_Routing_Method/uav-solar-panel/uav-solar-panel/backend" || return 1
    bash build.sh buildbase
    bash build.sh build
}

# Build the cloud portal frontend (base image first, then the service image).
function buildCloudPortalFrontend()
{
    # Guard the cd so we never run build.sh from the wrong directory.
    cd "$BASEDIR/6_Cloud_Web_Portal_Routing_Method/uav-solar-panel/uav-solar-panel/frontend" || return 1
    bash build.sh buildbase
    bash build.sh build
}
# Entry sequence: stop the running Angular dev server, rebuild backend + nginx,
# then restart the Angular frontend under 'forever'.
forever stop 1_Solar_Panel_Web_Portal/web-app/front-end/node_modules/@angular/cli/bin/ng
buildServerStub
buildnginx
cd $BASEDIR/1_Solar_Panel_Web_Portal/web-app/front-end
forever start node_modules/@angular/cli/bin/ng serve --host 0.0.0.0 --prod
# Cloud portal builds kept disabled; uncomment to deploy them as well.
#buildCloudPortalBackend
#buildCloudPortalFrontend
#forever start node_modules/@angular/cli/bin/ng serve --host 0.0.0.0 --prod
# front-end/src/app/check-management/check-detail/check-detail.service - defects to defect - return this.http.get(Const.BACKEND_API_ROOT_URL + '/api/v1/station/' + stationId + '/date/' + date + '/defect?' + params.join('&')).map((res:Response) => {
# working url http://10.79.162.105:5000/spi/sungrow/api/v1/station/LN0001/date/2017-09-19/defect?category=1
# front-end/src/app/check-management/checkoverview/check-overview.service - status to station/status getCheckOverviewInfo():Observable<Response> { return this.http.get(Const.BACKEND_API_ROOT_URL + '/api/v1/station/status').map
# percent = Math.ceil(json.healthy/(json.healthy + json.infix + json.confirmed + json.toconfirm)); to json.bad = json.infix;json.toconfirm = item.status.tocomfirm;json.infix = item.status.tofix; percent = Math.ceil(json.healthy/(json.healthy + json.infix + json.toconfirm));
# in src/app/shared/translator.service.ts this._default = translate.getBrowserLang(); to this._default = 'en'; this.translate.use('en' || this.translate.getDefaultLang());
# check-detail-service.ts return this.http.get(Const.BACKEND_API_ROOT_URL + '/api/v1/station/' + stationId + '/date/' + date + '/defect/' + defectId + '/images/ir').map((res:Response) => {
#docker exec -ti server-stub_api_1 /bin/sh
| true |
65d1165e4b7ea983a7515a068314bf6afad3ea27 | Shell | m4x91/Osiris | /scripts/backup/backup.sh | ISO-8859-1 | 3,448 | 3.609375 | 4 | [] | no_license | #!/bin/bash
###############################################################################
#
# backup.sh - Skript, das ein Backup aller wichtigen Komponenten des Servers
# macht.
#
# Copyright (c) 2016 i-Vertix NMS (info@pgum.eu)
#
# Development:
# Jochen Platzgummer
#
# Version 2.3
#
# Changelog
# 20.08.2016: Logstash config backup
# 20.12.2015: Remove backup from centreon_syslog DB
# 09.04.2014: Osiris 2.1 Anpassungen
# 23.01.2014: Backup DB LogAnalyser
# 09.09.2011: Anpassung an Osiris 2.0
# 22.09.2010: Backup um Jasper Server Repository erweitert
# 22.09.2010: Backup um die MySQL-DB von OTRS & JasperServer erweitert
# 20.04.2010: Zielverzeichnis fr Backup syslog war falsch
# 09.04.2010: leere Verzeichnisse werden nun gelscht
# 15.02.2010: Backup um CIFS Syslog (PGP) erweitert
# 05.02.2010: CVS-Repository Backup hinzugefuegt (Rancid)
# 31.01.2010: Datenbank syslog in das Backup aufgenommen
# 27.01.2010: Grundversion:
# - Backup der folgender Datenbanken:
# centreon, glpi, wikidb, nedi, ocsweb, phpmyadmin, syslog
# - Backup der wichtigsten Files und Verzeichnisse
#
###############################################################################
# Backup destination and retention configuration.
BACKUP_PATH=/opt/bi-s/cifs/backup
DIR=$(date +%Y%m%d)
FILE=$(date +%Y%m%d_%H%M).sql.gz
BACKUP_RETTIME=5

# SECURITY: credentials are hardcoded; consider a protected option file
# (e.g. --defaults-extra-file) instead of embedding the password here.
DB_USER=backup
DB_PWD=mFRiQYIuwHhCIk6s753Q

# create backup directory if not exists
if [ ! -d "$BACKUP_PATH/$DIR" ]; then
    mkdir -p "$BACKUP_PATH/$DIR"
    echo "$(date +%Y.%m.%d-%H:%M:%S) $BACKUP_PATH/$DIR created"
fi

# delete backup file if exists (plain -f: the target is a regular file)
if [ -f "$BACKUP_PATH/$DIR/$FILE" ]; then
    rm -f "$BACKUP_PATH/$DIR/$FILE"
    echo "$(date +%Y.%m.%d-%H:%M:%S) duplicated File canceled"
fi
# BEGIN DB BACKUP
echo "$(date +%Y.%m.%d-%H:%M:%S) Begin MySQL database backup"
# NOTE: -p$DB_PWD exposes the password in the process list; see setup note.
echo "Start backup db CENTREON"
mysqldump -u $DB_USER -p$DB_PWD centreon | gzip > "$BACKUP_PATH/$DIR/centreon_$FILE"
echo "End backup db CENTREON"
echo "Start backup db NEDI"
mysqldump -u $DB_USER -p$DB_PWD nedi | gzip > "$BACKUP_PATH/$DIR/nedi_$FILE"
echo "End backup db NEDI"
echo "Start backup db MEDIAWIKI"
mysqldump -u $DB_USER -p$DB_PWD mediawiki | gzip > "$BACKUP_PATH/$DIR/wikidb_$FILE"
echo "End backup db MEDIAWIKI"
echo "Start backup db GLPI"
mysqldump -u $DB_USER -p$DB_PWD glpi | gzip > "$BACKUP_PATH/$DIR/glpi_$FILE"
echo "End backup db GLPI"
echo "Start backup db PHPMYADMIN"
mysqldump -u $DB_USER -p$DB_PWD phpmyadmin | gzip > "$BACKUP_PATH/$DIR/phpmyadmin_$FILE"
# BUGFIX: this message previously said "OCSWEB" although phpmyadmin was dumped.
echo "End backup db PHPMYADMIN"
echo "$(date +%Y.%m.%d-%H:%M:%S) End MySQL database backup"
# END BACKUP
#BEGIN FILE BACKUP
echo "$(date +%Y.%m.%d-%H:%M:%S) Begin file backup"
# Recreate the files/ staging directory; ${VAR:?} aborts if BACKUP_PATH is
# empty so the rm -rf can never hit the filesystem root.
if [ -d "$BACKUP_PATH/$DIR/files" ]; then
    rm -rf "${BACKUP_PATH:?}/$DIR/files"
fi
mkdir "$BACKUP_PATH/$DIR/files"
# -P preserves absolute paths inside the archives.
tar czfvP "$BACKUP_PATH/$DIR/files/std_plugins.tar.gz" /usr/lib/nagios/plugins
tar czfvP "$BACKUP_PATH/$DIR/files/smokeping.tar.gz" /usr/local/smokeping/etc/
tar czfvP "$BACKUP_PATH/$DIR/files/bis_scripts.tar.gz" /opt/bi-s/software/scripts/
tar czfvP "$BACKUP_PATH/$DIR/files/rancid_cvs.tar.gz" /usr/local/rancid/var/
tar czfvP "$BACKUP_PATH/$DIR/files/logstash.tar.gz" /etc/logstash/
echo "$(date +%Y.%m.%d-%H:%M:%S) File backup completed"
#END FILE BACKUP
# BEGIN BACKUP RETENTION
echo "$(date +%Y.%m.%d-%H:%M:%S) Begin retention"
# 'find -delete' replaces the 'xargs rm -f' pipe, which broke on paths
# containing whitespace; afterwards prune now-empty day directories.
find "$BACKUP_PATH" -mtime +"$BACKUP_RETTIME" -type f -delete
find "$BACKUP_PATH" -depth -type d -empty -exec rmdir {} \;
echo "$(date +%Y.%m.%d-%H:%M:%S) Retention completed"
e19b13943afc3c7b35afd3825889e3fd42a10996 | Shell | clems4ever/minipage-hub | /add-website | UTF-8 | 1,592 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Interactive helper: registers a static website container behind the
# nginx-proxy/letsencrypt setup.  Expects CONFIG_DIR, NETWORK_BRIDGE and
# DOCKER_IMAGE_NAME to be defined in ./config.
source config

if [ "$#" -ne 2 ]; then
    echo "Usage: add-website <DOMAIN> <WEBSITE_DIR>"
    exit 1
fi

DOMAIN=$1
WEBSITE_DIR=$2
WEBSITE_CONFIG_DIR=$CONFIG_DIR/$DOMAIN
ENVFILE_PATH=$WEBSITE_CONFIG_DIR/envfile

echo "=== Website deployment ==="
echo ""
echo "Your website will be hosted at $DOMAIN and will serve $WEBSITE_DIR."

# First-time setup only: generate the envfile interactively.
if [ ! -d $WEBSITE_CONFIG_DIR ]
then
    mkdir $WEBSITE_CONFIG_DIR

    echo -n "What is your email address for letsencrypt [ENTER]: "
    read LETSENCRYPT_EMAIL

    # Create the envfile
    # NOTE(review): read -d '' exits non-zero when no NUL is found; harmless
    # here (no set -e), but don't add 'set -e' without adjusting this.
    read -d '' ENVFILE << EOF
VIRTUAL_HOST=$DOMAIN
LETSENCRYPT_HOST=$DOMAIN
LETSENCRYPT_PORT=80
LETSENCRYPT_EMAIL=$LETSENCRYPT_EMAIL
EOF
    echo "$ENVFILE" > $ENVFILE_PATH

    echo -n "Do you want to use mailgun? [y/N]: "
    read SELECT_MAILGUN

    if [ "$SELECT_MAILGUN" == "y" ]
    then
        echo -n "What is your mailgun domain [ENTER]: "
        read MAILGUN_DOMAIN
        # -s keeps the API key off the terminal.
        echo -n "What is your mailgun API key [ENTER]: "
        read -s MAILGUN_API_KEY
        echo ""
        echo -n "The email address the emails will be sent from [ENTER]: "
        read EMAIL_FROM
        echo -n "The email address the emails will be sent to [ENTER]: "
        read EMAIL_TO

        # Create the envfile
        read -d '' MAIL_ENVFILE << EOF
MAILGUN_API_KEY=$MAILGUN_API_KEY
MAILGUN_DOMAIN=$MAILGUN_DOMAIN
EMAIL_FROM=$EMAIL_FROM
EMAIL_TO=$EMAIL_TO
EOF
        echo "$MAIL_ENVFILE" >> $ENVFILE_PATH
    fi
fi

# Launch the website container on the proxy network, serving WEBSITE_DIR.
docker run -d --env-file $ENVFILE_PATH \
    --name minipage-web-$DOMAIN --expose 80 \
    -v $WEBSITE_DIR:/usr/html \
    --net $NETWORK_BRIDGE \
    $DOCKER_IMAGE_NAME > /dev/null

echo "Deployment done!"
| true |
79179ad1ad40911143b3615947e5ae0ae808f119 | Shell | NokkuPrudhvi/SampleService | /service-jenkins-batch/scripts/jenkin.sh | UTF-8 | 630 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env bash
# It runs on host startup.
# Log everything we do.
set -x
exec >> /var/log/user-data-jenkins.log 2>&1
# Register the Jenkins apt repository and its signing key.
wget -q -O - https://pkg.jenkins.io/debian/jenkins.io.key | sudo apt-key add -
sudo sh -c 'echo deb http://pkg.jenkins.io/debian-stable binary/ > /etc/apt/sources.list.d/jenkins.list'
sudo apt-get update -y
sudo apt install default-jdk -y
# SECURITY: --allow-unauthenticated skips signature verification for the
# jenkins package; only needed if the key import above failed — review.
sudo apt install jenkins -y --allow-unauthenticated
sudo systemctl start jenkins
sudo systemctl status jenkins
sudo systemctl enable jenkins
# sudo ufw allow 8080
# sudo ufw status
# Displays Jenkins password
sudo cat /var/lib/jenkins/secrets/initialAdminPassword
6a1a5405ca4aeec42fb53dbb0d6507f1a7c27211 | Shell | orosz-usgs/wqx-load | /download_epa_wqx_dump_files.sh | UTF-8 | 405 | 3.28125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/bash
# download the Postgres wqx dump files from the epa
# looks for the epa download python script in the same directory as this script
# usage: download_epq_wqx_dump_files.sh [script options]

script_dir=$(dirname "$0")
script="$script_dir/epa_wqx_download.py"

echo Installing python requirements
pip3 install -r "$script_dir/requirements.txt"

echo "Running epa download: $script $*"
# BUGFIX: requirements are installed with pip3, so run the script with
# python3 (bare 'python' may be Python 2); quote "$@" to keep option
# arguments with spaces intact.
python3 "$script" "$@"
| true |
3b2c46d846a93260bd6cc584e7cea4fc5426f14e | Shell | davidtheriault/general | /practice/replace.sh | UTF-8 | 495 | 3.171875 | 3 | [] | no_license | # Perl Search and Replace, goes through files from find and replaces all occurances of PUPPY with NEW:
# perl -i tells it to make the changes to the file in place, could also use the -i.bak option which would leave the orginal with a .bak extention
perl -i -pe 's/OLD/NEW/g' filename
# -p option tells perl to run the code in -e for each line in the passed in file, assigning that line as $_
find -xtype f -name \* -a \! -path \*/.svn\* -a \! -path \*/.cvs\* | xargs perl -p -e 's/PUPPY/NEW/g'
| true |
cf415930cda86062d07ed0e4e48238163bdb1463 | Shell | romanperesypkin/cpluplus_patterns | /build.sh | UTF-8 | 345 | 3.71875 | 4 | [] | no_license | #! /bin/bash
# Using:
# ./build.sh       - to build projects
# ./build.sh clean - to clean projects

printf "start building ------->>\n"

cmd=$1
echo "command to invoke: ${cmd}"

# Visit every subdirectory and run its build script.  The subshell keeps the
# working directory stable: the original ran a bare 'cd ..' afterwards, which
# walked OUT of the tree whenever 'cd $i' had failed.
for i in ./*; do
	if [ -d "$i" ]; then
		echo "DIR: $i"
		( cd "$i" && ./build.sh "$cmd" )
	fi
done

printf "<<------- stop building\n"
| true |
a3f9789b582162a7c8dd4a946c50564970cbbf9d | Shell | SMAPPNYU/smapputil | /sh/run_jupyter_dumbo.sh | UTF-8 | 498 | 2.546875 | 3 | [] | no_license | module load python/gnu/2.7.11
module load R/3.3.2
module load java/1.8.0_72
module load spark/2.1.0
PORT=$(shuf -i 6000-9999 -n 1)
echo ssh -L $PORT:localhost:$PORT $USER@dumbo.es.its.nyu.edu
jupyter notebook --port=$PORT --no-browser
':
Run this in the Dumbo cluster to run a Jupyter notebook.
Rather than the sbatch command in the Prince cluster,
you can use the sh command to run this script.
It loads Python, R, and Spark, for their respective
kernel in Jupyter.
Author: Leon Yin
Last updated: 2017-07-05
'
| true |
06875efded4d6f1e372bacde28e641efff1d4c26 | Shell | oswalpalash/dot-files | /.bashrc | UTF-8 | 2,335 | 3.21875 | 3 | [] | no_license | # If not running interactively, don't do anything
# Bail out for non-interactive shells: nothing below matters for scripts.
if [[ $- != *i* ]] ; then
	return
fi
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ] ; then
    debian_chroot=$(cat /etc/debian_chroot)
fi
# load Git info (provides __git_ps1 used in PS1 below)
if [ -f ~/.bash/git-prompt.sh ] ; then
    . ~/.bash/git-prompt.sh
fi
export GIT_PS1_SHOWDIRTYSTATE=true
# set the prompt string: user@host:cwd<branch>$ with colors
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;34m\]\u\[\033[01;32m\]@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\[\033[01;33m\]$(__git_ps1 "<%s>")\[\033[0m\]$ '
# enable color support for the commonly used binaries
if [ -x /usr/bin/dircolors ] ; then
    # Prefer the user's ~/.dircolors palette when present.
    test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
    alias ls='ls --color=auto'
    alias dir='dir --color=auto'
    alias vdir='vdir --color=auto'
    alias grep='grep --color=auto'
    alias fgrep='fgrep --color=auto'
    alias egrep='egrep --color=auto'
fi
# enable bash completion in interactive shells
if ! shopt -oq posix; then
    if [ -f /usr/share/bash-completion/bash_completion ] ; then
        . /usr/share/bash-completion/bash_completion
    elif [ -f /etc/bash_completion ] ; then
        . /etc/bash_completion
    fi
fi
# load bindings
# BUGFIX: tilde does not expand inside double quotes, so the original set
# INPUTRC to the literal string "~/.inputrc"; use $HOME instead.
export INPUTRC="$HOME/.inputrc"
# default text editor
export EDITOR="vim"
# set the MySQL prompt
export MYSQL_PS1="\u@\h [\d]> "
# Python specific
export PYTHONSTARTUP="$HOME/.pythonrc"
# RVM specific
export PATH="$PATH:$HOME/.rvm/bin" # Add RVM to PATH for scripting
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
# Golang specific
export GOPATH="$HOME/go"
export PATH="$PATH:/usr/local/go/bin"
export PATH="$PATH:$GOPATH/bin"
# Android specific
export PATH="$PATH:$HOME/Android/Sdk/platform-tools"
# list of files to source (per-topic bash snippets)
for file in ~/.bash/*.bash ; do
    if [ -f "$file" ] ; then
        . "$file"
    fi
done
# add the following locations to $PATH if not already present
path_list=('/bin' '/sbin' '/usr/bin' '/usr/sbin' '/usr/local/bin' '/usr/local/sbin')
for i in "${path_list[@]}" ; do
    # ${PATH:=$i} seeds PATH with the first entry if PATH was unset/empty;
    # the surrounding colons make the substring match exact-component.
    case ":${PATH:=$i}:" in
        *":$i:"*)
            ;;
        *)
            export PATH="$PATH:$i" ;;
    esac
done
| true |
b4d08da460bec712237a339102f5c64cb255c7e8 | Shell | blu-base/fer_steamreforming_supplements | /cfd-furnace/collect-results.h | UTF-8 | 5,365 | 3.609375 | 4 | [
"CC-BY-4.0"
] | permissive | #!/bin/bash
RESULTSTEM="results/output"
## Assorted Data
# One row per run in IDs/*; up to 4 rows are collected concurrently
# (the ((i=i%4)); ((i++==0)) && wait idiom throttles background jobs).
echo -e "ID\tValid\tCase\tMassFlowPerTube\tInnerRadius(m)\tPressureDrop(Pa)\tOutletTemperature(K)\tHeatTransferBalance_Inlet/Outlet(W)\tHeatTransferPerAreaMean(W/m2)\tHeatTransferPerAreaSTD(W/m2)\tHeatTransferPerAreaVAR(W/m2)\tHeatTransferMean(W)\tHeatTransferSTD(W)\tHeatTransferVAR(W)\tContinuity\tSolvingStatus" > $RESULTSTEM-temp.csv
for id in IDs/*; do
((i=i%4)); ((i++==0)) && wait
(
echo -e "collecting ${id}"
VALIDITYCOUNT=0
# newest solver log of this run; := also provides a fallback name
NEWESTLOG=$(ls --color=never -t $id/*.log | head -n1)
GREPCOUNT="$(grep -c ERROR ${NEWESTLOG:=error.txt})"
(( VALIDITYCOUNT+=${GREPCOUNT} ))
if [ ! -f $id/rep_Pressure_Drop.val ]; then (( VALIDITYCOUNT+=1 )); fi
# ${id:6} strips the "IDs/" prefix portion to get the numeric run id
RESULTSTRING="${id:6}"
if [ $VALIDITYCOUNT == 0 ] ; then
RESULTSTRING="$RESULTSTRING\t1"
else
RESULTSTRING="$RESULTSTRING\t-$VALIDITYCOUNT"
fi
RESULTSTRING="$RESULTSTRING\t$(sed -n ${id:6}p IDs.csv | cut -d',' -f2)"
RESULTSTRING="$RESULTSTRING\t$(cat $id/massflow.input)"
RESULTSTRING="$RESULTSTRING\t$(cat $id/radius.input)"
RESULTSTRING="$RESULTSTRING\t$(cat $id/rep_Pressure_Drop.val)"
RESULTSTRING="$RESULTSTRING\t$(cat $id/rep_Temperature_Outlet.val)"
RESULTSTRING="$RESULTSTRING\t$(cat $id/rep_Heat_Transfer_Balance_InletOutlet.val)"
RESULTSTRING="$RESULTSTRING\t$(cat $id/rep_Heat_Transfer_AreaAveraged_Wm2_mean.val)"
RESULTSTRING="$RESULTSTRING\t$(cat $id/rep_Heat_Transfer_AreaAveraged_Wm2_std.val)"
RESULTSTRING="$RESULTSTRING\t$(cat $id/rep_Heat_Transfer_AreaAveraged_Wm2_variance.val)"
RESULTSTRING="$RESULTSTRING\t$(cat $id/rep_Heat_Transfer_W_mean.val)"
RESULTSTRING="$RESULTSTRING\t$(cat $id/rep_Heat_Transfer_W_std.val)"
RESULTSTRING="$RESULTSTRING\t$(cat $id/rep_Heat_Transfer_W_variance.val)"
HEADERCOUNT="$(grep -c "Iteration " ${NEWESTLOG})"
if [ $HEADERCOUNT -gt 0 ]; then
# last residual line after the final "Iteration Continuity" header
CONTINUITY=$(sed -n 'H; /Iteration Continuity/h; ${g;p;}' $NEWESTLOG | head -n 2 | tail -n1 | sed 's/\s\s*/ /g' | cut -d' ' -f3)
if [ $(awk 'BEGIN{print ('$CONTINUITY' > '1e-1')?0:1}') -eq 1 ]; then
RESULTSTRING="$RESULTSTRING\t$CONTINUITY\tconverged"
else
RESULTSTRING="$RESULTSTRING\t$CONTINUITY\tdiverged"
fi
else
RESULTSTRING="$RESULTSTRING\t-1\tcouldnt filter residuals"
fi
echo -e $RESULTSTRING | sed 's/
//g' >> $RESULTSTEM-temp.csv
) &
done
wait
echo -e "Assorted pre-collection done"
cat $RESULTSTEM-temp.csv | sort -n -t$'\t' -k1,1n >$RESULTSTEM-assorted.csv
echo -e "Assorted Data collected"
## Temperature
# Each section below follows the same pattern: write a header, collect one
# tab-separated row per run (4 background jobs at a time), wait, then sort
# numerically by run id into the final CSV.
echo -e "ID,Temperatures(K)" > $RESULTSTEM-temp.csv
for id in IDs/*; do
((i=i%4)); ((i++==0)) && wait
(
RESULTSTRING="${id:6}"
for val in $id/rep_Temperature_PWALL_*.val; do
RESULTSTRING="$RESULTSTRING\t$(cat $val)"
done
echo -e "$RESULTSTRING" | sed 's/^M//g' >> $RESULTSTEM-temp.csv
) &
done
wait
cat $RESULTSTEM-temp.csv | sort -n -t$'\t' -k1,1n >$RESULTSTEM-temperatures.csv
echo -e "Individual Temperatures collected"
## Heat Transfer Area Averaged
echo -e "ID,HeatTransferPerArea(W/m2)" > $RESULTSTEM-temp.csv
for id in IDs/*; do
((i=i%4)); ((i++==0)) && wait
(
RESULTSTRING="${id:6}"
for val in $id/rep_Heat_Transfer_AreaAveraged_Wm2_PWALL_*.val; do
RESULTSTRING="$RESULTSTRING\t$(cat $val)"
done
echo -e "$RESULTSTRING" | sed 's/^M//g' >> $RESULTSTEM-temp.csv
) &
done
wait
cat $RESULTSTEM-temp.csv | sort -n -t$'\t' -k1,1n >$RESULTSTEM-heatTransferPerArea.csv
## Heat Transfer
echo -e "ID,HeatTransfer(W)" > $RESULTSTEM-temp.csv
for id in IDs/*; do
((i=i%4)); ((i++==0)) && wait
(
RESULTSTRING="${id:6}"
for val in $id/rep_Heat_Transfer_W_PWALL_*.val; do
RESULTSTRING="$RESULTSTRING\t$(cat $val)"
done
echo -e "$RESULTSTRING" | sed 's/^M//g' >> $RESULTSTEM-temp.csv
) &
done
wait
cat $RESULTSTEM-temp.csv | sort -n -t$'\t' -k1,1n >$RESULTSTEM-heatTransfer.csv
## Heat Transfer
echo -e "ID,HeatTransferByRadiation(W)" > $RESULTSTEM-temp.csv
for id in IDs/*; do
((i=i%4)); ((i++==0)) && wait
(
RESULTSTRING="${id:6}"
for val in $id/rep_Heat_Flux_Radiation_PWALL_*.val; do
RESULTSTRING="$RESULTSTRING\t$(cat $val)"
done
echo -e "$RESULTSTRING" | sed 's/^M//g' >> $RESULTSTEM-temp.csv
) &
done
wait
cat $RESULTSTEM-temp.csv | sort -n -t$'\t' -k1,1n >$RESULTSTEM-heatFluxRadiation.csv
## Heat Flux through Conduction
# Same per-run collection pattern as the sibling sections above.
echo -e "ID,HeatTransferByConduction(W)" > $RESULTSTEM-temp.csv
for id in IDs/*; do
((i=i%4)); ((i++==0)) && wait
(
RESULTSTRING="${id:6}"
for val in $id/rep_Heat_Flux_Conduction_PWALL_*.val; do
RESULTSTRING="$RESULTSTRING\t$(cat $val)"
done
echo -e "$RESULTSTRING" | sed 's/^M//g' >> $RESULTSTEM-temp.csv
) &
done
# BUGFIX: wait for the background collectors before reading the temp file;
# every other section does this, and without it rows written late were lost.
wait
cat $RESULTSTEM-temp.csv | sort -n -t$'\t' -k1,1n > $RESULTSTEM-heatFluxConduction.csv
echo -e "Heat Transfer data collected"
##Force
echo -e "ID,Force(N)" > $RESULTSTEM-temp.csv
for id in IDs/*; do
((i=i%4)); ((i++==0)) && wait
(
RESULTSTRING="${id:6}"
for val in $id/rep_Force_PWALL_*.val; do
RESULTSTRING="$RESULTSTRING\t$(cat $val)"
done
echo -e "$RESULTSTRING" >> $RESULTSTEM-temp.csv
) &
done
wait
cat $RESULTSTEM-temp.csv | sort -n -t$'\t' -k1,1n >$RESULTSTEM-force.csv
## Clean up.
# remove the shared scratch file used by all sections above
rm $RESULTSTEM-temp.csv
echo -e "done."
| true |
76abfda7a7a68b707b6b08baca5d1ce9b08c394b | Shell | butchhoward/scripts | /baz_tools.sh | UTF-8 | 4,535 | 3.640625 | 4 | [] | no_license | #!/usr/bin/env bash
#source this script to get the useful functions
# You have to be already logged in for these to work:
# az login
# az acr login --name leadingagilestudios
DEFAULT_REGISTRY="leadingagilestudios"
# Print usage text for `baz repositories`.
function _baz_repositories_help()
{
    cat <<EOF

baz repositories [registry-name]
List container repositories in the registry. Registry defaults to '${DEFAULT_REGISTRY}'

baz repositories
EOF
}
# List the repository names of an Azure container registry, one per line.
function baz_repositories()
{
    local registry
    registry="${1:-${DEFAULT_REGISTRY}}"
    az acr repository list --name "${registry}" 2>/dev/null | jq -r '.[]'
}
# Print usage text for `baz repository_tags`.
function _baz_repository_tags_help()
{
    cat <<EOF

baz repository_tags repository-name [registry-name] 
List image tags (verison id) for a repository in a registry.
Registry defaults to '${DEFAULT_REGISTRY}'

baz repository_tags analysis/gather
EOF
}
# List the tags of a repository; null tag arrays are filtered out by jq.
baz_repository_tags()
{
    local repo="${1:?"requires repository name"}"
    local registry="${2:-${DEFAULT_REGISTRY}}"
    az acr manifest list-metadata "${registry}".azurecr.io/"${repo}" 2>/dev/null | jq -r '.[].tags | select(. != null) | .[]'
}
# Print usage text for `baz repository_images`.
function _baz_repository_images_help()
{
    cat <<EOF

baz repository_images repository-name [registry-name] 
List images for a repository in a registry.
Registry defaults to '${DEFAULT_REGISTRY}'

baz repository_images analysis/gather
EOF
}
# Emit one fully-qualified image reference (host/repo:tag) per tag.
baz_repository_images()
{
    local REPOSITORY="${1:?"requires repository name"}"
    local REGISTRY="${2:-"${DEFAULT_REGISTRY}"}"
    baz_repository_tags "${REPOSITORY}" "${REGISTRY}" | while read -r TAG; do
        printf '%s.azurecr.io/%s:%s\n' "${REGISTRY}" "${REPOSITORY}" "${TAG}"
    done
}
# Print usage text for `baz images_all`.
function _baz_images_all_help()
{
    cat <<EOF

baz images_all [registry-name] 
List all images for all repositories in a registry.
Registry defaults to '${DEFAULT_REGISTRY}'

baz images_all
EOF
}
# Enumerate every repository, then every image inside each repository.
function baz_images_all()
{
    local registry="${1:-${DEFAULT_REGISTRY}}"
    local repo
    while read -r repo; do
        baz_repository_images "${repo}" "${registry}"
    done < <(baz_repositories "${registry}")
}
# Print usage text for `baz images`.
function _baz_images_help()
{
    cat <<EOF

baz images [pattern] [registry-name] 
List images in a registry.
Pattern defaults to '.*' (i.e. all images)
Registry defaults to '${DEFAULT_REGISTRY}'

baz images
baz images 'analysis.*0\.1\.'
EOF
}
# Filter the image list of every repository through an extended regex.
function baz_images()
{
    local pattern="${1:-.*}"
    local registry="${2:-${DEFAULT_REGISTRY}}"
    local repo
    while read -r repo; do
        baz_repository_images "${repo}" "${registry}" | grep -E "${pattern}"
    done < <(baz_repositories "${registry}")
}
# Print usage text for `baz delete_image`.
function _baz_delete_image_help()
{
    cat <<EOF

baz delete_image [image] [registry-name] 
Delete an image from the registry.
Image must be the image name with any tags, but not including the registry name.
Registry defaults to '${DEFAULT_REGISTRY}'

WARNING! Use with caution! The image named will be immediately DELETED.

baz delete_image analysis/gather-example:0.4
EOF
}
# Delete repo:tag from the registry. The image argument must NOT carry the
# registry host prefix; for a baz_images line such as
# 'leadingagilestudios.azurecr.io/analysis/gather-example:0.4' call
#   baz_delete_image analysis/gather-example:0.4
function baz_delete_image()
{
    local image="${1:?"requires image name:tag"}"
    local registry="${2:-${DEFAULT_REGISTRY}}"
    az acr repository delete --yes --name "${registry}" --image "${image}"
}
# Print usage text for `baz delete_image_match`.
function _baz_delete_image_match_help()
{
    echo
    echo "baz delete_image_match pattern [registry-name] "
    echo "Pattern is requried. Use a regex pattern."
    echo "Delete all matching images in a registry."
    echo "Registry defaults to '${DEFAULT_REGISTRY}'"
    echo
    echo "WARNING! Use with caution! The images matched will be immediately DELETED."
    echo "Check the pattern using 'baz images pattern' "
    echo
    echo "baz delete_image_match 'analysis.*0\.1\.'"
}
# Delete every image whose full reference matches the extended regex PATTERN.
function baz_delete_image_match()
{
    local PATTERN="${1:?"requires regex pattern for image matching e.g. 'analysis.*0\.1\.'"}"
    local REGISTRY="${2:-"${DEFAULT_REGISTRY}"}"
    local IMAGE
    # BUGFIX: read matches line by line. The old `for X in $(baz_images ...)`
    # word-split the output and also glob-expanded it, so a pattern such as
    # 'analysis.*' could be replaced by file names from the current directory.
    while IFS= read -r IMAGE; do
        [ -n "${IMAGE}" ] || continue
        # trim to just image tag (az expects repo:tag without the host prefix)
        baz_delete_image "${IMAGE#"${REGISTRY}.azurecr.io/"}" "${REGISTRY}"
    done < <(baz_images "${PATTERN}" "${REGISTRY}")
}
| true |
d8a8a687f6f46dd21d432f523c3b91a26baf0cbf | Shell | gemsanyu/Azure-OnDemand | /azod-slurm/scripts/image-grafana.sh | UTF-8 | 735 | 2.640625 | 3 | [] | no_license | #!/bin/bash
echo "#### Configuration repo for InfluxDB:"
# Write the InfluxData yum repo definition; \$releasever / \$basearch are kept
# literal so yum expands them itself.
cat <<EOF | tee /etc/yum.repos.d/influxdb.repo
[influxdb]
name = InfluxDB Repository - RHEL \$releasever
baseurl = https://repos.influxdata.com/centos/\$releasever/\$basearch/stable
enabled = 1
gpgcheck = 1
gpgkey = https://repos.influxdata.com/influxdb.key
EOF
echo "#### Configuration repo for Grafana:"
# Write the Grafana OSS yum repo definition with GPG verification enabled.
cat <<EOF | tee /etc/yum.repos.d/grafana.repo
[grafana]
name=grafana
baseurl=https://packages.grafana.com/oss/rpm
repo_gpgcheck=1
enabled=1
gpgcheck=1
gpgkey=https://packages.grafana.com/gpg.key
sslverify=1
sslcacert=/etc/pki/tls/certs/ca-bundle.crt
EOF
echo "#### InfluxDB Installation:"
yum -y install influxdb
echo "#### Grafana Installation:"
yum -y install grafana
| true |
abdee60424365dd02d7a600873b9d20a1298fdd3 | Shell | s3strm/worker-instance | /bin/list_movie_bucket_files | UTF-8 | 676 | 3.5625 | 4 | [] | no_license | #!/usr/bin/env bash
PROJECT_DIR="$(dirname $0)/../"
source ${PROJECT_DIR}/etc/settings
b2 authorize-account ${BACKBLAZE_ACCOUNT_ID} ${BACKBLAZE_APPLICATION_KEY} > /dev/null
FILES=()
# Fetch one page of file names into FILES. Returns success (0) while new
# entries keep arriving; subsequent pages start from the last file already
# seen, so the final page re-adds only that one name, which makes the
# "count+1 != new count" check fail and ends the loop.
more_files() {
  local file_count=${#FILES[@]}
  if [[ ${file_count} -eq 0 ]]; then
    FILES+=( $(b2 list-file-names ${BACKBLAZE_MOVIE_BUCKET} | jq -r .files[].fileName) )
  else
    local last_file=${FILES[-1]}
    FILES+=( $(b2 list-file-names ${BACKBLAZE_MOVIE_BUCKET} ${last_file} 500 | jq -r .files[].fileName) )
  fi
  [[ $(( ${file_count} + 1 )) != ${#FILES[@]} ]]
  return $?
}
# page through the bucket, reporting progress on stderr
while more_files; do
  echo "fetched ${#FILES[@]}" >&2
done
# print the collected names on stdout
for f in ${FILES[@]}; do
  echo ${f}
done
| true |
1ad305ae9ac69cfb3fbb2633468a0e3155f42633 | Shell | masakeida/monetary_policy_lsa | /mkFreq.sh | UTF-8 | 176 | 2.59375 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
# fileList.sh defines $file_list, the space-separated stems to process
. fileList.sh
for file in ${file_list}
do
# morphological.py reads input.txt and writes output.txt (fixed file names)
cp ${file}_fmt.txt input.txt
./morphological.py
cp output.txt ${file}_mecab.txt
done
rm input.txt output.txt
| true |
79e6fe1d716119ae4682ed4e717b8ff095791d8f | Shell | xvlm/watchdog_linux | /shell/startup.sh | UTF-8 | 680 | 3.5625 | 4 | [] | no_license | #!/bin/ksh
# This shell is used to check the fts real time service is alive, if not, restart fts.
# ignore SIGINT so the watchdog loop cannot be interrupted with Ctrl+C
trap " " 2
#Define variable
# interval between restarts, in seconds
EXEC_INTERVAL=60
# name of the launch script (NOTE(review): original comment said "directory")
SHELL_NAME="run.sh"
# directory containing this script
FILE_DIR=$(cd "$(dirname "$0")"; pwd)
# application directory
APP_DIR=$FILE_DIR
# directory holding the launch script
SHELL_DIR=$APP_DIR/conf
# refuse to start a second watchdog for the same script path
checkitem="$0"
procCnt=`ps -ef|grep "$checkitem"|grep -v 'grep'|grep -v "$$"|awk '{print $2}'|wc -l`
if [ $procCnt -gt 0 ] ; then
echo "$0 exists [procs=${procCnt}]"
exit 1;
fi
# run the launch script forever, sleeping EXEC_INTERVAL between invocations
while [ true ];
do
if [ ! -x "$SHELL_DIR/$SHELL_NAME" ]; then
chmod +x $SHELL_DIR/$SHELL_NAME
fi
$SHELL_DIR/$SHELL_NAME
sleep $EXEC_INTERVAL
done
| true |
61ab246528616ee47636b32f306b17f728298d3c | Shell | ngkim/vagrant | /keepalived-bridge-multi-nic/init_node_WAN-SW-BRIDGE.sh | UTF-8 | 549 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# bridge name and the interfaces to enslave to it
BR1="br0"
BR1_ITFS=("eth1" "eth2" "eth3" "eth4" "eth5" "eth6" "eth7")
# Create a Linux bridge $1 and add every interface of the array whose NAME
# is passed as $2 (indirect expansion via "${!2}").
init_bridge() {
    BR_NAME=$1
    declare -a BR_ITFS=("${!2}")
    sudo sysctl -w net.ipv4.ip_forward=1
    sudo brctl addbr $BR_NAME
    for idx in ${!BR_ITFS[@]}; do
        itf=${BR_ITFS[$idx]}
        echo "sudo brctl addif $BR_NAME $itf"
        sudo brctl addif $BR_NAME $itf
        sudo ifconfig $itf up
    done
    sudo ifconfig $BR_NAME up
}
sudo apt-get update
sudo apt-get install -y bridge-utils
# note: the array is passed by NAME (plus [@]), not by value
init_bridge $BR1 BR1_ITFS[@]
| true |
83260826a31a68c74d83463cd8c59e7e2f46d5ff | Shell | hattan/terrabash | /test/spec/spec_helper.sh | UTF-8 | 1,534 | 3.609375 | 4 | [] | no_license | # shellcheck shell=sh
# This callback function will be invoked after core modules has been loaded.
spec_helper_configure() {
  # Available functions: import, before_each, after_each, before_all, after_all
  import 'support/custom_matcher'
}
# Resource Helper Functions
# Each helper builds an az CLI invocation and hands it to run_az below.
get_resource_group_by_name() {
  resourceGroupName=$1
  run_az "az group show -g $resourceGroupName -o json"
}
get_storage_account_by_name() {
  storageAccountName=$1
  run_az "az storage account show -n $storageAccountName"
}
# AZ CLI functions
# Run an az command (word-split from a single string), echo its JSON output
# once, and preserve the command's exit status.
run_az() {
  command=$1
  json=$($command)
  # BUGFIX: capture the status immediately; the old code read $? after an
  # extra `echo $json`, so the function always returned 0 and also printed
  # the JSON twice.
  local code=$?
  if [[ ! -z "$json" ]]; then
    echo "$json"
  fi
  return $code
}
# Assertions
# Compare the result of a jq query against an expected string. The JSON
# subject defaults to "$query_equals" — presumably populated by the custom
# matcher imported in spec_helper_configure; verify in support/custom_matcher.
# An explicit third argument overrides it. Prints a diagnostic on mismatch.
query_equals() {
  local query="$1"
  local expected="$2"
  local json="$query_equals"
  # NOTE(review): `>` inside [[ ]] is a lexicographic string comparison,
  # which happens to work for small single-digit argument counts
  if [[ "$#" > 2 ]]; then
    json="$3"
  fi
  local actual=$(echo "$json" | jq -r "$query")
  if [[ "$actual" == "$expected" ]]; then
    return 0
  else
    new_line
    error "   query: $query"
    error "expected: $expected"
    error "  actual: $actual"
    return 1
  fi
}
# assert that .name of the subject JSON equals $1
name_equals() {
  local expected="$1"
  local json="$name_equals"
  query_equals ".name" "$expected" "$json"
}
# assert that .location of the subject JSON equals $1
location_equals() {
  local expected="$1"
  local json="$location_equals"
  query_equals ".location" "$expected" "$json"
}
# LOGGER functions
# BUGFIX: the message is passed as a printf *argument* (%s) instead of being
# interpolated into the format string, so messages containing '%' or
# backslashes can no longer corrupt the output or be re-interpreted.
error() {
  printf '\e[31mERROR: %s\n\e[0m' "$*"
}
information() {
  printf '  \e[36m%s\n\e[0m' "$*"
}
success() {
  printf '  \e[32m%s\n\e[0m' "$*"
}
# remove the accumulated spec log, if any
clear_print_log() {
  rm -f logs/log.txt
}
new_line() {
echo -e "\n"
} | true |
91d1ed55726fbb36e6099d0bd7f763a87f408ab5 | Shell | aduh95-test-account/uppy-test | /bin/endtoend-build-ci | UTF-8 | 2,797 | 3.734375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -o pipefail
set -o errexit
set -o nounset
# Set magic variables for current file & dir
__dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
__file="${__dir}/$(basename "${BASH_SOURCE[0]}")"
__base="$(basename ${__file} .sh)"
__root="$(cd "$(dirname "${__dir}")" && pwd)"
# guard against accidental local runs of the full CI suite
if [ -z "${CI:-}" ]; then
  echo "!! Running CI-style end-to-end tests but CI environment variable is not set"
  echo "It is generally ill-advised to run the full end-to-end suite on your local machine."
  echo "You are probably looking for this instead:"
  echo "  yarn run test:endtoend:local"
  echo ""
  echo "Hit Ctrl+C to stop or Enter if you REALLY DO want to run the full thing."
  read
else
  echo ::group::endtoend_build_ci
fi
set -o xtrace
YARN="corepack yarn"
# local verdaccio npm registry used to simulate a publish
VERDACCIO_REGISTRY=http://localhost:4002
CURRENT_COMMIT="$(git rev-parse HEAD)"
# remove build artifacts and restore the git worktree to the start commit
cleanup() {
  rm -rf "${__root}/test/endtoend/node_modules"
  rm -rf "${__root}/test/endtoend/tmp"
  git reset
  git checkout $CURRENT_COMMIT
  if [ -n "${CI:-}" ]; then
    echo ::endgroup::
  fi
}
# EXIT trap: undo the yarn registry configuration, then clean up
function on_exit() {
  # revert to public registry
  $YARN config unset npmScopes --home
  $YARN config unset npmRegistryServer --home
  $YARN config unset npmAuthToken --home
  $YARN config unset unsafeHttpWhitelist --home
  cleanup
}
trap on_exit EXIT
echo "Preparing for end to end test: copying static HTML and CSS, building JS"
rm -rf "${__root}/test/endtoend/node_modules"
# list of @uppy/* packages
PACKAGES="$($YARN workspaces list --json | node -e 'require("readline").createInterface({input:process.stdin}).on("line",l=>{const{name}=JSON.parse(l);if(name?.startsWith("@uppy/"))console.log(name)})')"
cleanup
# Initialise verdaccio storage path.
mkdir -p "${__root}/test/endtoend/tmp/verdaccio"
$YARN run build
# https://github.com/facebook/create-react-app/pull/4626
TOKEN="$($YARN npm-auth-to-token -u user -p password -e user@example.com -r "$VERDACCIO_REGISTRY")"
git checkout -b endtoend-test-build
# HACK this thing changes all the time for some reason on CI
# so I'll just ignore it…
git checkout -- yarn.lock
# point yarn (and the @uppy scope) at the local verdaccio registry
$YARN config set npmRegistryServer "$VERDACCIO_REGISTRY" --home
$YARN config set npmAuthToken "$TOKEN" --home
$YARN config set npmScopes.uppy.npmRegistryServer "$VERDACCIO_REGISTRY" --home
$YARN config set npmScopes.uppy.npmPublishRegistry "$VERDACCIO_REGISTRY" --home
$YARN config set npmScopes.uppy.npmAuthToken "$TOKEN" --home
$YARN config set unsafeHttpWhitelist "localhost" --home
# Simulate a publish of everything, to the local registry,
# without changing things in git
ENDTOEND=1 $YARN workspaces foreach --include '@uppy/*'\
 version prerelease -d
$YARN version apply --all --prerelease
ENDTOEND=1 $YARN workspaces foreach --include '@uppy/*'\
 npm publish
bash "${__dir}/endtoend-build-tests"
cleanup
| true |
3397b16fb19ffa547fcc3acd34017153d0e3367d | Shell | YeomanYe/linux-shell-practice | /Linux Shell脚本攻略/第3章 以文件之名/diff_patch.sh | UTF-8 | 637 | 3.375 | 3 | [] | no_license | #/bin/bash -xv
: '
cat version1.txt
line2
line3
line4
happy hacking!
cat version2.txt
this is the original text
line2
line4
happy hacking!
GNU is not UNIX
'
# non-unified (normal) diff output
diff version1.txt version2.txt
# unified diff output
diff -u version1.txt version2.txt
# generate a patch file
diff -u version1.txt version2.txt > version.patch
# apply the patch
patch -p1 version1.txt < version.patch
# generate difference information for whole directories
diff -Naur directory1 directory2
: '
-N: 将所有缺失的文件视为空文件
-a: 将所有文件视为文本文件。
-u: 生成一体化输出。
-r: 遍历目录下的所有文件
' | true |
7cb7e33380da07172921b89da052fcec2e0a2b20 | Shell | emadghaffari/grpc_oauth_service | /ssl/generator.sh | UTF-8 | 1,034 | 3.078125 | 3 | [] | no_license | # private files: ca.key, server.key, server.pem, server.crt
# share files: server.pem (needed by the CA), ca.crt (needed by client)
# server name
SERVER_CN=localhost
# Step1: Generate Certificate Authority + Trust Certificate (ca.crt)
openssl genrsa -passout pass:1111 -des3 -out ca.key 4096
openssl req -passin pass:1111 -new -x509 -days 365 -key ca.key -out ca.crt -subj "/CN=${SERVER_CN}"
# Step2: Generate the server private key server.key
openssl genrsa -passout pass:1111 -des3 -out server.key 4096
# Step3: Get a certificate Signing request from the CA server.csr
openssl req -passin pass:1111 -new -key server.key -out server.csr -subj "/CN=${SERVER_CN}"
# Step4: Sign the certificate with the CA we created (id called self signing)
openssl x509 -req -passin pass:1111 -days 365 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt
# Step5: Convert the server certificate to .pem format (server.pem) - usable by gRPC
openssl pkcs8 -topk8 -nocrypt -passin pass:1111 -in server.key -out server.pem
| true |
1c3ae239fa825a30cb7b1f1991da6d2b85490a25 | Shell | yinwh79/wangbin-src | /user/timeget.sh | UTF-8 | 414 | 2.96875 | 3 | [] | no_license | #!/bin/sh
printf "start: ntp client \n"
# stop any running ntpd and reset the "last sync" marker file
killall ntpd
echo "1970-00-00 00:00:00" > /tmp/ntpd_last.log
# skip syncing entirely when NTP is disabled in the UCI system config
if [ `uci get system.@system[0].ntpenabled -q` -eq "0" ]
then
echo ntpd disabled !
exit
fi
# start ntpd against the three configured servers and record the sync time
ntpd -p `uci get system.@system[0].ntpserver1 -q` -p `uci get system.@system[0].ntpserver2 -q` -p `uci get system.@system[0].ntpserver3 -q`
echo `date "+%Y-%m-%d %H:%M:%S"` > /tmp/ntpd_last.log
echo ntpd success !
| true |
e970db61012b307cc2bb763e57a3b0a6db99dfa4 | Shell | adiyoss/dotfiles | /dot_backup.sh | UTF-8 | 578 | 2.78125 | 3 | [] | no_license | #!/bin/bash
echo "Copying local files to repo..."
# destination: the dotfiles git working copy
HERE=~/Workspace/code/dotfiles/
cp -r ~/.zsh_config $HERE
cp -r ~/.functions $HERE
cp -r ~/.aliases $HERE
cp -r ~/.zshrc $HERE
cp -r ~/.tmux.conf $HERE
cp -r ~/.ssh/config $HERE/.ssh/
cp -r ~/.gitconfig $HERE
cp -r ~/.vim_runtime/my_configs.vim $HERE
cp -r ~/.config/nvim/ $HERE/.config/nvim
cp -r ~/.config/fish/ $HERE/.config/fish
cp -r ~/.vim/plugged/neosnippet-snippets/neosnippets/python.snip $HERE
echo "Updating local dot files to remote..."
# commit and push the refreshed copies
cd $HERE
git add .
git commit -m "update"
git push origin master
echo "Done."
| true |
3a65bf48e32e488da86025f1daf2b3f6380d4655 | Shell | Madaerpao/tlbb | /gs/global/setini.sh | UTF-8 | 2,983 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# author: yulinzhihou
# mail: yulinzhihou@gmail.com
# date: 2020-05-17
# comment: rewrite the config files according to the env-file variables, copy
#          them into the target directories and grant the needed permissions
#\cp -rf /etc/gs_ini/*.ini /tlgame/tlbb/Server/Config && chmod -R 777 /tlgame && chown -R root:root /tlgame
#tar zxf /.tlgame/gs/scripts/ini.tar.gz -C /tlgame/tlbb/Server/Config && chmod -R 777 /tlgame && chown -R root:root /tlgame
#
tar zxf /root/.tlgame/gs/scripts/ini.tar.gz -C /root/.tlgame/gs/scripts/
tar zxf /root/.tlgame/gs/scripts/billing.tar.gz -C /root/.tlgame/gs/scripts/
if [ ! -d "/tlgame/billing/" ]; then
mkdir -p /tlgame/billing/ && chown -R root:root /tlgame/billing && chmod -R 777 /tlgame
fi
tar zxf /root/.tlgame/gs/scripts/billing.tar.gz -C /tlgame/billing/
# game configuration files: substitute the default MySQL password when changed
if [ "${TL_MYSQL_PASSWORD}" != "123456" ]; then
sed -i "s/DBPassword=123456/DBPassword=${TL_MYSQL_PASSWORD}/g" /root/.tlgame/gs/scripts/LoginInfo.ini
sed -i "s/DBPassword=123456/DBPassword=${TL_MYSQL_PASSWORD}/g" /root/.tlgame/gs/scripts/ShareMemInfo.ini
sed -i "s/123456/${TL_MYSQL_PASSWORD}/g" /root/.tlgame/gs/services/server/config/odbc.ini
fi
if [ "${TL_MYSQL_PASSWORD}" != "123456" ]; then
sed -i "s/123456/${TL_MYSQL_PASSWORD}/g" /root/.tlgame/gs/scripts/config.json
fi
#if [ $TLBB_MYSQL_PORT -ne 3306 ]; then
#	sed -i "s/DBPort=3306/DBPort=${TLBB_MYSQL_PORT}/g" /root/.tlgame/gs/scripts/LoginInfo.ini
#	sed -i "s/DBPort=3306/DBPort=${TLBB_MYSQL_PORT}/g" /root/.tlgame/gs/scripts/ShareMemInfo.ini
#	sed -i "s/3306/${TLBB_MYSQL_PORT}/g" /root/.tlgame/gs/services/server/config/odbc.ini
#fi
#
#if [ $WEB_MYSQL_PORT -ne 3306 ]; then
#	sed -i "s/3306/${WEB_MYSQL_PORT}/g" /root/.tlgame/gs/scripts/config.json
#fi
#if [ ${BILLING_PORT} != "21818" ]; then
#	sed -i "s/21818/${BILLING_PORT}/g" /root/.tlgame/gs/scripts/config.json
#	sed -i "s/Port0=21818/Port0=${BILLING_PORT}/g" /root/.tlgame/gs/scripts/ServerInfo.ini
#fi
if [ "${LOGIN_PORT}" != "13580" ]; then
sed -i "s/Port0=13580/Port0=${LOGIN_PORT}/g" /root/.tlgame/gs/scripts/ServerInfo.ini
fi
if [ "${SERVER_PORT}" != "15680" ]; then
sed -i "s/Port0=15680/Port0=${SERVER_PORT}/g" /root/.tlgame/gs/scripts/ServerInfo.ini
fi
# copy the rewritten files into the target locations / containers
\cp -rf /root/.tlgame/gs/scripts/*.ini /tlgame/tlbb/Server/Config/
\cp -rf /root/.tlgame/gs/scripts/config.json /tlgame/billing/
docker cp /root/.tlgame/gs/services/server/config/odbc.ini gs_server_1:/etc
# after every update, reset the locally-modified template files
sed -i 's/^else$/else \/home\/billing\/billing up -d/g' /tlgame/tlbb/run.sh && \
sed -i 's/exit$/tail -f \/dev\/null/g' /tlgame/tlbb/run.sh && \
cd ~/.tlgame/ && \
git checkout -- gs/services/server/config/odbc.ini && \
rm -rf /root/.tlgame/gs/scripts/*.ini && \
rm -rf /root/.tlgame/gs/scripts/config.json
echo -e "\e[44m 配置文件已经写入成功,可以执行【runtlbb】进行开服操作!!\e[0m" | true |
b174b0fc62edaa3aeeaddc2c12cbc8a46cbc44a8 | Shell | KrisSaxton/lfs-build-6.2 | /nbd/nbd-2.9.11-1/install/040-configure | UTF-8 | 402 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# pulls in the AB_PACKAGE_* build variables used below
source configure
pushd ../unpack/*
export PKG_CONFIG_PATH=$AB_PACKAGE_GLIB_INSTALL_DIR/lib/pkgconfig
# configure nbd against the locally-built glib; output is tee'd to a log
CFLAGS="\
-I$AB_PACKAGE_GLIB_INCLUDE_DIR \
" \
LDFLAGS="\
-L$AB_PACKAGE_GLIB_LIB_DIR \
" \
./configure \
--prefix=$AB_PACKAGE_INSTALL_DIR \
--mandir=$AB_PACKAGE_INSTALL_DIR/share/man \
--enable-syslog \
--disable-glibtest \
| tee ../../logs/030-configure.log
popd
| true |
87cbb515508c721cb3affd8f95c71c5c13e5fb7d | Shell | nativejie/server-test | /restart.sh | UTF-8 | 272 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Kill whatever is listening on the given port (default 3000), then start the app.
port=$1
echo "要停止的端口号为:$1"
# use -z instead of the awkward `! -n` double negation
if [ -z "$port" ];then
port=3000;
fi
echo "PORT: $port"
# netstat –tunlp
# netstat -tunlp
pid=$(lsof -t -i:"$port")
echo "PID: $pid"
if [ -n "$pid" ];then
# $pid may hold several PIDs; word-splitting here is intentional
kill -9 $pid
fi
echo "开始启动"
ts-node src/index.ts
| true |
76b381cc78736f34067fb503a13e73ede48bb5c5 | Shell | chrishunt/oh-my-zsh | /custom/03.aliases.zsh | UTF-8 | 732 | 2.796875 | 3 | [
"MIT"
] | permissive | alias df='df -h'
alias dunnet='emacs -batch -l dunnet'
alias dus='du -hs'
alias irc='weechat-curses'
alias l='ls -CF'
alias la='ls -a'
alias ll='ls -l'
alias ls='ls -h'
alias lt='ls -ltr'
# batch-fix scanned negatives with negfix8
alias negdir='for i in *.tif; do negfix8 -cs "$i"; done'
# copy the current tmux buffer to the macOS clipboard
alias tmux-pbcopy="tmux saveb -|pbcopy"
alias tmux="TERM=screen-256color-bce tmux"
alias top='htop'
alias vi=vim
alias vim='mvim -v'
# Ruby
alias be='bundle exec'
alias rspec='rspec --color'
# Git
# (short forms defer to aliases defined in ~/.gitconfig: s, ap, d, ds, l)
alias g='git s'
alias gap='git ap'
alias gd='git d'
alias gds='git ds'
alias gl='git l'
alias gs='git s'
# With no arguments print usage; otherwise show processes listening on the
# given TCP port(s).
function listening() {
  # BUGFIX: test the argument count. `[ -z "$@" ]` expands to multiple test
  # operands when several arguments are given and then errors out.
  if [ $# -eq 0 ]; then
    echo "See what servers are listening on PORT."
    echo "Usage: listening PORT"
  else
    lsof -n -i4TCP:"$@" | grep LISTEN
  fi
}
| true |
21a916043fc75aa9f029effb7fe25d50b06c5091 | Shell | clarafu/concourse-demo | /scripts/add_commit | UTF-8 | 298 | 2.921875 | 3 | [] | no_license | #!/bin/bash
set -ex
# work from the repository root (one level above scripts/)
cd $(dirname $0)/..
# bump the counter stored in commit.txt
num=$(($( cat commit.txt) + 1))
echo $num > commit.txt
git config user.email "dummy@example.com"
git config user.name "Dummy User"
git add commit.txt
# NOTE(review): `git ci` relies on a `ci` alias being configured — verify
git ci -m "increment commit to $num"
# push with the deploy key loaded into a throwaway ssh-agent
ssh-agent bash -c 'ssh-add scripts/app_private_key; git push origin master'
| true |
cb32dcfcc9d5f9c8cc40054a22eeed2bf2d61651 | Shell | Megalawls/Some-Ubuntu-Things | /zooscript.sh | UTF-8 | 686 | 3.890625 | 4 | [] | no_license | #!/bin/bash
echo "Zooscript Initialised"
#read name
#echo "Hello $name"
echo "Which animal folder would you like to go into?"
read -r folder
# quote the user-supplied folder name and abort if the directory is missing
cd ~/Desktop/shells/zoo/"$folder" || exit 1
pwd
fextension="facts.txt"
#Creates $folderfacts if it doesnt exist
if [[ ! -f "${folder}${fextension}" ]]; then
touch "${folder}${fextension}"
echo "${folder}${fextension} created"
fi
#Prints all of the animals within folder, ammended to redirect
# BUGFIX: glob instead of parsing `ls`, and skip the facts file itself so it
# is not concatenated into itself.
for i in *
do
[[ "$i" == "${folder}${fextension}" ]] && continue
cat "$i" >> "${folder}${fextension}"
done
#Code below is used for task f instead of the above, using grep to only get birthday
#cat $i | grep "birthday"
#Code below adds line numbers and converts to lower case
#cat -n ./zoo/birds/birdsfacts.txt | tr A-Z a-z
| true |
52b17fbe0ae5a0ea7e89758f75ab5e97ecce36b1 | Shell | FredHutch/angus-tophat-dmel | /make/run.sbatch.sh | UTF-8 | 457 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env bash
echo "loading modules: $(date '+%Y-%m-%dT%H-%M-%S')"
# time the environment-module loads
t_start=$(date '+%s')
ml TopHat/2.1.1-foss-2016b
ml Bowtie2/2.2.9-foss-2016b
t_end=$(date '+%s')
echo "modules loaded in $((t_end - t_start)) seconds"
# expose the Slurm CPU count to the Makefile
export NPROCS=${SLURM_CPUS_ON_NODE}
echo "starting make: $(date '+%Y-%m-%dT%H-%M-%S')"
t_start=$(date '+%s')
# $RUN selects the make target; supplied by the submitting environment
make ${RUN}
t_end=$(date '+%s')
echo "make ran $((t_end - t_start)) seconds"
echo "ended: $(date '+%Y-%m-%dT%H-%M-%S')"
| true |
da39d17eb9a90452c1a22857b0fc2890541016ac | Shell | dragonmaus/__bin.old | /src/sshmenu.sh | UTF-8 | 178 | 2.78125 | 3 | [
"BSD-3-Clause-Clear"
] | permissive | #!/bin/sh
set -e
# pick a Host entry from the "My local servers" section of ~/.ssh/config
host=$(sed -n '/^# My local servers$/,/^# Public servers$/s/^Host \(.*\)$/\1/p' < ~/.ssh/config | sort -u | dmenu)
# BUGFIX: [[ ]] is a bashism and breaks under the #!/bin/sh shebang (dash);
# with set -e the POSIX test still aborts when nothing was selected.
[ -n "$host" ]
exec st -e ssh "$host" "$@"
| true |
83005c3902cd57719f68a1f61cb1d22253361f8f | Shell | vhbvb/iOS-CI | /test.sh | UTF-8 | 4,467 | 3.34375 | 3 | [] | no_license | SDKName=${JOB_NAME%%_*}
# Names derived from the Jenkins job: SDKName is the JOB_NAME prefix (set on
# the fused first line); these pick the workspace, demo project and scheme.
Space=${SDKName}SDK
Demo="${SDKName}Demo"
Scheme="MobPushDemo"
UnitTest="${Scheme}Tests"
# Refresh the local HTML preview assets (RetainCycles) from FBMemoryProfiler
# and clear the previous retain-cycle log.
updatePreview()
{
RetainCycles="${WORKSPACE}/RetainCycles"
if [ -d "$RetainCycles" ]
then
rm -r "$RetainCycles"
fi
cp -r "${WORKSPACE}/../FBMemoryProfiler/RetainCycles" "${WORKSPACE}/"
# RetainCycleLog is global on purpose: FormReport() writes to it later
RetainCycleLog="${WORKSPACE}/RetainCycles/Log/log.text"
# BUGFIX: log.text is a regular file, so test with -f (the old -d never matched)
if [ -f "$RetainCycleLog" ]
then
rm "$RetainCycleLog"
fi
}
# Wire the FBMemoryLeakDetecter harness into the unit-test target: copy the
# test manager and framework into place and patch the Xcode project.
configUnitTest()
{
Manager="${WORKSPACE}/../FBMemoryProfiler/Sample/TestDemoTests/FBTestManager.mm"
Framework="${WORKSPACE}/../FBMemoryProfiler/Package/FBMemoryLeakDetecter.framework"
TestPath="${WORKSPACE}/Sample/${Demo}/${UnitTest}"
FrameworkPath="${WORKSPACE}/SDK"
# BUGFIX: FBTestManager.mm is a file, so test with -f (the old -d never matched)
if [ -f "${TestPath}/FBTestManager.mm" ]
then
rm "${TestPath}/FBTestManager.mm"
fi
# a .framework bundle is a directory, so -d is correct here
if [ -d "${FrameworkPath}/FBMemoryLeakDetecter.framework" ]
then
rm -r "${FrameworkPath}/FBMemoryLeakDetecter.framework"
fi
cp "$Manager" "${TestPath}/"
cp -r "$Framework" "${FrameworkPath}/"
python "${WORKSPACE}/../FBMemoryProfiler/py/Import.py" 0 1 "${WORKSPACE}/Sample/${Demo}/${Demo}.xcodeproj/project.pbxproj" "${Framework}/FBMemoryLeakDetecter.framework" "${TestPath}/FBTestManager.mm"
}
# Run the unit tests: select Xcode 10.1, rebuild the Reports directory, build
# for the simulator, run the tests (JUnit XML via ocunit2junit) and generate
# an HTML coverage report with slather.
unitTest()
{
echo "123456" | sudo -S xcode-select -s /Applications/Xcode10.1.app
ReportsDir="${WORKSPACE}/Reports"
if [ -d $ReportsDir ]
then
rm -r $ReportsDir
fi
mkdir -p $ReportsDir
# build
xcodebuild -workspace ${Space}.xcworkspace -scheme ${Scheme} -sdk iphonesimulator
# test
xcodebuild test -scheme ${Scheme} -target $UnitTest -destination 'platform=iOS Simulator,name=iPhone 7 Plus' -enableCodeCoverage YES 2>&1 | ocunit2junit
slather coverage --html --input-format profdata --binary-basename ${Scheme} --scheme ${Scheme} --workspace ${Space}.xcworkspace --configuration Debug --ignore **View** --ignore **AppText** --output-directory Reports Sample/${Demo}/${Scheme}.xcodeproj
}
# Slice the retain-cycle section out of the Jenkins build log (between the
# >>retainCycleLeft<< / >>retainCycleRight<< markers) and publish the
# RetainCycles report under the Apache document root.
FormReport()
{
ApacheDocumentPath="/Library/WebServer/Documents/${SDKName}"
LogPath="${WORKSPACE}/../../jobs/${JOB_NAME}/builds/${BUILD_NUMBER}/log"
leftStr=$(grep -n '>>retainCycleLeft<<' ${LogPath})
rightStr=$(grep -n '>>retainCycleRight<<' ${LogPath})
# grep -n output is "lineno:text"; 10# forces base-10 arithmetic
leftLine=$((10#${leftStr%%:*}+1))
rightLine=$((10#${rightStr%%:*}-1))
sedL=${leftLine}
sedR=${rightLine}'p'
sedStr=${sedL}','${sedR}
# RetainCycleLog is set by updatePreview()
echo "123456" | sudo -S sed -n ${sedStr} ${LogPath} > ${RetainCycleLog}
# publish the retain-cycle report into the Apache document root
# BUGFIX: the old test read `[ -d "..."]` (no space before ]), which is a
# runtime syntax error; the two sudo calls below also piped the password
# without -S, so sudo never read it from stdin.
if [ -d "${ApacheDocumentPath}/RetainCycles" ]
then
echo "123456" | sudo -S rm -rf "${ApacheDocumentPath}/RetainCycles"
fi
echo "123456" | sudo -S cp -r "${WORKSPACE}/RetainCycles" "${ApacheDocumentPath}/"
}
# Static analysis: clean stale artifacts, regenerate the compilation database
# with xcpretty, then run OCLint and emit a PMD-style XML report.
oclint()
{
cd ${WORKSPACE}
if [ -d ./derivedData ]; then
rm -rf ./derivedData
fi
# BUGFIX: compile_commands.json and oclintReport.xml are files, so test with
# -f; the old -d checks never matched and the stale files were never removed.
if [ -f ./compile_commands.json ]; then
rm -f ./compile_commands.json
fi
if [ -f ./oclintReport.xml ]; then
rm -f ./oclintReport.xml
fi
find . -type d -name Build -exec rm -rf {} \+
xcodebuild -scheme $Scheme -workspace $Space.xcworkspace clean
xcodebuild -scheme $Scheme -workspace $Space.xcworkspace -configuration Debug COMPILER_INDEX_STORE_ENABLE=NO | xcpretty -r json-compilation-database -o compile_commands.json
if [ -f ./compile_commands.json ]; then
echo '-----编译数据生成完毕-----'
else
echo '-----编译数据生成失败-----'
exit 1
fi
/Users/vimfung/oclint/bin/oclint-json-compilation-database -e Sample -- -report-type pmd -o oclintReport.xml \
-rc LONG_LINE=200 \
-disable-rule ShortVariableName \
-disable-rule ObjCAssignIvarOutsideAccessors \
-disable-rule AssignIvarOutsideAccessors \
-disable-rule UnusedMethodParameter \
-disable-rule UnusedLocalVariable \
-max-priority-1=1000000 \
-max-priority-2=1000000 \
-max-priority-3=1000000 || true
if [ -f ./oclintReport.xml ]; then
echo '-----分析完毕-----'
else
echo '-----分析失败-----'
exit 1
fi
}
# Full memory-profiling pipeline: refresh preview assets, wire in the test
# harness, run the unit tests, then publish the retain-cycle report.
memoryProfiler()
{
updatePreview
configUnitTest
unitTest
FormReport
}
# entry point: profile memory first, then run static analysis
memoryProfiler
oclint
| true |
06919d68e517194bb8df0471abf22ab475837c8f | Shell | hangphan/resistType_docker | /bin/bbtools/textfile.sh | UTF-8 | 613 | 3.75 | 4 | [
"BSD-3-Clause-LBNL"
] | permissive | #!/bin/bash
function usage(){
echo "
Written by Brian Bushnell
Last modified January 21, 2015
Description: Displays contents of a text file.
Usage: textfile.sh <file> <start line> <stop line>
Start line and stop line are zero-based.
Please contact Brian Bushnell at bbushnell@lbl.gov if you encounter any problems.
"
}
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
EA="-ea"
set=0
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# tf: run the fileIO.TextFile Java tool with the caller's arguments.
# Globals (read): EA -- JVM assertion flag, CP -- classpath.
# Echoes the command line to stderr before executing it.
function tf() {
  #module load oracle-jdk/1.7_64bit
  # Build the command as an array so arguments containing spaces (e.g.
  # file names) are preserved; the original flat string relied on
  # unquoted word-splitting and mangled such paths.
  local -a cmd=(java "$EA" -Xmx120m -cp "$CP" fileIO.TextFile "$@")
  echo "${cmd[*]}" >&2
  "${cmd[@]}"
}
tf "$@"
| true |
a0df70fb153990b5ef56e98be9b00b05d36985b6 | Shell | aritama/sisop-2012-2c | /organizar.sh | UTF-8 | 812 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# Build and stage the 2012-2c-bashenato workspace:
#   1. copy the PROCER and SHIELD trees into ~/Desarrollo,
#   2. unpack tests.zip into $HOME,
#   3. build libcommons, PI and PP and stage binaries/configs in $HOME,
#   4. generate ~/exportLDlib.sh to extend LD_LIBRARY_PATH.
# Abort on the first failure: the original kept running after a failed
# cd/make, which could stage stale binaries or delete the wrong tree.
set -euo pipefail

mkdir -p ~/Desarrollo/Workspace/2012-2c-bashenato/
cp -r ../2012-2c-bashenato/PROCER/ ~/Desarrollo/Workspace/2012-2c-bashenato/
cp -r ../2012-2c-bashenato/SHIELD/ ~/Desarrollo/Workspace/2012-2c-bashenato/
cp tests.zip ~/
cd ~/
unzip tests.zip
cp tests/* ~/
rm -f -r tests
rm tests.zip

# Build the shared commons library.
cd ~/Desarrollo/Workspace/2012-2c-bashenato/PROCER/commons/Debug/
make all
cp libcommons.so ~/

# Build and stage PI.
cd ~/Desarrollo/Workspace/2012-2c-bashenato/PROCER/PI/Debug/
make all
cp PI ~/pi
cp pi.config ~/

# Build and stage PP.
cd ~/Desarrollo/Workspace/2012-2c-bashenato/PROCER/PP/Debug/
make all
cp PP ~/pp
cp pp.config ~/

cp -r ~/Desarrollo/Workspace/2012-2c-bashenato/SHIELD/ ~/
rm -r -f ~/Desarrollo

# Generate the helper that extends LD_LIBRARY_PATH with $HOME.
echo '#!/bin/bash' > ~/exportLDlib.sh
chmod +x ~/exportLDlib.sh
echo 'export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:~/' >> ~/exportLDlib.sh
| true |
c83dc9cf3ad867c22f422c3bc546979a5c7344f3 | Shell | woggioni/x-toolchain | /packages/readline/PKGBUILD | UTF-8 | 2,369 | 2.796875 | 3 | [] | no_license | # Maintainer: pingplug <pingplug@foxmail.com>
# Contributor: Filip Brcic <brcha@gna.org>
# Thanks to the patch from the MXE project <mxe.cc>
# Cross-compiled GNU readline PKGBUILD for the ${_target} toolchain.
# ${_target}, ${_os} and ${_prefix} are supplied by the surrounding
# x-toolchain build system -- TODO confirm where they are exported.
pkgname=${_target}-readline
_basever=8.1
_patchlevel=001
pkgver=$_basever.$_patchlevel
pkgrel=1
pkgdesc="Library for editing typed command lines (${_target})"
arch=('any')
depends=("${_target}-gcc" "${_target}-ncurses")
makedepends=("${_target}-configure")
provides=("${_target}-libhistory.so" "${_target}-libreadline.so")
options=('staticlibs' '!buildflags' '!emptydirs')
license=('GPL')
url="https://tiswww.case.edu/php/chet/readline/rltop.html"
source=(https://ftp.gnu.org/gnu/readline/readline-$_basever.tar.gz{,.sig}
inputrc)
validpgpkeys=('7C0135FB088AAF6C66C650B9BB5869F064EA74AB') # Chet Ramey
# Append each upstream point-release patch (plus its .sig) to source=();
# 10#... forces base-10 so the leading zero in _patchlevel is not octal.
if [ $_patchlevel -gt 0 ]; then
for (( _p=1; _p <= $((10#${_patchlevel})); _p++ )); do
source=(${source[@]} https://ftp.gnu.org/gnu/readline/readline-$_basever-patches/readline${_basever//.}-$(printf "%03d" $_p){,.sig})
done
fi
md5sums=('e9557dd5b1409f5d7b37ef717c64518e'
'SKIP'
'58d54966c1191db45973cb3191ac621a'
'8d127cb06ae797a05fba88ada117e6f7'
'SKIP')
# prepare: apply upstream point patches, the MXE patch for mingw targets,
# and strip RPATH from the shared-object configuration.
prepare() {
cd "${srcdir}/readline-${_basever}"
for (( p=1; p<=$((10#${_patchlevel})); p++ )); do
msg "applying patch readline${_basever//./}-$(printf "%03d" $p)"
patch -Np0 -i $srcdir/readline${_basever//./}-$(printf "%03d" $p)
done
if [[ ${_os} == mingw32* ]]
then
msg "applying the patch from MXE project"
patch -Np1 -i ${srcdir}/readline-1.patch
fi
# Remove RPATH from shared objects (FS#14366)
sed -i 's|-Wl,-rpath,$(libdir) ||g' support/shobj-conf
}
# build: out-of-tree configure + make; link against wide-character ncurses
# on mingw targets, plain ncurses elsewhere.
build() {
cd "${srcdir}/readline-${_basever}"
unset LDFLAGS
mkdir -p build-${_target} && pushd build-${_target}
if [[ ${_os} == mingw32* ]]
then
local CURSES_LIB="-lncursesw"
else
local CURSES_LIB="-lncurses"
fi
# bash_cv_wcwidth_broken=no pre-answers a runtime configure probe that
# cannot execute while cross-compiling.
bash_cv_wcwidth_broken=no ${_target}-configure \
--target=${_target}
make SHLIB_LIBS="${CURSES_LIB}"
popd
}
# package: install into $pkgdir and drop the info "dir" index, which
# would conflict with the copy owned by other packages.
package() {
cd "${srcdir}/readline-${_basever}/build-${_target}"
if [[ ${_os} == mingw32* ]]
then
local CURSES_LIB="-lncursesw"
else
local CURSES_LIB="-lncurses"
fi
make install DESTDIR="${pkgdir}" SHLIB_LIBS="${CURSES_LIB}"
rm "$pkgdir/${_prefix}/${_target}/sysroot/usr/share/info/dir"
}
# Shadow the host `strip`: makepkg invokes plain strip, but the objects
# here are cross-compiled, so delegate to the target binutils strip.
strip() {
# BUGFIX: quote "$@" -- the original unquoted $@ re-split every argument,
# mangling object paths that contain spaces.
"${_target}-strip" "$@"
}
# Export the function so subshells spawned by makepkg/fakeroot use it too.
export -f strip
| true |
d5593b98a49de8bb69e390f02107890f4d3b20e6 | Shell | gitvmn/spider | /bin/spider.sh | UTF-8 | 1,419 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# sed -i 's/\r$//' filename   (strip CRLF if this file was edited on Windows)
# Master-only settings (only needed when configuring the master node).
USERS_FILE=/opt/spider/users # path of the file storing user information
# PUBLIC_URL= http://log.tc.mybank.cn # required when nginx fronts the service
# master end ---------------------
# agent start --------------------
MASTER= #http://172.17.10.5:3000
# agent end ----------------------
PORT=3000
HOSTNAME=`hostname`
LOG_DIR=/opt/logs
# NOTE(review): tr -d "addr:" deletes every 'a','d','r',':' CHARACTER, not
# the literal string "addr:". It happens to work for dotted IPv4 output,
# but would mangle any value containing those letters -- confirm.
IP=`/sbin/ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v 172.17| grep -v inet6 | awk '{print $2}' | tr -d "addr:"`
DEBUG=false
CONTAINER_NAME=spider
# Assemble docker run options: log and application dirs mounted read-only,
# optional users file mount, plus PUBLIC_URL/DEBUG environment settings.
OPTIONS=" -v "$LOG_DIR":/opt/logs:ro"
OPTIONS=$OPTIONS" -v /opt/applications:/opt/spider/applications:ro "
if [ $USERS_FILE ];then
OPTIONS=$OPTIONS" -v "$USERS_FILE":/opt/spider/users "
fi
OPTIONS=$OPTIONS" -e PUBLIC_URL="$PUBLIC_URL" "
OPTIONS=$OPTIONS" -e DEBUG="$DEBUG" "
# Dispatch on the requested action (start|stop|restart).
case "$1" in
start)
    # BUGFIX: the original ran `docker rm -f $name`, but $name is never
    # defined in this script, so a stale "spider" container was never
    # removed and the following `docker run` would fail on a name clash.
    docker rm -f $CONTAINER_NAME
    # $OPTIONS is intentionally unquoted: it holds multiple docker flags
    # built above and must word-split into separate arguments.
    docker run -d -p $PORT:3000 -e PORT=$PORT -e HOSTNAME=$HOSTNAME -e IP=$IP -e MASTER=$MASTER --hostname=$HOSTNAME $OPTIONS --name $CONTAINER_NAME spider /opt/spider/run.sh
    nohup docker logs -f $CONTAINER_NAME > ${CONTAINER_NAME}.log 2>&1 &
    tail -f ${CONTAINER_NAME}.log
    ;;
stop)
    docker stop $CONTAINER_NAME
    echo stop success
    ;;
restart)
    $0 stop
    sleep 2
    $0 start
    ;;
*)
    echo "usage: $0 start|stop|restart"
    ;;
esac | true |
ba42a6a398cb6249f9c9cedb99aad7c1deba2376 | Shell | leftygbalogh/UtilityScripts | /git-cron-auto-commiter.sh | UTF-8 | 760 | 3.265625 | 3 | [] | no_license | #!/bin/bash
#running cron as in */53 */7 * * * /usr/local/sbin/git-cron-autocommiter.sh >> /home/ebalgza/git-commit.log 2>&1
# Auto-commit modified tracked files in $gitFolder under a fixed identity.
PATH=$PATH:/usr/local/git/bin/
echo "${PATH}"
date
gitFolder=/tmp
username="Rodimus Prime"
useremail="autobots@inv.ericsson.commit"
# Abort instead of committing in whatever directory cron started us in.
cd "$gitFolder" || exit 1
# Detect modified tracked files with plumbing commands instead of scraping
# the localized `git status` text, which breaks on non-English locales and
# on file names containing spaces or colons.
if git diff --quiet --diff-filter=M; then
echo "nothing to do here"
exit 0
fi
git config --global --unset user.name
git config --global --unset user.email
git config --global --replace-all user.name "$username"
git config --global user.email "$useremail"
# NUL-delimited file names survive spaces and newlines in paths.
git diff --name-only --diff-filter=M -z | xargs -0 git add --
git commit -m "$username automatically commits file changes several times a day."
git config --global --unset user.name
git config --global --unset user.email
| true |
b68cc4d5e7c2e4a3889fc487125510928e948231 | Shell | SwooshyCueb/irods-kuleuven | /bin/apply-patches | UTF-8 | 1,332 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Patch sql setup scripts: force the InnoDB storage engine regardless of
# the MySQL version's spelling of the session variable.
sed -i "s/^SET SESSION .*storage_engine=.*/SET SESSION default_storage_engine='InnoDB';/" /var/lib/irods/packaging/sql/icatSysTables.sql /var/lib/irods/packaging/sql/mysql_functions.sql
# Let irods cope with ipv6 database servers
#curl https://raw.githubusercontent.com/pypyodbc/pypyodbc/main/pypyodbc.py > /var/lib/irods/scripts/irods/pypyodbc.py
# Fix mysql indices to use with utf8 encoding: utf8 keys exceed MySQL's
# index-size limit, so each index is dropped and recreated with a prefix
# length (the numbers in parentheses are byte prefixes).
SQL=/var/lib/irods/packaging/sql/icatSysTables.sql
# remove coll_name from index and don't create a unique index
sed -i '/idx_coll_main2/d' $SQL
echo "create index idx_coll_main2 on R_COLL_MAIN (parent_coll_name (767));" >> $SQL
sed -i '/idx_data_main2/d' $SQL
echo "create unique index idx_data_main2 on R_DATA_MAIN (coll_id,data_name (515),data_repl_num,data_version);" >> $SQL
sed -i '/idx_data_main4/d' $SQL
echo "create index idx_data_main4 on R_DATA_MAIN (data_name (515));" >> $SQL
sed -i '/idx_grid_configuration/d' $SQL
echo "create unique index idx_grid_configuration on R_GRID_CONFIGURATION (namespace (383), option_name (383));" >> $SQL
# Replace varchar(2700) by TEXT in R_RULE_MAIN
# NOTE(review): the '8s' address patches exactly line 8 of the SQL file --
# this silently does nothing if upstream reorders the file; confirm.
sed -i '8s/varchar(2700)/TEXT/g' $SQL
# Set Charset=UTF8 in /var/lib/irods/.odbc.ini
sed -i "s/'Option': '2',/'Option': '2', 'Charset': 'UTF8',/" /var/lib/irods/scripts/irods/database_connect.py
| true |
fd8a1a7fd66dc0825abc1ddeb03f54aa74b48ff6 | Shell | orthodoxenthusiastic/X-Core | /run.sh | UTF-8 | 373 | 3.53125 | 4 | [] | no_license | #!/usr/bin/bash
# Terminal colour escape sequences: r = bright red, w = reset.
r="\e[31;1m"
w="\e[00m"
# Empty here-document: cat outputs nothing (presumably a placeholder for
# an ASCII-art banner that was removed).
cat << EOF
EOF
# Spinner title (reset colour) and the four frame glyphs (red).
title=($w'Starting Script Mr.Zck18... ')
spinner=($r'|' $r'/' $r'-' $r'\' )
# count: run the spinner in the background for ~10 seconds, then kill it.
count(){
spin &
pid=$!
for i in `seq 1 10`
do
sleep 1;
done
kill $pid
}
# spin: loop forever, redrawing the spinner frame in place (\r) every 0.1s.
spin(){
while [ 1 ]
do
for i in ${spinner[@]};
do
echo -ne "\r$i $title";
sleep 0.1;
done;
done
}
# Show the spinner for ten seconds, then hand off to the real script.
count
sh script.sh
| true |
ef6886f64ad1603027bd63ffdab39649b03449b8 | Shell | vujadeyoon/AWS-Neuron-Test | /Bash/bash_3.sh | UTF-8 | 1,477 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
# Set up and benchmark RetinaFace (ResNet50) on AWS Neuron:
# clone sources, download weights, create a venv with the Neuron toolchain,
# then run the PyTorch baseline, compile to Neuron, and run the Neuron test.
path_root=$(pwd)
#
# Start from a clean checkout.
rm -rf ./Pytorch_Retinaface/
#
#
# gdown.pl for google drive
git clone https://github.com/circulosmeos/gdown.pl
#
#
# PyTorch RetinaFace
git clone https://github.com/biubug6/Pytorch_Retinaface
mkdir -p ./Pytorch_Retinaface/weights/
#
#
# Download PyTorch pretrained model from the Google drive and copy the required python3 scripts to test the AWS-Neuron.
# NOTE(review): the leading "././" in the target path is harmless but
# looks like a typo for "./".
./gdown.pl/gdown.pl https://drive.google.com/file/d/14KX6VqF69MdSPk3Tr9PlDYbq7ArpdNUW/view ././Pytorch_Retinaface/weights/Resnet50_Final.pth
cp ./compile_retinaface_resnet50.py ./Pytorch_Retinaface/
cp ./detect_aws_neuron.py ./Pytorch_Retinaface/
#
#
# Create pip env and install dependencies
cd Pytorch_Retinaface
sudo apt-get install python3-venv # install Python 3 virtualenv on Ubuntu
python3 -m venv retinaface_env
source retinaface_env/bin/activate
python3 -m pip install -U pip
pip install --extra-index-url=https://pip.repos.neuron.amazonaws.com --upgrade 'torch-neuron' neuron-cc 'tensorflow==1.15.*' 'torchvision==0.8.2' opencv-python
cd ${path_root}
#
#
# Test for PyTorch (100 iterations)
cd Pytorch_Retinaface && python3 ./detect.py --cpu
cd ${path_root}
#
#
# Compile PyTorch model to AWS-Neuron
cd Pytorch_Retinaface && python3 ./compile_retinaface_resnet50.py
cd ${path_root}
#
#
# Test for AWS-Neuron (100 iterations): This python3 script, detect_aws_neuron.py is based on the detect.py.
cd Pytorch_Retinaface && python3 ./detect_aws_neuron.py --cpu
cd ${path_root}
| true |
709904882c58448f57bc837a7c29ffed6e31b5ee | Shell | compor/exitus | /utils/scripts/fill-the-gap-data.sh | UTF-8 | 285 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env bash
# Usage: fill-the-gap-data.sh <max-entries> <report-file>
# Prints the value recorded for every 4th entry id (2, 6, 10, ... up to N),
# emitting 0 for ids missing from the report.
[[ -z $1 ]] && echo "missing max number of entries" && exit 1
ENTRIES=$1
[[ -z $2 ]] && echo "missing report file location" && exit 1
REPORT_FILE=$2
# Column 1 is the entry id, column 2 its value. Note: awk truthiness means
# a recorded value of 0 or "" also prints as 0, same as a missing entry.
awk -v N=$ENTRIES '{ a[$1]=$2; } END{ for(i=2; i<=N; i+=4) if(a[i]) print a[i]; else print 0; }' "$REPORT_FILE"
| true |
4f0fdb3cc7f04672d783f90a742a52fb30434658 | Shell | LamaAni/postgres-xl-helm | /experimental/permissions_with_vault/install.sh | UTF-8 | 5,809 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Deploy Postgres-XL plus HashiCorp Consul/Vault on Kubernetes.
# Requires: git, helm, kubectl and a reachable cluster context.
SINGLE_NODE_CLUSTER="true"
PGXL_NAME="pgxl"
CONSUL_NAME="consul"
VAULT_NAME="vault"
#=================================================================================================
# SETUP PGXL
#-------------------------------------------------------------------------------------------------
# NOTE(review): `git clone` creates ./postgres-xl-helm, but the install
# and cleanup below reference ./PGXL-HELM -- verify the intended name.
git clone https://github.com/LamaAni/postgres-xl-helm.git
helm install "${PGXL_NAME}" ./PGXL-HELM
rm -rf PGXL-HELM
#-------------------------------------------------------------------------------------------------
# SETUP CONSUL
#-------------------------------------------------------------------------------------------------
# Turns of affinity if one node cluster is set to true for testing purposes
git clone --single-branch --branch v0.9.0 https://github.com/hashicorp/consul-helm.git
if [ "$SINGLE_NODE_CLUSTER" = true ] ; then
# Comment out the 9-line affinity block so all pods may share one node.
sed -i '/affinity: |/,+8 s/^/#/' consul-helm/values.yaml
fi
helm install "${CONSUL_NAME}" ./consul-helm
rm -rf consul-helm
# Give the Consul cluster time to elect a leader before Vault points at it.
sleep 120
#=================================================================================================
#=================================================================================================
# SETUP VAULT
#-------------------------------------------------------------------------------------------------
# Install the Vault chart in HA mode, backed by the Consul server service.
git clone --single-branch --branch v0.1.2 https://github.com/hashicorp/vault-helm.git
sed -i "s/HOST_IP:8500/${CONSUL_NAME}-consul-server:8500/g" vault-helm/values.yaml
helm install "${VAULT_NAME}" ./vault-helm --set='server.ha.enabled=true'
rm -rf vault-helm
sleep 30
# Initialise Vault with a single unseal key share (-n 1 -t 1).
INIT_OUTPUT=$(kubectl exec -it "${VAULT_NAME}-0" -- vault operator init -n 1 -t 1)
sleep 30
UNSEAL_KEY=$(echo "${INIT_OUTPUT}" | grep 'Unseal Key 1:' | cut -d" " -f4)
UNSEAL_KEY=$(sed 's/\x1b\[[0-9;]*m//g' <<< $UNSEAL_KEY) # remove ansi colour ^[[0m^M
ROOT_TOKEN=$(echo "${INIT_OUTPUT}" | grep 'Initial Root Token:' | cut -d" " -f4)
ROOT_TOKEN=$(sed 's/\x1b\[[0-9;]*m//g' <<< $ROOT_TOKEN) # remove ansi colour ^[[0m^M
# Unseal all three HA replicas with the single key, then log in as root.
kubectl exec -it "${VAULT_NAME}-0" -- vault operator unseal "${UNSEAL_KEY}"
kubectl exec -it "${VAULT_NAME}-1" -- vault operator unseal "${UNSEAL_KEY}"
kubectl exec -it "${VAULT_NAME}-2" -- vault operator unseal "${UNSEAL_KEY}"
sleep 30
kubectl exec -it "${VAULT_NAME}-0" -- vault login "${ROOT_TOKEN}"
#=================================================================================================
#=================================================================================================
# SETUP VAULT PGXL SECRETS
#-------------------------------------------------------------------------------------------------
# Enable the database secrets engine and register the Postgres-XL service.
kubectl exec -it "${VAULT_NAME}-0" -- vault secrets enable database
kubectl exec -it "${VAULT_NAME}-0" -- vault write database/config/postgres \
plugin_name=postgresql-database-plugin \
allowed_roles="postgres-role" \
connection_url="postgresql://postgres@${PGXL_NAME}-postgres-xl-svc:5432/postgres?sslmode=disable"
# Dynamic role: issues short-lived read-only Postgres users (1h TTL, 24h max).
kubectl exec -it "${VAULT_NAME}-0" -- vault write database/roles/postgres-role \
db_name=postgres \
creation_statements="CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; \
GRANT SELECT ON ALL TABLES IN SCHEMA public TO \"{{name}}\";" \
default_ttl="1h" \
max_ttl="24h"
#=================================================================================================
#=================================================================================================
# SETUP K8S AUTH FOR VAULT
#-------------------------------------------------------------------------------------------------
kubectl exec -it "${VAULT_NAME}-0" -- sh -c "echo 'path \"database/creds/postgres-role\" {
capabilities = [\"read\"]
}
path \"sys/leases/renew\" {
capabilities = [\"create\"]
}
path \"sys/leases/revoke\" {
capabilities = [\"update\"]
}' > postgres-policy.hcl; \
vault policy write postgres-policy postgres-policy.hcl;"
#-------------------------------------------------------------------------------------------------
cat > postgres-serviceaccount.yml <<EOF
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: role-tokenreview-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: postgres-vault
namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: postgres-vault
EOF
#-------------------------------------------------------------------------------------------------
kubectl apply -f postgres-serviceaccount.yml; rm -rf postgres-serviceaccount.yml
#-------------------------------------------------------------------------------------------------
VAULT_SA_NAME=$(kubectl get sa postgres-vault -o jsonpath="{.secrets[*]['name']}"); \
SA_JWT_TOKEN=$(kubectl get secret $VAULT_SA_NAME -o jsonpath="{.data.token}" | base64 --decode; echo); \
SA_CA_CRT=$(kubectl get secret $VAULT_SA_NAME -o jsonpath="{.data['ca\.crt']}" | base64 --decode; echo); \
K8S_HOST=$(kubectl exec consul-consul-server-0 -- sh -c 'echo $KUBERNETES_SERVICE_HOST'); \
kubectl exec -it "${VAULT_NAME}-0" -- vault auth enable kubernetes; \
kubectl exec -it "${VAULT_NAME}-0" -- vault write auth/kubernetes/config \
token_reviewer_jwt="$SA_JWT_TOKEN" \
kubernetes_host="https://$K8S_HOST:443" \
kubernetes_ca_cert="$SA_CA_CRT"; \
kubectl exec -it "${VAULT_NAME}-0" -- vault write auth/kubernetes/role/postgres \
bound_service_account_names=postgres-vault \
bound_service_account_namespaces=default \
policies=postgres-policy \
ttl=24h;
#=================================================================================================
echo "Your vault details are:
Unseal Key 1: ${UNSEAL_KEY}
Initial Root Token: ${ROOT_TOKEN}
Keep them safe!"
| true |
45de1eb78f69b8297c255ea67be91789a82e066c | Shell | UtsavChokshiCNU/GenSym-Test2 | /run.sh | UTF-8 | 105 | 2.578125 | 3 | [] | no_license | IFS=$'\n'
# Collect every regular file under ./src/g*; IFS was set to newline on the
# line above, so the unquoted $x below splits on whole paths.
x=$(find ./src/g* -type f)
for file in $x
do
# NOTE(review): $file is unquoted here; paths containing spaces or glob
# characters would break. Quote as "$file" once confirmed safe.
git add $file
git commit -m "Commiting all"
done | true |
06f0da242e25bc9d0a74d01fe5b78b6a7fc4c73b | Shell | Luxan/ServerClientQTCrypto | /generate_documentation.sh | UTF-8 | 231 | 2.828125 | 3 | [] | no_license | #!/bin/bash
# Generate a default Doxygen config on first run, then build the docs.
# Robustness: fail early with a clear message when doxygen itself is not
# installed, instead of letting `doxygen -g` fail cryptically.
if ! command -v doxygen >/dev/null 2>&1
then
    echo "doxygen not found in PATH; please install it." >&2
    exit 1
fi
if [ ! -e doxygen_config_file ]
then
    echo "Could not find configuration file for doxygen."
    echo "Creating default config file..."
    doxygen -g doxygen_config_file
fi
doxygen doxygen_config_file
| true |
23837254b108d87951cef8e108f6c5869c4aee1a | Shell | oncoapop/data_reporting | /beast_scripts/primer3.sh | UTF-8 | 1,303 | 3.65625 | 4 | [] | no_license | #!/bin/sh
# This Script was written by Damian Yap (Apr 2013)
# Primer3 is installed on beast
# and needs a specific input
# This script runs primer3
# on .txt files in the format (record sep "=")
# Working Directory
dir="/home/dyap/Projects/Tumour_Evol/"
clear
cd $dir
ls
# Source and Output directories where Barcoded files stored
sourcedir=$dir
echo "Input the name of the file that you want to run (without .ext but must be .txt):"
read filename
rm -f nohup.out
outdir=Primer3_outputs
outfile=$outdir/$filename"_primer3"
infile=$dir$filename".txt"
echo "File to output " $outfile
echo "Input file (with full path): "$infile
echo Output to this directory $outdir
nohup primer3_core < $infile
read ans
clear
echo "Number of sequences in input file"
grep "SEQUENCE_TEMPLATE=" -c $infile
echo "Number of outputs in " $outfile
grep "^=" -c nohup.out
echo ================================
echo "Number of failed sequences where there are no primers"
grep -c "PAIR_NUM_RETURNED=0" nohup.out
echo "Press Enter to show..."
read ans
grep -B24 "PAIR_NUM_RETURNED=0" nohup.out | more
echo Press Return to continue...
read ans
nohup primer3_core -format_output < $infile
echo Formatting for viewing...
echo Press return to continue...
read ans
view=$outfile"_view.txt"
cp nohup.out $view
| true |
144f8e8dd86bf1273a8868bf8d8c1c776af84c63 | Shell | friendbear/bashr_scripts_utf8 | /chap07/nice.sh | UTF-8 | 606 | 3.265625 | 3 | [] | no_license | #!/bin/sh
# nice.sh - niceコマンドのサンプル
# do_load
# システムに負荷をかける(CPU利用率を上げる)関数
do_load() {
while true; do
gzip -9 -c /bin/sh > /dev/null # システムに負荷をかける
done
}
# バックグラウンドでシステムに負荷をかける
do_load &
bgpid=$!
# 異なるnice値で同じプログラムを実行する
time nice -n 0 gzip -9 -c /bin/sh > /dev/null
time nice -n 10 gzip -9 -c /bin/sh > /dev/null
time nice -n 20 gzip -9 -c /bin/sh > /dev/null
# バックグラウンドプロセスを終了させる
kill $bgpid
| true |
6f2d8c0a769e1c30cb83b42a2e3e0a64c968703b | Shell | SteveSatterfield/HEVf | /idea/src/hev-testSuite/test_moth/mkMoth | UTF-8 | 4,640 | 3.453125 | 3 | [] | no_license | #!/usr/bin/env bash
# Test Suite data directory
TS_DATA="${HEV_IDEA_DIR}/testSuite"
MOTH_DATA="${TS_DATA}/moth_data"
# NOTE(review): $HR is assumed to point at the HEV source root -- confirm
# it is exported by the environment invoking this generator.
SRC_DIR="$HR/idea/src/hev-testSuite/test_moth"
# Dir that test will run from
RUN_DIR="${HEVROOT}/test/renderTest/testCase_moth"
# Convert SAVG files to IVE and copy them into the test-suite data dir.
for FILE in `\ls -C1 ${SRC_DIR}/src/SAVG/*.savg` # For each SAVG file in src/SAVG...
do
F=${FILE##*/} # ...Get just filename
#DEBUG echo "convert ${FILE} to ${SRC_DIR}/src/IVE/${F%.*}.ive"
iris-convert ${FILE} ${SRC_DIR}/src/IVE/${F%.*}.ive # ...Convert SAVG file to IVE and put in src/IVE
done
cp ${SRC_DIR}/src/IVE/*.ive ${SRC_DIR}/moth_data/ # Copy the new IVE files to dat
# make moth.iris
# Paths of the generated IRIS scene files and the run script.
MOTHIRIS="${TS_DATA}/moth.iris"
GRIDSIRIS="${MOTH_DATA}/grids.iris"
SIDESIRIS="${MOTH_DATA}/sides.iris"
POINTSIRIS="${MOTH_DATA}/points.iris"
RUNSCRIPT="${SRC_DIR}/runTestCase.sh"
# Output moth.iris to source dir, then copy to test suite data dir
#DEBUG echo "Output main IRIS file to ${MOTHIRIS}..."
echo "
###########################################################
# Load objects and create scenegraph switch models on/off #
###########################################################
LOAD moth_Points ${MOTH_DATA}/points.iris
NODEMASK moth_Points ON
ADDCHILD moth_Points world
LOAD moth_Sides ${MOTH_DATA}/sides.iris
NODEMASK moth_Sides ON
ADDCHILD moth_Sides world
LOAD moth_Grids ${MOTH_DATA}/grids.iris
NODEMASK moth_Grids ON
ADDCHILD moth_Grids world
" > ${SRC_DIR}/moth.iris
cp ${SRC_DIR}/moth.iris ${MOTHIRIS}
# Output top level group node for each of three IRIS files.
# This will initialize the files as well.
echo "GROUP moth_Grids" > ${SRC_DIR}/moth_data/grids.iris
echo "GROUP moth_Sides" > ${SRC_DIR}/moth_data/sides.iris
echo "GROUP moth_Points" > ${SRC_DIR}/moth_data/points.iris
# For each index, write a node to each IRIS file.
for INDEX in 0.0 0.2 0.4 0.5 0.7 0.8 1.0 1.2 1.4 1.5 1.7 1.8 2.0 2.2 2.4 2.5 2.7 2.8 3.0 3.2 3.4 3.5 3.7 3.8 4.0
do
# Girds IRIS file
echo "
LOAD moth-${INDEX}_Grid ${MOTH_DATA}/${INDEX}_Grid.ive
NODEMASK moth-${INDEX}_Grid OFF
ADDCHILD moth-${INDEX}_Grid moth_Grids
" >> ${SRC_DIR}/moth_data/grids.iris
# Sides IRIS file
echo "
LOAD moth-${INDEX}_Sides ${MOTH_DATA}/${INDEX}_Sides.ive
NODEMASK moth-${INDEX}_Sides OFF
ADDCHILD moth-${INDEX}_Sides moth_Sides
" >> ${SRC_DIR}/moth_data/sides.iris
# Points IRIS file
echo "
LOAD moth-${INDEX} ${MOTH_DATA}/${INDEX}.ive
NODEMASK moth-${INDEX} OFF
ADDCHILD moth-${INDEX} moth_Points
" >> ${SRC_DIR}/moth_data/points.iris
done
echo "RETURN moth_Grids" >> ${SRC_DIR}/moth_data/grids.iris
echo "RETURN moth_Sides" >> ${SRC_DIR}/moth_data/sides.iris
echo "RETURN moth_Points" >> ${SRC_DIR}/moth_data/points.iris
# Copy data to central test suite data directory.
# This is done in GNUmakefile, but do it in case one runs
# this without running make
if [ -d "${MOTH_DATA}" ]
then
cp ${SRC_DIR}/moth_data/* ${MOTH_DATA}
else
mkdir ${MOTH_DATA}
cp ${SRC_DIR}/moth_data/* ${MOTH_DATA}
fi
echo "
#! /bin/bash
# runTestCase.sh
# June 17, 2015
# Steve Satterfield
LOG=/tmp/\$(basename \$0 .sh)_\${USER}_\$$.log
TC=\$( basename \$(pwd) )
export OSG_FILE_PATH=\${OSG_FILE_PATH}:\${HEV_IDEA_DIR}/testSuite:\${HEV_IDEA_DIR}/testSuite/moth_data
export PATH=\${HEVROOT}/test/renderTest/bin:\${PATH}
# Remove previous frameGrabber images to ensure we are later testing
# against newly created images
rm -f /tmp/frameGrabber-\${USER}*
# Generate new images
irisfly --background 1 1 1 --ex moth.iris moth.timeline.iris > \$LOG 2>&1 #desktopWindowTrait512.iris
# Compare new images against base line
exitCode=0
pamCount=\$(ls -1 baseLine_*.pam | wc -l)
for ((i=0; i<\${pamCount}; i++))
do
n=\$(printf \"%06d\" \${i})
newfile=\"/tmp/frameGrabber-\${USER}-snap-perspective-\${n}.pam\"
basefile=\"baseLine_\${n}.pam\"
# Do the test
compare-binary \${newfile} \${basefile} >> \$LOG 2>&1
Dif=\$?
# DEBUG echo Dif = \${Dif}
# Check the result
if [ \${Dif} -ne 0 ]
then
echo \"\${TC} base line \${i}: FAILED (\${Dif})\"
echo \" Generated file differs from base line \${i}\"
echo \" Baseline: \${basefile}\"
echo \" Generated: \${newfile}\"
exitCode=1
else
echo \"\${TC} base line \${i}: PASSED\"
fi
done
# Return exit code
exit \${exitCode}" > ${RUNSCRIPT}
# Copy run script to run dir (i.e. .../renderTest/testCase_<name>)
cp ${RUNSCRIPT} ${RUN_DIR} | true |
c9baebb7d6c26b48a467fcd5bf8b75b8501a9ddf | Shell | ElkHairCaddis/substitutionCode | /getTop100FamiliesData.awk | UTF-8 | 5,250 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Have a list of the most active families in the Nielsen consumer data set according to number of trips
# and dollars spent. Would like to subset the "XXXX_trips.tsv' and 'XXXX_purchases.csv' files to
# create one merged file for each family.
# Define input/output directories and files
inDataDir='/scratch/enma222/panel'
outDataDir='/scratch/enma222/panel/allFamData'
topFamFile='/scratch/enma222/panel/metaData_eamonn/householdIDCounts.tsv'
#Retain only IDs of top 100 families
cut -f1 ${topFamFile} | head -101 | tail -100 > ${outDataDir}/topIDList.tmp
topFamIDFile=${outDataDir}/topIDList.tmp
# Parse through the XXXX_trips.tsv and XXXX_purchases.tsv for each year.
# We first want to extract the tripIDs related to any family.
# Then parse through the purchases file and output any line having a matching tripID
for yr in {2004..2016}; do
purchFile=${inDataDir}/${yr}/Annual_Files/purchases_${yr}.tsv
tripFile=${inDataDir}/${yr}/Annual_Files/trips_${yr}.tsv
outFile=${outDataDir}/${yr}_100FamilyPurchases.tmp
outDatesFile=${outDataDir}/${yr}_tripIDWithDates.tmp
awk 'BEGIN {
# Get family IDs
while (getline < "'"$topFamIDFile"'")
{
ids[$1];
}
close("'"$topFamIDFile"'")
print "Got family IDs from file, searching trips..."
# Search for family IDs in trip file,
# Store trip IDs WITH FAM ID AS KEY in array
# Also, print tripID - date to file
while (getline < "'"$tripFile"'")
{
if ($2 in ids)
{
trips[$1] = $2;
print $1"\t"$3 > "'"$outDatesFile"'"
}
}
close("'"$tripFile"'")
print "Stored trip IDs in array, searching purchases..."
# Search for trip IDs in purchases file,
# redirect matching lines to outfile
while (getline < "'"$purchFile"'")
{
if ($1 in trips) {
famID=trips[$1];
print $0"\t"famID > "'"$outFile"'";
}
}
close("'"$purchFile"'")
close("'"$outfile"'")
print "'"$purchFile"'"
print "Finished parsing"
}'
# Get rid of the ^M character inserted by awk
sed -i 's/\r//g' ${outFile}
echo "^M character removed"
done
#-----------------------------------------------------------------------------------------------------------------------------------------------------#
# Obtain all unique UPCs from the master purchases file. Collect meta data on these items from the master_products file
# (supplied by Nielsen).
masterPurchaseFile='/scratch/enma222/panel/allFamData/100FamilyPurchases_master.tsv'
UPCFile='/scratch/enma222/panel/allFamData/100FamilyUPCs.txt'
cut -f2 ${masterPurchaseFile} | sort | uniq > ${UPCFile}
echo "Collected unique UPCs... ready to search master product file"
masterProductFile='/scratch/enma222/panel/Master_Files/Latest/products.tsv'
UPCMetaDataFile='/scratch/enma222/panel/allFamData/100Family_UPCMetaData.tmp'
awk 'BEGIN {
# Get UPCs from file, store in array
while (getline < "'"$UPCFile"'")
{
upcs[$1];
}
close("'"$UPCFile"'")
print "Collected UPCs..."
# Parse master products file; return lines
# with matching UPCs
while (getline < "'"$masterProductFile"'")
{
if ($1 in upcs) print $0 > "'"$UPCMetaDataFile"'";
}
close("'"$masterProductFile"'")
close("'"$UPCMetaDataFile"'")
print "Finished scraping master product file"
}'
# Only keep the upc, product_module_code, product_group_code, department_code, and brand_code from the meta data file
cutMetaData='/scratch/enma222/panel/allFamData/100Family_UPCMetaData_cut.tsv'
cut -f1,4,6,8,10 ${UPCMetaDataFile} > ${cutMetaData}
echo "Finished cutting meta data file... process complete"
#-----------------------------------------------------------------------------------------------------------------------------------------------------#
# Run python script to add product meta data to master family purchases file
#
inDir='/scratch/enma222/panel/allFamData'
upc=${inDir}/cut_UPCMetaData_100Family.tsv
trips=${inDir}/tripIDWithDates_100Family.tsv
purchases=${inDir}/purchases_Master_100Family.tsv
./appendDateAndCategoryToPurchase.py ${upc} ${trips} ${purchases}
#-----------------------------------------------------------------------------------------------------------------------------------------------------#
# Concatenate all of the XXXX_purchase.tmp files into one master list.
# Grep each family ID to output a list of all purchases from 2004-2016.
cd /scratch/enma222/panel/allFamData
master=completeData/purchases_Master_100Family_addPurchaseMetaData.tsv
famIDFile=completeData/famIDList_100Family.txt
awk 'BEGIN {
while (getline < "'"$famIDFile"'")
{
famID[$1];
}
close("'"$famFile"'")
for ( id in famID)
{
ofname=id"_allPurchases.tsv";
while (getline < "'"$master"'")
{
if ($8 == id) print $0 > ofname;
}
close("'"$master"'")
print id" Finished..."
}
}'
#-----------------------------------------------------------------------------------------------------------------------------------------------------#
# Finally, cut the individual purchase files of each family to only keep necessary information.
# Add header line
# Then sort by purchase date.
# Sort each per-family purchase file by field 13 (the purchase date).
# Abort if the data directory is unreachable rather than sorting nothing.
cd /scratch/enma222/panel/allFamData || exit 1
ls famPartitionedData/ > fileList.txt
while IFS= read -r line; do
sort -k13 "famPartitionedData/${line}" > "${line}.sort"
# BUGFIX: use double quotes -- the original single-quoted '$line done'
# printed the literal string "$line done" for every file instead of the
# file name being reported.
echo "$line done"
done < fileList.txt
| true |
748088676cce6f743107d479c5eadd5c82d357df | Shell | jweslley/git-hub | /bin/git-fork | UTF-8 | 612 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Fork the GitHub repository behind `origin` to the account configured in
# github.user, then add the fork as a fetch remote named after that user.
# Reads remote.origin.url and github.user from git config; exits 1 with a
# message when origin is not a GitHub URL or any value is missing.
origin_url=$(git config remote.origin.url)
domain=$(printf '%s\n' "$origin_url" | cut -d/ -f3)
owner=$(printf '%s\n' "$origin_url" | cut -d/ -f4)
repo=$(printf '%s\n' "$origin_url" | cut -d/ -f5)
user=$(git config github.user)

[ "$domain" = "github.com" ] || { echo "not a github repo."; exit 1; }
[ -n "$owner" ] || { echo "unable to define repo owner."; exit 1; }
[ -n "$repo" ] || { echo "unable to define repo name."; exit 1; }
[ -n "$user" ] || { echo "username required."; exit 1; }

repo=${repo%.git}
echo "Forking $owner/$repo"
if git hub api POST "/repos/$owner/$repo/forks" > /dev/null; then
    git remote add -f "$user" "git@github.com:$user/$repo.git"
fi
| true |
4270d9e7f35775dd93a305df73c44afa26c67834 | Shell | gs-niteesh/rtems-release | /rtems-release-kernel | UTF-8 | 4,559 | 3.625 | 4 | [] | no_license | #! /bin/sh
#
# RTEMS Tools Project (http://www.rtems.org/)
# Copyright 2015,2016,2019 Chris Johns (chrisj@rtems.org)
# All rights reserved.
#
# This file is part of the RTEMS Tools package in 'rtems-tools'.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
set -e
#
# This worker script bootstraps the RTEMS kernel.
#
# Invoked by the rtems-release driver with five positional arguments
# (package, version, revision, release URL, top directory).  Relies on
# ${release_top}, ${top}, ${comp}, ${comp_tar} and ${comp_ext} being
# provided by the sourced helper scripts below.
. ${release_top}/rtems-release-version
echo "RTEMS Release RTEMS Kernel, v${rtems_release_version}"
#
# Defaults.
#
. ${release_top}/rtems-release-defaults
#
# The package, version, revision and release URL.
#
if [ $# -ne 5 ]; then
 echo "error: must be arguments, the package, version," \
      "release, release URL and the top directory."
 exit 1
fi
package=$1
version=$2
revision=$3
release_url=$4
toptop=$5
release=${version}.${revision}
prefix=${package}-${release}
echo "Package: ${package}"
echo "Release: ${release}"
#
# Unpack the released RSB.
#
rsb=rtems-source-builder-${release}
echo "tar ${comp_tar}xf ../${rsb}.tar.${comp_ext}"
tar ${comp_tar}xf ../${rsb}.tar.${comp_ext}
#
# Build autoconf and automake.
#
cd ${rsb}/rtems
../source-builder/sb-set-builder --without-release-url \
	--prefix=${top}/at ${version}/rtems-autotools
cd ../..
#
# Set the path to the autotools.
#
export XPATH=$PATH
export PATH=${top}/at/bin:$PATH
#
# Bootstrap RTEMS, collect the BSPs, and update the README.
#
cd ${prefix}
echo "Set the version in the build system.."
# Rewrite every version.m4 so the generated build system reports the
# release number instead of the development (git) version string.
for f in $(find . -name version.m4)
do
 echo " Generating ${f}"
 cat <<EOF > ${f}
dnl Autogenerated for RTEMS release: ${release}
AC_DEFUN([RTEMS_VERSIONING],
m4_define([_RTEMS_VERSION],[${release}]))
dnl RTEMS API Version
m4_define([_RTEMS_API],[${version}])
EOF
done
echo "Bootstrapping RTEMS"
./bootstrap -c
# NOTE(review): '-lt' requires an integer operand; this assumes
# ${version} is the major API number (4, 5, ...), not a dotted string
# such as "4.11" -- confirm against the caller.
if [ ${version} -lt 5 ]; then
 ./bootstrap -p
 ${top}/${rsb}/source-builder/sb-bootstrap
else
 ./rtems-bootstrap
fi
echo "Cleaning autom4te.cache"
rm -rf $(find . -name autom4te.cache)
echo "Generate ARCH-BSP.txt"
./rtems-bsps -m > ${toptop}/ARCH-BSP.md
echo "Generate README"
cp README README.tmp
# Point documentation links in the README at the versioned release docs
# and append the branch query (?h=<version>) to git.rtems.org URLs.
cat README.tmp | \
    sed -e "s|docs\.rtems\.org\/branches\/master\/|docs\.rtems\.org\/releases\/rtems-docs-${release}\/|g" | \
    sed -e "s|docs\.rtems\.org\/doxygen\/branches\/master\/|docs\.rtems\.org\/doxygen\/releases\/rtems-doxygen-${release}\/|g" | \
    awk "/https:\/\/git\.rtems\.org\/rtems/{\$0=\$0\"?h=${version}\"}{print}" > README
rm -r README.tmp
cd ..
#
# Clean up.
#
rm -rf ${rsb} ${at}
echo "Bootstrap complete."
#
# Only build doxygen output if present on the system.
#
if [ "`command -v doxygen`"no = no ]; then
 echo "warning: doxygen not built: doxygen not found"
 exit 0
fi
#
# Doxygen
#
echo "Creating: Doxyfile"
# Escape '/' in the paths so they can be used inside sed s/// bodies.
top_srcdir=$(echo ${prefix}/cpukit | sed -e 's/\//\\\//g')
perl=$(which perl | sed -e 's/\//\\\//g')
cat ${prefix}/cpukit/Doxyfile.in | \
    sed -e "s/@PACKAGE_VERSION@/${release}/g" \
        -e "s/@top_srcdir@/${top_srcdir}/g" \
        -e "s/@PERL@/${perl}/g" \
        -e "s/^INPUT[[:space:]].*=.*$/INPUT = ${top_srcdir}/g" \
        -e "s/^HAVE_DOT[[:blank:]]/DOT_NUM_THREADS = 1\\
HAVE_DOT /g"> Doxyfile
doxygen Doxyfile
echo "Creating: rtems-doxygen-${release}.tar.${comp_ext}"
if [ ${version} -lt 5 ]; then
 mv cpukit_doxy/html rtems-doxygen-${release}
else
 mv html rtems-doxygen-${release}
fi
tar cf - rtems-doxygen-${release} | \
    ${comp} > ../rtems-${release}-doxygen.tar.${comp_ext}
exit 0
| true |
2bfaf04961c81affb149e579fdf73f7ecafc2a4d | Shell | tayfun/keep | /conf/wrapper_script.sh | UTF-8 | 1,065 | 3.25 | 3 | [] | no_license | #!/bin/sh
# Script for running multiple commands in a container.
# Adapted from https://docs.docker.com/engine/admin/multi-service_container/
#
# Starts uWSGI (daemonized) and nginx, then stays in the foreground,
# polling every 60 seconds; if either service disappears the script
# exits non-zero so the container supervisor can restart everything.

# Return 0 when some process command line matches $1.
# The trailing 'grep -q -v grep' filters out the grep process itself.
is_running() {
  ps aux | grep "$1" | grep -q -v grep
}

uwsgi --plugins-dir /usr/lib/uwsgi/ --need-plugin python3 --uwsgi-socket 127.0.0.1:8000 --chdir /app --wsgi-file $PROJECT_NAME/wsgi.py --master --processes 2 --threads 2 --env DJANGO_SETTINGS_MODULE=$PROJECT_NAME.settings_prod --max-requests=100 --harakiri=45 --daemonize2 /var/log/uwsgi/uwsgi.log
status=$?
if [ $status -ne 0 ]; then
  echo "uWSGI failed to start: $status"
  exit $status
else
  echo "uWSGI finished initializing"
fi

nginx
status=$?
if [ $status -ne 0 ]; then
  echo "nginx failed to start: $status"
  exit $status
else
  echo "nginx finished initializing"
fi

while true; do
  if ! is_running uwsgi; then
    echo "uWSGI died"
    # Was 'exit -1': POSIX exit only accepts values 0-255, so a
    # negative argument is undefined behaviour under /bin/sh.
    exit 1
  fi
  if ! is_running nginx; then
    echo "nginx died"
    exit 1
  fi
  sleep 60
done
| true |
0685512ca061e75e70bbc11ae1c35c3d4bc8e531 | Shell | AppliedLogicSystems/ALSProlog | /core/unused_alsp_src/i386/oldstuff/CPDOSPHAR | UTF-8 | 1,102 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#
# Copy files of MS-DOS version of 386 system (for PharLap Environment)
#
# Converts every source/build file under DOSPHAR/obj with 'topc'
# (presumably a Unix-to-PC text converter -- TODO confirm) into the
# PC-mounted destination directory, then opens permissions wide so the
# DOS side can read everything.
DESTDIR=/elvis/pcfs/tmp
# C/header/assembly and m4 sources are converted; binary artefacts
# (libta.lib, vmrmc.o, cle.obj) are copied verbatim.
( cd DOSPHAR/obj ; \
	( for i in *.[chs] ; do ( topc $i > $DESTDIR/$i ) ; done ) ; \
	( for i in *.m4 ; do ( topc $i > $DESTDIR/$i ) ; done ) ; \
	( cp libta.lib $DESTDIR/libta.lib ) ; \
	( cp vmrmc.o $DESTDIR/vmrmc.o ) ; \
	( cp cle.obj $DESTDIR/cle.obj ) ; \
	( topc Makefile.sys > $DESTDIR/makefile ) ; \
	cd ../.. )
#( cp DOSPHAR/libta.lib $DESTDIR/libta.lib )
#( cp DOSPHAR/vmrmc.o $DESTDIR/vmrmc.o )
#( cd DOSPHAR/obj/alsdir ; \
#  (for i in *.pro ; do ( topc $i > $DESTDIR/$i ) ; done) ; \
#  cd ../../.. )
# Convert the Prolog library tree, creating the destination directories
# on first use.  Note the library/*.pro loop relies on $i already
# containing the "library/" prefix to land in alsdir/library/.
( cd DOSPHAR/obj/alsdir ; \
	(test -d $DESTDIR/alsdir || mkdir $DESTDIR/alsdir) ; \
	(for i in *.pro ; do ( topc $i > $DESTDIR/alsdir/$i ) ; done) ; \
	(test -d $DESTDIR/alsdir/library || mkdir $DESTDIR/alsdir/library) ; \
	(for i in library/*.pro ; do ( topc $i > $DESTDIR/alsdir/$i ) ; done) ; \
	cd ../../.. )
#
# Let everybody to access files in the destination directory
#
chmod 777 $DESTDIR/*
chmod 777 $DESTDIR/alsdir/*
chmod 777 $DESTDIR/alsdir/library/*
| true |
0c99690687bef1eefe08ce8c7e2bd89c69123349 | Shell | bborsari/Scripts | /utils/comment.script.sh | UTF-8 | 183 | 3.375 | 3 | [] | no_license | #!/bin/bash
# script to comment file
# usage: comment.script.sh <file>
# Prefixes every line of <file> with "# " unless the first field already
# starts with '#' or the line is blank; the result goes to stdout.
file=$1
# NOTE(review): $file is deliberately unquoted -- with no argument awk
# falls back to reading stdin, but paths containing spaces will break;
# consider quoting plus an explicit usage guard.
awk 'BEGIN{FS=" "; OFS="\t"}{if (( $1 ~ /^#/ ) || ($1=="")) {print $0} else {print "# "$0}}' $file
| true |
f909429c29df0d6da7da20eb64f1ca8bb0654dd8 | Shell | sowmyaiyer/new_repo | /flu/check_rRNA_numbers.sh | UTF-8 | 548 | 2.828125 | 3 | [] | no_license | for time in {"A","B","C","D","E","F","G","H"}
do
# Per time point A..H: count rRNA-mapped reads and total reads.
echo $time
# Mapped reads only (-F 4 drops unmapped); tally hits per reference name.
samtools view -F 4 ../bowtie_out/112213flucap_rRNA_${time}.sorted.bam | awk '{ print $3}' | sort | uniq -c > ../txt/rRNA_reads_time_${time}.txt
# Sum the per-reference counts to get the total rRNA read count.
total_rRNA_reads=`awk 'BEGIN{sum=0}{sum=sum+$1}END{print sum}' ../txt/rRNA_reads_time_${time}.txt`
echo "total rRNA reads = ${total_rRNA_reads}" >> ../txt/rRNA_reads_time_${time}.txt
# Total record count of the full (non-rRNA-filtered) alignment.
totalReads=`samtools view -c ../bowtie_out/112213flucap_${time}.sorted.bam`
echo "total reads = ${totalReads}" >> ../txt/rRNA_reads_time_${time}.txt
done
| true |
7f6301b82fa8de64c294f56ebfbf89d3cdcf34e2 | Shell | pzi/dotfiles | /.bash_profile | UTF-8 | 1,597 | 3.265625 | 3 | [] | no_license | # Load the shell dotfiles, and then some:
# * ~/.path can be used to extend `$PATH`.
# * ~/.extra can be used for other settings you don’t want to commit.
for file in ~/.{aliases,bash_prompt,functions}; do
	[ -r "$file" ] && [ -f "$file" ] && source "$file"
done
unset file
# Case-insensitive globbing (used in pathname expansion)
shopt -s nocaseglob
# Append to the Bash history file, rather than overwriting it
shopt -s histappend
# Autocorrect typos in path names when using `cd`
shopt -s cdspell
# Prepend timestamps to bash history
export HISTTIMEFORMAT='%Y-%b-%d %a %H:%M:%S - '
export PS1='\t \u@\h:\w\$ '
# Enable some Bash 4 features when possible:
# * `autocd`, e.g. `**/qux` will enter `./foo/bar/baz/qux`
# * Recursive globbing, e.g. `echo **/*.txt`
for option in autocd globstar; do
	shopt -s "$option" 2> /dev/null
done
# Add `killall` tab completion for common apps
complete -o "nospace" -W "Contacts Calendar Dock Finder iTunes Spotify SystemUIServer iTerm Tweetbot" killall
# use sublime to open gems
export BUNDLER_EDITOR=subl
# Add `~/bin` to the `$PATH`
export PATH="$HOME/bin:$PATH"
# Load RVM into the shell session if it is installed.
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm"
# If hub is installed, evaluate its bash alias wrapper (aliases git=hub).
( which hub > /dev/null ) && $(hub alias -s bash)
# Requires bash-completion (brew install bash-completion)
if [ -f `brew --prefix`/etc/bash_completion ]; then
	. `brew --prefix`/etc/bash_completion
fi
# Android SDK
export ANDROID_HOME=/usr/local/opt/android-sdk
## Added support for local rubies
# export PATH=$HOME/local/ruby/bin:$PATH
# This loads NVM
[[ -s $HOME/.nvm/nvm.sh ]] && . $HOME/.nvm/nvm.sh
| true |
ea7934dd788d8a68cc01134848bc71e10359201b | Shell | arjunkc/scanner-scripts | /install.sh | UTF-8 | 1,328 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# based on version brscan-skey version 0.2.4-1
# needs changes if brother scripts change
# Installs the local scanner scripts into the Brother brscan-skey
# driver directory and echoes the packaged default options.
shopt -s extglob
#DEFAULT_BRSCAN_DIR="/opt/brother/scanner/brscan-skey"
DEFAULT_BRSCAN_DIR="/tmp/brscan-skey"
if [ -e "$DEFAULT_BRSCAN_DIR" ]; then
	# VERSION is used for all other scripts
	# BINVERSION is used for the scantoemail script and cfg file
	VERSION=$(ls "$DEFAULT_BRSCAN_DIR"/brscan-skey*.sh | sed 's/^.*brscan-skey-//g' | sed s/"\.sh"// )
	# NOTE(review): eval of grep output executes whatever the matched
	# BINVERSION= line contains -- fine for a trusted vendor file, but
	# worth keeping in mind.
	eval $(grep "^BINVERSION=" "$DEFAULT_BRSCAN_DIR"/brscan-skey*.sh)
	echo "Detected brscan-skey version: $VERSION"
	echo "Detected brscan-skey BINVERSION: $BINVERSION"
	if [ -e "$DEFAULT_BRSCAN_DIR"/script ]; then
		#cp "!(brscan-skey-0.2.4-0.cfg)" "$DEFAULT_BRSCAN_DIR"/script -v
		# extglob !(...) copies everything except the versioned cfg file.
		/bin/cp !(brscan-skey-${BINVERSION}.cfg) $DEFAULT_BRSCAN_DIR/script -v -i
	else
		echo "script directory not found. Bad installation or incompatible version"
	fi
	echo cp brscan-skey-"$BINVERSION".cfg "$DEFAULT_BRSCAN_DIR" -v
	echo "Here are the package default options"
	echo "You may edit them at $DEFAULT_BRSCAN_DIR/brscan-skey-$BINVERSION.cfg\n"
	cat brscan-skey-"$BINVERSION".cfg
else
	echo "Default Brother brscan-skey directory does not exist at $DEFAULT_BRSCAN_DIR.\n Install and test brother drivers first."
fi
| true |
14aae5fd6088c17d0f567c407ee0202722f23d05 | Shell | nosleep77/scripts | /crd_jenkins_confluence/crd_scriptv3.sh | UTF-8 | 8,607 | 3.796875 | 4 | [] | no_license | #!/bin/bash
set -euo pipefail
# Compares GrafanaDashboard CRs found in the deployment folder against
# what is deployed in Kubernetes and what exists in Grafana, and writes
# an HTML report table to $Filename.  Exits non-zero if any mismatch.
GREEN='\033[0;32m'
RED='\033[0;31m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
NC='\033[0m'
# NOTE(review): only 4 args are required here but $5/$6 are used below;
# with set -u a 4-argument call aborts on the $5 expansion -- confirm
# whether the guard should be '-lt 6'.
if [ $# -lt 4 ]
then
    echo "Missing argument"
    exit 1
fi
Env=$1
Version=$2
Grafana_URL=$3
Token=$4
Namespace=$5
Filename=$6
exit_code=0
# Start the HTML report.
echo "<html><head>
<title>GrafanaDashboard check</title></head><body>" > $Filename
echo "<h1 style=\"text-align:center;color:blue;font-size:160%;\">GrafanaDashboard CR check </h1><p></p>" >> $Filename
echo "<table>
<tr>
    <th>Dashboard uid</th>
    <th>Crd file</th>
    <th>version</th>
    <th>Found in K8s</th>
    <th>Version Found in k8s</th>
    <th>Found in Grafana</th>
</tr>" >> $Filename
# ./script.sh <Env> <Version> <GrafanaURL> <Token>
#GET /api/search?dashboard_uid=<uid>
#GET /api/dashboards/uid/<ui>
#curl -s --location --request GET 'http://localhost:3000/api/search' --header "Authorization: Bearer $4" | jq -r ' .[] | select(.type == "dash-db") | .uid'
printf "\n"
# if [ "$Env" == "prod" ]; then
#     Grafana_URL="prod.grafana.url"
# elif [ "$Env" == "dev" ]; then
#     Grafana_URL="dev.grafana.url"
# else
#     echo "env not found"
#     exit 1
# fi
#Grabing all yaml files and store their relative path in array
array=()
all_yaml_files=$(find ./deployment/$Env/dashboards/ -type f -iname "*.yml" -or -iname "*.yaml" -print0 | tr '\0' ' ')
array=(`echo ${all_yaml_files}`)
#mapfile -d $'\0' array < <(find ./deployment/$Env/dashboards/ -type f -iname "*.yml" -or -iname "*.yaml" -print0)
#Filtering Yaml files. Keeping only Grafandashboard CRs
deployment_files_path=()
declare -A deployment_files_uids
declare -A deployment_files_vers
# For every YAML of kind GrafanaDashboard, record uid -> file and
# uid -> declared version.
for i in "${array[@]}"
do
    :
    x=$(myenv="GrafanaDashboard" yq e ' select(.kind == env(myenv)) | .kind' $i)
    if [ "$x" = "GrafanaDashboard" ]; then
        # "$i" = filename
        uid=$(yq -o=json eval $i | jq '.spec' | jq -r '.json' | jq -r '.uid')
        ver=$(yq -o=json eval $i | jq '.metadata.labels.version')
        deployment_files_vers["$uid"]+="$ver"
        deployment_files_uids["$uid"]+="$i"
        deployment_files_path+=($i)
    fi
done
for i in "${!deployment_files_vers[@]}"
do
    echo "${i}=${deployment_files_vers[$i]}"
done
len1=${#deployment_files_path[*]};
echo "Found ${len1} Yaml files of kind GrafanaDashboard in the deployment folder ./deployment/$Env/dashboards :"
#Printing the name of the Grafanadashboard CRs
for i in "${deployment_files_path[@]}"
do
    :
    printf " ${GREEN} $i ${NC}\n"
done
printf "\n"
#k8s_crs_names=$(kubectl get grafanadashboards -o jsonpath="{.items[*].metadata.name}")
#k8s_crs_names_array=($(echo "$k8s_crs_names" | tr ' ' '\n'))
k8s_uids_array=()
k8s_ver_array=()
#Store uid and version
#from k8s in seperate arrays
#for i in "${k8s_crs_names_array[@]}"
#do
    :
    #k8s_uids_array+=($(kubectl get grafanadashboard $i -o json | jq -r '.spec.json' | jq -r '.uid'))
    #k8s_ver_array+=($(kubectl get grafanadashboard $i -o json | jq -r '.metadata.labels.version'))
#done
###
# Fetch all deployed GrafanaDashboard CRs once and split uids/versions
# into parallel arrays (index-aligned).
kubectl_json_output=$(kubectl get grafanadashboards -o json -n ${Namespace})
#readarray -t k8s_ver_array < <( echo $kubectl_json_output | jq -r '.items[].metadata.labels.version')
#readarray -t k8s_uids_array < <( echo $kubectl_json_output | jq -r '.items[].spec.json' | jq -r '.uid')
###
k8s_vers_string=$(echo $kubectl_json_output | jq -r '.items[].metadata.labels.version')
k8s_uids_string=$(echo $kubectl_json_output | jq -r '.items[].spec.json' | jq -r '.uid' )
#k8s_uids_string=$(kubectl get grafanadashboards -n monitoring -o jsonpath="{.items[*].spec.json}" -n monitoring | jq '.uid')
k8s_uids_array=(`echo ${k8s_uids_string}`)
k8s_ver_array=(`echo ${k8s_vers_string}`)
#k8s_uids
#readarray -t k8s_ver_array < <( kubectl get grafanadashboards -o json | jq -r '.items[].metadata.labels.version')
#readarray -t k8s_uids_array < <( kubectl get grafanadashboards -o json | jq -r '.items[].spec.json' | jq -r '.uid')
#Checking k8s
# Ask Grafana for every dashboard uid it currently serves.
grafana_uids=$(curl -s --location --request GET "http://$Grafana_URL/api/search" --header "Authorization: Bearer $Token" | jq -r ' .[] | select(.type == "dash-db") | .uid')
grafana_uid_array=($(echo "$grafana_uids" | tr ' ' '\n'))
GrafanaColumn=" "
printf "${PURPLE}\t\t******** Grafana CR check ********\n${NC}"
# Pass 1: for each CR uid, record True/False (space-joined string,
# later split into GrafanaColumnArray) depending on Grafana presence.
for cr_uid in "${!deployment_files_uids[@]}"; do
#check_grafana
match_bool_grafana=0
for grafana_uid in "${grafana_uid_array[@]}"
do
    :
    if [ "$grafana_uid" == "$cr_uid" ] ; then
        printf "${BLUE}\n Dashboard with UID: $cr_uid and version: $Version in Grafana - filename: ${deployment_files_uids[$cr_uid]}- found\n${NC}"
        GrafanaColumn+=$(echo " True ")
        match_bool_grafana=1
    fi
done
if [ $match_bool_grafana -eq "0" ] ; then
    printf "${RED}\n Dashboard with UID: $cr_uid and version: $Version in Grafana - filename:${deployment_files_uids[$cr_uid]} -not found ${NC}\n"
    GrafanaColumn+=$(echo " False ")
    exit_code=1
fi
done
GrafanaColumnArray=(`echo ${GrafanaColumn}`)
iter=0
printf "${PURPLE}\t\t******** kubernetes CR check ********\n${NC}"
# Pass 2: for each CR uid, check Kubernetes for a uid match and then a
# version match, emitting one HTML table row per CR.  NOTE(review):
# this relies on associative-array key iteration order being identical
# to pass 1 so GrafanaColumnArray[iter] lines up -- confirm.
for cr_uid in "${!deployment_files_uids[@]}"; do
match_bool_uid=0
match_bool_version=0
for j in "${!k8s_uids_array[@]}"; do
    if [ "${k8s_uids_array[$j]}" == "$cr_uid" ] ; then
        match_bool_uid=1
        # If the above condition is evaluated to true when an UID match is found
        # The followong condition will check if there is a version match as well
        if [ "${k8s_ver_array[$j]}" == "$Version" ] ; then
            printf "${BLUE}\ndashboard with UID: $cr_uid and version: $Version in kubernetes - filename:${deployment_files_uids[$cr_uid]} - found \n${NC}"
            echo "<tr>
            <td>$cr_uid</td>
            <td>${deployment_files_uids[$cr_uid]}</td>
            <td>${deployment_files_vers[$cr_uid]} </td>
            <td style=\"color:blue;\"> True </td>
            <td style=\"color:blue;\"> True</td>" >> $Filename
            if [ "${GrafanaColumnArray[$iter]}" == "False" ] ; then
                echo " <td style=\"color:red;\">${GrafanaColumnArray[$iter]}</td> " >> $Filename
            else
                echo " <td style=\"color:blue;\">${GrafanaColumnArray[$iter]}</td> " >> $Filename
            fi
            echo "</tr> " >> $Filename
            match_bool_version=1
            break;
        fi
    fi
done
if [ $match_bool_uid -eq "1" ] && [ $match_bool_version -eq "0" ]; then
    printf "${RED}\ndashboard with UID: $cr_uid and version: $Version in kubernetes - ${deployment_files_uids[$cr_uid]} - Not found \n${NC}"
    echo "<tr>
    <td> $cr_uid</td>
    <td> ${deployment_files_uids[$cr_uid]} </td>
    <td> ${deployment_files_vers[$cr_uid]} </td>
    <td style=\"color:blue;\"> True </td>
    <td style=\"color:red;\"> False </td>" >> $Filename
    if [ "${GrafanaColumnArray[$iter]}" == "False" ] ; then
        echo " <td style=\"color:red;\">${GrafanaColumnArray[$iter]}</td> " >> $Filename
    else
        echo " <td style=\"color:blue;\">${GrafanaColumnArray[$iter]}</td> " >> $Filename
    fi
    echo "</tr> " >> $Filename
    exit_code=1
fi
if [ $match_bool_uid -eq "0" ] && [ $match_bool_version -eq "0" ]; then
    printf "${RED}\ndashboard with UID: $cr_uid and version: $Version in kubernetes - ${deployment_files_uids[$cr_uid]} - Not found \n${NC}"
    echo "<tr>
    <td>$cr_uid</td>
    <td>${deployment_files_uids[$cr_uid]}</td>
    <td>${deployment_files_vers[$cr_uid]} </td>
    <td style=\"color:red;\"> False </td>
    <td style=\"color:red;\"> False </td>" >> $Filename
    if [ "${GrafanaColumnArray[$iter]}" == "False" ] ; then
        echo " <td style=\"color:red;\">${GrafanaColumnArray[$iter]}</td> " >> $Filename
    fi
    if [ "${GrafanaColumnArray[$iter]}" == "True" ] ; then
        echo " <td style=\"color:blue;\">${GrafanaColumnArray[$iter]}</td> " >> $Filename
    fi
    echo "</tr> " >> $Filename
    exit_code=1
fi
iter=$((iter+1))
done
echo "</table> </body></html>" >> $Filename
exit $exit_code
| true |
757875e35972c9759fbd2f81e4a4bf47b79bedbe | Shell | xamuko/si | /scripts/ejercicio12.sh | UTF-8 | 157 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# Report whether the current user is "manolo".
#
# BUG FIX: the original compared $HOME (a path such as /home/manolo)
# against the literal "manolo", so the first branch could never match,
# and BOTH branches exited with status 1.  Compare the login name
# instead and use conventional exit codes (0 = is manolo, 1 = is not).
if [ "${USER:-$(id -un)}" = manolo ]
then
    echo "Este usuario es manolo."
    exit 0
else
    echo "Este usuario no es manolo."
    exit 1
fi
| true |
c81f39b5e8d4fb0441d1d702fa4b78cf4e991a89 | Shell | YancyW/MyProcessor | /bin/analysis_all.sh | UTF-8 | 1,009 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# Dispatch analysis_mass.sh over every event class implied by the
# requested class group.
#
# usage: analysis_all.sh <class group> <final state> <mass>
if [[ $# != 3 ]] ; then
    echo "usage: ./analysis_all.sh input_class_name (all/2f/4f/higgs/4f_WW/4f_ZZ..) final_state (l/sl/h/lep/e2/e1..) "
    exit
fi
# Normalise all arguments to lower case (bash ,, expansion).
input_class_name=${1,,}
final_state=${2,,}
mass=${3,,}
# Directory holding this script (and its sibling analysis_mass.sh).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Predefined groups of event classes.
four=("4f_ww" "4f_zz" "4f_zzww" "4f_szee" "4f_sznn" "4f_szsw" "4f_sw")
two=("2f_z")
all=("4f_ww" "4f_zz" "4f_zzww" "4f_szee" "4f_sznn" "4f_szsw" "4f_sw" "2f_z" "nh_${mass}")
higgs=("higgs")
nh=("nh")
# Map the requested group onto its class list; anything else is treated
# as a single literal class name.
case "${input_class_name}" in
    4f)    class_names=("${four[@]}") ;;
    2f)    class_names=("${two[@]}") ;;
    all)   class_names=("${all[@]}") ;;
    higgs) class_names=("${higgs[@]}") ;;
    *)     class_names=("${input_class_name}") ;;
esac
# Run the per-class analysis for every selected class.
for class_name in "${class_names[@]}"
do
    ${DIR}/analysis_mass.sh ${class_name} ${final_state} ${mass}
done
| true |
626168c5d1f08f0eefd06c6d18c01f78432ce1f8 | Shell | songtaogui/pan-Zea_utilities | /PANZ_magma.sh | UTF-8 | 9,853 | 3.765625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# set -o xtrace
# set -o errexit
set -o nounset
set -o pipefail
# >>>>>>>>>>>>>>>>>>>>>>>> Common functions >>>>>>>>>>>>>>>>>>>>>>>>
gst_log () {
    # Log a message to stderr, prefixed with a cyan [yy-mm-dd HH:MM] stamp.
    local message=$1
    local stamp
    stamp=$(date +'%y-%m-%d %H:%M')
    echo -e "\033[36m[${stamp}]\033[0m ${message}" >&2
}
gst_rcd () {
    # Progress/record line: green arrow prefix, written to stderr.
    local message=$1
    echo -e "\033[32m>>>------------>\033[0m ${message}" >&2
}
gst_err () {
    # Error line: red inverse [ERROR] tag, written to stderr.
    local message=$1
    echo -e "\033[31m\033[7m[ERROR]\033[0m --> ${message}" >&2
}
gst_warn () {
    # Warning line: magenta [WARNING] tag, written to stderr.
    local message=$1
    echo -e "\033[35m[WARNING]\033[0m --> ${message}" >&2
}
check_files_exists(){
    # Verify every argument names an existing, non-empty file; report all
    # missing ones on stderr and exit 1 if any were missing.
    # Fixes vs. original: stray trailing '$' removed from the message,
    # 'let' replaced with portable arithmetic, and the success path now
    # returns 0 (the original fell through with status 1 even when all
    # files existed, because '[ 1 -ne 1 ] && exit 1' yields status 1).
    local num_missing=0
    local related_file=""
    for related_file in "$@"
    do
        if [[ ! -s "$related_file" ]]; then
            echo -e "\033[31m\033[7m[ERROR]\033[0m --> No file: $related_file" >&2
            num_missing=$((num_missing + 1))
        fi
    done
    [ "$num_missing" -gt 0 ] && exit 1
    return 0
}
check_abs_path() {
    # Ensure every argument is an absolute path (starts with '/');
    # print an error for each offender and exit 1 if any were found.
    local offenders=1
    local candidate=""
    for candidate in "$@"; do
        case "$candidate" in
            /*) ;;  # absolute -- fine
            *)
                echo -e "\033[31m\033[7m[ERROR]\033[0m --> $candidate was not an ABSOLUTE path." >&2
                offenders=$((offenders + 1))
                ;;
        esac
    done
    [ "$offenders" -ne 1 ] && exit 1
}
check_sftw_path(){
    # Verify every named program is available on PATH; report all missing
    # ones on stderr and exit 1 if any were.
    # Fixes vs. original: 'which' (external, not always installed)
    # replaced by the POSIX 'command -v' builtin, stray trailing '$'
    # removed from the message, argument quoted, and the success path
    # now returns 0 instead of falling through with status 1.
    local num_missing=0
    local tp_program=""
    for tp_program in "$@"
    do
        if ! command -v "$tp_program" >/dev/null 2>&1 ; then
            echo -e "\033[31m\033[7m[ERROR]\033[0m --> Program not in PATH: $tp_program" >&2
            num_missing=$((num_missing + 1))
        fi
    done
    [ "$num_missing" -gt 0 ] && exit 1
    return 0
}
check_var_empty () {
    # Given variable *names*, look up each value (via eval, as in the
    # rest of this library) and exit 1 if any resolved to empty.
    local bad_count=1
    local var_name=""
    local var=""
    for var_name in "$@"; do
        var=$(eval echo "$"$var_name)
        if [[ -z ${var} ]]; then
            echo -e "\033[31m\033[7m[ERROR]\033[0m --> $var_name is empty: '$var' " >&2
            bad_count=$((bad_count + 1))
        fi
    done
    [ "$bad_count" -ne 1 ] && exit 1
}
check_var_numeric () {
    # Given variable *names*, verify each value is a plain (optionally
    # signed) number: non-empty, at most one decimal point, digits and
    # '.' only.  Prints one error per offending variable and exits 1 if
    # any failed.  Note: like its siblings, the all-good path falls
    # through with the status of the final test (non-zero).
    local var_cc=1
    local var_name=""
    local var=""
    for var_name in "$@"; do
        var=$(eval echo "$"$var_name)
        # add ${var#prefix} substitution to trim sign
        # Case arms are ordered: empty, two-plus decimal points, then
        # any non-digit/non-dot character; first match wins.
        case ${var#[-+]} in
            '')
                echo -e "\033[31m\033[7m[ERROR]\033[0m --> $var_name is empty: '$var' " >&2
                let var_cc++ ;;
            *.*.*)
                echo -e "\033[31m\033[7m[ERROR]\033[0m --> $var_name has more than one decimal point: '$var' " >&2
                let var_cc++ ;;
            *[!0-9.]*)
                echo -e "\033[31m\033[7m[ERROR]\033[0m --> $var_name has a non-digit somewhere in it: '$var' " >&2
                let var_cc++ ;;
            *) ;;
        esac >&2
    done
    [ "$var_cc" -ne 1 ] && exit 1
}
check_suffix () {
    # Assert that file $1 ends with suffix $2 (e.g. "vcf"); exit 1 with a
    # message on stderr otherwise.
    # Fixes vs. original: variables made local so the helper no longer
    # leaks globals, and "$1" quoted so paths with spaces survive basename.
    local check_suffix_file
    local check_suffix
    check_suffix_file=$( basename "$1" )
    check_suffix=$2
    # add x in case the file has no suffix at all
    if [[ "${check_suffix_file##*.}"x != "$check_suffix"x ]];then
        echo "[ERROR] --> $check_suffix_file should have suffix: '$check_suffix'." >&2
        exit 1
    fi
}
export -f gst_log gst_rcd gst_warn gst_err check_var_empty check_var_numeric check_sftw_path check_suffix check_files_exists check_abs_path
# <<<<<<<<<<<<<<<<<<<<<<<< Common functions <<<<<<<<<<<<<<<<<<<<<<<<
usage=$(
cat <<EOF
------------------------------------------------------------
Perform regional association analysis of genic regions for PANZ.
A wrapper of MAGMA
------------------------------------------------------------
Dependence: MAGMA plink
------------------------------------------------------------
USAGE:
bash $(basename $0) [OPTIONS]
OPTIONS: ([R]:required [O]:optional)
-h, --help show help and exit.
-r, --ref <str> [R] Plink bed prefix of all variants
--annote <str> [R] Gene-Variant Annotation file in format:
<Gene_ID> <Variants_sep_by_space>
Gene1 SNP1 SV2 INDEL3
... ...
--gene_sets <str> [R] Gene set file in format:
<Gene_SET_ID> <Genes_sep_by_space>
Gene_family_1 Gene1 Gene2 Gene3
... ...
--pheno <str> [R] Phenotype in plink format
*** NOTE: Phenotype should not contain duplicate samples,
and the first Phenotype should not be "NA"
--covar <str> [R] covariant in plink format
--region <str> [O] Physical region included for the analysis,
in format "CHR:START-END", use all variants
if not provided.
-o, --out <str> [O] Output prefix, will create a dir accordingly for the outputs (default: PANZ_Gene_GWAS_out)
------------------------------------------------------------
Author: Songtao Gui
E-mail: songtaogui@sina.com
EOF
)
if [[ $# -eq 0 ]]; then
    echo "$usage" >&2
    exit 1
fi
# >>>>>>>>>>>>>>>>>>>>>>>> Parse Options >>>>>>>>>>>>>>>>>>>>>>>>
# Set Default Opt
export annote=""
export ref=""
export gene_sets=""
export pheno=""
export covar=""
export region=""
export out="PANZ_Gene_GWAS_out"
# parse args
UNKOWN_ARGS=()
while [[ $# > 0 ]]; do
    case "$1" in
        -h|--help)
            echo "$usage" >&2
            exit 1
            ;;
        -r|--ref)
            #echo "set argument \"$1\" with value: $2" >&2
            ref=$2
            shift 2
            ;;
        --annote)
            #echo "set argument \"$1\" with value: $2" >&2
            annote=$2
            shift 2
            ;;
        --gene_sets)
            #echo "set argument \"$1\" with value: $2" >&2
            gene_sets=$2
            shift 2
            ;;
        --pheno)
            pheno=$2
            shift 2
            ;;
        --covar)
            #echo "set argument \"$1\" with value: $2" >&2
            covar=$2
            shift 2
            ;;
        --region)
            region=$2
            shift 2
            ;;
        -o|--out)
            out=$2
            shift 2
            ;;
        *) # unknown flag/switch
            UNKOWN_ARGS+=("$1")
            shift
            ;;
    esac
done
if [ "${#UNKOWN_ARGS[@]}" -gt 0 ];then
    echo "[ERROR] --> Wrong options: \"${UNKOWN_ARGS[@]}\"" >&2
    exit 1
fi
unset UNKOWN_ARGS # restore UNKOWN_ARGS params
# ! Check if required vars are legal
check_var_empty ref annote gene_sets pheno covar out
check_files_exists $ref.bed $ref.bim $ref.fam $annote $gene_sets $pheno $covar
# --region, when given, must look like CHR:START-END.
if [ -n "$region" ]; then
    echo "$region" | grep -P "^\S+\:\d+\-\d+$" > /dev/null
    if [ $? -ne 0 ];then gst_err "Wrong region format: $region."; exit 1;fi
fi
# <<<<<<<<<<<<<<<<<<<<<<<< Parse Options <<<<<<<<<<<<<<<<<<<<<<<<
# >>>>>>>>>>>>>>>>>>>>>>>> Main >>>>>>>>>>>>>>>>>>>>>>>>
# Three checkpointed stages; each writes a *_done marker in $out so a
# re-run skips completed work.
gst_log "All start. Will output to $out ..."
# ? make output dir
mkdir -p $out
gst_rcd "Get ref ..."
if [ ! -s "$out/get_ref_done" ];then
    # ? get ref bed
    if [ -n "$region" ];then
        gst_rcd "Subset with $region ..."
        echo "$region" | sed 's/[\:\-]/\t/g' | while read chr start end
        do
            export chr start end
            gst_rcd "Get ref bed ..."
            plink --make-bed --bfile $ref --chr $chr --from-bp $start --to-bp $end --out $out/ref 1>$out/plink.log 2>&1
            if [ $? -ne 0 ];then gst_err "subset plink bed failed: check $out/plink.log for details";rm -f $out/ref.{bed,bim,fam}; exit 1;fi
            gst_rcd "get ref annote .."
            # Keep annotation rows whose gene interval (chr:start:end in
            # column 2) overlaps the requested region (overlap test:
            # max(starts) <= min(ends)); chr/start/end reach perl via env.
            cat $annote | perl -F"\t" -lane '
            BEGIN{use List::Util qw/max min/;$,="\t";}
            if(/^#/){print;next;}
            ($c,$s,$e)=split(":",$F[1]);
            #max(A.start,B.start)<=min(A.end,B.end)
            if( $c eq $ENV{chr} && max($s,$ENV{start})<=min($e,$ENV{end}) ){
                print @F;
            }
            ' > $out/ref.v_annote
            if [ $? -ne 0 ];then gst_err "get subset annote failed: Non-zero exit";rm -f $out/ref.v_annote; exit 1;fi
        done
    else
        # No region: just link the full reference set into $out.
        ln -f -s $ref.bed $out/ref.bed &&\
        ln -f -s $ref.bim $out/ref.bim &&\
        ln -f -s $ref.fam $out/ref.fam &&\
        ln -f -s $annote $out/ref.v_annote
        if [ $? -ne 0 ];then gst_err "link ref failed: Non-zero exit";rm -f $out/ref.*; exit 1;fi
    fi
    if [ $? -ne 0 ];then gst_err "Get ref failed: Non-zero exit"; exit 1;fi
    echo "done" > $out/get_ref_done
else
    echo -e "\033[35m[WARNING]\033[0m --> Already done, skip running." >&2
fi
gst_rcd "Gene analysis ..."
magma="magma"
if [ ! -s "$out/gene_ana_done" ];then
    # magma --bfile $out/ref --covar file=$covar --pheno file=$pheno --gene-annot $out/ref.v_annote --gene-settings snp-max-miss=0.25 adap-permp --seed 1234 --out $out/${magma} 1> $out/gene_ana.log 2>&1
    magma --bfile $out/ref --covar file=$covar --pheno file=$pheno --gene-annot $out/ref.v_annote --gene-settings snp-max-miss=0.25 --out $out/${magma} 1> $out/gene_ana.log 2>&1
    if [ $? -ne 0 ];then gst_err "gene_analysis failed: check $out/gene_ana.log for details"; exit 1;fi
    echo "done" > $out/gene_ana_done
else
    echo -e "\033[35m[WARNING]\033[0m --> Already done, skip running." >&2
fi
gst_rcd "Gene set analysis ..."
if [ ! -s "$out/gene_set_done" ];then
    magma --gene-results $out/${magma}.genes.raw --set-annot $gene_sets --out $out/$magma 1> $out/gene_set.log 2>&1
    if [ $? -ne 0 ];then gst_err "gene set analysis failed: check $out/gene_set.log for details"; exit 1;fi
    echo "done" > $out/gene_set_done
else
    echo -e "\033[35m[WARNING]\033[0m --> Already done, skip running." >&2
fi
gst_log "All Done!"
# <<<<<<<<<<<<<<<<<<<<<<<< Main <<<<<<<<<<<<<<<<<<<<<<<<
| true |
d01aefe5209caa866480d19a974b00a677443c7c | Shell | sugar-activities/4503-activity | /opera/package/lib/xml.sh | UTF-8 | 2,135 | 3.421875 | 3 | [] | no_license | # -*- mode: sh -*-
xml_parse()
{
  # Minimal character-at-a-time XML parser for POSIX sh.
  # Reads the document from stdin and, for every leaf element, assigns
  # its text content to a shell variable whose name is the underscore-
  # joined tag path (e.g. <a><b>x</b></a> sets a_b=x).  Entities
  # lt/gt/amp/quot are expanded via xml_expandent; <?...?> and <!--...-->
  # are skipped; attributes are ignored.
  # State variable legend:
  #   state       - current automaton state (outside/ent/intag/tagname/
  #                 tagskip/comment1..5)
  #   line        - unconsumed remainder of the current input line
  #   char        - the single character being processed ("\n" at EOL)
  #   content     - text accumulated for the current element
  #   ent         - entity name accumulated after '&'
  #   tagname     - tag name accumulated after '<'
  #   nesting     - underscore-joined path of currently open tags
  #   haschildren - whether the current element contained child tags
  local state line char content ent tagname nesting haschildren
  state=outside
  line=''
  content=''
  nesting=''
  haschildren=false
  while true; do
    if [ -z "$line" ]; then
      char="
"
    else
      char=$(printf '%s\n' "$line" | cut -c 1)
    fi
    # Dispatch on (state, current character).
    case "$state:$char" in
    outside:'<')
      state=intag
      ;;
    outside:'&')
      state=ent
      ent=''
      ;;
    outside:*)
      content=$content$char
      ;;
    ent:[a-z])
      ent=$ent$char
      ;;
    ent:';')
      content=$content$(xml_expandent "$ent")
      state=outside
      ;;
    ent:*)
      # Not a well-formed entity: keep the raw '&name' text.
      content=$content'&'$ent
      state=outside
      ;;
    intag:'?')
      state=tagskip
      ;;
    intag:'!')
      state=comment1
      ;;
    intag:[a-zA-Z])
      tagname=$char
      state=tagname
      ;;
    intag:/)
      # Closing tag: store the content for leaf elements, then pop one
      # level off the nesting path.
      if ! $haschildren; then
        eval $nesting='$content'
      fi
      nesting=${nesting%_*}
      haschildren=true
      state=tagskip
      ;;
    intag:*)
      ;;
    tagname:[_a-zA-Z0-9])
      tagname=$tagname$char
      ;;
    tagname:-)
      ;;
    tagname:/)
      # Self-closing tag: push, assign empty, pop immediately.
      nesting=$nesting${nesting:+_}$tagname
      eval $nesting=
      nesting=${nesting%_*}
      haschildren=true
      state=tagskip
      ;;
    tagname:'>')
      # Open tag complete: descend and start collecting fresh content.
      nesting=$nesting${nesting:+_}$tagname
      haschildren=false
      content=''
      state=outside
      ;;
    tagname:*)
      # Tag has attributes: descend, then skip the rest of the tag.
      nesting=$nesting${nesting:+_}$tagname
      haschildren=false
      state=tagskip
      ;;
    tagskip:/)
      eval $nesting=
      nesting=${nesting%_*}
      haschildren=true
      ;;
    tagskip:'>')
      content=''
      state=outside
      ;;
    tagskip:*)
      ;;
    comment1:-)
      state=comment2
      ;;
    comment2:-)
      state=comment3
      ;;
    comment3:-)
      state=comment4
      ;;
    comment4:-)
      state=comment5
      ;;
    comment4:*)
      state=comment3
      ;;
    comment5:-)
      state=comment2
      ;;
    comment5:'>')
      state=outside
      ;;
    esac
    # Consume the processed character, refilling from stdin at EOL;
    # EOF terminates the loop.
    if [ -z "$line" ]; then
      if ! read -r line; then
        break
      fi
    else
      line=$(printf '%s\n' "$line" | cut -c 2-)
    fi
  done
}
xml_expandent()
{
  # Map a parsed XML entity name to its literal character.  Unknown
  # entities are reproduced verbatim as "&name;" so no input is lost.
  case "$1" in
  lt)   echo '<' ;;
  gt)   echo '>' ;;
  amp)  echo '&' ;;
  quot) echo '"' ;;
  *)    echo "&$1;" ;;
  esac
}
| true |
d59fca766f7670f94e44344f0b822fc8d4754600 | Shell | particleman314/ShellLibrary | /test/file_assertions/assert_is_file.sh | UTF-8 | 410 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Test script for the project's assert_is_file assertion.
# Exercises three cases: no path argument (must fail), an existing
# sibling test script (must pass), and a directory (must fail).
# Relies on the SLCF assertion framework being sourced by the runner.
dir="${SUBSYSTEM_TEMPORARY_DIR}"
file="${SLCF_SHELL_TOP}/test/file_assertions/assert_is_directory.sh"
# Deliberately unquoted below so the options word-split into separate args.
std_opts="--suppress ${YES} --dnr"
# No file argument: the assertion itself must record a failure.
assert_is_file ${std_opts}
assert_failure "$( __get_last_result )"
# A real file: must record success.
assert_is_file ${std_opts} "${file}"
assert_success "$( __get_last_result )"
# A directory is not a file: must record a failure.
assert_is_file ${std_opts} "${dir}"
assert_failure "$( __get_last_result )"
__reset_assertion_counters
| true |
666489c87acee6837acc95038d23893673f644cc | Shell | mdehollander/longread-UMI-pipeline | /longread_UMI_mockanalysis.sh | UTF-8 | 2,448 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# DESCRIPTION
#    Script for processing Nanopore UMI amplicon data
#    from Zymo mock bacterial rRNA genes.
#
# IMPLEMENTATION
#    author	Søren Karst (sorenkarst@gmail.com)
#               Ryans Ziels (ziels@mail.ubc.ca)
#    license	GNU General Public License
### Source commands and subscripts -------------------------------------
# dependencies.sh provides the tool variables used below
# ($SEQTK, $MINIMAP2, $GAWK, $SAMTOOLS, $USEARCH, $REF, $REF_VENDOR).
export PIPELINE_PATH="$(dirname "$(readlink -f "$0")")"
. $PIPELINE_PATH/scripts/dependencies.sh # Path to dependencies script
THREADS=${1:-60}
### Data processing -----------------------------------------------------
# Validation
mkdir validation
### Prepare binning statistics
cp umi_binning/read_binning/umi_bin_map.txt validation/
cp umi_binning/umi_ref/umi_ref.txt validation/
### Prepare read data
# Fixed-seed subsample of 5000 raw reads, converted to FASTA.
$SEQTK sample -s1334 reads.fq 5000 |\
  $SEQTK seq -a - \
  > validation/reads.fa
# Classify trimmed reads against the reference; keep primary alignments
# (tp:A:P) and record read -> genus_species (first two '_' fields).
$MINIMAP2 \
  -x map-ont \
  $REF \
  umi_binning/trim/reads_tf.fq -t $THREADS |\
  $GAWK '$13 ~ /tp:A:P/{split($6,tax,"_"); print $1, tax[1]"_"tax[2]}'\
  > validation/read_classification.txt
### Prepare consensus data
cp ./consensus_sracon_medaka_medaka.fa validation/
cp ./variants/variants_all.fa validation/
### Mapping
# Map each FASTA against the reference; -F 2308 drops unmapped,
# secondary and supplementary records; keep columns incl. the cs tag.
for DATA_FILE in validation/*.fa; do
  DATA_NAME=${DATA_FILE%.*};
  DATA_NAME=${DATA_NAME##*/};
  $MINIMAP2 -ax map-ont $REF $DATA_FILE -t $THREADS --cs |\
    $SAMTOOLS view -F 2308 - |\
    cut -f1-9,12,21 > validation/$DATA_NAME.sam
done
# Map consensus sequences against the detected variants.
$MINIMAP2 \
  -ax map-ont \
  validation/variants_all.fa \
  consensus_sracon_medaka_medaka.fa \
  -t $THREADS --cs |\
  $SAMTOOLS view -F 2308 - |\
  cut -f1-9,12,21 \
  > validation/consensus_sracon_medaka_medaka_variants.sam
# Map consensus sequences against the vendor-supplied reference.
$MINIMAP2 \
  -ax map-ont \
  $REF_VENDOR \
  consensus_sracon_medaka_medaka.fa \
  -t $THREADS --cs |\
  $SAMTOOLS view -F 2308 - |\
  cut -f1-9,12,21 \
  > validation/consensus_sracon_medaka_medaka_vendor.sam
### Copy refs
cp $REF validation/
cp $REF_VENDOR validation/
### Detect chimeras
$USEARCH -uchime2_ref consensus_sracon_medaka_medaka.fa \
  -uchimeout validation/consensus_sracon_medaka_medaka_chimera.txt \
  -db $REF \
  -strand plus \
  -mode sensitive
### Read stats
# Emit "<sample>,<read count>,<total bp>,<mean length>" for FASTQ $1,
# labelled $2 (sequence lines are every 4th line starting at line 2).
fastq_stats(){
  awk -v sample="$2" '
    NR%4==2{
      rc++
      bp+=length
    } END {
      print sample","rc","bp","bp/rc
    }
  ' $1
}
echo "data_type,read_count,bp_total,bp_average" > validation/data_stats.txt
fastq_stats ./reads.fq raw >> validation/data_stats.txt
fastq_stats ./umi_binning/trim/reads_tf.fq trim >> validation/data_stats.txt
| true |
d5030ad224b512af0cb29d613d18fb6acf9e9637 | Shell | neomatrix1993/riak-scripts | /riak_conf.sh | UTF-8 | 538 | 2.625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Point riak.conf at this node's eth0 IPv4 address and enable search.
# NOTE(review): the 'inet addr:' pattern assumes the legacy net-tools
# ifconfig output format -- confirm on the target distribution.
local_host=$(ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}')
# Apply all substitutions in a single sed invocation so the config file
# is rewritten once instead of four times; expansions are quoted.
sed -i \
    -e 's/nodename = riak@127.0.0.1/nodename = riak@'"$local_host"'/g' \
    -e 's/listener.http.internal = 127.0.0.1:8098/listener.http.internal = '"$local_host"':8098/g' \
    -e 's/listener.protobuf.internal = 127.0.0.1:8087/listener.protobuf.internal = '"$local_host"':8087/g' \
    -e 's/search = off/search = on/g' \
    /etc/riak/riak.conf
echo "riak.conf updated with ip : $local_host"
| true |
4cbd241c6d0889d66c31b3c8044a281b5949afbc | Shell | cnJun/LetsEncryptInstall | /bin/input.sh | UTF-8 | 272 | 3.515625 | 4 | [] | no_license |
# Resolve the configuration name: take it from $1 or, failing that,
# prompt for it interactively.
cfg="$1"
if [ -z "$cfg" ]; then
	read -p "please enter config filename:" cfg
fi
# Still nothing? Give up.
if [ -z "$cfg" ]; then
	echo "please input config filename!"
	exit
fi
# The name must correspond to a ./<name>.sh file next to this script.
if [ ! -f "./$cfg.sh" ]; then
	echo "this config filename not exist!"
	exit
fi
# Pull the selected configuration into the current shell.
source "./$cfg.sh"
| true |
57837f8c952e0867c1db2ae539f7be321f60c657 | Shell | ngkz/dotfiles | /packages/hotkey-scripts/brightness.sh | UTF-8 | 375 | 3.265625 | 3 | [
"MIT"
] | permissive | #!@bash@/bin/bash
set -euo pipefail
# Hotkey handler: step the backlight up or down with `light` and show a
# synchronous notification with the new level.
# @path@ (like @bash@ in the shebang) is a build-time substitution
# placeholder -- presumably Nix; confirm against the package definition.
PATH=@path@
# $1 is the requested direction; with `set -u` a missing argument aborts.
case "$1" in
    up)
        light -A 5
        summary="Brightness up"
        ;;
    down)
        light -U 5
        summary="Brightness down"
        ;;
esac
# Current brightness, truncated to the integer percent.
brightness=$(light -G | cut -d. -f1)
body="${brightness}%"
# synchronous hint replaces the previous brightness popup in place.
notify-send -a brightness -i display-brightness-symbolic -h "int:value:$brightness" -h string:synchronous:brightness -t 3000 -e "$summary" "$body"
| true |
8bacfacbaed02744ea8fd5f4b3cf1ac6efb886fd | Shell | rjgonza/Lab | /bash/until_file_exists.sh | UTF-8 | 194 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Keep prompting until the user names an existing file.
# Fixes vs. original: "dose" -> "does" in the retry prompt; read -r so
# backslashes in filenames are taken literally; bail out on EOF instead
# of busy-looping forever when stdin is closed.
read -r -p "Please enter the name of a file: " file
until [[ -e $file ]]; do
    read -r -p "$file does not exist, please try again: " file || exit 1
done
echo "Finally, you got one, $file exists!"
| true |
02aceae600abdd6e7dcbfd1499fc1b1e9a76db92 | Shell | vlamenco/neuroimaging | /collateValues | UTF-8 | 2,219 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# formerly collateResults
# N.B. IF ONLY A SINGLE VARIABLE IS NEEDED, DO THIS:
# cat /projects2/udall/task/fnirt/RC4???/pbAll.gfeat/cope1.feat/stats/cope10z1c2.txt>pdhcActC2nocue.csv
# Collates single-value .txt files scattered across subject directories
# into one CSV table: rows = subjects/dirs, columns = file names.
if [ "x$1" = "x" ]; then
	echo 'collateValues - collect single values across directories into a table'
	echo 'Usage:'
	echo $0 'collateValues pathOut [header] -b file1prefix "file1dirs" [-b file2prefix "file2dirs"] ...'
	echo 'inputs:'
	echo 'pathOut = output file'
	echo 'header = print column header'
	echo 'file1prefix = filename prefix preceeding {index}.txt'
	echo 'file1dirs = list of directories for file1'
	echo '* list lengths MUST BE EQUAL'
	echo '* number of files in each directory MUST BE EQUAL'
	echo '* ALL FILES MUST END IN .txt'
	echo '* fileDirs MUST BE IN QUOTES'
else
	pathOut=$1
	# Truncate the output file before appending below.
	echo -n "" > $pathOut
	shift
	if [ $1 == header ]; then
		header=1
		shift
	else
		header=0
	fi
	# Consume repeated "-b <prefix> <quoted dir list>" groups.
	while (( $# )); do
		if [ $1 == -b ]; then
			filePrefix=$2
			shift 2
		fi
		if [ -z $filePrefix ]; then
			echo 'empty filePrefix'
			exit
		else
			# Word-split the quoted directory list into an array.
			declare -a fileDirs=($1)
			nrDirs=${#fileDirs[@]}
			# NOTE(review): file names are listed from fileDirs[1], the
			# SECOND directory (bash arrays are 0-based) -- confirm this
			# is intentional and not an off-by-one.
			declare -a fileNames=($((ls ${fileDirs[1]}/${filePrefix}*.txt)|sed 's|'${fileDirs[1]}'/||'g))
			nrFiles=${#fileNames[@]}
			# One output row per file; one space-separated value per dir.
			for fileNr in $(seq 0 $(($nrFiles-1))); do
				if (( header )); then
					echo -n $(basename ${fileNames[$fileNr]} .txt) >> $pathOut
				fi
				for dirNr in $(seq 0 $(($nrDirs-1))); do
					echo -n \  >> $pathOut
					echo -n $(cat ${fileDirs[$dirNr]}/${fileNames[$fileNr]}) >> $pathOut
				done
				echo >> $pathOut
			done
			shift
		fi
	done
	# Transpose (rows<->columns) and convert spaces to commas for CSV.
	cat ${pathOut}|transpose|sed 's/ /,/g'>${pathOut}_transpose
	mv ${pathOut}_transpose ${pathOut}
fi
# EXAMPLES
# 2106 collateValues clusters1-18.csv -b c2z2c '/projects2/udall/standardFC/RC4???/cluster2.gfeat/cope1.feat/stats' -b c4z2c '/projects2/udall/standardFC/RC4???/cluster4.gfeat/cope1.feat/stats' -b c2z2c1z1c '/projects2/udall/standardFC/RC4???/cluster2fc1.gfeat/cope1.feat/stats' -b c2z2c1z2c '/projects2/udall/standardFC/RC4???/cluster2fc1.gfeat/cope1.feat/stats' -b c4z2c1z1c '/projects2/udall/standardFC/RC4???/cluster4fc1.gfeat/cope1.feat/stats' -b c2z2c2z2c '/projects2/udall/standardFC/RC4???/cluster2fc2.gfeat/cope1.feat/stats'
| true |
053d920a0b75bfc5d6e55ef546cf931f6271d931 | Shell | ipubrich/labtest | /lab_management/lab_stop.sh | UTF-8 | 3,942 | 4.0625 | 4 | [] | no_license | #!/usr/bin/env bash
##
## 01 remove bridges in var
## 02 bring up docker
## 03 bring up containers in var
## 04 build VM's
## 05 update iptable rules
##
# Variables
LOG_FILE="./log.txt" ### LOG FILE
BRIDGES=('br111' 'br112' 'br113' 'br114' 'br115')
CONTAINERS=('nnat_ftp')
TEMP_FILE='/tmp/lab_management.txt'
# User-defined functions
# Delete a bridge: bring the NetworkManager connection down, then remove
# its definition entirely.
function delete_bridge() {
    # $1 - connection (bridge) name; quoted so it cannot word-split or glob.
    nmcli connection down "${1}"
    nmcli connection delete id "${1}"
}
# Body
###### STEP 01 START ######
# Delete every configured bridge that currently exists in NetworkManager.
echo "$(date)//01 BRIDGES//Burning bridges..." >> ${LOG_FILE} # timestamp beginning to file
EXISTING_BRIDGES=$(nmcli connection show > ${TEMP_FILE} && cat ${TEMP_FILE})
rm ${TEMP_FILE}
for BRIDGE in ${BRIDGES[@]}; do
  if [[ ${EXISTING_BRIDGES} =~ ${BRIDGE} ]]; then
    delete_bridge "${BRIDGE}"
    echo "01 Bridge ${BRIDGE} deleted"
  else
    echo "01 Bridge ${BRIDGE} does not exist."
  fi
done
echo "$(date)//01 BRIDGES//Completed..." >> ${LOG_FILE} # timestamp beginning to file
###### STEP 01 END ######
###### STEP 02 START ######
echo "$(date)//02 DOCKER//Stopping Docker..." >> ${LOG_FILE} # timestamp beginning to file
# NOTE(review): 'systemctl stop' normally prints nothing, so CHECK_DOCKER
# is usually empty and the 'inactive' branch is rarely taken — perhaps
# 'systemctl status' was intended here; confirm.
sudo systemctl stop docker.service > ${TEMP_FILE}
CHECK_DOCKER=$(cat ${TEMP_FILE} | grep "Active")
rm ${TEMP_FILE}
if [[ ${CHECK_DOCKER} =~ 'inactive' ]]; then
  echo "02 Stopping Docker service"
  sudo systemctl stop docker.service
else
  echo "02 Docker service has already stopped"
fi
echo "$(date)//02 DOCKER//Completed..." >> ${LOG_FILE} # timestamp end to file
###### STEP 02 END ######
###### STEP 03 START ######
# Stop each configured container that exists and is reported as "Up".
echo "$(date)//03 CONTAINERS//Stopping Containers..." >> ${LOG_FILE} # timestamp beginning to file
ALL_CONTAINERS=$(sudo docker container ls --all)
for CONTAINER in ${CONTAINERS[@]}; do
  if [[ ${ALL_CONTAINERS} =~ ${CONTAINER} ]]; then
    echo "03 The container ${CONTAINER} exists. Checking operational state."
    CONTAINER_LINE=$(echo "${ALL_CONTAINERS}" | awk '/Up/ {print $8}') # check for Up
    if [[ ${CONTAINER_LINE} == "Up" ]]; then
      echo "03 Container ${CONTAINER} is up, bringing down"
      sudo docker container stop ${CONTAINER}
    else
      echo "03 The container ${CONTAINER} is already inactive"
    fi
  else
    echo "03 Container ${CONTAINER} does NOT exist. delete or check the VARS files" >> ${LOG_FILE}
  fi
done
echo "$(date)//03 CONTAINERS//Completed..." >> ${LOG_FILE} # timestamp end to file
###### STEP 03 END ######
###### STEP 04 START ######
# Destroy (hard power-off) every libvirt VM named after a script in ./VM.
echo "$(date)//04 VM//Stopping VMS..." >> ${LOG_FILE} # timestamp beginning to file
EXISTING_VMS=$(sudo virsh list --all)
# > ${TEMP_FILE} && cat ${TEMP_FILE})
#rm ${TEMP_FILE}
# LAB_VMS=($(ls -l VM | awk '/ipub/ {print $9}' | sed -e 's/\.sh//g')) # overcomplicated
LAB_VMS=($(ls VM | sed -e 's/\.sh//')) # drop .sh from the files you found in the folder
echo "04 LAB VM files found for : ${LAB_VMS[@]}" # list
for VM in ${LAB_VMS[@]}; do
  if [[ ${EXISTING_VMS} =~ ${VM} ]]; then
    echo "04 The VM ${VM} exists. Checking operational state."
    VM_LINE=$(echo "${EXISTING_VMS}" | grep "${VM}") # check for Up
    if [[ ${VM_LINE} =~ "running" ]]; then
      echo "04 Stopping VM ${VM}"
      sudo virsh destroy ${VM} >> /dev/null # hide on cli
      # sudo virsh undefine ${VM} >> /dev/null # hide on cli
    else
      echo "04 VM ${VM} is already stopped"
    fi
  else
    echo "04 VM ${VM} does NOT exist. delete or check the VM files" >> ${LOG_FILE}
  fi
done
echo "$(date)//04 VMS//Completed..." >> ${LOG_FILE} # timestamp end to file
###### STEP 04 END ######
###### STEP 05 START ######
# Remove the first FORWARD rule added by the start script.
echo "$(date): 05 // Modifying iptables" >> ${LOG_FILE} # timestamp end to file
echo "05 Updating iptables"
sudo iptables -D FORWARD 1
echo "$(date): 05 // Complete." >> ${LOG_FILE} # timestamp end to file
###### STEP 05 END ######
f7cdcdf017c5c9efd0c825221edac772cbf36881 | Shell | Rhdrarja/safe-server | /bin/createuser-spdy-nghttpx-squid.sh | UTF-8 | 1,802 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Create an HTTP/2 proxy account: append "user,password,port" to the
# SPDY config file, add the user to the squid htpasswd file, then
# restart the nghttpx/squid front end. Paths (SPDYConfig, SSL paths,
# binDir) and the echoS logger come from /opt/.global-utils.sh.
source /opt/.global-utils.sh
if [ "x$1" = "x-h" -o "x$1" = "x--help" ]
then
    echo "$0 USERNAME PASSWORD PORT"
    exit 0
fi
# Keep the config file and the htpasswd file in lockstep: if either is
# missing, recreate both empty so their contents cannot diverge.
if [[ ! -f ${SPDYConfig} ]]; then
    touch ${SPDYConfig}
    rm ${SPDYSquidPassWdFile}
    touch ${SPDYSquidPassWdFile}
fi
if [[ ! -f ${SPDYSquidPassWdFile} ]]; then
    touch ${SPDYSquidPassWdFile}
    rm ${SPDYConfig}
    touch ${SPDYConfig}
fi
# Refuse to proceed without usable TLS material.
if [[ ! -s ${letsEncryptKeyPath} ]]; then
    echoS "The SSL Key file ${letsEncryptKeyPath} is not existed. Exit" "stderr"
    sleep 2
    exit 0
fi
if [[ ! -f ${letsEncryptCertPath} ]]; then
    echoS "The SSL cert file ${letsEncryptCertPath} is not existed. Exit" "stderr"
    sleep 2
    exit 0
fi
username=$1
password=$2
port=$3
( [[ -z "${username}" ]] || [[ -z "${password}" ]] || [[ -z "${port}" ]] ) \
    && echoS "You should invoke me via \`$0 USERNAME PASSWORD PORT \`. \
    None of the parameters could be omitted." "stderr" \
    && sleep 2 && exit 0
# Reject duplicate usernames (line prefix "user,") and duplicate ports
# (line suffix ",port") already present in the config.
if [[ ! -z $(gawk "/^${username},/ {print}" ${SPDYConfig}) ]]; then
    echoS "Ooops, the user ${username} is exited in file ${SPDYConfig} and ${SPDYSquidPassWdFile} already. Exit" "stderr"
    sleep 2
    exit 0
fi
if [[ ! -z $(gawk "/,${port}$/ {print}" ${SPDYConfig}) ]]; then
    echoS "Ooops, the port ${port} is taken in file ${SPDYConfig} already. Exit" "stderr"
    sleep 2
    exit 0
fi
newline=${username},${password},${port}
echo ${newline} >> ${SPDYConfig}
#spdyproxy -k ${letsEncryptKeyPath} -c ${letsEncryptCertPath} -p $port -U $username -P $password >/dev/null 2>&1 &
# htpasswd -i reads the password from stdin (keeps it off the arg list).
echo ${password} | htpasswd -i ${SPDYSquidPassWdFile} ${username}
#${binDir}/restart-spdy-squid.sh
${binDir}/restart-dead-spdy-nghttpx-squid.sh
#${binDir}/restart-spdy-nghttpx-squid.sh
echoS "HTTP/2 account created with \n\
Username: $username \n\
Password: $password \n\
Port: $port \n\
"
| true |
960eb6069ac35e14b699c4b940343fe034fa22dc | Shell | thongqle11/scripts | /bash/switch/VC-Failover-10G.sh | UTF-8 | 1,223 | 3.3125 | 3 | [] | no_license | #!/bin/sh
# To run, pipe script to ssh session of VCM
# example: ./VC-Failover.sh | ssh Administrator@172.24.102.240
# Emits an endless stream of Virtual Connect CLI commands that
# alternately unassigns and re-assigns the SAN profile on each IO module,
# exercising storage-path failover.
#Protocol being tested. Uncomment the one being tested.
PROTOCOL=fcoe
#PROTOCOL=iscsi
#IO Module Bays Used. Quitman->1/2, Quintana Mezz1-> 3/4, Quintana Mezz2-> 5/6, Quintana Mezz3-> 7/8
IO_MODULE1=3
IO_MODULE2=4
#Name of SAN Profiles used in IO_MODULES specified above. For iSCSI, substitute with Ethernet Networks used for iSCSI connection.
SAN1=Bay3-Port4-Linux-Cisco
#SAN1=iSCSI-Bay3-x7
SAN2=Bay4-Port4-Linux-Brocade
#SAN2=iSCSI-Bay4-x7
#Name of Server Profile in VCM assigned to blade.
SERVER_PROFILE="TL-BAY4"
# Seconds to keep a path down / back up between commands.
time_down=180
time_up=60
# FCoE connections are assigned a Fabric; iSCSI ones a Network.
if [ "$PROTOCOL" = fcoe ]; then
TYPE=Fabric
elif [ "$PROTOCOL" = iscsi ]; then
TYPE=Network
else
echo "ERROR: Check Test Type, FCoE or iSCSI"
exit
fi
# Loop forever; the sleeps pace the commands as the remote VCM reads them.
while true
do
(
sleep 2
echo set $PROTOCOL-connection $SERVER_PROFILE:$IO_MODULE1 $TYPE=""
sleep $time_down
echo set $PROTOCOL-connection $SERVER_PROFILE:$IO_MODULE1 $TYPE="$SAN1"
sleep $time_up
echo set $PROTOCOL-connection $SERVER_PROFILE:$IO_MODULE2 $TYPE=""
sleep $time_down
echo set $PROTOCOL-connection $SERVER_PROFILE:$IO_MODULE2 $TYPE="$SAN2"
sleep $time_up
)
done
| true |
9b092c779e5e3b604b8c245b9a05f0ec4eb8060a | Shell | siosios/odd-scripts | /D7-32bit.sh | UTF-8 | 5,259 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# Debian 7 (32-bit) OpenVZ VPS provisioning script: installs and
# configures nginx/PHP, OpenVPN, badvpn-udpgw, dropbear, squid3, vnstat,
# fail2ban and webmin. Must run as root; downloads most configs from an
# external host (scriptauto.000webhostapp.com) — review before use.
apt-get update && apt-get -y upgrade
# go to root
cd
# disable ipv6
echo 1 > /proc/sys/net/ipv6/conf/all/disable_ipv6
sed -i '$ i\echo 1 > /proc/sys/net/ipv6/conf/all/disable_ipv6' /etc/rc.local
# install wget and curl
apt-get update;apt-get -y install wget curl;
# set time GMT +7
ln -fs /usr/share/zoneinfo/Asia/Bangkok /etc/localtime
# set locale
sed -i 's/AcceptEnv/#AcceptEnv/g' /etc/ssh/sshd_config
service ssh restart
# set repo
wget -O /etc/apt/sources.list "https://scriptauto.000webhostapp.com/openvz/sources.list.debian"
wget "http://www.dotdeb.org/dotdeb.gpg"
cat dotdeb.gpg | apt-key add -;rm dotdeb.gpg
# remove unused
apt-get -y --purge remove samba*;
apt-get -y --purge remove apache2*;
apt-get -y --purge remove sendmail*;
apt-get -y --purge remove bind9*;
# update
apt-get update; apt-get -y upgrade;
# install webserver
apt-get -y install nginx php5-fpm php5-cli
# install essential package
apt-get -y install bmon iftop htop nmap axel nano iptables traceroute sysv-rc-conf dnsutils bc nethogs openvpn vnstat less screen psmisc apt-file whois ptunnel ngrep mtr git zsh mrtg snmp snmpd snmp-mibs-downloader unzip unrar rsyslog debsums rkhunter
apt-get -y install build-essential
# disable exim
service exim4 stop
sysv-rc-conf exim4 off
# update apt-file
apt-file update
# setting vnstat (venet0 is the OpenVZ container interface)
vnstat -u -i venet0
service vnstat restart
# install screenfetch
cd
wget https://github.com/KittyKatt/screenFetch/raw/master/screenfetch-dev
mv screenfetch-dev /usr/bin/screenfetch
chmod +x /usr/bin/screenfetch
echo "clear" >> .profile
echo "screenfetch" >> .profile
# install webserver
cd
rm /etc/nginx/sites-enabled/default
rm /etc/nginx/sites-available/default
wget -O /etc/nginx/nginx.conf "https://scriptauto.000webhostapp.com/openvz/nginx.conf"
mkdir -p /home/vps/public_html
echo "<?php phpinfo(); ?>" > /home/vps/public_html/info.php
wget -O /etc/nginx/conf.d/vps.conf "https://scriptauto.000webhostapp.com/openvz/vps.conf"
sed -i 's/listen = \/var\/run\/php5-fpm.sock/listen = 127.0.0.1:9000/g' /etc/php5/fpm/pool.d/www.conf
service php5-fpm restart
service nginx restart
# install openvpn
wget -O /etc/openvpn/openvpn.tar "https://scriptauto.000webhostapp.com/openvz/openvpn-debian.tar"
cd /etc/openvpn/
tar xf openvpn.tar
wget -O /etc/openvpn/1194.conf "https://scriptauto.000webhostapp.com/openvz/1194.conf"
service openvpn restart
sysctl -w net.ipv4.ip_forward=1
sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g' /etc/sysctl.conf
wget -O /etc/iptables.up.rules "https://scriptauto.000webhostapp.com/openvz/iptables.up.rules"
sed -i '$ i\iptables-restore < /etc/iptables.up.rules' /etc/rc.local
# Substitute this host's public IP for the xxxxxxxxx placeholder in the
# downloaded rule/config templates.
MYIP=`curl -s ifconfig.me`;
MYIP2="s/xxxxxxxxx/$MYIP/g";
sed -i $MYIP2 /etc/iptables.up.rules;
iptables-restore < /etc/iptables.up.rules
service openvpn restart
# configure openvpn client config
cd /etc/openvpn/
wget -O /etc/openvpn/Client.ovpn "https://scriptauto.000webhostapp.com/openvz/1194.conf"
sed -i $MYIP2 /etc/openvpn/Client.ovpn;
cp Client.ovpn /home/vps/public_html/
cd
# install badvpn
wget -O /usr/bin/badvpn-udpgw "https://scriptauto.000webhostapp.com/openvz/badvpn-udpgw"
sed -i '$ i\screen -AmdS badvpn badvpn-udpgw --listen-addr 127.0.0.1:7300' /etc/rc.local
chmod +x /usr/bin/badvpn-udpgw
screen -AmdS badvpn badvpn-udpgw --listen-addr 127.0.0.1:7300
cd
# setting port ssh
sed -i '/Port 22/a Port 143' /etc/ssh/sshd_config
sed -i 's/Port 22/Port 22/g' /etc/ssh/sshd_config
service ssh restart
# install dropbear
apt-get -y install dropbear
sed -i 's/NO_START=1/NO_START=0/g' /etc/default/dropbear
sed -i 's/DROPBEAR_PORT=22/DROPBEAR_PORT=143/g' /etc/default/dropbear
sed -i 's/DROPBEAR_EXTRA_ARGS=/DROPBEAR_EXTRA_ARGS="-p 109 -p 110"/g' /etc/default/dropbear
echo "/bin/false" >> /etc/shells
service ssh restart
service dropbear restart
# install vnstat gui
cd /home/vps/public_html/
wget http://www.sqweek.com/sqweek/files/vnstat_php_frontend-1.5.1.tar.gz
tar xf vnstat_php_frontend-1.5.1.tar.gz
rm vnstat_php_frontend-1.5.1.tar.gz
mv vnstat_php_frontend-1.5.1 vnstat
cd vnstat
sed -i 's/eth0/venet0/g' config.php
sed -i "s/\$iface_list = array('venet0', 'sixxs');/\$iface_list = array('venet0');/g" config.php
sed -i "s/\$language = 'nl';/\$language = 'en';/g" config.php
sed -i 's/Internal/Internet/g' config.php
sed -i '/SixXS IPv6/d' config.php
cd
# install fail2ban
apt-get -y install fail2ban;service fail2ban restart
# install squid3
apt-get -y install squid3
wget -O /etc/squid3/squid.conf "https://scriptauto.000webhostapp.com/openvz/squid.conf"
sed -i $MYIP2 /etc/squid3/squid.conf;
service squid3 restart
# install webmin
cd
wget -O webmin-current.deb "https://scriptauto.000webhostapp.com/openvz/webmin-current.deb"
dpkg -i --force-all webmin-current.deb
wget http://www.webmin.com/jcameron-key.asc
apt-key add jcameron-key.asc
apt-get update
apt-get install -y webmin
apt-get -y --force-yes -f install libxml-parser-perl
service webmin restart
service vnstat restart
# finalisasi (Indonesian: finalization) — fix ownership and restart all.
chown -R www-data:www-data /home/vps/public_html
service nginx start
service php-fpm start
service vnstat restart
service openvpn restart
service ssh restart
service dropbear restart
service fail2ban restart
service squid3 restart
service webmin restart
| true |
8e39d0d775d01b486936549775f877efd45886cd | Shell | crystalbobby/some-shell-scripts | /greetings.sh | UTF-8 | 262 | 3.3125 | 3 | [] | no_license | #!/bin/sh
# Ask whether it is morning and print the matching greeting.
echo "Is it morning? Please answer yes or no"
read timeofday
# Quote $timeofday so an empty or multi-word reply cannot break the test,
# and keep the mandatory space before ']' (missing in the original).
if [ "$timeofday" = "yes" ]
then
    echo "Good morning"
elif [ "$timeofday" = "no" ]; then
    echo "Good afternoon"
else
    echo "Sorry, $timeofday not recognised. Enter yes or no"
    # Original said 'exit i' — a literal 'i' is not a valid status.
    exit 1
fi
exit 0
| true |
0ed6941e83ca4d83934b9e51735301d6eb91ccaa | Shell | JimmyLoloy98/Bash-Course---Excersises | /variables.sh | UTF-8 | 340 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# Demonstrates variable usage.
# Note: assignments must not have spaces around '='.
echo "Digit your name: "
echo 'This is a variable: $var'   # single quotes suppress expansion
echo UID = $UID
echo HOME = $HOME
echo USER = $USER
var1=10
var2=$var1
echo "the value of var1 is: $var2"
# $( ... ) replaces legacy backticks for command substitution.
var3=$(date)
echo "Date: " $var3
test=$(date +%y%m%d)
echo "Date format: " $test
947690cc12ce105a4772fc8bf15ac3d51aacb9a9 | Shell | ahmad0807/LinuxAssignment | /LinuxAssignment/Ex4.sh | UTF-8 | 138 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Print every element of the array, one per line.
# Fixed: the original used Unicode "smart quotes" (which bash treats as
# literal characters inside the values) and a hard-coded bound of 5 that
# skipped the sixth element.
var_arr=("man" "bear" "pig" "dog" "cat" "sheep")
for ((i = 0; i < ${#var_arr[@]}; i++))
do
echo "${var_arr[i]}"
done
| true |
6ab96d6c04613271a7f3675c750fc0a4348f10a5 | Shell | scpcorp/skynet-webportal | /setup-scripts/setup.sh | UTF-8 | 2,367 | 2.9375 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env bash
# Provision a Skynet webportal host: dotfiles, SSH, nginx + certbot,
# Node/Yarn, firewall, Go toolchain, Sia, systemd units and the frontend
# build. Aborts on the first failing command (set -e).
set -e
# Copy over basic configuration files.
cp ./tmux.conf ~/.tmux.conf
cp ./bashrc ~/.bashrc
source ~/.bashrc
# Add SSH keys and set SSH configs
sudo cp ./ssh_config /etc/ssh/ssh_config
mkdir -p ~/.ssh
cat ./authorized_keys >> ~/.ssh/authorized_keys
# Nodejs install prerequisite https://nodejs.org/en/download/package-manager/
curl -sL https://deb.nodesource.com/setup_13.x | sudo -E bash -
# Yarn install prerequisite https://classic.yarnpkg.com/en/docs/install
curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
# Apt installations.
sudo apt-get update
sudo apt-get -y install ufw tmux ranger htop nload nginx certbot \
    python-certbot-nginx nodejs gcc g++ make yarn git vim
# terminfo for alacritty terminal via ssh
# If you don't use the alacritty terminal you can remove this step.
wget -c https://raw.githubusercontent.com/alacritty/alacritty/master/extra/alacritty.info
sudo tic -xe alacritty,alacritty-direct alacritty.info
rm alacritty.info
# Setup nginx config
sudo cp ./skynet-nginx.conf /etc/nginx/sites-available/skynet
sudo nginx -t
sudo ln -sf /etc/nginx/sites-available/skynet /etc/nginx/sites-enabled/skynet
sudo rm /etc/nginx/sites-enabled/default --force
sudo systemctl reload nginx
# Setup firewall
# TODO: disable plain HTTP eventually
sudo ufw enable
sudo ufw allow ssh
sudo ufw allow 'Nginx Full'
sudo ufw allow 'Nginx HTTP'
# Install Go 1.13.7.
wget -c https://dl.google.com/go/go1.13.7.linux-amd64.tar.gz
sudo tar -C /usr/local -xzf go1.13.7.linux-amd64.tar.gz
export PATH=$PATH:/usr/local/go/bin
rm go1.13.7.linux-amd64.tar.gz
# Sanity check that will pass if go was installed correctly.
go version
# Install Sia
git clone -b v1.4.4 https://gitlab.com/NebulousLabs/Sia ~/Sia
make --directory ~/Sia
# Setup systemd files
mkdir -p ~/.config/systemd/user
cp siad.service ~/.config/systemd/user/siad.service
cp siad-upload.service ~/.config/systemd/user/siad-upload.service
# Setup files for storing environment variables
mkdir -p ~/.sia
cp sia.env ~/.sia/
cp sia.env ~/.sia/sia-upload.env
# Setup persistent journal
sudo mkdir -p /var/log/journal
sudo cp journald.conf /etc/systemd/journald.conf
sudo systemctl restart systemd-journald
# Setup skynet frontend.
cd ..
yarn
yarn build
| true |
eeabab26a4b8c96ad53caa19018af816ef7e439a | Shell | LinneaAx/cryo | /all_scripts/make_subdir.sh | UTF-8 | 252 | 3.234375 | 3 | [] | no_license | #!/usr/bin/bash
# Print the second whitespace-separated field of every line in the file
# named by the first script argument.
input_file=$1
while IFS=' ' read -r first second rest; do
	echo "$second"
done < "$input_file"
| true |
41f569dc05ed2e1154a66bacde7ff68466dd0f17 | Shell | jhrozek/dotfiles | /bin/check_mail.sh | UTF-8 | 255 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Forever: sync mail with mbsync, then rebuild the mutt 'mailboxes' line
# from every maildir (directories containing a 'cur' subdir) under FOLDER.
MAILBOXES_FILE=~/.muttrc_mbsync_mboxes
FOLDER=/home/remote/jhrozek/.maildir/Inbox/
while true; do
    mbsync -a
    printf 'mailboxes ' > "$MAILBOXES_FILE"
    find "$FOLDER" -type d -name cur -printf '%h ' >> "$MAILBOXES_FILE"
    sleep 120
done
| true |
a6d4a19762381d20137e08e2e632a12bfbff7d94 | Shell | nhatch/rrt | /run_all_experiments.sh | UTF-8 | 725 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Compile, run every (obstacle-mode, task, planner-variant) combination
# with a fixed seed, append output to final_results_all.txt, then analyze.
./hack_compile.sh
SEED=12345
for DYNAMIC_OBSTACLES in 0 1; do
  for TASK in gate bugtrap forest blob; do
    # Same order as before: point_first, point_second, stick_first, stick_second.
    for VARIANT in point_first point_second stick_first stick_second; do
      echo "STARTING $TASK $DYNAMIC_OBSTACLES $VARIANT"
      ./headless_${VARIANT}.out $TASK $DYNAMIC_OBSTACLES $SEED >> final_results_all.txt
    done
  done
done
./analyze.sh
| true |
0fded7f868d54c7c5ca7e403958c460cd69051ac | Shell | Skullabs/kikaha | /kikaha-maven-plugin/resources/META-INF/defaults/bin/inc.cmds-unix.sh | UTF-8 | 1,104 | 3.796875 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# FUNCTIONS
# Print the PID of the server started from the current directory, found
# by matching "config.app.dir=$(pwd)" in the process list. The tr/sed
# pass collapses whitespace to '@' so cut can pick the PID column.
retrieve_server_pid(){
    ps -o uid,pid,cmd ax | grep "config.app.dir=$(pwd)" | grep -v grep| head -n 1 | tr '\t' '@' | sed 's/ */@/g;s/^@//' | cut -d '@' -f 2
}
# Start the server detached in the background unless an instance for this
# directory is already running (in which case warn and exit 1).
start_server(){
    PID=$(retrieve_server_pid)
    if [ ! "$PID" = "" ]; then
        warn "Server already running"
        exit 1
    else
        info "Starting server in background..."
        # nohup + & : survive hangup; stdout/stderr discarded to $NULL.
        nohup ${JAVA} ${JAVA_OPTS} -classpath "${CLASSPATH}" ${MAIN_CLASS} > $NULL 2> $NULL &
    fi
}
# Run the server in the foreground (output on the terminal) unless an
# instance for this directory is already running.
debug_server(){
    PID=$(retrieve_server_pid)
    if [ ! "$PID" = "" ]; then
        warn "Server already running"
        exit 1
    else
        info "Starting server in debug mode..."
        ${JAVA} ${JAVA_OPTS} -classpath "${CLASSPATH}" ${MAIN_CLASS}
    fi
}
# Send the running server a graceful shutdown signal (SIGTERM via kill)
# and poll for up to ~10 seconds until its process disappears.
stop_server(){
    PID=$(retrieve_server_pid)
    if [ ! "$PID" = "" ]; then
        info "Sending graceful shutdown signal..."
        kill $PID && info "Signal sent." || exit 1
        retries=1
        # Poll once per second; give up after 10 attempts.
        while [ ! "$PID" = "" -a "$retries" -lt 10 ]; do
            sleep 1
            PID=$(retrieve_server_pid)
            # POSIX arithmetic expansion replaces the legacy `expr` backticks.
            retries=$((retries + 1))
        done
        # NOTE(review): printed even if the process is still alive after
        # the polling window — mirrors the original behaviour.
        info "Service was shut down."
    else
        warn "Server not running"
        exit 1
    fi
}
# VARIABLES
# Classpath: every jar under ${LIBDIR} (the '*' wildcard is expanded by
# the JVM itself) plus the current directory.
CLASSPATH="${LIBDIR}/*:."
f0489601eea5190023f92eeb103b081ad5c7abd3 | Shell | bwoebi/drag0n | /drag0n.app/Contents/System/bin/executer.sh | UTF-8 | 269 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# Runs the command line stored in the THE_ARGS environment variable.
# Use a "dumb" terminal type so invoked programs do not emit
# terminal-control sequences / extra stderr noise.
export TERM=dumb
# THE_ARGS holds the entire command; eval executes it in this shell so
# quoting and expansions inside the variable are honoured.
eval "$THE_ARGS"
# Clear the variable afterwards (original: "clean the mess we started")
# — presumably so the command is not leaked or accidentally re-run.
export THE_ARGS=""
e5f795b814f8b8a4b8ab8325167a12f04325fcb2 | Shell | Plan9-Archive/antshill | /grid/stosvc | UTF-8 | 4,891 | 2.9375 | 3 | [] | no_license | #!/bin/rc
# attaches to hubfs and runs on storage node
# stosvc — storage-node request loop for the ANTS grid (Plan 9 rc).
# Reads whitespace-delimited requests from stdin; each request's first
# word is a username, and the authoritative command is re-read from
# /srv/$username.req (owner-checked) before being dispatched below.
rfork
spawnmax=`{cat /usr/grid/spawnmax}
if(~ $#spawnmax 0)
spawnmax=5
if(! test -e /usr/grid/stononce)
# NOTE(review): '>usr/grid/stononce' lacks a leading '/' — probably
# should write /usr/grid/stononce; confirm.
echo 1009 >usr/grid/stononce
nonce = `{cat /usr/grid/stononce}
if(! test -e /usr/grid/nextstoport)
echo 20000 >/usr/grid/nextstoport
# Record this node's IP (dropping loopback) and source it to set $ip.
ndb/ipquery sys $sysname ip |sed 's/ip=127.0.0.1//' >/usr/grid/myip
. /usr/grid/myip
scorefile=/n/9fat/scorenames
if(! test -e $scorefile)
mount /srv/fatsrv /n/9fat
while(usercmd=`{read}){
reqcmd=()
srvowner=()
username=$usercmd(1)
echo $username $usercmd >>/usr/grid/stolog.$pid
# Only trust the request if /srv/$username.req is owned by that user.
if(test -e /srv/$username.req){
srvowner=`{ls -l /srv/$username.req |awk '{print $4}'}
if(~ $srvowner $username){
echo reqcmd equals cat /srv/$username.req >>/usr/grid/stolog.$pid
reqcmd=`{cat /srv/$username.req}
echo $reqcmd >>/usr/grid/$username.reqlog
}
if not
echo user id error >[1=2]
rm /srv/$username.req
}
# The request must begin with the same username; otherwise fail.
if(! ~ $username $reqcmd(1))
reqcmd=(fail fail)
usercmd=$reqcmd(2-)
switch($usercmd){
# spawn[disk] SCORENAME: start a fossil server for the named root and
# announce its fetch address to the cpu hub.
case spawn*
if(~ $#usercmd 2){
targetfs=$usercmd(2)
if(! test -e /usr/grid/spawncount)
echo 0 >/usr/grid/spawncount
spawncount=`{cat /usr/grid/spawncount}
if(test -e /srv/$targetfs^fossil){
echo that root is already being served from this storage node >[1=2]
echo use save to clone it to a new name if desired >[1=2]
}
if not if(! ~ $spawncount $spawnmax){
# Verify the requesting user owns this score name.
usercheck=`{ndb/query -f /n/9fat/scorenames -a scorename $targetfs user |grep $username}
if(~ $usercheck $username){
echo init fossil...waiting for 10...
stoport=`{cat /usr/grid/nextstoport}
echo $username spawnroot -l $stoport $targetfs >>/usr/grid/stolog.$pid
if(~ $usercmd spawndisk*){
spawnroot -d -l $stoport $targetfs &
}
if(! ~ $usercmd spawndisk*){
spawnroot -l $stoport $targetfs &
}
# Give fossil up to ~10s to post its srv file.
sleep 3
if(! test -e /srv/$targetfs^fossil)
sleep 3
if(! test -e /srv/$targetfs^fossil)
sleep 4
if(test -e /srv/$targetfs^fossil){
spawncount=`{echo $spawncount '+ 1' |hoc}
echo $spawncount >/usr/grid/spawncount
if(! test -d /usr/grid/$username)
mkdir /usr/grid/$username
echo 'fetch tcp!'^$ip^'!'^$stoport $targetfs $username
echo 'fetch tcp!'^$ip^'!'^$stoport $targetfs $username >/usr/grid/$username/$targetfs
echo 'fetch tcp!'^$ip^'!'^$stoport $targetfs $username >>/n/g/cpu.in
stoport=`{echo $stoport '+ 2' |hoc}
echo $stoport >/usr/grid/nextstoport
}
if not
echo $targetfs spawn failed on storage server >[1=2]
}
if not
echo $targetfs not found for $username >[1=2]
}
if not
echo spawn limit reached >[1=2]
}
if not
echo spawn SCORENAME >[1=2]
# save[disk] OLDNAME NEWNAME: snapshot a running root to a new
# nonce-suffixed score name in the background.
case save*
if(~ $#usercmd 3){
echo savesnap -u $username $usercmd(2) $usercmd(3) $nonce >>/usr/grid/stolog.$pid
if(! ~ $usercmd savedisk*)
savesnap -u $username -r /srv/^$usercmd(2) -s /srv/^$usercmd(2)^fscons $usercmd(3)^.^$nonce &
if(~ $usercmd savedisk*)
savesnap -u $username -f `{cat /usr/grid/^$usercmd(2)} -s /srv/^$usercmd(2)^fscons $usercmd(3)^.^$nonce &
nonce=`{echo $nonce ' + 100 ' |hoc}
echo $nonce >/usr/grid/stononce
}
if not
echo save OLDNAME NEWNAME >[1=2]
# invite USER FSNAME: create accounts for USER on the fs console and
# clone the fs under a name containing the invited user.
case invite*
if(~ $#usercmd 3){
inviteduser=$usercmd(2)
targetfs=$usercmd(3)
if(test -e /usr/grid/$username/$targetfs){
echo uname $inviteduser $inviteduser >>/srv/$targetfs^fscons
echo 'uname sys +'^$inviteduser >>/srv/$targetfs^fscons
echo 'uname adm +'^$inviteduser >>/srv/$targetfs^fscons
echo 'uname upas +'^$inviteduser >>/srv/$targetfs^fscons
sleep 1
echo savesnap -u $inviteduser $targetfs $targetfs^$inviteduser $nonce >>/usr/grid/stolog.$pid
savesnap -u $inviteduser -r /srv/$targetfs -s /srv/$targetfs^fscons $targetfs^$inviteduser^.^$nonce &
nonce=`{echo $nonce ' + 100 ' |hoc}
echo $nonce >/usr/grid/stononce
}
if not
echo $targetfs not owned by $username >[1=2]
}
if not
echo invite USER FSNAME >[1=2]
# boom FSNAME: tear down a served filesystem and its bookkeeping files.
case boom*
if(~ $#usercmd 2){
targetfs=$usercmd(2)
if(test -e /usr/grid/$username/$targetfs){
echo boom $targetfs >>/n/g/cpu.in
sleep 1
. /usr/grid/kill.$targetfs
rm /usr/grid/$targetfs
rm /usr/grid/$username/$targetfs
rm /usr/grid/kill.$targetfs
echo $targetfs BOOMed
}
if not
echo $targetfs not owned by $username >[1=2]
}
if not
echo boom FSNAME >[1=2]
# status: list this user's active namespaces and their cpu ports.
case status
echo active grid namespaces for $username
for(i in `{ls /usr/grid/$username}){
basename $i
echo serving on cpu port `{cat $i |sed 's/.*!.*!//g' |sed 's/ .*/ \+1/g' |hoc}
}
# scores [PATTERN]: list score names owned by the user.
case scores*
if(! test -e /n/9fat/scorenames)
mount /srv/fatsrv /n/9fat
if(~ $#usercmd 2)
ndb/query -a -f /n/9fat/scorenames user $username scorename |grep $usercmd(2)
if not
ndb/query -a -f /n/9fat/scorenames user $username scorename
case fail*
echo command authorization failed
case *
echo unknown command >[1=2]
}
}
| true |
7b3a95c46bfbf394b26a413946e00a89af939402 | Shell | ankitraj311/Bash | /17_professional_menu.sh | UTF-8 | 847 | 3.828125 | 4 | [] | no_license | #!/bin/bash
# The ": 'text'" construct below is the common idiom for a multi-line
# block comment: ':' is the null command and the quoted demo code is
# merely its (ignored) argument. These are disabled 'select' menu demos.
: '
select car in BMW MERCEDES TESLA ROVER TOYOTA
do
echo "you have selected $car"
done
'
# Second disabled demo: same menu, dispatched through a case statement.
: '
select car in BMW MERCEDES TESLA ROVER TOYOTA
do
case $car in
BMW)
echo "BMW Selected $car";;
MERCEDES)
echo "MERCEDES Selected $car";;
TESLA)
echo "TESLA Selected $car";;
ROVER)
echo "ROVER Selected $car";;
TOYOTA)
echo "TOYOTA Selected $car";;
*)
echo "Select in between 1..5";;
esac
done
'
# Wait for a keypress, re-prompting every 3 seconds until one arrives.
# Fixed: 'while [ true ]' only tests that the string "true" is non-empty
# (so would '[ false ]'); use the 'true' builtin. The read's exit status
# is now tested directly instead of via a separate '$?' check.
echo "Press any key to continue"
while true
do
    # -t 3: give up after 3 seconds; -n 1: return after a single character.
    if read -t 3 -n 1
    then
        echo "You have Terminated the script"
        exit
    else
        echo "Waiting for you to press the key"
    fi
done
| true |
abaf8d23345636ba2abdf6a7ab69729c95f8bdd7 | Shell | Hayao0819/EG-Installer | /softwares/genymotion.entry | UTF-8 | 747 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Entry metadata consumed by the EG-Installer framework.
name="Genymotion"
package_name="genymotion"
# Japanese: "A VirtualBox-based Android emulator that runs on Linux."
description="Linuxで動くVirtualBoxベースのAndroidエミュレータ"
# Ask the framework to call preparing() before install().
run_preparing=true
# Build the AUR package as the unprivileged $aur_user (provided by the
# installer framework), leaving the built .pkg.tar.xz under /tmp.
function preparing () {
    # Helper gist that runs the AUR build for the named package.
    curl -o /tmp/make_from_aur.sh https://gist.githubusercontent.com/Hayao0819/3389c0ab2f000da13dbfe8e06315131b/raw/601833a210eeb4e19fea48584569e29d0f8517dc/aur.bash
    chmod 755 /tmp/make_from_aur.sh
    su $aur_user -c "/tmp/make_from_aur.sh $package_name"
    # Move the built package up to /tmp for install() to find.
    mv $(find /tmp -name "$package_name*.pkg.tar.xz") /tmp > /dev/null
    rm /tmp/make_from_aur.sh
}
# Install the package file produced by preparing(), then delete it.
function install () {
    pkg_file=$(find /tmp -name "$package_name*.pkg.tar.xz")
    pacman -U --noconfirm $pkg_file
    rm $pkg_file
}
# Remove the package plus unneeded dependencies (-s) and config backups (-n).
uninstall () {
    pacman -Rsn --noconfirm ${package_name}
}
00cf79bde0a4dd08958747ee3892fe9bdac2da79 | Shell | ruian1/Slurm_MPID | /run_mpid.sh | UTF-8 | 946 | 2.6875 | 3 | [] | no_license | #!/bin/bash
# This runs inside the container and builds the container
# We assume you did the first-time setup already
# MPID inference driver (Slurm/singularity):
#   $1 input file list, $2 output dir, $3/$4 run IDs (only used to make
#   a unique /tmp working directory).
INFILE_LIST=$1
OUTDIR=$2
RUNID=$3
RUNID2=$4
echo "WORKDIR",$WORKDIR
echo "INFILE_LIST",$INFILE_LIST
echo "OUTDIR"=$OUTDIR
# go to the working directory
#cd $WORKDIR
echo $OUTDIR
# Set up ROOT, the dllee_unified environment and MPID_pytorch.
source /usr/local/bin/thisroot.sh
cd /usr/local/share/dllee_unified
source ./configure.sh
source /usr/local/share/MPID_pytorch/setup.sh
tmp_dir=/tmp/tmp_${RUNID}_${RUNID2}
mkdir $tmp_dir
cd $tmp_dir
inference_cfg_file=/cluster/kappa/90-days-archive/wongjiradlab/larbys/pubs/dlleepubs/downstream/Production_Config/cfg/network/inference_config_tufts_WC.cfg
mpid_torch_dir=${LARCV_BASEDIR}/../../MPID_pytorch/uboone/
echo "in dir of ", $PWD
# Run inference; outputs multipid_out_*_WC.root into the tmp dir.
python ${mpid_torch_dir}/inference_pid_torch_dlmerger_WC.py ${INFILE_LIST} . ${inference_cfg_file}
echo "in dir of ", $PWD
ls -lrth .
mv ./multipid_out_*_WC.root $OUTDIR
echo "WTF!!!!!!!!!!!!!!!!!!"
8d6742b729354587314bae38949eba3116325aa7 | Shell | GunioRobot/maloader | /unpack_xcode.sh | UTF-8 | 2,325 | 3.484375 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
#
# Copyright 2011 Shinichiro Hamaji. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY Shinichiro Hamaji ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Shinichiro Hamaji OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Usage:
#
# %./unpack_xcode.sh xcode_3.2.6_and_ios_sdk_4.3__final.dmg
#
# The above commandline will put CLI tools in the dmg package into
# ./xcode_3.2.6_and_ios_sdk_4.3__final/root .
#
# This script was inspired by this document:
# http://devs.openttd.org/~truebrain/compile-farm/apple-darwin9.txt
set -e
PKGS="MacOSX10.6 gcc4.2 gcc4.0 llvm-gcc4.2 DeveloperToolsCLI clang"
dmg=$1
# $(...) replaces backticks; paths quoted in case the dmg name has spaces.
dir=$(basename "$dmg" .dmg)
rm -fr "$dir"
mkdir "$dir"
cd "$dir"
7z x "../$dmg"
7z x 5.hfs
# Extract each Apple .pkg's cpio payload into a directory of its own.
for pkg in $PKGS; do
  # Xcode4 doesn't have gcc4.0
  7z x -y */Packages/$pkg.pkg || continue
  7z x -y Payload
  mkdir -p $pkg
  cd $pkg
  cpio -i < ../Payload~
  cd ..
  rm -f Payload*
done
# Merge every extracted tree into a single ./root prefix.
rm -fr root
mkdir root
for pkg in $PKGS; do
  if [ "$pkg" = "MacOSX10.6" ]; then
    cp -R $pkg/SDKs/*/* root
  else
    cd $pkg || continue
    tar -c * | tar -xC ../root
    cd ..
  fi
done
# BUG FIX: the original quoted both arguments as one string, which made
# ln create a single oddly-named symlink in the current directory instead
# of linking root/Library/Frameworks to the system framework directory.
ln -sf ../../System/Library/Frameworks root/Library/Frameworks
cd root/usr/lib
ln -s system/* .
echo "The package was unpacked into $dir/root"
| true |
2b118f98a284613b111f19107ea42e12d320145c | Shell | svenkoenig/lokaler-vm | /remote/db_settings.sh | UTF-8 | 207 | 2.5625 | 3 | [] | no_license | OWNER='sven'
DUMP_DIR='/home/sven/db/dumps'
# ---------- import local settings ----------
# Optional per-host overrides living next to this script. $( ... )
# replaces the escaped-backtick construct, and the path is quoted so a
# script location containing spaces cannot break the test or the source.
SETTINGS_LOCAL="$(dirname "$0")/db_settings_local.sh"
if [ -f "$SETTINGS_LOCAL" ]; then
    source "$SETTINGS_LOCAL"
fi
| true |
74d7e822ab670ba92535101f8d0fe1d08925769c | Shell | Venki22/Jenkins | /JenkinsCLI-Scripts/TriggeredJob.sh | UTF-8 | 484 | 2.734375 | 3 | [] | no_license | echo "Please enter the job which you want to triggered"
# Prompt for a Jenkins job name and trigger it through the Jenkins CLI.
read jobname
echo "Trigger the job $jobname"
# Read connection details from the properties file; cut's -f2 takes the
# value after the '=' separator.
userName=$(grep JENKINS_USERNAME Jenkins.properties | cut -d "=" -f2)
token=$(grep JENKINS_TOKEN Jenkins.properties | cut -d "=" -f2)
url=$(grep JENKINS_URL Jenkins.properties | cut -d "=" -f2)
# Quoted so credentials/URLs/job names containing spaces stay one argument.
java -jar jenkins-cli.jar -auth "$userName:$token" -s "$url" -webSocket build "$jobname"
# (Also fixes the "successs" typo in the status message.)
echo "$jobname is triggered success"
| true |
a292c28b74e1bb71fc7f5690e15394243cff61b1 | Shell | xuanloct4/Backup | /Documents/BuildScript/build_script.sh | UTF-8 | 660 | 2.84375 | 3 | [] | no_license | #!/bin/bash
#My First Script
# Builds the Xcode target and packages the resulting .app into an .ipa.
#Info to be configured
target_name="$appname"
sdk="iphoneos"
certificate="iPhone Distribution: Aron Bury"
project_dir="$HOME/Documents/Apps/iOS/awesomeapp/$appname"
# BUG FIX: the original line was missing its closing quote (a syntax
# error swallowing the following lines) and used the undefined $Home
# instead of $HOME.
build_location="$HOME/Builds/$appname"
current_path=$(pwd)
appName="jamesApp"
jamesApp_workspace="jamesAppV2.xcworkspace"
if [ ! -d "$build_location" ]; then
mkdir -p "$build_location"
fi
# Abort if the project directory is missing rather than building in cwd.
cd "$project_dir" || exit 1
xcodebuild -target "$appname" OBJROOT="$build_location/obj.root" SYMROOT="$build_location/sym.root"
xcrun -sdk iphoneos PackageApplication -v "$build_location/sym.root/Release-iphoneos/$appname.app" -o "$build_location/$appname.ipa" --sign "$certificate"
| true |
0b9d91c83dcdeb3ba435716dc174a13bea288e26 | Shell | zbigg/bashfoo | /memoize.sh | UTF-8 | 1,311 | 3.703125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Pull in bashfoo's temp-file module (provides bashfoo.mktemp.register).
bashfoo_require temp
normalize_name_as_file()
{
    # Replace path/brace/space/quote characters with underscores so the
    # result can be embedded safely in a file or variable name.
    local unsafe_chars="/{} \"'"
    echo "$@" | tr "$unsafe_chars" "_______"
}
# Run a command ("$@") and cache its stdout under /tmp; subsequent calls
# with the same command replay the cached output instead of re-running.
# An exit status of exactly 1 counts as failure: the output is shown and
# the cache entry discarded; any other status keeps the cache.
memoized()
{
    #set -x
    local mangled_file_name="$(normalize_name_as_file $@)"
    #TBD, consider bashfoo.mktempname (which doesn't exist yet)
    # NOTE(review): "$USER_" parses as the (likely undefined) variable
    # USER_, not "$USER" followed by '_' — confirm the intended name.
    local tmp_cached_file_name="/tmp/.memoized_$USER_$$_$mangled_file_name"
    if [ ! -f $tmp_cached_file_name ] ; then
        "$@" > $tmp_cached_file_name
        local r="$?"
        if [ "$r" = 1 ] ; then
            cat $tmp_cached_file_name
            rm -f $tmp_cached_file_name
            return $r
        else
            # Register for cleanup at exit and remember it locally too.
            bashfoo.mktemp.register "$tmp_cached_file_name"
            memoized_files="$memoized_files $tmp_cached_file_name"
        fi
        #else
        # log "reusing output for $*"
    fi
    #set +x
    cat $tmp_cached_file_name
}
# Memoize only the EXIT STATUS of a command (not its output) in a shell
# variable, using bashfoo's variable_exists/variable_set/variable_get
# helpers to manage the dynamically-named variable.
memoized_result()
{
    local mangled_file_name="bf_memoized_result_$(normalize_name_as_file $@)"
    if ! variable_exists "$mangled_file_name" ; then
        if "$@" ; then
            local r=0
        else
            local r=$?
        fi
        variable_set "$mangled_file_name" "$r"
    else
        local r="$(variable_get "$mangled_file_name")"
    fi
    return $r
}
# Remove the cached output files recorded in $memoized_files.
# NOTE(review): the rm is commented out, so this is currently a no-op
# placeholder (cleanup is instead handled via bashfoo.mktemp.register).
clean_memoize_cache()
{
    if [ -n "$memoized_files" ] ; then
        #rm -rf $memoized_files
        true
    fi
}
| true |
4b809f75a39eabd666c77386d67f88fde91fdb7e | Shell | Sabayon/build | /bin/bump_injected_kernel_mods | UTF-8 | 1,056 | 3.015625 | 3 | [] | no_license | #!/bin/sh
# Build Sabayon's external kernel-module packages for the given kernel
# version and inject the resulting binary packages into an Entropy
# repository with eit.
kernel="${1}"
if [ -z "${kernel}" ]; then
	echo "$0 <kernel>"
	echo "example: $0 2.6.35-sabayon"
	exit 1
fi
# Kernel-module ebuilds to rebuild; '=' pins an exact version and '~'
# pins a version allowing any revision.
packages="app-emulation/open-vm-tools-kmod
app-emulation/virtualbox-guest-additions
app-emulation/virtualbox-modules
app-emulation/vmware-modules
app-laptop/omnibook
app-laptop/tp_smapi
media-video/em8300-modules
net-wireless/acx
net-wireless/broadcom-sta
net-wireless/madwifi-ng
=net-wireless/ndiswrapper-1.55-r1
sys-block/iscsitarget
sys-fs/vhba
~x11-drivers/ati-drivers-10.8
net-wireless/rtl8192se
~x11-drivers/nvidia-drivers-173.14.27
~x11-drivers/nvidia-drivers-195.36.31
~x11-drivers/nvidia-drivers-96.43.18
~x11-drivers/nvidia-drivers-256.53
x11-drivers/xf86-video-virtualbox"
# Target repository; overridable from the environment.
ETP_REPO="${ETP_REPO:-sabayonlinux.org}"
# build for latest kernel
rm -rf /usr/portage/packages/*
# -B builds binary packages without installing; KERNEL_DIR selects the
# kernel tree the modules are built against.
KERNEL_DIR="/usr/src/linux-${kernel}" emerge -Bav ${packages}
if [ "${?}" != "0" ]; then
	exit 1
fi
eit inject --to ${ETP_REPO} $(find /usr/portage/packages -name "*.tbz2" | xargs echo)
echo "Now you should remove old packages, if you want of course"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.