blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
495178a2ef8edeaf7762189d2cfd0ae1afaf80de | Shell | sahilsehwag/dotfiles | /packages/tmux/install.sh | UTF-8 | 613 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env bash
script_directory=$(F_getScriptDir ${BASH_SOURCE:-$0})
! F_isSymlink "$HOME/.config/tmux" && ln -sv "$script_directory/" "$HOME/.config/tmux"
! F_isSymlink "$HOME/.tmux.conf" && ln -sv "$script_directory/tmux.conf" "$HOME/.tmux.conf"
sudo cp "$script_directory/bin/tmux-icon-name" /usr/local/bin/tmux-icon-name
sudo chmod +x /usr/local/bin/tmux-icon-name
git clone https://github.com/tmux-plugins/tpm "$HOME/.cache/tmux/plugins/tpm"
F_pkg_install tmux
F_install tmuxinator
if F_isMac; then
brew tap arl/arl
brew install gitmux
else
go install github.com/arl/gitmux@latest
fi
| true |
3696dd6ab6ba3b88c3e0d02979b883a14ced5fa0 | Shell | AshviniDeo/Assignment8 | /dictionary.sh | UTF-8 | 420 | 3.046875 | 3 | [] | no_license | counter=0
for ((i=0; i<40; i++))
do
die=$((1+RANDOM%6))
declare -A Dice
Dice[$i]=$(($die))
for ((j=$die; j<6; j++))
do
counter=$((counter+1))
arr[j]=$counter
done
if [ $counter -eq 10 ]
then
echo " ${!arr[@]} of die come maximum times"
break
elif [ $counter == 1 ]
then
echo " ${!arr[@]} of die come minimum times "
fi
done
echo "dictionary : ${Dice[@]} "
| true |
6f21875936be4097a119b1312c448f75e8076067 | Shell | ciudilo/mongodocker | /DockerWork/mongoDocker_RS1.sh.save | UTF-8 | 3,022 | 3.984375 | 4 | [] | no_license | #!/bin/bash
#CONFIGURATION
PORTRANGE=3701
NUM_MONGOD=4
WORKDIR=/space/data2
MONGODDIR=$WORKDIR/sh
MONGOCFG=$WORKDIR/configsvr
MONGOSDIR=$WORKDIR/mongos
MONGODCMD='gosu mongodb mongod -f /etc/mongod.conf'
MONGOSCMD='gosu mongodb mongos --configdb '
HOSTIP=`/sbin/ifconfig eth1 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
#CREATE DIR BASED ON TYPE
# type = mongod, configsvr, mongos
# num
createdir(){
mkdir -p $WORKDIR
TYPE=$1
NUM=$2
#Create directory (if not created)
if [ $TYPE = 'mongod' ]; then
mkdir -p $MONGODDIR$NUM/db > /dev/null
chown -R mongodb:mongodb $MONGODDIR$NUM
# chmod -R 777 $MONGODDIR$NUM
elif [ $TYPE = 'configsvr' ]; then
mkdir -p $MONGOCFG/{db,configdb} > /dev/null
chown -R mongodb:mongodb $MONGOCFG
# chmod -R 777 $MONGOCFG/
else
mkdir -p $MONGOSDIR/db > /dev/null
chmod -R 777 $MONGOSDIR
fi
if [ $? -eq 0 ]; then
echo "Directory created"
else
echo "Error creating new directories"
fi
}
#RUN CONTAINER
# name, port, number, type, cfgsvr for mongos
runCont(){
NAME=$1
PORT=$2
NUMBER=$3
TYPE=$4
echo $4
#TODO: REDIRECT OUTPUT TO /dev/null
echo "$NAME $PORT $NUMBER $TYPE"
if [ $TYPE = 'mongod' ]; then
echo "dbpath is $MONGODDIR/db"
docker run --name $NAME -p $PORT:27017 -v $MONGODDIR$NUMBER:/data mongodb $MONGODCMD --replSet rs$i
elif [ $TYPE = 'configsvr' ]; then
docker run --name $NAME -p $PORT:27019 -v $MONGOCFG:/data mongodb $MONGODCMD --configsvr
else
docker run --name $NAME -p $PORT:27017 -v $MONGOSDIR$i:/data mongodb $MONGOSCMD $5
fi
exit 0
}
stopContainers(){
#STOP All
echo "Stopping all containers..."
docker stop $(docker ps -q)
exit 0
}
clean(){
docker rm $(docker ps -aq)
exit 0
}
startRS(){
#DATA NODES
echo $NUM_MONGOD
i=1
for i in `seq 1 $NUM_MONGOD`;
do
createdir mongod $i
echo $i
#Run docker container
echo "rs$i-srv1 $PORTRANGE$i $i 'mongod'"
runCont "rs$i-srv1" "$PORTRANGE$i" "$i" 'mongod' > log/mongod$i.log 2>&1 &
#docker run --name "rs$i-srv1" -p "$PORTRANGE$i":27017 -v "/space/data/db$i":/data mongodb mongod -f /etc/mongod.conf --replSet rs$i &
#> /dev/null 2>&1 &
done
exit 0
}
startConfigSvr(){
createdir configSvr 0
#CONFIG SERVER
runCont cfgsrv1 "$PORTRANGE"0 0 configsvr > log/cfgsvr.log 2>&1 &
exit 0
}
startMongos(){
createdir mongos
#MONGOS
echo $1 $2
runCont mongos "$PORTRANGE"7 0 mongos $1:$2 > log/mongos.log 2>&1 &
exit 0
}
listContainers(){
echo "Checking containers"
sleep 5
docker ps
}
#MAIN SCRIPT
case "$1" in
setup)
mkdir log
createdir configsvr
createdir mongos
;;
startRS)
startRS
;;
startConfigSvr)
startConfigSvr
;;
startMongos)
startMongos $HOSTIP "$PORTRANGE"0
;;
list)
listContainers
;;
stop)
stopContainers
;;
clean)
clean
;;
restart)
$0 stop
$0 startRS
$0 startConfigSvr
$0 startMongos
;;
*) echo 'Usage: mongoDocker_RS1.sh {setup|startRS|startConfigSvr|startMongos|list|stop|restart}'
exit 2
;;
esac
exit 0
| true |
6c2461c5edd3005fabfa5f02ff605219469a8e72 | Shell | alexbarrera/GGR-Docker | /star-genomegenerate/genomeGenerate.sh | UTF-8 | 1,781 | 4.15625 | 4 | [] | no_license | #!/bin/bash
set -e
# This is a script for a docker image to run star to generate a genome index
# INPUTS
#
# CONT_INPUT_SJDB_GTF_FILE
# CONT_INPUT_GENOME_FASTA_FILES
# OUTPUTS
#
# CONT_OUTPUT_DIR - output directory for temp files and logs
# CONT_OUTPUT_GENOME_DIR - directory for generating STAR-formatted genome
# PARAMS
#
# CONT_PARAM_THREADS, default 32
# CONT_PARAM_SJDB_OVERHANG, default 25
# Check that variables are set
[ -z "$CONT_INPUT_SJDB_GTF_FILE" ] && echo "Error: The CONT_INPUT_SJDB_GTF_FILE variable must be set" && exit 1
[ -z "$CONT_INPUT_GENOME_FASTA_FILES" ] && echo "Error: The CONT_INPUT_GENOME_FASTA_FILES variable must be set" && exit 1
[ -z "$CONT_OUTPUT_DIR" ] && echo "Error: The CONT_OUTPUT_DIR variable must be set" && exit 1
[ -z "$CONT_OUTPUT_GENOME_DIR" ] && echo "Error: The CONT_OUTPUT_GENOME_DIR variable must be set" && exit 1
# Check that output directories are writable
[ ! -w "$CONT_OUTPUT_DIR" ] && echo "Error: output dir $CONT_OUTPUT_DIR is not writable" && exit 1
[ ! -w "$CONT_OUTPUT_GENOME_DIR" ] && echo "Error: output dir $CONT_OUTPUT_GENOME_DIR is not writable" && exit 1
# Populate defaults
THREADS="32"
SJDB_OVERHANG="25"
if [ ! -z "$CONT_PARAM_THREADS" ]; then
THREADS=$CONT_PARAM_THREADS
fi
if [ ! -z "$CONT_PARAM_SJDB_OVERHANG" ]; then
SJDB_OVERHANG=$CONT_PARAM_SJDB_OVERHANG
fi
# Build command
STAR_BIN=$(which STAR)
STAR_CMD="$STAR_BIN \
--outFileNamePrefix $CONT_OUTPUT_DIR \
--runMode genomeGenerate \
--sjdbGTFfile $CONT_INPUT_SJDB_GTF_FILE \
--sjdbOverhang $SJDB_OVERHANG \
--genomeDir $CONT_OUTPUT_GENOME_DIR \
--genomeFastaFiles $CONT_INPUT_GENOME_FASTA_FILES \
--runThreadN $THREADS"
echo
echo "Starting $0..."
echo "$STAR_VERSION"
echo "Running star:"
echo "$STAR_CMD"
sh -c "$STAR_CMD"
| true |
c49f6c42b0ac48d95319fbd5be387211e99abbb7 | Shell | paretje/dotfiles | /.pbuilder/hooks/D00experimental | UTF-8 | 437 | 2.953125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
if [ -z "$EXPERIMENTAL_DEPS" ]; then
exit
fi
echo "deb ${MIRRORSITE} experimental ${COMPONENTS}" > /etc/apt/sources.list.d/experimental.list
echo "Package: ${EXPERIMENTAL_DEPS}" > /etc/apt/preferences.d/99experimental
echo "Pin: release a=experimental" >> /etc/apt/preferences.d/99experimental
echo "Pin-priority: 500" >> /etc/apt/preferences.d/99experimental
cat /etc/apt/preferences.d/99experimental
apt-get update
| true |
037bf980d35b1b569c901b59e191b06b6937d775 | Shell | sudobash1/dotfiles | /zshrc | UTF-8 | 4,102 | 2.875 | 3 | [] | no_license | # If not running interactively, don't do anything
[[ -o interactive ]] || return
source ~/.shrc
# ------------------- OH MY ZSH ------------------- {{{
# Path to your oh-my-zsh installation.
export ZSH=$DOTFILES_REPO/oh-my-zsh
# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
#ZSH_THEME="kphoen"
#ZSH_THEME="kafeitu"
#ZSH_THEME="dpoggi"
#ZSH_THEME="gentoo"
ZSH_THEME="sudobash"
#ZSH_THEME="robbyrussell"
#ZSH_THEME="daveverwer"
# Set list of themes to pick from when loading at random
# Setting this variable when ZSH_THEME=random will cause zsh to load
# a theme from this variable instead of looking in ~/.oh-my-zsh/themes/
# If set to an empty array, this variable will have no effect.
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
# Uncomment the following line to use case-sensitive completion.
CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# You can set one of the optional three formats:
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# or set a custom format using the strftime function format specifications,
# see 'man strftime' for details.
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
ZSH_CUSTOM=$DOTFILES_REPO/zsh
# Which plugins would you like to load?
# Standard plugins can be found in ~/.oh-my-zsh/plugins/*
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(
git
vi-mode
)
source $ZSH/oh-my-zsh.sh
# }}}
fpath=($HOME/.sbr_local/share/zsh/site-functions $fpath)
compinit
# ------------------- Basic settings ------------------- {{{
# Allow ** globs and more
# See man zshexpn section `FILENAME GENERATION'
setopt extendedglob
# pound sign in interactive prompt
setopt interactivecomments
# Do not automatically cd if you provide a directory name
# without a cd
unsetopt auto_cd
# ------------------- History ------------------- {{{
HISTFILE=~/.zhistory
HISTSIZE=SAVEHIST=10000
setopt nosharehistory
#setopt sharehistory
# Add timestamp information to history
#setopt extendedhistory
# }}}
# }}}
# ------------------- Key Binding ------------------- {{{
# Shift-tab to go back in completion
bindkey '^[[Z' reverse-menu-complete
# Make home and end work as expected...
bindkey "${terminfo[khome]}" beginning-of-line
bindkey "${terminfo[kend]}" end-of-line
# Make Ctrl-R behave like bash
bindkey '^R' history-incremental-search-backward
# }}}
# ------------------- Aliases ------------------- {{{
alias reloadzsh="killall -u $(whoami) -USR1 zsh"
# }}}
# Execute the local zshrc (if exists)
[[ -f $HOME/.zshrc.local.zsh ]] && source $HOME/.zshrc.local.zsh
alias resource="source ~/.zshrc"
# vim: fdm=marker foldlevel=0
| true |
95b6c1eb80b8a1a6b0af0b812e391c2b403ed10e | Shell | rasimaliyev/MyScript | /statushttp.sh | UTF-8 | 215 | 2.96875 | 3 | [] | no_license | #! /bin/bash
status="$(systemctl is-active httpd.service)"
if [ "${status}" = "active" ]; then
echo "active"
else
systemctl start httpd.service ; tail /var/log/httpd/error_log >> error.log.httpd
exit 0
fi
| true |
2f8a261e1af1c30d850c12ca86460b0a1e1c733a | Shell | olcf/visit | /src/tools/dev/scripts/hooks/check_dos.sh | UTF-8 | 4,038 | 3.4375 | 3 | [
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause"
] | permissive | #!/bin/sh
##############################################################################
#
# Purpose: Count ctrl characters in committed files and make sure we don't
# commit files with ctrl characters.
#
# Programmer: Mark C. Miller
# Created: April 30, 2008
#
# Modifications:
#
# Mark Miller, Mon Jun 23 17:07:38 PDT 2008
# Added docs/website to skip
#
# Mark C. Miller, Tue Dec 9 00:19:04 PST 2008
# Obtain list of changed files via FLIST ($3) argument and loop
# over them via 'read' sh builtin method.
#
# Mark C. Miller, Tue Dec 9 23:11:02 PST 2008
# Re-factored a lot of skip logic to HandleCommonSkipCases. Adjusted
# main file loop to account for fact that FLIST file now includes file
# status chars as well as file name.
#
# Kathleen Bonnell, Mon Jan 26 17:16:17 PST 2009
# Added windowsbuild/ThirdParty to the skip list.
#
# Tom Fogal, Sun Mar 22 16:10:25 MST 2009
# I added a case for build_visit to be ignored.
#
# Tom Fogal, Mon Jul 20 21:12:09 MDT 2009
# Allow PNGs.
#
# Tom Fogal, Sat Aug 22 16:25:07 MDT 2009
# Allow `configure', it's autogenerated.
#
# Kathleen Bonnell, Wed Nov 17 10:20:16 PST 2010
# Added vendor_branches to skip list.
#
# Cyrus Harrison, Fri Jan 14 10:48:50 PST 2011
# Add docs to the skip list.
#
# Brad Whitlock, Tue Jul 26 10:28:47 PDT 2011
# Add releases to skip list.
#
# Brad Whitlock, Fri May 18 17:15:34 PDT 2012
# Add .rc and .in files to skip list.
#
# Brad Whitlock, Wed Jun 13 13:57:01 PDT 2012
# Skip for qtssh files.
#
##############################################################################
REPOS="$1"
TXN="$2"
FLIST="$3"
#
# Create a temp file containing the ctrl char(s) we wan't to grep for
#
ctrlCharFile=/tmp/visit_svn_hook_ctrl_M_char_$$.txt
if test -n "$TMPDIR"; then
ctrlCharFile=$TMPDIR/visit_svn_hook_ctrl_M_char_$$.txt
fi
echo -e '\r' > $ctrlCharFile
#
# Iterate over the list of files
#
while read fline; do
#
# Get file 'svnlook' status and name
#
fstat=`echo $fline | tr -s ' ' | cut -d' ' -f1`
fname=`echo $fline | tr -s ' ' | cut -d' ' -f2`
#
# Skip common cases of deletions, dirs, non-text files
#
if `HandleCommonSkipCases $fstat $fname`; then
continue
fi
#
# Filter out other cases HandleCommonSkipCases doesn't catch
#
case $fname in
*.doc)
continue
;;
*.ini)
continue
;;
*src/third_party_builtin/*|*src/common/icons/*)
continue
;;
*src/exe/*|*src/bin/*|*src/archives/*|*src/help/*)
continue
;;
*src/java/images/*|*src/tools/mpeg_encode/*)
continue
;;
*/tools/qtssh/*|*/tools/qtssh/windows/*)
continue
;;
*windowsbuild/* | *test/win32_baseline/*)
continue
;;
*.bat)
continue
;;
*.rc)
continue
;;
*.in)
continue
;;
*docs/WebSite/*)
continue
;;
*svn_bin/build_visit)
continue
;;
*svn_bin/bv_support/*)
continue
;;
*png)
continue
;;
*configure)
continue
;;
*docs/*)
continue
;;
*vendor_branches/*)
continue
;;
*/releases/*)
continue
;;
esac
#
# Using svnlook to cat the file and examine it for ctrl chars.
#
svnlook cat -t $TXN $REPOS $fname | grep -q -f $ctrlCharFile 1>/dev/null 2>&1
commitFileHasCtrlChars=$?
# If the file we're committing has ctrl chars, reject it
if test $commitFileHasCtrlChars -eq 0; then
log "File \"$fname\" appears to contain '^M' characters, maybe from dos?."
log "Please remove them before committing. Try using dos2unix tool."
rm -f $ctrlCharFile
exit 1
fi
done < $FLIST
# clean up
rm -f $ctrlCharFile
# all is well!
exit 0
| true |
da020e87e3302ed201de16b57c4ba421b6a9da85 | Shell | kayceesrk/Sting | /trunk/tests/src/pldi/benchmarks/benchmark2/b/ordinary.sh | UTF-8 | 536 | 2.90625 | 3 | [] | no_license | #!/bin/bash
debug=false # N.B. The server also debug prints.
repeat=1
args=
numargs=0
while true;
do
case $1 in
"")
break
;;
-d)
debug="true"
shift
;;
-r)
shift
repeat=$1
shift
;;
*)
args="$args $1"
numargs=$(($numargs + 1))
shift
;;
esac
done
if [ $numargs -ne 2 ]
then
echo 'Expected arguments: [port, depth]; not:' $args
exit 1
fi
bin/sessionj -cp tests/classes/ pldi.benchmarks.benchmark2.b.Ordinary $debug $args $repeat
| true |
08e7dae55dbbe49e86f5ef59fde46fb339ddac02 | Shell | jesical516/CheckAppConfAndTools | /backup/backup.sh | UTF-8 | 1,005 | 3.515625 | 4 | [] | no_license | #!/bin/bash
#TODO:
# 1. mkdir
# 2. cd
# 3. do something
# 4. delete older thing
DIR_PATH=$(cd "$(dirname "$0")"; pwd)
. $DIR_PATH/../env.sh
TODAY=b-`date +%Y-%m-%d`
FILE_SUF=tar.gz
TMP_BACKUP_DIRECTORY=$TODAY
TODAY_BACKUP_FILE=$TODAY.$FILE_SUF
BACKUP_FILE=$BACKUP_DIR/$TODAY_BACKUP_FILE
OLD_TMP_BACKUP_DIRECTORY=$BACKUP_DIR/b-`date -d '4 days ago' +%Y-%m-%d`.$FILE_SUF
if [ ! -d $BACKUP_DIR ];then
mkdir $BACKUP_DIR
fi
mkdir $TMP_BACKUP_DIRECTORY
if [ $? -ne 0 ];then
echo "build backup directory[$TMP_BACKUP_DIRECTORY] fail" 1>&2
exit -1
fi
cd $TMP_BACKUP_DIRECTORY
rsync -arLv $BACKUP_MEDIA_FILES .
if [ $? -ne 0 ];then
echo "sync $BACKUP_MEDIA_FILES fail" 1>&2
exit -1
fi
mysqldump -uwork -p12345678 check_app_production > db
if [ $? -ne 0 ];then
echo "dump mysql fail" 1>&2
exit -1
fi
cd ..
##TODO:error
tar zcf $TODAY_BACKUP_FILE $TMP_BACKUP_DIRECTORY/
mv $TODAY_BACKUP_FILE $BACKUP_FILE
rm -rf $TMP_BACKUP_DIRECTORY
echo $OLD_TMP_BACKUP_DIRECTORY
rm -rf $OLD_TMP_BACKUP_DIRECTORY
| true |
f2872b26f102938e8c2198dab7a6c114b3c7bb99 | Shell | rzh/vagrant | /gssapi/config/kerberos/install_kerberos_common.sh | UTF-8 | 1,551 | 3.34375 | 3 | [] | no_license | #! /bin/bash
set -e
set -x
echo "Exporting env variables"
export DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $DIR/config.sh
# echo "config NTP server"
# chkconfig ntpd on
echo "config firewall"
# echo "Configuring firewalld ..."
#
# following port are needed for kerberos to work
# Please make sure the following ports are opened in the firewall settings:
# TCP: 80, 88, 389
# UDP: 88 (at least one of TCP/UDP ports 88 has to be open)
# Also note that following ports are necessary for ipa-client working properly after enrollment:
# TCP: 464
# UDP: 464, 123 (if NTP enabled)
# iptables -A INPUT -p tcp -m tcp --dport 80 -j ACCEPT
# iptables -A INPUT -p tcp -m tcp --dport 88 -j ACCEPT
# iptables -A INPUT -p tcp -m tcp --dport 389 -j ACCEPT
# iptables -A INPUT -p udp -m udp --dport 88 -j ACCEPT
# iptables -A INPUT -p udp -m udp --dport 464 -j ACCEPT
# iptables -A INPUT -p tcp -m tcp --dport 464 -j ACCEPT
# open port for mongod
# iptables -A INPUT -p tcp -m tcp --dport 27017 -j ACCEPT
echo "config DNS"
echo "Configuring /etc/hosts ..."
echo "127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4" > /etc/hosts
echo "::1 localhost localhost.localdomain localhost6 localhost6.localdomain6" >> /etc/hosts
# echo "$HOST_IP_ADDR $HOST_FQDN $HOST_NAME" >> /etc/hosts
echo "config client DNS"
bash $DIR/../config_dns.sh
if [ -f /etc/redhat-release ]
then
# redhat
service iptables stop
chkconfig iptables off
elif [ -f /etc/lsb-release ]; then
# ubuntu
ufw disable
fi | true |
569a885534f2bc5fc1124c05385eb3702948bf2c | Shell | celot/luffy | /src/v741/user/rt2880_app/scripts/3g.sh | UTF-8 | 1,697 | 3.328125 | 3 | [] | no_license | #!/bin/sh
LOCK_FILE=/var/lock/LOCK.3G
#interface=`nvram_get 2860 wan_3g_interface`
interface=`module b`
if [ "$interface" = "" ]; then
interface=ttyUSB0
fi
if [ -f "$LOCK_FILE" ]; then
exit 0
else
if [ ! -f "/var/lock" ]; then
mkdir -p /var/lock/
fi
touch "$LOCK_FILE"
fi
if [ "$1" != "" ]; then
dev=$1
else
dev=`nvram_get 2860 wan_3g_dev`
fi
killall -q module
killall -q pppd
ifconfig ppp0 down
chat -v -f /etc_ro/ppp/3g/celot_disconn.scr </dev/$interface >/dev/$interface 2> /dev/null
sleep 3
#create ppp call script for 3G connection
modem_f=$interface
user=`nvram_get 2860 wan_3g_user`
pass=`nvram_get 2860 wan_3g_pass`
apn=`nvram_get 2860 wan_3g_apn`
pin=`nvram_get 2860 wan_3g_pin`
dial=`nvram_get 2860 wan_3g_dial`
apntype=`nvram_get 2860 wan_3g_apntype`
apncid=`nvram_get 2860 wan_3g_apncid`
if [ "$user" = "" ]; then
user=none
fi
if [ "$pass" = "" ]; then
pass=none
fi
if [ "$dial" = "" ]; then
dial=*98#
fi
if [ "$apn" = "" ]; then
echo "Check APN!!!"
exit 0
fi
if [ "$apntype" = "" ]; then
echo "Check APN Type!!!"
exit 0
fi
config-3g-ppp.sh -p $pass -u $user -m $modem_f -n $dial
#config-3g-ppp.sh -p $pass -u $user -m $modem_f -n $dial -c Generic_conn.scr -d Generic_disconn.scr
echo "Connectiong via $dev ..."
#set apn
apncid_n=1
case "$apncid" in
"CID1")
apncid_n=1 ;;
"CID2")
apncid_n=2 ;;
"CID3")
apncid_n=3 ;;
"CID4")
apncid_n=4 ;;
"CID5")
apncid_n=5 ;;
"CID6")
apncid_n=6 ;;
"CID7")
apncid_n=7 ;;
"CID8")
apncid_n=8 ;;
"CID9")
apncid_n=9 ;;
"CID10")
apncid_n=10 ;;
*)
apncid_n=1 ;;
module at "AT+CGDCONT=$apncid_n,\"$apntype\",\"$apn\""
#pppd call 3g&
module ppp&
rm -f $LOCK_FILE
exit 0
| true |
6a513d385d358c61328294981feee5be0a2693f5 | Shell | carze/clovr-base | /hudson/hudson-scripts/clovr_comparative.sh | UTF-8 | 1,058 | 2.984375 | 3 | [] | no_license | #!/bin/bash
set -e
source /opt/vappio-scripts/clovrEnv.sh
DATE=`date +"%m-%d-%Y-%T" | sed -e 's/:/_/g'`
vp-add-dataset --tag-name=bifidobacter_genbank_tag_$2 -o /opt/hudson/pangenome_data/bifidobacter_genbank_files/Bifidobacterium_adolescentis_ATCC_15703/AP009256.gbk /opt/hudson/pangenome_data/bifidobacter_genbank_files/Bifidobacterium_longum_infantis_ATCC_15697/CP001095.gbk
vp-describe-protocols --config-from-protocol=clovr_comparative \
-c input.GENBANK_TAG=bifidobacter_genbank_tag_$2 \
-c params.OUTPUT_PREFIX=bifidobacter \
-c params.ORGANISM="Bifidobacter sp" \
-c cluster.CLUSTER_NAME=$1 \
-c cluster.CLUSTER_CREDENTIAL=$2 \
-c cluster.TERMINATE_ONFINISH=false \
-c pipeline.PIPELINE_DESC="Hudson CloVR Comparative Test $2" \
> /tmp/$$.pipeline.conf.${DATE}
TASK_NAME=`vp-run-pipeline --print-task-name --pipeline-config /tmp/$$.pipeline.conf.${DATE} --overwrite`
if [ "$?" == "1" ]; then
echo "vp-run-pipeline failed to run"
exit 1
fi
vp-describe-task --name local --exit-code --block $TASK_NAME
exit $?
| true |
995a957a76345962061dc4cf5c2c25c788764c08 | Shell | strokirk/dotfiles | /bin/x-run-pytest-for-all-branches | UTF-8 | 660 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env bash
branches=()
while IFS='' read -r line; do branches+=("$line"); done < <(
git for-each-ref --format='%(refname)' refs/heads |
sed 's:refs/heads/::' |
grep -v master |
grep -v wip
)
RED=$(tput setaf 1)
GREEN=$(tput setaf 2)
RESET=$(tput sgr0)
for branch in "${branches[@]}"; do
echo "$branch"
git checkout "$branch" --quiet;
pytest -m unit -qq -p no:sugar -x
err=$?
if [ $err -eq 0 ]; then
echo ""
echo "${GREEN}Tjoho! 🎉${RESET}"
echo ""
else
echo ""
echo "${RED}Va?! Nee... 😥${RESET}"
echo ""
fi
done;
git checkout master
| true |
94aac97376e278aa29208e3f2e29774404f70417 | Shell | sboosali/.emacs.d | /scripts/eval.sh | UTF-8 | 2,116 | 3.359375 | 3 | [] | no_license | #!/bin/bash
set -e
#################################################
NIX_FILE=./default.nix
NIX_DIRECTORY=result
NIX_EMACS=./"$NIX_DIRECTORY"/bin/emacs
#################################################
EMACS_DIRECTORY=.emacs-interpreter.d
EMACS_FILE=./init.el
#EMACS_OPTIONS=(-q )
#EMACS_FILE="$EMACS_DIRECTORY"/init.el
#################################################
nix-build "$NIX_FILE" -o "$NIX_DIRECTORY"
#################################################
"$NIX_EMACS" --no-init-file --load "$EMACS_FILE" --chdir="$EMACS_DIRECTORY" --batch --eval "$@"
#################################################
# USAGE
#################################################
#
# $ ./eval.sh '(print `((load-file-name . ,(or load-file-name (buffer-file-name))) (default-directory . ,default-directory) (which-emacs . ,which-emacs)))'
#
# ((load-file-name)
# (default-directory . ~/.emacs.d/.emacs-interpreter.d/)
# ...)
#
########################
#
#################################################
# NOTES
#################################################
#
# `--batch`
#
# Run Emacs in batch mode.
#
# Batch mode is used for running programs written in Emacs Lisp from shell scripts, makefiles, and so on. To invoke a Lisp program, use the ‘-batch’ option in conjunction with one or more of ‘-l’, ‘-f’ or ‘--eval’.
#
# In batch mode:
# - Emacs does not display the text being edited, and the standard terminal interrupt characters such as C-z and C-c have their usual effect.
# - Emacs functions that normally print a message in the echo area will print to either the standard output stream (stdout) or the standard error stream (stderr) instead. (To be precise, functions like prin1, princ and print print to stdout, while message and error print to stderr.)
# - Functions that normally read keyboard input from the minibuffer take their input from the terminal's standard input stream (stdin) instead.
#
# See:
#
# - https://www.gnu.org/software/emacs/manual/html_node/emacs/Initial-Options.html
#
#
########################
#
#
#
################################################# | true |
379d275fc205b06cb392231207f190756c44f021 | Shell | hard-chain/docker-pyeth-dev | /miner/start.sh | UTF-8 | 758 | 3.015625 | 3 | [] | no_license | echo "Running filebeat service"
service filebeat start
echo "BOOTSTRAP_NODE is $BOOTSTRAP_NODE"
if [ -z "$BOOTSTRAP_NODE" ]; then echo "BOOTSTRAP_NODE must be set" && exit 1; fi
openssl rand -hex 32 > /root/.config/pyethapp/privkey.hex
export PRIVKEY=`cat /root/.config/pyethapp/privkey.hex | awk '{print $1}'`
echo "Generated random private key: $PRIVKEY"
perl -pi -e "s/PRIVKEY/$PRIVKEY/" /root/.config/pyethapp/config.yaml
echo "Creating new account"
/usr/local/bin/pyethapp --password /root/.config/pyethapp/password.txt account new
sleep $SLEEPTIME
echo "Launching node with mine amt: $MINE_PERCENT"
/usr/local/bin/pyethapp -m $MINE_PERCENT -l eth.chain:info,eth.chainservice:info,eth.validator:info --log-file /root/log/log.txt -b $BOOTSTRAP_NODE run
| true |
7ba7aa3ff0f2e7d21a7e35e8841d0ada6c8e65d8 | Shell | mnitchev/dotfiles | /util/bin/pass | UTF-8 | 647 | 4.03125 | 4 | [] | no_license | #!/bin/bash
bold=$(tput bold)
normal=$(tput sgr0)
red="\033[31m"
active_socket=$(basename $(readlink $(gpgconf --list-dirs agent-socket)) | sed -e "s/^S.gpg-agent-//")
passbin=$(which -a pass | grep -v $HOME | head -1)
if [ ! -f "$passbin" ]; then
echo Pass does not seem to be installed!
exit 1
fi
exit_code=1
echo 1>&2 gpg: using the $active_socket socket
for attempt in {1,2,3}; do
"$passbin" $@
exit_code="$?"
if [[ "$exit_code" -eq 2 ]]; then
echo -e "${red}${bold}Bad Passphrase (attempt $attempt of 3)${normal}" >&2
continue
fi
if [[ "$exit_code" -ge 0 ]]; then
exit "$exit_code"
fi
done
exit "$exit_code"
| true |
7e377dd940792ac6306be634806e82c17e16d1e3 | Shell | dbochicchio/Vera-Decouple | /recouple.sh | UTF-8 | 6,436 | 3.390625 | 3 | [] | no_license | #!/bin/sh
# ------------------------------------------------------------------------------
#
# recouple.sh -- Shell script to recouple Vera after a previous decouple.sh
# Copyright (C) 2020,2021 Patrick H. Rigney (rigpapa), All Rights Reserved
#
# Please see https://github.com/toggledbits/Vera-Decouple
#
# ------------------------------------------------------------------------------
_VERSION=21116
askyn() {
local __ans
local __resultvar
__resultvar="${1:-ans}"
while true; do
echo -e -n "${2:-}"
read __ans
case "$__ans" in
[Yy]* )
eval "${__resultvar}=Y"
break
;;
[Nn]* )
eval "${__resultvar}=N"
break
;;
esac
echo "Please answer Y or N."
done
}
echo ""
echo "Running recouple.sh version $_VERSION"
echo ""
echo "Before we begin, check Github to see if a newer version of this script is"
echo "available. If so, you should download it, and then run that, particularly"
echo "if it has been some time since you decoupled."
echo "See https://github.com/toggledbits/Vera-Decouple/releases"
askyn ans "Continue running with this version [Y/N]? "
[ "$ans" == "Y" ] || exit 0
SAVEDIR=${SAVEDIR:=/root/.decouple-saved}
FORCE=0
if [ "${1:-}" == "-f" ]; then
shift
FORCE=1
fi
if [ $FORCE -eq 0 ]; then
if [ ! -d ${SAVEDIR} ]; then
cat <<-EOF1
$0: ${SAVEDIR} not found
Can't restore automatically. If you have a saved copy of the directory, please
restore it to the path above and run again. If you don't have the save data,
you can run this script with the "-f" flag and it will install system default
versions of the config files, or you can do a factory reset of the device.
EOF1
exit 255
fi
if [ ! -f ${SAVEDIR}/decouple-version ]; then
cat <<-EOF2
$0: ${SAVEDIR} invalid/incomplete.
The directory does not appear to have a decouple save in it. Make sure there
is a / at the end of the pathname shown above. If not, please fix the setting
used on the command line. If the path is correct, your decouple may not have
finished. You can run this script with "-f" to force recouple; any missing
configuration will be replaced with factory defaults.
EOF2
exit 255
fi
if [ -f ${SAVEDIR}/recoupled ]; then
cat <<-EOF4
$0: It appears you have already recoupled this system. You can force this
script to run by using the -f option, but be warned that if it has been some
time since the recouple, configuration files may have been changed auto-
matically by the cloud services, and the recouple will reverse that, poten-
tially installing servers that no longer exist in Vera's cloud. That risk is
low, but present. Proceed with -f at your own risk! Here's the recouple time:
EOF4
ls -l ${SAVEDIR}/recoupled
exit 255
fi
fi
echo "Restoring default (cloud) NTP time servers..."
uci delete system.ntp.server
while uci delete ntpclient.@ntpserver[-1] >/dev/null 2>&1; do n=0; done
for s in 0.openwrt.pool.ntp.org 1.openwrt.pool.ntp.org; do
key=$(uci add ntpclient ntpserver)
uci set ntpclient.$key.hostname=$s
uci set ntpclient.$key.port=123
uci add_list system.ntp.server=$s
done
uci commit ntpclient
uci commit system.ntp
/etc/init.d/sysntpd restart
/etc/init.d/ntpclient restart
echo "Restoring default DNS servers (Google DNS)..."
uci delete dhcp.@dnsmasq[0].server
for s in 8.8.8.8 8.8.4.4; do
uci add_list dhcp.@dnsmasq[0].server="$s"
done
uci commit dhcp
/etc/init.d/dnsmasq restart
log=$(uci -q get system.@system[0].log_ip)
if [ -n "$log" ]; then
echo ; echo "You have enabled remote system logging (via syslog) to $log."
askyn keep_log "Continue remote syslog [y/n]? "
if [ "$keep_log" == "N" ]; then
uci delete system.@system[0].log_ip
uci delete system.@system[0].log_proto
uci delete system.@system[0].log_port
uci commit system
/etc/init.d/log restart
echo "Remote syslog now disabled."
fi
fi
echo "Re-enabling NetworkMonitor..."
cp -p /mios/etc/init.d/check_internet /etc/init.d/
ln -sf /mios/usr/bin/* /usr/bin/
keep_local_backup=N
if fgrep 'decouple_daily_backup.sh' /etc/crontabs/root >/tmp/decouple-cron ; then
echo ; echo "Local daily backups are enabled. Recoupling will restart the cloud backups to"
echo "Vera/MiOS/eZLO, but you have the option of continuing the local backups simul-"
echo "taneously or disabling them."
askyn keep_local_backup "Keep doing the daily local backups [y/n]? "
fi
echo "Restoring root's crontab..."
if [ ! -s ${SAVEDIR}/crontab-root ]; then
crontab -u root /mios/etc/crontabs/root
else
crontab -u root ${SAVEDIR}/crontab-root
fi
if [ "${keep_local_backup}" == "Y" ]; then
cat /tmp/decouple-cron >>/etc/crontabs/root
fi
echo "Restoring remote access and cloud services..."
if [ ! -s ${SAVEDIR}/servers.conf ]; then
cp /mios/etc/cmh/servers.conf /etc/cmh/ || exit 1
else
cp ${SAVEDIR}/servers.conf /etc/cmh/ || exit 1
fi
if [ -f /mios/usr/bin/mios-service-servers_sync.sh ]; then
echo "Recovering server list..."
/mios/usr/bin/mios-service-servers_sync.sh
fi
cp ${SAVEDIR}/services.conf /etc/cmh/ || {
echo "Recovering /etc/cmh/services.conf..."
/usr/bin/Report_AP.sh
}
# Force relay on
sed -i 's/Permissions_Relay=.*/Permissions_Relay=1/' /etc/cmh/services.conf
# Note we don't undo the move of dropbear because it's harmless and actually,
# an improvement Vera themselves should have made.
# Restore provisioning at boot
if [ -z "$(ls -1 /etc/rc.d/S*-provision_vera* 2>/dev/null)" ]; then
# Note on Edge is /mios, the .sh is missing; present on Plus
cp -P /mios/etc/rc.d/S*-provision_vera* /etc/rc.d/
fi
if [ -z "$(ls -1 /etc/rc.d/S*-cmh-ra 2>/dev/null)" ]; then
cp -P /mios/etc/rc.d/S*-cmh-ra /etc/rc.d/
fi
# NB: Not replacing mios_fix_time, since its brokenness is regressive.
# And our own boot script
rm -f /etc/init.d/decouple /etc/rc.d/S*decouple
[ -f ${SAVEDIR}/servers.conf ] && touch ${SAVEDIR}/recoupled
cat <<EOF3
Done! Cloud service configuration has been restored.
* The changes do not take effect until you have completed a full reboot.
* Once you have verified that the system is working satisfactorily, you
may delete the decouple save directory (recommended); it is:
${SAVEDIR}
Reboot your Vera now by typing: /sbin/reboot
EOF3
exit 0
| true |
3d40adb2d26ccaa5c3e2f0306cbb5f54dfec96ab | Shell | pvsr/CS470-EP | /timing/nonpref/strong/list.sh | UTF-8 | 331 | 2.53125 | 3 | [] | no_license | #!/bin/sh
for n in 1 2 4 8 16 32; do
echo ====$n==== | tee -a list_strong.results
salloc -Q -n $n mpirun ../../../votecounter $n -m list -s 300 -T | grep count | tee -a list_strong.results
salloc -Q -n $n mpirun ../../../votecounter $n -m list -s 300 -T | grep count | tee -a list_strong.results
done
mv list_strong.results ..
| true |
92b7a4b714303b88c9b5efd3a3db7894ee67aabb | Shell | abhinav-webonise/linux_assignment | /ass3/ass3.sh~ | UTF-8 | 136 | 2.53125 | 3 | [] | no_license | #!/bin/bash
# List regular files in the current directory whose names start with "x"
# (case-insensitive).
#
# Fixes two bugs in the original invocation:
#   * -maxdepth 0 only examines "." itself (a directory), so combined with
#     -type f it could never match anything; -maxdepth 1 inspects the
#     directory's entries without recursing.
#   * -iname takes a shell glob, not a regex, so '^x' matched only a file
#     literally named "^x"; the glob 'x*' expresses "starts with x".
find . -maxdepth 1 -type f -iname 'x*'
# Draft rename loop kept from the original for reference (still disabled):
#for f in ls | grep "^x";
#do
#mv "$f" "$changed" #"$(basename "$f").text"
#done
| true |
39290690994fc9d562d077321db9015b2d0de651 | Shell | cherylling/OS-kerneltest | /测试套/debug_oprofile_t/debug_oprofile_t_src/testcase/opreport/opreport-sd.sh | UTF-8 | 1,254 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# OProfile regression test for opreport --session-dir: verify that opcontrol
# honours --session-dir by writing its sample database under /tmp/lo, and
# that opreport accepts the --session-dir option (passed twice on purpose).
# Helpers used below (msg, setenv, do_clean) come from the sourced conf.sh.
set -x
. ../conf/conf.sh
do_test(){
msg info "do_test..."
# Start from a clean session directory.
rm /tmp/lo -rf
opcontrol --session-dir=/tmp/lo
opcontrol --setup --no-vmlinux
if [ $? -ne 0 ];then
msg fail "opcontrol --session-dir=/tmp/lo place sample database in dir /tmp/lo fail"
RET=$((RET+1))
return $RET
fi
# Start the profiling daemon / data collection.
opcontrol -s
if [ $? -ne 0 ];then
msg fail "opcontrol -s start data collection fail"
# NOTE(review): -h is presumably opcontrol's short form of --shutdown
# (stop the daemon) used here for cleanup -- confirm against opcontrol(1).
opcontrol -h
rm -rf /tmp/lo
RET=$((RET+1))
return $RET
fi
# The daemon log appearing under /tmp/lo proves --session-dir took effect.
if [ ! -f /tmp/lo/samples/oprofiled.log ];then
msg fail "opcontrol --session-dir=/tmp/lo place sample database in dir /tmp/lo fail"
opcontrol -h
rm -rf /tmp/lo
RET=$((RET+1))
return $RET
fi
msg pass "opcontrol --session-dir=/tmp/lo place sample database in dir /tmp/lo pass"
# Flush collected samples so opreport has data to read.
opcontrol --dump
if [ $? -ne 0 ];then
msg fail "opcontrol --dump fail"
opcontrol -h
rm -rf /tmp/lo
RET=$((RET+1))
return $RET
fi
# --session-dir is given twice, apparently to exercise repeated-option
# handling in opreport.
opreport --session-dir=/tmp/lo --session-dir=/tmp/lo
if [ $? -ne 0 ];then
msg fail "opreport --session-dir=/tmp/lo --session-dir=/tmp/lo fail"
opcontrol -h
rm -rf /tmp/lo
RET=$((RET+1))
return $RET
fi
msg pass "opreport --session-dir=/tmp/lo --session-dir=/tmp/lo pass"
# Normal-path cleanup: stop the daemon and remove the session directory.
opcontrol -h
rm -rf /tmp/lo
}
RET=0
# setenv prepares the oprofile environment; only run the test if it succeeds.
setenv && do_test
do_clean
exit $RET
| true |
8ba133eace953569876c11a101259a8ce4ced052 | Shell | jeyzshan/nagios-install-1 | /nrpe-install.sh | UTF-8 | 2,355 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Print the standard usage line for this installer.
helpMe() {
    # output standard usage
    echo -e "Usage: $(basename "$0") nagios_serv_private_ip"
}
# check user args: the Nagios server's private IP is required as $1 so this
# node can whitelist it in nrpe.cfg. With no arguments, show usage and exit
# (bare "exit" reuses the last command's status, here helpMe's).
if [[ $# == 0 ]]; then
helpMe;
exit;
else
master_priv_ip=$1;
fi
# install nrpe
# Refresh the APT index, then install the Nagios plugin set and the NRPE
# agent non-interactively (Debian/Ubuntu hosts only).
function installNRPE(){
sudo apt-get update;
sudo apt-get install nagios-plugins nagios-nrpe-server -y;
}
# configure NRPE
# Edit /etc/nagios/nrpe.cfg for this host:
#   * allow the Nagios server (script argument, $master_priv_ip) to query us
#   * scale the check_load warn/crit thresholds to the host's CPU count
#   * bind the agent to the droplet's private IP (DigitalOcean metadata API)
#   * point the disk check at /dev/vda1, then restart the agent
function configureNRPE(){
    # Append the Nagios server to the allowed_hosts whitelist.
    sudo sed -i "/allowed_hosts=127.0.0.1/s/$/,${master_priv_ip}/" /etc/nagios/nrpe.cfg;
    # get number of CPUs (one "model name" line per logical CPU)
    num_cpu=$(grep -c "model name" /proc/cpuinfo);
    # Load averages are system-wide, so the acceptable load scales with the
    # core count; thresholds below were chosen per machine size.
    # (Removed the unused old_load_setting variable from the original.)
    case $num_cpu in
        1 )
            sudo sed -i '/^command\[check_load\]/c\command[check_load]=/usr/lib/nagios/plugins/check_load -w 1.0,0.7,0.6 -c 1.2,1.0,0.8' /etc/nagios/nrpe.cfg;
            ;;
        2 )
            sudo sed -i '/^command\[check_load\]/c\command[check_load]=/usr/lib/nagios/plugins/check_load -w 2.0,1.4,1.2 -c 2.4,2.0,1.6' /etc/nagios/nrpe.cfg;
            ;;
        4 )
            sudo sed -i '/^command\[check_load\]/c\command[check_load]=/usr/lib/nagios/plugins/check_load -w 4.0,3.2,3.0 -c 4.6,4.0,3.4' /etc/nagios/nrpe.cfg;
            ;;
        8 )
            sudo sed -i '/^command\[check_load\]/c\command[check_load]=/usr/lib/nagios/plugins/check_load -w 8.0,5.6,4.8 -c 8.8,8.0,6.4' /etc/nagios/nrpe.cfg;
            ;;
        12 )
            sudo sed -i '/^command\[check_load\]/c\command[check_load]=/usr/lib/nagios/plugins/check_load -w 12.0,8.4,7.2 -c 14.4,12.0,9.6' /etc/nagios/nrpe.cfg;
            ;;
        16 )
            sudo sed -i '/^command\[check_load\]/c\command[check_load]=/usr/lib/nagios/plugins/check_load -w 16.0,11.2,9.6 -c 19.2,16.0,12.8' /etc/nagios/nrpe.cfg;
            ;;
        20 )
            sudo sed -i '/^command\[check_load\]/c\command[check_load]=/usr/lib/nagios/plugins/check_load -w 20.0,14.0,12 -c 24.0,20.0,16.0' /etc/nagios/nrpe.cfg;
            ;;
        * )
            # Previously an unmatched core count fell through silently and
            # kept the stock thresholds; now we at least say so on stderr.
            echo "configureNRPE: no check_load thresholds defined for ${num_cpu} CPUs; keeping defaults" >&2
            ;;
    esac
    #get private IP
    node_priv_ip=$(curl -s http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address);
    #configure server address
    sudo sed -i "s/#server_address=127.0.0.1/server_address=${node_priv_ip}/1" /etc/nagios/nrpe.cfg;
    # Droplets expose their root disk as vda1, not the stock hda1.
    sudo sed -i "s&/dev/hda1&/dev/vda1&" /etc/nagios/nrpe.cfg;
    sudo service nagios-nrpe-server restart;
}
# Run the two phases in order: package installation, then host-specific
# configuration (which also restarts the agent).
installNRPE;
configureNRPE;
| true |
94a3861909a06985fe4546881546226ed5e68367 | Shell | rafal321/Linux_ShellScripting | /templateA1.sh | UTF-8 | 8,789 | 3.625 | 4 | [] | no_license | #!/bin/bash
# ----GIT--------------
#git test 2
# --- git notes: the bare prose lines from the original are now comments so
# --- this snippet no longer tries to execute them as commands.
# Commit with the message taken from a file:
MSG=`cat /mnt/c/AA_git/est2.txt`
git commit -m "${MSG}"
# Multi-line commit message (JIRA: 0000 / Description: Commit from web):
git commit -m $'JIRA: 0000 \nDescription: xxx'
# Undo the last commit:
git reset --hard HEAD~1
# Return to any previous revision:
git reset --hard 0ad5a7a6
# ------------------
# ZZ - przydalo mi sie w pracy
# Autobot - do wprowadzenia
#https://www.youtube.com/watch?v=zWVV31NYi1U
#git: https://www.youtube.com/watch?v=9cMWR-EGFuY&t=274s
echo "Hi there"
# ==== read user input ====
echo "Enter names "
read name1 name2 name3
echo "Names are: ${name1}, ${name2}, ${name3}"
read -p "Enter name: "
read -sp "Enter Password: "
# --- read an array ---
echo "Enter names:"
read -a names
echo "Names are: ${names[0], $names[1]}"
# --- build in variable REPLY --
echo "Enter name: "
read
echo "Name: $REPLY"
# === pass argument to a script ===
echo $1 $2 $3 ' > echo $1 $2 $3'
#---store in array - 36:00 -
args=("$@")
echo ${args[0]} ${args[1]} ${args[2]}
#---read array, output array
args=("$@")
echo $@
echo $# # outputs no of arguments we've passed
######################################
# === if statement 40:10 ===
#Arithmetic operators ----------
#-lt [ "$a" -lt "$b" ]
#(<) (("$a" < "$b"))
#-gt (>)
#-le (<=)
#-ge (>=)
#-eq (==)
#-ne (!=)
#string comparison -------------
# ==, !=, = if [ "$a" = "$b" ], [
# <, >, -z (string is null) [["$a" > "$b"]]
#use double angle bracket if with string you use >< ...
count=10
if [ $count -eq 9 ]; then
echo "it is alse"
else
echo "it is true"
fi
word=a
if [[ $word == "b" ]]
then
echo "it is false"
elif [[ $word == "a" ]]
then
echo "it is true"
else
echo "something"
fi
####################################
# === file test operators 53:20 ===
# if file exists -e
# there are two types of files
# text -c ;blob (binary, pic, video) -b
#if directory exist -d
# if file is empty -s
# e enable iterpret \
echo -e "Enter file name: \c"
read file_name
if [ -e $file_name ]; then
echo "$file_name found"
else
echo "$file_name not found"
fi
##########################################
#01:03:45 - How to append output to the end of text file
echo -e "Enter file name: \c"
read file_name
if [ -e $file_name ]; then
if [ -w $file_name ]; then
echo "Type some data. ctr+d to quit"
cat >> $file_name
else
echo "File do not have write premissions"
fi
else
echo "$file_name not exists"
fi
######################################
#01:14:26 - Logical 'AND' Operator
age=25
# if [ "${age}" -gt 18 -a "${age}" -lt 30 ]; then
# if [[ "${age}" -gt 18 && "${age}" -lt 30 ]]; then
if [ "${age}" -gt 18 ] && [ "${age}" -lt 30 ]; then
echo "valid age"
else
echo "age not valid"
fi
#or operator:] || [, -o, [[ || ]]
######################################
# 1:26:35 10 - Perform arithmetic operations
num1=20
num2=5
echo $(( num1 + num2 ))
#expr comand
echo $(expr $num1 + $num2 )
#only for multiplication use
echo $(expr $num1 \* $num2)
#---- operations on decimals -------
#we have to use BC
# see: man BC
num1=20.5
num2=5.1
echo "20.5+5" | bc
echo "20.5-5" | bc
echo "20.5*5" | bc
echo "scale=2;20.5/5" | bc
echo "20.5%5" | bc
#scale is used with division
# two decimal points
echo "$num1+$num2" | bc
# square root
num=27
echo "scale=2;sqrt($num)" | bc -l
#-l loads bc's standard math library of functions.
#power
echo "scale=2;3^3" | bc -l
############################
# 01:46:06 12 - The case statement
#$1 value taken from console
vechicle=$1
case $vechicle in
"car" )
echo "Rent of $vechicle is 100$" ;;
[vV][aA][nN])
echo "Rent of $vechicle is 150$" ;;
"bike" )
echo "Rent of $vechicle is 15$" ;;
* )
echo "Unknown vechicle" ;;
esac
############################
# 02:02:26 14 - Array variables
os=('ubuntu' 'windows' 'kali')
echo "${os[@]}"
echo "${os[1]}"
echo[3]='mac' #add to array
echo "${!os[@]}" #prints indexes
echo "${#os[@]}" #print lenght
unset os[2] #remove
#gaps in array are ok
############################
#02:12:51 15 - WHILE Loops
counter=1
for x in ${os[*]}
do
echo "$counter) $x."
#((counter++)) arithmetic expansion
let "counter++"
done
n=1
#while (( $n <= 10 ))
while [ "$n" -le 4 ]; do
echo "$n"
n=$(( n+1 )) #$(( n++ ))
sleep 0.5
gmome-terminal & # it opens terminal
done
############################
#02:23:59 17 - Read a file content in Bash
#there ate several ways to do it
# 1]
#p is variable where you save the content (can be anything)
while read p
do
echo $p
done < hello.txt
# 2]
cat hello.txt | while read p
do
echo $p
done
#if special caracters above methods is cosing problems
# 3]
# IFS - internal field separator
#-r prevents \ from being interpreted
#after IFS it is empty space or IFS=' '
while IFS= read -r p
do
echo $line
done < /etc/passwd
#######################
# 02:31:15 18 - UNTIL loop
# if condition is false then it is executed
# oposite to while
n=1
# or until (( $n > 10 ))
until [ $n -ge 10 ]; do
echo $n
n=$(( n+1 ))
done
#######################
# 02:35:38 19 - FOR loop
for VARIABLE in 123456 .. N
for VARIABLE in file1 file2
for VARIABLE in $(Linux command here)
for VARIABLE in (( EXP1, EXP2, EXP3 ))
#for i in 1 2 3 4 5
#for i in {1..10}
for i in {1..10..2}; do #increment by 2
echo $i
done
for (( i=0; i<5; i++ )); do
echo ">> $i"
done
###########################
# 02:44:08 20 - use FOR loop to execute commands
for command in ls pwd date
do
echo "---$command---"
$command
done
# * iterate over every file/dir in current dir
# and print if directories only
for i in *
do
if [ -d $i ] #if [ -f $i ] for files only
then
echo $i
done
###################
# 02:50:44 21 - Select loop
#used when menu is needed
# used with case
select name1 in mark john james; do
echo ">> $name1 selected"
done
# here we can implement complex logic based on select loop:
select name2 in bob alex elen; do
case $name2 in
bob )
echo "mark selected" ;;
alex )
echo "alex selected" ;;
elen )
echo "elen selected" ;;
*)
echo "select btwn 1..4"
esac
done
##############################
# 02:57:41 22 - Break and continue
for (( i=1; i<=10; i++ )); do
if [ "$i" -gt 8 ]; then
break
fi
echo "$i"
done
# -o or operator
# anything after continue will be skipped
for (( i=1; i<=10; i++ )); do
if [ "$i" -eq 3 -o "$i" -eq 6 ]; then
continue
fi
echo ">> $i"
done
##########################
# 03:04:11 23 - Functions
# you can skip function word
function hello(){
echo "hi there"
}
hello
exit #exits script
hello
# $1 means 1st argument etc
function print(){
echo $1 "xx" $2 $3
}
print bobo abab cece
print Hello
########################
# 03:13:48 24 - Local variables
#all variables are global unless
#local keyword is used
function print(){
local name=$1
echo "the name is $name"
}
name="Tom"
echo "The name is $name : Before"
print Max
echo "The name is $name : After"
########################
# 03:22:09 25 - Function Example
#if file exists or not
function usage(){
echo "You need to provide an argument : "
echo "usage : $0 file_name"
}
# Return 0 when the given path names a regular file, 1 otherwise --
# written as an explicit if/else instead of the && / || chain.
is_file_there() {
    local file="$1"
    if [[ -f "$file" ]]; then
        return 0
    else
        return 1
    fi
}
# $# - will give us number of arguments
[[ $# -eq 0 ]] && usage
if ( is_file_there "$1" ); then #$1 arg providet to script
echo "File Found"
else
echo "File not Found"
fi
########################
# 03:34:16 26 - Readonly command
var=31
readonly var
var=50
echo "var => $var"
#function also can be readonly
hello() {
echo "Helo Universe"
}
readonly -f hello
hello() {
echo "Helo Universe Again"
}
#lists read only functions/variables
readonly -f readonly -p
######################################
# 03:41:34 27 - Signals and Traps
trap "echo Exit signal detected" SIGINT
#SIGKILL SIGSTOP do not apply
# $$ pid of current shel scrip
echo "pid is $$"
while (( COUNT < 10 )); do
sleep 10
(( COUNT++ ))
echo $COUNT
done
exit 0
#exit script with signal 0 success
#man 7 signal
# kill -9 procesID
file=/home/raf/test.file
trap "rm -f $file && echo File deleted; exit" 0 2 15
04:03:08 28 - How to debug a bash script
| true |
f51328fbbdb20c66f37679cbac779bcb2e418075 | Shell | zenotec/meta-zenotec | /setup-env.sh | UTF-8 | 148 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# Usage: setup-env.sh <machine>
# Requires exactly one machine name, exports it as MACHINE, then sources
# the companion script (this file's path minus its ".sh" suffix) with a
# "build-<machine>" argument.
if [ ${#} -ne 1 ]; then
    echo "Please specify machine name" >&2
    exit 1
fi
export MACHINE=${1}
# ${0%.sh} strips the suffix only at the end of the path; the original
# ${0/.sh} replaced the first ".sh" anywhere, which would mangle paths
# such as /opt/foo.sh/setup-env.sh.
source "${0%.sh}" "build-${MACHINE}"
exit 0
| true |
3265409a3d81c08f45d8d98b497c65310f898c8f | Shell | guido-sofer/mantis-automation | /docker/mining-bootstrap/build.sh | UTF-8 | 328 | 2.84375 | 3 | [] | no_license | #!/bin/bash
# Build one local docker image per mining bootstrap node (1..5), staging
# each node's key file next to the Dockerfile for the duration of the build.
cd "$(dirname "$0")"
for idx in {1..5}; do
    echo "Processing bootstrap-${idx}"
    echo "Copying node.key"
    cp ../../nomad/persistance/bootstrap-${idx}/dot.mantis/pottery/node.key .
    echo "Building docker image"
    sudo docker build -t mining-bootstrap-${idx}:local .
    echo "Removing node.key"
    rm node.key
done
| true |
87b9f73e2837213d9e9640d02ba606abbdf9910e | Shell | rgbcpu/Random | /nmapcleaner.sh | UTF-8 | 206 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Clean up an nmap "grepable" (-oG) scan in place: drops "Status:" lines,
# turns the "///" field separators into newlines, puts "Ports:" and
# "Host:" on their own lines, and replaces commas with "*" bullets.
if [ -z "$1" ]; then
    echo "[*] Greppable NMAP Cleaner"
    # Fixed the mismatched bracket of the original usage line ("[*}").
    echo "[*] Usage: $0 <greppable scan>"
    exit 0
fi
# Quote the filename so paths with spaces survive.
sed -i '/Status:/d;s/\/\/\//\n/g;s/Ports:/\nPorts:\n*/g;s/Host:/\nHost:/g;s/,/*/g' "$1"
| true |
f8c535c6e14be3d2ed15f2af4dc1b31ca9f27a3a | Shell | atasky/installer | /apps/dokuwiki-latest-offisrc-nginx | UTF-8 | 1,820 | 3.65625 | 4 | [] | no_license | #!/bin/bash
# DokuWiki installer: downloads the pinned release, unpacks it under
# /var/www/html, locks down permissions, installs an nginx vhost, and seeds
# a welcome page pointing at install.php. Helper commands used below
# (checkTagExist, log, curlDownload, waitOrStop, rootDir, descriptionAppend,
# tagScript) are apparently provided by the sourced include/startup.sh
# framework -- they are not defined in this file.
if [ -f "include/startup.sh" ]; then
. include/startup.sh
elif [ -f "../include/startup.sh" ]; then
. ../include/startup.sh
fi
# Require the earlier nginx/php/ssl stages to have completed successfully.
checkTagExist nginx.success
checkTagExist php.success
checkTagExist nginx-enable-php-ssl.success
# Pinned DokuWiki release (date-stamped stable tarball).
versionFilename=2018-04-22b
appPath=/var/www/html
rootDir=$(rootDir)
echo "Installing DokuWiki" | log
echo "Downloading DokuWiki from original repo" | log
cd $rootDir/temp
curlDownload https://download.dokuwiki.org/src/dokuwiki/dokuwiki-$versionFilename.tgz
mkdir -p $appPath
mv dokuwiki-$versionFilename.tgz $appPath
echo "Extracting DokuWiki to ${appPath}/" | log
# --strip 1 drops the versioned top-level directory from the tarball.
tar -xzvf $appPath/dokuwiki-$versionFilename.tgz -C $appPath/ --strip 1
waitOrStop 0
rm -f $appPath/dokuwiki-$versionFilename.tgz
echo "Setting directory permissions" | log
# Only the web-server user may access the data/config/include trees.
chown -R www-data.www-data $appPath/data/
chown -R www-data.www-data $appPath/conf/
chown -R www-data.www-data $appPath/inc/
chmod 0700 $appPath/data/
chmod 0700 $appPath/conf/
chmod 0700 $appPath/inc/
echo "Creating nginx configuration" | log
#copy configs to nginx folder
cp $rootDir/tweaks/extras/dokuwiki-nginx/dokuwiki-nginx-config /etc/nginx/sites-available/dokuwiki
# Symlink DokuWiki vhost
ln -s /etc/nginx/sites-available/dokuwiki /etc/nginx/sites-enabled/dokuwiki
# Remove default vhosts
unlink /etc/nginx/sites-enabled/default
unlink /etc/nginx/sites-enabled/default-ssl
systemctl restart nginx
echo "Creating Welcome Message" | log
# Unquoted EOF: ${CWM_SERVERIP} expands when the page is written.
cat << EOF > ${appPath}/data/pages/start.txt
====== You are almost done ======
Please proceed to [[https://${CWM_SERVERIP}/install.php|this link]] for additional configuration
EOF
chown www-data:www-data $appPath/data/pages/start.txt
echo "Adding descriptions" | log
descriptionAppend "DokuWiki Web UI: https://${CWM_SERVERIP}"
descriptionAppend " "
tagScript success
exit 0
| true |
5fecca3ec5e51350a588fb90385b21a810b8ea62 | Shell | manuelmarcano22/tmuxmlbbar | /gamesstatusbar.sh | UTF-8 | 104 | 2.53125 | 3 | [] | no_license | #!/bin/bash
# tmux status-bar helper: make sure the MLB games script is running,
# starting it in the background if it is not.
if pgrep -x 'games.py' > /dev/null;
then
# Already running -- nothing to do.
exit 0
else
# NOTE(review): pgrep -x matches the exact process *name*; a script started
# as "python games.py" typically shows up as "python", so this check may
# only work when games.py runs via its own shebang -- confirm (pgrep -f
# would match the full command line instead).
python $HOME/tmux/games.py &
fi
| true |
c69c889eb19a588531f09a65a700c590bdc4237f | Shell | alvin6666/myscripts | /ping/ping.sh | UTF-8 | 179 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# Report reachability of every host listed (whitespace-separated) in
# ping.txt. $(< file) reads the file without spawning cat; the expansion is
# deliberately unquoted so each whitespace-separated token becomes one host.
for ip in $(< ping.txt)
do
    # 3 probes, 0.2s apart, 3s timeout; output discarded -- only the exit
    # status matters.
    if ping -c 3 -i 0.2 -W 3 $ip &> /dev/null
    then
        echo "host $ip is up"
    else
        echo "host $ip is down"
    fi
done
| true |
749470afd02274bbaa5997b9cd3c871800786943 | Shell | lincore81/dotfiles | /.local/bin/optiswitch | UTF-8 | 190 | 2.640625 | 3 | [] | no_license | #!/bin/sh
# Flip optimus-manager to whichever GPU mode is not currently active:
# nvidia -> intel, anything else (including intel) -> nvidia.
case $(optimus-manager --print-mode | grep -o "intel\|nvidia") in
    nvidia) nmode="intel" ;;
    *) nmode="nvidia" ;;
esac
optimus-manager --switch $nmode --no-confirm
| true |
ba7e109b9ee5eb3c8e573fdc77fc85f206dd8733 | Shell | VladVons/sh-conf | /pkg/console/sysbench/script.sh | UTF-8 | 1,273 | 2.984375 | 3 | [] | no_license | #!/bin/bash
#--- VladVons@gmail.com
# https://wiki.mikejung.biz/Sysbench
source $DIR_ADMIN/conf/script/system.sh
# CPU benchmark: time prime computation up to 20000.
CPU() {
    sysbench --test=cpu --cpu-max-prime=20000 run
}
# File I/O benchmark: prepare a 16G working set, run 5 minutes of random
# read/write, then remove the test files.
FileIO() {
    local size="16G"
    sysbench --test=fileio --file-total-size=$size prepare
    sysbench --test=fileio --file-total-size=$size --file-test-mode=rndrw --max-time=300 --max-requests=0 run
    sysbench --test=fileio --file-total-size=$size cleanup
}
# Memory throughput benchmark using 4 worker threads.
Mem() {
    sysbench --test=memory --num-threads=4 run
}
# MySQL OLTP benchmark against a scratch database. ExecM and the
# gMySQLUser/gMySQLPassw credentials come from the sourced system.sh.
# NOTE: the actual benchmark "run" step is commented out below, so this
# currently only exercises prepare (1M-row table build) and cleanup.
RunMySQL()
{
db="app_test"
gAuth="--user=$gMySQLUser --password=$gMySQLPassw"
SQL="CREATE DATABASE IF NOT EXISTS $db"
ExecM "mysql $gAuth --disable-column-names --batch --execute='$SQL'"
ExecM "sysbench --test=oltp --oltp-table-size=1000000 --mysql-db=$db --mysql-user=$gMySQLUser --mysql-password=$gMySQLPassw prepare"
#ExecM "sysbench --test=oltp --mysql-db=$db --mysql-user=$gMySQLUser --mysql-password=$gMySQLPassw --max-time=10 --oltp-read-only=on --max-requests=0 --num-threads=2 run"
ExecM "sysbench --test=oltp --mysql-db=$db --mysql-user=$gMySQLUser --mysql-password=$gMySQLPassw cleanup"
}
# Wrapper that reports the total wall-clock time of the MySQL benchmark.
MySQL()
{
time RunMySQL
}
clear
sysbench --version
# ------------------------
# Dispatch: $1 names the benchmark and doubles as the function to invoke;
# $2 is forwarded as an (currently unused) extra argument. An unknown or
# missing benchmark name silently runs nothing.
case $1 in
CPU) $1 $2 ;;
FileIO) $1 $2 ;;
Mem) $1 $2 ;;
MySQL) $1 $2 ;;
esac
| true |
be17194b74a0ca301e8750586505155bc96e1b42 | Shell | rhokhh/allerleirauh | /vagrant/script.sh | UTF-8 | 1,274 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Vagrant provisioner: LAMP stack (Apache, MySQL 5.5, PHP 5) plus a Contao
# 3.5 install under /var/www/html/contao. Intended for throwaway dev VMs
# only -- the MySQL root password is hard-coded in plain text below.
# Updating repository
sudo apt-get -y update
# Installing Apache
sudo apt-get -y install apache2
# Installing MySQL and its dependencies. Also pre-seed the root password via
# debconf, since the package would otherwise prompt for it interactively.
sudo debconf-set-selections <<< 'mysql-server-5.5 mysql-server/root_password password rootpass'
sudo debconf-set-selections <<< 'mysql-server-5.5 mysql-server/root_password_again password rootpass'
sudo apt-get -y install mysql-server libapache2-mod-auth-mysql php5-mysql
# Installing PHP and its dependencies
sudo apt-get -y install php5 libapache2-mod-php5 php5-mcrypt
#create contao db
mysql -u root -prootpass -e "create database contao; GRANT ALL PRIVILEGES ON contao.* TO contao@localhost IDENTIFIED BY 'contao'"
#install Contao 3.5
# Download and unpack into the current directory.
curl -L http://download.contao.org | tar -xzp
export DIR=/var/www/html/contao
# NOTE(review): the unpack directory name is pinned to contao-3.5.6 while
# the download URL serves whatever the site currently offers -- if the
# published version changes, this cp will fail. Confirm/pin the version.
sudo cp -r -a contao-3.5.6/. $DIR/
sudo chown -R www-data:www-data $DIR/
sudo chmod -R 755 $DIR/
# Writable trees Contao needs at runtime.
sudo chmod -R 775 $DIR/assets/images/
sudo chmod -R 775 $DIR/system/logs/
sudo chmod -R 775 $DIR/system/tmp
sudo mv $DIR/.htaccess.default $DIR/.htaccess
# Raise PHP's upload limit for the web installer.
sudo sed -i -e "s/^upload_max_filesize\s*=\s*2M/upload_max_filesize = 16G/" /etc/php5/apache2/php.ini
sudo service apache2 reload
| true |
53f5cd80bd167ccee433abd5c9737f646d32666a | Shell | vrushabh95/ShellScript | /Assignment3/PrimeFactorsInArray.sh | UTF-8 | 252 | 3.421875 | 3 | [] | no_license | #!/bin/bash -x
clear
# Prime factorisation by trial division.
read -p "Enter Number: " number
# i walks candidate divisors from 2 upward; the bound re-reads $number each
# iteration, and number shrinks as factors are divided out, so the loop
# tightens automatically.
for (( i=2; i<=$number; i++ ))
do
# Divide out every occurrence of i (repeated prime factors are recorded
# once per occurrence). k is unset initially; in arithmetic context k++
# starts from 0, filling the array densely.
while (( $(( number % i )) == 0 ))
do
primeFactors[k++]=$i
number=$(( number / i ))
done
done
# First line: the array indices; second line: the factors themselves.
echo ${!primeFactors[@]}
echo ${primeFactors[@]}
sleep 1
| true |
8f109e07282e591cb8dbd4589fbadf83d3a62e07 | Shell | dono/dotfiles | /setup.sh | UTF-8 | 2,318 | 3.65625 | 4 | [] | no_license | #!/bin/zsh
# Symlink this dotfiles repo into place: prezto runcoms (per-OS variants),
# tmux config (with the tpm plugin manager), neovim configs, and the tern
# config. prezto/tpm are cloned only when missing, so re-running is safe.
# zsh-only: relies on setopt, extended globs and the :t modifier below.
run() {
# setup zsh -----------------------------------------------------------------
# auto install prezto
if [[ ! -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then
git clone --recursive https://github.com/sorin-ionescu/prezto.git "${ZDOTDIR:-$HOME}/.zprezto"
setopt EXTENDED_GLOB
# ^README.md excludes the README; (.N) = plain files, no error when the
# glob matches nothing; ${rcfile:t} is the basename (zsh "tail" modifier).
for rcfile in "${ZDOTDIR:-$HOME}"/.zprezto/runcoms/^README.md(.N); do
ln -s "$rcfile" "${ZDOTDIR:-$HOME}/.${rcfile:t}"
done
fi
# zshrc
for file in ~/dotfiles/zsh/common/*
do
ln -sf ${file} ~/.zprezto/runcoms/
done
# Layer OS-specific runcoms on top of the common ones.
case ${OSTYPE} in
darwin*)
for file in ~/dotfiles/zsh/mac/*
do
ln -sf ${file} ~/.zprezto/runcoms/
done
;;
linux*)
for file in ~/dotfiles/zsh/linux/*
do
ln -sf ${file} ~/.zprezto/runcoms/
done
;;
esac
# end -----------------------------------------------------------------------
# setup tmux ----------------------------------------------------------------
if [ ! -e ~/.tmux/ ]; then
mkdir -p ~/.tmux/plugins
fi
if [ ! -e ~/.tmux/plugins/tpm ]; then
git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm
fi
ln -sf ~/dotfiles/tmux/conf/tmux.conf ~/.tmux.conf
# end -----------------------------------------------------------------------
# setup neovim --------------------------------------------------------------
if [ ! -e ~/.config/nvim/ ]; then
mkdir -p ~/.config/nvim/
fi
if [ ! -e ~/.cache/ ]; then
mkdir -p ~/.cache
fi
for file in ~/dotfiles/nvim/*.toml
do
ln -sf ${file} ~/.config/nvim/
done
ln -sf ~/dotfiles/nvim/init.vim ~/.config/nvim/
# end ---------------------------------------------------------------------
# setup tern --------------------------------------------------------------
ln -sf ~/dotfiles/.tern-config ~/
# end ---------------------------------------------------------------------
}
# output confirmation message
# Only an explicit yes (in any of the listed spellings) runs the installer;
# anything else -- including just pressing Enter -- aborts.
# NOTE(review): the "[Y/n]" prompt conventionally implies Enter defaults to
# yes, which this case statement does not implement -- confirm intent.
echo "I will overwrite the existing dotfiles, but is it okay? [Y/n]"
read ANSWER
case $ANSWER in
"Y" | "y" | "yes" | "Yes" | "YES" ) run;;
* ) echo "Operation stopped";;
esac
| true |
4a8dd4a7d0542f99595f584c60f44dbd9c4651f6 | Shell | eloy13/Scrips2021 | /15-Copyfiles.sh | UTF-8 | 610 | 3.296875 | 3 | [] | no_license | # !/ in/bash
# @edt ASIX M01-ISO
# Febrer 2021
# Copy a file into an existing destination directory.
# Usage: prog <file> <dest_dir>
# -------------------------------------
ERR_NARGS=1   # wrong number of arguments
ERR_FILE=2    # source file does not exist
ERR_DIREX=3   # destination is not an existing directory
# Validate argument count
if [ $# -ne 2 ]
then
  echo "Error: nºarguments incorrecte"
  # Fixed: the usage line now shows both required arguments.
  echo "Usage: $0 file directori"
  exit $ERR_NARGS
fi
file=$1
dir=$2
# The source file must exist (quoted so paths with spaces work).
if [ ! -e "$file" ]
then
  echo "Error: El fichero no existe"
  echo "Usage : programa.sh file directorio_destino"
  exit $ERR_FILE
fi
# The destination must be an existing directory.
if ! [ -d "$dir" ]
then
  # Fixed: the original interpolated the undefined variable $directori here.
  echo "Error: $dir no es un directori existent"
  echo "Usage: $0 file directori"
  exit $ERR_DIREX
fi
cp "$file" "$dir"
exit 0
| true |
55d1612bdece647865ca174f6b3baefa407664e5 | Shell | pdadhich/hbc-ddw-o5 | /o5_ppe_cheetah_data_load_extract.sh | UTF-8 | 5,454 | 3.21875 | 3 | [] | no_license | #!/usr/bin/ksh
#############################################################################################################################
##### SAKS INC.
#############################################################################################################################
#####
##### PROGRAM NAME : o5_ppe_cheetah_data_load_extract.sh
#####
##### DESCRIPTION : This script does the following
##### 1. Calls the sql script for building the BI partner base
#####
#####
#####
#####
##### CODE HISTORY : Name Date Description
##### ------------ ---------- ------------
##### Divya Kafle 06/02/2014 Created
#####
#############################################################################################################################
################################################################
# Load the shared environment for the "o5" datamart, then pin this job's
# identifiers and working directories. The '0' values are placeholder
# metrics handed to the runstats_start/runstats_end audit scripts invoked
# later in this job.
. $HOME/params.conf o5
export PROCESS='o5_ppe_cheetah_data_load_extract'
export SQL=$HOME/SQL
export LOG=$HOME/LOG
export DATA=$HOME/DATA
export LOG_FILE="$LOG/${PROCESS}_log.txt"
export BAD_SUBJECT="${PROCESS} failed"
export JOB_NAME="${PROCESS}"
export SCRIPT_NAME="${PROCESS}"
export SFILE_SIZE='0'
export FILE_NAME='0'
export LOAD_COUNT='0'
export FILE_COUNT='0'
export TFILE_SIZE='0'
export SOURCE_COUNT='0'
export TARGET_COUNT='0'
# Names of the SQL*Plus scripts executed later (without the .sql suffix).
SQL1='o5_ppe_cheetah_data_load'
SQL2='o5_ppe_cheetah_xml_extract'
########################################################################
##Initialize Email Function
########################################################################
# Mail the current log file to every recipient in the distribution list.
# Reads $HOME/email_distribution_list.txt, keeping only lines whose first
# column starts with "3", and sends ${LOG_FILE} with subject ${SUBJECT}
# (both set by the caller) to each address.
function send_email {
# NOTE(review): CURRENT_TIME is computed here but never used.
CURRENT_TIME=`date +"%m/%d/%Y-%H:%M:%S"`
cat $HOME/email_distribution_list.txt|grep '^3'|while read group address
do
cat ${LOG_FILE}|mailx -s "${SUBJECT}" $address
done
}
#################################################################
#################################################################
##Update Runstats Start
#################################################################
sqlplus -s -l $CONNECTDW <<EOF> ${LOG}/${PROCESS}_runstats_start.log @${SQL}/runstats_start.sql "$JOB_NAME" "$SCRIPT_NAME" "$SFILE_SIZE" "$FILE_NAME" "$LOAD_COUNT" "$FILE_COUNT" "$TFILE_SIZE" "$SOURCE_COUNT" "$TARGET_COUNT"
EOF
#################################################################
echo -e "o5_ppe_cheetah_data_extract_load Process started at `date '+%a %b %e %T'`\n" >${LOG_FILE}
#################################################################
# Run the sql script that performs the data load
#################################################################
sqlplus -s -l $CONNECTDW @${SQL}/${SQL1}.sql>> ${LOG_FILE}
##################################################################
# Check for the data load
#################################################################
if [ $? -ne 0 ] || [ `egrep -c "^ERR|ORA-|not found|SP2-0" ${LOG_FILE}` -ne 0 ]
then
echo -e "FAILURE - o5_ppe_cheetah_data_load `date '+%a %b %e %T %Z %Y'`\n " >>${LOG_FILE}
export SUBJECT=${BAD_SUBJECT}
send_email
exit 99
else
echo -e "SUCCESS - o5_ppe_cheetah_data_load `date '+%a %b %e %T %Z %Y'`\n " >>${LOG_FILE}
fi
#################################################################
# get the target count
#################################################################
TARGET_COUNT=`sqlplus -s $CONNECTDW <<EOF
set heading off
select count(*)
from o5.TURN_TO_CHEETAH_EXTRACT
WHERE item_exclude='F' and email is not null and product_id is not null
and trunc(add_dt) = TRUNC (SYSDATE);
quit;
EOF`
################################################################
echo -e "The Target record count is : $TARGET_COUNT" >> ${LOG_FILE}
echo -e "Starting the off5th ppe xml data extract `date '+%a %b %e %T %Z %Y'`\n " >>${LOG_FILE}
################################################################
sqlplus -s -l $CONNECTDWXML @${SQL}/${SQL2}.sql>> ${LOG_FILE}
#################################################################
sqlplus -s -l $CONNECTDW<<EOF> ${LOG}/${PROCESS}_runstats_finish.log @${SQL}/runstats_end.sql "$JOB_NAME" "$SCRIPT_NAME" "$SFILE_SIZE" "$FILE_NAME" "$LOAD_COUNT" "$FILE_COUNT" "$TFILE_SIZE" "$SOURCE_COUNT" "$TARGET_COUNT"
EOF
#################################################################
#Pull the data from 145 box
#################################################################
echo -e "copying the o5 ppe data from 145 to 101 at `date '+%a %b %e %T %Z %Y'`\n " >>${LOG_FILE}
scp cognos@$ORACLESRV:/oracle/EXPORTS/dataservices/Off5th_ppe_`date +%Y%m%d`.xml $DATA
wait
echo -e "Finished copying the data from 145 to 101 at `date '+%a %b %e %T %Z %Y'`\n " >>${LOG_FILE}
echo -e "o5_ppe_cheetah_data_extract_load Process Ended at `date '+%a %b %e %T'`\n" >>${LOG_FILE}
################################################################
# Check for errors
################################################################
if [ `egrep -c "^ERROR|ORA-|not found|SP2-0|^553" ${LOG_FILE}` -ne 0 ]
then
cp "${LOG_FILE}" "${LOG_FILE}.`date +%Y%m%d`"
echo -e "${PROCESS} failed. Please investigate"
echo -e "${PROCESS} failed. Please investigate\n" >> ${LOG_FILE}
export SUBJECT=${BAD_SUBJECT}
exit 99
else
export SUBJECT="SUCCESS:Off5th PPE DAILY DATA IS PRODUCED AND copied 145-101-30 and READY FOR FURTHER PROCESS"
echo -e "${PROCESS} completed without errors."
echo -e "${PROCESS} completed without errors.\n" >> ${LOG_FILE}
exit 0
fi
| true |
752825ca7573891205cff8d2573e73b8d2ee69e5 | Shell | Jabromen/dotfiles | /zsh/zshrc.zsh | UTF-8 | 2,378 | 2.875 | 3 | [] | no_license | ## History file configuration
[ -z "$HISTFILE" ] && HISTFILE="$HOME/.zsh_history"
HISTSIZE=50000
SAVEHIST=10000
## History command configuration
setopt extended_history # record timestamp of command in HISTFILE
setopt hist_expire_dups_first # delete duplicates first when HISTFILE size exceeds HISTSIZE
setopt hist_ignore_dups # ignore duplicated commands history list
setopt hist_ignore_space # ignore commands that start with space
setopt hist_verify # show command with history expansion to user before running it
setopt inc_append_history # add commands to HISTFILE in order of execution
setopt share_history # share command history data
# enable color output by default
alias ls='ls --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
# shorter list view of ls
alias ll='ls -Al'
alias ~='cd ~'
# print directory contents when entering it
# (chpwd is zsh's hook function, invoked after every working-directory
# change; this is the short single-command function form)
chpwd() ls
# fixme - the load process here seems a bit bizarre
zmodload -i zsh/complist
WORDCHARS=''
unsetopt menu_complete # do not autoselect the first completion entry
unsetopt flowcontrol
setopt auto_menu # show completion menu on successive tab press
setopt complete_in_word
setopt always_to_end
# should this be in keybindings?
zstyle ':completion:*:*:*:*:*' menu select
# case insensitive (all), partial-word and substring completion
zstyle ':completion:*' matcher-list 'm:{a-zA-Z}={A-Za-z}' 'r:|=*' 'l:|=* r:|=*'
# Complete . and .. special directories
zstyle ':completion:*' special-dirs true
zstyle ':completion:*' list-colors ''
zstyle ':completion:*:*:kill:*:processes' list-colors '=(#b) #([0-9]#) ([0-9a-z-]#)*=01;34=0=01'
zstyle ':completion:*:*:*:*:processes' command "ps -u $USER -o pid,user,comm -w -w"
# disable named-directories autocompletion
zstyle ':completion:*:cd:*' tag-order local-directories directory-stack path-directories
# Use caching so that commands like apt and dpkg complete are useable
zstyle ':completion::complete:*' use-cache 1
zstyle ':completion::complete:*' cache-path $ZSH_CACHE_DIR
# Enable plugins
source ~/dotfiles/zsh/plugins/zsh-autosuggestions/zsh-autosuggestions.zsh
source ~/dotfiles/zsh/plugins/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
# Setup keybindings
source ~/dotfiles/zsh/keybindings.zsh
# Setup prompt
source ~/dotfiles/zsh/prompt.zsh
| true |
befd15d43d01987c52eb9ba3110f0110f6d19ce9 | Shell | dimagi/commcare-cloud | /git-hooks/pre-commit.sh | UTF-8 | 1,325 | 4.09375 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
#
# Pre-commit hook that verifies if all files containing 'vault' in the name
# are encrypted.
# If not, commit will fail with an error message
#
# File should be .git/hooks/pre-commit and executable
VAULT_FILE_INCLUDE_PATTERN='(/vault.yml|.*.vault|.*vault.*)$'
VAULT_FILE_EXCLUDE_PATTERN='^tests|development|^src/commcare_cloud/environment/secrets/backends/ansible_vault/|src/commcare_cloud/manage_commcare_cloud/list_vault_keys.py'
# Every ansible-vault encrypted file begins with a header containing this tag.
REQUIRED='ANSIBLE_VAULT'
EXIT_STATUS=0
wipe="\033[1m\033[0m"
yellow='\033[1;33m'
# Read staged paths line by line (instead of word-splitting a $(...) blob)
# so filenames containing spaces are handled correctly. Process
# substitution keeps the loop in the current shell, so EXIT_STATUS and
# UNENCRYPTED_FILES survive the loop.
while IFS= read -r f; do
	# grep -L prints the file name only when the tag is absent,
	# i.e. the file is NOT vault-encrypted.
	MATCH=$(grep -L -- "$REQUIRED" "$f")
	if [ -n "${MATCH// }" ] ; then
		# $'\n' replaces the original two-line "carriage return hack".
		UNENCRYPTED_FILES="$f"$'\n'"$UNENCRYPTED_FILES"
		EXIT_STATUS=1
	fi
done < <(git diff --cached --name-only | grep -E "$VAULT_FILE_INCLUDE_PATTERN" | grep -E -v "$VAULT_FILE_EXCLUDE_PATTERN")
if [ $EXIT_STATUS = 0 ] ; then
	exit 0
else
	echo '# COMMIT REJECTED'
	echo '# Looks like unencrypted ansible-vault files are part of the commit:'
	echo '#'
	while read -r line; do
		if [ -n "$line" ]; then
			echo -e "#\t${yellow}unencrypted: $line${wipe}"
		fi
	done <<< "$UNENCRYPTED_FILES"
	echo '#'
	echo "# Please encrypt them with 'ansible-vault encrypt <file>'"
	echo "# (or force the commit with '--no-verify')."
	exit $EXIT_STATUS
fi
| true |
8933e9df94ec6b7d667e7b405c4d89329f5acc1b | Shell | maticnetwork/matic-cli | /src/setup/devnet/templates/docker/docker-heimdall-start.sh.njk | UTF-8 | 456 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env sh
# Launch Heimdall validator node number $1 as a detached docker compose
# container named heimdall<INDEX>.
INDEX=$1
# The quoted payload runs inside the container: create the log file, start
# heimdalld after a 60s delay (bridge, all services, REST server enabled,
# output redirected to the log), then tail the log so the container keeps a
# foreground process alive.
docker compose run -d --service-ports --name heimdall$INDEX --entrypoint bash heimdall$INDEX -c "
mkdir -p /root/heimdall/logs && touch /root/heimdall/logs/heimdalld.log &
sleep 60 && heimdalld start --home /root/var/lib/heimdall \
--chain=/root/var/lib/heimdall/config/genesis.json \
--bridge --all \
--rest-server > /root/heimdall/logs/heimdalld.log 2>&1 &
sleep 10 && tail -f /root/heimdall/logs/heimdalld.log
"
| true |
0e3ea16f0106d082b9f837779a4b9d98ad5f2a30 | Shell | containers/podman | /test/apiv2/70-short-names.at | UTF-8 | 5,339 | 3.5 | 4 | [
"Apache-2.0"
] | permissive | # -*- sh -*-
#
# Tests for exercising short-name resolution in the compat API.
#
# Pull the libpod/quay image which is used in all tests below.
t POST "images/create?fromImage=quay.io/libpod/alpine:latest" 200 .error~null .status~".*Download complete.*"
# 14291 - let a short-name resolve to a *local* non Docker-Hub image.
t POST containers/create Image=alpine 201 .Id~[0-9a-f]\\{64\\}
cid=$(jq -r '.Id' <<<"$output")
t GET containers/$cid/json 200 .Config.Image="quay.io/libpod/alpine:latest" .Image~sha256:[0-9a-f]\\{64\\}
podman rm -f $cid
########## TAG
t POST "images/quay.io/libpod/alpine/tag?repo=foo" 201
t DELETE "images/docker.io/library/foo" 200
########## BUILD
function test_build {
    # Build an image via the compat /build endpoint and clean it up again.
    #   $1 - base image for the generated Containerfile
    #   $2 - tag passed to the build endpoint
    #   $3 - fully-qualified name the built image is expected to resolve to
    from=$1
    tag=$2
    fqn=$3
    # The endpoint expects the build context as a tar archive, so write a
    # minimal two-line Containerfile into a scratch dir and tar it up.
    TMPD=$(mktemp -d podman-apiv2-test.build.XXXXXXXX)
    CONTAINERFILE_TAR="${TMPD}/containerfile.tar"
    {
        echo "FROM $from"
        echo "RUN touch /foo"
    } > "$TMPD/containerfile"
    tar --format=posix -C "$TMPD" -cvf "$CONTAINERFILE_TAR" containerfile > /dev/null 2>&1
    t POST "/build?dockerfile=containerfile&t=$tag" "$CONTAINERFILE_TAR" 200 \
      .stream~".*Successfully tagged .*"
    rm -rf "$TMPD"
    # Removing by the expected FQN also verifies the name resolution.
    t DELETE "images/$fqn" 200
}
t POST "images/quay.io/libpod/alpine/tag?repo=foo" 201
test_build foo bar "docker.io/library/bar:latest"
t DELETE "images/foo" 200
########## TAG
# The libpod endpoint will resolve to it without issues.
t GET "libpod/images/alpine/exists" 204
# Now let's tag the image with 'foo'. Remember, it will be normalized to
# docker.io/library/foo.
t GET "libpod/images/docker.io/library/foo/exists" 404
t POST "images/quay.io/libpod/alpine/tag?repo=foo" 201
t GET "libpod/images/docker.io/library/foo/exists" 204
########## REMOVE
t DELETE "images/foo" 200 # removes the previously tagged image
########## GET
# Same procedure as above but with the /get endpoint.
t POST "images/quay.io/libpod/alpine/tag?repo=foo" 201
t GET "images/foo/get" 200 '[POSIX tar archive]'
t DELETE "images/foo" 200
t GET "images/alpine/get" 200
########## HISTORY
# Short names must resolve for /history on both compat and libpod endpoints.
t GET "images/alpine/history" 200
t GET "images/quay.io/libpod/alpine/history" 200
t POST "images/quay.io/libpod/alpine/tag?repo=foo" 201
t GET "libpod/images/foo/history" 200
t DELETE "images/foo" 200
########## PUSH
# Pushes are expected to fail (there is no registry at localhost:9999); the
# point is that the short name resolves far enough to attempt the push.
t POST "images/quay.io/libpod/alpine/push?destination=localhost:9999/do/not:exist" 500
t POST "images/quay.io/libpod/alpine/tag?repo=foo" 201
t POST "images/foo/push?destination=localhost:9999/do/not:exist" 500
t DELETE "images/foo" 200
########## CREATE A CONTAINER
t POST "containers/create" Image=alpine 201
t POST "containers/create" Image=quay.io/libpod/alpine:latest 201
cid=$(jq -r '.Id' <<<"$output")
t POST "images/quay.io/libpod/alpine/tag?repo=foo" 201
t POST "containers/create" Image=foo 201
cid=$(jq -r '.Id' <<<"$output")
t DELETE "images/foo" 200
t DELETE "containers/$cid" 204
########## COMMIT CONTAINER
t POST "containers/create" Image=quay.io/libpod/alpine:latest 201
cid=$(jq -r '.Id' <<<"$output")
t GET "images/alpine/get" 200
# Committing with repo=foo must yield docker.io/library/foo:tag, so the
# bare 'foo' lookup fails while 'foo:tag' succeeds.
t POST "commit?container=$cid&repo=foo&tag=tag" 201
t GET "images/foo/get" 404 .cause="image not known"
t GET "images/foo:tag/get" 200
t DELETE "images/docker.io/library/foo:tag" 200
t DELETE "containers/$cid" 204
######### SMOKE TESTS WITHOUT DOCKER.IO ENFORCEMENT
# Note that we need to restart the service with a custom containers.conf to
# disable the docker.io enforcement.  Short names now normalize to
# localhost/... instead of docker.io/library/...
stop_service
CONTAINERS_CONF=$TESTS_DIR/containers.conf start_service
t POST "images/create?fromImage=quay.io/libpod/alpine:latest" 200 .error~null .status~".*Download complete.*"
t POST "images/alpine/tag?repo=foo" 201
t GET "images/localhost/foo:latest/get" 200
t DELETE "images/foo" 200
t GET "images/alpine/history" 200
t POST "images/alpine/push?destination=localhost:9999/do/not:exist" 500
t POST "containers/create" Image=alpine 201
cid=$(jq -r '.Id' <<<"$output")
t POST "commit?container=$cid&repo=foo&tag=tag" 201
t DELETE "images/localhost/foo:tag" 200
t DELETE "containers/$cid" 204
test_build alpine bar "localhost/bar:latest"
# Restore the default service configuration for any tests that follow.
stop_service
start_service
| true |
92c3b42c9cdf3c18d33632112f204544c7c4e39b | Shell | FauxFaux/utils | /logging | UTF-8 | 618 | 3.5 | 4 | [] | no_license | #!/bin/bash
# logging
# File ID: af318500-fb9b-11dd-bb0f-000475e441b9
#
# Record a terminal session with script(1): each session gets an ID from
# suuid(1) and its typescript + timing data are stored under ~/log/script.
# Optional $1 is appended to the log file names as a label.
export LOGDIR=$HOME/log/script
# Make sure the log directory exists before anything is written to it.
# (paths are now quoted so a $HOME containing spaces cannot break the test)
[ -d "$LOGDIR/." ] || mkdir -p "$LOGDIR" || { echo "logging: $LOGDIR: Cannot create log directory" >&2; exit 1; }
# Timestamp for the file name -- 'u' is assumed to print a sortable
# timestamp (TODO confirm against the author's utils).
export LDATE=$(u)
# Register the session start; without a UUID we refuse to start logging.
uuid=$(suuid -t logging -w eo -c "logging $*") || { echo "logging: Error when generating UUID, logging not started" >&2; exit 1; }
# Base name of this session's log files, including the optional label.
if [ -z "$1" ]; then
    export LNAME="$LOGDIR/$LDATE.$uuid"
else
    export LNAME="$LOGDIR/$LDATE.$uuid.$1"
fi
# Run the recorded session: -f flushes after each write, -t sends timing
# data to stderr, which we capture in the .timing file.
LOGGING_UUID=$uuid script -ft "$LNAME.scrlog" 2>"$LNAME.timing"
# Mark the session as finished (message kept verbatim; it is Norwegian for
# "Log session <uuid> finished.").
suuid -w e -t logging -c "Loggsession $uuid ferdig."
echo "Loggsession $uuid ferdig." >&2
| true |
4a0e129a347d0d443659f090204f348ed9248acc | Shell | archhaskell/habs | /helpers/buildpkgs | UTF-8 | 1,748 | 4.03125 | 4 | [] | no_license | #!/usr/bin/env bash
set -eu
# Resolve the directory this script lives in and source the shared helper
# functions (msg, die, ...) that sit next to it in useful.sh.
rundir="$(dirname $(readlink -f $0))"
. ${rundir}/useful.sh
# Pin the locale so tool output stays predictable.
export LANG=en_US.UTF8
arch=$(uname -m)
# default values
habs_dir=.
# NOTE(review): clean_build_first/update_pristine_first are set but never
# referenced in this script -- possibly consumed by useful.sh; confirm.
clean_build_first=0
update_pristine_first=0
owner_uid=1000
owner_gid=100
# Print the help text to stdout and exit successfully (invoked via -h).
usage() {
    cat << EOF
Usage: makeahpkg [options] -- [packages]
Run this script in your HABS dir to build the named packages.
Options:
  -h        This help
  -b <dir>  Location of your HABS dir (default .)
  -u <uid>  User ID to own the built files
  -g <gid>  Group ID to own the built files
EOF
    exit 0
}
buildpkg() {
    # Build the package in the current directory (which must hold a
    # PKGBUILD) unless the binary package already exists, then publish it
    # into the local /repo and register it in the repo database.
    # Pulls pkgname/pkgver/pkgrel (and _* helper vars) into the shell by
    # eval'ing the plain assignments straight out of the PKGBUILD.
    eval "$(awk ' /^_[[:alpha:]]+=/ { print } /^pkg[[:alpha:]]+=/ { print }' PKGBUILD)"
    if [[ ! -f "${pkgname}-${pkgver}-${pkgrel}-${arch}.pkg.tar.xz" ]]; then
        msg "Building in $PWD"
        sudo pacman -Sy
        PACKAGER="ArchHaskell <arch-haskell@haskell.org>" BUILDDIR="/tmp/build-${arch}" \
            makepkg --noconfirm -s
    else
        msg "Skipping build in $PWD"
    fi
    sudo mkdir -p /repo
    # pkgname may be an array (split packages): publish every sub-package.
    # All expansions are quoted now so odd characters in names cannot split.
    local p pn
    for p in "${pkgname[@]}"; do
        pn="${p}-${pkgver}-${pkgrel}-${arch}.pkg.tar.xz"
        sudo chown "${owner_uid}:${owner_gid}" "$pn"
        sudo cp "$pn" /repo
        # Guard the cd: never run repo-add in the wrong directory.
        (cd /repo && sudo repo-add repo.db.tar.gz "$pn")
    done
}
# Parse command-line flags (see usage() for their meaning).
while getopts hb:u:g: opt; do
    case "${opt}" in
        h) usage; exit 0;;
        b) habs_dir="${OPTARG}";;
        u) owner_uid="${OPTARG}";;
        g) owner_gid="${OPTARG}";;
    esac
done
shift $((OPTIND - 1))

# Refresh the system once before building anything.
sudo pacman -Syu --noconfirm

# Each remaining argument names a package: accept either the lower-cased
# directory name itself or its haskell- prefixed variant.
# ("$@" is quoted so arguments survive intact; the directory tests are
# quoted so a habs_dir with spaces cannot break them.)
for pkg0 in "$@"; do
    pkg=
    pkg1=${pkg0,,}
    [ -d "${habs_dir}/${pkg1}" ] && pkg=${pkg1}
    [ -d "${habs_dir}/haskell-${pkg1}" ] && pkg=haskell-${pkg1}
    if [[ -n "${pkg}" ]]; then
        # Build in a subshell so the cd does not leak into the next loop.
        (cd "${habs_dir}/$pkg" && buildpkg)
    else
        die "No such package: $pkg0"
    fi
    pkg=
done
| true |
632c927c6feff6a43628003b2f361842dafd2c4a | Shell | tareksfouda/TextProvider-server-client | /run.sh | UTF-8 | 118 | 2.5625 | 3 | [] | no_license | #!/bin/sh
# Build the project, then run either the Client (no argument) or the
# Server on the given input file (path is interpreted relative to the
# project root, hence the ../ prefix once inside bin/).
# Abort if the build fails rather than running stale class files.
./build.sh || exit 1
cd bin || exit 1
if [ -z "$1" ]; then
    java Client
else
    java Server "../$1"
fi
# NOTE(review): 'lines' is presumably scratch data left behind by the
# programs above -- confirm before changing this cleanup.
rm -rf lines
cd ..
| true |
6c0fede2c89c989691eb24875ac5a844dc4e6d73 | Shell | samsquire/markupcontrol | /n3.sh | UTF-8 | 620 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# n3.sh -- pull an N3 payload out of a <sam> element in an XML file and
# POST it to a local conversion service.
#   $1 - XML file holding the <sam> elements
#   $2 - value of the name attribute identifying the element to convert
FILE="$1"
NAME="$2"
echo "N3 processor $FILE with name $NAME" >&2
# Source/target formats may be supplied as from=/to= attributes on the
# selected element; otherwise default to an n3 -> rdfa conversion.
FROM=$(xml sel -t -v "/sam[@type='n3' and @name='$NAME']/@from" "$FILE")
TO=$(xml sel -t -v "/sam[@type='n3' and @name='$NAME']/@to" "$FILE")
FROM="${FROM:-n3}"
TO="${TO:-rdfa}"
SERVER="http://localhost:8080/convert/$FROM/$TO/content"
echo "Converting from $FROM to $TO with server $SERVER" >&2
# Select the element's text, decode its HTML entities, and upload the
# result as the 'content' form field.
xml sel -t -v "/sam[@type='n3' and @name='$NAME']" "$FILE" | recode html..utf8 | curl --form content=@- \
	"$SERVER"
| true |
c069545dd49684b1778eb76580849335aaedcd58 | Shell | sourcemage/cauldron | /enchantment/lib/lib.i18n | UTF-8 | 6,972 | 3.9375 | 4 | [] | no_license | #!/bin/bash
#-------------------------------------------------------------------------------
##
##=head1 SYNOPSIS
##
## These are the functions used for i18n, including setting keymaps, setting
## the current language the installer is running under, etc.
##
## For the keymap functions, only numbers are to be used as input, since they
## are more likely to be consistent across different keymaps/keyboards, whereas
## letters and punctuation get moved around much more.
##
##=head1 COPYRIGHT
##
## Copyright 2010 by the Cauldron Team
##
##=head1 FUNCTIONS
##
##=over 4
##
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
## @param size of choices list (highest numbered entry to choose from)
##
## Reads input from the user, and ensures that it is either '*' or a number in
## the range of 0-highest_entry. Echoes the input if it is a number that fits
## the range. If the input is '*', then ERR_KEYMAP_EXIT is returned so that the
## caller can do any cleanup needed and then exit.
##
#-------------------------------------------------------------------------------
function enchant_i18n_keymap_input_get() {
  local max="$1"
  local input=""

  [[ -z "$max" ]] && return $ERR_KEYMAP_LIST_SIZE

  # grab the input, allowing readline editing
  read -re input || return $ERR_KEYMAP_INPUT

  if [[ -n "$input" ]]
  then
    # if the user entered '*', then we exit without finishing
    if [[ "$input" == "*" ]]
    then
      return $ERR_KEYMAP_EXIT
    fi

    # reject anything that is not a plain non-negative number outright --
    # previously a non-numeric entry made the arithmetic tests below error
    # out and then fell through as if the input were valid
    [[ "$input" =~ ^[0-9]+$ ]] || return $ERR_KEYMAP_INPUT

    # if we got a number outside the range of what we can accept,
    # tell the user before returning an error
    if [[ "$input" -lt 0 || "$input" -gt "$max" ]]
    then
      return $ERR_KEYMAP_INPUT
    fi
  fi

  echo "$input"
  return $ERR_OK
}
#-------------------------------------------------------------------------------
## @param prefix -> path to populate the list of choices from
##
## Creates a list of choices consisting of directory entries from the prefix. If
## prefix is not explicitly provided, it defaults to /usr/share/keymaps (the base
## of the keymap file hierarchy) relative to the ENCHANT_ISO_PATH.
##
#-------------------------------------------------------------------------------
function enchant_i18n_keymap_make_list {
  local prefix="${1:-$ENCHANT_KEYMAPDIR}"
  local list=()
  local result=()
  local entry=""

  [[ -d "$prefix" ]] || return $ERR_DIR_ACCESS

  # prefer subdirectories (keymap categories), ignoring the include dir
  list=( $("${ENCHANT_CMD_FIND[@]}" "$prefix"/* -maxdepth 0 -type d -not -name include) )

  # no subdirectories: fall back to the keymap files themselves
  # (the -name pattern must be quoted so the shell cannot glob it against
  # files in the current directory before find ever sees it)
  if [[ "${#list[*]}" -eq 0 ]]
  then
    list=( $("${ENCHANT_CMD_FIND[@]}" "$prefix"/* -type f -name '*.map.gz') )
  fi

  [[ "${#list[*]}" -eq 0 ]] && return $ERR_KEYMAP_LIST

  # strip the leading path from every entry and echo the resulting names
  for entry in "${list[@]}"
  do
    result+=( "${entry##*/}" )
  done

  echo "${result[@]}"
  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## Get the current keymap setting from the enchantment temporary directory.
##
#-------------------------------------------------------------------------------
function enchant_i18n_keymap_get() {
  # Print the keymap previously recorded by enchant_i18n_keymap_set.
  if ! "${ENCHANT_CMD_CAT[@]}" "$ENCHANT_KEYMAP"
  then
    return $ERR_KEYMAP_GET
  fi

  return $ERR_OK
}
#-------------------------------------------------------------------------------
## @param keymap to load
##
## Load the keymap passed as the first argument and store it into the
## enchantment temporary directory for later use.
##
#-------------------------------------------------------------------------------
function enchant_i18n_keymap_set() {
  # Load the given keymap file and remember the selection for later use
  # by enchantment (e.g., for potions).
  local map_path="$1"

  if [[ ! -f "$map_path" ]]
  then
    return $ERR_FILE_ACCESS
  fi

  "${ENCHANT_CMD_LOAD_KEYMAP[@]}" "$map_path" || return $ERR_KEYMAP_LOAD

  # record the chosen keymap, minus its .map.gz style suffix
  printf '%s\n' "${map_path%.*map.*}" > "$ENCHANT_KEYMAP" ||
    return $ERR_KEYMAP_SET

  return $ERR_OK
}
#-------------------------------------------------------------------------------
## @param prefix -> current path to populate the menu from
## @param size of choices list (highest numbered entry to choose from)
##
## Gets the user input and then processes it, possibly calling
## enchant_i18n_keymap_set to actually set the keymap if that's what was input.
##
#-------------------------------------------------------------------------------
function enchant_i18n_keymap_input_handler() {
  # Process one round of menu input at the given path: descend into the
  # chosen subdirectory, go up one level on '0', or load the selected
  # *.map.gz keymap file.
  # NOTE(review): indexes into a global 'choices' array that the caller is
  # expected to have populated (see enchant_i18n_keymap_make_list) -- confirm.
  local prefix="${1:-$ENCHANT_KEYMAPDIR}"
  local max="$2"
  local input=""
  local rc=""
  [[ ! -e "$prefix" ]] && return $ERR_KEYMAP_PREFIX
  [[ -z "$max" ]] && return $ERR_KEYMAP_LIST_SIZE
  # get the user's menu choice
  input="$(enchant_i18n_keymap_input_get $max)"
  rc="$?"
  # if the user entered '*', exit the keymap routines
  [[ "$rc" -eq "$ERR_KEYMAP_EXIT" ]] && return $ERR_KEYMAP_EXIT
  # if there was an error in getting the input, report it to the caller
  [[ "$rc" -eq "$ERR_KEYMAP_INPUT" ]] && return $ERR_KEYMAP_INPUT
  # if the user entered '0', go "back" (up a dir/level)
  if [[ "$input" -eq "0" ]]
  then
    # only go up a level if we aren't at the beginning
    # (don't go lower than the floor)
    [[ "$prefix" != "$ENCHANT_KEYMAPDIR" ]] &&
      prefix="${prefix%/[^/]*}"
  else
    # get the menu item, adjusting for array indexed from 0
    input="${choices[((input-1))]}"
    # if the input corresponds to a keymap file, attempt to switch to that
    # keymap, otherwise set the prefix to the new path and loop again
    if [[ "$input" == *.map.gz ]]
    then
      prefix="$prefix/$input"
      # if a keymap is successfully set, return ERR_KEYMAP_EXIT to the caller,
      # so they know that the keymap was successfully chosen and loaded
      enchant_i18n_keymap_set "$prefix" && return $ERR_KEYMAP_EXIT
      # return any errors from enchant_i18n_keymap_set if it didn't load a
      # keymap successfully
      return "$?"
    else
      prefix="$prefix/$input"
      # echo (return) the new prefix back to the caller
      echo "$prefix"
    fi
  fi
  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## This software is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This software is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this software; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
#-------------------------------------------------------------------------------
# vim:ai:tw=80:tabstop=2:softtabstop=2:shiftwidth=2:expandtab
| true |
b51a42a70986386d875fe6b61d926e1e000d5c4e | Shell | JimCallahan/Pipeline | /src/java/us/temerity/pipeline/plugin/archive-plugins | UTF-8 | 2,494 | 3.78125 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Package compiled Pipeline plugin classes for distribution.
#   $1 - path to the 'jar' tool
#   $2 - source tree root (used to locate each plugin's RESOURCES dir)
# Produces one jar per multi-class plugin plus the plugin-all/plugin-extra
# index files listing what was packaged.
JAR=$1
srcdir=$2
rm -f plugin-all
rm -f plugin-extra
# Walk every <plugin>/<version> directory under the current directory.
for pdir in `find . -mindepth 1 -maxdepth 1 -type d | sort`
do
  plugin=`echo $pdir | awk -F/ '{print $2}'`
  for vdir in `find $pdir -mindepth 1 -maxdepth 1 -type d | sort`
  do
    version=`echo $vdir | awk -F/ '{print $3}'`
#    echo $plugin $version
    cnt=`find $vdir -type f -name "*.class" | wc -l`
#
# For the Temerity plugins all the resources for the plugin go in a directory named
# RESOURCES. The resources should be placed in the directory as if it was relative to the plugin class file.
# This script will take care of putting the resources in the jar in the proper directories.
#
    rcnt=`find $srcdir/us/temerity/pipeline/plugin/$plugin/$version/ -type f | grep -v \/CVS\/ | grep RESOURCES | wc -l`
    # A single class file with no resources ships as a bare .class file;
    # anything else gets bundled into a per-plugin jar.
    if [ $cnt -eq 1 -a $rcnt -eq 0 ]
    then
      cfile=`find $vdir -type f -name "*.class" | sed 's:\./:us/temerity/pipeline/plugin/:g'`
      echo "PLUGIN CLASS:" $cfile
      # QueueStatsExt plugins are tracked separately in plugin-extra.
      if echo $cfile | grep "us/temerity/pipeline/plugin/QueueStatsExt"
      then
        echo $cfile >> plugin-extra
      else
        echo $cfile >> plugin-all
      fi
    else
      jfile=us/temerity/pipeline/plugin/$plugin/$version/$plugin.jar
      echo "PLUGIN JAR:" $jfile
      # jar paths are relative to the plugin tree root four levels up.
      pushd ../../../.. >& /dev/null
      echo $JAR cvf $jfile \
        `find us/temerity/pipeline/plugin/$plugin/$version -type f -name "*.class"`
      $JAR cvf $jfile \
        `find us/temerity/pipeline/plugin/$plugin/$version -type f -name "*.class"`
      popd >& /dev/null
      if echo $jfile | grep "us/temerity/pipeline/plugin/QueueStatsExt"
      then
        echo $jfile >> plugin-extra
      else
        echo $jfile >> plugin-all
      fi
#
# Add the resources to the jar file from the RESOURCES directory. First create the
# intermediate directories in a temporary directory. Then copy the files from the RESOURCES
# directory into the temporary directory and add to the jar.
#
      if [ $rcnt -gt 0 ]
      then
        rm -rf $plugin/$version/TMP-RESOURCES
        mkdir -p $plugin/$version/TMP-RESOURCES/us/temerity/pipeline/plugin/$plugin/$version/
        cp -R $srcdir/us/temerity/pipeline/plugin/$plugin/$version/RESOURCES/* $plugin/$version/TMP-RESOURCES/us/temerity/pipeline/plugin/$plugin/$version/
        pushd $plugin/$version/TMP-RESOURCES >& /dev/null
        $JAR uvf ../$plugin.jar `find . -type f | grep -v \/CVS\/`
        popd >& /dev/null
        rm -rf $plugin/$version/TMP-RESOURCES
      fi
    fi
  done
done
| true |
b8449581f1c3b100600d971df4e2635c31cb48a5 | Shell | Artemish/etc | /sh/get-music.sh | UTF-8 | 201 | 3.359375 | 3 | [] | no_license | #!/bin/bash
getmusic() {
  # Download the audio track of a YouTube URL into ~/what, named after the
  # video title.
  #   $1 - the video URL (required, exactly one argument)
  # Returns 1 on usage error or when the download directory is missing.
  if [ $# -ne 1 ]; then
    echo "Usage: getmusic <youtube_url>" 1>&2
    return 1
  fi
  # Fail early instead of silently downloading into the current directory
  # when ~/what does not exist.
  pushd ~/what || return 1
  youtube-dl -x "$1" -o '%(title)s.%(ext)s'
  popd
}
| true |
7e722da9bb761c9334dfaddaad2096c08612dc14 | Shell | Cloudxtreme/snltd-monitor | /checks/dav/check_dav_read.sh | UTF-8 | 1,808 | 3.84375 | 4 | [] | no_license | #=============================================================================
#
# check_dav_read.sh
# -----------------
#
# Checks we can download a file from webDAV.
#
# Requires curl and that DAV_S_LIST is a whitespace separated list of
# servers to connect to.
#
# Requires that target Apache configurations allow the connection, and that
# the file is there. How to set up the DAV server is in the wiki.
#
# Exit codes: 0 = all servers OK, 2 = at least one transfer failed,
# 3 = prerequisites missing (no curl or empty DAV_S_LIST).
#
# R Fisher 03/2009
#
# v1.0 Initial Release
#
#=============================================================================
#-----------------------------------------------------------------------------
# VARIABLES
EXIT=0
TARGET="${DIR_STATE}/dav_testfile"
#-----------------------------------------------------------------------------
# SCRIPT STARTS HERE
can_has curl && [[ -n $DAV_S_LIST ]] \
	|| exit 3
for server in $DAV_S_LIST
do
	rm -f $TARGET
	URL="https://${server}/snltd_monitor/dav_testfile_read"
	# Curl. Don't worry about --insecure. It just tells it not worry about
	# the self-signed cert we use on the dav server. --fail tells curl to
	# properly fail if it can't get the file - without it, you get exit 0
	# and the web server's output in the $TARGET file.
	curl \
		--fail \
		--connect-timeout 4 \
		--max-time 5 \
		--silent \
		--insecure \
		--config ${DIR_CONFIG}/dav_connect \
		-o $TARGET \
		$URL
	CURL_RET=$?
	# A transfer also counts as failed when the target file is empty.
	if [[ $CURL_RET != 0 ]] || [[ ! -s $TARGET ]]
	then
		ERRORS=1
		[[ -n $RUN_DIAG ]] \
			&& cat <<-EOERR
		Failed to transfer test file over webDAV.
		server: $server
		url: $URL
		curl exit: $CURL_RET
		target: $TARGET
		curl diagnostic follows:
		$(curl_diag $CURL_RET)
		ls of target file follows:
		$(ls -l $TARGET 2>&1)
		EOERR
	fi
done
if [[ -n $ERRORS ]]
then
	EXIT=2
else
	EXIT=0
fi
exit $EXIT
| true |
83b3d2699c703cff574bf5d5134554b575afd8fd | Shell | will-henney/bowshock-shape | /Stellar-Bowshocks-2017/RSG/wget-scripts/CWLEO_wget_data.sh | UTF-8 | 830 | 2.59375 | 3 | [] | no_license | #!/bin/sh
#
# To run as an executable on a unix platform, do the following:
#   chmod 775 wget_data.bat
#   ./wget_data.bat
#
# Fetch each Herschel/MESS image of CW Leo in turn; wget -x mirrors the
# server's directory layout locally.
for url in \
    "https://irsa.ipac.caltech.edu:443/data/Herschel/MESS/images/CWLEO-1_160_pixfrac10.0.mod.fits" \
    "https://irsa.ipac.caltech.edu:443/data/Herschel/MESS/images/CWLeo-160_10_AFGL190.mod.fits" \
    "https://irsa.ipac.caltech.edu:443/data/Herschel/MESS/images/CWLEO-all_160_pixfrac10.0.mod.fits" \
    "https://irsa.ipac.caltech.edu:443/data/Herschel/MESS/images/CWLeo-70_3_AFGL190.mod.fits" \
    "https://irsa.ipac.caltech.edu:443/data/Herschel/MESS/images/CWLEO_70_pixfrac10.0.mod.fits" \
    "https://irsa.ipac.caltech.edu:443/data/Herschel/MESS/images/CWLEO_100_pixfrac10.0.mod.fits" \
    "https://irsa.ipac.caltech.edu:443/data/Herschel/MESS/images/CWLEO_160_pixfrac10.0.mod.fits"
do
    wget -x "$url"
done
| true |
d21789faa5cb3ca9b51ca0b667315170e8441011 | Shell | ehershey/utilities | /add-wifi.sh | UTF-8 | 849 | 3.828125 | 4 | [] | no_license | #!/bin/bash
#
# Append the current Wi-Fi network and its stored password to the wifi
# notes file, prompting for a human-readable location name.
# macOS only: relies on networksetup(8) and the login keychain.
set -o pipefail
set -o errexit
set -o nounset
INTERFACES="en0 en1"
WIFIDOC=~/Dropbox//PlainText/wifi.txt
# Find the first interface that is associated with a network.
# network is pre-initialised so the later references cannot trip
# 'set -o nounset' when no interface matches.
network=""
for INTERFACE in $INTERFACES
do
  if networksetup -getairportnetwork "$INTERFACE"
  then
    network=$(networksetup -getairportnetwork "$INTERFACE" | cut -f2- -d: | sed 's/^ *//')
    if [ "$network" ]
    then
      break
    fi
  fi
done
echo "Adding to $WIFIDOC"
# Warn (but continue) if the network already appears in the file.
if grep -q "$network" "$WIFIDOC"
then
  echo "WARNING: Appears to be duplicate"
fi
echo "Network appears to be: $network"
# Pull the stored password for this SSID out of the keychain.
password="$(security find-generic-password -wa "$network")"
echo "Password appears to be: $password"
echo -n "Enter location name: "
read -r location
entry="$location - $network / $password"
echo "Press enter to add entry, ^C to quit:"
echo "$entry"
read -r null
echo >> "$WIFIDOC"
date >> "$WIFIDOC"
echo "$entry" >> "$WIFIDOC"
| true |
30ce160a506bf007748074f36ba2d5ed39fc4211 | Shell | AC9090/wipe-script | /init-wipe.sh | UTF-8 | 2,507 | 3.90625 | 4 | [] | no_license | #!/bin/bash
# Changelog:
# 11/7/19 Add nwipe as menu option
# 14/7/19 Get shell menu option working
# 29/10/19 Version 2.1 Add clear in shell
# 15/12/19 Version 2.2 disable ipv6 in /etc/sysctl.conf
version=V2.2

# Colour scheme for every whiptail dialog.
# (fixed: 'border' previously used a '.' separator instead of the
# 'fg,bg' comma syntax newt expects)
export NEWT_COLORS='
window=white,black
border=white,black
textbox=white,black
button=white,red
'

# Initial loading message
tagline="Turing Trust Disk Wipe Utility $version"
# Optional $1 labels this instance, e.g. when several run in parallel.
if [ -z "$1" ]; then
  brand="$tagline"
else
  brand="$1 parallel $tagline $version"
fi

# Keep kernel messages off the console while the menu is up.
dmesg -n 1

# Main menu loop: runs until the machine is shut down.
while true; do
  selection=$(whiptail --title "$brand" --menu "\nPlease select an option:\n " 22 78 12 \
    "Wipe" "Run the Secure Erase script." \
    "Nwipe" "Run Disk Wipe script." \
    "Shell" "Show a bash shell." \
    "Unlock" "Unlock a disk." \
    "Disk Info" "Run 'hdparm -I' to get information on a disk." \
    "Shutdown" "Turn off the machine." \
    "About" "Info about the wipe script" \
    3>&1 1>&2 2>&3)
  #"Wipe Advanced" "Run the wipe sript with advanced options."
  if [ "$selection" == "Wipe" ]; then
    bash -c "./wipe-main.sh"
  elif [ "$selection" == "Nwipe" ]; then
    bash -c "./nwipe-script.sh"
  elif [ "$selection" == "Unlock" ]; then
    bash -c "./unlock_drive.sh"
  elif [ "$selection" == "Disk Info" ]; then
    # Build a whiptail tag/description pair per whole disk:
    #   /dev/sdX   <MODEL>_<SIZE>
    # (now a real array, so the pairs cannot be mis-split by whiptail)
    drives=$(lsblk -nio KNAME,TYPE,SIZE,MODEL | grep disk | awk '{print $1}')
    drives_available=()
    for drive in $drives; do
      model=$(lsblk -nio MODEL "/dev/$drive" | awk '{print $1}')
      size=$(lsblk -nio SIZE,TYPE "/dev/$drive" | grep disk | awk '{print $1}')
      drives_available+=("/dev/$drive" "${model}_${size}")
    done
    drive_selected=$(whiptail --title "$brand" --menu "\nPlease select a drive to get information from.\nUse up and down arrows to scroll and press 'q' to quit the info screen." \
      22 78 12 "${drives_available[@]}" 3>&1 1>&2 2>&3)
    if [ -z "$drive_selected" ]; then
      echo "No drive selected"
      sleep 2
    else
      hdparm -I "$drive_selected" | less
    fi
  elif [ "$selection" == "Shutdown" ]; then
    echo
    echo "Shutting down..."
    sleep 2
    shutdown -h 0
    exit
  elif [ "$selection" == "About" ]; then
    whiptail --title "Info" --msgbox "$(cat README.txt)" 20 78 --scrolltext
  elif [ "$selection" == "Shell" ]; then
    clear
    echo
    echo
    echo "Type exit to return to selection menu."
    sleep 2
    bash --norc --noprofile
  else
    echo "Selection " "$selection"
    sleep 2
  fi
done
| true |
627940f897855b9ada63e8b0e1b9530502d82a00 | Shell | evilbinary/RScheme-for-ios | /lib/rs/bin/rsc | UTF-8 | 437 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# rsc -- wrapper that launches the RScheme compiler image with the right
# bootstrap configuration for the requested build stage.
# An optional leading -abt flag is forwarded to rs unchanged.
abt=""
if [ "$1" = "-abt" ]; then
  abt=-abt
  shift
fi
# Pick the config basis from the stage selector (default: +low).
case "$1" in
  -precore) cb=start ; shift ;;
  -corelib) cb=+precore ; shift ;;
  -lowscm)  cb=+core ; shift ;;
  *)        cb=+low ;;
esac
# $abt is deliberately unquoted: when empty it must vanish entirely.
exec /home/evil/dev/rs/stage0/install/bin/rs -q $abt -image /home/evil/dev/rs/stage0/install/resource/compiler/rsc.img \
  -BC /home/evil/dev/rs/stage0/install/resource/compiler/bytecode/bcgen.scm \
  -config-basis $cb "$@"
| true |
38e67d53f6143929134fa9158f0f3ab91a37bef5 | Shell | anosillus/dotfiles | /memo/i3/scripts/executable_multiheadmsw | UTF-8 | 905 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Multihead mode switch
CONFIG="$HOME/.dotfiles/i3/config"

# Grab monitor names from the i3 config: the grep/cut pipeline assumes
# lines of the form 'set $monN <name>', taking the third space-separated
# field (TODO confirm against the actual config format).
mon0="$(grep set "$CONFIG" | grep mon0 | cut -d " " -f 3)"
mon1="$(grep set "$CONFIG" | grep mon1 | cut -d " " -f 3)"
mon2="$(grep set "$CONFIG" | grep mon2 | cut -d " " -f 3)"

# Laptop panel off; externals side by side with mon2 left and primary.
docked_wrkstat() {
  xrandr --output "$mon0" --off
  xrandr --output "$mon1" --auto --output "$mon2" --auto --left-of "$mon1" --primary
}

# Laptop panel off; mon2 to the right of mon1 (TV layout).
docked_tvmode() {
  xrandr --output "$mon0" --off
  xrandr --output "$mon1" --auto --output "$mon2" --auto --right-of "$mon1" --primary
}

# External monitors off; laptop panel only.
undocked() {
  xrandr --output "$mon1" --off --output "$mon2" --off
  xrandr --output "$mon0" --auto
}

case "$1" in
  1) docked_wrkstat;;
  2) undocked;;
  3) docked_tvmode;;
  *) echo "$0 <option>; 1: docked workstation mode, 2: undocked mode, 3: docked tv mode";;
esac

# Restore wallpaper settings
nitrogen --restore
| true |
2efc698c24f5cbec709fe69b1d5388e766ff617f | Shell | jm96441n/dotfiles | /system/.alias | UTF-8 | 976 | 2.875 | 3 | [] | no_license | #!/usr/bin/zsh
# Shortcuts
alias reload="exec zsh"
# Directory traversal
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../.."
# exa -- modern ls replacement; ls shows hidden files, dirs first
alias ls='exa --icons -x -a --group-directories-first'
alias ll='exa --icons -a -l'
alias lt='ls --tree'
alias ltl='ls --tree -L'
alias lr='ls --recurse'
# Rails Aliases
alias be="bundle exec"
# yolo: drop and recreate the development database in one go
alias yolo="be rake db:drop;be rake db:create;"
# cht.sh
alias cht="tmux-cht"
# Make fgrep/egrep colourize their matches
alias fgrep="fgrep --color=auto"
alias egrep="egrep --color=auto"
# Map vim to use neovim
alias vim="nvim"
# useful shortcuts for editing config files
alias vr="vim ~/.dotfiles/.config/nvim/init.vim"
alias zr="vim ~/.dotfiles/runcom/.zshrc"
alias txr="vim ~/.dotfiles/runcom/.tmux.conf"
alias kr="vim ~/.dotfiles/.config/kitty/kitty.conf"
alias ir="vim ~/.dotfiles/.config/i3/config"
# Named Directories (zsh: usable as ~projects / ~dot)
hash -d projects=$HOME/Projects
hash -d dot=$HOME/.dotfiles
alias projects=~projects
alias dot=~dot
| true |
4b63a146bc8d05a272f15849213d39e417bdbe81 | Shell | justingarcia/dotfiles | /install.sh | UTF-8 | 3,380 | 3.828125 | 4 | [] | no_license | #!/bin/sh
# Install justingarcia/dotfiles
# Author: Justin Garcia
# =============================================================================
# DATE names the log file; TIME prefixes each entry written by log().
DATE=$(date +"%F")
TIME=$(date +"%F %T %Z")
DOTFILES_DIR=$HOME/dotfiles
LOGFILE=$HOME/dotfiles-install-$DATE.log
touch $LOGFILE
# Functions -------------------------------------------------------------------
# .............................................................................
# log() - Log a message
# Usage:
# log [message-type] <message-string>
# .............................................................................
log()
{
    # Append a timestamped message to $LOGFILE.
    #   log <message>          - plain entry
    #   log <type> <message>   - entry tagged with a 5-column type
    #                            (e.g. INFO, ERROR)
    # Relies on the globals $TIME and $LOGFILE set at the top of the
    # script; $LOGFILE is now quoted so a home dir with spaces works.
    if [ $# -eq 1 ]; then
        printf "[%s] %s\n" "$TIME" "$1" >> "$LOGFILE"
    else
        printf "[%s] [%-5s] %s\n" "$TIME" "$1" "$2" >> "$LOGFILE"
    fi
}
# .............................................................................
# install() - Use apt-get to install a package, log results
# Usage:
# install <command-name> <package-name>
# .............................................................................
install()
{
    # Install a package with apt-get and log whether the expected command
    # became available.
    #   $1 - command that should exist after installation
    #   $2 - apt package name
    # NOTE(review): this shadows /usr/bin/install for the rest of the
    # script -- intentional here, but worth confirming.
    sudo apt-get -y install "$2"
    if command -v "$1" > /dev/null; then
        log INFO "$1 installed"
    else
        log ERROR "$1 failed to install"
    fi
}
# Add and Update Repositories -------------------------------------------------
log "-- UPDATING REPOSITORIES --"
sudo add-apt-repository ppa:neovim-ppa/stable
sudo apt-get update
# Install Packages ------------------------------------------------------------
log "-- INSTALLING PACKAGES --"
install zsh zsh
install uxterm xterm
#install urxvt rxvt-unicode-256color
install tmux tmux
install nvim neovim
install curl curl
sudo apt-get -y install ncurses-base
sudo apt-get -y install ncurses-term
# Install Fonts ---------------------------------------------------------------
# TODO: for ttf fonts -- create '.font' directory in 'home'
# then copy ttf font folders there
log "-- INSTALLING FONTS --"
# Enable bitmap fonts system-wide, then register the bundled ctrld font.
sudo unlink /etc/fonts/conf.d/70-no-bitmaps.conf
sudo ln -sf /etc/fonts/conf.avail/70-yes-bitmaps.conf /etc/fonts/conf.d/70-yes-bitmaps.conf
sudo dpkg-reconfigure fontconfig
xset +fp $DOTFILES_DIR/fonts/ctrld-font/
xset fp rehash
#sudo make -C $DOTFILES_DIR/fonts/ctrld-font
# Create Symlinks -------------------------------------------------------------
log "-- CREATING SYMLINKS --"
# Remove preexisting symlinks and dotfiles ....................................
# (unlink complains for links that do not exist; that is harmless here)
unlink ~/.zshrc
unlink ~/.Xresources
unlink ~/.xinitrc
unlink ~/.xsessionrc
#unlink ~/.urxvt
unlink ~/.tmux
unlink ~/.tmux.conf
unlink ~/.config/nvim
# Remove preexisting configuration folders ....................................
#sudo rm -rf ~/.urxvt > /dev/null 2>&1
sudo rm -rf ~/.tmux > /dev/null 2>&1
sudo rm -rf ~/.config/nvim > /dev/null 2>&1
# Create new symlinks .........................................................
ln -s $DOTFILES_DIR/zsh/zshrc ~/.zshrc
ln -s $DOTFILES_DIR/X11/Xresources ~/.Xresources
ln -s $DOTFILES_DIR/X11/xinitrc ~/.xinitrc
ln -s $DOTFILES_DIR/X11/xinitrc ~/.xsessionrc
#ln -s $DOTFILES_DIR/X11/urxvt ~/.urxvt
ln -s $DOTFILES_DIR/tmux ~/.tmux
ln -s $DOTFILES_DIR/tmux/tmux.conf ~/.tmux.conf
ln -s $DOTFILES_DIR/nvim ~/.config/nvim
# Set Defaults ----------------------------------------------------------------
# Load the new X resources, switch the login shell, pick the terminal.
xrdb -load ~/.Xresources
chsh -s `which zsh`
sudo update-alternatives --config x-terminal-emulator
# Log Summary -----------------------------------------------------------------
log "-- SUMMARY --"
cat $LOGFILE
| true |
b4cc8d50641e77c3b4da709bd08e4f54e750fb5c | Shell | 1z2s3e4v/Cap-Array-Generator | /runLPE/runLPE.sh | UTF-8 | 2,324 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# runLPE.sh -- run a Calibre LVS check plus StarXtract parasitic (RC)
# extraction on a GDS layout against a reference SPICE netlist, producing
# a .spf parasitics file under spf/.
#
# Usage: runLPE.sh <layout.gds> [reference.sp]
#   $1 - GDS layout file (required)
#   $2 - SPICE netlist to compare against (optional; defaults below)
#
# NOTE(review): paths are site-specific NFS mounts and the flow assumes
# `calibre` and `StarXtract` are on PATH -- not portable outside that farm.
# NOTE(review): every error path uses `exit 0`, so callers cannot detect
# failure from the exit status; they must parse $LOG_FILE instead.
#GDS_FILE="/raidj/user/t28/t28u25/frank/project/runLPE/gds_sp/ARRAY_CAP_T1.gds"
#SP_FILE="/raidj/user/t28/t28u25/frank/project/runLPE/gds_sp/ARRAY_CMP.sp"
LOG_FILE="./log.runLPE"
GDS_FILE=$1
SPF_FILE=""
SPICE_FILE="/raidj/user/t28/t28u25/frank/project/runLPE/spice/ARRAY_CMP.sp"
if [ $# -eq 2 ]; then
	SPICE_FILE=$2
fi
# Start a fresh log (timestamp first) and drop any stale extraction result.
date > $LOG_FILE
if [ -f ./ARRAY_CMP_T1.spf ]; then
	rm ./ARRAY_CMP_T1.spf
fi
# check ARGC
# (|& pipes both stdout and stderr into tee -- requires bash 4+)
if [ $# -eq 0 ]; then
	echo "[runLPE] - error! Please input a gds file." |& tee -a $LOG_FILE
	exit 0;
fi
# check gds and spice
if [ ! -f "$GDS_FILE" ]; then
	echo "[runLPE] - error! gds file not found." |& tee -a $LOG_FILE
	exit 0
fi
if [ ! -f "$SPICE_FILE" ]; then
	echo "[runLPE] - error! spice file not found." |& tee -a $LOG_FILE
	exit 0
fi
# Extract cell name, fix gds path, name spf file
if [[ "$GDS_FILE" =~ .*\.gds$ ]]; then
	# Cell name = file name without any extension(s).
	CellName="$(basename $GDS_FILE)"
	CellName="${CellName%%.*}"
	echo "[runLPE] - running case '$CellName'" |& tee -a $LOG_FILE
	# Make the GDS path absolute: it gets written into the Calibre rule file
	# below and must resolve regardless of calibre's working directory.
	if [[ ! "$GDS_FILE" =~ ^/.* ]]; then
		GDS_FILE="$(pwd)/$GDS_FILE"
	fi
	echo "[runLPE] - gds file: '$GDS_FILE'" |& tee -a $LOG_FILE
	echo "[runLPE] - spice file: '$SPICE_FILE'" |& tee -a $LOG_FILE
	SPF_FILE="spf/$CellName.spf"
	echo "[runLPE] - spf file: '$SPF_FILE'" |& tee -a $LOG_FILE
else
	echo "[runLPE] - error! Input is not a gds file." |& tee -a $LOG_FILE
	exit 0
fi
# set gds, spice in DFM_LVS_RC_CALIBRE_N28HP_1p9m_ALRDL_CCI.v1.0_3o
# (rewrites the LAYOUT/SOURCE PATH lines of the Calibre rule deck in place;
# '#' is used as the sed delimiter because the paths contain '/')
sed -i "s#LAYOUT PATH.*#LAYOUT PATH \"$GDS_FILE\"#g" DFM_LVS_RC_CALIBRE_N28HP_1p9m_ALRDL_CCI.v1.0_3o
sed -i "s#SOURCE PATH.*#SOURCE PATH \"$SPICE_FILE\"#g" DFM_LVS_RC_CALIBRE_N28HP_1p9m_ALRDL_CCI.v1.0_3o
# run lvs check
echo "checking lvs ..."
calibre -lvs -hier DFM_LVS_RC_CALIBRE_N28HP_1p9m_ALRDL_CCI.v1.0_3o >> $LOG_FILE
# Pass/fail decided by a case-insensitive literal "error" scan of the report.
if grep -Fqi "error" ./LVS/lvs.rep
then
	echo "[runLPE] - lvs error." |& tee -a $LOG_FILE
	exit 0
else
	echo "[runLPE] - lvs pass." |& tee -a $LOG_FILE
fi
# Capacitance extraction
calibre -query ./LVS/svdb/ < query_cmd
StarXtract -clean star_cmd
# Correct pin name of ARRAY_CMP_T1 in spf file
# NOTE(review): pin order below is hard-coded for the ARRAY_CMP_T1 cell.
sed -i "s/\.SUBCKT ARRAY_CMP_T1.*/.SUBCKT ARRAY_CMP_T1 SL1A SL1B SL2A SL2B SL3A SL3B TOP_ARRAY VDD09A VSS09A/g" ARRAY_CMP_T1.spf
# Copy spf file to spf/Named
mkdir -p spf
cp ARRAY_CMP_T1.spf $SPF_FILE
cp $SPF_FILE ~/frank/project/input/spf/
echo "$SPF_FILE generated."
exit 0
| true |
30631fd7c8f4bba8cd157ffa1512aba1eb793809 | Shell | rdermer/english-words | /sbPangram.sh | UTF-8 | 263 | 3.109375 | 3 | [
"Unlicense",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# spelling bee - find the pangram(s)
#
# Print every word from words_alpha.txt that uses only the seven given
# letters AND contains each of them at least once.
#   $1 - exactly seven letters
if [[ $# -ne 1 || ${#1} -ne 7 ]]; then
    echo "usage: $0 7letters" >&2
    exit 1   # previously a bare `exit`, which reported success (0) on misuse
fi
# Words composed solely of the allowed letters...
candidates=$(grep -E "^[$1]+$" words_alpha.txt)
# ...then require every letter to appear.  The loop replaces the previous
# seven hard-coded grep stages (and `egrep`, which is deprecated); it would
# also work for any puzzle size if the length check above were relaxed.
for ((i = 0; i < ${#1}; i++)); do
    candidates=$(grep -- "${1:i:1}" <<< "$candidates")
done
[[ -n "$candidates" ]] && printf '%s\n' "$candidates"
| true |
95cb16a6608318c37518d8ec4ed2c4377c506eea | Shell | starena/unifi-uck-backup | /uck-backup-ftp.sh | UTF-8 | 3,915 | 3.46875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# =============================================================================
# Unifi Cloud Key Backup to FTP
# https://github.com/aessing/uck-backup-ftp
# -----------------------------------------------------------------------------
# Developer.......: Andre Essing (https://www.andre-essing.de/)
#                   (https://github.com/aessing)
#                   (https://twitter.com/aessing)
#                   (https://www.linkedin.com/in/aessing/)
# -----------------------------------------------------------------------------
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
# =============================================================================
# Fill in the {PLACEHOLDER} values below before first use.
# NOTE(review): FTP_PASSWORD is passed on the lftp command line, so it is
# visible in `ps` output while the transfer runs.
FTP_SERVER={SERVERNAME}
FTP_PATH={BACKUPPATH}
FTP_USER={FTPUSER}
FTP_PASSWORD={FTPPASSWORD}
###############################################################################
############### Magic Line - Normally no changes below necessary ##############
###############################################################################
echo ""
echo "============================================================================="
echo "$(date)"
echo "Starting backtup to:"
echo " - FTP Server: ${FTP_SERVER}"
echo " - FTP Path: ${FTP_PATH}"
echo " - FTP User: ${FTP_USER}"
echo "-----------------------------------------------------------------------------"
###############################################################################
#
# Setup some stuff
#
echo ""
echo " - Setup some stuff"
# Local staging directory that mirrors both backup sources via symlinks.
BACKUP_ROOT=/uck-backup-ftp
CRON_FILE=/etc/cron.d/uck-backup-ftp
PROTECT_BACKUP_FOLDER=/srv/unifi-protect/backups
PROTECT_BACKUP_LINK=$BACKUP_ROOT/protect
SCRIPT_FILE=`basename $0`
SCRIPT_PATH=$(dirname $(readlink -f $0))
UNIFI_BACKUP_FOLDER=/srv/unifi/data/backup/autobackup
UNIFI_BACKUP_LINK=$BACKUP_ROOT/unifi
###############################################################################
#
# Install lftp
#
# dpkg -s exits non-zero when the package is not installed.
dpkg -s lftp >/dev/null 2>&1
if [ ! $? -eq 0 ]; then
    echo ""
    echo " - Installing lftp with apt-get"
    apt-get update
    apt-get install --no-install-recommends -y lftp
fi
###############################################################################
#
# Create backup folder
#
if [ ! -d $BACKUP_ROOT ]; then
    echo ""
    echo " - Creating backup folder ($BACKUP_ROOT)"
    mkdir -p $BACKUP_ROOT
fi
if [ ! -L $UNIFI_BACKUP_LINK ]; then
    echo ""
    echo " - Linking UNIFI backups ($UNIFI_BACKUP_FOLDER) to backup folder ($UNIFI_BACKUP_LINK)"
    ln -s $UNIFI_BACKUP_FOLDER $UNIFI_BACKUP_LINK
fi
if [ ! -L $PROTECT_BACKUP_LINK ]; then
    echo ""
    echo " - Linking UNIFI backups ($PROTECT_BACKUP_FOLDER) to backup folder ($PROTECT_BACKUP_LINK)"
    ln -s $PROTECT_BACKUP_FOLDER $PROTECT_BACKUP_LINK
fi
###############################################################################
#
# Create CRON file
#
# Self-installs an hourly (at :30) root cron job pointing back at this script.
if [ ! -f "$CRON_FILE" ]; then
    echo ""
    echo " - Setting up CRON job that runs every hour ($CRON_FILE)"
    echo "30 * * * * root $SCRIPT_PATH/$SCRIPT_FILE" > $CRON_FILE
    chmod 644 $CRON_FILE
    systemctl restart cron.service
fi
###############################################################################
#
# Copy backup files to FTP server
#
# mirror -RL: reverse mirror (upload), following the symlinks created above;
# certificate verification is disabled for self-signed FTPS endpoints.
echo ""
echo " - Copy backups to FTP server ($FTP_SERVER)"
/usr/bin/lftp -e "set ssl:verify-certificate no;mirror --overwrite --no-perms --no-umask -RL $BACKUP_ROOT $FTP_PATH;exit" -u $FTP_USER,$FTP_PASSWORD $FTP_SERVER
echo ""
echo " - done"
echo ""
echo "============================================================================="
echo ""
###############################################################################
#EOF | true |
12230f65c3bd38603bfa5162f4794c05ab78eeb3 | Shell | nysol/doc | /olddoc/tutorial/mcmd/jp/exercise/msel0.sh | UTF-8 | 695 | 2.59375 | 3 | [] | no_license | #!/bin/bash
#=====================================================
# MCMD bash script - Lesson 6: Select records
# Exercise
#=====================================================
# mcut keeps only the named columns (date/quantity/amount, in Japanese),
# then msel filters rows by the given condition.
# NOTE(review): all three pipelines write to the same o= file, so only the
# output of the last command survives -- presumably intentional for an
# exercise where methods are tried one at a time.
# Variables
inPath="tutorial_jp"
# Command
# Method 4
# Keep rows whose unit price (amount/quantity) is at most 100.
mcut f=日付,数量,金額 i=${inPath}/dat.csv |
msel c='(${金額}/${数量})<=100' o=outdat/mselout0.csv
# Method 3
# Date after 2001-10-15 AND (quantity > 5 OR amount >= 1000).
mcut f=日付,数量,金額 i=${inPath}/dat.csv |
msel c='${日付}>20011015&&(${数量}>5||${金額}>=1000)' o=outdat/mselout0.csv
# Quantity > 5 AND amount >= 1000.
mcut f=日付,数量,金額 i=${inPath}/dat.csv |
msel c='${数量}>5 && ${金額}>=1000' o=outdat/mselout0.csv
#=====================================================
| true |
0875c4053b6883c41ccbe72264d8129de96c8c1d | Shell | ghuntley/monorepo | /third_party/git/t/t8004-blame-with-conflicts.sh | UTF-8 | 1,546 | 3.5 | 4 | [
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"GPL-3.0-only",
"GPL-2.0-only",
"MIT"
] | permissive | #!/bin/sh
# Based on a test case submitted by Björn Steinbrink.
# Regression tests for `git blame` on a working tree that contains merge
# conflicts: blame must still work on clean files and must not crash on a
# file whose index entry sits in conflict stages 1 and 3.
test_description='git blame on conflicted files'
. ./test-lib.sh
# Build a repository with two deliberately unresolvable merges between
# master and foo; both `git merge master` invocations are EXPECTED to fail
# (a successful merge would mean the fixture is wrong, hence `exit 1`).
test_expect_success 'setup first case' '
	# Create the old file
	echo "Old line" > file1 &&
	git add file1 &&
	git commit --author "Old Line <ol@localhost>" -m file1.a &&
	# Branch
	git checkout -b foo &&
	# Do an ugly move and change
	git rm file1 &&
	echo "New line ..." > file2 &&
	echo "... and more" >> file2 &&
	git add file2 &&
	git commit --author "U Gly <ug@localhost>" -m ugly &&
	# Back to master and change something
	git checkout master &&
	echo "
bla" >> file1 &&
	git commit --author "Old Line <ol@localhost>" -a -m file1.b &&
	# Back to foo and merge master
	git checkout foo &&
	if git merge master; then
		echo needed conflict here
		exit 1
	else
		echo merge failed - resolving automatically
	fi &&
	echo "New line ...
... and more
bla
Even more" > file2 &&
	git rm file1 &&
	git commit --author "M Result <mr@localhost>" -a -m merged &&
	# Back to master and change file1 again
	git checkout master &&
	sed s/bla/foo/ <file1 >X &&
	rm file1 &&
	mv X file1 &&
	git commit --author "No Bla <nb@localhost>" -a -m replace &&
	# Try to merge into foo again
	git checkout foo &&
	if git merge master; then
		echo needed conflict here
		exit 1
	else
		echo merge failed - test is setup
	fi
'
# file2 itself is not conflicted; blame must succeed despite file1's state.
test_expect_success \
	'blame runs on unconflicted file while other file has conflicts' '
	git blame file2
'
# file1 is in conflict (stages 1,3 only: deleted on one side); blame must
# not crash.
test_expect_success 'blame does not crash with conflicted file in stages 1,3' '
	git blame file1
'
test_done
| true |
54eed5b4b11899b947cc01119aadfca06eb6d97f | Shell | spodin/elementary-config | /appearance.sh | UTF-8 | 4,260 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# appearance.sh -- one-shot desktop appearance / keybinding setup for
# elementary OS (Pantheon + GNOME gsettings schemas).  Requires sudo for
# the apt/font steps; gsettings calls apply to the invoking user.
# NOTE(review): fonts 'SF Pro Text' and 'Menlo for Powerline' must already
# be installed or the font-name settings will silently fall back.
## Install and apply `Paper Icons Theme` (https://snwh.org/paper)
sudo add-apt-repository ppa:snwh/pulp -y && sudo apt-get install paper-icon-theme -y
gsettings set org.gnome.desktop.interface icon-theme 'Paper'
## Change window control layout
gsettings set org.pantheon.desktop.gala.appearance button-layout 'close,minimize,maximize'
gsettings set org.gnome.desktop.wm.preferences button-layout 'close,minimize,maximize'
## Change system default fonts
gsettings set org.gnome.desktop.interface font-name 'SF Pro Text 9'
gsettings set org.gnome.desktop.interface document-font-name 'SF Pro Text 10'
gsettings set org.gnome.desktop.interface monospace-font-name 'Menlo for Powerline 10'
gsettings set org.gnome.desktop.wm.preferences titlebar-font 'SF Pro Text 9'
## TIP
## For getting names of specific keyboard buttons:
## - sudo apt-get install xbindkeys
## - sudo xbindkeys -k
## Set up window keyboard shortcuts
gsettings set org.gnome.desktop.wm.keybindings maximize "['<Primary><Super>Up']"
gsettings set org.gnome.desktop.wm.keybindings unmaximize "['<Primary><Super>Down']"
gsettings set org.gnome.desktop.wm.keybindings toggle-fullscreen "['<Primary><Super>F11']"
gsettings set org.gnome.mutter.keybindings toggle-tiled-left "['<Control><Super>Left']"
gsettings set org.gnome.mutter.keybindings toggle-tiled-right "['<Control><Super>Right']"
## Disable Switch Group (default value: ['<Super>Above_Tab', '<Alt>Above_Tab'])
gsettings set org.gnome.desktop.wm.keybindings switch-group []
## Set up custom keyboard shortcuts
## (the list below must enumerate every customN path configured afterwards)
gsettings set org.gnome.settings-daemon.plugins.media-keys custom-keybindings "['/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom0/', '/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom1/', '/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom2/', '/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom3/']"
### `lxtask` Shortcut
gsettings set org.gnome.settings-daemon.plugins.media-keys.custom-keybinding:/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom0/ name "lxtask"
gsettings set org.gnome.settings-daemon.plugins.media-keys.custom-keybinding:/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom0/ command "lxtask"
gsettings set org.gnome.settings-daemon.plugins.media-keys.custom-keybinding:/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom0/ binding "<Primary>Escape"
### `screenshot-tool` Shortcut
gsettings set org.gnome.settings-daemon.plugins.media-keys.custom-keybinding:/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom1/ name "io.elementary.screenshot-tool"
gsettings set org.gnome.settings-daemon.plugins.media-keys.custom-keybinding:/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom1/ command "io.elementary.screenshot-tool"
gsettings set org.gnome.settings-daemon.plugins.media-keys.custom-keybinding:/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom1/ binding "Print"
### `pantheon-files` Shortcut
gsettings set org.gnome.settings-daemon.plugins.media-keys.custom-keybinding:/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom2/ name "io.elementary.files"
gsettings set org.gnome.settings-daemon.plugins.media-keys.custom-keybinding:/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom2/ command "io.elementary.files"
gsettings set org.gnome.settings-daemon.plugins.media-keys.custom-keybinding:/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom2/ binding "<Super>e"
### Suspend with Ctrl+Alt+Delete
# Clear the default logout binding first so it does not shadow the new one.
gsettings set org.gnome.settings-daemon.plugins.media-keys logout ''
gsettings set org.gnome.settings-daemon.plugins.media-keys.custom-keybinding:/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom3/ name "systemctl suspend -i"
gsettings set org.gnome.settings-daemon.plugins.media-keys.custom-keybinding:/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom3/ command "systemctl suspend -i"
gsettings set org.gnome.settings-daemon.plugins.media-keys.custom-keybinding:/org/gnome/settings-daemon/plugins/media-keys/custom-keybindings/custom3/ binding "<Primary><Alt>Delete"
| true |
e80e780e6dcdd4378c2c3f1e524f8a2e5c01499f | Shell | mback2k/docker-teamspeak3 | /docker-entrypoint.d/01-startup-conf.sh | UTF-8 | 867 | 3.015625 | 3 | [] | no_license | #!/bin/sh
set -e

# Generate /usr/local/bin/teamspeak3: a launcher that chroots into / as the
# teamspeak3 user and starts ts3server with the configured inifile.
# Requires TEAMSPEAK3_APPDIR and TEAMSPEAK3_INIFILE in the environment.

# check environment variable TEAMSPEAK3_APPDIR
if [ -z "${TEAMSPEAK3_APPDIR}" ]; then
    echo "Environment variable TEAMSPEAK3_APPDIR is required"
    exit 1
fi

# check environment variable TEAMSPEAK3_INIFILE
if [ -z "${TEAMSPEAK3_INIFILE}" ]; then
    echo "Environment variable TEAMSPEAK3_INIFILE is required"
    exit 2
fi

# Write the launcher in a single heredoc instead of four echo appends.
# Variables are expanded NOW, so the generated script contains literal paths.
cat > /usr/local/bin/teamspeak3 <<EOF
#!/bin/sh
cd ${TEAMSPEAK3_APPDIR}
export LD_LIBRARY_PATH=${TEAMSPEAK3_APPDIR}:${LD_LIBRARY_PATH}
exec chroot --userspec=teamspeak3:teamspeak3 --skip-chdir / ${TEAMSPEAK3_APPDIR}/ts3server inifile=${TEAMSPEAK3_INIFILE}
EOF

# Copy owner and permissions from the ts3server binary onto the launcher
# (arguments quoted in case the app dir ever contains spaces).
chown --reference="${TEAMSPEAK3_APPDIR}/ts3server" /usr/local/bin/teamspeak3
chmod --reference="${TEAMSPEAK3_APPDIR}/ts3server" /usr/local/bin/teamspeak3

exit 0
| true |
158240099540d03005600ddf07733ab55609129b | Shell | prompto/prompto-docs | /WebSite/release.sh | UTF-8 | 790 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# release.sh -- interactive release flow: deploy the requested version with
# Maven, then (on success) create a matching GitHub release via the REST API.
# Expects a GitHub API token in ./token.txt.
echo "Are you using the latest platform version?"
# Interactive prompts: the version to publish and a human-readable name.
read -p "version to publish: " version
read -p "release name: " name
# macOS-specific JDK 11 location; adjust on other hosts.
export JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk-11.0.13.jdk/Contents/Home
mvn versions:set -DnewVersion=$version -DgenerateBackupPoms=false
mvn clean deploy -P deploy -DskipTests=true
# Remember the deploy result BEFORE resetting the pom back to the snapshot
# version, so a failed deploy still restores the working tree.
deploy=$?
mvn versions:set -DnewVersion=0.0.1-SNAPSHOT -DgenerateBackupPoms=false
if [ $deploy -eq 0 ]
then
	tag=v$version
	json="{ \"tag_name\": \"$tag\", \"name\": \"$name\" }"
	# rm + append == overwrite: build the request body file from scratch.
	rm -f release.json
	echo $json >> release.json
	curl --request POST \
	--header "Content-Type: application/json" \
	--data @release.json \
	--header "Authorization: token $(cat token.txt)" \
	--url https://api.github.com/repos/prompto/prompto-docs/releases
else
	# Deploy failed: just report the mvn exit code.
	echo $deploy
fi
| true |
2b5d44e11fc7652e32f65cdf1f2ea24d4e8e9de6 | Shell | machellerogden/powertrain | /var/VERSION.sh | UTF-8 | 488 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Resolve VERSION for the powertrain build, evaluated inside $PT_CONTEXT:
#   "all"          -> VERSION left empty
#   "" / "default" -> ask $VERSION_SCRIPT, or fall back to the current git
#                     commit hash ("latest" when not inside a git checkout)
#   anything else  -> taken verbatim as the version
pushd $PT_CONTEXT > /dev/null
case "$1" in
    all)
        VERSION=
        ;;
    ""|default)
        if [ -z "$VERSION_SCRIPT" ] || [ "$VERSION_SCRIPT" = "default" ]; then
            # Outside a repo, rev-parse prints an error message (captured via
            # 2>&1) which fails the hex check below and selects "latest".
            commit="$(git rev-parse HEAD 2>&1)"
            if [[ $commit =~ ^[0-9a-f]{5,40}$ ]]; then
                VERSION=$commit
            else
                VERSION=latest
            fi
        else
            VERSION=$($VERSION_SCRIPT)
        fi
        ;;
    *)
        VERSION=$1
        ;;
esac
popd > /dev/null
| true |
dcae92a088cdc1734cc50c2bc54370e339f20dbc | Shell | hcnelson99/voxel | /scripts/make_benchmark.sh | UTF-8 | 369 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Build the "benchmark" target against a given voxel world file.
# The world's side length is derived as the cube root of the file size
# (assumes the file is a cubic voxel grid -- TODO confirm) and baked into
# the build via -DWORLD_SIZE.
rm -f benchmark
if [ $# -ne 1 ]; then
    # Use $0 so the message is correct regardless of the script's file name
    # (it previously said "./benchmark.sh").
    echo "Usage: $0 [world file]" >&2
    exit 1
fi
world="$1"
if [ -f "$world" ]; then
    # Redirecting into wc prints the byte count alone -- no awk needed.
    size=$(wc -c < "$world")
    ws=$(python -c "print(int(round($size ** (1.0 / 3))))")
    make benchmark CFLAGS="-DWORLD_SIZE=$ws -DWORLD=\"$world\""
else
    # Fail loudly (and non-zero) so callers notice the missing world file.
    echo "World file not found: $world" >&2
    exit 1
fi
| true |
41922c18f6659d84c547e3d85b7816c5ea79196e | Shell | Bonfirium/sharemed-ledger-fabric | /install_binaries.sh | UTF-8 | 527 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Download the Hyperledger Fabric platform binaries for this OS/arch into
# ./bin (streamed straight from the tarball; only the bin/ member is kept).
set -ev
HLF_VERSION=1.4.3
# NOTE(review): CA_VERSION is defined but not used anywhere in this script.
CA_VERSION=1.4.3
# Platform string, e.g. "linux-amd64"; MinGW is normalized to "windows"
# and x86_64 to "amd64" to match the release artifact naming.
ARCH=$(
	echo "$(uname -s|tr '[:upper:]' '[:lower:]'|sed 's/mingw64_nt.*/windows/')-$(uname -m | sed 's/x86_64/amd64/g')"
)
# Blue-colored diagnostics of the detected platform / machine.
echo -e '\033[0;34m'$ARCH'\033[0m'
MARCH=$(uname -m)
echo -e '\033[0;34m'$MARCH'\033[0m'
HLF_DIST_URL=https://nexus.hyperledger.org/content/repositories/releases/org/hyperledger/fabric
BINARY_FILE=hyperledger-fabric-${ARCH}-${HLF_VERSION}.tar.gz
curl ${HLF_DIST_URL}/hyperledger-fabric/${ARCH}-${HLF_VERSION}/${BINARY_FILE} | tar -xzf - bin/
| true |
4c68b98571dc53243965c38fefbc5ddb28cfb26d | Shell | ek9/shell-dev-config | /.config/shell/profile.d/04-composerbin.sh | UTF-8 | 404 | 3.140625 | 3 | [
"MIT"
] | permissive | ## ek9/shell-config - https://github.com/ek9/shell-config
## 00-composerbin.sh
## Sets up composer bin directory

# enable local bin dir
COMPOSERBIN="$HOME/.config/composer/vendor/bin"

# Append $COMPOSERBIN to PATH for regular users (UID >= 1000) when the
# directory exists and is not already a PATH component.  The previous check
# (`echo $PATH | grep -o $COMPOSERBIN`) matched substrings and treated the
# path as a regex; matching against ":$PATH:" is exact and forks nothing.
if [[ $UID -ge 1000 && -d $COMPOSERBIN && ":$PATH:" != *":$COMPOSERBIN:"* ]]; then
    export PATH=$PATH:$COMPOSERBIN
fi
| true |
534b0cdb269385e09fecfbb7d0a00fb63addd950 | Shell | hello-stanford/compciv | /homework/test-babies/helper.sh | UTF-8 | 516 | 3.0625 | 3 | [] | no_license | # first, make the data-hold subdirectory if it doesn't already exist
# Fetch the SSA baby-names data set into data-hold/, creating the directory
# if needed.  The subshell scopes the directory change, so the caller ends
# up back where it started -- same net effect as the original
# `cd data-hold ... cd ..` sequence.
mkdir -p data-hold
(
    cd data-hold
    # grab the archive from the course mirror into data-hold/ ...
    curl -o namesbystate.zip http://stash.compciv.org/ssa_baby_names/namesbystate.zip
    # ... and unpack it in place; the zip's members land in data-hold/
    unzip namesbystate.zip
)
| true |
c5f07315c5b46fce263838a5ea98cd6c8a105cf4 | Shell | sudheerbd/cicd-modernization | /CICD/BuildWar/deploy/deploy2.sh | UTF-8 | 1,805 | 3.203125 | 3 | [] | no_license | #!/bin/bash
. ./CICD/BuildWar/variables.sh
. $main/scripts/env_variables.sh

isDevelopment=${IS_DEVELOPMENT}
# Deploy targets the *second* Tomcat instance.
# NOTE(review): these were previously assigned to tomcat*1 names while the
# rest of the script read tomcat*2 -- i.e. the script only worked if
# env_variables.sh happened to define the *2 names itself.  Verify against
# that file before relying on this.
tomcatHostname2=${TOMCAT_HOSTNAME2}
tomcatPort2=${TOMCAT_PORT2}
tomcatUsername2=${TOMCAT_USERNAME2}
tomcatPassword2=${TOMCAT_PASSWORD2}

# Start from a clean staging directory.
# (rm needs -rf here: deploy2 is a directory, so the previous bare `rm`
# always failed and left stale contents behind.)
if [ -d "$tempLocation/deploy2" ]
then
	rm -rf "$tempLocation/deploy2"
fi
mkdir -p "$tempLocation/deploy2"
cd "$tempLocation/deploy2"

# Pick the debug or release build of the WAR.
if [ "$isDevelopment" == "true" ];
then
	cp $destinationDebugLocation/reporting.war $tempLocation/deploy2/reporting.war
else
	cp $destinationTomcatLocation/reporting.war $tempLocation/deploy2/reporting.war
fi

# Overlay the per-host/per-port property files into the WAR's classpath.
mkdir -p $tempLocation/deploy2/WEB-INF/classes
cp $tomcatConfig/$tomcatHostname2/$tomcatPort2/application-dev.properties $tempLocation/deploy2/WEB-INF/classes
cp $tomcatConfig/$tomcatHostname2/$tomcatPort2/application-qa.properties $tempLocation/deploy2/WEB-INF/classes
cp $tomcatConfig/$tomcatHostname2/$tomcatPort2/application.properties $tempLocation/deploy2/WEB-INF/classes

# ON Master branch
# if [ -d $tomcatConfig/$tomcatHostname2/$tomcatPort2/licenses ];
# then
# 	mkdir $tempLocation/deploy2/licenses/
# 	cp -r $tomcatConfig/$tomcatHostname2/$tomcatPort2/licenses $tempLocation/deploy2/licenses/
# 	zip -ur $tempLocation/deploy2/reporting.war licenses
# fi
# zip -d $tempLocation/deploy2/reporting.war WEB-INF/classes/application-dev.properties
# zip -d $tempLocation/deploy2/reporting.war WEB-INF/classes/application-qa.properties
# zip -d $tempLocation/deploy2/reporting.war WEB-INF/classes/application.properties

zip -ur $tempLocation/deploy2/reporting.war WEB-INF

# Push the WAR to the Tomcat manager; fail the pipeline with curl's status
# if the upload fails (a bare `exit` here used to report success with 0).
curl -s -f -u $tomcatUsername2:$tomcatPassword2 -T $tempLocation/deploy2/reporting.war "http://$tomcatHostname2:$tomcatPort2/manager/text/deploy?path=/reporting&update=true"
rc=$?
if [ $rc -ne 0 ];
then
	exit $rc
fi
rm $tempLocation/deploy2 | true |
e9e21214b9aef075a236a24e4b71c9001fb5523d | Shell | jaferrer/odoo-helper-scripts | /lib/server.bash | UTF-8 | 8,227 | 3.703125 | 4 | [] | no_license | if [ -z $ODOO_HELPER_LIB ]; then
echo "Odoo-helper-scripts seems not been installed correctly.";
echo "Reinstall it (see Readme on https://github.com/katyukha/odoo-helper-scripts/)";
exit 1;
fi
if [ -z $ODOO_HELPER_COMMON_IMPORTED ]; then
source $ODOO_HELPER_LIB/common.bash;
fi
ohelper_require 'db';
ohelper_require 'git';
ohelper_require 'addons';
ohelper_require 'odoo';
# ----------------------------------------------------------------------------------------
set -e; # fail on errors
# Prints server script name
# (depends on ODOO_BRANCH environment variable,
# which should be placed in project config)
# Now it simply returns openerp-server
function get_server_script {
    # Echo the first of these executables found on PATH
    # (presumably ordered newest-to-oldest odoo naming -- confirm upstream).
    check_command odoo odoo.py openerp-server openerp-server.py;
}
# Function to check server run status;
# Function echo:
# pid - server running process <pid>
# -1 - server stopped
# -2 - pid file points to unexistent process
#
# server_is_running
# Report the odoo server run state on stdout:
#   <pid>  server running with that process id
#   -1     no pid file -> server stopped
#   -2     pid file exists but its process is gone (stale pid file)
function server_get_pid {
    local pid;
    if [ ! -f "$ODOO_PID_FILE" ]; then
        echo "-1";
        return;
    fi
    pid=$(cat $ODOO_PID_FILE);
    if is_process_running $pid; then
        echo "$pid";
    else
        echo "-2";
    fi
}
# server_run <arg1> .. <argN>
# all arguments will be passed to odoo server
function server_run {
    # Run the odoo server in the foreground, forwarding all arguments.
    # When SERVER_RUN_USER is set, the command is wrapped in
    # `sudo -u <user> -H -E` (preserving the environment).
    local SERVER=`get_server_script`;
    echo -e "${LBLUEC}Running server${NC}: $SERVER $@";
    if [ ! -z $SERVER_RUN_USER ]; then
        local sudo_opt="sudo -u $SERVER_RUN_USER -H -E";
        echov "Using server run opt: $sudo_opt";
    fi
    # exec_conf applies $ODOO_CONF_FILE; execu runs the assembled command.
    exec_conf $ODOO_CONF_FILE execu "$sudo_opt $SERVER $@";
}
function server_start {
    # Start the server in the background, or delegate to $INIT_SCRIPT when
    # one is configured.  Refuses to start if a live pid is already recorded.
    if [ ! -z $INIT_SCRIPT ]; then
        echo -e "${YELLOWC}Starting server via init script: $INIT_SCRIPT ${NC}";
        execu $INIT_SCRIPT start;
    else
        # Check if server process is already running
        if [ $(server_get_pid) -gt 0 ]; then
            echoe -e "${REDC}Server process already running.${NC}";
            exit 1;
        fi
        server_run --pidfile=$ODOO_PID_FILE "$@" &
        local pid=$!;
        # Give the server a moment to come up before reporting.
        sleep 2;
        echoe -e "${GREENC}Odoo started!${NC}";
        echoe -e "PID File: ${YELLOWC}$ODOO_PID_FILE${NC}."
        echoe -e "Process ID: ${YELLOWC}$pid${NC}";
    fi
}
# Stop the background server: try SIGTERM, wait with increasing back-off,
# escalate to SIGKILL if the process refuses to die, then remove the pid
# file.  Delegates to $INIT_SCRIPT when one is configured.
function server_stop {
    if [ ! -z $INIT_SCRIPT ]; then
        echoe -e "${YELLOWC}Stopping server via init script: $INIT_SCRIPT ${NC}";
        execu $INIT_SCRIPT stop;
    else
        local pid=$(server_get_pid);
        if [ $pid -gt 0 ]; then
            if kill $pid; then
                # wait until server is stopped
                for stime in 1 2 3 4; do
                    if is_process_running $pid; then
                        # if process alive, wait a little time
                        echov "Server still running. sleeping for $stime seconds";
                        sleep $stime;
                    else
                        break;
                    fi
                done
                # if process still alive, it seems that it is frozen, so force kill it
                if is_process_running $pid; then
                    kill -SIGKILL $pid;
                    sleep 1;
                fi
                echoe -e "${GREENC}OK${NC}: Server stopped.";
                # FIX: this previously removed the undefined $PID_FILE, so the
                # real pid file was never cleaned up after a stop.
                rm -f $ODOO_PID_FILE;
            else
                echoe -e "${REDC}ERROR${NC}: Cannot kill process.";
            fi
        else
            echoe -e "${YELLOWC}Server seems not to be running!${NC}"
            echoe -e "${YELLOWC}Or PID file $ODOO_PID_FILE was removed${NC}";
        fi
    fi
}
# Print a human readable server status, either by delegating to the
# configured init script or by interpreting server_get_pid's output
# (-1 stopped, -2 stale pid file, otherwise a live pid).
function server_status {
    if [ ! -z $INIT_SCRIPT ]; then
        echoe -e "${BLUEC}Server status via init script:${YELLOWC} $INIT_SCRIPT ${NC}";
        execu $INIT_SCRIPT status;
        return;
    fi
    local server_pid=$(server_get_pid);
    case $server_pid in
        -1)
            echoe -e "${REDC}Server stopped${NC}";
            ;;
        -2)
            echoe -e "${YELLOWC}Pid file points to unexistent process.${NC}";
            ;;
        *)
            echoe -e "${GREENC}Server process already running. PID=${YELLOWC}${server_pid}${GREENC}.${NC}";
            ;;
    esac
}
function server_restart {
    # Restart the server: via $INIT_SCRIPT when configured, otherwise a
    # plain stop followed by start (extra args are passed to start only).
    if [ ! -z $INIT_SCRIPT ]; then
        echoe -e "${YELLOWC}Server restart via init script: $INIT_SCRIPT ${NC}";
        execu $INIT_SCRIPT restart;
    else
        server_stop;
        server_start "$@";
    fi
}
# WARN: only for odoo 8.0+
# Update odoo sources
function server_auto_update {
    # Full self-update cycle: stop server (if running), back up all
    # databases, pull new odoo sources, run module updates on every
    # database, then restart the server if it was running before.
    # Stop odoo server
    if [ $(server_get_pid) -gt 0 ]; then
        echoe -e "${BLUEC}Stopping server...${NC}";
        server_stop;
        local need_start=1;
    fi

    # Do database backup
    odoo_db_backup_all zip;

    # Update odoo sources
    odoo_update_sources;

    echoe -e "${BLUEC}update databases...${NC}";
    addons_install_update "update" all;

    # Start server again if it was stopped
    if [ ! -z $need_start ]; then
        echoe -e "${BLUEC}Starting server...${NC}";
        server_start;
    fi
}
# Print ps aux output for odoo-related processes
# Print 'ps aux' lines for running odoo server processes.
function server_ps {
    local server_script=$(get_server_script);
    if [ -z "$server_script" ]; then
        echo -e "${REDC}ERROR${NC}: this command should be called inside odoo-helper project"
        return 1;
    fi
    echo -e "${YELLOWC}Odoo processes:${NC}";
    # Reuse the already-resolved script name (it was looked up a second time
    # before) and drop the grep process itself from the listing.
    ps aux | grep -e "$server_script" | grep -v grep;
}
# server [options] <command> <args>
# server [options] start <args>
# server [options] stop <args>
# Command-line front end: parses options/subcommands and dispatches to the
# server_* functions above.  Unrecognized leading arguments fall through to
# `server_run`, so `odoo-helper server -d mydb` just runs the server.
function server {
    local usage="
    Usage

        $SCRIPT_NAME server [options] [command] [args]

        args - arguments that usualy will be passed forward to openerp-server script

    Commands:
        run - run the server. if no command supply, this one will be used
        start - start server in background
        stop - stop background running server
        restart - restart background server
        status - status of background server
        auto-update - automatiacly update server. (WARN: experimental feature. may be buggy)
        log - open server log
        ps - print running odoo processes
        -h|--help|help - display this message

    Options:
        --use-test-conf - Use test configuration file for server
        -u|--user - Name of user to run server as
    ";

    # Options are consumed left-to-right; the first non-option token that is
    # not a known subcommand breaks out of the loop and is handed to
    # server_run together with everything after it.
    while [[ $# -gt 0 ]]
    do
        key="$1";
        case $key in
            -h|--help|help)
                echo "$usage";
                exit 0;
            ;;
            --use-test-conf)
                # Swap in the test config for whichever subcommand follows.
                ODOO_CONF_FILE=$ODOO_TEST_CONF_FILE;
                echo -e "${YELLOWC}NOTE${NC}: Using test configuration file: $ODOO_TEST_CONF_FILE";
            ;;
            -u|--user)
                SERVER_RUN_USER=$2;
                shift;
            ;;
            run)
                shift;
                server_run "$@";
                exit;
            ;;
            start)
                shift;
                server_start "$@";
                exit;
            ;;
            stop)
                shift;
                server_stop "$@";
                exit;
            ;;
            restart)
                shift;
                server_restart "$@";
                exit;
            ;;
            status)
                shift;
                server_status "$@";
                exit
            ;;
            auto-update)
                shift;
                server_auto_update "$@";
                exit;
            ;;
            log)
                shift;
                # TODO: remove backward compatability from this code
                less ${LOG_FILE:-$LOG_DIR/odoo.log};
                exit;
            ;;
            ps)
                shift;
                server_ps;
                exit;
            ;;
            *)
                # all nex options have to be passed to the server
                break;
            ;;
        esac;
        shift;
    done;
    # No subcommand matched: default to running the server in the foreground.
    server_run "$@";
    exit;
}
# odoo_py <args>
# Run the odoo entry point directly (odoo / odoo-bin / odoo.py, first one
# found) with the project configuration applied, forwarding all arguments.
function odoo_py {
    echov -e "${LBLUEC}Running odoo.py with arguments${NC}: $@";
    local cmd=$(check_command odoo odoo-bin odoo.py);
    exec_conf $ODOO_CONF_FILE execu $cmd "$@";
}
| true |
4352afcf331d1b3e06db5657b1437ddb90c3385d | Shell | slindes/.emacs.d | /bin/pdb | UTF-8 | 384 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# Locate the stdlib pdb.py of the current `python` and either print its path
# (no arguments) or exec it on the given program.
# NOTE(review): the embedded snippet uses a Python 2 print statement -- it
# will not run under a Python 3 `python`; confirm the intended interpreter.
_prg=$(basename $(readlink -f $0))

# die <exit_code> <message...> -- print "<script>: message" to stderr, exit.
die() {
    local rc=$1
    shift
    printf '%s: %s\n' "${_prg}" "$*" >&2
    exit "$rc"
}

# Ask python which sys.path entry actually contains pdb.py.
pydir=$(python -c 'import sys; import os.path; print [d for d in sys.path if os.path.isfile("%s/pdb.py" % d)][0]')
pdb="$pydir/pdb.py"
[[ -x "$pdb" ]] || die 1 "Not executable: $pdb"

# With arguments, replace this shell with pdb; otherwise just show the path.
if [ -n "$*" ]; then
    exec "$pdb" "$@"
else
    echo "$pdb"
fi
| true |
c8f43184de9a9dfaa8dee00fef186308f0bd7f86 | Shell | maokelong/wpmfs-bench | /run.sh | UTF-8 | 1,085 | 3.625 | 4 | [] | no_license | #!/bin/bash
# run.sh -- wpmfs-bench driver: install Intel Pin, build the custom pintool,
# clone/install wpmfs, then run the macro benchmarks.  All CONFIG_* values,
# echog, and InstallWpmfs come from the sourced configs.sh.
set -e
source configs.sh

# download pin
echo "$CONFIG_SRC_PIN"
pin_tar=$(basename $CONFIG_SRC_PIN)
# Only fetch/unpack when the Pin install directory is missing; keep a
# previously downloaded tarball if present.
if [[ ! -d $CONFIG_PIN_PATH ]]; then
    pushd $CONFIG_PATH_TOOLS
    if [[ ! -f $pin_tar ]]; then
        wget $CONFIG_SRC_PIN
    fi
    tar -xvzf $pin_tar
    # rm -rf $pin_tar
    popd
fi
echog "Wpmfs-bench: Pin(The platform) installed"

# install our customized pintool
# The .cpp pintool is copied into Pin's MyPinTool example tree and built
# there with Pin's own makefile (target = obj-intel64/<name>.so).
pin_so=$(basename $CONFIG_PATH_PINTOOL .cpp).so
cp $CONFIG_PATH_PINTOOL $CONFIG_PIN_PATH/source/tools/MyPinTool
pushd $CONFIG_PIN_PATH/source/tools/MyPinTool
mkdir -p obj-intel64
make obj-intel64/$pin_so
# NOTE(review): CONFIG_PIN_SO_PATH is not read again in this script --
# presumably consumed by the sourced helpers / benchmark scripts; verify.
CONFIG_PIN_SO_PATH=$(realpath -e obj-intel64/$pin_so)
popd
echog "Wpmfs-bench: Pintool(The dynamic library) installed."

# clear syslog
sudo bash -c 'echo "" > /var/log/syslog'
echog "Wpmfs-bench: /var/log/syslog cleared."

# download wpmfs
# Clone only when the directory is missing or empty (e.g. a stale submodule
# placeholder), then install via the sourced InstallWpmfs helper.
if [[ ! -d wpmfs || "`ls wpmfs`" = "" ]]; then
    git clone $CONFIG_SRC_WPMFS wpmfs
    echog "Wpmfs-bench: Wpmfs installed."
fi
InstallWpmfs

pushd micro_bench
# bash run_micro.sh
popd

pushd macro_bench
chmod +x run_macro.sh
bash run_macro.sh
popd
| true |
5ead2c3f007dd00069ec0717e0e93d654165f719 | Shell | petro-rudenko/ferry | /ferry/data/dockerfiles/openmpi-client/test01.sh | UTF-8 | 711 | 3.609375 | 4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | #!/bin/bash
source /etc/profile

# Bash colors
GREEN='\e[0;32m'
NC='\e[0m'

# Commands, kept single-quoted on purpose: $MPI_CONF and the >> redirection
# must be evaluated when the command string is finally run, not here.
MKBINS='mkdir /service/data/binaries'
MKOUT='mkdir /service/data/outputs'
COMPILE='mpic++ -W -Wall /service/examples/helloworld.cpp -o /service/data/binaries/helloworld.o'
RUN='mpirun -np 4 --hostfile $MPI_CONF/hosts /service/data/binaries/helloworld.o >> /service/data/outputs/mpitest.out'

# run_as_ferry <command-string> <description>
# Print the description, then run the command as the "ferry" user when we
# are root, or in the current shell otherwise.
function run_as_ferry {
    echo -e "${GREEN} ${2} ${NC}"
    # Quote $USER: an unset USER previously made the test a syntax error.
    if [ "$USER" == "root" ]; then
        su ferry -c "$1"
    else
        # eval (not bare $1) so variable references and the >> redirection
        # inside the command string behave the same as in the su branch;
        # the previous bare expansion passed '$MPI_CONF/hosts' and '>>'
        # to mpirun as literal arguments.
        eval "$1"
    fi
}

run_as_ferry "$MKBINS" "Creating binary directory"
run_as_ferry "$MKOUT" "Creating output directory"
run_as_ferry "$COMPILE" "Compiling MPI application"
run_as_ferry "$RUN" "Running MPI application"
| true |
07f0f09786cd0eceed26aa01c38135ca1419d6f0 | Shell | erebe/couber | /scripts/coub.sh | UTF-8 | 1,686 | 3.5 | 4 | [] | no_license | #!/usr/bin/env bash
set -e

# coub.sh <coub-id> <output-dir>
# Download a coub, loop it to ~30s with its audio track, produce a
# watermark-free re-encode, a thumbnail, and a JSON metadata file, then
# move the whole directory into <output-dir>.

video_name=${1}
mkdir "${video_name}"
video_path="${video_name}/${video_name}"
video_url="https://coub.com/view/${video_name}"
output="${2}"
loops=50

# cleanup <exit_code> <line> -- drop intermediates; on error also remove
# the whole working directory.
function cleanup() {
    rm -f ${video_path}.{mp4.avi,txt,mp3}

    if [ "$1" != "0" ]; then
        echo "Error $1 occurred on $2"
        rm -rf "${video_name}"
    fi
}
# FIX: the trap argument must be single-quoted so $? and $LINENO are
# expanded when the trap FIRES.  Double-quoted, they expanded at
# definition time, so cleanup always ran with "0" and the error path
# (removing the partial directory) never triggered.
trap 'cleanup $? $LINENO' EXIT INT TERM

youtube-dl -o ${video_path}.mp4 ${video_url}
youtube-dl -f html5-audio-high -o ${video_path}.mp3 ${video_url}
# Zero out the first two bytes of the container (coub's obfuscation).
printf '\x00\x00' | dd of=${video_path}.mp4 bs=1 count=2 conv=notrunc
# Build an ffmpeg concat list repeating the clip, then cap at 30 seconds.
for i in `seq 1 "$loops"`; do echo "file '${1}.mp4'" >> ${video_path}.txt; done
ffmpeg -y -hide_banner -t 30 -f concat -i ${video_path}.txt -i ${video_path}.mp3 -c copy -shortest -movflags faststart -c:a aac -b:a 128k "${video_path}".ori.mp4

# Version without watermark
echo '' > "${video_path}.txt"
python3 remove_watermark.py "${video_path}.mp4"
for i in `seq 1 "$loops"`; do echo "file '${1}.mp4.avi'" >> ${video_path}.txt; done
ffmpeg -y -hide_banner -t 30 -f concat -i ${video_path}.txt -i ${video_path}.mp3 -c copy -shortest -movflags faststart -vcodec libx264 -c:a aac -b:a 128k "$video_path".mp4
ffmpegthumbnailer -i "${video_path}.mp4" -o "${video_path}.thumbnail.png" -s 500
# Scrape the coub page for its tag list (JSON embedded after the
# coubPageCoubJson marker), joined into a comma-separated string.
tags=$(curl -s ${video_url} | grep -A1 coubPageCoubJson | tail -n 1 | jq .tags[].value | paste -sd ',')
cat <<EOF > ${video_path}.js
{
    "name": "${video_name}",
    "url": "/videos/${video_path}.mp4",
    "tags": [${tags}],
    "original": "/videos/${video_path}.ori.mp4",
    "thumbnail": "/videos/${video_path}.thumbnail.png",
    "creation_timestamp": $(date '+%s')
}
EOF
cleanup 0 ''
rm -rf "${2}/${video_name}"
mv "${video_name}" "${2}"
| true |
20d5f2b7be139b1a5669bf6eff2f3b8d5b2de469 | Shell | jcsteven/openbmc | /meta-google/recipes-google/ncsi/files/gbmc-ncsi-br-pub-addr.sh.in | UTF-8 | 3,668 | 3.359375 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[ -z "${gbmc_ncsi_br_pub_addr_lib-}" ] || return
gbmc_ncsi_br_pub_addr_init=
gbmc_ncsi_br_pub_addr_lastip=
gbmc_ncsi_br_pub_addr_update() {
[ -n "$gbmc_ncsi_br_pub_addr_init" ] || return
printf 'gBMC Bridge Pub Addr from NCSI: %s\n' \
"${gbmc_ncsi_br_pub_addr_lastip:-(deleted)}" >&2
local pfx=
if [ -n "$gbmc_ncsi_br_pub_addr_lastip" ]; then
# Pad the address out to a /64 and ensure that it doesn't have extra bits
pfx="${gbmc_ncsi_br_pub_addr_lastip%::}"
while true; do
# Count `:` in `pfx` by removing them and diffing their lengths
local nos="${pfx//:/}"
(( ${#pfx} - ${#nos} >= 3 )) && break
pfx+=":0"
done
# Addresses that have more than 64bits of prefix (more than 3 separators)
# do not work with this scheme. Ignore them.
(( ${#pfx} - ${#nos} == 3 )) || pfx=
fi
local contents='[Network]'$'\n'
if [ -n "$pfx" ]; then
local here=
read -r -d '' here <<EOF
Address=${pfx}:fd01::/128
IPv6PrefixDelegation=yes
[IPv6PrefixDelegation]
RouterLifetimeSec=60
[IPv6Prefix]
Prefix=${pfx}:fd00::/80
PreferredLifetimeSec=60
ValidLifetimeSec=60
[IPv6RoutePrefix]
Route=${pfx}:fd01::/80
LifetimeSec=60
EOF
contents+="$here"$'\n'
fi
local file
for file in /run/systemd/network/{00,}-bmc-gbmcbr.network.d/50-public.conf; do
mkdir -p -m 755 "$(dirname "$file")"
printf '%s' "$contents" >"$file"
done
# We only restart networkd if we know we have a management network available
# on the machine and networkd is already running.
if [ -e /lib/systemd/network/-bmc-gbmcbrdummy.network ] && \
! systemctl status systemd-networkd | grep -q inactive; then
echo "Restarting networkd" >&2
# HACK: We can't restart systemd-networkd without coordinating with
# phosphor-networkd, otherwise it will sometimes detect interfaces as
# unmanaged because it reads administrative state to determine enabled
# status. Adding an IP to phosphor-networkd is guaranteed to trigger the
# restart we want, and systemd-network will never actually accept the
# new value.
local start=$SECONDS
while (( SECONDS - start < 30 )); do
busctl call xyz.openbmc_project.Network \
/xyz/openbmc_project/network/gbmcbrdummy \
xyz.openbmc_project.Network.IP.Create IP ssys \
xyz.openbmc_project.Network.IP.Protocol.IPv6 ff02::1 128 '' && break
sleep 1
done
fi
}
gbmc_ncsi_br_pub_addr_hook() {
if [ "$change" = 'init' ]; then
gbmc_ncsi_br_pub_addr_init=1
gbmc_ncsi_br_pub_addr_update
elif [ "$change" = 'addr' -a "$intf" = '@NCSI_IF@' ] &&
[ "$scope" = 'global' -a "$fam" = 'inet6' ]; then
if [ "$action" = 'add' -a "$ip" != "$gbmc_ncsi_br_pub_addr_lastip" ]; then
gbmc_ncsi_br_pub_addr_lastip="$ip"
gbmc_ncsi_br_pub_addr_update
fi
if [ "$action" = 'del' -a "$ip" = "$gbmc_ncsi_br_pub_addr_lastip" ]; then
gbmc_ncsi_br_pub_addr_lastip=
gbmc_ncsi_br_pub_addr_update
fi
fi
}
GBMC_IP_MONITOR_HOOKS+=(gbmc_ncsi_br_pub_addr_hook)
gbmc_ncsi_br_pub_addr_lib=1
| true |
02fdb85e8b0ee8c29f86c2b5976b7d0349593955 | Shell | novigit/broCode | /doStandardRaxml.sh | UTF-8 | 1,700 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# documentation
# Simply does raxml where you dont have to set all the standard settings
# like 100 bootstraps, runname, -x and -p etc
# to do: set a true random number of -p and -x
# state usage
function usage() {
echo -e "Usage: \n\tdoStandardRaxml.sh -p <phylip> -t <threads> -o <outdir> [ -m <nt|aa> ]\n\n"
echo "Will replace most illegal characters with underscore in phylip"
echo "Will use GTRGAMMA if dna, and PROTGAMMALG if protein; Default is PROTGAMMALG"
echo -e "Final tree files will be in directory with the basename of the phylip\n\n"
echo "Currently requires <phylip> ends with .phylip. Also number of threads must be > 1"
exit
}
# if number or arguments is less than 4, invoke usage function
if [ "$#" -lt "6" ]; then
usage
exit
fi
# default mode
mode="aa"
# state options
while getopts ":p:t:m:o:" opt; do
case $opt in
p) phylip=${OPTARG};;
t) threads=${OPTARG};;
m) mode=${OPTARG};;
o) outdir=${OPTARG};;
*) usage ;;
esac
done
# checks
# if file ends with .phylip
# if threads > 1
# if outfile already exists
# prepare outdir
runname=$(basename $phylip .phylip)
mkdir -p $outdir/$runname/
# fix names
sed -i -r "s/[)(:;,]/_/g" $phylip
# set model
model=PROTGAMMALG # default model
if [ "$mode" = "nt" ]; then
model=GTRGAMMA
elif [ "$mode" = "aa" ]; then
model=PROTGAMMALG
fi
echo "Alignment file: $phylip"
echo "Mode: $mode, Model: $model"
echo "Out directory: $outdir"
echo "Number of threads: $threads"
# do raxml
echo "Running raxml ..."
raxmlHPC-PTHREADS-SSE3 \
-f a -x 12345 -p 12345 -N 100 -m $model \
-s $phylip -n $runname -w $(pwd)/$outdir/$runname -T $threads &> /dev/null
echo "Done!"
| true |
d5c4277f0a037311b52d0be82230683c11d8c9df | Shell | antarus/mystra-pve | /vagrant-conf/provision-nginx.sh | UTF-8 | 5,944 | 3.375 | 3 | [] | no_license | #!/bin/bash
php_config_file="/etc/php5/fpm/php.ini"
xdebug_config_file="/etc/php5/mods-available/xdebug.ini"
mysql_config_file="/etc/mysql/my.cnf"
document_root_zend="/var/www/zf"
document_public_zend="${document_root_zend}/public"
# This function is called at the very bottom of the file
main() {
repositories_go
update_go
network_go
tools_go
nginx_go
mysql_go
php_go
phpmyadmin_go
maj_composer_Project
autoremove_go
}
repositories_go() {
echo "NOOP"
}
update_go() {
# Update the server
apt-get update
# apt-get -y upgrade
}
autoremove_go() {
apt-get -y autoremove
}
network_go() {
IPADDR=$(/sbin/ifconfig eth0 | awk '/inet / { print $2 }' | sed 's/addr://')
sed -i "s/^${IPADDR}.*//" /etc/hosts
echo ${IPADDR} ubuntu.localhost >> /etc/hosts # Just to quiet down some error messages
}
tools_go() {
# Install basic tools
apt-get -y install build-essential binutils-doc git subversion
}
nginx_go() {
apt-get -y install nginx php5-fpm
sudo rm /etc/nginx/sites-available/default
sudo touch /etc/nginx/sites-available/default
# sudo cat >> /etc/nginx/sites-available/default <<'EOF'
sudo cat << EOF > /etc/nginx/sites-available/default
server {
listen 80;
root ${document_public_zend};
index index.php index.html index.htm;
# Make site accessible from http://localhost/
server_name _;
location / {
# First attempt to serve request as file, then
# as directory, then fall back to index.html
try_files \$uri \$uri/ /index.php$is_args$args;
}
location /doc/ {
alias /usr/share/doc/;
autoindex on;
allow 127.0.0.1;
deny all;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
# pass the PHP scripts to FastCGI server listening on /tmp/php5-fpm.sock
#
location ~ \.php$ {
try_files \$uri =404;
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass unix:/var/run/php5-fpm.sock;
fastcgi_index index.php;
include fastcgi_params;
}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
location ~ /\.ht {
deny all;
}
### phpMyAdmin ###
location /phpmyadmin {
root /usr/share/;
index index.php index.html index.htm;
location ~ ^/phpmyadmin/(.+\.php)$ {
client_max_body_size 4M;
client_body_buffer_size 128k;
try_files \$uri =404;
root /usr/share/;
# Point it to the fpm socket;
fastcgi_pass unix:/var/run/php5-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
include /etc/nginx/fastcgi_params;
}
location ~* ^/phpmyadmin/(.+\.(jpg|jpeg|gif|css|png|js|ico|html|xml|txt)) {
root /usr/share/;
}
}
location /phpMyAdmin {
rewrite ^/* /phpmyadmin last;
}
### phpMyAdmin ###
}
EOF
sudo touch /usr/share/nginx/html/info.php
sudo cat >> /usr/share/nginx/html/info.php <<'EOF'
<?php phpinfo(); ?>
EOF
sudo service nginx restart
sudo service php5-fpm restart
}
php_go() {
apt-get -y install php5 php5-cli php5-curl php5-mysql php5-sqlite php5-xdebug php5-intl
sed -i "s/display_startup_errors = Off/display_startup_errors = On/g" ${php_config_file}
sed -i "s/display_errors = Off/display_errors = On/g" ${php_config_file}
if [ -f "${xdebug_config_file}" ]; then
cat << EOF > ${xdebug_config_file}
zend_extension=xdebug.so
xdebug.remote_handle=dbgp
xdebug.remote_enable=1
xdebug.remote_connect_back=1
xdebug.remote_port=9000
xdebug.idekey=netbeans-xdebug
xdebug.remote_mode="req"
EOF
fi
sudo service nginx restart
sudo service php5-fpm restart
}
mysql_go() {
# Install MySQL
echo "mysql-server mysql-server/root_password password root" | debconf-set-selections
echo "mysql-server mysql-server/root_password_again password root" | debconf-set-selections
apt-get -y install mysql-client mysql-server
sed -i "s/bind-address\s*=\s*127.0.0.1/bind-address = 0.0.0.0/" ${mysql_config_file}
# Allow root access from any host
echo "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'root' WITH GRANT OPTION" | mysql -u root --password=root
echo "GRANT PROXY ON ''@'' TO 'root'@'%' WITH GRANT OPTION" | mysql -u root --password=root
if [ -d "/vagrant/vagrant-conf/provision-sql" ]; then
echo "Executing all SQL files in /vagrant/provision-sql folder ..."
echo "-------------------------------------"
for sql_file in /vagrant/vagrant-conf/provision-sql/*.sql
do
echo "EXECUTING $sql_file..."
time mysql -u root --password=root < $sql_file
echo "FINISHED $sql_file"
echo ""
done
fi
service mysql restart
}
phpmyadmin_go(){
# Default PHPMyAdmin Settings
debconf-set-selections <<< 'phpmyadmin phpmyadmin/dbconfig-install boolean true'
debconf-set-selections <<< 'phpmyadmin phpmyadmin/app-password-confirm password root'
debconf-set-selections <<< 'phpmyadmin phpmyadmin/mysql/admin-pass password root'
debconf-set-selections <<< 'phpmyadmin phpmyadmin/mysql/app-pass password root'
debconf-set-selections <<< 'phpmyadmin phpmyadmin/reconfigure-webserver multiselect apache2'
# Install PHPMyAdmin
apt-get install -y phpmyadmin
}
maj_composer_Project(){
sudo apt-get -y install curl
# Install latest version of Composer globally
if [ ! -f "/usr/local/bin/composer" ]; then
curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin --filename=composer
fi
# Install PHP Unit 4.8 globally
if [ ! -f "/usr/local/bin/phpunit" ]; then
curl -O -L https://phar.phpunit.de/phpunit-old.phar
chmod +x phpunit-old.phar
mv phpunit-old.phar /usr/local/bin/phpunit
fi
cd ${document_root_zend}
curl -Ss https://getcomposer.org/installer | php
php composer.phar install --no-progress
}
main
exit 0 | true |
6154ec87d54dc8efc6f0243bbff82ed7275fe4e2 | Shell | spatialos/UnrealGDK | /ci/get-engine.sh | UTF-8 | 2,903 | 3.984375 | 4 | [
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | #!/usr/bin/env bash
set -e -u -o pipefail
if [[ -n "${DEBUG-}" ]]; then
set -x
fi
pushd "$(dirname "$0")"
# Unreal path is the path to the Engine directory. No symlinking for mac, because they seem to cause issues during the build.
#This should ultimately resolve to "/Users/buildkite-agent/builds/<agent name>/improbable/UnrealEngine".
UNREAL_PATH="${1:-"$(pwd)/../../UnrealEngine"}"
# The GCS bucket that stores the built out Unreal Engine we want to retrieve
GCS_PUBLISH_BUCKET="${2:-io-internal-infra-unreal-artifacts-production/UnrealEngine}"
GDK_HOME="$(pwd)/.."
pushd "${GDK_HOME}"
# Fetch the version of Unreal Engine we need
pushd "ci"
# Allow overriding the engine version if required
if [[ -n "${ENGINE_COMMIT_HASH:-}" ]]; then
VERSION_DESCRIPTION="${ENGINE_COMMIT_HASH}"
echo "Using engine version defined by ENGINE_COMMIT_HASH: ${VERSION_DESCRIPTION}"
else
# Read Engine version from the file and trim any trailing white spaces and new lines.
VERSION_DESCRIPTION=$(head -n 1 unreal-engine.version)
echo "Using engine version found in unreal-engine.version file: ${VERSION_DESCRIPTION}"
fi
# Check if we are using a 'floating' engine version, meaning that we want to get the latest built version of the engine on some branch
# This is specified by putting "HEAD name/of-a-branch" in the unreal-engine.version file
# If so, retrieve the version of the latest build from GCS, and use that going forward.
HEAD_VERSION_PREFIX="HEAD "
if [[ "${VERSION_DESCRIPTION}" == ${HEAD_VERSION_PREFIX}* ]]; then
VERSION_BRANCH=${VERSION_DESCRIPTION#"${HEAD_VERSION_PREFIX}"} # Remove the prefix to just get the branch name
VERSION_BRANCH=$(echo ${VERSION_BRANCH} | tr "/" "_") # Replace / with _ since / is treated as the folder seperator in GCS
# Download the head pointer file for the given branch, which contains the latest built version of the engine from that branch
HEAD_POINTER_GCS_PATH="gs://${GCS_PUBLISH_BUCKET}/HEAD/mac-${VERSION_BRANCH}.version"
UNREAL_VERSION=$(gsutil cp "${HEAD_POINTER_GCS_PATH}" -) # the '-' at the end instructs gsutil to download the file and output the contents to stdout
else
UNREAL_VERSION="Mac-UnrealEngine-${VERSION_DESCRIPTION}"
fi
popd
echo "--- download-unreal-engine"
ENGINE_GCS_PATH="gs://${GCS_PUBLISH_BUCKET}/${UNREAL_VERSION}.zip"
echo "Downloading Unreal Engine artifacts version ${UNREAL_VERSION} from ${ENGINE_GCS_PATH}"
gsutil cp -n "${ENGINE_GCS_PATH}" "${UNREAL_VERSION}".zip
7z x "${UNREAL_VERSION}".zip -o${UNREAL_PATH} -aos
popd
popd
| true |
3fe675fe86e910b69fbb9b8d583454412a1708ee | Shell | Warbo/music-scripts | /raw/gather_acoustids.sh | UTF-8 | 3,437 | 4.25 | 4 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/usr/bin/env bash
set -euo pipefail
## Don't run directly, use gather_acoustids.nix to bake-in dependencies
function getField() {
grep "^$1=" | sed -e "s/$1=//g"
}
function getAcoustID() {
# Takes file path as first arg (e.g. 'Music/Commercial/A/Artist/track.mp3')
# and cache path as second arg (e.g. '.acoustid_cache/A/Artist'). Calculates
# AcoustID of the music file and appends it to the cache file.
echo "Fingerprinting $1" 1>&2
if RESULT=$(fpcalc -raw "$1")
then
DURATION=$(echo "$RESULT" | getField 'DURATION')
FINGERPRINT=$(echo "$RESULT" | getField 'FINGERPRINT')
echo -e "$1\\t$DURATION\\t$FINGERPRINT" >> "$2"
fi
}
function alreadyKnown() {
# Takes file path as first arg (e.g. 'Music/Commercial/A/Artist/track.mp3')
# and cache path as second arg (e.g. '.acoustid_cache/A/Artist'). Returns
# whether or not the cache contains an entry for that file (if the cache
# file doesn't exist, that counts as not having an entry)
[[ -e "$2" ]] || return 1
cut -f1 < "$2" | grep -Fx "$1" > /dev/null
}
function cacheFromPath() {
# Takes a file path as arg (e.g. 'Music/Commercial/A/Artist/track.mp3') and
# echoes the relevant cache file name (e.g. '.acoustid_cache/A/Artist')
I=$(echo "$1" | cut -d '/' -f 3)
A=$(echo "$1" | cut -d '/' -f 4)
echo ".acoustid_cache/$I/$A"
}
function processInitial() {
# Takes a path to an initial directory (e.g. 'Music/Commercial/A') and
# processes all artists in that directory.
for ARTIST in "$1"/*
do
if [[ -d "$ARTIST" ]]
then
processArtist "$ARTIST"
fi
done
}
function processArtist() {
# Takes a path to an artist (e.g. 'Music/Commercial/A/Artist') and processes
# all files in that directory.
# Find (or create) the cache and read all of the filenames it contains
CACHE=$(cacheFromPath "$1")
PARENT=$(dirname "$CACHE")
mkdir -p "$PARENT"
unset PARENT
touch "$CACHE"
KNOWN=$(cut -f1 < "$CACHE")
# Loop through every file in this artist directory
while read -r F
do
# Grep for this file in the cached paths, skip it if found
if echo "$KNOWN" | grep -Fx "$F" > /dev/null
then
printf '.' 1>&2
continue
fi
# If not found, calculate the the AcoustID and append it to the cache
getAcoustID "$F" "$CACHE"
done < <(find "$1" -type f)
}
# If we've been called with an argument, use that as the path for finding files.
# Otherwise default to 'Music/Commercial'.
DIR="${1:-Music/Commercial}"
echo "$DIR" | grep '^Music/Commercial' > /dev/null || {
echo "Error: Argument '$DIR' doesn't begin with 'Music/Commercial'" 1>&2
echo "Aborting, since this will not match any cached values." 1>&2
exit 1
}
# We process one artist at a time, so that we only need to read each cache once.
# See if we've been given exactly one artist to process.
ARTIST=$(echo "$DIR" | cut -d '/' -f 4)
if [[ -n "$ARTIST" ]]
then
processArtist "$DIR"
else
# We've not got one artist, but maybe we have one initial
INIT=$(echo "$DIR" | cut -d '/' -f 3)
if [[ -n "$INIT" ]]
then
processInitial "$DIR"
else
# If we're here then we're processing everything in 'Music/Commercial'
for INIT in "$DIR"/*
do
processInitial "$INIT"
done
fi
fi
| true |
6e08629ba544094a0f4844b1506dffe2d7fb8fe8 | Shell | stevenkaras/bashfiles | /.bash_completion.d/triage | UTF-8 | 922 | 3.453125 | 3 | [] | no_license | #!/bin/bash
__triage_completion() {
local current_word=${COMP_WORDS[COMP_CWORD]}
local previous_word=${COMP_WORDS[COMP_CWORD-1]}
local all_words=("${COMP_WORDS[@]}")
local which_word=$COMP_CWORD
if [[ $current_word == *"="* ]]; then
previous_word=${current_word%=*}
current_word=${current_word#*=}
fi
local words=""
if (($which_word == 1)); then
# display only commands
words="help what defer all add log resolve"
else
case ${COMP_WORDS[1]} in
help)
words="help what defer all add log resolve"
;;
what)
;;
defer)
;;
all)
;;
add)
;;
log)
;;
resolve)
;;
esac
fi
COMPREPLY=($(compgen -W "$words" -- $current_word))
}
complete -o default -o nospace -F __triage_completion triage
| true |
24d69840cff421c26326d1dd7558e1aea066a37a | Shell | upa/graft-bench | /zmq/zmq-nat-lat-test.sh | UTF-8 | 749 | 3.0625 | 3 | [] | no_license | #!/bin/bash
docker_server=yayoi1
sshcmd="ssh -i ~/.ssh/id_rsa_nopass $docker_server"
local_lat=~/src/zeromq-4.2.2/perf/local_lat
remote_lat=~/src/zeromq-4.2.2/perf/remote_lat
# docker nat
msgcnt=10000
for x in `seq 1 10`; do
for msgsize in 64 256 1000 4000 16000 64000 256000 1000000 4000000 16000000 64000000; do
echo msgsize $msgsize, count $x
echo start local_lat in nat conitaner at $docker_server
conid=`$sshcmd docker run -d --rm -p 10.0.0.1:5555:5555 nat-zmq \
local_lat tcp://0.0.0.0:5555 $msgsize $msgcnt`
echo $conid
echo start remote_lat
$remote_lat tcp://10.0.0.1:5555 $msgsize $msgcnt \
> output/nat-lat-msgsize_${msgsize}-msgcnt_${msgcnt}-${x}.txt
echo stop container
$sshcmd docker stop $conid
sleep 2
done
done
| true |
347fb6708840968910634ed8acc483ca5ac72f11 | Shell | miyamonz/dotfiles | /addpath.sh | UTF-8 | 220 | 3.390625 | 3 | [] | no_license | #!/bin/bash
MODULE_DIR=$HOME/dotfiles/modules
FOLDERS=$(ls "$MODULE_DIR")
for FOLDER in $FOLDERS
do
BINPATH="$MODULE_DIR/$FOLDER/bin"
if [[ -d $BINPATH ]]; then
export PATH="$BINPATH:$PATH"
fi
done
| true |
1f913c216fbb612c26eacfacbbca64b921b764e5 | Shell | techn0punk/chip-arch-build | /scripts/build_uboot.sh | UTF-8 | 380 | 2.75 | 3 | [] | no_license | #!/bin/env bash
ALLFLAGS="$MAKEFLAGS CROSS_COMPILE=$UBOOT_COMPILER_PREFIX"
cd $BUILD_ROOTDIR/chip-u-boot
echo "Building uboot..."
{
let CLEAR && make clean
make $ALLFLAGS CHIP_defconfig
make $ALLFLAGS
} &>$BUILD_ROOTDIR/logs/uboot-build.log
test -e $BUILD_ROOTDIR/chip-u-boot/spl/sunxi-spl-with-ecc.bin || {
echo "u-Boot build failed - see uboot-build.log"
exit -1
}
| true |
355acf349224342f0f04382d4a946f78ea3a16a6 | Shell | jimmyklein4/Arch-Scripts | /batt.sh | UTF-8 | 1,331 | 2.875 | 3 | [] | no_license | #!/bin/bash
# For when you have two batteries in your computer
# Combines most entries for both batteries into one so you can get a more accurate readout
# Make sure to create the folder ~/.BATT_TOTAL
while true
do
paste /sys/class/power_supply/BAT0/uevent /sys/class/power_supply/BAT1/uevent | awk '{split($0,a,"="); split(a[2],b," "); (a[3] == "Charging" || b[1] == "Charging") ? $5 = "Charging" : $5 = (a[3] + b[1])/2; print a[1] "=" $5}' > .BAT_TOTAL/uevent
paste /sys/class/power_supply/BAT0/energy_full_design /sys/class/power_supply/BAT1/energy_full_design | awk '{split($0,a," "); print a[1]+a[2]}' > .BAT_TOTAL/energy_full_design
paste /sys/class/power_supply/BAT0/energy_now /sys/class/power_supply/BAT1/energy_now | awk '{split($0,a," "); print a[1]+a[2]}' > .BAT_TOTAL/energy_now
paste /sys/class/power_supply/BAT0/energy_now /sys/class/power_supply/BAT1/energy_now | awk '{split($0,a," "); print a[1]+a[2]}' > .BAT_TOTAL/energy_now
paste /sys/class/power_supply/BAT0/energy_full /sys/class/power_supply/BAT1/energy_full | awk '{split($0,a," "); print a[1]+a[2]}' > .BAT_TOTAL/energy_full
paste /sys/class/power_supply/BAT0/power_now /sys/class/power_supply/BAT1/power_now | awk '{split($0,a," "); print a[1]+a[2]}' > .BAT_TOTAL/power_now
paste /sys/class/power_supply/BAT1/status > .BAT_TOTAL/status
sleep 20
done
| true |
28e356a16c4c8ceb54d851eb34e3b203865058b2 | Shell | GBeushausen/distill | /tests/Resources/scripts/iso.sh | UTF-8 | 872 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
################################################################################
# Initial configuration
################################################################################
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
FILES_DIR="$DIR/../files"
################################################################################
# Clean files
################################################################################
rm -f $FILES_DIR/file_ok.iso $FILES_DIR/file_fake.iso
################################################################################
# Generate files
################################################################################
# iso: fake file
dd if=/dev/urandom of=$FILES_DIR/file_fake.iso bs=1 count=1240
# iso: regular file
cd $FILES_DIR/uncompressed
hdiutil makehybrid -iso -joliet -o ../file_ok.iso . | true |
4ca14cf22a644dc91e91b168fc426386d3bcbeec | Shell | purdue-aalp/gpgpu-sim_simulations | /benchmarks/src/cuda/lonestargpu-2.0/apps/sp/run | UTF-8 | 162 | 2.703125 | 3 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
for i in ../../inputs/random*.cnf; do
MAXCLAUSES=`echo $i | sed 's/.*-\([0-9]\+\).cnf/\1/'`
echo $i
./nsp.sh ./nsp $i $MAXCLAUSES
done;
| true |
0caf021068f14644929d41a349f66b165b999ce0 | Shell | lucifer654321/kubernetes | /deploy_k8s_scripts/create_files/02.deploy_k8s_docker_src.sh | UTF-8 | 1,723 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# 2020年12月28日
# Auto Deploy Docker with source
# BY: Lucifer
###################################
if [ ${SCRIPTS_DIR} == "" ];then
SCRIPTS_DIR=$(cd ..; pwd)
fi
source ${SCRIPTS_DIR}/env/directory_set.sh
BIN_DIR="/usr/local/bin"
Docker_VER="19.03.14"
Docker_SRC="docker-${Docker_VER}.tgz"
Docker_URL="https://download.docker.com/linux/static/stable/x86_64"
yum remove -y docker*
# Download
mkdir -p ${WORK_DIR}/{src,bin,conf/docker,service/docker}
cd ${WORK_DIR}/src
wget -c -N ${Docker_URL}/${Docker_SRC}
tar xvf ${Docker_SRC} -C ${WORK_DIR}/bin
cd ${WORK_DIR}/service/docker
# Create docker.service
cat > docker.service <<"EOF"
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io
[Service]
ExecStart=/usr/local/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
Restart=on-failure
RestartSec=5
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
# Create Docker Daemon.json
cd ${WORK_DIR}/conf/docker
cat > docker-daemon.json <<EOF
{
"registry-mirrors": [
"https://dbzucv6w.mirror.aliyuncs.com",
"https://registry.docker-cn.com",
"http://hub-mirror.c.163.com",
"https://docker.mirrors.ustc.edu.cn"
],
"exec-opts": ["native.cgroupdriver=systemd"],
"data-root": "${Docker_DATA_DIR}",
"exec-root": "${Docker_EXEC_DIR}",
"max-concurrent-downloads": 10,
"max-concurrent-uploads": 5,
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "5"
},
"live-restore": true,
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
]
}
EOF
| true |
4c52747d9969f415b8fe05b6dc996c90e0842567 | Shell | aolivier23/ProtoDUNE-crt-daq-scripts | /startupscript/start_shift.sh | UTF-8 | 1,009 | 2.734375 | 3 | [] | no_license | #!/bin/zsh
source ~/.profile
REMOTE_HOST="crackle.nevis.columbia.edu"
REMOTE_DAQ="snap.nevis.columbia.edu"
USER="dconline"
REMOTE_PATH="/local/home/dconline/dchooz/dconline/trunk/DCOV/readout/startupscript"
cd ${DCONLINE_PATH}/DCOV/readout/startupscript
./stopOV_GUI.sh
sleep 10
echo "Starting all the server processes on $HOST with username $USER"
ssh $REMOTE_HOST -l $USER "nohup ${REMOTE_PATH}/stopOV_wo_GUI.sh $REMOTE_DAQ >>${REMOTE_PATH}/log/stopOV_wo_GUI.log 2>>${REMOTE_PATH}/log/stopOV_wo_GUI.log < /dev/null &"
sleep 15
ssh $REMOTE_HOST -l $USER "nohup ${REMOTE_PATH}/startOV_wo_GUI.sh $REMOTE_DAQ >>${REMOTE_PATH}/log/startOV_wo_GUI.log 2>>${REMOTE_PATH}/log/startOV_wo_GUI.log < /dev/null &"
sleep 30
echo "Starting GUI interface on localhost"
cd ${DCONLINE_PATH}/DCOV/readout/startupscript
./startOV_GUI.sh
#echo "Starting HV GUI interface on localhost"
./start_HV_GUI.sh
echo "Done."
#echo "starting HV GUI locally"
#ssh -Y -l $USER $REMOTE_HOST "$REMOTE_PATH/start_HV_GUI.sh"
| true |
02ec00e54e5c662cff1d8a4b2d5d5ba5e91493ab | Shell | shaakaud/access-UK | /bash/bashrc | UTF-8 | 8,675 | 3.109375 | 3 | [] | no_license | ###########################################################################
#
# Filename: .bashrc
#
# Author: smorton based on .bashrc for cygwin 1.3 by mbs
#
# Created: 2010/06/03 17:45:10
#
# Description: Default .bashrc for new developers on PANOS using DESKTOP_2: Win7 + Cygwin 1.5
#
# Note - you should try not to modify this file if you can help it. You can
# put your personal settings in .bashrc.local which this file will 'source'
# automatically.
#
#
###########################################################################
#
# Source Control System Information
#
# $Id: .bashrc,v 1.11 2015/07/30 17:45:15 jswart Exp $
#
###########################################################################
#
# Copyright (c) 2010-2010 Alcatel-Lucent
#
###########################################################################
# Remember where the java launcher lives BEFORE we reset PATH below --
# on Windows it may only be reachable through the inherited system PATH.
java=$(command -v java)

# Reset PATH to a known-good Unix baseline; platform- and tool-specific
# entries are appended in stages further down this file.
export PATH=/usr/local/bin:/usr/bin:/bin:/usr/X11R6/bin

# Detect Cygwin vs. native Linux and record the answer ($cygwin is used
# as a boolean by the rest of this file).
if uname -a | grep -qi cygwin
then
    export OSTYPE=cygwin
    cygwin=1
    export WIND_HOST_TYPE=x86-win32
    export DESKTOP_2=1
else
    export OSTYPE=linux
    cygwin=0
    export WIND_HOST_TYPE=x86-linux
fi
# Keep USER and USERNAME in sync: Windows/Cygwin sessions typically set
# only USERNAME while Unix logins set only USER, so mirror whichever one
# exists into the other.  (Quoted expansions with ':-' defaults so the
# tests behave correctly even when a variable is unset.)
if [ -z "${USER:-}" ] && [ -n "${USERNAME:-}" ]
then
    export USER=$USERNAME
fi
if [ -z "${USERNAME:-}" ] && [ -n "${USER:-}" ]
then
    export USERNAME=$USER
fi
# This is not needed to build TiMOS.  The build environment figures it out
# automatically.  But, it is here just to catch incorrectly installed software
# installations to make it easier for me to debug an installation somebody
# has screwed up.
export WIND_BASE=/usr/local/tornado-2.0
export EMACS_BASE=/usr/local/emacs-20.6
# Scratch directories: some Windows tools honor TEMP while Unix tools
# honor TMP, so set both to the same place.
export TMP=/tmp
export TEMP=/tmp

#
# CVS things
#
# pserver connection strings for the main, gash and admin repositories.
# They embed $USER, so USER must already be set (see the sync logic above).
export CVSROOT=":pserver:$USER@cvspc.mv.usa.alcatel.com:/swdev/cvsrep"
export GASHROOT=":pserver:$USER@gashpc.mv.usa.alcatel.com:/swdev/cvsrep"
export ADMINROOT=":pserver:$USER@cvspc.mv.usa.alcatel.com:/swdev/adminrep"
# CVSREAD=TRUE makes checkouts/updates create files read-only, which pairs
# with the 'cvs edit' workflow (see the cvsedit alias further down).
export CVSREAD=TRUE
#
# Things for building PANOS
#
# NOTE(review): MAKE_MODE=UNIX looks like the old Cygwin-make switch that
# selects a POSIX shell instead of cmd.exe -- confirm it is still honored.
export MAKE_MODE=UNIX
# The default TiMOS build targets.
export TGT_HW=i386
export TGT_SYS=both
#
# Set up the path.  It's done in stages...
#
# Cygwin's /etc/alternatives holds symlinks to the preferred tool versions.
# (Chained [ ] tests with '&&'; the old 'test -a' operator is deprecated
# and ambiguous.  ${cygwin:-0} defaults defensively if unset.)
if [ "${cygwin:-0}" -eq 1 ] && [ -d /etc/alternatives ]
then
    export PATH="${PATH}:/etc/alternatives"
fi

#
# Wind River Tornado
#
if [ -d "${WIND_BASE}/host/${WIND_HOST_TYPE}/bin" ]
then
    export PATH="${PATH}:${WIND_BASE}/host/${WIND_HOST_TYPE}/bin"
fi

#
# Various tools that may or may not exist
#

# Emacs
if [ -d "${EMACS_BASE}/bin" ]
then
    export PATH="${PATH}:${EMACS_BASE}/bin"
fi

# VI-clone
if [ -d /usr/local/vim56 ]
then
    export PATH="${PATH}:/usr/local/vim56"
fi

# TCL
if [ -d /usr/local/tcl/bin ]
then
    export PATH="${PATH}:/usr/local/tcl/bin"
fi

# WinCVS, use its version of cvs over what is in /bin
# 4/14/05 mbs: Not any more.  With newer wincvs versions, they
# have severely screwed things up.  You can't script a login (which might
# be intentional).  And, often 'cvs edit' leaves a file still read-only.
# Plus, it seems to have totally messed up execute permissions.  It seems
# make all .c and .h files executable.
#if [ -d /usr/local/wincvs/CVSNT ]
#then
#  export PATH=/usr/local/wincvs/CVSNT:${PATH}
#fi
#
# Sun's java compiler.  The thing gets installed in different
# places depending upon the particular versions.  Something that revs
# once per month is sort of a problem...  Try and "guess" at the most recent
# one.  Surely the most recent is the "best", right?
#
# If 'which java' found a launcher before PATH was reset at the top of this
# file, just put its directory back on the PATH.
if [ -n "$java" ]; then
    # NOTE(review): $(dirname $java) is unquoted -- fine for space-free
    # paths, would break if the launcher path ever contains whitespace.
    PATH=${PATH}:$(dirname $java)
else
    # Windows-style Oracle install location (a symlink farm, hence no /bin).
    jdk='/ProgramData/Oracle/Java/javapath'
    if [ -d $jdk ]; then
        export PATH="${PATH}:$jdk" #no /bin here
    else
        # Fall back to the newest (by modification time) /j2sdk* install,
        # then to /jdk*.  This deliberately parses 'ls -t' output to get
        # mtime ordering; acceptable for space-free root-level dirs.
        jdk=$(/bin/ls -d -t /j2sdk* 2>/dev/null | head -1)
        if [ "${jdk}" != "" ]
        then
            export PATH="${PATH}:${jdk}/bin"
        else
            jdk=$(/bin/ls -d -t /jdk* 2>/dev/null | head -1)
            if [ "${jdk}" != "" ]
            then
                export PATH="${PATH}:${jdk}/bin"
            fi
        fi
    fi
fi
#
# Pull in the TiMOS development helper scripts when they are installed;
# warn loudly when they are missing.
#
if [ ! -d /usr/local/timostools ]
then
    echo "You do not appear to have the SW development tools."
else
    export PATH="/usr/local/timostools:${PATH}"
    # Enable git tab-complete and fancy git prompting for linux users
    if [ "$cygwin" -eq 0 ]; then
        source /usr/local/timostools/git.d/git-completion.bash
        source /usr/local/timostools/git.d/git-prompt.sh
        # git prompt configuration
        GIT_PS1_SHOWUPSTREAM=auto
        GIT_PS1_SHOWCOLORHINTS=1
        GIT_PS1_SHOWDIRTYSTATE=1
        GIT_PS1_SHOWUNTRACKEDFILES=1
        GIT_PS1_STATESEPARATOR=' '
        export PROMPT_COMMAND='__git_ps1 "\w" " > "'
    fi
fi
#
# Put standard Windows system path next (Cygwin only).  Resolve the
# Windows root once with cygpath instead of four separate invocations,
# and quote $SYSTEMROOT since Windows paths can contain spaces.
#
if [ "${cygwin:-0}" -eq 1 ]
then
    winroot=$(cygpath -a -u "$SYSTEMROOT")
    export PATH="${PATH}:${winroot}/system32:${winroot}:${winroot}/system32/Wbem:${winroot}/system32/WindowsPowerShell/v1.0"
    unset winroot
fi
#
# If you have a bin in your home directory, we'll use it.
# ($HOME is quoted: Windows home directories can contain spaces.)
#
if [ -d "${HOME}/bin" ]
then
    export PATH="${PATH}:${HOME}/bin"
fi

#
# Finally, use current directory (kept last so it never shadows real tools)
#
export PATH="${PATH}:."
#
# Make sure the temp directories exist.  'mkdir -p' is a no-op when the
# directory already exists, so no pre-check is needed.  TMP is set earlier
# in this file; default to /tmp defensively in case it is ever unset.
#
mkdir -p -- "${TMP:-/tmp}"

# Under Cygwin, also give each mounted Windows drive a top-level tmp dir.
for drive in /cygdrive/c /cygdrive/d
do
    if [ -d "$drive" ] && [ ! -d "$drive/tmp" ]
    then
        mkdir -p "$drive/tmp"
    fi
done
unset drive
#
# Set prompt to <working directory>, space, ">", space
#
export PS1="\w > "

# Set size of history
export HISTSIZE=500
export HISTFILESIZE=500

# Ignore duplicate cmds in history log; ignore cmds beginning with whitespace
export HISTCONTROL=ignoreboth

# When doing file completion, ignore files ending in these
export FIGNORE=".o:~"

# Ignore EOF character (Ctrl-D must be pressed 10 extra times to exit).
# NB: the variable must actually hold a value -- a bare 'export IGNOREEOF'
# leaves it unset, which disables the feature entirely.
export IGNOREEOF=10

# Executed just before printing every prompt.  You probably don't
# want to do anything with this.
#export PROMPT_COMMAND="printf '\e]0;$HOSTNAME\7'"

# Don't allow redirection to overwrite existing file.
# NB: noclobber is a shell option in bash, not an environment variable;
# the old 'export noclobber' had no effect.
set -o noclobber

# If you do 'cd <val>' and val isn't a directory, then assume
# it's a variable and cd to the contents of the variable.
# NB: likewise a shopt in bash, not a variable.
shopt -s cdable_vars
#
# Generic aliases.
# You may want to add more or override (unalias) these in your .bashrc.local
# These aliases put training wheels on commands like rm and mv to prevent
# overwriting existing files
#
alias copy='cp -i'
alias del='rm -i'
alias dir='ls -lF'
alias h=history
alias la='ls -a'
alias ll='ls -l'
alias ls='ls -F'
alias more=less
alias mv='mv -i'
alias ren='mv -i'
alias rm='rm -i'
#
# Some aliases specific to our development
#
alias panos='cd $HOME/ws/panos'
#
# Do a 'cvs -n up' which shows all files in the currrent directory
# and below which need to be updated or are modified. But, filter out
# all of the noise lines that cvs generates as it traverses a file tree
#
alias cvsnup='cvs -n -q up -P -d 2>/dev/null | grep -v "^\?"'
alias cvsup='cvs -z9 -q up -P -d 2>/dev/null | grep -v "^\?"'
alias cvsedit='cvs edit'
alias cvschanged="cvs -n -f -q up -dPR 2>/dev/null | grep '^[MARC?] '"
alias cvschanged2='cvs -z9 -q status 2>&1 | grep Status | egrep -v "(Up-to|Needs Patch)"'
alias cvsstatus='cvs status | grep Status'
# Turn on vi-style command line editing.
# If you prefer EMACS-style editing, use -o emacs
# If you prefer windows-style editing (using arrow keys), don't use anything.
#set -o vi
#make an intelligent guess as to what your DISPLAY env var should be
# Derives DISPLAY from the hostname in `who am i` output; falls back to
# the local host when the session is local or the field is empty.
if [ -z ${DISPLAY:=""} ]; then
    XSERVER=$(who am i | /bin/awk '{print $6}' | /bin/sed 's/[()]//g')
    XSERVER=${XSERVER%%:*}
    if [[ -z ${XSERVER} || ${XSERVER} == $(hostname) || ${XSERVER} == "unix" ]]; then
	export DISPLAY=$(hostname):0.0 # Display on local host
    else
	export DISPLAY=${XSERVER}:0.0 # Display on remote host
    fi
fi
#
# Finally, if a ~/.bashrc.local exists, source it
#
if [ -e ~/.bashrc.local ]
then
    source ~/.bashrc.local
fi
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/udaytj/software/anaconda3/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/udaytj/software/anaconda3/etc/profile.d/conda.sh" ]; then
        . "/home/udaytj/software/anaconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/udaytj/software/anaconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<
complete -C /usr/bin/terraform terraform
# Generated for envman. Do not edit.
[ -s "$HOME/.config/envman/load.sh" ] && source "$HOME/.config/envman/load.sh"
| true |
dd710c791c0bc76f465e1510d27105b723028154 | Shell | nabinno/dotfiles | /.zsh.d/function/init-perl | UTF-8 | 2,762 | 2.84375 | 3 | [
"MIT",
"CC-BY-4.0"
] | permissive | #!/usr/bin/env zsh
export REQUIRED_PERL_VERSION=5.20.3
export PATH="$HOME/.cask/bin:$PATH"
# ----------------------------------------------------------------------
# ### version control ###
# Install the plenv Perl version manager via anyenv, then restart the
# shell so the new shims are picked up.
get-plenv() {
    case "${OSTYPE}" in
        freebsd* | darwin* | linux*) anyenv install plenv && exec -l zsh ;;
    esac
}
if ! type -p plenv >/dev/null; then get-plenv; fi
# ### installation ###
# Install the pinned Perl ($REQUIRED_PERL_VERSION) plus cpanm, choosing
# the mechanism per platform (apt-cyg, plenv, or nix on RedHat).
# NOTE(review): $DIST is expected to be set elsewhere in the dotfiles —
# confirm before relying on the linux* branch.
get-perl() {
    case "${OSTYPE}" in
        cygwin) apt-cyg install perl ;;
        freebsd* | darwin*)
            plenv install $REQUIRED_PERL_VERSION
            plenv rehash
            plenv global $REQUIRED_PERL_VERSION
            plenv install-cpanm
            ;;
        linux*)
            case $DIST in
                Redhat | RedHat)
                    nix-install perl-$REQUIRED_PERL_VERSION
                    nix-install perl-App-cpanminus
                    ;;
                Debian | Ubuntu)
                    plenv install $REQUIRED_PERL_VERSION
                    plenv rehash
                    plenv global $REQUIRED_PERL_VERSION
                    plenv install-cpanm
                    ;;
            esac
            ;;
    esac
}
if ! type -p perl >/dev/null; then get-perl; fi
# eval $(perl -I$HOME/.local/lib/perl5 -Mlocal::lib=$HOME/.local)
# ----------------------------------------------------------------------
# ### plagger ###
# Force-install Plagger and its (XML/feed/OAuth) dependency chain via
# cpanm. No-op on cygwin.
get-plagger() {
    case "${OSTYPE}" in
        cygwin) ;;
        freebsd* | darwin* | linux*)
            cpanm -fi YAML::Loader \
                XML::LibXML \
                XML::LibXML::SAX \
                XML::LibXML::XPathContext \
                XML::Liberal \
                Text::Glob \
                Module::Runtime \
                Params::Util \
                Digest::SHA1 \
                Class::Load \
                XML::RSS \
                XML::RSS::LibXML \
                XML::RSS::Liberal \
                XML::Feed \
                XML::Feed::RSS \
                XML::Atom \
                WebService::Bloglines \
                LWP::Protocol::https \
                JSON \
                OAuth::Lite::Consumer \
                OAuth::Lite::Token \
                Plagger
            ;;
    esac
}
# ----------------------------------------------------------------------
# ### org-asana ###
# Install the modules needed for org-mode <-> Asana syncing.
get-org-asana() {
    yes | cpanm -fi Moose \
        WWW::Asana \
        Org::Parser \
        YAML
}
# Install globally useful CPAN tooling (Carton dependency manager).
get-global-cpan-packages() {
    yes | cpanm -fi Carton
}
# ----------------------------------------------------------------------
# ### cpan ###
# List installed .pm files.
# NOTE(review): 'find' ignores stdin, so piping @INC into it has no
# effect — this likely intended to iterate over the @INC dirs; confirm.
cpan-module-list() {
    perl -e "print \"@INC\"" | find -name "*.pm" -print
}
# Print the installed version of the module named in $1.
cpan-module-version() {
    perl -M$1 -le "print \$$1::VERSION"
}
# Uninstall a distribution by its packlist (expects Dist-Name as $1).
cpan-uninstall() {
    perl -MConfig -MExtUtils::Install -e '($FULLEXT=shift)=~s{-}{/}g;uninstall "$Config{sitearchexp}/auto/$FULLEXT/.packlist",1'
}
alias cpanmini='cpan --mirror ~/.cpan/minicpan --mirror-only'
# alias cpan-uninstall='perl -MConfig -MExtUtils::Install -e '"'"'($FULLEXT=shift)=~s{-}{/}g;uninstall "$Config{sitearchexp}/auto/$FULLEXT/.packlist",1'"'"
| true |
882db23b751ba22f678f10ee0e5dcbf679f8fece | Shell | Antanukas/maven-release-script | /release.sh | UTF-8 | 10,517 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Dry-run the maven-release-plugin to validate the POMs before releasing
# (release:prepare -DdryRun reports problems such as unresolved SNAPSHOT
# dependencies), then remove the dry-run's backup files.
function check_release_mvn_plugin() {
	# validate release using maven-release-plugin
	$MVN -DdryRun=true -B org.apache.maven.plugins:maven-release-plugin:2.3.2:prepare || rollback_and_die_with "release:prepare reports errors. See output for details"
	$MVN -B org.apache.maven.plugins:maven-release-plugin:2.3.2:clean
}
# Print a message to stdout in green.
# The message is passed as a printf *argument* rather than interpolated
# into the format string, so '%' or backslashes in it print literally
# (the old form broke on messages containing printf directives).
function echoc() {
	printf '\033[0;32m%s\033[0m\n' "$1"
}
# Echo a command (in green) then run it, discarding its stdout.
# NOTE: $1 is intentionally left unquoted so the command string is
# word-split into command + arguments; commands whose arguments contain
# spaces cannot be passed through this helper.
function exec_command() {
	echoc "> $1"
	$1 > /dev/null
}
function append_snapshot() {
# Add -SNAPSHOT to the end (and make sure we don't accidentally have it twice)
echo "$(echo "$1" | perl -pe 's/-SNAPSHOT//gi')-SNAPSHOT"
}
# Print an error message (green, on stderr) and abort the script.
function die_with() {
	local message="$*"
	echoc "$message" >&2
	exit 1
}
# Return 0 if the named command is available, 1 otherwise.
# Uses the portable 'command -v' builtin instead of the external (and
# non-portable) 'which'.
function has_command() {
	command -v "$1" >/dev/null 2>&1 || return 1
	return 0
}
# Return 0 if the installed xmllint supports the --xpath option.
# xmllint invoked with no arguments prints its usage text (which mentions
# --xpath when supported); 'grep -q' replaces the old 'grep | wc -l'
# count-and-compare, returning the same statuses (0 found / 1 not found).
function has_xmllint_with_xpath() {
	xmllint 2>&1 | grep -q xpath
}
# Abort unless an xmllint with --xpath support (libxml2 build) is installed.
function die_unless_xmllint_has_xpath() {
	has_command xmllint || die_with "Missing xmllint command, please install it (from libxml2)"
	has_xmllint_with_xpath || die_with "xmllint command is missing the --xpath option, please install the libxml2 version"
}
# Abort with an error unless every named command is available on PATH.
# Arguments: one or more command names (processing stops at the first
# empty argument, matching the original loop condition).
function die_without_command() {
	local cmd
	while [ -n "${1:-}" ]
	do
		cmd="$1"
		shift
		has_command "$cmd" || die_with "Missing required command: $cmd"
	done
}
# Roll back everything a failed release may have produced (deployed
# artifacts, release branch, tag, release commit) and abort.
# Arguments: the error message (all arguments are joined).
function rollback_and_die_with() {
	# BUGFIX: this previously read $3, but every caller passes a single
	# message argument, so the error text was silently dropped.
	MSG="$*"
	echoc "$MSG" >&2
	echoc "Deleting artifacts from Archiva in case they were deployed"
	exec_command "mvn lt.omnitel.maven.plugins:archiva-plugin:0.0.1-SNAPSHOT:deleteArtifacts -DversionToDelete=$RELEASE_VERSION"
	echoc "Resetting release commit to return you to the same working state as before attempting a deploy"
	# Delete the release branch / tag only when they were actually created.
	if ! [ -z "$RELEASE_BRANCH" ] && [ $(git branch --list "${RELEASE_BRANCH}" | wc -l) != "0" ] ; then
		exec_command "git branch -D $RELEASE_BRANCH" || echoc "Could not delete branch"
	fi
	if ! [ -z "$VCS_RELEASE_TAG" ] && [ $(git tag -l "${VCS_RELEASE_TAG}" | wc -l) != "0" ] ; then
		exec_command "git tag -d $VCS_RELEASE_TAG" || echoc "Could not delete tag"
	fi
	exec_command "git reset --hard $HEAD_BEFORE_RELEASE" || echoc "Git reset command failed!"
	exec_command "$MVN -B org.apache.maven.plugins:maven-release-plugin:2.3.2:clean" || echoc "Unable to clean up release:perform artifacts"
	echoc "Release failed. Changes have been rolled back. See output for details."
	exit 1
}
# Print the command-line help text (does not exit; callers decide).
function usage() {
	echoc "Maven git release script v1.0 (c) 2014 Peter Wright"
	echoc ""
	echoc "Usage:"
	echoc "  $0 [-a -b | [ -r RELEASE_VERSION ] [ -n NEXT_DEV_VERSION ] ] [ -c ASSUMED_POM_VERSION ] [ -m NEXT_REL_BRANCH_VERSION ]"
	echoc "Updates release version, then builds and commits it"
	echoc ""
	echoc " -a Shorthand for -a auto -n auto"
	echoc " -r Sets the release version number to use ('auto' to use the version in pom.xml)"
	echoc " -n Sets the next development version number to use (or 'auto' to increment release version)"
	echoc " -m Sets the version in release branch"
	echoc " -c Assume this as pom.xml version without inspecting it with xmllint"
	echoc " -b Assume simple release of bugfix version"
	echoc ""
	echoc " -h For this message"
	echoc ""
}
###############################
# HANDLE COMMAND-LINE OPTIONS #
###############################
# Flags populate the globals consumed by the get_*_version functions
# below; "auto" means "compute from pom.xml / the release version".
BUGFIX_RELEASE=false
while getopts "ahbr:n:c:m:" o; do
	case "${o}" in
		a)
			RELEASE_VERSION="auto"
			NEXT_VERSION="auto"
			NEXT_VERSION_RELEASE_BRANCH="auto"
			;;
		r)
			RELEASE_VERSION="${OPTARG}"
			;;
		n)
			NEXT_VERSION="${OPTARG}"
			;;
		c)
			CURRENT_VERSION="${OPTARG}"
			;;
		m)
			NEXT_VERSION_RELEASE_BRANCH="${OPTARG}"
			;;
		b)
			BUGFIX_RELEASE=true
			;;
		h)
			usage
			exit 0
			;;
		*)
			usage
			die_with "Unrecognised option ${o}"
			;;
	esac
done
shift $((OPTIND-1))
# Verify required tooling is present and resolve which maven command to
# use: $MVN from the environment if set, otherwise plain 'mvn'.
function check_script_dependencies() {
	die_without_command git perl wc
	if [ -z "$MVN" ] ; then
		die_without_command mvn
		MVN=mvn
	else
		die_without_command $MVN
	fi
	echoc "Using maven command: $MVN"
}
check_script_dependencies
# Abort the release if the working tree has uncommitted changes.
function check_git_state() {
	# If there are any uncommitted changes we must abort immediately
	if [ $(git status -s | wc -l) != "0" ] ; then
		git status -s
		die_with "There are uncommitted changes, please commit or stash them to continue with the release:"
	else
		echoc "Good, no uncommitted changes found"
	fi
}
check_git_state
# Determine RELEASE_VERSION: read CURRENT_VERSION from pom.xml (unless
# given via -c), strip -SNAPSHOT for the default, and prompt unless the
# value was supplied (or "auto") on the command line.
function get_release_version_number() {
	if [ -z "$CURRENT_VERSION" ] ; then
		# Extract the current version (requires xmlllint with xpath suport)
		die_unless_xmllint_has_xpath
		CURRENT_VERSION=$(xmllint --xpath "/*[local-name() = 'project']/*[local-name() = 'version']/text()" pom.xml)
	fi
	echoc "Current pom.xml version: $CURRENT_VERSION"
	echoc ""
	# Prompt for release version (or compute it automatically if requested)
	RELEASE_VERSION_DEFAULT=$(echo "$CURRENT_VERSION" | perl -pe 's/-SNAPSHOT//')
	if [ -z "$RELEASE_VERSION" ] ; then
		read -p "Version to release [${RELEASE_VERSION_DEFAULT}]" RELEASE_VERSION
		if [ -z "$RELEASE_VERSION" ] ; then
			RELEASE_VERSION=$RELEASE_VERSION_DEFAULT
		fi
	elif [ "$RELEASE_VERSION" = "auto" ] ; then
		RELEASE_VERSION=$RELEASE_VERSION_DEFAULT
	fi
	if [ "$RELEASE_VERSION" = "$CURRENT_VERSION" ] ; then
		die_with "Release version requested is exactly the same as the current pom.xml version (${CURRENT_VERSION})! Is the version in pom.xml definitely a -SNAPSHOT version?"
	fi
}
get_release_version_number
# Determine NEXT_VERSION (the next mainline -SNAPSHOT) by incrementing
# the second-to-last numeric component of RELEASE_VERSION; skipped for
# bug-fix releases (see below).
function get_next_major_version() {
	# Prompt for next version (or compute it automatically if requested)
	NEXT_VERSION_DEFAULT=$(echo "$RELEASE_VERSION" | perl -pe 's{^(([0-9]\.)+)?([0-9]+)(\.[0-9]+)$}{$1 . ($3 + 1) . $4}e')
	if [ -z "$NEXT_VERSION" ] ; then
		read -p "Next snapshot version [${NEXT_VERSION_DEFAULT}]" NEXT_VERSION
		if [ -z "$NEXT_VERSION" ] ; then
			NEXT_VERSION=$NEXT_VERSION_DEFAULT
		fi
	elif [ "$NEXT_VERSION" = "auto" ] ; then
		NEXT_VERSION=$NEXT_VERSION_DEFAULT
	fi
	# Add -SNAPSHOT to the end (and make sure we don't accidentally have it twice)
	NEXT_VERSION=$(append_snapshot $NEXT_VERSION)
	if [ "$NEXT_VERSION" = "${RELEASE_VERSION}-SNAPSHOT" ] ; then
		die_with "Release version and next version are the same version!"
	fi
}
$BUGFIX_RELEASE || get_next_major_version
# Prompt for (or auto-compute) the next -SNAPSHOT version to use on the
# release branch, storing the result in NEXT_VERSION_RELEASE_BRANCH.
function get_next_release_branch_version() {
	NEXT_VERSION_RELEASE_BRANCH_DEFAULT=$(echo "$RELEASE_VERSION" | perl -pe 's{^(([0-9]\.)+)?([0-9]+)$}{$1 . ($3 + 1)}e')
	if [ -z "$NEXT_VERSION_RELEASE_BRANCH" ] ; then
		# BUGFIX: 'read' takes a variable *name*; the old code passed
		# "$NEXT_VERSION_RELEASE_BRANCH" (empty at this point), so the
		# user's answer went into $REPLY and was lost.
		read -p "Next snapshot version in release branch [${NEXT_VERSION_RELEASE_BRANCH_DEFAULT}]" NEXT_VERSION_RELEASE_BRANCH
		if [ -z "$NEXT_VERSION_RELEASE_BRANCH" ] ; then
			NEXT_VERSION_RELEASE_BRANCH=$NEXT_VERSION_RELEASE_BRANCH_DEFAULT
		fi
	elif [ "$NEXT_VERSION_RELEASE_BRANCH" = "auto" ] ; then
		NEXT_VERSION_RELEASE_BRANCH=$NEXT_VERSION_RELEASE_BRANCH_DEFAULT
	fi
	NEXT_VERSION_RELEASE_BRANCH=$(append_snapshot $NEXT_VERSION_RELEASE_BRANCH)
	# BUGFIX: the old comparison tested the literal string
	# "NEXT_VERSION_RELEASE_BRANCH" (missing '$'), so it never matched.
	if [ "$NEXT_VERSION_RELEASE_BRANCH" = "${RELEASE_VERSION}-SNAPSHOT" ] ; then
		die_with "Release version in branch and next version are the same version!"
	fi
}
get_next_release_branch_version
# BUGFIX: the original comparisons were written '[ "$BUGFIX_RELEASE"=true ]'
# (no spaces around '='), which tests a single non-empty string and is
# therefore always true — both branches ran unconditionally. The spaced
# forms below restore the intended behaviour.
if [ "$BUGFIX_RELEASE" = true ]; then
	NEXT_VERSION=$NEXT_VERSION_RELEASE_BRANCH
fi
echoc ""
echoc "Using $RELEASE_VERSION for release"
$BUGFIX_RELEASE || echoc "Using $NEXT_VERSION for next development version"
echoc "Using $NEXT_VERSION_RELEASE_BRANCH for next development version in branch"
STARTING_BRANCH=$(git symbolic-ref --short -q HEAD)
HEAD_BEFORE_RELEASE=$(git rev-parse HEAD)
VCS_RELEASE_TAG="${RELEASE_VERSION}"
# Only a regular (non-bugfix) release gets a dedicated release branch.
if [ "$BUGFIX_RELEASE" = false ]; then
	RELEASE_BRANCH="release-$RELEASE_VERSION"
fi
# Abort if the tag for this release already exists.
function validate_tag() {
	# Check that tag and release branch doesn't exist
	if [ $(git tag -l "${VCS_RELEASE_TAG}" | wc -l) != "0" ] ; then
		die_with "A tag already exists ${VCS_RELEASE_TAG} for the release version ${RELEASE_VERSION}"
	fi
}
validate_tag
# Abort if the release branch already exists, then dry-run the
# maven-release-plugin to validate the POMs.
function validate_release_branch() {
	if [ $(git branch --list "${RELEASE_BRANCH}" | wc -l) != "0" ] ; then
		die_with "A release branch already exists ${RELEASE_BRANCH} for the release version ${RELEASE_VERSION}"
	fi
	# Check that poms are OK. E.g. doesn't contain SNAPSHOT versions.
	check_release_mvn_plugin
}
$BUGFIX_RELEASE || validate_release_branch
# Set the release version in the POMs, commit, build + deploy, and tag.
# Failures after the commit roll everything back via rollback_and_die_with.
function perform_release() {
	# Update the pom.xml versions
	$MVN versions:set -DgenerateBackupPoms=false -DnewVersion=$RELEASE_VERSION || die_with "Failed to set release version on pom.xml files"
	# Commit the updated pom.xml files
	git commit -a -m "Release version ${RELEASE_VERSION}" || rollback_and_die_with "Failed to commit updated pom.xml versions for release!"
	echoc ""
	echoc " Starting build and deploy"
	echoc ""
	# build and deploy the release
	$MVN -DperformRelease=true clean deploy || rollback_and_die_with "Build/Deploy failure. Release failed."
	# tag the release (N.B. should this be before perform the release?)
	git tag "${VCS_RELEASE_TAG}" || rollback_and_die_with "Failed to create tag ${RELEASE_VERSION}! Release has been deployed, however"
}
perform_release
# Create the release branch, bump its pom version to the bug-fix snapshot
# version, commit, and switch back to the starting branch.
function create_release_branch() {
	git checkout -b $RELEASE_BRANCH || rollback_and_die_with "Can not create realease branch $RELEASE_BRANCH"
	# CONSISTENCY FIX: use the configured maven command ($MVN, resolved in
	# check_script_dependencies) instead of a hard-coded 'mvn', matching
	# every other maven invocation in this script.
	$MVN versions:set -f pom.xml -DnewVersion=$NEXT_VERSION_RELEASE_BRANCH || rollback_and_die_with "Can't update pom version to $NEXT_VERSION_RELEASE_BRANCH"
	$MVN versions:commit -f pom.xml || rollback_and_die_with "Can't commit $NEXT_VERSION_RELEASE_BRANCH pom.xml"
	git commit -a -m "Prepare release branch for bug-fix development. Bumping version to $NEXT_VERSION_RELEASE_BRANCH."
	git checkout $STARTING_BRANCH
}
$BUGFIX_RELEASE || create_release_branch
# Bump the mainline POMs to NEXT_VERSION and commit, starting the next
# development iteration.
function prepare_for_next_development_process() {
	$MVN versions:set -DgenerateBackupPoms=false "-DnewVersion=${NEXT_VERSION}" || rollback_and_die_with "Failed to set next dev version on pom.xml files, please do this manually"
	git commit -a -m "Start next development version ${NEXT_VERSION}" || rollback_and_die_with "Failed to commit updated pom.xml versions for next dev version! Please do this manually"
}
prepare_for_next_development_process
# Push the release branch to origin, then return to the starting branch.
function push_release_branch() {
	git checkout $RELEASE_BRANCH
	git push origin $RELEASE_BRANCH || die_with "Failed to push commits from $RELEASE_BRANCH. Please do this manually"
	git checkout $STARTING_BRANCH
}
$BUGFIX_RELEASE || push_release_branch
# Push the starting branch and all tags (including the new release tag).
function push_current_branch() {
	git push origin $STARTING_BRANCH || die_with "Failed to push commits. Please do this manually"
	git push --tags || die_with "Failed to push tags. Please do this manually"
}
push_current_branch | true |
28a36ddfc32fae7a191f0dc2e4566b4e6040feb7 | Shell | daniellandau/shell-magic | /reorder_xyz.sh | UTF-8 | 239 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
i=0;
# Read the first two header lines of every movie.* xyz file, join each
# file's header onto one line, sort numerically by field 8 (presumably a
# frame/time value — confirm against the xyz writer), and emit the file
# names (field 2) in that order.
# NOTE: $1 acts as an optional command prefix (e.g. "echo" for a dry run).
for f in $(head -n 2 movie.* | sed -n -e '/movie/ {N;N;s/\n/ /g;p;}' | sort -k 8 -g | cut -d' ' -f2);
do
    $1 mv $f tmp_movie.$(printf "%03d" $i).xyz;
    i=$((i+1));
done
# Strip the tmp_ prefix, leaving the files renamed in sorted order.
for f in tmp_*
do
    $1 mv $f ${f/tmp_/}
done
| true |
9cc0c09017f6740f4e9cd1537782e35fc004e7da | Shell | tannishk/linux_bashes | /usage.sh | UTF-8 | 62 | 2.5625 | 3 | [] | no_license | if [ $# -eq 0 ]
	then
		# BUGFIX: corrected the misspelled user-facing message
		# ("NOthing enterd") and report the error on stderr.
		echo "Nothing entered" >&2
		exit 1
fi
# Quote the expansion so arguments containing spaces or glob
# characters are printed intact.
echo "$1"
| true |
0eb8ae1ef3451c9ad5dabf34227203be3fd6a37a | Shell | martinjvickers/dzlab-tools | /full_run.sh | UTF-8 | 3,460 | 3.59375 | 4 | [] | no_license | #!/bin/bash
#___UNDOCUMENTED___
# Bisulfite-sequencing alignment pipeline driver.
# Usage: full_run.sh LEFT_READS RIGHT_READS GENOME TISSUE BATCH OVERWRITE
# Converts reads/genome, aligns with seqmap, correlates paired ends,
# and tallies methylation per chromosome and context (CG/CHG/CHH).
echo "Starting run at " `date`
LREAD=$1
RREAD=$2
GENOME=$3
TISSUE=$4
BATCH=$5
OVERWRITE=$6
if [ ! -e ${LREAD}.fa -o $OVERWRITE == 1 ]; then
    # convert from fastq to fasta
    echo -n "Converting sequences to fasta format..."
    fq_all2std.pl fq2fa $LREAD > ${LREAD}.fa
    echo "done with code: $?"
fi
if [ ! -e ${RREAD}.fa -o $OVERWRITE == 1 ]; then
    # convert from fastq to fasta
    echo -n "Converting sequences to fasta format..."
    fq_all2std.pl fq2fa $RREAD > ${RREAD}.fa
    echo "done with code: $?"
fi
if [ ! -e ${LREAD}.c2t -o $OVERWRITE == 1 ]; then
    # convert sequences
    echo -n "Converting sequences..."
    convert.pl c2t ${LREAD}.fa > ${LREAD}.c2t
    echo "done with code: $?"
fi
# NOTE(review): this guard tests ${RREAD}.c2t but the command writes
# ${RREAD}.g2a — the condition probably should test the .g2a file, so as
# written the conversion re-runs on every invocation; confirm.
if [ ! -e ${RREAD}.c2t -o $OVERWRITE == 1 ]; then
    # convert sequences
    echo -n "Converting sequences..."
    convert.pl g2a ${RREAD}.fa > ${RREAD}.g2a
    echo "done with code: $?"
fi
if [ ! -e ${GENOME}-RC.G2A -o $OVERWRITE == 1 ]; then
    # convert genome
    echo -n "Converting genome..."
    rcfas.pl $GENOME > ${GENOME}-RC
    convert.pl c2t ${GENOME}-RC > ${GENOME}-RC.C2T
    convert.pl g2a ${GENOME}-RC > ${GENOME}-RC.G2A
    echo "done with code: $?"
fi
# /available_memory:`free -m | grep 'Mem:' | perl -e 'while(<>) {@a=(split /\s+/, $_); print $a[3];}'`
# align sequences
echo -n "Aligning sequences..."
seqmap 2 ${LREAD}.c2t ${GENOME}-RC.C2T ${LREAD}.eland3 /eland:3 /forward_strand /available_memory:8000 /cut:1,45 &&
seqmap 2 ${RREAD}.g2a ${GENOME}-RC.G2A ${RREAD}.eland3 /eland:3 /forward_strand /available_memory:8000 /cut:1,45 &&
# echo "done with code: $?"
# correlate paired ends
echo -n "Running correlatePairedEnds.pl..."
correlatePairedEnds.pl --left ${LREAD}.eland3 --right ${RREAD}.eland3 --reference $GENOME --output ${LREAD}_pre.gff --offset 0 --distance 300 --readsize 45 &&
echo "Done with code: $?"
# replace processed reads with original
echo -n "Running replaceMutation.pl..."
replaceMutation.pl ${LREAD}.fa ${RREAD}.fa ${LREAD}_pre.gff 45 > ${LREAD}_post.gff &&
echo "Done with code: $?"
# check alignment ${LREAD}.accuracy
echo -n "Computing alignment ${LREAD}.accuracy..."
collect_align_stats.pl ${LREAD}.eland3 ${RREAD}.eland3 ${LREAD}_pre.gff $TISSUE $BATCH > ${LREAD}_pre.alignment.log
echo "Done with code: $?"
# filter out all non matches
echo -n "Filtering out records without matches..."
grep 'target=' ${LREAD}_post.gff > ${LREAD}_post_filtered.gff &&
echo "Done with code: $?"
# split into multiple chromosomes
echo -n "Splitting into multiple chromosomes..."
for i in `grep '>' $GENOME | sed s/\>//`
do
    grep -i "^$i " ${LREAD}_post_filtered.gff > ${LREAD}_post_filtered_${i}.gff
done
echo "Done with code: $?"
# count methylation
echo -n "Running countMethylation.pl..."
for i in `grep '>' $GENOME | sed s/\>//`
do
    countMethylation.pl --ref $GENOME --gff ${LREAD}_post_filtered_${i}.gff --output ${LREAD}_post_filtered_${i}_singleC.gff --sort
done
echo "Done with code: $?"
# split into multiple contexts
echo -n "Splitting into multiple contexts..."
for i in `grep '>' $GENOME | sed s/\>//`
do
    for j in CG CHG CHH
    do
        grep $j ${LREAD}_post_filtered_${i}_singleC.gff > ${LREAD}_post_filtered_${i}_${j}_singleC.gff
    done
done
echo "Done with code: $?"
# # window files
# echo "Windowing files..."
# window_gff.pl -b ${LREAD}_*_singleC.gff -w 50 -s 50
# echo "Done with code: $?"
## Done
echo "Finished run at " `date`
| true |
594aa102fba9c406f8e22e6c0d85de259c3a452d | Shell | thefallenidealist/scripts | /hex2bin | UTF-8 | 356 | 2.71875 | 3 | [] | no_license | #!/bin/sh
# created 140809
# hex2bin — print a hexadecimal command-line argument in binary, e.g.
#   hex2bin 0x1A   ->   "0x1A =" / "0b11010"
# Rewritten with POSIX shell arithmetic so the script no longer depends
# on bc; the logic is wrapped in a function for reuse/testing while the
# command-line interface is unchanged.

# Convert one hex string (optional 0x/0X prefix, any case) to binary and
# print the normalized hex form followed by the binary form.
hex2bin() {
	# Strip an optional 0x/0X prefix and upper-case the digits
	# (same effect as the original sed | tr pipeline, plus 0X support).
	input=$(printf '%s' "$1" | sed 's/^0[xX]//' | tr '[:lower:]' '[:upper:]')
	if [ -z "$input" ]; then
		# Previously an empty argument produced "0x =" / "0b" junk.
		echo "usage: hex2bin HEXNUMBER" >&2
		return 1
	fi
	# hex -> decimal via a C-style 0x constant in arithmetic expansion
	dec=$((0x$input))
	# decimal -> binary by repeated division; digits build right-to-left
	n=$dec
	bin=
	while [ "$n" -gt 0 ]; do
		bin=$((n % 2))$bin
		n=$((n / 2))
	done
	# bc printed "0" for zero input; preserve that.
	[ -n "$bin" ] || bin=0
	echo "0x$input ="
	echo "0b$bin"
}

hex2bin "$1"
| true |
ab1914a38a54acb32ead79c27256fde6450d5a17 | Shell | caoyu5779/freestyle | /count_daily.sh | UTF-8 | 92 | 2.6875 | 3 | [] | no_license |
#!/bin/sh
# Run daily_active_user.sh for ten consecutive day offsets (1..10).
# NOTE(review): 'chmod -R 777 *' makes everything under the current
# directory world-writable/executable — confirm this blanket permission
# change is really required; a narrower mode would be safer.
chmod -R 777 *
for i in $(seq 1 10)
do
# MODERNIZATION: $(( )) replaces the long-deprecated $[ ] arithmetic.
./daily_active_user.sh $(($i+1)) $i
done | true |
e09a57c17116e7847782950362dcba966c4e85e3 | Shell | cliu1/lorelei-amdtk | /recipes/CHN_DEV_20160831/utils/phone_loop_train_1best.sh | UTF-8 | 1,593 | 3.53125 | 4 | [] | no_license | #!/usr/bin/env bash
#
# Train the inifinite phone-loop model.
#
# Usage: phone_loop_train_1best.sh <setup.sh> <model_dir> <labels_dir> <out_dir>
# Performs one VB M-step update of the phone-loop model (the E-step /
# accumulation commands are commented out below and assumed already done)
# and writes the updated model plus its log-likelihood bound to out_dir.
if [ $# -ne 4 ]; then
    echo "usage: $0 <setup.sh> <model_dir> <labels_dir> <out_dir>"
    exit 1
fi
setup="$1"
model="$2/model.bin"
labels_dir="$3"
out_dir="$4"
# setup.sh is expected to define the amdtk_* tools' environment
# (e.g. $parallel_profile, $fea_dir — see the commented block below).
source $setup || exit 1
# The .done marker makes the step idempotent across reruns.
if [ ! -e $out_dir/.done ]; then
    mkdir -p "$out_dir"
    # VB E-step: estimate the posterior distribution of the
    # latent variables.
    #amdtk_run $parallel_profile \
    #    --ntasks "$parallel_n_core" \
    #    --options "$train_parallel_opts" \
    #    "pl-vbexp" \
    #    "$train_keys" \
    #    "amdtk_ploop_1best_exp $model $labels_dir/\$ITEM1.lab \
    #        $fea_dir/\$ITEM1.$fea_ext $out_dir/\$ITEM1.acc" \
    #    "$out_dir"|| exit 1
    #
    ## Accumulate the statistics. This step could be further
    ## optimized by parallelizing the accumulation.
    #find "$out_dir" -name "*.acc" > "$out_dir/stats.list" || exit 1
    #amdtk_ploop_acc "$out_dir/stats.list" "$out_dir/total_acc_stats" \
    #    || exit 1
    # VB M-step: from the sufficient statistics we update the
    # parameters of the posteriors
    llh=$(amdtk_ploop_max $model "$out_dir/total_acc_stats" \
        "$out_dir/model.bin") || exit 1
    echo "log-likelihood >= $llh"
    # Keep track of the lower bound on the log-likelihood.
    echo "$llh" > "$out_dir"/llh.txt || exit 1
    # Clean up the statistics as they can take a lot of space.
    rm "$out_dir/stats.list" || exit 1
    find "$out_dir/" -name "*.acc" -exec rm {} + || exit 1
    date > "$out_dir/.done"
else
    echo "The model is already trained. Skipping."
fi
| true |
35860dba75c08f6cb6a0f34336b25ee098768f54 | Shell | mitdbg/graph-on-db | /vertica/sql/myload.sh | UTF-8 | 1,236 | 3.078125 | 3 | [] | no_license | #! /bin/sh
# Load a graph dataset named $1 into Vertica: creates <name>_node and
# <name>_edge tables and bulk-copies the node/edge files into them.
DATA=$1
NODE_FILE="tmp.nodes"
EDGE_FILE="tmp.edges"
VSQL_CMD="/opt/vertica/bin/vsql -d db_alekh2 -w db_alekh -U dbadmin"
#PWD=`pwd`
#cat ~/graphs/data/$DATA/nodes | awk '{print $0" 0"}' > $NODE_FILE
#cat ~/graphs/data/$DATA/edges | tr '\t' ' ' > $EDGE_FILE
# The tmp.* assignments above are superseded here by the canonical
# data-set locations.
NODE_FILE="/home/alekh/graphs/data/$DATA/nodes"
EDGE_FILE="/home/alekh/graphs/data/$DATA/edges"
#CREATE_NODE="DROP TABLE IF EXISTS "$DATA"_node; CREATE TABLE "$DATA"_node (id int NOT NULL PRIMARY KEY, value int NOT NULL) ORDER BY Id SEGMENTED BY Hash(id) ALL NODES; COPY "$DATA"_node FROM '$PWD/$NODE_FILE' DELIMITER ' ' NULL 'null';"
CREATE_NODE="DROP TABLE IF EXISTS "$DATA"_node; CREATE TABLE "$DATA"_node (id int NOT NULL PRIMARY KEY) ORDER BY Id SEGMENTED BY Hash(id) ALL NODES; COPY "$DATA"_node FROM '$NODE_FILE' DELIMITER ' ' NULL 'null';"
CREATE_EDGE="DROP TABLE IF EXISTS "$DATA"_edge; CREATE TABLE "$DATA"_edge (from_node INT NOT NULL, to_node INT NOT NULL) ORDER BY from_node SEGMENTED BY Hash(from_node) ALL NODES; COPY "$DATA"_edge FROM '$EDGE_FILE' DELIMITER E'\t' NULL 'null';"
# NOTE(review): $DATA is interpolated straight into SQL — safe only for
# trusted dataset names; confirm callers never pass untrusted input.
$VSQL_CMD -c "$CREATE_NODE"
$VSQL_CMD -c "ALTER TABLE "$DATA"_node ADD COLUMN value INT DEFAULT 0 NOT NULL;"
$VSQL_CMD -c "$CREATE_EDGE"
#rm $NODE_FILE $EDGE_FILE
| true |
6436d00b29ffe9b9a715ae2be33da184afe5d98b | Shell | rangapv/pyUpgrade | /p2.sh | UTF-8 | 1,714 | 3.203125 | 3 | [] | no_license | #!/bin/bash
pyupgrade() {
pargs="$#"
args=("$@")
echo "pargs,args,args1 is $pargs,${args[$((pargs-2))]},${args[$((pargs-1))]},${args[$((pargs-3))]}"
echo "New is ${args[$((pargs-$((pargs-2))))]}"
echo "New is ${args[$((pargs-$((pargs-1))))]}"
echo "New is ${args[$((pargs-$((pargs))))]}"
first=${args[$((pargs-$((pargs))))]}
second=${args[$((pargs-$((pargs-1))))]}
third=${args[$((pargs-$((pargs-2))))]}
var3="/"
var34="https://www.python.org/ftp/python/3.8.7/Python-3.8.7.tgz"
total="$first$second$var3$third"
echo "total is $total"
var4="Python-3.7.9.tgz"
se1=$( echo "${var4}" | awk '{split($0,a,".");print a[1]"."a[2]"."a[3]}')
se2=$( echo "${var34}" | awk '{split($0,a,"/");print a[6]}')
se3=$( echo "${se2}" | awk '{split($0,a,".");print a[1]"."a[2]}')
echo "$se1"
echo "$se2"
echo "$se3"
sudo ln -sf "/home/ec2-user/$se3" "/home/ec2-user/sdf"
}
pyupgrade1() {
pargs="$#"
args=("$@")
arg1=${args[$((pargs-1))]}
#sudo yum -y install gcc make openssl-devel bzip2-devel libffi-devel zlib-devel wget
pyver=${args[$((pargs-pargs))]}
pyver2=${args[$((pargs-$((pargs-1))))]}
pyver3=${args[$((pargs-$((pargs-2))))]}
var3="/"
wg=$pyver$pyver2$var3$pyver3
echo "WG is $wg"
sudo wget "$wg"
echo "pver3 is $pyver3"
tar xzf $pyver3
se1=$( echo "${pyver3}" | awk '{split($0,a,".");print a[1]"."a[2]"."a[3]}')
se2=$( echo "${pyver3}" | awk '{split($0,a,".");print a[1]"."a[2]}')
se3=$( echo "${pyver2}" | awk '{split($0,a,".");print a[1]"."a[2]}')
echo "se1 is $se1"
cd $se1
#sudo ./configure --enable-optimizations
#sudo make altinstall
slpy="python$se3"
sudo ln -sf "/home/ec2-user/$slpy" /home/ec2-user/python1
}
pyupgrade hello ranga swamy
pyupgrade1 https://www.python.org/ftp/python/ 3.10.0 Python-3.10.0a6.tgz
| true |
2aba3e9e2acdcfdab07565a117eee8a71e31a7d0 | Shell | efournier92/digitizer | /_tests/args/batch_args_tests.bash | UTF-8 | 1,199 | 2.90625 | 3 | [] | no_license | #!/bin/bash
#----------------
# Name : batch_args_tests.bash
# Project : digitizer
# Description : Unit test batch-mode functionality
#----------------
source "./_src/args/batch_args.bash"
source "./_src/messages/help.bash"
test_reading_batch_args_with_input_short_arg() {
local message="It should return the configured batch_file input."
local batch_file="MyBatchFile.txt"
local expected_result="$batch_file"
local result=`read_batch_args -i $batch_file`
assertEquals "$message" "$expected_result" "$result"
}
test_reading_batch_args_with_input_first_long_arg() {
local message="It should return the configured batch_file input."
local batch_file="MyBatchFile.txt"
local expected_result="$batch_file"
local result=`read_batch_args --input $batch_file`
assertEquals "$message" "$expected_result" "$result"
}
test_reading_batch_args_with_input_second_long_arg() {
local message="It should return the configured batch_file input."
local batch_file="MyBatchFile.txt"
local expected_result="$batch_file"
local result=`read_batch_args --batch_file $batch_file`
assertEquals "$message" "$expected_result" "$result"
}
. ./bin/shunit2
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.