blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4f3f8d52f513d0684b38e985816d0e1981a6706d | Shell | zone31/Advent-of-Code-2020 | /test.sh | UTF-8 | 1,427 | 3.765625 | 4 | [] | no_license | RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color
SPACING=' '
#Ohwee slamkode
#######################################
# Compare an expected value against an actual one and print a colored verdict.
# Globals:   GREEN, RED, NC (terminal color escape sequences)
# Arguments: $1 - expected value, $2 - actual value, $3 - label to print
# Outputs:   verdict line; on mismatch, both values for inspection
#######################################
function compare {
# local keeps the helper from leaking wanted/got/title into the caller.
local wanted=$1
local got=$2
local title=$3
if [ "$wanted" == "$got" ]; then
echo -e "   " "$title" "${GREEN}Correct!${NC}"
else
echo -e "   " "$title" "${RED}Incorrect!${NC}"
echo "WANTED"
# Quoted so multi-line / multi-space results are printed verbatim instead
# of being word-split and collapsed onto one line.
echo "$wanted"
echo "____"
echo "GOT"
echo "$got"
echo "____"
fi
}
for day in {1..25} ; do
for folder in Days/$day/*/ ; do
if [[ $folder == *\*/ ]] ;
then
printf "Day %s %s ${RED}not found!${NC}\n" $day "${SPACING:${#day}}"
else
res1=$(cat Days/$day/star1.txt)
res2=$(cat Days/$day/star2.txt)
case "$folder" in
*Python/)
printf "Day %s %s ${YELLOW}Python${NC}\n" $day "${SPACING:${#day}}"
pushd $folder > /dev/null
star1=$(python3 run.py 1)
star2=$(python3 run.py 2)
compare $res1 $star1 "Star 1"
compare $res2 $star2 "Star 2"
popd > /dev/null
;;
*Haskell/)
echo -e "Day $day ${GREEN}Haskell${NC}"
;;
*)
echo "Language in $folder not implemented!"
esac
fi
#echo $folder
done
done | true |
da26c6a184882f5289a7831050920d6f4c692eec | Shell | METIS-Project/ILDE | /Glue/GLUECore/GLUEletManager/bin/start-gm.sh | UTF-8 | 2,333 | 3.203125 | 3 | [] | no_license | #!/bin/sh
# Inits GLUEletManager
# Validates GLUE_HOME, assembles the server classpath (JPA, JDBC driver jars,
# Restlet connectors) and launches the GLUEletManager server in the background.
if [ -z $GLUE_HOME ]
then
echo "Environment variable GLUE_HOME must be defined"
exit 1
fi
# Basic locations
GM_HOME=$GLUE_HOME/manager
CONF_DIR=$GM_HOME/conf
GLUE_LIB_DIR=$GLUE_HOME/lib
GM_LIB_DIR=$GM_HOME/lib
LOG_DIR=$GM_HOME/log
# Database related libraries
JPA2_API_LIB=$GLUE_LIB_DIR/dep/eclipselink/jlib/jpa/javax.persistence_2.0.0.v200911271158.jar
JPA2_IMP_LIB=$GLUE_LIB_DIR/dep/eclipselink/jlib/eclipselink.jar
JDBC_DIR=$GLUE_LIB_DIR/dep/jdbc-connector
JDBC_DRIVER=''
# Collect every jar in the JDBC directory. Each entry is *prefixed* with ':',
# which is why the CLASSPATH line below must not add its own ':' before it.
# NOTE(review): `ls` in backticks word-splits on spaces in paths — assumes
# GLUE_HOME contains no whitespace; confirm.
for JAR in `ls $JDBC_DIR/*.jar`
do
JDBC_DRIVER=${JDBC_DRIVER}:$JAR
done
if [ -z $JDBC_DRIVER ]
then
echo "Jar file(s) for a JDBC driver must be placed at $JDBC_DIR"
exit 1
fi
# RESTlet related definitions
RESTLET_DIR=$GLUE_LIB_DIR/dep/restlet-jse-2.0.11/lib
RESTLET_HTTP_CONN=$RESTLET_DIR/org.restlet.ext.httpclient.jar:$RESTLET_DIR/net.jcip.annotations_1.0/net.jcip.annotations.jar:$RESTLET_DIR/org.apache.commons.codec_1.4/org.apache.commons.codec.jar:$RESTLET_DIR/org.apache.commons.logging_1.1/org.apache.commons.logging.jar:$RESTLET_DIR/org.apache.httpclient_4.0/org.apache.httpclient.jar:$RESTLET_DIR/org.apache.httpcore_4.0/org.apache.httpcore.jar:$RESTLET_DIR/org.apache.httpmime_4.0/org.apache.httpmime.jar:$RESTLET_DIR/org.apache.james.mime4j_0.6/org.apache.james.mime4j.jar
RESTLET_JETTY_CONN=$RESTLET_DIR/org.restlet.ext.jetty.jar:$RESTLET_DIR/org.eclipse.jetty_7.1/org.eclipse.jetty.ajp.jar:$RESTLET_DIR/org.eclipse.jetty_7.1/org.eclipse.jetty.continuations.jar:$RESTLET_DIR/org.eclipse.jetty_7.1/org.eclipse.jetty.http.jar:$RESTLET_DIR/org.eclipse.jetty_7.1/org.eclipse.jetty.io.jar:$RESTLET_DIR/org.eclipse.jetty_7.1/org.eclipse.jetty.server.jar:$RESTLET_DIR/org.eclipse.jetty_7.1/org.eclipse.jetty.util.jar:$RESTLET_DIR/javax.servlet_2.5/javax.servlet.jar
# Classpath composition - BE CAREFUL: never put ':' before $JDBC_DRIVER (watch how it was built some lines before)
CLASSPATH=$CONF_DIR:$GM_LIB_DIR/gluelet-manager.jar:$GLUE_LIB_DIR/glue-common.jar:$JPA2_API_LIB:${JPA2_IMP_LIB}$JDBC_DRIVER:$RESTLET_DIR/org.restlet.jar:$RESTLET_DIR/org.restlet.ext.xml.jar:$RESTLET_DIR/org.restlet.ext.atom.jar:$RESTLET_HTTP_CONN:$RESTLET_JETTY_CONN
# Go!
echo starting GLUEletManager
# stdout and stderr are both appended to the same log; the server keeps
# running after this script returns.
java -cp $CLASSPATH glue.core.glueletManager.GLUEletManagerServerMain >> $LOG_DIR/manager.log 2>> $LOG_DIR/manager.log &
| true |
fa61f7b86a07a059632a9bf541cdcd31c130def0 | Shell | calamargo2/SLS-1.02 | /usr/local/lib/nn/back_act | UTF-8 | 926 | 3.359375 | 3 | [] | no_license | #!/bin/sh
# Generated by nn release 6.4.18
VERSION="6.4.18"
INEWS="/usr/local/lib/news/inews"
INEWS_DIR="/usr/local/lib/news"
AWK="awk"
NNTP=false
ACTIVE=/usr/local/lib/news/active
LOG=/usr/local/lib/nn/Log
TMP=${TMPDIR-/usr/tmp}
DB=/usr/spool/nndb
BIN=/usr/bin
LIB=/usr/local/lib/nn
AUTH=false
# ---- end of prefix
# prefix is inserted above by make
#
# back_act will maintain a set of `old' active files
# in the DB directory where they can be used by nngoback
# to backtrack the rc file a number of days.
#
# It should be invoked by cron every day at midnight.
# It should run as user `news'!
#
# Call: back_act [days]
# Default: keep copy of active file for the last 14 days.
cd $DB || exit 1
# $1 = number of daily snapshots to keep + 1; 15 keeps the last 14 days.
p=${1-15}
# l tracks the newest snapshot seen so far, used to fill gaps via hard links.
l=""
# Rotate snapshots from oldest to newest: active.N-1 -> active.N.
while [ "$p" -gt 0 ]
do
i="`expr $p - 1`"
if [ -f active.$i ]
then
mv active.$i active.$p
l=$p
elif [ -n "$l" ]
then
# Missing day (e.g. cron skipped): hard-link the nearest newer snapshot
# so nngoback always finds a file for every day in range.
ln active.$l active.$p
fi
p=$i
done
# Snapshot today's live active file as active.0, world-readable.
cp $ACTIVE active.0
chmod 644 active.0
| true |
756734948a9438f2a2a15f72b6d53fcd0d4049f4 | Shell | buxuyoushang/goid | /hack.sh | UTF-8 | 494 | 3 | 3 | [
"WTFPL"
] | permissive | #!/bin/sh
echo "# Hack runtime"
# Append a GetGoId accessor to the Go runtime C sources. The heredoc body is
# written verbatim into runtime.c (no $ expansions occur in it).
cat >> $GOROOT/src/runtime/runtime.c << EOF
void
runtime·GetGoId(int32 ret)
{
ret = g->goid;
USED(&ret);
}
EOF
# Declare the new function so Go code can call runtime.GetGoId().
cat >> $GOROOT/src/runtime/extern.go << EOF
func GetGoId() int32
EOF
# Rebuild the Go toolchain with the patched runtime.
cd $GOROOT/src
./make.bash
# $$$$ expands to the shell PID twice (e.g. "12341234.go") — a throwaway,
# unique-enough temp file name for the smoke-test program below.
cat > $$$$.go << EOF
package main
import (
"fmt"
"runtime"
)
func main() {
runtime.GetGoId()
fmt.Print("done")
}
EOF
x=`go run $$$$.go`
rm $$$$.go
echo ""
echo ""
echo "---"
# NOTE(review): if the build fails, $x is empty and this unquoted test
# becomes `[ = "done" ]`, a syntax error; quoting "$x" would be safer.
if [ $x = "done" ]; then
echo "Done"
else
echo "Failed"
fi
| true |
83c3eb592d62f3de5ea8e18e2d4ec2c7ab53d2c4 | Shell | geoffreylooker/dotfiles-1 | /bash/_bash_aliases | UTF-8 | 4,423 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env bash
# vim: set tabstop=2 shiftwidth=2 foldmethod=marker:
# ~/.profile or ~/.bash_profile read only by interactive shells
# ~/.profile is not read if ~/.bash_profile or ~/.bash_login exists.
# look into the dropbox-only phpenv_phpbuild branch for a sane .bash_profile
# Resolve the repository root (the parent of the directory holding this file),
# following symlinks so this works when the file is symlinked into $HOME.
export DOTFILESREPO=$(readlink -f "$( dirname "$(readlink -f "${BASH_SOURCE[0]}")")""/..")
# Source the sibling shell-config fragments shipped in the repo.
. "${DOTFILESREPO}"/bash/_bash_extras
. "${DOTFILESREPO}"/bash/_drush_bashrc
. "${DOTFILESREPO}"/bash/_vim_bash
. "${DOTFILESREPO}"/bash/_git_bash
. "${DOTFILESREPO}"/bash/_fzf_bash
# aliases {{{
# Remove dangling Docker volumes, exited (non-data) containers, and dangling
# images in one sweep.
function cleandocker () {
local volumes containers images
# `docker volume ls -q` prints one volume name per line (single column);
# the original piped it through awk '{print $2}', which selected a
# nonexistent field and therefore never removed anything.
volumes="$(docker volume ls -f dangling=true -q)"
[ -n "$volumes" ] && docker volume rm ${volumes//$'\n'/ }
containers="$(docker ps -a | command grep Exited | command grep -v data | awk '{print $1}')"
[ -n "$containers" ] && docker rm -vf ${containers//$'\n'/ }
images="$(docker images -f dangling=true -q)"
[ -n "$images" ] && docker rmi ${images//$'\n'/ }
}
# Open ranger (file manager) in a new urxvt terminal, detached.
alias r='PAGER=less urxvt -e ranger&'
# Typo guard for tmux.
alias tmxu=tmux
# Syntax-highlighted cat via pygments.
alias ccat='pygmentize -O style=monokai -f console256 -g'
# less that passes raw color escapes through.
alias L='less -R'
alias df='df -h'
alias l='ls -CF --group-directories-first'
#alias ll='ls -AlFhrt --group-directories-first'
alias ls='ls --color=auto --group-directories-first'
# Typo guard for ls.
alias sl=ls
# Firefox profile chooser in a separate instance, detached and silenced.
alias fire='firefox -Profilemanager -no-remote &> /dev/null &'
# translate-shell shortcut.
alias t=trans
# Launch Chrome detached, optionally under a named secondary profile stored
# below ~/.config/google-chrome_others/.
function cc () {
  local browser=google-chrome
  local launch_args=""
  if [ -n "$1" ]; then
    launch_args="--user-data-dir=/home/grota/.config/${browser}_others/$1"
  fi
  # Always force the basic (plaintext) password store; profile flag optional.
  launch_args="--password-store=basic $launch_args"
  echo "Running: " "$browser" "$launch_args"
  # Intentionally unquoted so the argument string word-splits into flags.
  $browser $launch_args &> /dev/null &
}
# Process search; trailing space lets the pattern follow immediately.
alias pg='ps aux|grep '
alias pw='pwsafe -Eup -l'
# Free Ctrl-S / Ctrl-Q from terminal flow control so apps can use them.
alias rstty='stty start undef stop undef'
# Quick upward navigation.
alias ..="cd ../"
alias ...="cd ../../"
alias ....="cd ../../../"
alias .....="cd ../../../../"
alias ......="cd ../../../../../"
alias cd-="\cd -"
alias cd..="cd ../"
alias cd...="cd ../../"
alias cd....="cd ../../../"
alias cd.....="cd ../../../../"
alias cd......="cd ../../../../../"
# bc with the math library (floating point) loaded.
alias bc='bc -l'
# NOTE(review): --color=always colors output even when piped, which can feed
# escape codes into downstream tools — confirm this is intended.
alias grep='grep --color=always -n --exclude-dir=.svn --exclude-dir=.git '
# Find files by name with the silver searcher, paged.
alias ff='ag --pager=less -g'
# there's a ss bin but that's ok for me
alias ss='sudo su -'
# {tl,ll,rl}php log {{{
#alias tlphp='tail -F /tmp/php5_apache_errorlog.log'
alias llphp='less /tmp/php5_apache_errorlog.log'
alias rlphp='sudo rm /tmp/php5_apache_errorlog.log'
#}}}
# Acer specific {{{
# Toggle touchpad / internal keyboard via xinput; toggle external display.
alias toff='xinput --disable "SynPS/2 Synaptics TouchPad"'
alias ton='xinput --enable "SynPS/2 Synaptics TouchPad"'
alias koff='xinput --disable "AT Translated Set 2 keyboard"'
alias kon='xinput --enable "AT Translated Set 2 keyboard"'
alias don='xrandr --output HDMI1 --auto --right-of LVDS1'
alias doff='xrandr --output HDMI1 --off'
#alias auext='pactl set-card-profile 0 output:hdmi-stereo+input:analog-stereo'
#alias auint='pactl set-card-profile 0 output:analog-stereo+input:analog-stereo'
#}}}
alias unmute='amixer set "Speaker Front",0 unmute &> /dev/null'
alias mute='amixer set "Speaker Front",0 mute &> /dev/null'
# Fast-forward every dotfiles git submodule except the pinned bin/drush.
alias update_all_dotfiles_submodules="g submodule foreach '[ \"\$path\" = \"bin/drush\" ] || (git fetch origin; git merge origin/master)'"
#}}}
# functions {{{
# list of pulseaudio devices can be fetched via pactl list
# Record the current PulseAudio output ("what you hear") to an MP3 file.
# $1 (optional): output name without extension; defaults to the current
# Spotify track metadata.
function rec() {
local outfile="$1"
if [ -z "$1" ]; then
# NOTE(review): get_spotify_artists_and_title is presumably defined in one
# of the sourced fragments (_bash_extras) — confirm.
local outfile=$(get_spotify_artists_and_title)
fi
outfile="$outfile".mp3
echo "Writing to file '$outfile'"
# 3-second countdown before capture starts.
count=3
while [ $count -gt 0 ]; do
echo $count ...
count=$((count - 1))
sleep 1
done
# The monitor source of the default sink captures whatever is being played.
local device=$(pactl list sinks|command grep Monitor\ Source|awk '{ print $3; }')
# GStreamer pipeline: pulse capture -> stereo raw -> MP3 (CBR 128 kbps).
gst-launch-1.0 -e pulsesrc device="$device" ! "audio/x-raw,channels=2" ! audioconvert ! lamemp3enc target=1 bitrate=128 cbr=true ! filesink location="${outfile}"
}
# Create the given directories (mkdir -p) and cd into the last one.
# ${!#} indirectly expands the last positional parameter, replacing the
# original fragile `eval cd "\"\$$#\""` construction; `--` guards against
# directory names beginning with a dash.
function mkcd() { mkdir -p -- "$@" && cd -- "${!#}"; }
# Grant the Apache run-user and the current user rwX ACLs (recursively) plus
# matching default ACLs on the given paths, so the web server and the owner
# can both write without resorting to chmod 777.
function fixperms() {
# [a]pache / [h]ttpd bracket trick keeps this grep from matching itself.
APACHEUSER=$(ps aux | \grep -E '[a]pache|[h]ttpd' | \grep -v root | head -1 | cut -d\ -f1)
sudo setfacl -R -m u:"$APACHEUSER":rwX -m u:"$(whoami)":rwX "$@"
# -d: also set *default* ACLs so newly created files inherit the grants.
sudo setfacl -dR -m u:"$APACHEUSER":rwX -m u:"$(whoami)":rwX "$@"
}
# Kick Unity/compiz and X input back into a sane state: disable the compiz
# alt-tab binding, restart xbindkeys, and reapply the keyboard repeat rate
# plus the custom keymap.
function resunity() {
dbus-send --print-reply --dest=org.freedesktop.compiz /org/freedesktop/compiz/unityshell/screen0/alt_tab_next_window org.freedesktop.compiz.set string:Disabled &> /dev/null
killall xbindkeys; xbindkeys
# 200 ms delay, 90 repeats/sec, then restore the custom modifier layout.
xset r rate 200 90 && xmodmap ~/.Xmodmap
}
#}}}
| true |
e2a330c8b63aea33a3994cf80084b3a2f74c2f0c | Shell | renlord/dotfiles | /HOME-STOW/bash/.bashaux/startmaple.sh | UTF-8 | 595 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env bash
# Launch MapleLegends under a 32-bit Wine prefix inside a 1024x768 virtual
# desktop. $1 (optional): virtual-desktop window name, default "MapleLegends".
startmaple() {
[[ -z $1 ]] && WINDOWNAME=MapleLegends || WINDOWNAME=$1
# NOTE(review): `exit 1` inside a sourced function terminates the whole
# interactive shell on failure — `return 1` is likely intended; confirm.
cd ~/.wine/drive_c/MapleLegendsHD/ || exit 1
WINEARCH=win32 WINEDEBUG=-all wine explorer /desktop=$WINDOWNAME,1024x768 MapleLegends.exe
}
# Launch GunBound Origins under Wine inside a 1024x768 virtual desktop.
# $1 (optional): virtual-desktop window name.
startgunbound() {
# NOTE(review): the default window name "MapleLegends" looks copy-pasted
# from startmaple — confirm the intended default.
[[ -z $1 ]] && WINDOWNAME=MapleLegends || WINDOWNAME=$1
cd /home/rl/.wine/drive_c/GunBound\ Origins || exit 1
WINEARCH=win32 WINEDEBUG=-all wine explorer /desktop=$WINDOWNAME,1024x768 'Gunbound Origins.exe'
}
# Force-kill any MapleLegends / wine / *.exe processes.
# pkill replaces the original `pgrep | xargs kill -9` pipelines: with no
# matching process, those ran `kill -9` with no arguments and printed a
# usage error instead of doing nothing.
stopallmaple() {
pkill -9 Maple
pkill -9 wine
pkill -9 exe
}
| true |
74b4c6c8691415aafc688bc1d80637cedff4454c | Shell | kyluca/dotfiles | /update_dotfiles.sh | UTF-8 | 884 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# Sync the user's live dotfiles back into the dotfiles repository checkout.
# Abort if the checkout is missing: otherwise every cp below would land in
# whatever the current working directory happened to be.
pushd ~/repos/dotfiles || exit 1
# Copy files (cp is aliased to cp -i by default, use /usr/bin/cp to avoid the interactive prompts)
/usr/bin/cp ~/.bash_aliases ./
/usr/bin/cp ~/.bashrc ./
/usr/bin/cp ~/.gitconfig ./
/usr/bin/cp ~/.inputrc ./
/usr/bin/cp ~/.pdbrc ./
/usr/bin/cp ~/.tmux.conf ./
/usr/bin/cp ~/.config/pip/pip.conf ./.config/pip/pip.conf
/usr/bin/cp ~/.config/alacritty/alacritty.yml ./.config/alacritty/alacritty.yml
# Copy powerline configs
rsync -avr ~/.config/powerline/ ./.config/powerline
# Copy Sublime Text configs except for Package Control caches, just Package Control settings
rsync -avr --exclude='*Package Control*' ~/.config/sublime-text/Packages/User/ ./.config/sublime-text/Packages/User
/usr/bin/cp ~/.config/sublime-text/Packages/User/Package\ Control.sublime-settings ./.config/sublime-text/Packages/User/Package\ Control.sublime-settings
popd
| true |
16267db2079eec86bb59f56279a64511f75fee78 | Shell | ldocao/nyc-taxi-data-postgres | /restart/load_data.sh | UTF-8 | 1,072 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Load NYC taxi data into PostgreSQL, fanning out one psql \copy per CPU.
# NOTE(review): hard-coded superuser password exported into the environment;
# prefer ~/.pgpass or an env file kept out of version control.
export PGPASSWORD=K2f7mstuzakFFspH
# Create the destination schemas.
psql -U postgres -f denormalized.sql
psql -U postgres -f normalized.sql
# Weather load runs in the background alongside the trip loads.
psql -d postgres -U postgres -p 5432 -c "\copy normalized_weather FROM PROGRAM 'gzip -dc $HOME/nyc-taxi-data/normalized/weather_xaa.csv.gz' DELIMITER ',' CSV HEADER NULL ''" &
files=`ls -1 $HOME/nyc-taxi-data/denormalized/*.csv.gz`
N=`grep -c ^processor /proc/cpuinfo`
for f in $files; do
((i=i%N)); ((i++==0)) && wait #parallel run only N at a time ; see https://unix.stackexchange.com/questions/103920/parallelize-a-bash-for-loop
psql -d postgres -U postgres -p 5432 -c "\copy denormalized_trips FROM PROGRAM 'gzip -dc $f' DELIMITER ',' CSV HEADER NULL ''" &
done
wait
files=`ls -1 $HOME/nyc-taxi-data/normalized/trips*.csv.gz`
for f in $files; do
((i=i%N)); ((i++==0)) && wait
psql -d postgres -U postgres -p 5432 -c "\copy normalized_trips FROM PROGRAM 'gzip -dc $f' DELIMITER ',' CSV HEADER NULL ''" &
done
# Wait for the backgrounded normalized-trip loads to finish before building
# derived tables — the original started cube.sql while they were still running.
wait
#precompute some solution because they take several hours to run
psql -U postgres -f cube.sql
psql -U postgres -f index.sql
| true |
d2d94acf1f520b9dcff7aaab7dd424637ac086da | Shell | clearlinux-pkgs/LS_COLORS | /50-colors.sh | UTF-8 | 418 | 2.671875 | 3 | [] | no_license | # ls colors
alias ls='ls --color=auto'
# Pick the first available dircolors database: per-user, then system-wide,
# then the distribution default shipped with this package.
if [ -f "$HOME/.dircolors" ]; then
eval `dircolors -b "$HOME/.dircolors"`
elif [ -f "/etc/dircolors" ]; then
eval `dircolors -b "/etc/dircolors"`
elif [ -f "/usr/share/defaults/etc/LS_COLORS" ]; then
eval $( dircolors -b /usr/share/defaults/etc/LS_COLORS )
fi
# GCC diagnostics/colors
export GCC_COLORS="error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01"
| true |
4ed327c0807898b6ad0cee27d063d83d281d0643 | Shell | tokenchain/dp-relay | /script/install.sh | UTF-8 | 381 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env bash
# Load DEMON / DEMONPATH definitions shared with the other scripts.
. ./_auth.sh
echo "====================================="
echo "install files"
echo "====================================="
#unzip achive.zip -d /usr/bin
# The original tested `! -f` (regular file) before creating a *directory*;
# test for the directory instead, and quote the expansions.
if [[ ! -d "$DEMONPATH" ]]; then
    mkdir -p "$DEMONPATH"
fi
cp "linux/$DEMON" "$DEMONPATH/$DEMON"
echo "====================================="
echo "install completed"
echo "====================================="
| true |
992a1b43d42452f5a0c7bdcacb116b1c7b5fcaeb | Shell | jprivet-dev/git-xtended | /src/test/test.main.sh | UTF-8 | 2,810 | 4.21875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
function gx_test_count_tests_increment() {
  # Bump the global executed-tests counter.
  # Plain assignment instead of ((var++)): the arithmetic command exits
  # non-zero when the pre-increment value is 0, which would break callers
  # using `set -e` or `&&` chaining on the very first increment.
  gx_test_count_tests=$((gx_test_count_tests + 1))
}
function gx_test_count_assertions_increment() {
  # Bump the global assertions counter.
  # Plain assignment instead of ((var++)): the arithmetic command exits
  # non-zero when the pre-increment value is 0, which would break callers
  # using `set -e` or `&&` chaining on the very first increment.
  gx_test_count_assertions=$((gx_test_count_assertions + 1))
}
function gx_test_count_failures_increment() {
  # Bump the global failed-assertions counter.
  # Plain assignment instead of ((var++)): the arithmetic command exits
  # non-zero when the pre-increment value is 0, which would break callers
  # using `set -e` or `&&` chaining on the very first increment.
  gx_test_count_failures=$((gx_test_count_failures + 1))
}
function gx_test_print_results() {
  # Print the final summary line: green "OK" when no assertion failed,
  # red "FAILURES!" otherwise. Reads the three global gx_test_count_* vars.
  local noun_test="test"
  local noun_assertion="assertion"
  local noun_failure="failure"
  if [[ ${gx_test_count_tests} -gt 1 ]]; then noun_test="tests"; fi
  if [[ ${gx_test_count_assertions} -gt 1 ]]; then noun_assertion="assertions"; fi
  if [[ ${gx_test_count_failures} -gt 1 ]]; then noun_failure="failures"; fi
  local summary="${gx_test_count_tests} ${noun_test}, ${gx_test_count_assertions} ${noun_assertion}, ${gx_test_count_failures} ${noun_failure}"
  if [[ ${gx_test_count_failures} -gt 0 ]]; then
    gx_print_title_error "FAILURES! ${summary}"
  else
    gx_print_title_success "OK: ${summary}"
  fi
}
function gx_test_find_all_func_test() {
  # List every currently defined shell function whose name starts with
  # "test_", one name per line (alphabetical order).
  declare -F | awk '{print $3}' | grep '^test_'
}
function gx_test_find_all_func_gx() {
  # List every gx_* function under test, excluding the gx_test_* framework
  # helpers themselves. One name per line.
  declare -F | awk '{print $3}' | grep '^gx_' | grep -v '^gx_test_'
}
function gx_test_run() {
  # Entry point: with no argument, show coverage and run every test;
  # with one argument, run only that named test function.
  local target="$1"
  gx_print_step "Tests launched"
  if [[ -z "${target}" ]]; then
    TEST_GX_PARAMS_ASSERT_OK_SHOW_MESSAGE=0
    gx_test_check_func_with_test
    echo
    gx_test_run_all
  else
    # shellcheck disable=SC2034
    TEST_GX_PARAMS_ASSERT_OK_SHOW_MESSAGE=1
    gx_test_run_only "${target}"
  fi
}
function gx_test_run_all() {
  # Discover every test_* function, execute each one, then print totals.
  local test_fn
  gx_print_title_2 "Launch all tests"
  while IFS= read -r test_fn; do
    gx_test_run_func "${test_fn}"
  done < <(gx_test_find_all_func_test)
  gx_test_print_results
}
function gx_test_run_only() {
  # Run a single named test function; report an error if it is undefined.
  local wanted="$1"
  gx_print_title_2 "Launch only one test"
  if ! type "${wanted}" &>/dev/null; then
    echo -e "${C_LIGHT_RED}[ERROR] Test function '${wanted}' does not exist${F_RESET}"
    return
  fi
  gx_test_run_func "${wanted}"
  gx_test_print_results
}
function gx_test_run_func() {
  # Announce and invoke one test function, bump the executed-tests counter,
  # then flush any queued assertion messages.
  local fn_name="$1"
  # %b interprets backslash escapes (like echo -e) without a trailing newline.
  printf '%b' "${C_LIGHT_YELLOW}#${F_RESET} ${fn_name} "
  "${fn_name}"
  gx_test_count_tests_increment
  echo
  gx_test_assert_pipeline_message_print_all
}
function gx_test_check_func_with_test() {
  # For every gx_* function, print whether a matching test_gx_* function
  # exists, then a coverage summary with a percentage.
  local func_gx_list=($(gx_test_find_all_func_gx))
  local count=0
  local count_total=0
  local func func_test_version
  gx_print_title_2 "Functions with test"
  for func in "${func_gx_list[@]}"; do
    func_test_version="test_${func}"
    # Assignment form avoids ((x++)) returning non-zero when x was 0.
    count_total=$((count_total + 1))
    if type "$func_test_version" &>/dev/null; then
      echo -e "${C_LIGHT_GREEN}[x] ${func}${F_RESET}"
      count=$((count + 1))
    else
      echo -e "${C_LIGHT_BLUE}[ ] ${func}${F_RESET}"
    fi
  done
  # Guard the percentage: the original divided by count_total unconditionally,
  # raising a division-by-zero arithmetic error when no gx_* function exists.
  if ((count_total > 0)); then
    echo -e "${C_LIGHT_YELLOW}\nTOTAL: ${count}/${count_total} - $((count*100 / count_total))%\n${F_RESET}"
  else
    echo -e "${C_LIGHT_YELLOW}\nTOTAL: 0/0 - 0%\n${F_RESET}"
  fi
}
| true |
d0f73fd409e8346d7e4f729dadf36a645aa5e564 | Shell | Su-gyeomKim/Bash-Shell | /Part7_UpgradeCDH6.sh | UTF-8 | 11,973 | 2.765625 | 3 | [] | no_license | #!/bin/bash
echo " *******************************************************
go to URL MUST BE CHANGE when scripts run on dev, real server
*******************************************************"
######################Global Variable############################
# Target CDH version markers referenced by the upgrade steps.
# NOTE(review): Version/mini_Version appear unused in this section of the
# script — confirm they are consumed by later parts.
Version=8
mini_Version=212
# Scratch directory for configuration/database backups taken during upgrade.
backup=/backup
mkdir -p $backup
#################################################################
##########################Part 7#################################
echo -e "\n\nPart 7\n\n"
echo "=========================================================="
echo "#7-1 Yarn NodeManager Service releas & reassignment"
if [ `free -m | grep ^Mem | awk '{print $2}'` -lt 128000 ]; then
echo "go to http://`hostname -I | awk '{print $2}'`:7180/cmf/services/21/instances"
else
echo "go to http://`hostname -I | awk '{print $1}'`:7180/cmf/services/21/instances"
fi
echo "every \'NodeManager\' checkbox check & service (releas & reassignment)"
echo "DO NOT START \'NodeManager\'!"
if [ `free -m | grep ^Mem | awk '{print $2}'` -lt 128000 ]; then
echo "go to http://`hostname -I | awk '{print $2}'`:7180/cmf/services/30/config#filterdisplayGroup=%EA%B3%A0%EA%B8%89"
else
echo "go to http://`hostname -I | awk '{print $1}'`:7180/cmf/services/30/config#filterdisplayGroup=%EA%B3%A0%EA%B8%89"
fi
echo -e "change value \'kafka.properties에 대한 Kafka Broker 고급 구성 스니펫(안전 밸브)kafka.properties에 대한 Kafka Broker 고급 구성 스니펫(안전 밸브)\' \nas \ninter.broker.protocol.version = 0.9.0 \nlog.message.format.version = 0.9.0"
echo "
**********************************************************
WARNING!!!! DO YOU FINISH THIS STEP?
then press AnyKey
IF Want to stop this Step Press CTRL+C
**********************************************************
"
read
echo "=========================================================="
echo "=========================================================="
echo "#7-2 Kafka Version Configure"
echo "go to http://`hostname -I | awk '{print $2}'`:7180/cmf/services/30/config#filterdisplayGroup=%EA%B3%A0%EA%B8%89"
echo "
**********************************************************
WARNING!!!! DO YOU FINISH THIS STEP?
then press AnyKey
IF Want to stop this Step Press CTRL+C
**********************************************************
"
read
echo "=========================================================="
echo "=========================================================="
echo "#7-3 other configuration check & adjust"
if [ `free -m | grep ^Mem | awk '{print $2}'` -lt 128000 ]; then
echo "go to http://`hostname -I | awk '{print $2}'`:7180/cmf/"
else
echo "go to http://`hostname -I | awk '{print $1}'`:7180/cmf/"
fi
echo "
**********************************************************
WARNING!!!! DO YOU FINISH THIS STEP?
then press AnyKey
IF Want to stop this Step Press CTRL+C
**********************************************************
"
read
echo "=========================================================="
echo "=========================================================="
echo "#7-4 CDH Stop"
if [ `free -m | grep ^Mem | awk '{print $2}'` -lt 128000 ]; then
echo "go to http://`hostname -I | awk '{print $2}'`:7180/cmf/"
else
echo "go to http://`hostname -I | awk '{print $1}'`:7180/cmf/"
fi
echo "CDH STOP"
echo "
**********************************************************
WARNING!!!! DO YOU FINISH THIS STEP?
then press AnyKey
IF Want to stop this Step Press CTRL+C
**********************************************************
"
read
echo "=========================================================="
echo "=========================================================="
echo "#7-5 Install CDH Packages"
yum clean all
yum repolist
yum -y remove hadoop-0.20\* hue-\* crunch llama mahout sqoop2 whirr sqoop2-client
if [ `free -m | grep ^Mem | awk '{print $2}'` -lt 128000 ]; then
tar -xvzf cdh62_dependency.tar.gz
cd cdh62_dependency
rpm -Uvh --replacepkgs *rpm
else
yum -y install libtidy lsb
fi
yum -y install avro-tools flume-ng hadoop-hdfs-fuse hadoop-hdfs-nfs3 hadoop-httpfs hadoop-kms hbase-solr hive-hbase hive-webhcat hue impala impala-shell kafka kite kudu oozie pig search sentry sentry-hdfs-plugin solr-crunch solr-mapreduce spark-core spark-python sqoop zookeeper parquet hbase solr
echo "
**********************************************************
WARNING!!!! DO YOU FINISH THIS STEP?
then press AnyKey
IF Want to stop this Step Press CTRL+C
**********************************************************
"
read
echo "=========================================================="
echo "=========================================================="
echo "#7-6 default JAVA directory Change"
mv /usr/java /usr/java_back
echo "
**********************************************************
WARNING!!!! DO YOU FINISH THIS STEP?
then press AnyKey
IF Want to stop this Step Press CTRL+C
**********************************************************
"
read
echo "=========================================================="
echo "=========================================================="
echo "#7-7 Cloudera MAnager Agent Restart"
systemctl restart cloudera-scm-agent
echo "
**********************************************************
WARNING!!!! DO YOU FINISH THIS STEP?
then press AnyKey
IF Want to stop this Step Press CTRL+C
**********************************************************
"
read
echo "=========================================================="
echo "=========================================================="
echo "#7-8 (\*Laptop) Oozie Password Change"
if [ `free -m | grep ^Mem | awk '{print $2}'` -lt 128000 ]; then
# The original left these GRANT/FLUSH statements as bare shell commands,
# which can never execute ("grant: command not found"); feed them to the
# mysql client instead.
# NOTE(review): assumes local root access without a password, like the rest
# of the upgrade flow — confirm credentials before running.
mysql -e "GRANT ALL PRIVILEGES ON oozie.* TO 'oozie'@'%' IDENTIFIED BY 'oozie'; FLUSH PRIVILEGES;"
echo "go to http://`hostname -I | awk '{print $2}'`:7180/cmf/services/20/config#filterdisplayGroup=%EB%8D%B0%EC%9D%B4%ED%84%B0%EB%B2%A0%EC%9D%B4%EC%8A%A4"
fi
echo "create Oozie password"
echo "
**********************************************************
WARNING!!!! DO YOU FINISH THIS STEP?
then press AnyKey
IF Want to stop this Step Press CTRL+C
**********************************************************
"
read
echo "=========================================================="
echo "=========================================================="
echo "#7-9 Run the Upgrade CDH Wizard"
if [ `free -m | grep ^Mem | awk '{print $2}'` -lt 128000 ]; then
echo "go to http://`hostname -I | awk '{print $2}'`:7180/cmf/clusters/1/upgradePopup"
else
echo "go to http://`hostname -I | awk '{print $1}'`:7180/cmf/clusters/1/upgradePopup"
fi
echo "
**********************************************************
WARNING!!!! DO YOU FINISH THIS STEP?
then press AnyKey
IF Want to stop this Step Press CTRL+C
**********************************************************
"
read
echo "=========================================================="
echo "=========================================================="
echo "#7-10 Start Yarn NodeManager "
if [ `free -m | grep ^Mem | awk '{print $2}'` -lt 128000 ]; then
echo "go to http://`hostname -I | awk '{print $2}'`:7180/cmf/services/21/instances"
else
echo "go to http://`hostname -I | awk '{print $1}'`:7180/cmf/services/21/instances"
fi
echo "every \'NodeManager\' checkbox check & start"
echo "
**********************************************************
WARNING!!!! DO YOU FINISH THIS STEP?
then press AnyKey
IF Want to stop this Step Press CTRL+C
**********************************************************
"
read
echo "=========================================================="
echo "=========================================================="
echo "#7-11 other configuration check & adjust"
if [ `free -m | grep ^Mem | awk '{print $2}'` -lt 128000 ]; then
echo "go to http://`hostname -I | awk '{print $2}'`:7180/cmf/"
else
echo "go to http://`hostname -I | awk '{print $1}'`:7180/cmf/"
fi
echo "
**********************************************************
WARNING!!!! DO YOU FINISH THIS STEP?
then press AnyKey
IF Want to stop this Step Press CTRL+C
**********************************************************
"
read
echo "=========================================================="
echo "=========================================================="
echo "#7-12 default port change & kafka version adjust"
if [ `free -m | grep ^Mem | awk '{print $2}'` -lt 128000 ]; then
echo "go to http://`hostname -I | awk '{print $2}'`:7180/cmf/config2?task=ALL_PORT_CONFIGURATIONS"
else
echo "go to http://`hostname -I | awk '{print $1}'`:7180/cmf/config2?task=ALL_PORT_CONFIGURATIONS"
fi
echo "HBase Master 웹 UI 포트 : 16010 -> 60010"
echo "HBase RegionServer 웹 UI 포트 : 16030 -> 60030"
echo "DataNode HTTP 웹 UI 포트 : 9864 -> 50075"
echo "NameNode 웹 UI 포트 : 9870 -> 50070"
echo "SAVE"
if [ `free -m | grep ^Mem | awk '{print $2}'` -lt 128000 ]; then
echo "go to http://`hostname -I | awk '{print $2}'`:7180/cmf/services/30/config#filterdisplayGroup=%EA%B3%A0%EA%B8%89"
else
echo "go to http://`hostname -I | awk '{print $1}'`:7180/cmf/services/30/config#filterdisplayGroup=%EA%B3%A0%EA%B8%89"
fi
echo -e "delete value \'kafka.properties에 대한 Kafka Broker 고급 구성 스니펫(안전 밸브)kafka.properties에 대한 Kafka Broker 고급 구성 스니펫(안전 밸브)\' \n as \ninter.broker.protocol.version = 0.9.0 \nlog.message.format.version = 0.9.0"
echo "
**********************************************************
WARNING!!!! DO YOU FINISH THIS STEP?
then press AnyKey
IF Want to stop this Step Press CTRL+C
**********************************************************
"
read
echo "=========================================================="
echo "=========================================================="
echo "#7-13 HDFS metadata Upgrade"
if [ `free -m | grep ^Mem | awk '{print $2}'` -lt 128000 ]; then
echo "go to http://`hostname -I | awk '{print $2}'`:7180/cmf/services/22/instances/26/do?command=HdfsFinalizeUpgrade"
else
echo "go to http://`hostname -I | awk '{print $1}'`:7180/cmf/services/22/instances/26/do?command=HdfsFinalizeUpgrade"
fi
echo "
**********************************************************
WARNING!!!! DO YOU FINISH THIS STEP?
then press AnyKey
IF Want to stop this Step Press CTRL+C
**********************************************************
"
read
echo "=========================================================="
echo "=========================================================="
echo "#7-14 DataBase Backup"
echo "mysqldump -u root -p --all-databases > $backup/alldatabases_6.2_`date +%F`.sql"
echo "
**********************************************************
WARNING!!!! DO YOU FINISH THIS STEP?
then press AnyKey
IF Want to stop this Step Press CTRL+C
**********************************************************
"
read
echo "=========================================================="
| true |
e7b1bedb62b08fc5f6900975bdcedfee708e73a0 | Shell | aleweichandt/tpso2c2007 | /EMULASO/ADP/ejec.sh | UTF-8 | 140 | 2.890625 | 3 | [] | no_license | #!/bin/sh
# Start adp only when it is not already running.
# pgrep -x matches the exact process name, unlike the old
# `ps -A | grep adp | wc -l | cut -d" " -f7` pipeline, which also matched
# substrings and relied on a fragile cut of wc's output.
lineas=$(pgrep -cx adp)
echo "$lineas"
if [ "$lineas" != "0" ]; then
echo "ADP ya esta corriendo!"
else
./adp
fi
| true |
dc37b6d727d0b7f9bb749ba0557899b169a751fb | Shell | bird-house/birdhouse-deploy | /birdhouse/scripts/deprecated/trigger-pavicscrawler | UTF-8 | 1,537 | 3.6875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Trigger pavicscrawler on local PAVICS host.
#
# Need to open temporary Thredds "testdata/secure/" on local PAVICS host to anonymous group.
#
# pavicscrawler is a method of the catalog WPS service to index Thredds
# catalog into Solr DB for quick searching.
#
# To crawl only 1 file:
# trigger-pavicscrawler target_files=birdhouse/testdata/secure/tasmax_Amon_MPI-ESM-MR_rcp45_r2i1p1_200601-200612.nc
#
# To crawl only 1 dir:
# trigger-pavicscrawler target_files=birdhouse/testdata
#
# Set env var PAVICS_CRAWLER_HOST to target different PAVICS host.
# Resolve this script's real location so the compose dir can be found even
# when the script is invoked through a symlink.
THIS_FILE="`realpath "$0"`"
THIS_DIR="`dirname "$THIS_FILE"`"
COMPOSE_DIR="`dirname "$THIS_DIR"`"
if [ -f "$COMPOSE_DIR/read-configs.include.sh" ]; then
. "$COMPOSE_DIR/read-configs.include.sh"
# Get PAVICS_FQDN
read_configs
fi
# Allow override using same name env var.
if [ -z "$PAVICS_CRAWLER_HOST" ]; then
PAVICS_CRAWLER_HOST="$PAVICS_FQDN"
fi
# Trace the request so the full crawler URL appears in the script output;
# extra script arguments are appended as WPS DataInputs.
set -x
curl --include "http://${PAVICS_CRAWLER_HOST}:8086/pywps?service=WPS&request=execute&version=1.0.0&identifier=pavicrawler&storeExecuteResponse=true&status=true&DataInputs=$*"
set +x
echo "
NOTE the
statusLocation=\"https://HOST/wpsoutputs/catalog/e31a4914-16e8-11ea-aab9-0242ac130014.xml\"
returned in the XML body of the curl command. The status of the crawl,
whether ongoing, failed or success will be in that link.
Once crawler is done, go check the Solr DB at
http://$PAVICS_CRAWLER_HOST:8983/solr/#/birdhouse/query for content inserted by the
crawler. Just click on \"Execute Query\".
"
| true |
9f5660aef462691c81dc2c173adb100dfc7dab15 | Shell | Gasol/prometheus-phpfpm-exporter | /debian/init.d | UTF-8 | 2,640 | 3.796875 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/sh
### BEGIN INIT INFO
# Provides: prometheus-phpfpm-exporter
# Required-Start: $local_fs $network $remote_fs $syslog
# Required-Stop: $local_fs $network $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Prometheus exporter for PHP-FPM metrics
# Description: Prometheus exporter for PHP-FPM metrics, written in Go
### END INIT INFO
# Author: Gasol Wu <gasol.wu@gmail.com>
DESC="Prometheus exporter for PHP-FPM metrics"
DAEMON=/usr/bin/prometheus-phpfpm-exporter
ARGS=""
NAME=prometheus-phpfpm-exporter
USER=prometheus
PIDFILE=/var/run/prometheus/prometheus-phpfpm-exporter.pid
LOGFILE=/var/log/prometheus/prometheus-phpfpm-exporter.log
SCRIPTNAME=/etc/init.d/$NAME
[ -x "$DAEMON" ] || exit 0
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
. /lib/init/vars.sh
. /lib/lsb/init-functions
[ -x "$HELPER" ] || exit 0
do_start()
{
# Prepare runtime dirs and start the exporter via start-stop-daemon.
# Returns: 0 started, 1 already running, 2 could not be started (LSB).
mkdir -p `dirname $PIDFILE` || true
chown -R $USER: `dirname $LOGFILE`
chown -R $USER: `dirname $PIDFILE`
# --test: dry-run probe; failure means a matching process already runs.
start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
|| return 1
start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
$ARGS \
|| return 2
}
do_stop()
{
# Stop the daemon gracefully (TERM, wait up to 30s) then forcefully (KILL).
# Returns: 0 stopped, 1 was not running, 2 could not be stopped (LSB).
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
RETVAL="$?"
[ "$RETVAL" = 2 ] && return 2
# Second pass by executable path catches leftover children of the daemon.
start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
[ "$?" = 2 ] && return 2
# Many daemons don't delete their pidfiles when they exit.
rm -f $PIDFILE
return "$RETVAL"
}
do_reload() {
# Ask the daemon to reload its configuration by sending SIGHUP (signal 1),
# and always report success, per the LSB "reload" convention.
start-stop-daemon --stop --signal 1 --quiet --pidfile "$PIDFILE" --name "$NAME"
return 0
}
# LSB action dispatch. Note: from do_start/do_stop, status 0 and 1 both
# count as success (already in the desired state).
case "$1" in
start)
[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
esac
;;
stop)
[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
do_stop
case "$?" in
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
esac
;;
status)
status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
;;
restart|force-reload)
#
# If the "reload" option is implemented then remove the
# 'force-reload' alias
#
log_daemon_msg "Restarting $DESC" "$NAME"
do_stop
case "$?" in
0|1)
do_start
case "$?" in
0) log_end_msg 0 ;;
1) log_end_msg 1 ;; # Old process is still running
*) log_end_msg 1 ;; # Failed to start
esac
;;
*)
# Failed to stop
log_end_msg 1
;;
esac
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac
# Final ':' guarantees the script itself exits 0 when dispatch fell through.
:
| true |
de189c1e7e3f7e93f7695de6440738372c0eb0e0 | Shell | PavoReal/PiTerm | /jenkins-build.sh | UTF-8 | 466 | 2.78125 | 3 | [] | no_license | #! /bin/bash
# CI build: release then debug, archiving the release binary and the
# disassembly listing of each build.

# run_build <flag> <Label>: time one build.sh invocation and report.
# Factors out the duplicated check blocks and replaces the `[ ! $? -eq 0 ]`
# anti-idiom with an explicit status test. Exits 1 on failure.
run_build() {
    local flag="$1" label="$2"
    time ./build.sh "$flag"
    local status=$?
    if [ "$status" -ne 0 ]; then
        echo -e "\e[7m${label} build failed...\e[0m"
        exit 1
    fi
    # ${label,,} lower-cases the label to match the original success messages.
    echo -e "\e[7mGood ${label,,} build\e[0m"
}

run_build -r Release
cp build/PiTerm build/PiTerm-Release
objdump -S --disassemble build/PiTerm > build/PiTerm-Release.list

run_build -d Debug
objdump -S --disassemble build/PiTerm > build/PiTerm-Debug.list
309264ed3a1a99394f9a4795fa6e4741c0878632 | Shell | ZongpengYang/jarvis_data_eng_zongpeng | /linux_sql/scripts/host_usage.sh | UTF-8 | 1,412 | 4.0625 | 4 | [] | no_license | #!/bin/bash
# Collect one snapshot of host resource usage and insert it into the
# host_usage table of the monitoring database.
# Usage: host_usage.sh host_name port_number database_name user_name password
host_name=$1
port_number=$2
database_name=$3
user_name=$4
# NOTE(review): $password is captured but never used — psql -w relies on
# ~/.pgpass or PGPASSWORD instead; confirm this is intended.
password=$5
# Validate the argument count. The original script opened a double quote in
# this branch that was never closed, swallowing the >&2 / exit 1 and making
# the whole file unparseable.
if [ "$#" -ne 5 ]; then
  echo "WARNING: The number of arguments is not correct... Please enter exact 5 arguments." >&2
  echo "Format: host_usage.sh host_name port_number database_name user_name password" >&2
  exit 1
fi
# collect information for CPU, memory, disk usage
#another way: timestamp=$(vmstat -t | sed -n 3p | awk '{print $(NF-1)," ",$NF}')
timestamp=$(date '+%Y-%m-%d %H:%M:%S.%3N')
# The awk programs locate the column index by header name on the header line,
# then print that column of the last (data) line in the END block.
memory_free=$(vmstat --unit M | awk '{for(i=NF;i>0;i--)if($i=="free"){x=i;break}}END{print $x}')
cpu_idel=$(vmstat --unit M | awk '{for(i=NF;i>0;i--)if($i=="id"){x=i;break}}END{print $x}')
cpu_kernel=$(vmstat --unit M | awk '{for(i=NF;i>0;i--)if($i=="sy"){x=i;break}}END{print $x}')
disk_io=$(vmstat -D | egrep "inprogress\sIO" | awk '{print $1}')
disk_available=$(df -BM / | awk '{for(i=NF;i>0;i--)if($i=="Available"){x=i;break}}END{print $x}' | grep -o "[0-9]\+")
hostName=$(hostname -f)
# insert collected information into the table created
# host_id is from host_info table determined by the hostname
psql -h "$host_name" -U "$user_name" -w "$database_name" -p "$port_number" -c \
"INSERT INTO host_usage
(timestamp, host_id, memory_free, cpu_idel, cpu_kernel, disk_io,
disk_available)
VALUES ('$timestamp',
(SELECT id from host_info where hostname='$hostName'),
$memory_free, $cpu_idel, $cpu_kernel, $disk_io,
$disk_available);"
exit 0
| true |
9b4e236f42fccf07d1cab96b5f505bd52ee477bf | Shell | jaredballou/linuxgsm | /functions/command_console.sh | UTF-8 | 1,194 | 3.8125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# LGSM command_console.sh function
# Author: Daniel Gibbs
# Website: http://gameservermanagers.com
# Description: Gives access to the server tmux console.
#
# This file is not standalone: check.sh, the fn_print*/fn_scriptlog
# helpers, ${gamename} and ${servicename} are provided by the LinuxGSM
# core that sources/executes it.
# NOTE(review): `local` below is only legal inside a function — presumably
# this script runs in a function context set up by the caller; confirm.
local modulename="Console"
function_selfname="$(basename $(readlink -f "${BASH_SOURCE[0]}"))"
check.sh
echo ""
echo "${gamename} Console"
echo "============================"
echo ""
echo "Press \"CTRL+b d\" to exit console."
fn_printwarningnl "Do NOT press CTRL+c to exit."
echo ""
# Confirm before attaching (CTRL+c inside tmux would kill the server).
while true; do
	read -e -i "y" -p "Continue? [y/N]" yn
	case $yn in
	[Yy]* ) break;;
	[Nn]* ) echo Exiting; return;;
	* ) echo "Please answer yes or no.";;
	esac
done
fn_printdots "Starting"
sleep 1
# Count tmux sessions whose name matches this service (ignoring a
# "failed to connect" message when no server is running).
tmuxwc=$(tmux list-sessions 2>&1|awk '{print $1}'|grep -v failed|grep -Ec "^${servicename}:")
if [ "${tmuxwc}" -eq 1 ]; then
	fn_printoknl "Starting"
	fn_scriptlog "accessed"
	sleep 1
	# Attach the user to the running server's tmux session.
	tmux attach-session -t ${servicename}
else
	fn_printfailnl "Server not running"
	fn_scriptlog "Failed to access: Server not running"
	sleep 1
	# Offer to start the server, then re-enter this console command.
	while true; do
		read -e -i "y" -p "Do you want to start the server? [y/N]" yn
		case $yn in
		[Yy]* ) command_start.sh; command_console.sh; break;;
		[Nn]* ) break;;
		* ) echo "Please answer yes or no.";;
		esac
	done
fi
| true |
7f6d792f5a30d58ae0d87211ad8f6b1a45b05303 | Shell | networkcube/bashscripts | /gitpull | UTF-8 | 302 | 3.296875 | 3 | [] | no_license | #!/bin/bash
cd ..
echo '>>> Start pulling all vistorian repos'
# Visit every entry of the parent directory, descending only into
# directories. Quoting "$file" keeps names with spaces intact, and the
# subshell guarantees we come back to the parent directory even when
# `cd` or `git pull` fails (the old unquoted `cd $file; ...; cd ..`
# could strand the loop in the wrong directory).
for file in *
do
    if [[ -d "$file" ]]; then
        echo '---'
        echo 'GIT PULL in dir: ' "$file"
        ( cd "$file" && git pull )
    fi
done
echo '<<<<<< End pulling all vistorian repos'
# return to this folder
cd bashscripts | true |
e14fc5a365ca5856324c055c2cff93d3de392175 | Shell | killbus/clash-gateway | /init.sh | UTF-8 | 7,134 | 3.640625 | 4 | [] | no_license | #!/bin/bash
CONFIG_PATH='/etc/clash-gateway'
function check_env {
  # Verify the clash binary and the bundled sample configs exist. If any
  # is missing, run /update.sh to fetch them and re-exec this script; if
  # updating fails (e.g. no network), report and return non-zero so the
  # caller chain (check_env && check_config && ...) aborts.
  # NOTE(review): "$@" expands to check_env's own arguments, and the
  # dispatcher calls check_env with none — so the re-exec drops the
  # original CLI action argument; confirm whether the script's arguments
  # were meant to be forwarded here.
  if [ ! -f /clash -o ! -f /sample_config/cg.conf -o ! -f /sample_config/config.yml ]; then
    /update.sh && { exec $0 "$@"; exit 0; } ||{ echo "[ERR] Can't update, please check networking or update the container. "; return 1; }
  fi; \
  return 0
}
function update_mmdb {
  # Download the MaxMind GeoLite2 country database used by clash for
  # GeoIP rules and install it as ${CONFIG_PATH}/Country.mmdb. The whole
  # pipeline is chained with && so a failed download leaves any existing
  # Country.mmdb untouched; /tmp is wiped afterwards.
  # NOTE(review): this unauthenticated geolite.maxmind.com URL has been
  # discontinued by MaxMind (a license key is now required) — confirm and
  # switch to a working mirror if downloads fail.
  echo "$(date +%Y-%m-%d\ %T) Updating MMDB.." && \
  wget http://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.tar.gz -O /tmp/GeoLite2-Country.tar.gz && \
  tar zxvf /tmp/GeoLite2-Country.tar.gz -C /tmp && mkdir -p $CONFIG_PATH && \
  mv /tmp/GeoLite2-Country_*/GeoLite2-Country.mmdb ${CONFIG_PATH}/Country.mmdb && rm -fr /tmp/*
}
function check_config {
  # Ensure cg.conf and config.yml exist under ${CONFIG_PATH}; copy the
  # bundled samples for any missing file and exit so the user can edit
  # them. Fetches Country.mmdb on first run, then sources cg.conf so its
  # settings (proxy_server, ipts_intranet, ...) become globals.
  NEED_EXIT="false"
  # If a config file is missing, copy the sample template.
  if [ ! -f "${CONFIG_PATH}/cg.conf" ]; then
    cp /sample_config/cg.conf "$CONFIG_PATH"
    echo "[ERR] No cg.conf, sample file copied, please configure it."
    NEED_EXIT="true"
  fi; \
  if [ ! -f "${CONFIG_PATH}/config.yml" ]; then
    cp /sample_config/config.yml "$CONFIG_PATH"
    echo "[ERR] No config.yml, sample file copied, please configure it."
    NEED_EXIT="true"
  fi; \
  # Exit only after reporting every missing file.
  if [ "$NEED_EXIT" = "true" ]; then
    exit 1;
  fi; \
  if [ ! -f "${CONFIG_PATH}/Country.mmdb" ]; then
    update_mmdb
  fi
  source "${CONFIG_PATH}/cg.conf"
  return 0
}
function check_snat_rule {
  # Idempotently ensure a MASQUERADE rule for one intranet CIDR exists in
  # the S_NAT chain (check with -C before appending). Honors the cg.conf
  # switch ipts_non_snat=true, which disables SNAT entirely.
  # Generalized: the subnet may now be passed as $1; for backward
  # compatibility it falls back to the caller's $intranet loop variable
  # (start_iptables iterates ipts_intranet and calls this with no args).
  local net="${1:-$intranet}"
  if [ "$ipts_non_snat" != 'true' ]; then
    if ! iptables -t nat -C S_NAT -s "$net" ! -d "$net" -j MASQUERADE &>/dev/null; then
      iptables -t nat -A S_NAT -s "$net" ! -d "$net" -j MASQUERADE
    fi
  fi
}
function flush_iptables {
  # Tear down everything this gateway installed, then wipe all tables.
  echo "$(date +%Y-%m-%d\ %T) flush iptables.."
  local chain table
  # Detach the custom chains from the built-in hooks (errors are
  # silenced: the jump rules may not exist).
  for chain in CLASH HANDLE_DNS NEED_ACCEPT; do
    iptables -t nat -D PREROUTING -j "$chain" &>/dev/null
  done
  iptables -t nat -D POSTROUTING -j S_NAT &>/dev/null
  # Empty and delete the custom chains themselves.
  for chain in CLASH HANDLE_DNS NEED_ACCEPT S_NAT; do
    iptables -t nat -F "$chain" &>/dev/null
    iptables -t nat -X "$chain" &>/dev/null
  done
  # Finally flush and delete user chains in every standard table.
  for table in raw mangle nat filter; do
    iptables -t "$table" -F
    iptables -t "$table" -X
  done
}
function cdr2mask {
  # Convert a CIDR prefix length (0-32) into its netmask rendered as
  # eight hex digits, e.g. 24 -> ffffff00 (used by start_koolproxy to
  # build an iptables mark/mask pair).
  # Bug fix: the old shift-table version returned ff000000 for a /0
  # prefix; /0 now correctly yields 00000000.
  local bits=$1 hex="" octet i
  for i in 1 2 3 4; do
    if (( bits >= 8 )); then
      octet=255
    elif (( bits > 0 )); then
      # Partial octet: top $bits bits set.
      octet=$(( (255 << (8 - bits)) & 255 ))
    else
      octet=0
    fi
    hex="${hex}$(printf '%02x' "$octet")"
    bits=$((bits - 8))
  done
  echo "$hex"
}
function start_iptables {
  # Build the NAT chains that redirect LAN traffic into clash.
  echo "$(date +%Y-%m-%d\ %T) Setting iptables.."
  # Create the custom chains.
  iptables -t nat -N HANDLE_DNS
  iptables -t nat -N NEED_ACCEPT
  iptables -t nat -N CLASH
  iptables -t nat -N S_NAT
  # Resolve proxy server hostnames and pin them in /etc/hosts; entries
  # that are already literal IPv4 addresses are skipped.
  unset server_addrs && \
  for server in "${proxy_server[@]}"; do
    if [ $(grep -Ec '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$' <<< "$server") -eq 0 ]; then
      server_addr="$(getent hosts $server | cut -d' ' -f1)"
      server_addrs+=($server_addr)
      # Replace any stale entry for this hostname.
      if [ -n "$(cat /etc/hosts | grep $server)" ];then
        echo "$(sed "/${server}/d" /etc/hosts)" > /etc/hosts
      fi
      echo "${server_addr} ${server}" >> /etc/hosts
    fi
  done; \
  # Let traffic destined for the proxy servers bypass redirection.
  for server in "${proxy_server[@]}"; do
    iptables -t nat -A NEED_ACCEPT -d $server -j ACCEPT
  done; \
  # Redirect TCP into clash's transparent-proxy port.
  iptables -t nat -A CLASH -p tcp -j REDIRECT --to-ports $proxy_tcport
  for intranet in "${ipts_intranet[@]}"; do
    # Redirect LAN DNS queries to clash's DNS listener on port 60053.
    iptables -t nat -A HANDLE_DNS -p udp -s $intranet --dport 53 -j REDIRECT --to-ports 60053
    # Intranet-to-intranet traffic is accepted untouched.
    iptables -t nat -A NEED_ACCEPT -d $intranet -j ACCEPT
    check_snat_rule
  done
  # Hook the custom chains into PREROUTING/POSTROUTING.
  iptables -t nat -A PREROUTING -j HANDLE_DNS
  iptables -t nat -A PREROUTING -j NEED_ACCEPT
  iptables -t nat -A PREROUTING -j CLASH
  iptables -t nat -A POSTROUTING -j S_NAT
}
function start_koolproxy {
  # Launch the koolproxy ad-filter daemon and splice it into the NAT
  # path. Only active when cg.conf sets ad_filter=kp.
  echo "$(date +%Y-%m-%d\ %T) Starting koolproxy.."
  if [ "$ad_filter" = 'kp' ]; then
    mkdir -p ${CONFIG_PATH}/koolproxydata
    chown -R daemon:daemon ${CONFIG_PATH}/koolproxydata
    #su -s/bin/sh -c'/koolproxy/koolproxy -d -l2 -p65080 -b'${CONFIG_PATH}'/koolproxydata' daemon
    # NOTE(review): --mark presumably makes koolproxy fwmark its own
    # upstream connections so the KP_OUT mark rule below can hand them
    # to clash — confirm against koolproxy documentation.
    /koolproxy/koolproxy -d -l2 --mark -p65080 -b${CONFIG_PATH}/koolproxydata
    iptables -t nat -N KOOLPROXY
    iptables -t nat -N KP_OUT
    # Insert at position 3 so LAN traffic passes KOOLPROXY after the
    # HANDLE_DNS/NEED_ACCEPT jumps but before the CLASH jump; locally
    # generated (koolproxy) traffic leaves via OUTPUT -> KP_OUT.
    iptables -t nat -I PREROUTING 3 -p tcp -j KOOLPROXY
    iptables -t nat -I OUTPUT -p tcp -j KP_OUT
    for intranet in "${ipts_intranet[@]}"; do
      # https://github.com/openwrt-develop/luci-app-koolproxy/blob/master/koolproxy.txt
      iptables -t nat -A KOOLPROXY -s $intranet ! -d $intranet -p tcp -m multiport --dports 80,443 -j REDIRECT --to-ports 65080
      # Encode this intranet CIDR as a fwmark value/mask pair.
      kp_mark_mask=$(cdr2mask $(echo $intranet | awk -F "[./]" '{printf ($5)}'))
      kp_mark=$(echo $intranet | awk -F "[./]" '{printf ("0x%02x", $1)} {printf ("%02x", $2)} {printf ("%02x", $3)} {printf ("%02x", $4)} {printf ("/0x'$kp_mark_mask'\n")}')
      iptables -t nat -A KP_OUT -p tcp -m mark --mark $kp_mark -j CLASH
      # iptables -t nat -A KP_OUT -p tcp -m mark ! --mark $kp_mark -j KOOLPROXY
    done
  fi
}
function stop_koolproxy {
  # Undo everything start_koolproxy set up, then terminate the daemon.
  echo "$(date +%Y-%m-%d\ %T) Stoping koolproxy.."
  iptables -t nat -D PREROUTING -p tcp -j KOOLPROXY &>/dev/null
  # Bug fix: the KP_OUT jump is installed in the OUTPUT chain (see
  # start_koolproxy's `-I OUTPUT -p tcp -j KP_OUT`), so it must be
  # removed from OUTPUT — the old code tried PREROUTING and left the
  # rule behind, which then made `-X KP_OUT` fail too.
  iptables -t nat -D OUTPUT -p tcp -j KP_OUT &>/dev/null
  iptables -t nat -F KOOLPROXY &>/dev/null
  iptables -t nat -X KOOLPROXY &>/dev/null
  iptables -t nat -F KP_OUT &>/dev/null
  iptables -t nat -X KP_OUT &>/dev/null
  killall koolproxy &>/dev/null
}
function start {
  # Turn the container into a gateway: enable IPv4 forwarding and
  # disable ICMP redirects on every interface.
  sysctl -w net.ipv4.ip_forward=1 &>/dev/null
  for dir in $(ls /proc/sys/net/ipv4/conf); do
    sysctl -w net.ipv4.conf.$dir.send_redirects=0 &>/dev/null
  done
  start_iptables
  # Use clash's local DNS for this container itself.
  echo "nameserver 127.0.0.1" > /etc/resolv.conf
  echo "$(date +%Y-%m-%d\ %T) Starting clash.."
  # Run clash in the background, logging to /var/log/clash.log.
  /clash -d /etc/clash-gateway/ &> /var/log/clash.log &
  [ "$ad_filter" = 'kp' ] && start_koolproxy
  # Print this container's eth0 addresses so clients know what to use
  # as gateway/DNS.
  echo -e "IPv4 gateway & dns server: \n`ip addr show eth0 |grep 'inet ' | awk '{print $2}' |sed 's/\/.*//g'`" && \
  echo -e "IPv6 dns server: \n`ip addr show eth0 |grep 'inet6 ' | awk '{print $2}' |sed 's/\/.*//g'`"
}
function stop {
  # Restore a public DNS resolver for this container.
  echo "nameserver 114.114.114.114" > /etc/resolv.conf
  # Clean /etc/hosts: drop the proxy-server entries added by
  # start_iptables (only hostname entries; literal IPv4 addresses were
  # never added there).
  unset server_addrs && \
  for server in "${proxy_server[@]}"; do
    if [ $(grep -Ec '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$' <<< "$server") -eq 0 ]; then
      if [ -n "$(cat /etc/hosts | grep $server)" ];then
        echo "$(sed "/${server}/d" /etc/hosts)" > /etc/hosts
      fi
    fi
  done; \
  stop_koolproxy
  flush_iptables
  echo "$(date +%Y-%m-%d\ %T) Stoping clash.."
  killall clash &>/dev/null; \
  return 0
}
# Verify environment and config files, then dispatch on the action in $1.
# No/unknown argument restarts the gateway (stop && start).
check_env && check_config && \
case $1 in
    start) start;;
    stop) stop;;
    daemon) start && tail -f /var/log/clash.log;;
    # Bug fix: the MMDB refresh lives in update_mmdb; `update` was an
    # undefined name and this action always failed.
    update-mmdb) update_mmdb;;
    *) stop && start;;
esac | true |
0205eccd68214972033f504df4d73e3c611c4369 | Shell | kittiphonp/CAV20Impl | /etc/fix-ambiguous-reference/fix-ambiguous-reference.sh | UTF-8 | 1,102 | 2.671875 | 3 | [] | no_license | . ./../../scripts/env.sh
echo "$myRoot"
code=( DFL DFL_m WP genGraph )
#code=( DFL )
# Copy the patched Java sources into each tool's source tree. All path
# expansions are quoted so a $myRoot containing spaces cannot word-split
# or glob (the old unquoted `cp -f $visitor.java $myRoot/...` could).
for m in "${code[@]}"; do
  echo "Updating $m"
  for visitor in ASTVisitor ASTTraverseModify ASTTraverse Rename SemanticCheck; do
    echo "copying $visitor.java..."
    cp -f "$visitor.java" "$myRoot/mycode/$m/src/parser/visitor/"
    echo "cp -f $visitor.java $myRoot/mycode/$m/src/parser/visitor/"
  done
  for parser in VarList PrismParser; do
    echo "copying $parser.java..."
    cp -f "$parser.java" "$myRoot/mycode/$m/src/parser/"
  done
  for prism in ExplicitModel2MTBDD Modules2MTBDD; do
    echo "copying $prism.java..."
    cp -f "$prism.java" "$myRoot/mycode/$m/src/prism/"
  done
  for pta in DigitalClocks Modules2PTA; do
    echo "copying $pta.java..."
    cp -f "$pta.java" "$myRoot/mycode/$m/src/pta/"
  done
  for simulator in Updater; do
    echo "copying $simulator.java..."
    cp -f "$simulator.java" "$myRoot/mycode/$m/src/simulator/"
  done
  for UI in SimulationView GUISimulatorPathTableModel; do
    echo "copying $UI.java..."
    cp -f "$UI.java" "$myRoot/mycode/$m/src/userinterface/simulator/"
  done
done | true |
a704f535ffc08a3b3e19d32f6bd815931063f620 | Shell | kwschultz/VirtualCalifornia | /test/run_test | UTF-8 | 325 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# run_test
# vc
#
# Created by Michael Sachs on 9/3/13.
#
# Copy the freshly built vc binary into this directory and run it; if
# the build artifact is missing, explain how to produce it.
if [ ! -f ../build/src/vc ]; then
    cd ../
    THE_ROOT=$(pwd)
    echo "Cannot find $THE_ROOT/build/src/vc"
    echo "Make sure you build and install Virtual California as described in the INSTALL document."
else
    echo "Running test..."
    cp ../build/src/vc vc
    ./vc
fi
| true |
bd4e080177d53321663b37d17963f6b3fe6bc229 | Shell | naveen13684/AWS-Learning | /Jenkins CI CD/nginx_ci_cd_loadbalancer_sso.sh | UTF-8 | 6,430 | 3.328125 | 3 | [] | no_license | LB_NAME="STGSSO-NGINX"
# Jenkins CD job endpoints for the two NGINX/SSO deploy jobs (the
# commented-out lines are the staging lastBuild API variants).
cdjob1=http://localhost/jobs/job/test
#cdjob1=http://localhost/jobs/job/stg-nginx-sso/lastBuild/api/json
cdjob2=http://localhost/jobs/job/test1
#cdjob2=http://localhost/jobs/job/stg-nginx-sso1/lastBuild/api/json
region="eu-central-1"
echo -e "jobs are: $cdjob1 $cdjob2"
#aws s3 ls
# Show the current health of the load balancer's instances up front.
aws elb describe-instance-health --region=$region --load-balancer-name $LB_NAME
# Rolling redeploy of the two NGINX/SSO instances behind $LB_NAME:
# for each InService instance, deregister it from the classic ELB,
# trigger the Jenkins CD job matched to its private IP, poll that job
# until SUCCESS, then re-register the instance and wait for InService.
# NOTE(review): the Jenkins credentials ("jenkins:hashpassword") and the
# instance private IPs (IP2/IP3) are hard-coded below — move them to
# configuration/secret storage.
fun ()
{
set -- $cdjob1 $cdjob2
echo -e "jobs are: $cdjob1 $cdjob2"
echo -e "Array jobs $1 and $2"
# Proceed only when the LB reports exactly two registered instances.
if [ `aws elb describe-instance-health --region=$region --load-balancer-name $LB_NAME --query 'InstanceStates[*].[InstanceId]' --output=text | wc -l` -eq 2 ]
then
echo -e "There are two instances for $LB_NAME"
echo -e "Checking instance health..."
# Abort unless every instance is currently InService.
for i in `aws elb describe-instance-health --region=$region --load-balancer-name $LB_NAME --query 'InstanceStates[*].[State]' --output=text`
do
if [ "$i" == "InService" ]
then
echo -e " $i instance status is InService"
else
echo -e "$i instance is out of service. First check the status of all instances!!!"
exit 1
fi
done
# One instance at a time: pull it out of the LB, deploy, put it back.
for i in `aws elb describe-instance-health --region=$region --load-balancer-name $LB_NAME --query 'InstanceStates[*].[InstanceId]' --output=text`
do
#Deregister instance $i from CLB
echo -e "Deregistering $i from $LB_NAME"
aws elb deregister-instances-from-load-balancer --region=$region --load-balancer-name $LB_NAME --instances $i
IP1=`aws ec2 describe-instances --region=$region --instance-ids $i --query "Reservations[].Instances[].PrivateIpAddress" --output=text`
#IP2=`curl -X GET https://username:password@localhost/jobs/view/nginx-CD/job/dev-nginx-sso/config.xml|grep -i siteName|cut -d"@" -f2|cut -d":" -f1`
#IP3=`curl -X GET https://username:password@localhost/jobs/view/nginx-CD/job/dev-nginx-sso1/config.xml|grep -i siteName|cut -d"@" -f2|cut -d":" -f1`
#IP2=`curl -X GET https://username:password@localhost/jobs/view/nginx-CD/job/test/config.xml|grep -i siteName|cut -d"@" -f2|cut -d":" -f1`
#IP3=`curl -X GET https://username:password@localhost/jobs/view/nginx-CD/job/test1/config.xml|grep -i siteName|cut -d"@" -f2|cut -d":" -f1`
IP2="10.178.96.161"
IP3="10.178.96.139"
echo -e "The IP address of $i is IP1=$IP1 \n IP address of from cd dev-nginx-sso is IP2=$IP2 \n IP address from cd job dev-nginx-sso1 is IP=$IP3"
# Match the instance's private IP to its CD job, trigger the job, then
# poll its lastBuild JSON until "result":"SUCCESS" appears.
if [ "$IP1" == "$IP2" ]
then
echo -e "IP1:$IP1 is same as IP2:$IP2"
echo -e "Running $cdjob1"
curl -X POST $cdjob1/build --user jenkins:hashpassword
while (true)
do
echo -e "checking job $cdjob1 status...."
curl $cdjob1/lastBuild/api/json --user jenkins:hashpassword | grep -i "\"result\"\:\"SUCCESS\""
if [ $? -eq 0 ]
then
echo -e "nginx $cdjob1 job completed"
break
else
sleep 2
echo -e "$cdjob1 job still running"
fi
done
elif [ "$IP1" == "$IP3" ]
then
echo -e "IP1:$IP1 is same as IP3:$IP3"
echo -e "running $cdjob2"
curl -X POST $cdjob2/build --user jenkins:hashpassword
while (true)
do
echo -e "checking job $cdjob2 status...."
curl $cdjob2/lastBuild/api/json --user jenkins:hashpassword | grep -i "\"result\"\:\"SUCCESS\""
if [ $? -eq 0 ]
then
echo -e "nginx $cdjob2 job completed"
break
else
sleep 2
echo -e "$cdjob2 job still running"
fi
done
else
echo -e "Please check instance in the LoadBalancer!"
fi
#Run the jobs one by one.
#echo -e " From array first job is Running $1 job:"
#curl -X POST $1/build --user jenkins:hashpassword
#Register instane to CLB
# Put the instance back into the LB and poll (every 25 s) until it is
# reported InService again.
echo -e "Registering $i instance back to $LB_NAME"
aws elb register-instances-with-load-balancer --region=$region --load-balancer-name $LB_NAME --instances $i
while (true)
do
status=`aws elb describe-instance-health --region=$region --load-balancer-name $LB_NAME --instances=$i --query 'InstanceStates[*].[State]' --output=text`
echo -e "Status of $i: $status"
sleep 25
if [ `aws elb describe-instance-health --region=$region --load-balancer-name $LB_NAME --instances=$i --query 'InstanceStates[*].[State]' --output=text` == "InService" ]
then
status=`aws elb describe-instance-health --region=$region --load-balancer-name $LB_NAME --instances=$i --query 'InstanceStates[*].[State]' --output=text`
echo -e "Status of $i: $status"
echo "$i is registered successfully to $LB_NAME"
break
elif [ `aws elb describe-instance-health --region=$region --load-balancer-name $LB_NAME --instances=$i --query 'InstanceStates[*].[State]' --output=text` == "OutOfService" ]
then
status=`aws elb describe-instance-health --region=$region --load-balancer-name $LB_NAME --instances=$i --query 'InstanceStates[*].[State]' --output=text`
echo -e "Status of $i: $status"
echo "$i status is still unhealth...wating to become InService"
fi
done
#shift
done
else
echo "There are no two instances for this load balancer. Mininum instances per Nginx load-balancer is 2"
exit 1
fi
}
fun
| true |
3a4b987f9a113d0bfb7c80f337735e84dc429750 | Shell | obsidiansystems/ckb | /.github/workflows/sync/terraform/scripts/render_main_tf.sh | UTF-8 | 1,050 | 4.21875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# # Intention
#
# We plan to apply multiple ec2 instances on different regions. While I didn't
# find a simply way to do this on Terraform. As a result, I created this script.
# It accepts a list of regions and generates Terraform configurations that
# declares multiple ec2 instances on these given regions.
#
# # Usage Example
#
# ```shell
# ./render_main_tf.sh ap-northeast-1 ap-northeast-2 > main.tf
# ```
SCRIPT_PATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
TEMPLATE_PATH=$SCRIPT_PATH/main.tf.template
TEMPLATE_REGION="ap-northeast-1"
regions="$@"

# Reject duplicate regions: rendering the template twice for one region
# would emit duplicate Terraform resource declarations.
duplicated=$(echo "$regions" \
    | tr ' ' '\n' \
    | awk '
        BEGIN { duplicated = "" }
        {
          count[$1]++;
          if (count[$1] > 1) duplicated=$1
        }
        END { print duplicated }
    ')
if [ ! -z "$duplicated" ]; then
    # Bug fix: previously the script reported the error but kept
    # rendering, producing invalid Terraform on stdout. Report to stderr
    # (stdout is the rendered artifact) and abort.
    echo "ERROR: Duplicated regions \"${duplicated}\"" >&2
    exit 1
fi

# Emit one rendered copy of the template per region, to stdout.
for region in $regions; do
    sed "s/$TEMPLATE_REGION/$region/g" "$TEMPLATE_PATH"
done
| true |
5e66c8f5bac99910f2e8e5c1020a72d5d3c81a99 | Shell | WyattRuttle/FA20-SYS-320-01 | /Week03/homework/menu.bash | UTF-8 | 1,602 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# Storyline: Menu for admin, VPN, and Security functions
function invalid_opt(){
 # Tell the user the selection was not recognized, then pause briefly so
 # the message is readable before the next menu clears the screen.
 printf '\n%s\n\n' "Invalid option"
 sleep 2
}
function menu() {
 # Top-level interactive menu; each choice calls a sub-menu function.
 # NOTE(review): security_menu is not defined in this file — presumably
 # provided elsewhere or still TODO; also, an invalid entry falls through
 # to admin_menu rather than redisplaying this menu — confirm intended.
 #clears the screen
 clear
 echo "[1] Admin Menu"
 echo "[2] Security Menu"
 echo "[3] Exit"
 read -p "Please enter a choice above: " choice
 case "$choice" in
  1) admin_menu
  ;;
  2) security_menu
  ;;
  3) exit 0
  ;;
  *)
  invalid_opt
  admin_menu
  ;;
 esac
}
function admin_menu(){
 # Admin menu: process listing, socket listing and the VPN sub-menu.
 # The trailing self-call redisplays the menu after every action (note:
 # this recurses rather than loops, so each choice nests a call frame).
 clear
 echo "[L]ist Running Processes"
 echo "[N]etwork Sockets"
 echo "[V]PN Menu"
 echo "[4] Exit"
 read -p "Please enter a choice above: " choice
 case "$choice" in
  L|l) ps -ef |less
  ;;
  N|n) netstat -an --inet |less
  ;;
  V|v) vpn_menu
  ;;
  4) exit 0
  ;;
  *)
  invalid_opt
  ;;
 esac
 admin_menu
}
function vpn_menu() {
 # WireGuard peer-management menu; redisplays itself after each action.
 clear
 echo "[A]dd a peer"
 echo "[D]elete a peer"
 #Add another switch that checks if user exists
 echo "[C]heck if user exists in conf file"
 echo "[B]ack to admin menu"
 echo "[M]ain menu"
 echo "[E]xit"
 read -p "Please select an option: " choice
 case "$choice" in
  A|a)
     bash peer.bash
     tail -6 wg0.conf |less
  ;;
  D|d) #create a prompt for the user
       # Call the manage-user.bash
       # pass the proper switches and argument to delete the user
  ;;
  C|c)
    read -p "Enter the username you would like to check:" username
    # Bug fix: the original condition was `grep -Fx "$username" wg0.conf]`
    # — the stray "]" made grep search a file literally named "wg0.conf]",
    # and the branches were inverted (grep succeeds when the line IS
    # present). Use -q to test silently and report the correct branch.
    if grep -Fxq "$username" wg0.conf
    then
      echo "$username is in file"
    else
      echo "$username not in file"
    fi
  ;;
  B|b) admin_menu
  ;;
  M|m) menu
  ;;
  E|e) exit 0
  ;;
  *)
  invalid_opt
  ;;
 esac
 vpn_menu
}
#call the main function
menu
| true |
ea4fa7c270dcf16dac54d76a751060e3d0b50c0e | Shell | scollis/SimRadar | /radarsim_single.bsub | UTF-8 | 1,064 | 2.921875 | 3 | [] | no_license | #!/bin/bash
#
#BSUB -q cuda
#BSUB -x
#BSUB -o /home/boonleng/simradar/radarsim_single_stdout.txt
#BSUB -e /home/boonleng/simradar/radarsim_single_stderr.txt
#BSUB -W 00:30
#BSUB -N
#BSUB -n 1
#BSUB -J "radarsim"
# Echo the arguments to stdout and append the same line to the log file
# named by the global $errlog. Quoting "$@" preserves argument spacing
# and stops glob characters from expanding (the old unquoted `echo $@`
# did not).
function decho() {
    echo "$@"
    echo "$@" >> "${errlog}"
}
# CUDA toolchain/runtime environment for the radarsim binary.
export CUDA_PATH="/opt/local/software/Cuda/4.2.9"
export LD_LIBRARY_PATH="/usr/lib64:${CUDA_PATH}/lib64:${CUDA_PATH}/open64/lib:${CUDA_PATH}/lib:${LD_LIBRARY_PATH}"
export PATH="${CUDA_PATH}/bin:${CUDA_PATH}/open64/bin:${PATH}"
# Log file that decho mirrors its output into.
errlog="radarsim_single_stderr.txt"
nowstr=`date`
cd /home/boonleng/radarsim
decho "================== $nowstr ================="
decho `pwd`
decho "radarsim"
#./radarsim -v -o -p 2400 --seed 1000 -O /home/boonleng/Downloads/big/ --tightbox --density 10000 --concept DB -W 1000 --noprogress
# Run simulations forever, bumping the RNG seed every iteration; the LSF
# wall-clock limit (#BSUB -W 00:30 above) eventually terminates the job.
seed=2000
while [ 1 ]; do
    decho "-----------------------------------------------------------------"
    ./radarsim -v -o -p 2400 --seed ${seed} -O /home/boonleng/Downloads/big/ --tightbox --density 10000 --concept DB -W 1000 --noprogress
    seed=$((seed+1))
done
| true |
074011c1cb91203294466ea218348f57a5a5d864 | Shell | msys2/MINGW-packages | /mingw-w64-opencolorio/PKGBUILD | UTF-8 | 3,368 | 2.765625 | 3 | [
"BSD-3-Clause"
] | permissive | # Maintainer: Alexey Pavlov <alexpux@gmail.com>
_realname=opencolorio
pkgbase=mingw-w64-${_realname}
pkgname="${MINGW_PACKAGE_PREFIX}-${_realname}"
conflicts=("${MINGW_PACKAGE_PREFIX}-${_realname}-git")
replaces=("${MINGW_PACKAGE_PREFIX}-${_realname}-git")
pkgver=2.2.1
pkgrel=4
pkgdesc="A color management framework for visual effects and animation (mingw-w64)"
arch=('any')
mingw_arch=('mingw32' 'mingw64' 'ucrt64' 'clang64' 'clang32' 'clangarm64')
url="https://opencolorio.org/"
license=('spdx:BSD-3-Clause')
depends=("${MINGW_PACKAGE_PREFIX}-expat"
"${MINGW_PACKAGE_PREFIX}-freeglut"
"${MINGW_PACKAGE_PREFIX}-gcc-libs"
"${MINGW_PACKAGE_PREFIX}-glew"
"${MINGW_PACKAGE_PREFIX}-imath"
"${MINGW_PACKAGE_PREFIX}-lcms2"
"${MINGW_PACKAGE_PREFIX}-minizip-ng"
"${MINGW_PACKAGE_PREFIX}-openexr"
"${MINGW_PACKAGE_PREFIX}-yaml-cpp")
makedepends=("${MINGW_PACKAGE_PREFIX}-cc"
"${MINGW_PACKAGE_PREFIX}-cmake"
"${MINGW_PACKAGE_PREFIX}-ninja"
"${MINGW_PACKAGE_PREFIX}-pkg-config"
"${MINGW_PACKAGE_PREFIX}-pybind11"
"${MINGW_PACKAGE_PREFIX}-pystring"
"${MINGW_PACKAGE_PREFIX}-python"
"${MINGW_PACKAGE_PREFIX}-openimageio"
"${MINGW_PACKAGE_PREFIX}-openshadinglanguage"
"${MINGW_PACKAGE_PREFIX}-zlib")
optdepends=("${MINGW_PACKAGE_PREFIX}-python: Python bindings")
source=(${_realname}-${pkgver}.tar.gz::https://github.com/imageworks/OpenColorIO/archive/v${pkgver}.tar.gz
0001-fix-build-against-minizip-ng-4.patch
0003-fix-python-sitearch.patch)
sha256sums=('36f27c5887fc4e5c241805c29b8b8e68725aa05520bcaa7c7ec84c0422b8580e'
'2f63b50e09d493b0e29d98ce579722a53a296834bf244819823764b41f5bedbd'
'73660c114f5adfd6c089d0e91ae776601734e8b1ad4f6e773867d7eee4a1a5fb')
apply_patch_with_msg() {
  # Apply each named patch (looked up in $srcdir) with -p1, announcing
  # it via makepkg's msg2 helper first.
  for _patch; do
    msg2 "Applying $_patch"
    patch -Np1 -i "${srcdir}/$_patch"
  done
}
prepare() {
  # makepkg prepare(): patch the unpacked OpenColorIO sources.
  cd ${_realname}-${pkgver}
  # https://gitlab.archlinux.org/archlinux/packaging/packages/opencolorio/-/blob/2.2.1-8/minizip-ng-4.patch
  apply_patch_with_msg \
    0001-fix-build-against-minizip-ng-4.patch
  apply_patch_with_msg \
    0003-fix-python-sitearch.patch
}
build() {
  # makepkg build(): out-of-tree CMake/Ninja build per MSYSTEM flavor.
  mkdir -p ${srcdir}/build-${MSYSTEM} && cd ${srcdir}/build-${MSYSTEM}
  # Release unless makepkg's debug option is enabled.
  declare -a extra_config
  if check_option "debug" "n"; then
    extra_config+=("-DCMAKE_BUILD_TYPE=Release")
  else
    extra_config+=("-DCMAKE_BUILD_TYPE=Debug")
  fi
  # MSYS2_ARG_CONV_EXCL stops MSYS2 from path-converting the
  # -DCMAKE_INSTALL_PREFIX= argument; ${extra_config} is intentionally
  # unquoted so it word-splits into separate cmake arguments.
  MSYS2_ARG_CONV_EXCL="-DCMAKE_INSTALL_PREFIX=" \
  "${MINGW_PREFIX}"/bin/cmake \
    -GNinja \
    ${extra_config} \
    -DCMAKE_INSTALL_PREFIX="${MINGW_PREFIX}" \
    -DOCIO_BUILD_TESTS=OFF \
    -DOCIO_BUILD_GPU_TESTS=OFF \
    -DPython_EXECUTABLE=${MINGW_PREFIX}/bin/python.exe \
    ../${_realname}-${pkgver}
  "${MINGW_PREFIX}"/bin/cmake.exe --build .
}
package() {
  # makepkg package(): install into $pkgdir and ship the license.
  cd build-${MSYSTEM}
  DESTDIR="${pkgdir}" "${MINGW_PREFIX}"/bin/cmake.exe --install .
  install -Dm644 "${srcdir}"/${_realname}-${pkgver}/LICENSE "${pkgdir}${MINGW_PREFIX}"/share/licenses/${_realname}/LICENSE
  # Rewrite absolute build prefixes (both Windows and POSIX spellings)
  # in the exported CMake package files to the relocatable
  # ${_IMPORT_PREFIX} placeholder.
  local PREFIX_WIN=$(cygpath -wm ${MINGW_PREFIX})
  for _f in "${pkgdir}${MINGW_PREFIX}"/lib/cmake/OpenColorIO/*.cmake; do
    sed -e "s|${PREFIX_WIN}|\$\{_IMPORT_PREFIX\}|g" -i "${_f}"
    sed -e "s|${MINGW_PREFIX}|\$\{_IMPORT_PREFIX\}|g" -i "${_f}"
  done
}
| true |
b8edac1e2fa599ba0121e0fe0325a6cd255d1943 | Shell | diego-hermida/ClimateChangeApp | /install-ci.sh | UTF-8 | 8,151 | 4 | 4 | [
"MIT"
] | permissive | #! /bin/bash
# ---------- Functions ---------- #
source "./utilities/bash_util.sh"
# Displays script usage and exits.
# :param $1: Exit code.
function usage () {
    # Print the CI installer's help text and terminate with exit code $1.
    # exit_with_message is provided by utilities/bash_util.sh (sourced
    # at the top of this script).
    exit_with_message 1 "Installs the application components for CI purposes. This has a few implications:
        \n\t• Docker containers won't expose any port nor volume to the outside.
        \n\t• Docker images's tag will end with \"_ci\". Example: foo/baz:latest_ci.
        \n\t• Docker container's names will end with \"_ci\". Example: foo_ci.
        \n\t• Production containers won't be stopped. This installation won't affect any of them.
        \n\t• Coverage and test result reports will be generated inside the Docker images.
        \n\n> usage: install-ci.sh [-h] [--help] [--version]
        \n• -h, --help: shows this message
        \n• --version: displays app's version" $1;
}
# Parsing arguments
EXPECTED_INPUT=":h-:"
while getopts "$EXPECTED_INPUT" ARG; do
case "${ARG}" in
h) usage 0 ;;
-) case ${OPTARG} in
help) usage 0 ;;
version) show_app_version ;;
:) exit_with_message 1 "Illegal option: \"--$OPTARG\" requires an argument" >&2 ;;
*) exit_with_message 1 "Unrecognized option: --$OPTARG" >&2 ;;
esac
;;
:) exit_with_message 1 "Illegal option: \"-$OPTARG\" requires an argument" >&2 ;;
*) exit_with_message 1 "Unrecognized option: -$OPTARG" >&2 ;;
esac
done
# Setting CI deploy arguments
DATA_GATHERING_SUBSYSTEM_DEPLOY_ARGS="--all --with-test-reports";
API_DEPLOY_ARGS="--all --with-test-reports";
DATA_CONVERSION_SUBSYSTEM_DEPLOY_ARGS="--all --with-test-reports";
WEB_APPLICATION_SUBSYSTEM_DEPLOY_ARGS="--all --with-test-reports";
TELEGRAM_CONFIGURATOR_DEPLOY_ARGS="--with-test-reports";
UTILITIES_DEPLOY_ARGS="--with-test-reports";
SUPERUSER_USERNAME="FOO"
SUPERUSER_PASSWORD="BAZ"
# Setting CI values for ports
export MONGODB_PORT=27017;
export API_PORT=5000;
export POSTGRES_PORT=5432;
# ---------- Installation ---------- #
message 5 "[ACTION] Creating Docker CI containers. Application containers will not be affected.";
# MongoDB component
message 4 "[COMPONENT] MongoDB";
# Deleting the MongoDB service if it was already been created: Brand-new container.
if [ "$(docker ps -aq -f name=mongodb_ci)" ]; then
message -1 "[INFO] Removing previous MongoDB CI container.";
docker stop mongodb_ci;
docker rm mongodb_ci;
fi
# Launching the MongoDB service
message -1 "[INFO] Launching the MongoDB CI service.";
docker-compose -f docker-compose-ci.yml up -d mongodb_ci;
if [ $? != 0 ]; then
exit_with_message 1 "[ERROR] The MongoDB CI service could not be initialized." 1;
fi
# Getting internal IP address
MONGODB_IP="$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' mongodb_ci)"
if [ $? != 0 ]; then
exit_with_message 1 "[ERROR] Could not retrieve the local MongoDB IP address." 1;
else
message -1 "[INFO] Using \"$MONGODB_IP\" as the MongoDB IP address.";
fi
# PostgreSQL component
message 4 "[COMPONENT] PostgreSQL";
# Deleting the PostgreSQL service if it was already been created: Brand-new container.
if [ "$(docker ps -aq -f name=postgres_ci)" ]; then
message -1 "[INFO] Removing previous PostgreSQL CI container.";
docker stop postgres_ci;
docker rm postgres_ci;
fi
# Launching the PostgreSQL service
message -1 "[INFO] Launching the PostgreSQL CI service.";
docker-compose -f docker-compose-ci.yml up -d postgres_ci;
if [ $? != 0 ]; then
exit_with_message 1 "[ERROR] The PostgreSQL CI service could not be initialized." 1;
fi
# Getting internal IP address
POSTGRES_IP="$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' postgres_ci)"
if [ $? != 0 ]; then
exit_with_message 1 "[ERROR] Could not retrieve the local PostgreSQL IP address." 1;
else
message -1 "[INFO] Using \"$POSTGRES_IP\" as the PostgeSQL IP address.";
fi
# Telegram Configurator component
message 4 "[COMPONENT] Telegram Configurator";
# Building the Telegram Configurator component
docker-compose -f docker-compose-ci.yml build \
--build-arg DEPLOY_ARGS="${TELEGRAM_CONFIGURATOR_DEPLOY_ARGS}" telegram_bot_ci;
if [ $? != 0 ]; then
exit_with_message 1 "> The Telegram Configurator CI image could not be built." 1;
fi
# Utilities component
message 4 "[COMPONENT] Utilities";
docker-compose -f docker-compose-ci.yml build \
--build-arg MONGODB_IP=${MONGODB_IP} --build-arg MONGODB_PORT=${MONGODB_PORT} \
--build-arg POSTGRES_IP=${POSTGRES_IP} --build-arg POSTGRES_PORT=${POSTGRES_PORT} \
--build-arg DEPLOY_ARGS="${UTILITIES_DEPLOY_ARGS}" utilities_ci;
if [ $? != 0 ]; then
exit_with_message 1 "[ERROR] The Utilities CI component could not be built." 1;
fi
# Data Gathering Subsystem component
message 4 "[COMPONENT] Data Gathering Subsystem";
# Building the Data Gathering Subsystem component
docker-compose -f docker-compose-ci.yml build \
--build-arg MONGODB_IP=${MONGODB_IP} --build-arg MONGODB_PORT=${MONGODB_PORT} \
--build-arg DEPLOY_ARGS="${DATA_GATHERING_SUBSYSTEM_DEPLOY_ARGS}" data_gathering_subsystem_ci;
if [ $? != 0 ]; then
exit_with_message 1 "> The Data Gathering Subsystem CI image could not be built." 1;
fi
# API component
message 4 "[COMPONENT] API";
# Deleting the API service if it was already been created: Brand-new container.
if [ "$(docker ps -aq -f name=api_ci)" ]; then
message -1 "[INFO] Removing previous API CI container.";
docker stop api_ci;
docker rm api_ci;
fi
# Building the API service
message -1 "[INFO] Building the API CI image."
docker-compose -f docker-compose-ci.yml build \
--build-arg MONGODB_IP=${MONGODB_IP} --build-arg MONGODB_PORT=${MONGODB_PORT} \
--build-arg DEPLOY_ARGS="${API_DEPLOY_ARGS}" api_ci;
if [ $? != 0 ]; then
exit_with_message 1 "[INFO] The API CI image could not be built." 1;
fi
docker-compose -f docker-compose-ci.yml up -d api_ci;
# Data Conversion Subsystem component
message 4 "[COMPONENT] Data Conversion Subsystem";
# Getting internal IP address
API_IP="$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' api_ci)"
if [ $? != 0 ]; then
exit_with_message 1 "[ERROR] Could not retrieve the local API IP address." 1;
else
message -1 "[INFO] Using \"$API_IP\" as the API IP address.";
fi
# Building the Data Conversion Subsystem component
docker-compose -f docker-compose-ci.yml build \
--build-arg POSTGRES_IP=${POSTGRES_IP} --build-arg POSTGRES_PORT=${POSTGRES_PORT} \
--build-arg API_IP=${API_IP} --build-arg API_PORT=${API_PORT} \
--build-arg DEPLOY_ARGS="${DATA_CONVERSION_SUBSYSTEM_DEPLOY_ARGS}" data_conversion_subsystem_ci;
if [ $? != 0 ]; then
exit_with_message 1 "> The Data Conversion Subsystem CI image could not be built." 1;
fi
# Web Application Subsystem component
message 4 "[COMPONENT] Web Application Subsystem";
# Building the Web Application Subsystem component
docker-compose -f docker-compose-ci.yml build \
--build-arg POSTGRES_IP=${POSTGRES_IP} --build-arg POSTGRES_PORT=${POSTGRES_PORT} \
--build-arg SUPERUSER_USERNAME=${SUPERUSER_USERNAME} --build-arg SUPERUSER_PASSWORD=${SUPERUSER_PASSWORD} \
--build-arg DEPLOY_ARGS="${WEB_APPLICATION_SUBSYSTEM_DEPLOY_ARGS}" web_application_subsystem_ci
if [ $? != 0 ]; then
exit_with_message 1 "> The Web Application Subsystem image could not be built." 1;
fi
# Displaying installation summary
echo "";
message 2 "[SUCCESS] Installation results (CI):";
message 2 "\t• API: built";
message 2 "\t• Data Conversion Subsystem: built";
message 2 "\t• Data Gathering Subsystem: built";
message 2 "\t• Web Application Subsystem: built";
message 2 "\t• MongoDB: up";
message 2 "\t• PostgreSQL: up";
echo "";
| true |
ed95498d1b0752549af420f2894f7a19424469b6 | Shell | siddontang/go-leveldb | /dev.sh | UTF-8 | 570 | 3.25 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Install prefixes of the native snappy and leveldb libraries that the
# cgo bindings compile and link against.
SNAPPY_DIR=/usr/local/snappy
LEVELDB_DIR=/usr/local/leveldb
function add_path()
{
    # Emit $1 with $2 appended (colon separated) — but only when $2 is
    # an existing directory that is not already listed in $1; otherwise
    # emit $1 unchanged.
    local pathlist=$1
    local dir=$2
    if [ ! -d "$dir" ]; then
        echo "$pathlist"
        return
    fi
    case ":$pathlist:" in
        *":$dir:"*) echo "$pathlist" ;;
        *)          echo "$pathlist:$dir" ;;
    esac
}
# cgo compile/link flags pointing at the local snappy/leveldb installs.
export CGO_CFLAGS="-I$LEVELDB_DIR/include -I$SNAPPY_DIR/include"
export CGO_CXXFLAGS="-I$LEVELDB_DIR/include -I$SNAPPY_DIR/include"
export CGO_LDFLAGS="-L$LEVELDB_DIR/lib -L$SNAPPY_DIR/lib -lsnappy"
# Make the shared libraries resolvable at run time; add_path skips
# missing directories and avoids duplicate entries.
export LD_LIBRARY_PATH=$(add_path $LD_LIBRARY_PATH $SNAPPY_DIR/lib)
export LD_LIBRARY_PATH=$(add_path $LD_LIBRARY_PATH $LEVELDB_DIR/lib)
| true |
382f999b31ea780bc9a38c336e21eb8b3153132f | Shell | rk9109/dotfiles | /panel/panel | UTF-8 | 5,090 | 3.703125 | 4 | [] | no_license | #!/usr/bin/env bash
#
# lemonbar script
#
# Single-instance guard: refuse to start a second bar.
if [ $(pgrep -cx lemonbar) -gt 0 ]; then
    echo "The panel is running." >&2
    exit 1
fi
# On exit or interrupt, clear the trap and kill the whole process group
# (all the background status loops defined below).
trap 'trap - TERM; kill 0' INT TERM QUIT EXIT
# load configuration
# panel_config supplies colors (BLUE, RED, FG_DARK), alert thresholds
# (*_ALERT) and poll intervals (*_SLEEP) used by the functions below.
PANEL_CONFIG="$HOME/.panel/panel_config"
if [ -f ${PANEL_CONFIG} ]; then
    source ${PANEL_CONFIG}
else
    echo "${PANEL_CONFIG} not found." >&2
    exit 1
fi
# create panel FIFO
PANEL_FIFO=/tmp/panel-fifo
[ -e "${PANEL_FIFO}" ] && rm "${PANEL_FIFO}"
mkfifo "${PANEL_FIFO}"
# workspace indicator
function workspace() {
while true; do
# get current workspace name
local cur=$(i3-msg -t get_workspaces | jq -r '.[] | select(.focused==true).name')
# get workspace names
local spaces=($(xprop -root _NET_DESKTOP_NAMES | awk '{$1=$2=""; print $0}' | sed -e "s/[^0-9]/ /g"))
# indicate current workspace
for i in "${!spaces[@]}"; do
if [ "${spaces[$i]}" = "$cur" ]; then
spaces[$i]="%{A:i3-msg -q workspace ${spaces[$i]}:}%{B${BLUE}} ${spaces[$i]} %{B-}%{A}";
else
spaces[$i]="%{A:i3-msg -q workspace ${spaces[$i]}:} ${spaces[$i]} %{A}";
fi
done
# concatenate strings in `spaces`
spaces=$(IFS=" "; echo "${spaces[*]}")
echo "WORKSPACE_STR ${spaces}"
sleep ${WORKSPACE_SLEEP}
done
}
# CPU status information
function cpu() {
while true; do
local cpu=$(ps -eo pcpu | awk 'BEGIN {sum=0} {sum+=$1} END {print int(sum)}')
if [[ ${cpu} -ge ${CPU_ALERT} ]]; then
echo "CPU_STR %{F${FG_DARK}}CPU:%{F-} %{F${RED}}${cpu}%{F-}"
else
echo "CPU_STR %{F${FG_DARK}}CPU:%{F-} ${cpu}"
fi
sleep ${CPU_SLEEP}
done
}
# RAM status information
function ram() {
while true; do
local ram=$(free | grep Mem | awk '{print int($3/$2 * 100)}')
if [[ ${ram} -ge ${RAM_ALERT} ]]; then
echo "RAM_STR %{F${FG_DARK}}RAM:%{F-} %{F${RED}}${ram}%{F-}"
else
echo "RAM_STR %{F${FG_DARK}}RAM:%{F-} ${ram}"
fi
sleep ${RAM_SLEEP}
done
}
# battery status information
function battery() {
while true; do
local battery=$(cat /sys/class/power_supply/BAT1/capacity)
if [[ ${battery} -lt ${BATTERY_ALERT} ]]; then
echo "BATTERY_STR %{F${FG_DARK}}BAT:%{F-} %{F${RED}}${battery}%{F-}"
else
echo "BATTERY_STR %{F${FG_DARK}}BAT:%{F-} ${battery}"
fi
sleep ${BATTERY_SLEEP}
done
}
# volume indicator
function volume() {
while true; do
local volume=$(pactl list sinks | grep Volume | head -n1 | awk '{print $5}')
local mute=$(pactl list sinks | grep Mute | awk '{print $2}')
if [[ ${mute} == "yes" ]]; then
echo "VOLUME_STR %{F${FG_DARK}}VOL:%{F-} %{F${RED}}mute%{F-}"
else
echo "VOLUME_STR %{F${FG_DARK}}VOL:%{F-} ${volume//%}"
fi
sleep ${VOLUME_SLEEP}
done
}
# network status information
function network() {
while true; do
local network=$(iwgetid -r)
if [[ -z $network ]]; then
echo "NETWORK_STR %{F${FG_DARK}}NET:%{F-} %{F${RED}}none%{F-}"
else
echo "NETWORK_STR %{F${FG_DARK}}NET:%{F-} ${network}"
fi
sleep ${NETWORK_SLEEP}
done
}
# brightness status information
function brightness() {
while true; do
local brightness=$(backlight)
echo "BRIGHTNESS_STR %{F${FG_DARK}}BRT:%{F-} ${brightness}"
sleep ${BRIGHTNESS_SLEEP}
done
}
# current time
function clock() {
while true; do
local clock="$(date +'%a %d %I:%M')"
echo "CLOCK_STR ${clock}"
sleep ${CLOCK_SLEEP}
done
}
# send output into panel FIFO
workspace > "${PANEL_FIFO}" &
cpu > "${PANEL_FIFO}" &
ram > "${PANEL_FIFO}" &
battery > "${PANEL_FIFO}" &
volume > "${PANEL_FIFO}" &
network > "${PANEL_FIFO}" &
brightness > "${PANEL_FIFO}" &
clock > "${PANEL_FIFO}" &
while read -r line; do
case $line in
WORKSPACE_STR*)
fn_workspace="${line#WORKSPACE_STR}"
;;
CPU_STR*)
fn_cpu="${line#CPU_STR}"
;;
RAM_STR*)
fn_ram="${line#RAM_STR}"
;;
BATTERY_STR*)
fn_battery="${line#BATTERY_STR}"
;;
VOLUME_STR*)
fn_volume="${line#VOLUME_STR}"
;;
NETWORK_STR*)
fn_network="${line#NETWORK_STR}"
;;
BRIGHTNESS_STR*)
fn_brightness="${line#BRIGHTNESS_STR}"
;;
CLOCK_STR*)
fn_clock="${line#CLOCK_STR}"
;;
esac
echo "%{l}${fn_workspace}" \
"%{r}${fn_cpu}" \
"${SEP}${fn_ram}" \
"${SEP}${fn_battery}" \
"${SEP}${fn_brightness}" \
"${SEP}${fn_volume}" \
"${SEP}${fn_network}" \
"${SEP}${fn_clock}" \
"${SEP}"
done < "${PANEL_FIFO}" | lemonbar ${OPTIONS} | sh > /dev/null
# vim: ft=sh
| true |
613880452bfb266ddcafde24763693156280fb76 | Shell | Edznux/utilities | /nginx_add_subdomain_to_port.sh | UTF-8 | 926 | 4 | 4 | [
"MIT"
] | permissive | #!/bin/bash
NGINX_BASE="/etc/nginx"
NGINX_SITE_AVAILABLE="$NGINX_BASE/sites-available"
NGINX_SITE_ENABLE="$NGINX_BASE/sites-enabled"
# Check root
if [ "$(id -u)" != "0" ]; then
echo "This script must be run as root" 1>&2
exit 1
fi
# Check arguments
if [ $# -lt 3 ]; then
echo "Usage :"
echo "$0 <application-name> <subdomain.domain.tld> <port>"
exit
fi
touch "$NGINX_SITE_AVAILABLE/$1"
echo "server {
listen 80;
server_name $2;
location / {
proxy_pass http://localhost:$3;
}
}" > "$NGINX_SITE_AVAILABLE/$1"
cat "$NGINX_SITE_AVAILABLE/$1"
read -p "Do you wish to enable this config file ?" yn
case $yn in
[Yy]* )
ln -s "$NGINX_SITE_AVAILABLE/$1" "$NGINX_SITE_ENABLE/$1";;
[Nn]* )
exit;;
* )
echo "Please answer yes or no.";;
esac
read -p "Do you wish to restart nginx ?" yn
case $yn in
[Yy]* )
service nginx restart;;
[Nn]* )
exit;;
* )
echo "Please answer yes or no.";;
esac
| true |
0139e9db3ed2f7a0d44601e93b391cedd06242e8 | Shell | yoyo1978/bimdroid | /power-controller.sh | UTF-8 | 1,461 | 3.46875 | 3 | [
"Apache-2.0"
] | permissive | #!/system/bin/sh
# This is the GPIO pin connected to the lead on the switch labeled OUT
GPIOpin1=191 # 191 pin is 3 pin from the top on the left on XU3/XU4
echo "$GPIOpin1" > /sys/class/gpio/export
echo "in" > /sys/class/gpio/gpio$GPIOpin1/direction
# We do not use another PIN, instead we just supply power from USB
# because XU4 doesn't cut power from GPIO after shutdown.
requestSent=0 # Indicates whether we sent a request to shutdown
log -p d -t "BMW.PWR" "Script started"
while [ 1 = 1 ]
do
power=$(cat /sys/class/gpio/gpio$GPIOpin1/value)
log -p d -t "BMW.PWR" "PIN $GPIOpin1: $power"
if [ $power = 0 ]; then
# We have power
if [ $requestSent = 1 ]; then
# Cancel shutdown request
log -p i -t "BMW.PWR" "Power ON again, cancel shutdown!"
am startservice -n "org.bimdroid.bimservice/.BmwIBusService" -a org.bimdroid.ACTION_CANCEL_DELAYED_SHUTDOWN
requestSent=0
fi
else
sleep 0.5 # Just to make sure we really received power off signal.
power=$(cat /sys/class/gpio/gpio$GPIOpin1/value)
log -p d -t "BMW.PWR" "Poweroff signal received. Double check power: PIN $GPIOpin1: $power, requestSent: $requestSent"
if [ $power = 1 ] && [ $requestSent = 0 ]; then
log -p i -t "BMW.PWR" "Poweroff signal received from power supplier!"
am start "org.bimdroid.bimservice/.ShutdownDialog" -a org.bimdroid.ACTION_SHUTDOWN_REQUEST
requestSent=1
fi
fi
sleep 1
done
| true |
2cc5389a195279cb3a1131340b81972ec5b9cc4c | Shell | clementTal/hassio-addon | /snips/run-ssh.sh | UTF-8 | 1,267 | 3.265625 | 3 | [] | no_license | CONFIG_PATH=/data/options.json
SSH_ENABLED=$(jq --raw-output '.ssh.enabled' $CONFIG_PATH)
SSH_LOGIN=$(jq --raw-output '.ssh.login' $CONFIG_PATH)
SSH_PASSWORD=$(jq --raw-output '.ssh.password' $CONFIG_PATH)
SUPERVISORD_LOGIN=$(jq --raw-output '.supervisord.login' $CONFIG_PATH)
SUPERVISORD_PASSWORD=$(jq --raw-output '.supervisord.password' $CONFIG_PATH)
SUPERVISORD_CONF_FILE="/etc/supervisor/conf.d/supervisord.conf"
echo "==================================="
echo "----------- CONFIG SSH ------------"
echo "==================================="
echo "SSH config: $SSH_LOGIN:$SSH_PASSWORD"
echo "$SSH_LOGIN:$SSH_PASSWORD" | chpasswd
# SSH login fix. Otherwise user is kicked off after login
sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
echo "export VISIBLE=now" >> /etc/profile
# Generate global configuration
cat <<EOT > $SUPERVISORD_CONF_FILE
[supervisord]
nodaemon=true
EOT
#[inet_http_server]
#port = 172.0.0.1:9001
#username = $SUPERVISORD_LOGIN
#password = $SUPERVISORD_PASSWORD
if [ "${SSH_ENABLED}" = true ]
then
cat <<EOT >> $SUPERVISORD_CONF_FILE
[program:sshd]
command=/usr/sbin/sshd -D -ddd -e
autostart=true
autorestart=true
startretries=20
EOT
else
echo "SSH is disabled"
fi | true |
3028d832c91b4ffc612d93345ec8ee6fdffe4efe | Shell | slanthie/clouddream | /deepdream/create_settings.sh | UTF-8 | 657 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Make settings from template
echo Script name: $0
echo $# arguments
if [ "$#" -ne 5 ];
then echo "Usage: $0 <maxwidth px> <stepsize> <jitter> <layer1> <layer2>"
fi
maxwidth=$1
stepsize=$2
jitter=$3
layer1=$4
layer2=$5
echo maxwidth is $maxwidth stepsize is $stepsize jitter is $jitter layer is $layer $layer2
cp settings.json settings.bak
sed -e "s/MAXWIDTH/$maxwidth/g" -e "s/STEPSIZE/$stepsize/g" -e "s/JITTER/$jitter/g" -e "s/LAYER/$layer1\/$layer2/g" settings.template.json > settings.json
echo sed -e "s/MAXWIDTH/$maxwidth/g" -e "s/STEPSIZE/$stepsize/g" -e "s/JITTER/$jitter/g" -e "s/LAYER/$layer1\/$layer2/g" settings.template
| true |
b582fa13ac306542dc8494b011628571ac9f289d | Shell | 001101/adm-scripts | /obsolete/kurento_upload_sources.sh | UTF-8 | 3,185 | 3.75 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
if [ $# -lt 3 ]
then
echo "Usage: $0 <source> <orig-dist> <target-dist>"
exit 1
fi
exec 3>&1 >/dev/tty || exec 3>&1 >./upload_sources_logs
SOURCE="Source: $1"
ORIG_DIST=$2
TARGET_DIST=$3
# Path information
BASEPATH="$(cd -P -- "$(dirname -- "$0")" && pwd -P)" # Absolute canonical path
PATH="${BASEPATH}:${BASEPATH}/kms:${PATH}"
if [ "${ID_RSA_FILE}x" == "x" ]
then
echo "You need to specify environment variable ID_RSA_FILE with the public key to upload packages to the repository"
exit 1
fi
if [ "${CERT}x" == "x" ]
then
echo "You need to specify environment variable CERT with the certificate to upload packages to the repository via https"
exit 1
fi
if [ "${ORIG_REPREPRO_URL}x" == "x" ]
then
echo "You need to specify environment variable ORIG_REPREPRO_URL with the address of your repository"
exit 1
fi
if [ "${TARGET_REPREPRO_URL}x" == "x" ]
then
echo "You need to specify environment variable TARGET_REPREPRO_URL with the address of your repository"
exit 1
fi
if [ "${ORIG_COMPONENT}x" == "x" ]
then
echo "You need to specify environment variable ORIG_COMPONENT with the origin component"
exit 1
fi
if [ "${DEST_COMPONENT}x" == "x" ]
then
echo "You need to specify environment variable DEST_COMPONENT with the destination component"
exit 1
fi
if [ "x${ARCH}" != "xamd64" -a "x${ARCH}" != "xi386" ]
then
echo "You need to specify environment variable ARCH with i386 or amd64"
exit 1
fi
KEY=$ID_RSA_FILE
TEMP_DIR=`mktemp -d`
cd $TEMP_DIR
curl --fail -s ${ORIG_REPREPRO_URL}/dists/${ORIG_DIST}/${ORIG_COMPONENT}/binary-${ARCH}/Packages > Packages || curl -s ${ORIG_REPREPRO_URL}/dists/${ORIG_DIST}/${ORIG_COMPONENT}/binary-${ARCH}/Packages.gz | zcat > Packages
export DIST=$TARGET_DIST
export COMPONENT=$DEST_COMPONENT
export REPREPRO_URL=$TARGET_REPREPRO_URL
export DEBIAN_PACKAGE_REPOSITORY=ubuntu-pub
if [ ! -e Packages ]
then
echo "Cannot get packages index from ${ORIG_REPREPRO_URL}"
exit 1
fi
for file in $(cat Packages | awk -v RS='' -v p="$SOURCE" '$0 ~ p' | grep Filename | cut -d':' -f2)
do
wget ${ORIG_REPREPRO_URL}/$file
done
for file in $(cat Packages | awk -v RS='' -v p="$SOURCE" '$0 ~ p' | grep Filename | cut -d':' -f2)
do
PACKAGE=$(basename $file)
echo "Uploading package $PACKAGE to ${TARGET_REPREPRO_URL}/dists/$DEST_DIST/$DEST_COMPONENT"
kurento_upload_package.sh $TARGET_DIST $PACKAGE
#curl --insecure --key $KEY --cert $CERT -X POST ${TARGET_REPREPRO_URL}/upload?dist=$TARGET_DIST\&comp=$DEST_COMPONENT --data-binary @$PACKAGE || exit 1
done
SOURCE="Package: $1"
for file in $(cat Packages | awk -v RS='' -v p="$SOURCE" '$0 ~ p' | grep Filename | cut -d':' -f2)
do
wget ${ORIG_REPREPRO_URL}/$file
done
for file in $(cat Packages | awk -v RS='' -v p="$SOURCE" '$0 ~ p' | grep Filename | cut -d':' -f2)
do
PACKAGE=$(basename $file)
echo "Uploading package $PACKAGE to ${TARGET_REPREPRO_URL}/dists/$DEST_DIST/$DEST_COMPONENT"
kurento_upload_package.sh $TARGET_DIST $PACKAGE
#curl --insecure --key $KEY --cert $CERT -X POST ${TARGET_REPREPRO_URL}/upload?dist=$TARGET_DIST\&comp=$DEST_COMPONENT --data-binary @$PACKAGE || exit 1
done
cd -
rm -rf $TEMP_DIR
exec >&3-
| true |
498767cf5d018d5a01dfa4ca24cd2881ba9c14a2 | Shell | Lraharison/docker-tomcat-mysql | /start.sh | UTF-8 | 390 | 2.640625 | 3 | [] | no_license | ##!/bin/bash
set -e
chown -R mysql:mysql /var/lib/mysql
mysql_install_db --user mysql > /dev/null
#Demarrage mysql
/etc/init.d/mysql start
#Load sql
mysql -u root < script.sql
#Copie war file dans le webapps de tomcat
cp /webapp.war /var/lib/tomcat7/webapps/webapp.war
#Demarrage tomcat
service tomcat7 start && tail -f /var/lib/tomcat7/logs/catalina.out
#Ouverture shell
/bin/bash
| true |
478c3e8a0a9befcc60c778ffe65fd06b212e9c19 | Shell | heynemann/charms | /centos/django/hooks/restart | UTF-8 | 253 | 2.765625 | 3 | [] | no_license | #!/bin/sh
CHARM_DIR=/var/lib/tsuru
APP_DIR=/home/application
$CHARM_DIR/hooks/stop-gunicorn
if [ -f ${APP_DIR}/gunicorn.pid ]; then
while [ -d /proc/`cat ${APP_DIR}/gunicorn.pid` ]; do
sleep 1
done
fi
$CHARM_DIR/hooks/start-gunicorn
| true |
f1cc32864b8b0f34132844df152982a8312a68c8 | Shell | breu/bin-tools | /show-nodes | UTF-8 | 106 | 2.625 | 3 | [] | no_license | #!/bin/bash
NODES=
if [[ -z ${1} ]]; then
NODES="breu"
else
NODES="$1"
fi
nova list --name ${NODES}
| true |
90c51a056de7854d0a92070c051b0d9db035ba8d | Shell | yshavit/loadpr | /loadpr | UTF-8 | 1,484 | 3.921875 | 4 | [] | no_license | #!/bin/bash
PR_NUM=$1
if [ -z "$PR_NUM" ]; then
echo "Usage: $0 <pr num OR \"unload\">"
exit 1
fi
if [ "$PR_NUM" = "unload" ]; then
BRANCH_NAME=$(git branch |grep '^*' | sed 's/^\* *//')
if [ "${BRANCH_NAME:0:2}" != "pr" ]; then
echo 'Can only unload if current branch starts with "pr"'
exit 1
fi
git commit -am 'pre-unload commit' && git clean -f -d && git checkout master && git branch -D "$BRANCH_NAME"
echo "Unloaded and deleted $BRANCH_NAME. Current branches:"
git branch
exit
fi
REPO=$(git remote -vv | grep '\(fetch\)'| sed -E 's/.*git@github.com:([^.]*)\.git \(fetch\)/\1/')
if [ $? != '0' ]; then
echo "Not in a github repo"
exit 1
fi
TOKEN=$(cat ~/.github-token)
if [ -z "$TOKEN" ]; then
echo "Please put a github token in ~/.github-token"
exit 1
fi
PR_INFO=$(curl -H "Authorization: token $(cat ~/.github-token)" "https://api.github.com/repos/$REPO/pulls/$PR_NUM" 2> /dev/null)
HEAD_SHA=$(python -c 'import json; import sys; print json.load(sys.stdin)["head"]["sha"]' <<< "$PR_INFO" 2>/dev/null)
BASE_SHA=$(python -c 'import json; import sys; print json.load(sys.stdin)["base"]["sha"]' <<< "$PR_INFO" 2>/dev/null)
if [ -z "$HEAD_SHA" ] || [ -z "$BASE_SHA" ]; then
echo "Couldn't get PR info. Is the PR id correct? Is your token in ~/.github-token valid?"
exit 1
fi
echo "Staging merge of $HEAD_SHA into $BASE_SHA"
set -e
git fetch
git checkout -b "pr${PR_NUM}" "$BASE_SHA"
git merge --no-ff --no-commit "$HEAD_SHA"
git reset HEAD^
| true |
6372f7581ba4dc7a96a530e409b3f37bce04a138 | Shell | floscr/old-vimrc-dotfiles | /files/setup/setup-dock.sh | UTF-8 | 419 | 2.609375 | 3 | [] | no_license | #!/usr/bin/bash
# Install utility for handling the dock from the command line
brew install dockutil
# Enable hover highlight in grid view
defaults write com.apple.dock mouse-over-hilite-stack -boolean yes;killall Dock
# Remove all items to start adding custom stuff
dockutil --remove all
# Add downloads and grid folder as grid views
dockutil --add ~/Downloads --view grid
dockutil --add /Applications/ --view grid
| true |
371d7fae79a40a3138b1108e7ce72c3d1d6732fe | Shell | jfliedner/IACryptoLab | /Fliedner_Jillian_Lab2/crypto.sh | UTF-8 | 3,204 | 4.21875 | 4 | [] | no_license | #!/bin/bash
#verifying number of arguments
if [ $# -ne 5 ]; then
echo "Incorrect number of arguments"
exit
fi
FLAG=$1 #type of flag
FIRST_KEY=$2 #first key given
SECOND_KEY=$3 #second key given
FILE_A=$4 #first file name/file
FILE_B=$5 #second file name/file
#encryption
if [ $FLAG == "-e" ]; then
#getting the AES key and IV
openssl rand 16 | hexdump -e '16/1 "%02x" "\n"' > /tmp/key
openssl rand 16 | hexdump -e '16/1 "%02x" "\n"' > /tmp/iv
#opening files from above and storing them into variables
AES_KEY="$(cat /tmp/key)"
IV_KEY="$(cat /tmp/iv)"
#creating the hash of the plaintext
openssl dgst -sha256 $FILE_A | awk {'print $2'} > /tmp/encrypt_hash
if [ $? -ne 0 ]; then
echo "Error with creating hash of plaintext"
exit
fi
#signing for authenticity
openssl dgst -sha256 -sign $SECOND_KEY -out /tmp/sign $FILE_A
if [ $? -ne 0 ]; then
echo "Error with signing plaintext"
exit
fi
#encrypting the data using AES-128
openssl aes-128-cbc -K $AES_KEY -iv $IV_KEY -e -in $FILE_A -out /tmp/encrypted
if [ $? -ne 0 ]; then
echo "Error with encrypting data"
exit
fi
#encrypting the AES key and IV using RSA
openssl rsautl -encrypt -inkey $FIRST_KEY -pubin -in /tmp/key -out /tmp/AES_PRIV.txt
if [ $? -ne 0 ]; then
echo "Error with encrypting AES key"
exit
fi
openssl rsautl -encrypt -inkey $FIRST_KEY -pubin -in /tmp/iv -out /tmp/AES_PRIV_IV.txt
if [ $? -ne 0 ]; then
echo "Error with encrypting IV"
exit
fi
#zipping the files together
zip -r $5 /tmp/encrypted /tmp/AES_PRIV.txt /tmp/AES_PRIV_IV.txt /tmp/encrypt_hash /tmp/sign
if [ $? -ne 0 ]; then
echo "Error with zipping"
exit
fi
#decryption
elif [ $FLAG == "-d" ]; then
#unzipping
unzip -o $FILE_A
if [ $? -ne 0 ]; then
echo "Error with unzipping"
exit
fi
#retrieving AES key and IV through decryption of RSA
AES_KEY="$(openssl rsautl -decrypt -inkey $FIRST_KEY -in /tmp/AES_PRIV.txt)"
if [ $? -ne 0 ]; then
echo "Error with retrieving AES key"
exit
fi
IV_KEY="$(openssl rsautl -decrypt -inkey $FIRST_KEY -in /tmp/AES_PRIV_IV.txt)"
if [ $? -ne 0 ]; then
echo "Error with retireving IV"
exit
fi
#decrypting the data using AES key and IV retrieved
openssl aes-128-cbc -K $AES_KEY -iv $IV_KEY -d -in /tmp/encrypted -out $FILE_B
if [ $? -ne 0 ]; then
echo "Error with decrypting"
exit
fi
#verifying the authenticity through signing
openssl dgst -sha256 -verify $SECOND_KEY -signature /tmp/sign $FILE_B | grep 'Verified OK' &> /dev/null
#verifying signature verify command outputted "Verified OK"
if [ $? -ne 0 ]; then
echo "Bad signature. Cannot verify authenticity"
fi
#getting the hash of the decrypted plaintext
HASH_DECRYPT="$(openssl dgst -sha256 $FILE_B | awk {'print $2'})"
if [ $? -ne 0 ]; then
echo "Error with hashing decrypted plaintext"
exit
fi
#getting the hash from the encryption
HASH_ENCRYPT="$(cat /tmp/encrypt_hash)"
if [ $? -ne 0 ]; then
echo "Error with opening /tmp/encrypt_hash"
exit
fi
#comparing to see if valid
if [ "$HASH_ENCRYPT" != "$HASH_DECRYPT" ]; then
echo "Unable to verify hash, cannot verify integrity"
fi
#not a valid flag
else
echo "Incorrect command line arguments"
exit
fi | true |
49a395f6188116d60add99655fa348ac4ac5c36d | Shell | Jephuff/updateall | /package-managers/brew/upgrade-list.sh | UTF-8 | 262 | 3.328125 | 3 | [] | no_license | #!/bin/bash
brew outdated --verbose | while read name therest; do
current=$(echo $therest | grep -o "[0-9.]*)")
latest=$(echo $therest | grep -o "[0-9.]*$")
current=${current#"("}
current=${current%")"}
echo "$name" "$current" "$latest"
done
| true |
800ed0570d569170a146b2c2c5e4f91f85a6b9a3 | Shell | xhaa123/alps | /alps-new/var/cache/alps/scripts/fetchmail.sh | UTF-8 | 2,346 | 3.296875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
set +h
. /etc/alps/alps.conf
. /var/lib/alps/functions
. /etc/alps/directories.conf
#REQ:procmail
cd $SOURCE_DIR
NAME=fetchmail
VERSION=6.4.8
URL=https://downloads.sourceforge.net/fetchmail/fetchmail-6.4.8.tar.xz
SECTION="Mail/News Clients"
DESCRIPTION="The Fetchmail package contains a mail retrieval program. It retrieves mail from remote mail servers and forwards it to the local (client) machine's delivery system, so it can then be read by normal mail user agents."
wget -nc $URL
if [ ! -z $URL ]; then
TARBALL=$(echo $URL | rev | cut -d/ -f1 | rev)
if [ -z $(echo $TARBALL | grep ".zip$") ]; then
DIRECTORY=$(tar tf $TARBALL | cut -d/ -f1 | uniq | grep -v "^\.$")
rm -rf $DIRECTORY
tar --no-overwrite-dir -xf $TARBALL
else
DIRECTORY=$(unzip_dirname $TARBALL $NAME)
unzip_file $TARBALL $NAME
fi
cd $DIRECTORY
fi
echo $USER > /tmp/currentuser
rm -rf /tmp/rootscript.sh
cat > /tmp/rootscript.sh <<"ENDOFROOTSCRIPT"
useradd -c "Fetchmail User" -d /dev/null -g nogroup \
-s /bin/false -u 38 fetchmail
ENDOFROOTSCRIPT
chmod a+x /tmp/rootscript.sh
/tmp/rootscript.sh
rm -rf /tmp/rootscript.sh
PYTHON=python3 \
./configure --prefix=/usr \
--enable-fallback=procmail &&
make
rm -rf /tmp/rootscript.sh
cat > /tmp/rootscript.sh <<"ENDOFROOTSCRIPT"
make install &&
chown -v fetchmail:nogroup /usr/bin/fetchmail
ENDOFROOTSCRIPT
chmod a+x /tmp/rootscript.sh
/tmp/rootscript.sh
rm -rf /tmp/rootscript.sh
rm -rf /tmp/rootscript.sh
cat > /tmp/rootscript.sh <<"ENDOFROOTSCRIPT"
cat > ~/.fetchmailrc << "EOF"
# The logfile needs to exist when fetchmail is invoked, otherwise it will
# dump the details to the screen. As with all logs, you will need to rotate
# or clear it from time to time.
set logfile fetchmail.log
set no bouncemail
# You probably want to set your local username as the postmaster
set postmaster <username>
poll SERVERNAME :
user <isp_username> pass <password>;
mda "/usr/bin/procmail -f %F -d %T";
EOF
touch ~/fetchmail.log &&
chmod -v 0600 ~/.fetchmailrc
ENDOFROOTSCRIPT
chmod a+x /tmp/rootscript.sh
/tmp/rootscript.sh
rm -rf /tmp/rootscript.sh
if [ ! -z $URL ]; then cd $SOURCE_DIR && cleanup "$NAME" "$DIRECTORY"; fi
register_installed "$NAME" "$VERSION" "$INSTALLED_LIST"
| true |
e18c302c1f1289a9749d5d421c81b57947f01d00 | Shell | atulkhomane21/files | /concate.sh | UTF-8 | 321 | 2.71875 | 3 | [] | no_license | #################################
#concate strings
###############################
str1="mohit"
str2="atul"
str3="$str1$str2" #concate str1 and str2 in str3
echo $str3
str3+=" are user" #attach this sentence at end of string
echo $str3
<<:
str4=$str1
str4+=$str2 #this also works
echo $str4
:
| true |
f9fd806315a34ab181da88eca6612a48116f1989 | Shell | igayoso/watchdog-service | /init_http_service | UTF-8 | 1,092 | 3.5625 | 4 | [] | no_license | #!/bin/bash
### BEGIN INIT INFO
# Provides: myservice
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start http_service at boot to get files that was created by watchdog.py
# Description: Start http_service at boot to get files that was created by watchdog.py
### END INIT INFO
# Paths and binaries location
DIR=/home/test/watchdog-service
DAEMON=$DIR/http_service.py
DAEMON_NAME=http_service
BIN=/usr/bin/python
PIDFILE=/var/run/$DAEMON_NAME.pid
. /lib/lsb/init-functions
do_start () {
log_daemon_msg "Starting system $DAEMON_NAME daemon"
$BIN $DAEMON &
log_end_msg $?
}
do_stop () {
log_daemon_msg "Stopping system $DAEMON_NAME daemon"
kill `cat $PIDFILE` && rm $PIDFILE
log_end_msg $?
}
case "$1" in
start|stop)
do_${1}
;;
restart|reload|force-reload)
do_stop
do_start
;;
restart|reload|force-reload)
do_stop
do_start
;;
status)
status_of_proc "$DAEMON_NAME" "$DAEMON" && exit 0 || exit $?
;;
*)
echo "Usage: /etc/init.d/$DAEMON_NAME {start|stop|restart|status}"
exit 1
;;
esac
exit 0
| true |
acca243fe3d359b551f9f73b016b69a99dacb332 | Shell | adrien-matta/nptool | /nptool.sh | UTF-8 | 2,170 | 3.984375 | 4 | [] | no_license | #!/bin/sh
# test if export is supported
export 1>/dev/null 2>/dev/null
if [ $? -eq 0 ]; then
CMD="export"
SEP="="
else
setenv 1>/dev/null 2>/dev/null
if [ "${?} == 0" ]; then
CMD="setenv"
SEP=" "
else
echo "Neither setenv nor export found!"
fi
fi
# find script path
if [ -n "$ZSH_VERSION" ]; then
SCRIPTPATH="$( cd "$( dirname "${(%):-%x}" )" && pwd )"
elif [ -n "$tcsh" ]; then
SCRIPTPATH="$( cd "$( dirname "$0" )" && pwd )"
elif [ -n "$BASH_VERSION" ]; then
SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
else
echo "neither bash or zsh is used, abort"
exit 1
fi
# export NPTOOL environment variable
${CMD} NPTOOL${SEP}$SCRIPTPATH
NPARCH=$(uname)
# mac os x case
if [ "${NPARCH}" = "Darwin" ] ;
then
${CMD} DYLD_LIBRARY_PATH${SEP}$NPTOOL/NPLib/lib:$DYLD_LIBRARY_PATH
${CMD} DYLD_LIBRARY_PATH${SEP}$NPTOOL/NPSimulation/lib:$DYLD_LIBRARY_PATH
else
${CMD} LD_LIBRARY_PATH${SEP}$NPTOOL/NPLib/lib:$LD_LIBRARY_PATH
${CMD} LD_LIBRARY_PATH${SEP}$NPTOOL/NPSimulation/lib:$LD_LIBRARY_PATH
fi
${CMD} PATH=$NPTOOL/NPLib/bin:$PATH
${CMD} PATH=$NPTOOL/NPSimulation/bin:$PATH
alias npt='cd $NPTOOL'
alias npl='cd $NPTOOL/NPLib'
alias nps='cd $NPTOOL/NPSimulation'
${CMD} npa_not_supported='npa is now longer supported, use npp instead'
alias npa='echo $npa_not_supported'
# open a project
function npp {
if [[ $1 == *"Example"* ]]
then
cd $NPTOOL/Examples/$1
else
cd $NPTOOL/Projects/$1
fi
}
# tab completion for npp
_npp() {
# Pointer to current completion word.
local cur
# Array variable storing the possible completions.
COMPREPLY=()
cur=${COMP_WORDS[COMP_CWORD]}
# LIST of available choices
LIST=`ls $NPTOOL/Projects $NPTOOL/Examples`
case "$cur" in
*)
COMPREPLY=( $( compgen -W '$LIST' -- $cur ) );;
esac
return 0
}
# associate the tab completion to npp
if [ -n "$ZSH_VERSION" ]; then
# ZSH have its own command to make things easy
#compdef _directories -W $NPTOLL/Project npp
:
else
# the rest of the world use standard posix complete
complete -F _npp -o filenames npp
fi
${CMD} Geant4_DIR${SEP}$G4LIB
${CMD} NPLib_DIR${SEP}$NPTOOL/NPLib
| true |
f1fcca626759ff23caa89ce12780918410bc580b | Shell | abelQJ/SALFS | /ALFS/lfs-commands/base_tools/091-sed | UTF-8 | 379 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set +h
set -e
cd $PKGDIR
sed -i 's/usr/tools/' build-aux/help2man
sed -i 's/panic-tests.sh//' Makefile.in
./configure --prefix=/usr --bindir=/bin
make
make html
# make -k check >> $TEST_LOG 2>&1 || true
make install
install -d -m755 /usr/share/doc/sed-4.4
install -m644 doc/sed.html /usr/share/doc/sed-4.4
echo -e "\n\nTotalseconds: $SECONDS\n"
exit
| true |
1269f2a12ba2a5ee20d1189fa139aa91cf0ad5d5 | Shell | dapperstats/accessor | /scripts/retrieve_remote_db.bash | UTF-8 | 812 | 4.15625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# requirements: wget, mdbtools, unixodbc-dev
# Pull values from options and fill if needed
while getopts ":r:t:d:" option
do
case "${option}"
in
r ) REMOTE_DB_FILE=$OPTARG;;
t ) TEMP_DB_FILE=$OPTARG;;
d ) DATA_DIR=$OPTARG;;
\? ) echo "Invalid option: $OPTARG" 1>&2;;
: ) echo "Invalid option: $OPTARG requires an argument" 1>&2;;
esac
done
if [ ! "$REMOTE_DB_FILE" ]
then
echo "missing -r (remote db file address)"
exit 0
fi
if [ ! "$TEMP_DB_FILE" ]
then
TEMP_DB_FILE="${REMOTE_DB_FILE##*/}"
fi
if [ ! "$DATA_DIR" ]
then
DATA_DIR=data
fi
# Create the data directory
mkdir $DATA_DIR -p
# Download the database
echo "Downloading remote database..."
wget $REMOTE_DB_FILE -nv -q -O $DATA_DIR/$TEMP_DB_FILE --show-progress
| true |
1216fab16d517ab30ddcc976bc2d65d78eba0e11 | Shell | emanuelepesce/torch_fbcunn_easy_install | /install_2.sh | UTF-8 | 2,913 | 3.078125 | 3 | [] | no_license | #!/bin/bash -e
#
# Script (second part) for installing torch with fbcunn libraries
#
# Assume that cuda7.5 and libboost1.55 are already installed on the machine
#
# Authors: Emanuele Pesce, Nicolo' Savioli
#
# Last edit: 19/04/2016
##### settings
set -e
set -x
extra_packages=libiberty-dev # because ubuntu 14.04
##### set directories
launch_path=$PWD # where this script is launched
dir=$HOME/torch # where torch will be installed
echo Installing Torch libraries
luarocks install image
luarocks install nn
luarocks install cudnn
##### Install torch dependencies
curl -sk https://raw.githubusercontent.com/torch/ezinstall/master/install-deps | bash -e
##### Install other dependencies
sudo apt-get install -y \
git \
curl \
wget \
g++ \
automake \
autoconf \
autoconf-archive \
libtool \
libevent-dev \
libdouble-conversion-dev \
libgoogle-glog-dev \
libgflags-dev \
liblz4-dev \
liblzma-dev \
libsnappy-dev \
make \
zlib1g-dev \
binutils-dev \
libjemalloc-dev \
$extra_packages \
flex \
bison \
libkrb5-dev \
libsasl2-dev \
libnuma-dev \
pkg-config \
libssl-dev \
libedit-dev \
libmatio-dev \
libpython-dev \
libpython3-dev \
python-numpy \
libelf-dev \
libdwarf-dev \
libiberty-dev
# libboost-all-dev \
# libunwind8-dev
##### Install Folly
echo
echo Installing Folly
echo
cd $dir
git clone -b v0.35.0 --depth 1 https://github.com/facebook/folly.git
cp -R $launch_path/gtest-1.7.0 $dir/folly/folly/test
cd $dir/folly/folly/
autoreconf -ivf
./configure
make
make check
sudo make install
sudo ldconfig # reload the lib paths after freshly installed folly. fbthrift needs it.
##### Install fbthrift
echo
echo Installing fbthrift
echo
cd $dir
git clone -b v0.24.0 --depth 1 https://github.com/facebook/fbthrift.git
cd $dir/fbthrift/thrift
autoreconf -ivf
./configure
make
sudo make install
##### Install thpp
echo
echo Installing thpp
echo
cp -r $launch_path/thpp-1.0 $dir/thpp
cp -r $launch_path/gtest-1.7.0 $dir/thpp/thpp
cd $dir/thpp/thpp
cmake .
make
sudo make install
#### fblualib
cp -r $launch_path/fblualib-1.0 $dir/fblualib
cd $dir/fblualib/fblualib
./build.sh
#### Install fbcunn
echo
echo Installing fbcunn
echo
cd $dir
git clone https://github.com/torch/nn && ( cd nn && git checkout getParamsByDevice && luarocks make rocks/nn-scm-1.rockspec )
git clone https://github.com/facebook/fbtorch.git && ( cd fbtorch && luarocks make rocks/fbtorch-scm-1.rockspec )
git clone https://github.com/facebook/fbnn.git && ( cd fbnn && luarocks make rocks/fbnn-scm-1.rockspec )
git clone https://github.com/facebook/fbcunn.git && ( cd fbcunn && luarocks make rocks/fbcunn-scm-1.rockspec )
##### bug fixing
luarocks install nn
cp $launch_path/Optim.lua $dir/install/share/lua/5.1/fbnn
##### test
th test_multiple_gpu.lua
echo
echo Installation Complete.
| true |
72165b9b73589a61a431f8f9b4163679dfd8523c | Shell | ccn30/ENCRYPT | /ImagePrep/copy_images.sh | UTF-8 | 2,357 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# copies images
ENCRYPTpath=/home/ccn30/rds/hpc-work/WBIC_lustre/ENCRYPT
p00500path=/home/ccn30/rds/rds-p00500_encrypt-URQgmO1brZ0/p00500
mysubjs=${ENCRYPTpath}/ENCRYPT_MasterRIScodes.txt
for subjID in `cat $mysubjs`
do
subject="$(cut -d'/' -f1 <<<"$subjID")"
echo "******** starting $subject ********"
# mp2rage T1 brain to images in p00500
target=${p00500path}/images/${subject}/T1brain.nii
source=${ENCRYPTpath}/images/${subjID}/mp2rage/n4mag0000_PSIR_skulled_std_struc_brain.nii
cp $source $target
done
## extra code from previous script
#pathstem=/lustre/scratch/wbic-beta/ccn30/ENCRYPT
## set paths
#groupTemplateDir=${pathstem}/images/template02
#studyTemplateDir=~/ENCRYPT/atlases/templates/ECtemplatemasks2015
#regscriptdir=${pathstem}/scripts/Registration
#hybridmaskT2dir=/home/ccn30/ENCRYPT/segmentation/ECsubdivisions_Mag
#regDir=${pathstem}/registrations/${subject}
#rawpathstem=${pathstem}/images/${subjID}
#groupTemplate=${groupTemplateDir}/para01_template0.nii.gz
#studyTemplate=${studyTemplateDir}/Study_template_wholeBrain.nii
#T1=${groupTemplateDir}/${subject}_t1.nii
#cd ${rawpathstem}
#T2dir=$(ls -d Series_???_Highresolution_TSE_PAT2_100)
#T2path=${rawpathstem}/${T2dir}
#T2=${T2path}/denoise_n42_t2.nii
#EPI=${regDir}/N4meanEPI.nii
#cd ${regscriptdir}
## set subject specific transformations (others set in ANTS call script)
T1xTempAffine=${groupTemplateDir}/para01_${subject}_t1*0GenericAffine.mat
T1xTempInvWarp=${groupTemplateDir}/para01_${subject}_t1*1InverseWarp.nii.gz
T1xTempWarp=${groupTemplateDir}/para01_${subject}_t1*1Warp.nii.gz
T1xT2affine=${regDir}/T1xT2_ANTs_0GenericAffine.mat
T1xEPIaffine=${regDir}/T1xepiSlab0GenericAffine.mat
## Perform copy images
#target=/group/p00500/Masks/images/${subject}
#mkdir -p $target
#cp $T2 $target/denoise_n42_t2.nii
#cp $EPI $target/N4meanEPI.nii
#cp $T1 $target/T1.nii
#targetreg=/group/p00500/Masks/registrations/${subject}
#mkdir -p $targetreg
#cp $T1xT2affine $targetreg/T1xT2_ANTs_0GenericAffine.mat
## need extra line for copying T1-T2 regs for 3 subjects that failed ANTs when done
#cp $T1xTempAffine $targetreg/para01_${subject}_t1*0GenericAffine.mat
#cp $T1xTempInvWarp $targetreg/para01_${subject}_t1*1InverseWarp.nii.gz
#cp $T1xTempWarp $targetreg/para01_${subject}_t1*1Warp.nii.gz
#cp $T1xEPIaffine $targetreg/T1xepiSlab0GenericAffine.mat
| true |
8bfbc1f6df864146c5e9b69ead09493a842c1b54 | Shell | anon-for-publication/TSLSynthesizer | /synthesize.sh | UTF-8 | 711 | 3.546875 | 4 | [] | no_license | #! /usr/bin/env bash
# Synthesize WebAudio code from a TSL specification ($1).
# Pipeline: .tsl -> .tlsf -> .aag (via Strix in docker) -> generated code.
file_name=$1
# Strip the extension (".tsl") to derive the shared base name; the
# suffix-strip expansion is safer than the original fixed-width ${x:0:-4}.
file_header=${file_name%.*}
tlsf="$file_header.tlsf"
aag="$file_header.aag"
js="$file_header.js"
# Build TLSF (the useless `| cat` from the original is dropped)
tsltools/tsl2tlsf "$file_name" > "$tlsf"
# Build AAG from docker; the current directory is bind-mounted at /files
# so the container can read the generated .tlsf
sudo docker run --rm -v "$(pwd)":/files -i wonhyukchoi/tlsf_to_aag /Strix/scripts/strix_tlsf.sh "files/$tlsf" > "$aag"
# Change to unix format (ignore errors if dos2unix is unavailable)
dos2unix "$aag" 2> /dev/null
# Check for realizability: the verdict is reported on the first line
is_realizable=$(head -n1 "$aag")
if [ "$is_realizable" = "UNREALIZABLE" ]; then
    echo "$is_realizable" >&2
    exit 1
fi
# Remove first line (the realizability verdict) so a clean AIGER file remains
# https://stackoverflow.com/a/339941/11801882
tail -n +2 "$aag" > "$aag.tmp" && mv "$aag.tmp" "$aag"
# Synthesize the resulting code
tsltools/cfm2code WebAudio "$aag"
| true |
0e0bf2ac9d1856e629945c5dbcf25b78f15446e3 | Shell | saritanus/fibonacci | /docker/entrypoint.sh | UTF-8 | 207 | 3.21875 | 3 | [] | no_license | #!/bin/bash
set -x
# Run tomcat if the first argument is run otherwise try to run whatever the argument is a command
if [ 'run' != "$1" ]; then
	exec "$@"
else
	echo "run tomcat"
	exec catalina.sh "$@"
fi | true |
f7abdc45ff87c255c322ce1cca63a0d08707fbd2 | Shell | santospanda111/shellscripting | /day8/dictionary/birthmonth.sh | UTF-8 | 886 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Simulate 50 random birthdays and tally how many fall in each month.
# (Fixes the original "Feburary" misspelling and replaces the 12-arm
# case statement with an indexed lookup table.)
# Calendar-ordered month names; index 0 -> January ... 11 -> December.
month_names=(January February March April May June July
             August September October November December)
# Associative array mapping month name -> birthday count.
declare -A month
for name in "${month_names[@]}"
do
    month[$name]=0
done
for((i=0;i<50;i++))
do
    # RANDOM%12 yields 0..11, a direct index into month_names.
    birth=$((RANDOM%12))
    name=${month_names[birth]}
    month[$name]=$(( month[$name] + 1 ))
done
# Print all counts, then all month names (associative-array iteration
# order is unspecified, but both lines use the same order).
echo ${month[@]}
echo ${!month[@]}
| true |
42494d78e0f123e868616eab27bc2c4297fc80b2 | Shell | tobi-wan-kenobi/yubikey-full-disk-encryption | /testrun.sh | UTF-8 | 352 | 2.875 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/usr/bin/env bash
# Manual test driver for the ykfde initcpio hook: closes any open LUKS
# mapping, then sources the hook and runs it as the initramfs would.
# Must be run from the repository root with /etc/ykfde.conf configured.
CONFFILE="/etc/ykfde.conf"
# Refuse to run outside the repository root.
[ -e "src/hooks/ykfde" ] || { echo "ERROR: src/hooks/ykfde not found."; exit 1; }
# Load the system configuration; it must define YKFDE_LUKS_NAME.
. "$CONFFILE"
[ -z "$YKFDE_LUKS_NAME" ] && { echo "ERROR: YKFDE_LUKS_NAME not set (check '$CONFFILE')."; exit 1; }
# Close the mapping first so run_hook can open it again from scratch.
[ -e "/dev/mapper/$YKFDE_LUKS_NAME" ] && cryptsetup luksClose "$YKFDE_LUKS_NAME"
# Source the hook to define run_hook(), then execute it.
. src/hooks/ykfde
run_hook
| true |
dd4b095358df94522694331e4ba2cc3e70616d34 | Shell | bridyboo/tetsujin | /setup.sh | UTF-8 | 1,299 | 3.484375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Dependencies: install any apt packages required by the bot that are missing.
packages=("jq" "moreutils" "python3")
for pkg in "${packages[@]}"; do
    is_pkg_installed=$(dpkg-query -W --showformat='${Status}\n' "${pkg}" | grep "install ok installed")
    if [ "${is_pkg_installed}" == "install ok installed" ]; then
    echo "${pkg} is installed."; else
    sudo apt install -yq "${pkg}"
    fi
done
pip3 install -r requirements.txt
# Ask for user Discord Bot Token
discord_token=""
current_dir=$(pwd)
printf "\n"
echo "==============================================================================="
echo "Please input Discord Bot Token, see https://discord.com/developers/applications"
echo "==============================================================================="
printf "\n"
# -r keeps backslashes in the token literal.
read -r -p "Token: " discord_token
# Insert token to src/resources/config.json.
# Pipe jq straight into sponge: the original `echo $(jq ...)` word-split
# and glob-expanded the JSON output, which could corrupt the file.
jq --arg token "$discord_token" '.DISCORD_TOKEN = $token' "$current_dir/src/resources/config.json" | sponge "$current_dir/src/resources/config.json"
# Create Service File
echo "[Unit]
Description=Mokujin Tekken Discord Bot
[Service]
Type=simple
User=$USER
ExecStart=/usr/bin/python3 $current_dir/src/mokujin.py
[Install]
WantedBy=multi-user.target" | sudo dd of=/etc/systemd/system/mokujin.service status=none
sudo systemctl enable mokujin.service
sudo systemctl start mokujin.service
| true |
dee137dceefe92a9722ebcf2c674746ee7bea019 | Shell | onnx/onnx-mlir | /utils/build-run-onnx-lib.sh | UTF-8 | 1,991 | 3.8125 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-dco-1.1"
] | permissive | # Build the run-onnx-lib utility
#
# When called without parameters, we build the tool for dynamically linking of
# a model. It will need to be passed at runtime.
#
# When called with one parameter, we build the tool for the model passed
# as a parameter.
#
# Assumptions:
# 1) script run in the onnx-mlir/build subdir.
# 2) llvm-project is built with all its libraries (needed to run the tool)

# ask git for the onnx-mlir top level dir
ONNX_MLIR=$(git rev-parse --show-toplevel)
# Quote both sides so directories containing spaces don't break `[`.
if [ "$(realpath "$(pwd)")" != "$ONNX_MLIR/build" ] ; then
  echo "Error: this script must be run from the build dir $ONNX_MLIR/build"
  exit 1
fi
ONNX_MLIR_BIN=$ONNX_MLIR/build/Debug/bin
if [ -z "$LLVM_PROJECT" ] ; then
  if [ -n "$MLIR_DIR" ] ; then
    # find llvm-project in MLIR_DIR, used to configure cmake,
    LLVM_PROJECT=${MLIR_DIR%llvm-project/*}llvm-project
  else
    # or else assume llvm-project shares parent directory with ONNX-MLIR
    LLVM_PROJECT=$(dirname "$ONNX_MLIR")/llvm-project
  fi
fi
if [ "$#" -eq 0 ] ; then
  echo "Compiling run-onnx-lib for dynamically linked models passed at runtime"
elif [ "$#" -eq 1 ] ; then
  if [ -e "$1" ] ; then
    echo "Compiling run-onnx-lib statically linked to model $1"
  else
    echo "Error: could not find model $1"
    exit 1
  fi
else
  echo "Error: pass either zero/one argument for dynamically/statically linked models"
  exit 1
fi
DRIVER_NAME=$ONNX_MLIR/utils/RunONNXLib.cpp
RUN_BIN=$ONNX_MLIR_BIN/run-onnx-lib
RUN_BIN_RELATIVE=${RUN_BIN#$(pwd)/}
# LOAD_MODEL_STATICALLY is 1 exactly when a model file was given ($# is 0 or 1 here).
g++ -g $DRIVER_NAME -o $RUN_BIN -std=c++17 -D LOAD_MODEL_STATICALLY=$# \
   -I $LLVM_PROJECT/llvm/include -I $LLVM_PROJECT/build/include \
   -I $ONNX_MLIR/include -L $LLVM_PROJECT/build/lib \
   -lLLVMSupport -lLLVMDemangle -lcurses -lpthread -ldl "$@" &&
   echo "Success, built $RUN_BIN_RELATIVE"
# Two tests joined by && instead of the obsolescent `-a` operand of `[`.
if [ "$#" -eq 1 ] && [ "$(uname -s)" = Darwin ] ; then
  echo ""
  echo "TO RUN: easiest is to cd into the directory where the model was built"
  echo "(run \"otool -L $RUN_BIN_RELATIVE\" to see $(basename $1) path)"
fi
| true |
d049de77deebc02d30608ffc5eb4914481c264b1 | Shell | DavidAlphaFox/BSDRP | /BSDRP/make.conf | UTF-8 | 1,894 | 2.6875 | 3 | [] | no_license | #!/bin/sh
# Shell-syntax build configuration; sourced by the BSDRP build scripts.
# Name of the product
# alphanumerics caracters only (not - or _)
NAME="BSDRP"
# If this project is a child of a father project put the name of the father project here
MASTER_PROJECT=""
# SVN revision number to sync with
# (despite the comment above, these are git commit hashes — see SRC_METHOD)
SRC_REV="7b8696bf128"
PORTS_REV="98b2a4841162"
SRC_METHOD="git"
SRC_REPO="https://git.freebsd.org/src"
SRC_BRANCH="main"
PORTS_REPO="https://git.freebsd.org/ports"
PORTS_BRANCH="main"
# Where the FreeBSD source tree lives
FREEBSD_SRC="${PROJECT_DIR}/FreeBSD/src"
# Where the FreeBSD custom sources patches lives
SRC_PATCH_DIR="${PROJECT_DIR}/patches"
# Where the FreeBSD ports tree lives
PORTS_SRC="${PROJECT_DIR}/FreeBSD/ports"
# Where the port tree custom patches lives
PORT_PATCH_DIR="${SRC_PATCH_DIR}"
# Where the nanobsd tree lives
NANOBSD_DIR="${FREEBSD_SRC}/tools/tools/nanobsd"
# Target disk size (in MB)
# (the ':' builtin makes this a default; a pre-set DISK_SIZE wins)
: ${DISK_SIZE="2000"}
# List of kernel modules to build and install
# backlight is only useful to prevent boot crash when loader.conf instruct to load mlx5en->linuxkpi->backlight
SHARED_MODULES="backlight blake2 bridgestp carp dtrace dummynet fdescfs if_bridge if_disc if_epair if_gre if_infiniband if_lagg if_stf if_tuntap if_vxlan ipdivert ipfilter ipfw ipfw_nat ipfw_nat64 ipfw_pmod ipfw_nptv6 ipsec i2c fib_dxr ksyms libalias mlx4 mlx4en netgraph nullfs opensolaris pf pfsync pflog rc4 unionfs"
# Per-architecture module lists extend the shared set above.
NANO_MODULES_i386="${SHARED_MODULES} acpi amdsbwd amdtemp cpuctl coretemp i2c ispfw ichwd ipmi sppp hifn hwpmc padlock safe glxsb vmware"
NANO_MODULES_i386_xenhvm=${NANO_MODULES_i386}
NANO_MODULES_i386_xenpv="${SHARED_MODULES} acpi sppp"
NANO_MODULES_amd64="${SHARED_MODULES} amdsbwd amdsmn amdtemp cpuctl coretemp dpdk_lpm4 dpdk_lpm6 ena ichwd ioat ipmi ispfw sppp hifn hyperv qat qatfw hwpmc padlock qlxgb qlxgbe safe vmware"
NANO_MODULES_amd64_xenhvm=${NANO_MODULES_amd64}
NANO_MODULES_arm=""
NANO_MODULES_sparc64="${SHARED_MODULES} sppp"
| true |
78849b8c65c8286099cd6df2599be4021286a607 | Shell | abiosoft/dotfiles | /bin/bin/convertvideo | UTF-8 | 280 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Convert every *.$1 video in the current directory to $2 via ffmpeg,
# moving each successfully converted original into ./converted.
set -eux
old_extension=$1
new_extension=$2
mkdir -p ./converted
for i in *."$old_extension"; do
  # With no matches the glob stays literal; skip it instead of handing
  # ffmpeg a nonexistent file (which would abort under set -e).
  [ -e "$i" ] || continue
  # Report the file being converted (the original echoed "$1" — the
  # extension — instead of the current file).
  echo converting "$i" to "$new_extension"...
  echo
  ffmpeg -i "$i" "${i%.*}.converted.${new_extension}" \
    && mv "$i" ./converted
  echo done.
  echo
done
| true |
47b73e19f3e359e1f5331b9935e739c54a7f1a43 | Shell | BabylonSix/zsh | /package-managers.sh | UTF-8 | 1,688 | 3.5 | 4 | [] | no_license | # Package Managers
# BREW  -> Homebrew Software Installer
# NVM   -> Node Version Manager
# NPM   -> Node Package Manager
# PYENV -> Python Version Manager
# Homebrew Package Manager
# If brew is not installed, run setupZSH
# (setupzsh is expected to be defined elsewhere in this shell setup)
if [[ ! -a /opt/homebrew/bin/brew ]]; then
	setupzsh;
fi
# Node Version Manager
export NVM_DIR="$HOME/.nvm"
# This loads nvm
[ -s "/opt/homebrew/opt/nvm/nvm.sh" ] && . "/opt/homebrew/opt/nvm/nvm.sh"
# This loads nvm bash_completion
[ -s "/opt/homebrew/opt/nvm/etc/bash_completion.d/nvm" ] && . "/opt/homebrew/opt/nvm/etc/bash_completion.d/nvm"
# Python Version Manager
# To use Homebrew's directories rather than ~/.pyenv add to your profile:
export PYENV_ROOT=/opt/homebrew/var/pyenv
# To enable shims and autocompletion add to your profile:
if
	which pyenv > /dev/null; then eval "$(pyenv init -)";
fi
# show version info of set languages
# (prints brew, the Node toolchain, then the Python toolchain versions)
i() {
	echo 'Brew \t '$(brew --version)
	echo
	echo 'NVM \t v'$(nvm --version)
	echo 'NPM \t v'$(npm --version)
	echo 'Node \t '$(node --version)
	echo
	echo $(python --version)
	echo $(pyenv --version)
	echo $(pip3 --version)
	echo $(python3 --version)
}
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/opt/homebrew/Caskroom/miniforge/base/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/opt/homebrew/Caskroom/miniforge/base/etc/profile.d/conda.sh" ]; then
        . "/opt/homebrew/Caskroom/miniforge/base/etc/profile.d/conda.sh"
    else
        export PATH="/opt/homebrew/Caskroom/miniforge/base/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<
| true |
56e89709fc8446ab54f8858232281cee5a8fe3be | Shell | eib/heroku-buildpack-cpan | /bin/detect | UTF-8 | 142 | 2.796875 | 3 | [] | no_license | #!/bin/sh
# This pack is valid for apps with a cpanfile in the root
# ($1 is the build directory; quoted so paths containing spaces work).
if [ -f "$1/cpanfile" ]; then
  echo "Perl/cpan"
  exit 0
else
  exit 1
fi
| true |
ad1dd93b6ee1c569152db33d16f66e994746dfa6 | Shell | kairen/simple-device-plugin | /hack/setup-vm.sh | UTF-8 | 1,473 | 3.375 | 3 | [] | no_license | #!/bin/bash
#
# Setup vagrant vms.
#
set -eu
# Copy hosts info
# (heredoc content below is written verbatim to /etc/hosts — do not edit
# its formatting)
cat <<EOF > /etc/hosts
127.0.0.1 localhost
127.0.1.1 vagrant.vm vagrant
192.16.35.11 k8s-m1
192.16.35.12 k8s-n1
192.16.35.13 k8s-n2
# The following lines are desirable for IPv6 capable hosts
::1     localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
EOF
# Install docker
curl -fsSL "https://get.docker.com/" | sh
# Install kubernetes
# (skipped on the LDAP server, which only needs docker)
if [ ${HOSTNAME} != "ldap-server" ]; then
  curl -s "https://packages.cloud.google.com/apt/doc/apt-key.gpg" | sudo apt-key add -
  echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
  sudo apt-get update && sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
fi
# kubelet requires swap off; also drop the swap entry from fstab in place.
swapoff -a && sysctl -w vm.swappiness=0
sed '/vagrant--vg-swap_1/d' -i /etc/fstab
# Master bootstraps the cluster; nodes matching k8s-n* join it.
if [ ${HOSTNAME} == "k8s-m1" ]; then
  kubeadm init --service-cidr 10.96.0.0/12 \
               --kubernetes-version v1.10.0 \
               --pod-network-cidr 10.244.0.0/16 \
               --token b0f7b8.8d1767876297d85c \
               --apiserver-advertise-address 192.16.35.11
  # copy k8s config
  mkdir ~/.kube && cp /etc/kubernetes/admin.conf ~/.kube/config
  # deploy calico network
  kubectl apply -f "https://kairen.github.io/files/k8s-ldap/calico.yml.conf"
elif [[ ${HOSTNAME} =~ k8s-n ]]; then
  kubeadm join 192.16.35.11:6443 \
    --token b0f7b8.8d1767876297d85c \
    --discovery-token-unsafe-skip-ca-verification
fi
| true |
3ddf471fd2da518fa7c46cc7b36461edf9906ba3 | Shell | gpertea/gffcompare | /prep_mac.sh | UTF-8 | 448 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Package a macOS release tarball of gffcompare.
# Extract VERSION: the text between the quotes on the #define line.
# (grep -F replaces the deprecated fgrep alias.)
ver=$(grep -F '#define VERSION ' gffcompare.cpp)
ver=${ver#*\"}
ver=${ver%%\"*}
# Refuse to continue with an empty version: $macpack would be malformed
# and the rm -rf / mkdir below would act on the wrong names.
if [ -z "$ver" ]; then
  echo "Error: could not parse VERSION from gffcompare.cpp" >&2
  exit 1
fi
pack=gffcompare-$ver
macpack=$pack.OSX_x86_64
echo "preparing $macpack.tar.gz"
echo "-------------------"
# Clean previous staging dir/tarball, rebuild, stage artifacts, package.
/bin/rm -rf "$macpack"
/bin/rm -f "$macpack.tar.gz"
mkdir "$macpack"
make clean
make release
cp LICENSE README.md gffcompare trmap "$macpack"/
tar cvfz "$macpack.tar.gz" "$macpack"
ls -l "$macpack.tar.gz"
echo "scp $macpack.tar.gz salz:~/html/software/stringtie/dl/"
| true |
ba42889caf5bf5fb3d8161357b1bebc0aadd749e | Shell | fuzzm/fuzzm-project | /benchmarks/LAVA-M/uniq/coreutils-8.24-lava-safe/tests/id/smack.sh | UTF-8 | 1,342 | 3.203125 | 3 | [
"Apache-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-only",
"GFDL-1.3-or-later"
] | permissive | #!/bin/sh
# SMACK test for the id-command.
# Derived from tests/id/context.sh and tests/id/no-context.sh.
# Copyright (C) 2014-2015 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ id
# Skip the whole test unless the system supports SMACK (helper from init.sh).
require_smack_
# Check the string "context=" presence without specified user.
id > out || fail=1
grep 'context=' out || { cat out; fail=1; }
# Check context=" is absent without specified user in conforming mode.
POSIXLY_CORRECT=1 id > out || fail=1
grep 'context=' out && fail=1
# Check the string "context=" absence with specified user.
# But if the current user is nameless, skip this part.
id -nu > /dev/null && id $(id -nu) > out
grep 'context=' out && fail=1
# Exit (from init.sh) reports the accumulated $fail status.
Exit $fail
| true |
5825e215c9b8c883917e1d23b6af0568a70721e4 | Shell | stjbrown/f5-azure-automation-lab | /scripts/spDel.sh | UTF-8 | 485 | 3.34375 | 3 | [] | no_license | #!/bin/bash
#This Script deletes all Apps that start with student from the subscription.
#
#Required Enviroment Variable
#AZURE_USERNAME=
#AZURE_PW=
# Identifier-URI prefix that marks student-created apps.
USER_PREFIX='http://stu'
#Log into Azure and Output the SUbcription ID
LOGIN=$(az login -u "$AZURE_USERNAME" -p "$AZURE_PW")
# First identifier URI of every registered app, one per line (-r strips quotes).
AZURE_APPS=$(az ad app list | jq ".[] | .identifierUris[0]" -r)
# Unquoted on purpose: split the jq output into one URI per iteration.
for APP in $AZURE_APPS;
do
    # Match against USER_PREFIX instead of duplicating the literal; the
    # variable was previously declared but never used.
    if [[ $APP == "${USER_PREFIX}"* ]]; then
        az ad app delete --id "$APP"
    fi
    #az ad user delete --upn-or-object-id $ID
done
| true |
472cf4e9adafb65e6c24dd365457f2d000a73beb | Shell | ws2356/ss-cache-proxy | /scripts/native_tests.sh | UTF-8 | 207 | 2.96875 | 3 | [
"MIT"
] | permissive | #! /bin/sh
# Absolute directory of this script (kept for parity with the original,
# though nothing below uses it).
scripts_dir=$(dirname "$0")
# Prepend $PWD when the path is relative (no leading '/'); replaces the
# original backtick/echo/grep pipeline with a POSIX case pattern.
case $scripts_dir in
  /*) ;;
  *) scripts_dir="$(pwd)/${scripts_dir}" ;;
esac
# $1: directory containing the compiled native test binaries.
test_bin_dir=$1
"${test_bin_dir}"/header
"${test_bin_dir}"/database
| true |
ffda0c909697d11b4c547266cfe14e8db7f89681 | Shell | flooose/public_dots | /bash_lib/scripts_available/crypt_utils.bash | UTF-8 | 4,041 | 3.9375 | 4 | [] | no_license | # Copyright (c) 2014, christopher floess
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# A simple password manager for bash. Simply source this file and
# you're ready to query your passwords database in
# ~/.password/db.txt.cpt
#
# TODO:
# - Better error handling
# 1. dependency checks (bash version)
# 2. missing files/wrong file structure
# - Loop with Q for quit on bad input
# 1. prints usage/options
# - multi-grep
# - login gets copied wrong when there are spaces in domain
# - implement padd better
# Query the encrypted password database (~/.password/db.txt.cpt) and copy
# the selected entry's login and password to the X primary and clipboard
# selections. Usage: p [-h] <pattern>. Records are '|'-separated lines;
# returns non-zero on any failure.
function p(){
    local CHOICE CLIPBOARD_COMMAND RESULTS SPACIFIED_CHOICE PASSWD LOGIN
    # Necessary to make the function use options passed to it instead
    # of those of the shell in which it was invoked
    local OPTIND
    local DIVIDER='-----'
    # Pick the clipboard tool: pbcopy on non-Linux (macOS), xclip on X11.
    if ! uname -a | grep -qi linux;
    then # mac os
        CLIPBOARD_COMMAND=pbcopy
    else # regular xorg
        CLIPBOARD_COMMAND=xclip
    fi
    usage(){
        echo 'Usage: p [-h] password';
    }
    # Print a numbered menu of matches (passwords masked by sed) and read
    # the user's selection into CHOICE.
    display_and_read_choice(){
        # Display menu of choices
        local LAST_INDEX=$((${#RESULTS[*]} - 1))
        for entry in $(seq 0 $LAST_INDEX); do
            local LINE=$(echo ${RESULTS[$entry]} | sed -e 's/\(.*|\).*$/\1|********/')
            echo -n -e "$entry:\t$LINE\n"
        done
        echo $DIVIDER
        # Choose
        builtin read -p "Choose entry: " -a CHOICE
    }
    # Dependencies
    for dependency in ccrypt $CLIPBOARD_COMMAND; do
        if ! type $dependency &> /dev/null;
        then
            echo "Missing dependency: $dependency"
            return 1
        fi
    done
    if ! [ -f ~/.password/db.txt.cpt ];
    then
        echo "Expected to find ~/.password/db.txt.cpt"
        # NOTE(review): `echo <<EOF` ignores its stdin, so the here-doc
        # below is never printed — probably meant to be `cat <<EOF`.
        echo <<EOF
This can be achieved by running:
$ touch ~/.password/db.txt
$ ccrypt ~/.password/db.txt
EOF
        return 1
    fi
    # Show usage when -h was given or no search pattern was supplied.
    getopts h help
    if [ $help = 'h' -o $# -eq 0 ]; then
        usage
        return 0
    fi
    # Search for matches
    mapfile RESULTS < <(ccat ~/.password/db.txt.cpt | grep $1)
    if [ 0 -eq ${#RESULTS[*]} ]; then
        echo "$1: no such password entry"
        return 1
    fi
    echo $DIVIDER
    display_and_read_choice RESULTS
    if [ $CHOICE -gt ${#RESULTS[*]} ]; then
        echo "Invalid choice"
        return 1
    fi
    echo $DIVIDER
    # Copy to clipboard
    # Turn '|' separators into spaces, then split into whitespace fields:
    # FIELDS[1] is the login, the last field is the password.
    SPACIFIED_CHOICE=$(echo ${RESULTS[${CHOICE[0]}]} | tr -t '|' ' ')
    builtin read -a FIELDS < <(echo $SPACIFIED_CHOICE)
    LOGIN=${FIELDS[1]}
    PASSWD=${FIELDS[-1]}
    echo -n $LOGIN | $CLIPBOARD_COMMAND -selection primary
    echo -n $PASSWD | $CLIPBOARD_COMMAND -selection clipboard
    # Clean up
    echo -e 'Copied LOGIN and PASSWORD to primary and secondary X-selection...\n'
}
# Open the encrypted password database in the user's $EDITOR.
# NOTE(review): $EDITOR is unquoted, so a value like "emacs -nw"
# word-splits into command plus arguments — confirm that is intended.
function padd(){
    $EDITOR ~/.password/db.txt.cpt
}
| true |
239efde5670c2003f0d33038a37efc55a6e27815 | Shell | dav23r/nand2tetris | /08/code_dir/compile_and_execute_old.sh | UTF-8 | 1,143 | 4.21875 | 4 | [] | no_license | #!/bin/bash
# ANSI escape sequences for colored status output ("usual" resets).
red="\033[0;31m"
green="\033[;32m"
usual="\033[0m"
# determines whether file is a directory and contains vm files
# Invoked via `bash -c` from find(1), so it reports with `exit`:
# 0 -> directory with at least one .vm file, 255 (exit -1) -> not a
# directory, 1 -> directory without .vm files.
function needs_translation(){
	local file_name=$1
	if [ ! -d "$file_name" ]; then
		exit -1;
	fi
	# Glob instead of `ls | grep -E '*.vm'`: the old ERE was malformed
	# (leading '*') and parsing ls breaks on unusual file names.
	local f
	for f in "$file_name"/*.vm; do
		[ -e "$f" ] && exit 0
	done
	exit 1
}
# Runs the VM translator on one directory; invoked via `bash -c` from
# find(1), so it propagates the translator's status with `exit`.
function translate(){
	local dir_name=$1
	# %s format keeps directory names containing '%' from being
	# interpreted (the original used the variable as the format string).
	printf 'Translating files in directory %s\n' "$dir_name"
	./VMtranslator "$dir_name"
	exit $?
}
# export to subshells
export -f needs_translation
export -f translate
# Find directories containing .vm files, run translator on them
# and generate .ams file in each
# (find's -a/-o operators implement if/then/else over the exec'd helpers:
# the success or error printf fires depending on translate's exit status)
find .. -exec /bin/bash -c "needs_translation {}" \; \
	-a \( -exec /bin/bash -c "translate {}" \; \
	-a -exec printf "Vm files in {} ${green}sucessfully${usual} translated\n\n" \; \
	-o -exec printf "${red}Error${usual} in translation of files located in {}\n\n" \; \
	\)
# find exits 0 unless it failed to traverse or execute something.
if [ $? -eq 0 ]; then
	echo -e ">> ${green}No errors reported :)${usual} <<"
else
	echo -e ">> ${red}Errors were reported during translation of one or more files :(${usual} <<"
fi
| true |
50b283eb9221d96ce17e88bec6f2c2dc20862de8 | Shell | CMSCompOps/MonitoringScripts | /Site_Config/phedex-node/run_phedex_node.sh | UTF-8 | 638 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# written by John Artieda
# set up a proxy to read site config files
#source /afs/cern.ch/project/gd/LCG-share/new_3.2/etc/profile.d/grid_env.sh
#voms-proxy-init -voms cms
#X509_USER_PROXY=/tmp/x509up_u47967;export X509_USER_PROXY
#Run the script
# Working directory, output file prefix, and the node-name filter string
# passed to the Python scraper below.
path="/afs/cern.ch/user/j/jartieda/MonitoringScripts/Site_Config/phedex-node"
txt=$path"/phedex_node"
findText="phedex-node"
echo "python phedex_node.py > $txt.txt and $txt.json"
# Run the scraper; stdout and stderr are both captured in the log file.
python $path"/"phedex_node.py $txt $findText &> $path"/"phedex_node.log
# Preserve the scraper's exit status for the diagnostic line below.
problem="$?"
echo "problem: $problem"
# Publish the .txt result to the public SST web area.
cp $txt".txt" /afs/cern.ch/user/c/cmst1/www/SST/
echo "The files were created succesfully." | true |
d11767bfee3910685ab51c283d64ef0b7add059a | Shell | hm1365166/opencsw | /csw/mgar/pkg/lighttpd/trunk/files/cswlighttpd | UTF-8 | 1,229 | 3.671875 | 4 | [] | no_license | #!/bin/sh
#
# Copyright 2005 Sergiusz Pawlowicz All rights reserved.
# Use is subject to license terms.
# Modified for lighttpd
#
#
# Installation layout and runtime paths for the CSW lighttpd package.
LIGHTTPD_HOME=/opt/csw
CONF_FILE=/etc/opt/csw/lighttpd.conf
PIDFILE=/var/opt/csw/run/lighttpd.pid
HTTPD="${LIGHTTPD_HOME}/sbin/lighttpd"
# Abort when the config file is missing. The original executed
# `exit $CONF_FILE`, handing exit a path instead of a status code.
[ ! -f "${CONF_FILE}" ] && exit 1
start_service() {
    # Remove any stale pidfile before launching.
    /bin/rm -f ${PIDFILE}
    # Enable NCA:
    # NOTE(review): presumably Solaris Network Cache Accelerator —
    # ncakmod.conf defines $status; when "enabled", preload ncad_addr.so.
    NCAKMODCONF=/etc/nca/ncakmod.conf
    if [ -f $NCAKMODCONF ]; then
        . $NCAKMODCONF
        if [ "x$status" = "xenabled" ]; then
            HTTPD="env LD_PRELOAD=/usr/lib/ncad_addr.so $HTTPD"
        fi
    fi
    # Replace this shell with the daemon; stderr folded into stdout.
    exec $HTTPD -f ${CONF_FILE} 2>&1
}
stop_service() {
    # Politely ask the daemon recorded in the pidfile to exit (SIGTERM);
    # silently does nothing when no pidfile is present.
    if [ -f "$PIDFILE" ]; then
        /usr/bin/kill -TERM "$(/usr/bin/cat "$PIDFILE")"
    fi
}
refresh_service() {
    # Signal the daemon to re-read its configuration (SIGHUP);
    # silently does nothing when no pidfile is present.
    if [ -f "$PIDFILE" ]; then
        /usr/bin/kill -HUP "$(/usr/bin/cat "$PIDFILE")"
    fi
}
# Dispatch on the init-script action argument.
case "$1" in
start)
    start_service
    ;;
refresh|reload)
    # Re-read configuration without a full restart.
    refresh_service
    ;;
stop)
    stop_service
    ;;
restart)
    # Give the old process a second to exit before starting anew.
    stop_service
    sleep 1
    start_service
    ;;
*)
    echo "Usage: $0 {start|stop|refresh|reload|restart}"
    exit 1
    ;;
esac
| true |
3f1cff522377dd6b8942d208fb42b90e41174188 | Shell | krzysztof/dotfiles | /i3/screens.sh | UTF-8 | 1,107 | 2.53125 | 3 | [] | no_license | #!/bin/sh
# Laptop screen as first
FIRST="eDP1"
# External Screen on DP port
SECOND="DP1"
# DPI presets for the laptop panel and the external 4K display.
DPI_XPS=192
DPI_4K27=160
# Start from a known state: external output off, laptop panel on.
xrandr --output $SECOND --off --output $FIRST --auto --dpi $DPI_XPS --scale 1x1
# i3 prompt offering layout choices; each button runs its xrandr command.
# NOTE(review): the trailing backslash after the last button continues the
# command onto the comment line below, which ends it — harmless, but confirm.
i3-nagbar -m "SCREEN CONFIG UTILITY (run 'arandr' for config)" -t warning \
-b "XPS+4K" "xrandr --output $FIRST --dpi $DPI_4K27 --scale 1x1 --auto --output $SECOND --dpi $DPI_4K27 --scale 1x1 --auto --right-of $FIRST" \
-b "Laptop" "xrandr --output $FIRST --dpi $DPI_XPS --scale 1x1" \
-b "LG 4K" "xrandr --output $SECOND --auto --dpi $DPI_4K27 --output $FIRST --off" \
# Original Script by ebrnd ( http://ebrnd.de/?p=501 )
##!/bin/sh
#
#xrandr --output VGA-0 --off --output DVI-0 --off --output LVDS --auto
#
#i3-nagbar -m "EBRND'S SUPER-COOL I3WM SCREEN CONFIG UTILITY" -t warning \
# -b "LVDS + DVI" "xrandr --output VGA-0 --off --output LVDS --auto --output DVI-0 --auto --right-of LVDS" \
# -b "LVDS + VGA" "xrandr --output DVI-0 --off --output LVDS --auto --output VGA-0 --auto --right-of LVDS" \
# -b "CAPS OFF" "python /home/$USER/.local/bin/caps_lock_off.py"
#
#sh ~/.fehbg
| true |
1f6b12702f3ad3e7c7ce7e386748ab5ab4ba1b04 | Shell | nmccready/snippets | /bash/goClean | UTF-8 | 908 | 3.296875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# set -e
# set -o pipefail
# Remove a Go package's sources and compiled artifacts from the first
# GOPATH entry after running `go clean -i`.
# DANGER: executes `rm -rf` on parent directories derived from the
# package path — review carefully before reuse.
goclean() {
 local pkg=$1; shift || return 1
 local ost
 local cnt
 local scr
 echo $pkg
 # Clean removes object files from package source directories (ignore error)
 go clean -i $pkg # &>/dev/null
 echo clean
 # Set local variables
 # [[ "$(uname -m)" == "x86_64" ]] \
 # && ost="$(uname)";ost="${ost,,}_amd64" \
 # cnt = the package path with every non-'/' character deleted, so its
 # length counts path separators (e.g. github.com/user/repo -> 2).
 cnt="${pkg//[^\/]}"
 # echo ost: $ost
 echo cnt: $cnt
 # Delete the source directory and compiled package directory(ies)
 # (two separators: drop the last path component; more: drop the last two)
 if (("${#cnt}" == "2")); then
   rm -rf "${GOPATH%%:*}/src/${pkg%/*}"
   # rm -rf "${GOPATH%%:*}/pkg/${ost}/${pkg%/*}"
   rm -rf "${GOPATH%%:*}/pkg/${pkg%/*}"
 elif (("${#cnt}" > "2")); then
   rm -rf "${GOPATH%%:*}/src/${pkg%/*/*}"
   # rm -rf "${GOPATH%%:*}/pkg/${ost}/${pkg%/*/*}"
   rm -rf "${GOPATH%%:*}/pkg/${pkg%/*/*}"
 fi
 # Reload the current shell
 # NOTE(review): sources ~/.zshrc from a bash-shebang file — confirm this
 # function is actually used from zsh-compatible contexts.
 source ~/.zshrc
}
# When this file is sourced, stop after defining goclean (`return` in a
# subshell only succeeds in a sourced context, so it probes harmlessly).
(return 0 2>/dev/null) && return
# When executed directly, forward all CLI arguments verbatim; quoting
# "$@" keeps arguments containing spaces intact (the original used $@).
goclean "$@"
| true |
3b9f40acf0228855a531b520f2c0f5b7088e7fc4 | Shell | salimuddin87/Shell_Program | /programs/factorial.sh | UTF-8 | 451 | 4.1875 | 4 | [] | no_license |
factorial() {
fact=1
if [ $1 -lt 0 ]
then
echo "Negative no not allowed!"
exit 0
fi
if [ $1 -eq 0 -o $1 -eq 1 ]
then
echo "Factorial : 1"
else
num=$1
while [ $num -ne 0 ]
do
fact=`expr $fact \* $num`
num=`expr $num - 1`
done
echo "Factorial : $fact"
fi
}
read -p "Enter an integer : " num
echo "Number : $num"
factorial $num
| true |
d146f397232a2953fea5490ace89b64ec181ade0 | Shell | deemsk/dotfiles | /bash_profile | UTF-8 | 2,263 | 2.734375 | 3 | [] | no_license | PS1="\w$ " # Custom prompt text (full pwd).
# --- Terminal & ls colors ---
export CLICOLOR=1 # Enable terminal colors
export LSCOLORS=Gxfxbxdxcxegedabagacad # File-type color definition (e.g. files=grey, directories=bold cyan, etc.) -- Dark background.
#export LSCOLORS=ExFxCxDxBxegedabagacad # Light background.
# STDERR Red.
# NOTE: Installing on OS X will break the `open` command line utility. So
# things like `mvim` and `open` itself will not work unless the application being
# opened is already opened. It's because of flat namespace forced by
# `DYLD_FORCE_FLAT_NAMESPACE` which is required by `DYLD_INSERT_LIBRARIES`.
# Alternative to enabling it globally via shell config is to create alias and
# use it to selectively colorize stderr for the commands you run:
# $ alias stderred='LD_PRELOAD=/absolute/path/to/lib/stderred.so'
# $ stderred java lol
export DYLD_INSERT_LIBRARIES=$HOME/Documents/dotfiles/stderred/lib/stderred.dylib DYLD_FORCE_FLAT_NAMESPACE=1
# --- Aliases ---
alias cls="clear" # Windows command. :)
alias vi=/Applications/MacVim.app/Contents/MacOS/Vim # Override pre-installed Vim and use Homebrew"s newer version MacVim instead.
alias vim="vi"
alias ll="ls -l"
alias la="ls -Al" # show hidden files, with -l.
alias grep="grep --color=auto -I" # Colorful, skipping binary files.
alias less="less -R" # Colorful less.
alias ghci="ghci-color"
# --- Pager / man-page colors ---
export LESS="FRSXQ" # Colorful diffing in Mercurial.
export LESS_TERMCAP_mb=$'\E[01;31m' # Colorful man pages.
export LESS_TERMCAP_md=$'\E[01;31m'
export LESS_TERMCAP_me=$'\E[0m'
export LESS_TERMCAP_se=$'\E[0m'
export LESS_TERMCAP_so=$'\E[01;44;33m'
export LESS_TERMCAP_ue=$'\E[0m'
export LESS_TERMCAP_us=$'\E[01;32m'
# --- Search paths ---
PATH="/usr/local/bin:/usr/local/sbin:${PATH}" # Give priority to Homebrew's bin & sbin dirs on top of the system's dirs.
PATH="/usr/local/share/python:${PATH}" # Add Homebrew's Python to $PATH, before the system's Python.
#PATH="~/Applications/google-appengine:${PATH}" # Necessary for using with django-nonrel.
PATH="$HOME/.cabal/bin:$PATH" # Haskell Packages.
export PATH
#NODE_PATH="/usr/local/lib/jsctags:${NODE_PATH}" # Add doctorjs to Node's library path.
NODE_PATH="/usr/local/lib/node_modules:${NODE_PATH}" # Add Homebrew's node.js package dir to path.
export NODE_PATH
| true |
f24d96b138911bfb8498d7408df2e9b68e35e8d0 | Shell | A-Why-not-fork-repositories-Good-Luck/APT-Detection | /honeyd_config.sh | UTF-8 | 4,387 | 3.59375 | 4 | [] | no_license | #!/bin/bash
clear
# -------------------------------------------------------------
# Michael Eichinger, BSc
# Release: v 1.0
# Date: 30.04.2019
# Email: office@eichinger.co.at
# NOTE: installation script
# Startin position is Raspberry Pi 2 or 3 with brand new
# installation
# Image: Stretch 13.11.2018
# -------------------------------------------------------------
# Step 1) change the static ip-address
# -------------------------------------------------------------
if [ $(id -u) -ne 0 ]; then
printf "This script must be run as root. \n"
exit 1
fi
# ########################################
# set parameter for this honeypot
#
raspi_ip="10.0.0.22" # this ist the first honeypot
raspi_pubip="192.168.1.22"
raspi_net_mask="24"
raspi_pubnet_mask="24"
raspi_gateway="10.0.0.1"
raspi_pubgateway="192.168.1.1"
raspi_dns='208.67.222.222 208.67.220.220'
echo "###############################################"
echo "Installations-Script Version 2019-04-30"
echo "Copyright Michael Eichinger 2019"
echo "###############################################"
echo "Honeyd Honeypot wird eingerichtet:"
echo " Der Vorgang kann mehrere Minuten dauern, bitte um Geduld!"
echo " "
# ########################################
# edit SSH Banner
rm /etc/ssh/ssh-banner-honeyd.txt
cp ~/APT-Detection/honeyd/ssh-banner-honeyd.txt /etc/ssh/
echo " - SSH Banner wurde erstellt"
cat /etc/ssh/ssh-banner-honeyd.txt
sed -i 's/#Banner none/Banner \/etc\/ssh\/ssh-banner-honeyd.txt/g' /etc/ssh/sshd_config
sed -i 's/ListenAddress 0.0.0.0/ListenAddress '$raspi_ip'/g' /etc/ssh/sshd_config
# ########################################
# update IP address
#
echo " - IP Adresse wird angepasst"
cp /etc/dhcpcd.conf /etc/dhcpcd.conf.orig
sed -i 's/static ip_address=10.0.0.30\/24/static ip_address='$raspi_ip'\/'$raspi_net_mask'/g' /etc/dhcpcd.conf
echo 'interface eth1' >> /etc/dhcpcd.conf
echo 'static ip_address='$raspi_pubip'/'$raspi_pubnet_mask >> /etc/dhcpcd.conf
echo 'static routers='$raspi_pubgateway >> /etc/dhcpcd.conf
echo 'static domain_name_servers='$raspi_dns >> /etc/dhcpcd.conf
echo -e "\n- fixe" $raspi_ip " Management IP Adresse wurde eingerichtet"
echo -e "\n- fixe" $raspi_pubip " LAN IP Adresse wurde eingerichtet"
echo -e "\n- Konfiguration wurde abgeschlossen!"
# change hostname
sed -i 's/raspberrypi/honeyd/g' /etc/hostname
sed -i 's/raspberrypi/honeyd/g' /etc/hosts
echo -e "\n- IP Adressen und Hostname wurden geaendert"
# -------------------------------------------------------------
# Step 2) install honeyd honeypot software
# -------------------------------------------------------------
# Refresh the APT package index before installing honeyd build dependencies.
# Bug fix: was "apt-get updade -y" (typo), which always failed.
apt-get update -y
# download the honeyd software
git clone https://github.com/DataSoft/Honeyd.git /usr/src/Honeyd/
cd /usr/src
# install required build dependencies before configuring and building honeyd
apt-get install libdnet libevent-dev libdumbnet-dev libpcap-dev libpcre3-dev -y
apt-get install libedit-dev bison flex libtool automake -y
# install honeyd software
cd Honeyd
./autogen.sh
./configure
make
make install
# logging
mkdir /var/log/honeyd
# install farpd
apt-get install farpd -y
rm /etc/init.d/farpd
cp /root/APT-Detection/honeyd/etc/systemd/system/farpd.service /etc/systemd/system/
chmod 644 /etc/systemd/system/farpd.service
systemctl daemon-reload
systemctl enable farpd.service
systemctl start farpd.service
# copy the config
cp /root/APT-Detection/honeyd/usr/src/Honeyd/honeyd.conf /usr/src/Honeyd/honeyd.conf
# starting as service and autostart
cp /root/APT-Detection/honeyd/etc/systemd/system/honeyd.service /etc/systemd/system/
chmod 644 /etc/systemd/system/honeyd.service
systemctl daemon-reload
systemctl enable honeyd.service
systemctl start honeyd.service
# -------------------------------------------------------------
# Step 3) config filebeat
# -------------------------------------------------------------
cp ~/APT-Detection/honeyd/etc/filebeat/filebeat.yml /etc/filebeat/
cp ~/APT-Detection/honeyd/lib/systemd/system/filebeat.service /lib/systemd/system/
echo -e "\n- Konfiguration fuer Filebeat Honeyd LogDateien wurde eingerichtet"
cat /etc/filebeat/filebeat.yml
systemctl enable filebeat.service
service filebeat start
sleep 3
service filebeat status
echo -e "\n- Filebeat Installation ist abgeschlossen"
echo -e "\n"
echo -e "\n- Honeyd Honeypot Konfiguration wurde abgeschlossen!"
echo -e "\n"
echo -e "\n nach einem finalen Reboot kann Honeyd fertig eingesetzt werden"
| true |
bd3e8a52775f8ef37860a5cf3ec3e967cde8fcdc | Shell | jhroot/elife-bot | /install.sh | UTF-8 | 260 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e

# Create the Python 2 virtualenv the first time this script runs.
if [ ! -d venv ]; then
    virtualenv --python=$(which python2) venv
fi

source venv/bin/activate

# Drop any previously installed elifetools before pulling fresh requirements.
if pip list | grep elifetools; then
    pip uninstall -y elifetools
fi

pip install -r requirements.txt
| true |
9efe844bd5a0ec9bc4d16ecfa9d7cbbbd6ca04f6 | Shell | gabriel-almeida/multiNLI | /parse_learning_curve.sh | UTF-8 | 786 | 3.484375 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/bin/bash
if [ "$#" -eq 0 ]; then
log_file=$(ls -1t logs/*.log | head -n1)
else
log_file="$1"
fi
step=$(mktemp)
egrep -o '.* Dev-matched cost: [0-9.]+' $log_file | egrep -o 'Step: [0-9]+' | egrep -o '[0-9]+' > $step
infe=$(mktemp)
egrep -o 'Dev inference rule: .*= [0-9.]+' $log_file | egrep -o '[0-9.]+$' > $infe
contra=$(mktemp)
egrep -o 'Dev contradiction rule: .*= [0-9.]+' $log_file | egrep -o '[0-9.]+$' > $contra
neutral=$(mktemp)
egrep -o 'Dev neutral rule: .*= [0-9.]+' $log_file | egrep -o '[0-9.]+$' > $neutral
val=$(mktemp)
egrep -o 'Dev-matched acc: [0-9.]+' $log_file | egrep -o '[0-9.]+' | sed -e 's/$/*100/' | bc > $val
echo "$log_file" && echo -e '$Step$ $ValidationAcc$ $Inference$ $Contradition$ $Neutral$' && paste -d' ' $step $val $infe $contra $neutral
| true |
faa192d4dc3a6362316347b3c40e51d880274d79 | Shell | goodcodedev/wp-vagrant | /vagrant/install.sh | UTF-8 | 1,996 | 2.9375 | 3 | [
"MIT"
] | permissive | sudo apt-get update
sudo apt-get install -y apache2 apache2-utils
sudo a2enmod rewrite
# Allow .htaccess
sudo sh -c 'echo "
<Directory /var/www/html/>
AllowOverride All
</Directory>
" >> /etc/apache2/apache2.conf'
sudo systemctl enable apache2
# Mysql will ask for root password in an interactive manner.
# This blocks running this script through vagrant file.
sudo apt-get install -y mysql-client mysql-server
sudo apt-get install -y php7.0 php7.0-mysql libapache2-mod-php7.0 php7.0-cli php7.0-cgi php-imagick php7.0-gd
sudo apt-get install -y php7.0-dev
curl -O -L http://xdebug.org/files/xdebug-2.6.1.tgz
tar -xvzf xdebug-2.6.1.tgz
pushd xdebug-2.6.1
phpize
./configure
make
sudo cp modules/xdebug.so /usr/lib/php/20151012
sudo sh -c 'echo "
zend_extension = /usr/lib/php/20151012/xdebug.so
[XDebug]
xdebug.remote_enable = 1
xdebug.remote_autostart = 1
xdebug.remote_connect_back = 1
" >> /etc/php/7.0/apache2/php.ini'
popd
rm xdebug-2.6.1.tgz
rm -rf xdebug-2.6.1
# Not sure what this file is (it's info about xdebug)
rm package.xml
curl -O https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar
chmod +x wp-cli.phar
sudo mv wp-cli.phar /usr/local/bin/wp
sudo mysql -u root -e "\
create database wordpress;\
ALTER DATABASE wordpress CHARACTER SET utf8 COLLATE utf8_general_ci;\
create user wordpress@'%' identified by 'wppass';\
grant all privileges on wordpress.* to wordpress@'%';\
flush privileges;"
# allow external connections
# ZERO_DATEs are required by wordpress, these are stripped from default modes
sudo sh -c 'echo "
[mysqld]
bind-address = 0.0.0.0
sql-mode="ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION"
" >> /etc/mysql/my.cnf'
sudo chown -R www-data:www-data /var/www
sudo usermod -a -G www-data vagrant
sudo chmod -R g+w /var/www
# Start apache
sudo systemctl start apache2
# login again for group to take effect
echo "== Login again for group to take effect ==" | true |
be40b09cd5de7906f506fd2d7e3f6194512983f8 | Shell | shevaua/docker-phpdev | /build/build.sh | UTF-8 | 771 | 3.765625 | 4 | [] | no_license | #!/bin/bash
PROJECT_PATH=`dirname "${BASH_SOURCE[0]}"`
cd $PROJECT_PATH
. common.sh
build_ubuntu=0
build_failed=0
docker images -a | grep $ubuntutag | grep -q $version
ubuntu_found=$?
if [ $ubuntu_found = 0 ]
then
ynQuestion="Would you like to rebuild $ubuntutag"
askYN
if [ $ynAnswer = 'y' ]
then
build_ubuntu=1
fi
else
build_ubuntu=1
fi
if [ $build_ubuntu = 1 ]
then
echo "Building ubuntu"
docker build ./container-ubuntu \
--tag $ubuntutag:$version \
--tag $ubuntutag:latest \
--no-cache
build_failed=$?
fi
if [ $build_failed = 0 ]
then
echo "Building phpdev"
docker build ./container-phpdev \
--tag $devtag:$version \
--tag $devtag:latest \
--no-cache
fi
| true |
f6db795cba1805cc642f9ded79eee3a4489b1343 | Shell | wbdana-scripts/aur-update | /loop_get_pkg_name_and_len.sh | UTF-8 | 473 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Derive the package name from the directory path held in the global $d
# (expected to look like "./<pkg>/"). Sets three globals as side effects:
#   dir_name_len - length of the raw $d string
#   pkg_name     - $d without the leading "./" and trailing "/"
#   pkg_name_len - length of the final package name
loop_get_pkg_name_and_len ()
{
    # Length of the raw directory string
    dir_name_len=${#d}
    # Strip the leading "./" prefix
    pkg_name=$(for entry in $d; do echo ${entry:2:$dir_name_len}; done)
    # Length while the trailing "/" is still attached
    pkg_name_len=${#pkg_name}
    # Strip the trailing "/"
    pkg_name=$(for entry in $pkg_name; do echo ${entry:0:$pkg_name_len - 1}; done)
    # Final length of the bare package name
    pkg_name_len=${#pkg_name}
}
| true |
345587eb608c05c46353e578343e04b205c09509 | Shell | nnkogift/dhis2-tools-ng | /setup/service/dhis2-delete-instance | UTF-8 | 1,014 | 3.96875 | 4 | [] | no_license | #!/usr/bin/env bash
#    ____  __  ______________
#   / __ \/ / / / _/ ___/__ \
#  / / / / /_/ // / \__ \__/ /
# / /_/ / __  // / ___/ / __/
#/_____/_/ /_/___//____/____/
#
# Script to completely remove a dhis2 instance
PROG=`basename $0`
###############################################################
# Print usage and option help. The postgres container name is
# optional and defaults to "postgres" (see DBCONTAINER below).
usage() {
   echo "Usage: $PROG <name> [<postgres_container>]"
   echo "  name: name of the dhis2 container"
   echo "  postgres_container: name of the postgres container"
   echo "Options:"
   echo "  -h, --help       Display this help message"
}

while getopts h opt
do
  case $opt in
      h) usage; exit 1;;
      *) echo "Error: Unknown parameter '$OPTARG'."
         exit 1;;
  esac
done
shift $(($OPTIND - 1))

# Bug fix: only the instance name is required; the second argument is
# optional (usage shows it in brackets and it has a default below), so
# the original "-lt 2" check made the default unreachable.
if [ "$#" -lt 1 ]; then
    usage
    exit 1
fi

NAME=$1
DBCONTAINER=${2:-postgres}

# Remove the proxy upstream entry, then the instance container itself.
lxc exec proxy -- rm /etc/apache2/upstream/${NAME}
lxc stop $NAME
lxc delete $NAME

# Drop the instance's database and role from the postgres container.
lxc exec ${DBCONTAINER} -- dropdb $NAME
lxc exec ${DBCONTAINER} -- dropuser $NAME

# Bug fix: was "$N" (an undefined variable, matching/deleting nothing);
# remove the instance's entry from /etc/hosts.
sudo sed -i "/.* $NAME$/d" /etc/hosts
| true |
3455cdb18bfdd5fe5da5f8086a68f6c8b4946025 | Shell | arizonatribe/bash-utils | /setup_jailed_user.sh | UTF-8 | 1,389 | 3.796875 | 4 | [] | no_license | #!/bin/bash
# Check the username
if [ -z $1 ] ; then
echo "Usage: $0 <username>"
exit 2
fi
# Create the user and set their password
useradd $1
pword=$(pwgen -y1s 12)
echo "Inital password for $1 is $pword"
echo $1:$pword | chpasswd
space_suffix='_demo'
jailspace=$1$space_suffix
# Carve out a directory on the host for the jail
mkdir /opt/$jailspace
# Install basic tools needed in the jail
jk_init -v /opt/$jailspace ssh basicshell netbasics extendedshell jk_lsh
# Install additional files/tools
jk_cp -v -f /opt/$jailspace/ /etc/bashrc
# Bind an existing user account to this jail
jk_jailuser -m -j /opt/$jailspace/ $1
# Setup SSH
test -d /opt/$jailspace/home/$1/.ssh || mkdir -p /opt/$jailspace/home/$1/.ssh
# install the pub key (passed in as an argument)
if [ ! -z "$2" ] ; then
echo "$2" >> /opt/$jailspace/home/$1/.ssh/authorized_keys
else
touch /opt/$jailspace/home/$1/.ssh/authorized_keys
fi
# Set ownership and read/write/execute permissions
chmod 700 /opt/$jailspace/home/$1/.ssh
chmod 600 /opt/$jailspace/home/$1/.ssh/authorized_keys
chown -R $1:$1 /opt/$jailspace/home/$1/.ssh
# Change the default shell for the jailed user
sed -i s@/usr/sbin/jk_lsh@/bin/bash@g /opt/$jailspace/etc/passwd
# Get the UID for this new user so we can set the firewalld restrictions
demoUID=$(awk -F":" -v pattern=$1 ' $0 ~ pattern {print $3} ' /etc/passwd)
exit 0
| true |
940fcae9ae71556b6e6530cd9451ef511d71ec6b | Shell | caiobarbosa/atasweb | /deploy.sh | UTF-8 | 4,432 | 3.703125 | 4 | [] | no_license | #!/usr/bin/env bash
JQ="jq --raw-output --exit-status"
set -e
set -u
set -o pipefail
ENVIRONMENT=$1
VERSION=$2
TAG_SUFFIX=""
ECS_REGION="us-west-2"
ECS_CLUSTER="atasweb-production"
ECS_TASK_FAMILY="$DOCKER_IMAGE-production"
ECS_SERVICE="$DOCKER_IMAGE-production"
if [ "$ENVIRONMENT" == "staging" ]; then
TAG_SUFFIX="-beta"
ECS_CLUSTER="atasweb-cluster-staging"
ECS_TASK_FAMILY="atasweb-task-staging"
ECS_SERVICE="atasweb-service-staging"
fi
deploy_image() {
eval $(aws ecr get-login --region us-west-2)
docker push $DOCKER_REGISTER/$DOCKER_IMAGE:$CIRCLE_SHA1
}
create_task_definition() {
task_definition='{
"family": "'$ECS_TASK_FAMILY'",
"containerDefinitions": [
{
"name": "atasweb",
"image": "'$DOCKER_REGISTER/$DOCKER_IMAGE:$CIRCLE_SHA1'",
"essential": true,
"memory": 100,
"cpu": 10,
"portMappings": [
{
"hostPort": 80,
"containerPort": 80,
"protocol": "tcp"
}
]
}
]
}'
echo $task_definition > /tmp/task_definition.json
if revision=$(aws ecs register-task-definition --region $ECS_REGION --cli-input-json file:///tmp/task_definition.json --family $ECS_TASK_FAMILY | \
$JQ '.taskDefinition.taskDefinitionArn'); then
echo "Create new revision of task definition: $revision"
else
echo "Failed to register task definition"
return 1
fi
}
create_service() {
service='{
"serviceName": "'$ECS_SERVICE'",
"taskDefinition": "'$ECS_TASK_FAMILY'",
"desiredCount": 1
}'
echo $service > /tmp/service.json
if [[ $(aws ecs create-service --cluster $ECS_CLUSTER --region $ECS_REGION --service-name $ECS_SERVICE --cli-input-json file:///tmp/service.json | \
$JQ '.service.serviceName') == "$ECS_SERVICE" ]]; then
echo "Service created: $ECS_SERVICE"
else
echo "Error to create service: $ECS_SERVICE"
return 1
fi
}
stop_service(){
echo "Stop the Service: $ECS_SERVICE"
if [[ $(aws ecs update-service --cluster $ECS_CLUSTER --region $ECS_REGION --service $ECS_SERVICE --desired-count 0 | \
$JQ ".service.serviceName") == "$ECS_SERVICE" ]]; then
for attempt in {1..30}; do
if stale=$(aws ecs describe-services --cluster $ECS_CLUSTER --region $ECS_REGION --services $ECS_SERVICE | \
$JQ ".services[0].deployments | .[] | select(.taskDefinition != \"$revision\") | .taskDefinition"); then
echo "Waiting the service stops: $stale"
sleep 5
else
echo "Service stopped: $ECS_SERVICE"
return 0
fi
done
echo "Stopping the service $ECS_SERVICE took too long."
return 1
else
echo "Error to stop service: $ECS_SERVICE"
return 1
fi
}
start_service() {
echo "Start the service: $ECS_SERVICE"
if [[ $(aws ecs update-service --cluster $ECS_CLUSTER --region $ECS_REGION --service $ECS_SERVICE --desired-count 1 | \
$JQ ".service.serviceName") == "$ECS_SERVICE" ]]; then
for attempt in {1..30}; do
if [[ $(aws ecs describe-services --cluster $ECS_CLUSTER --region $ECS_REGION --services $ECS_SERVICE | \
$JQ ".services[0].runningCount") == "1" ]]; then
echo "Service started: $ECS_SERVICE"
return 0
else
echo "Waiting the service starts..."
sleep 5
fi
done
echo "Starting the service $ECS_SERVICE took too long."
return 1
else
echo "Error to start service: $ECS_SERVICE"
return 1
fi
}
restart_service() {
if [[ $(aws ecs describe-services --cluster $ECS_CLUSTER --region $ECS_REGION --services $ECS_SERVICE | \
$JQ '.services[0].runningCount') == "0" ]]; then
start_service
else
stop_service
start_service
fi
}
update_service() {
if [[ $(aws ecs update-service --cluster $ECS_CLUSTER --region $ECS_REGION --service $ECS_SERVICE --task-definition $revision | \
$JQ '.service.taskDefinition') == "$revision" ]]; then
echo "Service updated: $revision"
else
echo "Error to update service: $ECS_SERVICE"
return 1
fi
}
create_or_update_service() {
if [[ $(aws ecs describe-services --cluster $ECS_CLUSTER --region $ECS_REGION --services $ECS_SERVICE | \
$JQ '.failures | .[] | .reason') == "MISSING" ]]; then
create_service
else
update_service
restart_service
fi
}
deploy_server() {
create_task_definition
create_or_update_service
}
# Deployment
deploy_image
deploy_server
| true |
2a65c34efe93e10631b063d3f6764ca477129d52 | Shell | miguelemosreverte/MTTT_web | /configure.sh | UTF-8 | 1,578 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Removing services just in case they are up and running
sudo docker service rm moses_api
sudo docker service rm mtttweb_web
echo "Making docker accept experimental usage"
File="/etc/docker/daemon.json"
String="{\"experimental\":true}"
if ! grep -q $String $File
then
echo $String >> $File
echo sudo systemctl restart docker
echo sudo docker swarm init
fi
echo "Making docker accept experimental usage: DONE"
echo "Creating docker registry service"
# then spin up the registry service listening on port 9000
docker run -d -p 9000:5000 --restart=always --name registry \
-v `pwd`/auth/htpasswd:/auth/htpasswd:ro \
-v `pwd`/registry:/var/lib/registry \
-e "REGISTRY_AUTH=htpasswd" \
-e "REGISTRY_AUTH_HTPASSWD_REALM=Local Registry" \
-e "REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd" \
-e "REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/var/lib/registry" \
registry:2
echo "Creating docker registry service: DONE"
echo "Logging as developer to the registry server"
#login
docker login --username=username --password=password localhost:9000
echo "Logging as developer to the registry server: DONE"
echo "Pushing docker images to the registry server"
# then push your images
docker tag mtttweb_moses_api localhost:9000/mtttweb_moses_api
docker push localhost:9000/mtttweb_moses_api
docker tag mtttweb_web localhost:9000/mtttweb_web
docker push localhost:9000/mtttweb_web
echo "Pushing docker images to the registry server :DONE"
#finnally, create .configured file as flag
touch .configured
echo
echo
echo "CONFIGURATION FINALIZED SUCCESSFULLY"
echo
| true |
b6c2386fb82052c88c58b548eaa26e494133e08b | Shell | Liaison-Intl/upgrade_toolbelt | /bin/utb-build-analyze | UTF-8 | 650 | 3.484375 | 3 | [] | no_license | # vim: set ft=sh:
show_usage() {
cat <<EOF
Usage: $0 build_number
Example: $0 15946
NOTE: Use the build **number** not the build **id** from the URL!
EOF
exit 1
}
if [ "$1" == "" ]; then
show_usage
fi
build_number="$1"
cd "/var/ci"
utb-travis-logs-download "$UTB_OWNER_AND_REPO" "$build_number" "$UTB_JOB_COUNT"
cd "$UTB_OWNER_AND_REPO/build_$build_number"
utb-deprecations-categorize *.txt > deprecations.txt
utb-tests-passing *.txt > passing.txt
grep --no-filename 'Running Rails' *.txt | uniq | sed 's/Running Rails //' > rails_version.txt
grep --no-filename 'Running Ruby ' *.txt | uniq | sed 's/Running Ruby //' > ruby_version.txt
| true |
d7a3104cf8933ff49a5da75556c29cbf1ff0f685 | Shell | aldoborrero/dotfiles | /bin/besu | UTF-8 | 361 | 3.078125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Launch a Hyperledger Besu node in Docker, exposing JSON-RPC (8545),
# WebSocket (8546), GraphQL (8547) and P2P (30303) ports.
# Environment:
#   BESU_VERSION - image tag to run (default "1.4.4")
#   VOLUME_PATH  - host directory bind-mounted as the node database (required)
BESU_VERSION=${BESU_VERSION:-"1.4.4"}
VOLUME_PATH=${VOLUME_PATH:-""}

# Bug fix: the original only printed this warning and then ran docker anyway;
# abort as the message promises, since an empty bind-mount source is invalid.
if [[ -z "$VOLUME_PATH" ]]; then
  echo "VOLUME_PATH variable is unset. Not starting besu." >&2
  exit 1
fi

docker run \
  -p 8545:8545 \
  -p 8546:8546 \
  -p 8547:8547 \
  -p 30303:30303 \
  --mount type=bind,source=${VOLUME_PATH},target=/opt/besu/database \
  hyperledger/besu:${BESU_VERSION} "$@"
| true |
59a9e4840cf22da0fc1a126e976d5025ec3ea949 | Shell | carl-schelin/admin | /bin/chkprocess | UTF-8 | 1,843 | 4.03125 | 4 | [] | no_license | #!/bin/ksh
# chkprocess - Retrieve the ps output and email
# Owner: Carl Schelin

BASENAME=$(basename "$0")
DIRNAME=$(dirname "$0")

# The shared configuration file must exist before anything else runs.
if [[ ! -f ${DIRNAME}/../.config ]]
then
  echo "Unable to locate ${DIRNAME}/../.config. Exiting."
  exit 1
fi

# Source the configuration file (presumably provides SCRIPTS, LOG, ...)
. "${DIRNAME}"/../.config

PID=$$
PROGNAME="chkprocess"

# passed is
# server name
# email address to return the info to
if [[ -z $1 ]]
then
  echo "You need to pass the server name"
  exit 1
fi

if [[ -z $2 ]]
then
  echo "You need to pass the email address of who gets the report"
  exit 1
fi

# Bug fix: the two arguments were validated above but never assigned;
# every later line referenced $SERVER and $EMAIL, which this script
# never set (unless .config happened to define them).
SERVER="$1"
EMAIL="$2"

# start the pull
# Check to make sure the target directory exists
# if not, create it and set the perms
SVRPATH="${SCRIPTS}/servers/$SERVER"
if [ ! -d $SVRPATH ]
then
  mkdir $SVRPATH
  chmod 775 $SVRPATH
fi

# Give the remote command this many seconds to finish.
TIMEOUT=90

# Content here: scp the config file from here
CONTENT="process"

# Remove any stale output from a previous run.
if [ -f $SVRPATH/$CONTENT.output ]
then
  rm $SVRPATH/$CONTENT.output
fi

# Collect the remote process table in the background so it can be timed out.
/usr/bin/ssh $SERVER "ps -ef" > $SVRPATH/$CONTENT.output &
PID=$!

# wait for the specified number of seconds for ssh to complete
# if the timeout is exceeded, kill the process and move on to the next box
while [[ $TIMEOUT -gt 0 ]]
do
  /usr/bin/ps -p $PID > /dev/null 2>&1
  if [[ $? -ne 0 ]]
  then
    break
  fi
  TIMEOUT=$(($TIMEOUT - 1))
  sleep 1
done

# if the timeout reaches 0, then the process was killed. Report something.
if [[ $TIMEOUT -le 0 ]]
then
  echo "ERROR: Unable to connect to server ($SERVER)" >> $LOG
  chmod 664 /var/tmp/chkservers.status
  kill -KILL $PID
fi

# if there's no output, delete the 0 byte file
if [ ! -s $SVRPATH/$CONTENT.output ]
then
  rm $SVRPATH/$CONTENT.output
fi

# set the perms as the last step
if [ -f $SVRPATH/$CONTENT.output ]
then
  chmod 664 $SVRPATH/$CONTENT.output
fi

# Mail the collected process listing to the requested recipient.
cat $SVRPATH/$CONTENT.output | mailx -s "$SERVER Process Output" $EMAIL

exit 0
| true |
523386a2bacee321335cd3cce3fd768473beb648 | Shell | matipuig/docker-nginx-certbot-template | /cleanup-docker.sh | UTF-8 | 1,432 | 4.15625 | 4 | [
"MIT"
] | permissive | #! /bin/sh
# Extracted from: https://www.github.com/matipuig/shell-utils
# Count how many word are in a string.
# Use $(strings::count "Here the text")
strings::count(){
echo "$@" | wc -w
}
#
# Start the cleanup.
#
echo "This shell command will delete all the images, containers and cache of docker. It won't erase the volumes. Do you wanna delete them? Press y/n"
read USER_INPUT
if [[ ${USER_INPUT} != "y" ]]; then
exit 0
fi
# Erase all docker containers.
CONTAINERS_LIST=$(docker container ls -aq)
CONTAINERS_COUNT=$(strings::count ${CONTAINERS_LIST})
if [[ $CONTAINERS_COUNT != "0" ]]; then
echo "Cleaning containers: ''${CONTAINERS_LIST}''"
docker container rm ${CONTAINERS_LIST}
else
echo "No containers. Skipping..."
fi
# Erases all images.
IMAGES_LIST=$(docker images -q)
IMAGES_COUNT=$(strings::count ${IMAGES_LIST})
if [[ $IMAGES_COUNT != "0" ]]; then
echo "Cleaning images: ''${IMAGES_LIST}''"
docker rmi ${IMAGES_LIST}
else
echo "No images. Skipping..."
fi
# Prune everything
echo "Pruning containers..."
docker container prune -f
echo "Pruning images..."
docker image prune -af
echo "Pruning builder..."
docker builder prune -af
echo ""
echo "Done! You should remember that if you are using docker-desktop, maybe you should use in the app the opcion purge Data in the debug options. But THIS WILL ALSO ERASE VOLUMES."
echo "Everything OK! Press any key to quit."
read USER_INPUT | true |
77f499602bb002d350b5c04f2b39737e70838bd1 | Shell | yuchanggit/ZpZHllbb_13TeV | /toyMCtest/newFramework/toyMCnewRun.sh | UTF-8 | 3,877 | 2.5625 | 3 | [] | no_license | #!/bin/sh
pwd=$PWD
cmsswdr=/afs/cern.ch/work/h/htong/CMSSW_7_1_5/src
cd $cmsswdr
export SCRAM_ARCH=slc6_amd64_gcc481
eval `scramv1 runtime -sh`
cd $pwd
ch=(ele mu)
for ((i=0; i<${#ch[@]}; i++)); do
mcpath=/data7/htong/skim_samples/${ch[$i]}
datapath=/data7/htong/
cd $pwd/${ch[$i]}
echo "We are now in " $PWD
if [ `echo ${ch[$i]} | grep -c "mu"` -gt 0 ]; then
echo "Processing muon data set..."
root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_crab_SingleMuon-Run2015D-05Oct2015-v1_20151119_2p2fb_SingleMuTextFile.root\"\,\"SingleMuon-Run2015D-05Oct2015-v1\"\)
root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_crab_SingleMuon-Run2015D-PromptReco-V420151119_2p2fb_SingleMuTextFile.root\"\,\"SingleMuon-Run2015D-PromptReco-V4\"\)
mv *root data
elif [ `echo ${ch[$i]} | grep -c "ele"` -gt 0 ]; then
echo "Processing electron data set..."
root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_crab_SingleElectron-Run2015D-05Oct2015-v1_20151117_2p2fb_SingleEleTextFile.root\"\,\"SingleElectron-Run2015D-05Oct2015-v1\"\)
root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_crab_SingleElectron-Run2015D-PromptReco-V420151117_2p2fb_SingleEleTextFile.root\"\,\"SingleElectron-Run2015D-PromptReco-V4\"\)
mv *root data
fi
echo "Processing DY+jets background..."
root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_DYJetsToLL_M-50_HT-100to200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8.root\"\,\"DYJetsToLL_M-50_HT-100to200_13TeV\"\)
root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_DYJetsToLL_M-50_HT-200to400_TuneCUETP8M1_13TeV-madgraphMLM-pythia8.root\"\,\"DYJetsToLL_M-50_HT-200to400_13TeV\"\)
root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_DYJetsToLL_M-50_HT-400to600_TuneCUETP8M1_13TeV-madgraphMLM-pythia8.root\"\,\"DYJetsToLL_M-50_HT-400to600_13TeV\"\)
root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_DYJetsToLL_M-50_HT-600toInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8.root\"\,\"DYJetsToLL_M-50_HT-600toInf_13TeV\"\)
mv *root Zjets
echo "Processing diBosons background..."
root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_WW_TuneCUETP8M1_13TeV-pythia8.root\"\,\"WW_TuneCUETP8M1_13TeV\"\)
root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_WZ_TuneCUETP8M1_13TeV-pythia8.root\"\,\"WZ_TuneCUETP8M1_13TeV\"\)
root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_ZZ_TuneCUETP8M1_13TeV-pythia8.root\"\,\"ZZ_TuneCUETP8M1_13TeV\"\)
mv *root VV
echo "Processing ttbar background..."
root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_TT_TuneCUETP8M1_13TeV-powheg-pythia8.root\"\,\"TT_TuneCUETP8M1_13TeV\"\)
mv *root TT
#echo "Processing singleTop background..."
#root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_ST_s-ch_4f_leptonDecays_13TeV-amcatnlo-pythia8_TuneCUETP8M1.root\"\,\"ST_s_4f_leptonDecays_13TeV\"\)
#root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_ST_t-ch_antitop_4f_leptonDecays_13TeV-powheg-pythia8_TuneCUETP8M1.root\"\,\"ST_t_antitop_4f_leptonDecays_13TeV\"\)
#root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_ST_t-ch_top_4f_leptonDecays_13TeV-powheg-pythia8_TuneCUETP8M1.root\"\,\"ST_t_top_4f_leptonDecays_13TeV\"\)
#root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_ST_tW_antitop_5f_inclusiveDecays_13TeV-powheg-pythia8_TuneCUETP8M1.root\"\,\"ST_tW_antitop_5f_inclusiveDecays_13TeV\"\)
#root -q -b -l toyMCnew_${ch[$i]}.C+\(\"$mcpath/skim_${ch[$i]}_ST_tW_top_5f_inclusiveDecays_13TeV-powheg-pythia8_TuneCUETP8M1.root\"\,\"ST_tW_top_5f_inclusiveDecays_13TeV\"\)
#mv *root SingleTop
rm -f inputdir.txt
rm -f *.pcm *.d *.so
echo "Done. Move to next directory..."
cd ../
done
echo "All the jobs are finished."
exit | true |
4a7a0e892673ad05d21134aca506c637ee7fca89 | Shell | lunatickochiya/Actions-syb1505 | /package/autoset/files/chmod_dir.sh | UTF-8 | 256 | 2.796875 | 3 | [
"MIT",
"TMate"
] | permissive | #!/bin/sh /etc/rc.common
# Read the share path(s) and permission settings from UCI.
device=$(uci get samba.@samba[0].device)
chmod_mask=$(uci get samba.@samba[0].chmod_mask)
chmod_files=$(uci get samba.@samba[0].chmod_files)

# Recurse into files only when the UCI flag is "true".
# Bug fix: the flag was tested unquoted, so an unset/empty option made
# the [ ] test itself error out. $device stays unquoted on purpose: it
# may hold several space-separated paths that must word-split.
if [ "$chmod_files" = "true" ]; then
	chmod -R "$chmod_mask" $device
else
	chmod "$chmod_mask" $device
fi
| true |
f42f4f2be7b401da6e37569793d5e19dfda4cc11 | Shell | htrc/HTRC-Solr-EF-Cloud | /OBSOLETE/SCRIPTS/htrc-ef-mongodb-init-shardservers.sh | UTF-8 | 1,100 | 3.625 | 4 | [] | no_license | #!/bin/bash
function generate_member_elems
{
hosts=$1
member_elems=""
mi=0
for h in $hosts ; do
# aiming for lines that look like:
# ,{ _id : 0, host : "shard1-rep1.example.net:27017" }
if [ "x$member_elems" != "x" ] ; then
member_elems="$member_elems,"
fi
member_elems="$member_elems{ _id : $mi, host : \\\"$h:$MONGODB_REPLSET_PORT\\\" }"
if [ $mi == 0 ] ; then
primary_host="$h"
fi
mi=$((mi+1))
done
}
repl_set_core="htrc-ef"
MONGODB_REPLSET_META_ARRAY=($MONGODB_REPLSET_METALIST)
num_shards=${#MONGODB_REPLSET_META_ARRAY[*]}
i=0
while [ $i -lt $num_shards ]; do
repl_set="$repl_set_core-shard$i"
replset_meta_hosts=${MONGODB_REPLSET_META_ARRAY[$i]}
eval replset_hosts="`echo \\$$replset_meta_hosts`"
generate_member_elems "$replset_hosts"
init_syntax="rs.initiate( { _id: \\\"$repl_set\\\", members: [ $member_elems ] } )"
echo "* Initializing MongoDB Shard Server Replica Set$i"
ssh $primary_host "mongo --host localhost --port $MONGODB_REPLSET_PORT -eval \"$init_syntax\""
i=$((i+1))
echo ""
done
| true |
746a0d85fe273cec7ac2e0830457a4545b114627 | Shell | snltd/smartos-gubbins | /packagers/package_git/build_git.sh | UTF-8 | 1,442 | 3.59375 | 4 | [] | no_license | #!/usr/bin/ksh -e
VERSION="2.15.0"
# Required version
PREFIX="/opt/local/git"
# Where the package will install. Watch out, this gets nuked!
PKG_NAME="sysdef-git-${VERSION}.tgz"
# What to call the final package. Change to suit your site.
DIR=$(mktemp -d)
# Temporary directory
PATH="/usr/bin:/usr/sbin:/opt/local/bin:/opt/local/sbin"
# Always set your path
export INSTALL=ginstall
mkdir -p $DIR
cd $DIR
if [[ $1 != "nobuild" ]]
then
SRCFILE="git-${VERSION}.tar.gz"
rm -fr $PREFIX
if ! test -f $SRCFILE
then
wget --no-verbose \
--no-check-certificate \
"https://www.kernel.org/pub/software/scm/git/$SRCFILE"
fi
rm -fr $VERSION
tar zxf $SRCFILE
cd ${SRCFILE%.tar*}
gsed -i 's|/usr/ucb/install|/opt/local/bin/install|' config.mak.uname
CFLAGS="-I/opt/local/include" LDFLAGS="-L/opt/local/lib" \
./configure \
--prefix=/opt/local/git \
--with-curl \
--with-openssl \
--without-tcltk
gmake -j4
#gmake test
gmake install
fi
find ${PREFIX} -type f | sed "s|${PREFIX}/||" >${DIR}/pkglist
pkg_info -X pkg_install \
| egrep '^(MACHINE_ARCH|OPSYS|OS_VERSION|PKGTOOLS_VERSION)' \
>${DIR}/build-info
print "minimal Git ${VERSION}" >${DIR}/comment
print "Git ${VERSION} with no dependencies." >${DIR}/description
pkg_create \
-B ${DIR}/build-info \
-d ${DIR}/description \
-c ${DIR}/comment \
-f ${DIR}/pkglist \
-I $PREFIX \
-p $PREFIX \
-P curl \
-U \
${HOME}/${PKG_NAME}
rm -fr $DIR
| true |
ee0b0a5812bc3fe58edfeb27bf0d8ac8eaf80b70 | Shell | szj2ys/sufu | /scripts/install.sh | UTF-8 | 285 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Use the Python executable provided from the `-p` option, or a default.
# NOTE(review): $PYTHON is set here but never used below — confirm intent.
[ "$1" = "-p" ] && PYTHON=$2 || PYTHON="python3"

PACKAGE=$(basename `pwd`)

# Tidy the sources before building.
autoflake --recursive ${PACKAGE}
yapf -irp .

# Bug fix: the original ran `pip install -y ${PACKAGE}`, but `-y` is not a
# valid `pip install` option, so the command always failed. As the next line
# installs the package from source, the evident intent was to remove any
# previously installed copy first (`-y` belongs to `pip uninstall`).
pip uninstall -y ${PACKAGE}
python3 setup.py install

bash scripts/clean.sh
| true |
d4f8e06342da2852e14ed0e86b11df32bc9f3841 | Shell | dlfkid/RVListView | /format_bootstrap.sh | UTF-8 | 450 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Usage: bootstrap
set -eu
ROOT="$(dirname "$PWD")"
HOOKS_FOLDER="$ROOT/.git/hooks"
if [[ -d "$HOOKS_FOLDER" ]] && [[ ! -L "$HOOKS_FOLDER" ]] ; then
echo ">>> remove default hooks directory."
mv "$HOOKS_FOLDER" "$ROOT/.git/hooks_bak/"
fi
if [[ ! -d "$HOOKS_FOLDER" ]] || [[ ! -L "$HOOKS_FOLDER" ]] ; then
echo ">>> create hooks symbolic link."
ln -s "$ROOT/format_hooks/hooks/" "$ROOT/.git"
fi
chmod -R +x "$ROOT/format_hooks/hooks/" | true |
67b5b938e0c9596311f9abc74523409a0b8f1c1e | Shell | fadado/jtool | /jtool-stat | UTF-8 | 1,177 | 3.609375 | 4 | [] | no_license | #!/bin/bash
# Usage: jtool-stat [OPTIONS] FILE...
########################################################################
# Bash environment
########################################################################
# Safety
set -o errexit \
-o pipefail \
-o nounset \
-o noclobber
# Extensions
set +o posix
shopt -s lastpipe
shopt -s expand_aliases
shopt -s extglob
shopt -s globstar
########################################################################
# Handle private protocol
########################################################################
if (( $# == 1 )); then
case $1 in
-\?) exec stat --help ;;
-\:) exec cat <<SYNOPSYS
jtool CMD
CMD sinopsys...
SYNOPSYS
exit 0
;;
esac
fi
########################################################################
# Command
########################################################################
# `stat` with JSON output!
stat --printf '{
"mode": "%A",
"nlink": %h,
"uid": "%u",
"user": "%U",
"gid": "%g",
"group": "%G",
"size": %s,
"mtime": "%y",
"name": "%n"
}'"\n" "${@}"
exit
# vim:syntax=sh:et:sw=4:ts=4:ai
| true |
018193ae7a888882bf86f3491805f16c4a371803 | Shell | mvandermeulen/DevOps-Bash-tools | /.bash.d/ssh-agent.sh | UTF-8 | 1,965 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env bash
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: circa 2006 (forked from .bashrc)
#
# https://github.com/harisekhon/bash-tools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
# ============================================================================ #
# S S H A g e n t
# ============================================================================ #
# keychain id_rsa
# . .keychain/$HOSTNAME-sh
# Start (or re-attach to) a per-user ssh-agent whose environment is cached
# in ~/.ssh-agent.env, so every new shell reuses a single agent instead of
# spawning its own.  Three cases are handled:
#   1. cache exists but the recorded PID is dead          -> restart agent
#   2. cache exists but the PID belongs to something else -> restart agent
#   3. no cache at all                                    -> start fresh agent
# NOTE(review): `killall -9 ssh-agent` kills *all* of this user's agents,
# not just the stale one — presumably intentional to avoid orphans; confirm.
# NOTE(review): the "PID recycled" branch restarts without a killall,
# unlike the other two branches — verify that asymmetry is deliberate.
ssh_agent(){
    #if [ $UID != 0 ]; then
    local SSH_ENV_FILE=~/.ssh-agent.env
    if [ -f "${SSH_ENV_FILE:-}" ]; then
        # Load SSH_AUTH_SOCK / SSH_AGENT_PID from the cached agent output.
        # shellcheck source=~/.agent.env
        # shellcheck disable=SC1090
        . "$SSH_ENV_FILE" > /dev/null
        # kill -0 only probes liveness of the PID; no signal is delivered.
        if ! kill -0 "$SSH_AGENT_PID" >/dev/null 2>&1; then
            echo "Stale ssh-agent found. Spawning new agent..."
            killall -9 ssh-agent
            # Print the agent's env to the cache *and* eval it here.
            eval "$(ssh-agent | tee "$SSH_ENV_FILE")" #| grep -v "^Agent pid [[:digit:]]\+$"
            # lazy evaluated ssh func now so it's not prompted until used
            #ssh-add
        # PID alive but recycled by another process: agent is gone, restart.
        elif [ "$(ps -p "$SSH_AGENT_PID" -o comm=)" != "ssh-agent" ]; then
            echo "ssh-agent PID does not belong to ssh-agent, spawning new agent..."
            eval "$(ssh-agent | tee "$SSH_ENV_FILE")" #| grep -v "^Agent pid [[:digit:]]\+$"
            # lazy evaluated ssh func now so it's not prompted until used
            #ssh-add
        fi
    else
        # No cached environment: first run (or cache was deleted).
        echo "Starting ssh-agent..."
        killall -9 ssh-agent
        eval "$(ssh-agent | tee "$SSH_ENV_FILE")"
        # lazy evaluated ssh func now so it's not prompted until used
        #ssh-add
    fi
    #clear
    #fi
}
ssh_agent
| true |
ddb58d55f32bb042a39d4b57f41823c86c80e982 | Shell | wavesmp/wavesmp | /scripts/count-lines.sh | UTF-8 | 215 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Fail fast: abort on command errors, failed pipeline stages, and
# references to unset variables.
set -o errexit -o pipefail -o nounset

# Run from the repository root (one level above this script's directory).
cd "$(dirname "$0")/.."

# Per-file line counts, largest first; package-lock.json is generated noise.
fd --type file --exclude package-lock.json --exec-batch wc -l \
    | sort --reverse --numeric-sort
| true |
3581727871f64c95467b966df791e5e42ec29e61 | Shell | PrincetonUniversity/smFRET_MaximumInformationMethod | /motional_narrowing/scripts/alpha_frag_boots.sh | UTF-8 | 1,548 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Submit per-alpha "boots" histogram jobs and batched "frag" jobs to the
# PBS/Torque queue.  Expects the sourcing environment to provide:
#   $final_dir $work_dir $alphas $sample $scripts_dir
# (get_job_id_list.sh additionally sets $depend_switch for job dependencies).

# Optional crosstalk switch, recorded by an earlier pipeline stage.
if [ -e $final_dir/.crosstalk ]
then
    XT_SWITCH="-x$(cat $final_dir/.crosstalk)"
else
    XT_SWITCH=''
fi

# --- boots: one bootstrap-histogram job per alpha lacking hist output ---
boots_count=0
for A in $alphas
do
    cd $work_dir/$A
    if [ ! -e hist$A.out ]
    then
        jobname=hist$A
        echo "
boots *.fr -a.$A -m -b25 -ohist$A.out $XT_SWITCH
" | qsub -d$work_dir/$A -kn -M nomail -m n -N $jobname -o boots_out -e boots_err >> $work_dir/job_id_list
        let boots_count++
    fi
done

echo
echo "$boots_count boots jobs have been submitted"

# Link each per-alpha histogram into the final directory under the sample name.
cd $final_dir
rm *.out -f > /dev/null
for A in $alphas
do
    ln -s $work_dir/$A/hist$A.out ${sample}$A.out
done

# --- frag: batch up to $num_jobs fragments per qsub submission ---
# BUG FIX: the counter was initialised as `frag_cnt=1` but incremented and
# reported as `frag_count`, so the tally started from an unset variable and
# `frag_cnt` was dead; it is now one consistently named counter from 0.
frag_count=0
num_jobs=20
submit_job_cnt=1
submit_string=''
for A in $alphas
do
    cd $work_dir/$A
    total_cnt=$(ls *.fr|wc -l)
    traj=1
    for F in *.fr
    do
        # Only fragments that have not produced a .xc result yet.
        if [ ! -e ${F%%.fr}.xc ]
        then
            jobname=fr${A}_$traj
            if [[ $total_cnt -le $traj ]]
            then
                # Last trajectory of this alpha: switch to the master id
                # list / dependency handling and force a batch flush below.
                id_list=$work_dir/job_id_list_master
                source $scripts_dir/get_job_id_list.sh
                submit_job_cnt=$num_jobs
            else
                id_list=$work_dir/job_id_list
                depend_switch=''
            fi
            submit_string="$submit_string $F"
            let submit_job_cnt++
            if [ $submit_job_cnt -ge $num_jobs ]
            then
                echo "
for FRAG in $submit_string
do
    frag \$FRAG -a.$A $XT_SWITCH
done
" | qsub -d $work_dir/$A -kn -M nomail -m n -N $jobname $depend_switch -o /dev/null -e /dev/null >> $id_list
                submit_job_cnt=1
                submit_string=''
            fi
            let frag_count++
        fi
        let traj++
    done
    # BUG FIX: if the last .fr of this alpha already had a .xc, the forced
    # flush above never fired, so leftover fragments leaked into the next
    # alpha's submission (wrong -d working directory) or were dropped at
    # the end.  Flush them here while $A and $work_dir/$A are still valid.
    if [ -n "$submit_string" ]
    then
        echo "
for FRAG in $submit_string
do
    frag \$FRAG -a.$A $XT_SWITCH
done
" | qsub -d $work_dir/$A -kn -M nomail -m n -N fr${A}_flush ${depend_switch:-} -o /dev/null -e /dev/null >> ${id_list:-$work_dir/job_id_list}
        submit_job_cnt=1
        submit_string=''
    fi
done

# NOTE(review): this counts individual fragments queued, not qsub jobs
# (several fragments share one job) — wording kept from the original.
echo "$frag_count frag jobs have been submitted"

exit 0
| true |
60e29642a8ae2eaad54ab42af4a59a0de5652178 | Shell | heitor57/dotfiles | /bin/google-images | UTF-8 | 211 | 2.671875 | 3 | [] | no_license | #!/usr/bin/bash
# Search Google Images for the current primary-selection text, using the
# desktop's default web browser.
# Quote + default expansion: the original unquoted test errored out when
# XDG_SESSION_TYPE was unset or contained whitespace (SC2086).
if [ "${XDG_SESSION_TYPE:-}" = wayland ]
then
    temp=$(wl-paste --primary)
else
    temp=$(xsel -o)
fi
# Encode spaces as '+' so multi-word selections form a valid query string.
# NOTE(review): other reserved characters (&, #, %, ...) are still passed
# raw; full percent-encoding would be more robust.
gtk-launch "$(xdg-settings get default-web-browser)" "https://www.google.com/search?q=${temp// /+}&tbm=isch"
| true |
12534d9f092dfb8195cb9b0950f3d1590307cfcb | Shell | jjcoderzero/JJcoderSN | /运维shell脚本/TestCompleteScript/shell-tools/troubleshoot/dump.sh | UTF-8 | 4,676 | 3.796875 | 4 | [] | no_license | #!/bin/bash -
#===============================================================================
#
#          FILE: dump.sh
#
#         USAGE: ./dump.sh
#
#   DESCRIPTION: When production breaks, the reflex is to roll back and
#                restart to minimise downtime — which destroys the crash
#                scene and makes post-mortem analysis almost impossible,
#                since some problems only occur under live load and cannot
#                be reproduced in test environments.  Nobody will remember
#                to back everything up by hand before a restart, so run
#                this dump script before any kill/restart: it snapshots OS
#                and JVM state automatically, removing the human factor.
#
#       OPTIONS: ---
#  REQUIREMENTS: ---
#          BUGS: ---
#         NOTES: ---
#        AUTHOR: Roc Wong (王鹏举), float.wong@icloud.com
#  ORGANIZATION: Huabao Securities (华宝证券)
#       CREATED: 03/27/2018 08:34:43 PM
#      REVISION: ---
#===============================================================================

# Level-4 prompt $PS4: adds file:line (and function) to `set -x` traces.
export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
#set -o nounset                              # Treat unset variables as an error
#set -e

JAVA_HOME=/usr/java/jdk1.7.0_55   # JDK whose jstack/jinfo/jstat/jmap are used
OUTPUT_HOME=~/output              # root directory for all dump output
DEPLOY_HOME=`dirname $0`          # directory this script lives in
HOST_NAME=`hostname`              # shown in the progress banner
#---  FUNCTION  ----------------------------------------------------------------
#          NAME:  usage
#   DESCRIPTION:  Print the help text (option summary, what gets dumped,
#                 and where output lands) to stdout, then terminate.
#    PARAMETERS:  none
#       RETURNS:  never returns; exits the script with status 1
#-------------------------------------------------------------------------------
usage()
{
    echo -e "Usage: dump.sh [OPTION]。\n"
    echo -e "使用\"dump.sh -p jvm进程号\"指定jvm进程进行备份,不使用-p参数时,默认查找tomcat用户对应的java进程。\n"
    echo -e "dump信息涵盖操作系统和JVM,主要用到的命令包括:jstack、jinfo、jstat、jmap、lsof、sar、uptime、free、vmstat、mpstat、iostat、netstat。\n"
    echo -e "备份文件路径:~/output/"
    exit 1
}
input_pid=
# Parse options.  The leading ':' in the optstring enables getopts' silent
# error mode, so the ':' (missing argument) and '?' (unknown option)
# branches below receive the offending option letter in $OPTARG.
while getopts ":p:h" optname
do
    case "$optname" in
    "p")
        input_pid=${OPTARG}
        ;;
    "h")
        usage
        ;;
    "?")
        echo "Unknown option $OPTARG"
        usage
        ;;
    ":")
        echo "No argument value for option $OPTARG"
        usage
        ;;
    *)
        # Should not occur
        echo "Unknown error while processing options"
        usage
        ;;
    esac
done

# No -p given: list running java processes and interactively prompt for a
# PID to dump.
# NOTE(review): [ ! ${input_pid} ] only works because the unquoted empty
# expansion collapses to `[ ! ]`, which is true; [ -z "$input_pid" ] would
# state the intent explicitly.
if [ ! ${input_pid} ]; then
    echo -e "当前运行的java进程有:\n"
    ps -ef --width 175 | grep java
    read -p "请输入执行dump的JVM进程号(PID): " pid
    # Accept only an (optionally signed) integer.
    if [ -z "$pid" ] || [[ ! "$pid" =~ ^-?[0-9]+$ ]]; then
        echo "-_- 都特么这时候了,你就不能输入正确的进程ID嘛……"
        exit 1;
    fi
    input_pid=${pid}
fi
# Build the PID list.  Intentionally unquoted so that a quoted multi-PID
# argument ("-p 'pid1 pid2'") word-splits into separate array elements.
DUMP_PIDS=(${input_pid})
DUMP_ROOT=$OUTPUT_HOME/dump
if [ ! -d "$DUMP_ROOT" ]; then
    mkdir -p "$DUMP_ROOT"
fi
DUMP_DATE=`date +%Y%m%d%H%M%S`
DUMP_DIR=$DUMP_ROOT/dump-$DUMP_DATE          # one timestamped dir per run
if [ ! -d "$DUMP_DIR" ]; then
    mkdir -p "$DUMP_DIR"
fi

echo -e "\n Dumping the server $HOST_NAME ...\c"
# BUG FIX: the original `for PID in $DUMP_PIDS` expands only element 0 of
# the array, silently ignoring every additional PID; "${DUMP_PIDS[@]}"
# iterates over all of them.
for PID in "${DUMP_PIDS[@]}" ; do
    # JVM-level dumps: thread stacks, flags, GC stats, heap layout and
    # object histogram.  Each command prints a progress dot.
    $JAVA_HOME/bin/jstack -F $PID > $DUMP_DIR/jstack-$PID.dump 2>&1
    echo -e ".\c"
    $JAVA_HOME/bin/jinfo $PID > $DUMP_DIR/jinfo-$PID.dump 2>&1
    echo -e ".\c"
    $JAVA_HOME/bin/jstat -gcutil $PID > $DUMP_DIR/jstat-gcutil-$PID.dump 2>&1
    echo -e ".\c"
    $JAVA_HOME/bin/jstat -gccapacity $PID > $DUMP_DIR/jstat-gccapacity-$PID.dump 2>&1
    echo -e ".\c"
    $JAVA_HOME/bin/jmap $PID > $DUMP_DIR/jmap-$PID.dump 2>&1
    echo -e ".\c"
    $JAVA_HOME/bin/jmap -heap $PID > $DUMP_DIR/jmap-heap-$PID.dump 2>&1
    echo -e ".\c"
    $JAVA_HOME/bin/jmap -histo $PID > $DUMP_DIR/jmap-histo-$PID.dump 2>&1
    echo -e ".\c"
    # Open file descriptors of the process, if lsof is installed.
    if [ -r /usr/sbin/lsof ]; then
        /usr/sbin/lsof -n -p $PID > $DUMP_DIR/lsof-$PID.dump
        echo -e ".\c"
    fi
done
# Collect OS-level statistics: each available tool's output goes to its own
# .dump file under $DUMP_DIR, with one progress dot per collection.
# Tools whose binaries are absent/unreadable are silently skipped, exactly
# as the original per-tool if-blocks did.
run_os_dump() {
    local tool_path=$1 out_file=$2
    shift 2
    if [ -r "$tool_path" ]; then
        "$tool_path" "$@" > "$DUMP_DIR/$out_file"
        echo -e ".\c"
    fi
}

run_os_dump /usr/bin/sar     sar.dump
run_os_dump /usr/bin/uptime  uptime.dump
run_os_dump /usr/bin/free    free.dump    -t
run_os_dump /usr/bin/vmstat  vmstat.dump
run_os_dump /usr/bin/mpstat  mpstat.dump
run_os_dump /usr/bin/iostat  iostat.dump
run_os_dump /bin/netstat     netstat.dump -n

echo "OK!"
| true |
6a88ab8096e96af360670887f5173b05a4cf0b22 | Shell | jiangwenkkkk/work_sit | /sh/getdb.sh | UTF-8 | 833 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env bash
# Collect `df` output from every host in a list by ssh-ing in with expect,
# then post-process the capture into db.txt for db.awk.
# SECURITY NOTE(review): the ssh user, port and *password* are hard-coded
# below in plain text — anyone who can read this script gets the credential;
# key-based auth would remove the need for expect entirely.
function get_imsitime()
{
# $1: file containing one target IP address per line.
for ip in `cat $1`
do
echo $ip
/usr/bin/expect <<- EOF
spawn ssh james@$ip -p 29367
expect {
"*password:" { send "james@home\r" }
}
expect "*$ "
send "df \r"
expect "*$ "
send "exit\r"
EOF
done
}
# Capture per-host df output, then pull out the host IPs (from the spawn
# echo) and the numeric df lines, and feed them to db.awk for aggregation.
get_imsitime ip_list.txt > db.txt
sed -ne "s/.*@\(10.142.234.[0-9]\+\)'[^0-9]*/\1/p" -ne '/^[0-9]\+/p' db.txt | awk -f db.awk
# Commented-out earlier variant kept for reference (different user/host set):
##!/usr/bin/env bash
#
#function get_disk_info()
#{
#	for ip in `cat $1`
#	do
#	echo $ip
#
#	/usr/bin/expect <<- EOF
#
#	spawn ssh roam@$ip
#expect {
#"*password:" { send "roam\r" }
#}
#expect "*$ "
#send "df \r"
#
#expect "*$ "
#send "exit\r"
#
#EOF
#	done
#
#}
#
#get_imsitime ip_list.txt > db.txt
#sed -ne "s/.*@\(10.142.234.[0-9]\+\)'[^0-9]*/\1/p" -ne '/^[0-9]\+/p' db.txt | awk -f db.awk
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.