blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ebfda6dcc07e0efe79304623845bbba1c598de11 | Shell | PacktPublishing/Mastering-Bash | /Chapter05/Scripts/loop1.sh | UTF-8 | 152 | 3.140625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Demonstrates iterating over a Bash array: prints each element on its own line.
declare -a my_array=("one" "two" "three" "four" "five")
for word in "${my_array[@]}"; do
  echo "$word"
done
| true |
9bc110dd6426e7f603c2db7fec37ff9cec2fec2c | Shell | Rp70/sysadmintools | /vmware/esxivmbackup.sh | UTF-8 | 3,298 | 3.796875 | 4 | [] | no_license | #!/bin/sh
#Edit these values to match your environment
#####################################
#The datastore to backup to
backupDataStore=<backupDataStore>
#The directory on the above datastore to backup to(the default is mm-dd-yyyy)
backupDirectory=$(date +%m-%d-%Y)
#The list of virtual machine names(separated by a space) to backup
vmsToBackup="VM1 VM2 VM3"
#The amount of time to wait for the snapshot to complete, some systems are slower than others and snapshot operations may take longer to complete
waitTime=40s
#####################################
startTime=$(date)
echo Backup start time: $startTime
echo Creating backup directory /vmfs/volumes/$backupDataStore/$backupDirectory
mkdir -p /vmfs/volumes/$backupDataStore/$backupDirectory
# Save the ESXi host configuration bundle, then copy it from the scratch
# download area into the backup directory.
echo Backing up ESXi host configuration...
vim-cmd hostsvc/firmware/backup_config
cp /scratch/downloads/*.tgz /vmfs/volumes/$backupDataStore/$backupDirectory/
for vm in $vmsToBackup;do
vmName=$vm
# Look up the VM's numeric id and its config-file path in one pass; the two
# values are joined with ';' so they can be split via parameter expansion below.
vmIdAndConfigPath=$( vim-cmd vmsvc/getallvms | awk '{ if ($2 == vmname) print $1 ";" $3 $4}' vmname=$vm)
vmId=${vmIdAndConfigPath%;*}
if [ "$vmId" != "" ]; then
echo Backing up virtual machine: $vmName
echo Backing up the virtual machines configuration...
# Convert "[datastore] path/to/vm.vmx" into "datastore;path/to/vm.vmx".
vmConfigurationFilePath=$(echo ${vmIdAndConfigPath#*;} | sed -e 's/\[\(.*\)\]\(.*\)/\1;\2/')
vmConfigurationSourceDataStore=${vmConfigurationFilePath%;*}
vmConfigurationFile=${vmConfigurationFilePath#*;}
echo Making directory /vmfs/volumes/$backupDataStore/$backupDirectory/${vmConfigurationFile%/*}
mkdir -p /vmfs/volumes/$backupDataStore/$backupDirectory/${vmConfigurationFile%/*}
echo Copying /vmfs/volumes/$vmConfigurationSourceDataStore/$vmConfigurationFile to /vmfs/volumes/$backupDataStore/$backupDirectory/$vmConfigurationFile
cp /vmfs/volumes/$vmConfigurationSourceDataStore/$vmConfigurationFile /vmfs/volumes/$backupDataStore/$backupDirectory/$vmConfigurationFile
# Snapshot the VM so its disks are in a consistent state while being cloned.
echo Taking the snapshot...
vim-cmd vmsvc/snapshot.create $vmId "Backup"
echo Waiting $waitTime for the snapshot to complete...
sleep $waitTime
echo Getting diskFile list...
# Extract "datastore;path.vmdk" pairs for every disk below the snapshot marker.
vmDiskFilePaths=$(vim-cmd vmsvc/get.filelayout $vmId | grep -i snapshotFile -A2000 | sed -n -e 's/\"\[\(.*\)\]\s\(.*\.vmdk\)\"\,/\1;\2/pg')
# NOTE(review): $vmDiskFilePaths is echoed as a single line here, so `wc -l`
# always reports 1; the printed count is informational only -- confirm intent.
echo Found $(echo $vmDiskFilePaths | wc -l) disk file\(s\)...
for vmDiskFilePath in $vmDiskFilePaths; do
vmDiskFileSourceDataStore=${vmDiskFilePath%;*}
vmDiskFile=${vmDiskFilePath#*;}
if [ -e /vmfs/volumes/$vmDiskFileSourceDataStore/$vmDiskFile ]; then
if [ ! -d /vmfs/volumes/$backupDataStore/$backupDirectory/${vmDiskFile%/*} ]; then
mkdir -p /vmfs/volumes/$backupDataStore/$backupDirectory/${vmDiskFile%/*}
fi
echo Cloning /vmfs/volumes/$vmDiskFileSourceDataStore/$vmDiskFile to /vmfs/volumes/$backupDataStore/$backupDirectory/$vmDiskFile
# 2gbsparse keeps the clone thin-provisioned, split into <2GB extents.
vmkfstools -d 2gbsparse -i /vmfs/volumes/$vmDiskFileSourceDataStore/$vmDiskFile /vmfs/volumes/$backupDataStore/$backupDirectory/$vmDiskFile
fi
done
echo Removing the snapshot...
vim-cmd vmsvc/snapshot.removeall $vmId
else
echo ERROR: Could not get an id for $vmName
fi
done
endTime=$(date)
echo Backup end time: $endTime
#echo Elapsed time: $(($startTime - $endTime))
| true |
85cd97420310914fdcbccbb82f2a590d14c48e6e | Shell | ctaggart/KoreBuild | /template2/build.sh | UTF-8 | 3,683 | 4.15625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# KoreBuild 2.0
# Colors
# ANSI escape sequences used with `echo -e` for colored status output.
GREEN="\033[1;32m"
BLACK="\033[0;30m"
RED="\033[0;31m"
RESET="\033[0m"
# Absolute directory containing this script; all work happens relative to it.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $DIR
show_help() {
echo "Usage: $0 [-r] [--] [arguments to msbuild]"
echo " $0 [-r] [-u <URL>] [--] [arguments to msbuild]"
echo " $0 [-r] [-b <BRANCH>] [--] [arguments to msbuild]"
echo ""
echo "Arguments:"
echo " -r, --reset-korebuild Delete the current `.build` directory and re-fetch KoreBuild"
echo " -u, --korebuild-url <URL> Fetch KoreBuild from URL"
echo " -b, --korebuild-branch <BRANCH> Fetch KoreBuild from BRANCH in the default repository (https://github.com/aspnet/KoreBuild)"
echo " --korebuild-dir <DIR> Copy KoreBuild from DIR instead of downloading it"
echo " -- Consider all remaining arguments arguments to MSBuild when building the repo."
echo ""
echo "Notes:"
echo " The '--' switch is only necessary when you want to pass an argument that would otherwise be recognized by this"
echo " script to MSBuild. By default, any unrecognized argument will be forwarded to MSBuild."
echo ""
echo " If you wish to build a specific target from the MSBuild project file, use the '-t:<TARGET>' switch, which will be forwarded"
echo " to MSBuild. For example `.\build.sh -t:Verify`"
}
# Parse command-line options; anything after '--' (or the first unrecognized
# argument) is left in "$@" and forwarded to KoreBuild.sh, which passes it
# through to MSBuild.
while [[ $# > 0 ]]; do
case $1 in
-h|-\?|--help)
show_help
exit 0
;;
-r|--reset-korebuild)
KOREBUILD_RESET=1
;;
-u|--korebuild-url)
KOREBUILD_URL=$2
shift
;;
-b|--korebuild-branch)
KOREBUILD_BRANCH=$2
shift
;;
--korebuild-dir)
KOREBUILD_LOCAL=$2
shift
;;
--)
shift
break
;;
*)
break
;;
esac
shift
done
# Derive the download URL when one was not given explicitly: either the
# requested branch's tarball or the default 'dev' branch tarball.
if [ -z $KOREBUILD_URL ]; then
if [ ! -z $KOREBUILD_BRANCH ]; then
KOREBUILD_URL="https://github.com/aspnet/KoreBuild/tarball/$KOREBUILD_BRANCH"
else
KOREBUILD_URL="https://github.com/aspnet/KoreBuild/tarball/dev"
fi
fi
BUILD_FOLDER="$DIR/.build"
KOREBUILD_ROOT="$BUILD_FOLDER/KoreBuild"
BUILD_FILE="$KOREBUILD_ROOT/scripts/KoreBuild.sh"
# -r/--reset-korebuild: discard the cached copy so it is re-fetched below.
if [[ -d $BUILD_FOLDER && $KOREBUILD_RESET = "1" ]]; then
echo -e "${GREEN}Cleaning old KoreBuild folder to force a reset ...${RESET}"
rm -Rf $BUILD_FOLDER
fi
# KoreBuild is only fetched when the cache folder is absent.
if [ ! -d $BUILD_FOLDER ]; then
mkdir -p $BUILD_FOLDER
if [ ! -z $KOREBUILD_LOCAL ]; then
echo -e "${GREEN}Copying KoreBuild from $KOREBUILD_LOCAL ...${RESET}"
cp -R "$KOREBUILD_LOCAL" "$KOREBUILD_ROOT"
else
echo -e "${GREEN}Downloading KoreBuild from $KOREBUILD_URL ...${RESET}"
KOREBUILD_DIR=`mktemp -d`
KOREBUILD_TAR="$KOREBUILD_DIR/korebuild.tar.gz"
retries=6
# Try wget first, fall back to curl; retry up to 6 times, 10s apart.
until (wget -O $KOREBUILD_TAR $KOREBUILD_URL 2>/dev/null || curl -o $KOREBUILD_TAR --location $KOREBUILD_URL 2>/dev/null); do
echo -e "${RED}Failed to download '$KOREBUILD_TAR'${RESET}"
if [ "$retries" -le 0 ]; then
exit 1
fi
retries=$((retries - 1))
echo "${BLACK}Waiting 10 seconds before retrying. Retries left: $retries${RESET}"
sleep 10s
done
mkdir $KOREBUILD_ROOT
# --strip-components 1 drops GitHub's aspnet-KoreBuild-<sha>/ wrapper directory.
tar xf $KOREBUILD_TAR --strip-components 1 --directory $KOREBUILD_ROOT
rm -Rf $KOREBUILD_DIR
fi
fi
cd $DIR
chmod a+x $BUILD_FILE
# Hand all remaining arguments through to KoreBuild.
$BUILD_FILE "$@"
| true |
e94be0d1363234cb0ac431199d89808be6f01e8d | Shell | ShilGen/my_unix_sh | /breaksleep.sh | UTF-8 | 854 | 3.1875 | 3 | [] | no_license | #! /bin/bash
# Copyright 2013 ShilGen.ru geniusshil@gmail.com
# Show a yad dialog offering delayed-shutdown / cancel / logout choices.
# Button exit codes: 0 = OK (use the selected entry text), 1 = close,
# 2 = switch user.
action=$(yad --width 300 --entry --title "System Logout" \
--image=gnome-shutdown \
--button="Switch User:2" \
--button="gtk-ok:0" --button="gtk-close:1" \
--text "Choose action:" \
--entry-text \
"min10" "min30" "Cansel" "Logout")
ret=$?
# Close button pressed: do nothing.
[[ $ret -eq 1 ]] && exit 0
# Switch User: start a new greeter session and leave this session running.
if [[ $ret -eq 2 ]]; then
gdmflexiserver --startnew &
exit 0
fi
# Map the chosen entry to a command.
# NOTE(review): "Cansel" is misspelled, but the case pattern below matches the
# dialog label above -- both must be changed together if ever corrected.
case $action in
min10*) cmd="sudo shutdown -P +10" ;;
min30*) cmd="sudo shutdown -P +30" ;;
Cansel*) cmd="sudo shutdown -c" ;;
Logout*)
# Pick the logout command matching the running window manager.
case $(wmctrl -m | grep Name) in
*Openbox) cmd="openbox --exit" ;;
*FVWM) cmd="FvwmCommand Quit" ;;
*Metacity) cmd="gnome-save-session --kill" ;;
*) exit 1 ;;
esac
;;
*) exit 1 ;;
esac
# Replace this shell with the chosen command.
eval exec $cmd
| true |
8a0a8e1af921c86376c95989c1fbab3f0f67d392 | Shell | brainsciencecenter/salt | /salt/files/usr/local/bin/resetPublicInterface | UTF-8 | 430 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Tag syslog entries with this script's name.
cmd=$(basename "$0")

# If the USB wifi adapter's interface is already up there is nothing to fix.
if ip addr show wlx00e04c29e2e1 | grep -q "state UP"
then
	exit 0
fi

# Bug fix: the original log message named rtl8812au twice; the second module
# removed below is actually cfg80211.
logger -t "$cmd" "Removing rtl8812au and cfg80211 modules"
rmmod rtl8812au
rmmod cfg80211
sleep 10

logger -t "$cmd" "Re-inserting rtl8812au module"
modprobe rtl8812au
sleep 10

logger -t "$cmd" "Bring AirPenNet interface up"
# Looks like systemd sees the new interface and brings it up
nmcli connection up AirPennNet
| true |
98c40b7b452a1e08ac91106c8a8a2af93506f529 | Shell | seanreed1111/TTP-EXCEL-CONTENT | /update-validators | UTF-8 | 194 | 2.625 | 3 | [] | no_license | #!/bin/bash
# Abort with guidance if the submodules have never been set up.
if [ ! -f .gitmodules ];
then
echo "No submodules initialized. Run './setup' to correct"
exit 1
fi
# Sync every submodule to its remote-tracking commit, then build each one.
git submodule update --init --remote --force
git submodule foreach './build'
| true |
4d6e2cb75c22fe3458bdbdf714b07b384fea9fa1 | Shell | HeroCC/dotfiles | /install.sh | UTF-8 | 3,311 | 3.90625 | 4 | [] | no_license | #!/bin/bash
function pull {
cd $DOTFILE_DIR
git pull origin master
git submodule init
git submodule update
# git submodule foreach git submodule update --init
# git submodule foreach update --init
}
function update {
if [ "$noUpdate" == 'false' ]; then
if [ "$autoUpdate" == 'true' ]; then
sudo apt-get install "$@"
elif [ "$autoUpdate" == 'false' ]; then
echo "Do you want to update $@"
read update
if [ "$update" == y ]; then
sudo apt-get install "$@"
fi
fi
fi
}
function link {
if [ "$forseLN" = 'true' ]; then
ln -sf $1 $2
else
if [[ -e "$2" ]]; then
echo "$2 already exists, if it is a symlink it will be deleted"
if [[ -h "$2" ]]; then
rm -rf "$2"
ln -s $1 $2
else
echo "Not a symlink, renaming and linking"
mv -f "$2" "$2_old"
ln -s $1 $2
fi
else
ln -s $1 $2
fi
fi
}
function installConfigs {
#ZSH
update zsh
echo "Installing ZSH Config"
link $DOTFILE_DIR/zsh ~/.zsh
link $DOTFILE_DIR/zsh/zshrc ~/.zshrc
link $DOTFILE_DIR/zsh/zshenv ~/.zshenv
if [ "$CI" == "false" ] && [ "$SHELL" != "/usr/bin/zsh" ]; then
chsh -s $(which zsh)
fi
echo ""
# #git
# update git
# echo "Installing Git Config"
# link $DOTFILE_DIR/git/gitconfig ~/.gitconfig
# link $DOTFILE_DIR/git/gitignore_global ~/.gitignore_global
# link $DOTFILE_DIR/git/gitattributes ~/.gitattributes
# echo ""
#Screen
update screen
echo "Installing Screen Config"
link $DOTFILE_DIR/screen/screenrc ~/.screenrc
echo ""
# Tmux
update tmux
echo "Installing Tmux Config"
link $DOTFILE_DIR/tmux/tmux.conf ~/.tmux.conf
echo ""
#SSH
update openssh-client
update openssh-server
echo "Installing SSH Config"
mkdir -p ~/.ssh/
link $DOTFILE_DIR/ssh/config ~/.ssh/config
echo ""
# #Gem
# update ruby-full
# echo "Installing Gem Config"
# link $DOTFILE_DIR/gem/gemrc ~/.gemrc
# echo Installing Rbenv
# git clone https://github.com/sstephenson/rbenv.git ~/.rbenv
# git clone https://github.com/sstephenson/ruby-build.git ~/.rbenv/plugins/ruby-build
# echo ""
# if [[ -n "$XDG_CURRENT_DESKTOP" ]]; then
# #Sublime 3
# echo "Installing Sublime Text 3 Config"
# mkdir -p ~/.config/sublime-text-3/Packages/
# cd ~/.config/sublime-text-3/Packages/
# link $DOTFILE_DIR/sublimetext/User User
# echo ""
# fi
#VIM
update vim
echo "Installing VIM config"
link $DOTFILE_DIR/vim ~/.vim
link $DOTFILE_DIR/vim/vimrc ~/.vimrc
link ~/.vim ~/.config/nvim
link ~/.vimrc ~/.config/nvim/init.vim
vim +PluginInstall +qall
echo ""
# #Gradle
# echo "Installing Gradle Config"
# mkdir -p ~/.gradle/
# link $DOTFILE_DIR/gradle/gradle.properties ~/.gradle/gradle.properties
# echo ""
}
function main {
autoUpdate='false'
noLN='false'
forseLN='false'
noUpdate='false'
while getopts 'nufi' flag; do
case "${flag}" in
i) noUpdate='true' ;;
n) noLN='true' ;;
u) autoUpdate='true' ;;
f) forseLN='true' ;;
*) error "Unexpected option ${flag}" ;;
esac
done
if [ "$USER" == "travis" ]; then
CI='true'
DOTFILE_DIR="$(pwd)"
else
CI='false'
DOTFILE_DIR="$HOME/.dotfiles"
fi
if [ "$noLN" == 'true' ]; then
pull
elif [ "$noLN" == 'false' ]; then
pull
installConfigs
fi
}
main "$@"
| true |
79e287d0cba6419fb68d32936cd54b4e94b70c06 | Shell | bearlin/dotvim | /scripts/update_pathogen.sh | UTF-8 | 902 | 3.25 | 3 | [] | no_license | #!/bin/bash
DOTVIMHOME=~/.vim
source $DOTVIMHOME/scripts/handy_functions.sh
# Reference:
# https://github.com/tpope/vim-pathogen
cd "$DOTVIMHOME"
# ------------------------
hasWget=0
hasCurl=0
setFlagsHasWgetHasCurlAndExitIfBothEqualZero "$@"
echo "hasWget=$hasWget"
echo "hasCurl=$hasCurl"
rm -rf $DOTVIMHOME/autoload/pathogen.vim
if [ $hasWget == 1 ]; then
#wget "https://tpo.pe/pathogen.vim" -O "$DOTVIMHOME/autoload/pathogen.vim"
wget "https://raw.githubusercontent.com/tpope/vim-pathogen/master/autoload/pathogen.vim" -O "$DOTVIMHOME/autoload/pathogen.vim"
elif [ $hasCurl == 1 ]; then
#curl -LSo "$DOTVIMHOME/autoload/pathogen.vim" "https://tpo.pe/pathogen.vim"
curl -LSo "$DOTVIMHOME/autoload/pathogen.vim" "https://raw.githubusercontent.com/tpope/vim-pathogen/master/autoload/pathogen.vim"
else
die "Unknow parameters, exit"
fi
# ------------------------
cd -
byebye "$@"
| true |
815698c7060f4553e9f257143ae2575140ec27f6 | Shell | Nils-TUD/aconfmgr | /test/t/t-2_apply-2_files-2_managed-1_modified-4_editgone.sh | UTF-8 | 702 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
source ./lib.bash
# Test modifying a file that's not on the filesystem.
TestPhase_Setup ###############################################################
TestAddPackageFile test-package /testfile.txt foo
TestCreatePackageFile test-package
TestAddConfig 'echo bar >> $(CreateFile /testfile.txt)'
TestPhase_Run #################################################################
AconfApply
TestPhase_Check ###############################################################
# XFAIL - FIXME!
# diff -u "$test_data_dir"/files/testfile.txt /dev/stdin <<<bar
diff -u "$test_data_dir"/files/testfile.txt <(printf foo)
TestDone ######################################################################
| true |
cb9901a905e0daa9d7e9dd510ee47226963bf5f0 | Shell | DookTibs/shellScripts | /tomcatHelper.sh | UTF-8 | 15,360 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# jdb -connect com.sun.jdi.SocketAttach:port=9005,hostname=localhost -sourcepath .
# JAVA_HOME="/cygdrive/c/Program Files/Java/jdk1.8.0_112/" bash -c '/cygdrive/c/development/tomcat/apache-tomcat-9.0.0.M15/bin/catalina.sh start'
# todo - rewrite this to use python or something to be more modular. And then start using this for deployments.
# not needed anymore; now we get things like the AWS secrets from environment variables...
# source sensitiveData.sh
# this will convert to "localdev" or "localprod" for the Spring profile...
# default value
targetEnv="sandbox"
# targetEnv="dev"
# targetEnv="prod"
# targetEnv="dev2021"
# targetEnv="prod2021"
# targetEnv="sandbox"
# Second positional argument (after the command) overrides the target environment.
if [ "${2}" != "" ]; then
targetEnv="${2}"
fi
# Map the target environment to its dragon/Spring environment name and to the
# local port number used below to grep for the matching SSH tunnel.
if [ "${targetEnv}" == "dev2021" ]; then
dragonEnv="dev"
tunnelGrepper="2432"
elif [ "${targetEnv}" == "oldprod" ]; then
dragonEnv="oldprod"
tunnelGrepper="9432"
elif [ "${targetEnv}" == "prod2021" ]; then
dragonEnv="prod"
tunnelGrepper="1432"
elif [ "${targetEnv}" == "sandbox" ]; then
dragonEnv="sandbox"
tunnelGrepper="8432"
elif [ "${targetEnv}" == "dev" ]; then
dragonEnv="dev"
tunnelGrepper="3432"
elif [ "${targetEnv}" == "prod" ]; then
dragonEnv="prod"
tunnelGrepper="4432"
else
echo "invalid environment"
exit 1
fi
# Environment-variable names holding the per-environment AWS credentials.
dynamic_key_env_lookup="LITSTREAM_APP_SDK_USER_ACCESS_KEY_ID_${targetEnv}"
dynamic_secret_env_lookup="LITSTREAM_APP_SDK_USER_SECRET_ACCESS_KEY_${targetEnv}"
# note the exclamation point; I want to grab something from an environment variable,
# but it's a dynamic name based on the environment passed into this script
# neat trick, learned in Feb 2022, never needed this before!
# https://stackoverflow.com/questions/9714902/how-to-use-a-variables-value-as-another-variables-name-in-bash
aws_access_key_id="${!dynamic_key_env_lookup}"
aws_secret_access_key="${!dynamic_secret_env_lookup}"
echo "for '${targetEnv}', gonna use [${aws_access_key_id}] and [${aws_secret_access_key}] (defined in sensitive_data.sh which should never get checked in..."
# see https://stackoverflow.com/questions/15555838/how-to-pass-tomcat-port-number-on-command-line
tomcatHttpPort=8081
# Third positional argument optionally overrides the Tomcat HTTP port.
if [ ! -z "${3}" ]; then
tomcatHttpPort=${3}
fi
# tomcatShutdownPort=$(($tomcatHttpPort + 10))
# Require exactly one open SSH tunnel for the chosen environment before proceeding.
openTunnels=`checkTunnels.sh | grep "$tunnelGrepper:.*_jumpbox" | wc -l`
if [ $openTunnels -ne 1 ]; then
echo "$openTunnels tunnels found running for dragon env '$dragonEnv'."
echo "Make sure tunnel(s) are configured properly before proceeding. (probably tunnel_dragon_${dragonEnv}_start)"
echo "Exiting without doing anything."
exit 1
fi
actualDragonEnv="not_set"
actualDragonEnv="local${dragonEnv}"
source ~/development/configurations/bash/functions.bash
# Tomcat install directory name (second-to-last path element of TOMCAT_HOME),
# e.g. "apache-tomcat-9.0.0.M15"; used later to find the running process.
runningTomcatVersion=`echo $TOMCAT_HOME | awk -F "/" '{print $(NF-1)}'`
echo "#################"
echo "# env == $dragonEnv"
echo "#################"
# echo "Running on '$runningTomcatVersion' (Tomcat 8 is port 8088, Tomcat9 is port 8081)"
echo "Running on '$runningTomcatVersion' (port $tomcatHttpPort)"
# TOMCAT_HOME=/cygdrive/c/development/tomcat/apache-tomcat-9.0.0.M15/
logfile=${TOMCAT_HOME}logs/catalina.out
# runTomcatCmd (start|stop)
# Runs catalina.sh with the environment-specific AWS credentials, Spring
# profile, and JVM system properties. Historical launch command variants are
# kept below as comments for reference.
runTomcatCmd() {
if [ "${1}" == "stop" ] || [ "${1}" == "start" ]; then
# JPDA are debugger related
# debugging launch
# JPDA_ADDRESS="localhost:9005" JPDA_TRANSPORT="dt_socket" CLASSPATH="/cygdrive/c/Program\ Files/Java/jdk1.8.0_112/lib/tools.jar" CATALINA_OPTS="-Dspring.profiles.active=prod,migration -DbaseUrl=http://localhost:8081 -Djava.endorsed.dirs=/cygdrive/c/development/tomcat/apache-tomcat-9.0.0.M15/endorsed -XX:+CMSClassUnloadingEnabled -Dfile.encoding=Cp1252" JAVA_HOME="/cygdrive/c/Program Files/Java/jdk1.8.0_112/" bash -c "/cygdrive/c/development/tomcat/apache-tomcat-9.0.0.M15/bin/catalina.sh jpda $1"
# dev, prod, localdev, localprod
# standard launch
# AWS_ACCESS_KEY_ID="${S3_USER_AWS_ACCESS_KEY_ID}" AWS_SECRET_ACCESS_KEY="${S3_USER_AWS_SECRET_ACCESS_KEY}" CLASSPATH="/cygdrive/c/Program\ Files/Java/jdk1.8.0_112/lib/tools.jar" CATALINA_OPTS="-Dspring.profiles.active=${actualDragonEnv},tibs -Ddragon.tierType=web -DbaseUrl=http://localhost:8081 -Djava.endorsed.dirs=${TOMCAT_HOME}endorsed -XX:+CMSClassUnloadingEnabled -Dfile.encoding=Cp1252" JAVA_HOME="/cygdrive/c/Program Files/Java/jdk1.8.0_112/" bash -c "${TOMCAT_HOME}bin/catalina.sh $1"
# AWS_ACCESS_KEY_ID="${S3_USER_AWS_ACCESS_KEY_ID}" AWS_SECRET_ACCESS_KEY="${S3_USER_AWS_SECRET_ACCESS_KEY}" CLASSPATH="/cygdrive/c/Program\ Files/Java/jdk1.8.0_161/lib/tools.jar" CATALINA_OPTS="-Dspring.profiles.active=${actualDragonEnv},tibs -Ddragon.tierType=web -DbaseUrl=http://localhost:${tomcatHttpPort} -Djava.endorsed.dirs=${TOMCAT_HOME}endorsed -Dport.http=${tomcatHttpPort} -XX:+CMSClassUnloadingEnabled -Dfile.encoding=Cp1252" JAVA_HOME="/cygdrive/c/Program Files/Java/jdk1.8.0_161/" bash -c "${TOMCAT_HOME}bin/catalina.sh $1"
# Dead branch: "no" never equals "yes"; kept as a manual toggle for the
# JDK 11 / Tomcat 8.5 experiments below.
if [ "no" == "yes" ]; then
if [ "${DRAGON_11_UPGRADE}" == "yes" ]; then
echo "JAVA 11 WORK USING ${TOMCAT_HOME}!!!"
# testing for JDK 11 / Tomcat 8.5 support - also had to remove some things like endorsed.dirs, CLASSPATH, etc.
JAVA_TO_USE="/Library/Java/JavaVirtualMachines/amazon-corretto-11.jdk/Contents/Home/"
# JAVA_TO_USE="/Library/Java/JavaVirtualMachines/openjdk-11.jdk/Contents/Home/"
AWS_ACCESS_KEY_ID="${S3_USER_AWS_ACCESS_KEY_ID}" AWS_SECRET_ACCESS_KEY="${S3_USER_AWS_SECRET_ACCESS_KEY}" CATALINA_OPTS="-Dspring.profiles.active=${actualDragonEnv},tibs,xmigration -Ddragon.tierType=web -DbaseUrl=http://localhost:${tomcatHttpPort} -Dport.http=${tomcatHttpPort} -XX:+CMSClassUnloadingEnabled -Dfile.encoding=Cp1252" JAVA_HOME="${JAVA_TO_USE}" bash -c "${TOMCAT_HOME}bin/catalina.sh $1"
# maybe set JAVA_HOME like this?
# JAVA_HOME="$HOME/.jenv/versions/`jenv version-name`
else
echo "LAUNCHING STANDARD"
# current command as of 20180530
AWS_ACCESS_KEY_ID="${S3_USER_AWS_ACCESS_KEY_ID}" AWS_SECRET_ACCESS_KEY="${S3_USER_AWS_SECRET_ACCESS_KEY}" CLASSPATH="/Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home/lib/tools.jar" CATALINA_OPTS="-Dspring.profiles.active=${actualDragonEnv},tibs,xmigration -Ddragon.tierType=web -DbaseUrl=http://localhost:${tomcatHttpPort} -Djava.endorsed.dirs=${TOMCAT_HOME}endorsed -Dport.http=${tomcatHttpPort} -XX:+CMSClassUnloadingEnabled -Dfile.encoding=Cp1252" JAVA_HOME="/Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home/" bash -c "${TOMCAT_HOME}bin/catalina.sh $1"
fi
fi
# trying to add jvisualvm support
# AWS_ACCESS_KEY_ID="${S3_USER_AWS_ACCESS_KEY_ID}" AWS_SECRET_ACCESS_KEY="${S3_USER_AWS_SECRET_ACCESS_KEY}" CLASSPATH="/Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home/lib/tools.jar" CATALINA_OPTS="-Dspring.profiles.active=${actualDragonEnv},tibs,xmigration -Ddragon.tierType=web -DbaseUrl=http://localhost:${tomcatHttpPort} -Djava.endorsed.dirs=${TOMCAT_HOME}endorsed -Dport.http=${tomcatHttpPort} -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.port=9090 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Djava.rmi.server.hostname=localhost -XX:+CMSClassUnloadingEnabled -Dfile.encoding=Cp1252" JAVA_HOME="/Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home/" bash -c "${TOMCAT_HOME}bin/catalina.sh $1"
# current command as of 20180530
# AWS_ACCESS_KEY_ID="${S3_USER_AWS_ACCESS_KEY_ID}" AWS_SECRET_ACCESS_KEY="${S3_USER_AWS_SECRET_ACCESS_KEY}" CLASSPATH="/Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home/lib/tools.jar" CATALINA_OPTS="-Dspring.profiles.active=${actualDragonEnv},tibs,xmigration -Ddragon.tierType=web -DbaseUrl=http://localhost:${tomcatHttpPort} -Djava.endorsed.dirs=${TOMCAT_HOME}endorsed -Dport.http=${tomcatHttpPort} -XX:+CMSClassUnloadingEnabled -Dfile.encoding=Cp1252" JAVA_HOME="/Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home/" bash -c "${TOMCAT_HOME}bin/catalina.sh $1"
# current command as of 20220216
# AWS_ACCESS_KEY_ID="${aws_access_key_id}" AWS_SECRET_ACCESS_KEY="${aws_secret_access_key}" CATALINA_OPTS="-Daws_profile_for_sdk=use_this_ptofile -Dspring.profiles.active=${actualDragonEnv} -Ddragon.tierType=web -DbaseUrl=http://localhost:${tomcatHttpPort} -Dport.http=${tomcatHttpPort} -XX:+CMSClassUnloadingEnabled -Dfile.encoding=Cp1252" bash -c "${TOMCAT_HOME}bin/catalina.sh $1"
# current command as of 20220725
# Live path: choose JPDA-debug or normal launch based on DEBUG_LITSTREAM.
if [ "${DEBUG_LITSTREAM}" == "yes" ] && [ "${1}" == "start" ]; then
echo "(debug ${1}; unset environment variable DEBUG_LITSTREAM=yes if you want JPDA debugging turned off)"
JPDA_ADDRESS="localhost:9005" JPDA_TRANSPORT="dt_socket" AWS_ACCESS_KEY_ID="${aws_access_key_id}" AWS_SECRET_ACCESS_KEY="${aws_secret_access_key}" CATALINA_OPTS="-Daws_profile_for_sdk=use_this_ptofile -Dspring.profiles.active=${actualDragonEnv} -Ddragon.tierType=web -DbaseUrl=http://localhost:${tomcatHttpPort} -Dport.http=${tomcatHttpPort} -XX:+CMSClassUnloadingEnabled -Dfile.encoding=Cp1252" bash -c "${TOMCAT_HOME}bin/catalina.sh jpda start"
else
echo "(normal ${1}; set environment variable DEBUG_LITSTREAM=yes if you want JPDA debugging turned on)"
AWS_ACCESS_KEY_ID="${aws_access_key_id}" AWS_SECRET_ACCESS_KEY="${aws_secret_access_key}" CATALINA_OPTS="-Daws_profile_for_sdk=use_this_ptofile -Dspring.profiles.active=${actualDragonEnv} -Ddragon.tierType=web -DbaseUrl=http://localhost:${tomcatHttpPort} -Dport.http=${tomcatHttpPort} -XX:+CMSClassUnloadingEnabled -Dfile.encoding=Cp1252" bash -c "${TOMCAT_HOME}bin/catalina.sh $1"
fi
# TRYING WITHOUT KEYS IN ENVIRONMENT
# CATALINA_OPTS="-Daws_profile_source_for_sdk=aws_credentials_${targetEnv} -Dspring.profiles.active=${actualDragonEnv} -Ddragon.tierType=web -DbaseUrl=http://localhost:${tomcatHttpPort} -Dport.http=${tomcatHttpPort} -XX:+CMSClassUnloadingEnabled -Dfile.encoding=Cp1252" bash -c "${TOMCAT_HOME}bin/catalina.sh $1"
else
echo "Bad arg to runTomcatCmd..."
fi
}
# Start Tomcat, block until the webapp finishes initializing (by watching the
# log for "Server startup in"), report the elapsed time, and blink the tmux
# status bar green on success / red on a suspiciously fast (failed) boot.
startTomcat() {
echo "Deleting Tomcat logfile..."
rm -f "$logfile"
echo "Starting Tomcat"
runTomcatCmd start
echo "Waiting for webapp initialization to complete..."
# logwatcher.sh blocks until the pattern appears; the startup duration in ms
# is the second-to-last field of that log line.
startLine=`logwatcher.sh $logfile "Server startup in"`
msToStart=`echo "${startLine}" | awk '{ print $(NF-1) }'`
prettyTime $msToStart
echo "Initialization completed in ${_prettyTime}"
# blinkRed
# now we take a guess - Tomcat takes at least 45 seconds to start up if
# everything went well w/ Dragon. So let's blink greenish if it took awhile. If
# there was a Spring error during boot, the app will not initialize correctly
# and Tomcat will come up faster. So if Tomcat starts up faster than that, we know
# it's an error, and we blink red.
if [ ${msToStart} -gt 45000 ]; then
tmux_blink colour28
else
tmux_blink red
fi
}
# Stop Tomcat (if the global processId says it is running) and wait until the
# process is actually gone.
# NOTE(review): tomcatShutdownPort's assignment is commented out earlier in
# this script, so it expands empty in the message below -- confirm intent.
stopTomcat() {
if [ -n "${processId}" ]; then
echo "Stopping running Tomcat process (port ${tomcatShutdownPort})..."
runTomcatCmd stop
waitForStoppage
fi
}
# Global holding the pid of the matching Tomcat process ("" when not running).
processId=""
# runs and sets global var processId
# Finds the Tomcat process for this Tomcat version AND this HTTP port (the
# -DbaseUrl grep), so multiple local instances can coexist. Uses procps on
# Cygwin (TOM_OS == "cygwin") and ps on OSX; the pid is in a different column
# for each tool.
getProcessId() {
if [ "cygwin" = ${TOM_OS} ];then
processId=`procps all | grep $runningTomcatVersion | grep "\-DbaseUrl=.*localhost:${tomcatHttpPort}" | awk '{ print $3 }'`
# echo "got process id [$processId] from [$runningTomcatVersion]/[$tomcatHttpPort]"
else
# OSX
processId=`ps -eax | grep $runningTomcatVersion | grep "\-DbaseUrl=.*localhost:${tomcatHttpPort}" | awk '{ print $1 }'`
fi
}
# Print usage help to stdout. If an argument is given it is first reported as
# an unsupported command.
usage() {
if [ "${1}" != "" ]; then
echo "Unsupported command '${1}'"
else
echo "No command supplied"
fi
echo "Supported commands:"
echo "'status' Check if Tomcat is running or not"
echo "'stop' Stop Tomcat localhost instance using scripts"
echo "'kill' Stop Tomcat localhost instance using shell kill command"
# Bug fix: this line previously said "Stop Tomcat localhost instance".
echo "'start' Start Tomcat localhost instance"
echo "'redeploy' Builds and deploys DRAGON Online war, bouncing the Tomcat localhost instance"
echo "'watch' Tails the Tomcat log file"
}
# Result of the most recent prettyTime call ("minutes:seconds.milliseconds").
_prettyTime=""
# prettyTime MILLISECONDS
# Formats a millisecond duration as "M:SS.mmm" (e.g. 61500 -> "1:01.500") and
# stores it in the global _prettyTime. Uses shell arithmetic instead of
# spawning four `expr` subprocesses (expr also exits non-zero when its result
# is 0, which is a trap under `set -e`). Milliseconds keep the original
# unpadded %d formatting for backward compatibility.
prettyTime() {
	local ms=${1}
	local seconds=$(( ms / 1000 ))
	local minutes=$(( seconds / 60 ))
	local leftoverSeconds=$(( seconds % 60 ))
	local leftoverMs=$(( ms % 1000 ))
	# printf -v assigns directly, avoiding a command-substitution subshell.
	printf -v _prettyTime '%d:%02d.%d' "$minutes" "$leftoverSeconds" "$leftoverMs"
}
# Poll (via getProcessId) once per second until the Tomcat process for this
# port disappears, then announce that it has stopped.
waitForStoppage() {
# keep checking til processId is dead
while [ 1 -eq 1 ]; do
echo "waiting for tomcat to stop..."
getProcessId
if [ -z "${processId}" ]; then
break
fi
sleep 1
done
echo "Tomcat stopped!"
}
# get the process id
getProcessId
# Command dispatch: $1 selects status/stop/kill/bounce/start/redeploy/watch.
if [ -z $1 ]; then
usage
elif [ "${1}" == "stop" ] || [ "${1}" == "status" ] || [ "${1}" == "kill" ]; then
if [ -n "${processId}" ]; then
echo "Tomcat is running as pid ${processId}"
if [ "${1}" == "stop" ]; then
stopTomcat
elif [ "${1}" == "kill" ]; then
echo "Killing..."
kill ${processId}
fi
else
echo "Tomcat does not appear to be running."
fi
elif [ "${1}" == "bounce" ]; then
stopTomcat
startTomcat
elif [ "${1}" == "start" ]; then
if [ -n "${processId}" ]; then
echo "Tomcat is already running as pid ${processId}"
else
startTomcat
fi
elif [ "${1}" == "redeploy" ]; then
# redeploy: build the war, stop Tomcat, swap in the new war, restart.
echo "Not rebuilding styles..."
cd $DRAGON_HOME/src/main/webapp/
# gulp styles
# tr -d '\r' < css/main.css > css/tempUnix.css
# mv css/tempUnix.css css/main.css
#we'll assume the sass build was ok; it's not currently returning an error exit code when compilation error occurred...
echo "Rebuilding war..."
cd $DRAGON_HOME
# get the hash of the current commit in Git; we'll use this to name the war
currentHash=`git log --pretty=format:'%H' -n 1`
echo "Commit hash is [$currentHash]...."
mvn clean package
# rm $DRAGON_HOME/last_build_attempt.log
# mvn -X clean package > $DRAGON_HOME/last_build_attempt.log
# $? here is mvn's exit status (only comment lines sit between them).
if [ $? -ne 0 ]; then
echo "Error building war; not proceeding."
exit 1
fi
if [ -n "${processId}" ]; then
stopTomcat
fi
echo "Clearing out installed webapp from Tomcat..."
rm -rf ${TOMCAT_HOME}webapps/ROOT/
# Always-false manual toggle: edit "xhotswap" to "hotswap" to enable the
# jar-replacement path below.
if [ "xhotswap" == "hotswap" ]; then
echo "HOT SWAPPING!"
# make a temp dir...
cd target
mkdir hotswap
cd hotswap
# unzip the war
cp ../dragon-0.0.1-SNAPSHOT.war .
unzip dragon-0.0.1-SNAPSHOT.war
rm dragon-0.0.1-SNAPSHOT.war
# replace some jars...
cp ~/Downloads/poi-src-5.2.2-20220312/build/dist/maven/poi-ooxml/poi-ooxml-5.2.2.jar ./WEB-INF/lib/
cp ~/Downloads/poi-src-5.2.2-20220312/build/dist/maven/poi-ooxml-lite/poi-ooxml-lite-5.2.2.jar ./WEB-INF/lib/
cp ~/Downloads/poi-src-5.2.2-20220312/build/dist/maven/poi/poi-5.2.2.jar ./WEB-INF/lib/
cp ~/Downloads/poi-src-5.2.2-20220312/build/dist/maven/poi-scratchpad/poi-scratchpad-5.2.2.jar ./WEB-INF/lib/
# re-zip it up...
zip -r ./hotswapped.zip *
mv hotswapped.zip ../hotswapped.war
cd ../..
cp target/hotswapped.war ${TOMCAT_HOME}webapps2/dragon-0.0.1-SNAPSHOT.war
else
cp target/dragon-0.0.1-SNAPSHOT.war ${TOMCAT_HOME}webapps2
fi
# Keep a hash-named copy of the war for traceability.
cp target/dragon-0.0.1-SNAPSHOT.war target/dragon-${currentHash}.war
# echo "Deleting problematic .ebextensions from worker tier"
# cp target/dragon-0.0.1-SNAPSHOT.war target/dragon-${currentHash}-web.war
# cp target/dragon-0.0.1-SNAPSHOT.war target/dragon-${currentHash}-worker.war
# zip -d target/dragon-${currentHash}-worker.war .ebextensions/increase_request_timeout_eb.config .ebextensions/httpd/
startTomcat
elif [ "${1}" == "watch" ]; then
tail -F ${logfile}
# rainbow --red=EANDK tail -F ${logfile}
else
usage "${1}"
fi
| true |
2b8ba5a6a920cb7ac96113f530dbdcb3ebd37267 | Shell | darkuranium/tclib | /tests/codec_images/generate.sh | UTF-8 | 2,711 | 4.25 | 4 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/sh
# Generate reference images.
# The tool `texconv` is from DirectXUtils. It is used as the reference codec.
# Note that it must be compiled with OpenEXR support!
SRCDIR='source'
COMDIR='compressed'
REFDIR='reference'
# Sadly, the texconv API doesn't let us name output directly. So we'll generate
# this and then rename.
PREFIX_COM='COM-'
PREFIX_REF='REF-'
# these can be UNORM or UNORM_SRGB
ALGOS_INT='BC1 BC2 BC3 BC7'
# these can be UNORM or SNORM
ALGOS_RGTC='BC4 BC5'
# these can be UFLOAT or SFLOAT
ALGOS_HDR='BC6H'
mkdir -p "$COMDIR" "$REFDIR"
#texconv_and_mv $name $tag $refformat $comformat $otherflags...
texconv_and_mv() {
local name="$1"
local tag="$2"
local comformat="$3"
local refformat="$4"
shift
shift
shift
shift
# strip `.src.*`
local bname="$(basename "$name" | sed 's/\(.*\)\.[^.]*$/\1/')"
local sdir="$(dirname "$name")"
local pwd="$PWD"
# note that simply using `-sx` won't work, because we want a lowercase extension!
cd "$sdir"
# generate compressed
texconv -px "$PREFIX_COM" -sx ".$tag" -f "$comformat" -o "$pwd/$COMDIR" -y "$@" "$(basename "$name")"
cd "$pwd/$COMDIR"
mv "$PREFIX_COM$bname.$tag.DDS" "$bname.$tag.dds"
# generate decompressed
texconv -px "$PREFIX_REF" -f "$refformat" -o "$pwd/$REFDIR" -y "$@" "$bname.$tag.dds"
cd "$pwd/$REFDIR"
mv "$PREFIX_REF$bname.$tag.DDS" "$bname.$tag.dds"
cd "$pwd"
}
# INT & RGTC
for img in ls "$SRCDIR"/*.png; do
if [ ! -f "$img" ]; then
continue
fi
for algo in $ALGOS_INT; do
btag="$(printf '%s' "$algo" | tr 'A-Z' 'a-z')"
texconv_and_mv "$img" "${btag}" "${algo}_UNORM" "R8G8B8A8_UNORM" -srgbi -srgbo &
texconv_and_mv "$img" "${btag}-srgb" "${algo}_UNORM_SRGB" "R8G8B8A8_UNORM_SRGB" -srgbi -srgbo &
done
for algo in $ALGOS_RGTC; do
btag="$(printf '%s' "$algo" | tr 'A-Z' 'a-z')"
if [ "$algo" = "BC4" ]; then
btype='R8'
elif [ "$algo" = "BC5" ]; then
btype='R8G8'
else
echo "Error: Unknown RGTC algorithm '$algo'"
exit 1
fi
texconv_and_mv "$img" "${btag}-u" "${algo}_UNORM" "${btype}_UNORM" -srgbi -srgbo &
texconv_and_mv "$img" "${btag}-s" "${algo}_SNORM" "${btype}_SNORM" -srgbi -srgbo &
done
done
# HDR
for img in "$SRCDIR"/*.exr; do
if [ ! -f "$img" ]; then
continue
fi
for algo in $ALGOS_HDR; do
btag="$(printf '%s' "$algo" | tr 'A-Z' 'a-z')"
echo $btag
texconv_and_mv "$img" "${btag}-u" "${algo}_UF16" "R16G16B16A16_FLOAT" &
texconv_and_mv "$img" "${btag}-s" "${algo}_SF16" "R16G16B16A16_FLOAT" &
done
done
wait
| true |
dccf164e4ea91777583f212999999a5fdf6040b8 | Shell | adswa/python-ml | /code/generate_simg.sh | UTF-8 | 392 | 2.8125 | 3 | [] | no_license | #!/bin/sh
set -e

# Generate a Singularity recipe with Neurodocker.
# Requires the Python package neurodocker (pip install neurodocker).
# Usage: bash code/generate_simg.sh <version-number>

# Fail early with a usage hint instead of silently writing "envs/Singularity."
# when no version number is supplied.
if [ -z "$1" ]; then
    echo "Usage: bash code/generate_simg.sh <version-number>" >&2
    exit 1
fi

# Quote "$1" so the output path survives unusual version strings.
neurodocker generate singularity \
    --base=neurodebian:buster-non-free \
    --pkg-manager=apt \
    --install datalad \
    --run-bash 'pip install numpy pandas scikit-learn scikit-image' \
    > "envs/Singularity.$1"
| true |
29055ff66e12de1e51855130eff4046b2f02f685 | Shell | strongit/NewhostInit | /useful-scripts/tcp-connection-state-counter.sh | UTF-8 | 334 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# @Function
# show count of tcp connection stat.
#
# @Usage
# $ ./tcp-connection-state-counter.sh
#
# @author Strong It
# Skip netstat's two header lines, tally the last field of each row (the TCP
# state, e.g. ESTABLISHED / TIME_WAIT), then sort the counts highest-first.
netstat -tna | awk 'NR > 2 {
s[$NF]++
}
END {
for(v in s) {
printf "%-12s%s\n", v, s[v]
}
}' | sort -nr -k2,2
# One-liner alternative kept for reference:
#netstat -n | awk '/^tcp/ {++S[$NF]} END {for(a in S) print a, S[a]}'
| true |
e47c4017135b0fbfd00dadd884f1768b46532568 | Shell | abhirupju/PeriodicityPy | /runbatch.sh | UTF-8 | 2,865 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Kill all jombies if things go wrong:
# kill -9 `ps aux | grep s1563028 | awk '{if ($11~"python")print $2}'`
# Each block below prints the experiment configuration, launches the
# "noise" and "std" variants of run.py in parallel (&), then blocks on
# both with `wait` before starting the next scheme.  The commented-out
# lines are the larger (n=5000, p=100000) runs.
echo "
#-> Plain
#	-Period range: 0-30
#	-Prior: period - uniform
#	-Resample: every step
#	-Dynamics: static. t-dist scale=0.1
"
python run.py -r noise -n 1000 -p 30 &
#python run.py -r noise -n 5000 -p 100000 &
python run.py -r std -n 1000 -p 30 &
#python run.py -r std -n 5000 -p 100000 &
wait
echo "
#-> Resample_ESS
#	-Period range: 0-30
#	-Prior: period - uniform
#	-Resample: ess based N/2
#	-Dynamics: static. t-dist scale=0.1
"
python run.py -r noise -n 1000 -p 30 -s Resample_ESS &
#python run.py -r noise -n 5000 -p 100000 -s Resample_ESS &
python run.py -r std -n 1000 -p 30 -s Resample_ESS &
#python run.py -r std -n 5000 -p 100000 -s Resample_ESS &
wait
echo "
#-> Dynamics_Mixed_PHalf
#	-Period range: 0-30
#	-Prior: period - uniform
#	-Resample: every step
#	-Dynamics: static. Mixed dist. pi*(oldP/2) + (1-pi)*(t-dist scale=0.1)
"
python run.py -r noise -n 1000 -p 30 -s Dynamics_Mixed_PHalf &
#python run.py -r noise -n 5000 -p 100000 -s Dynamics_Mixed_PHalf &
python run.py -r std -n 1000 -p 30 -s Dynamics_Mixed_PHalf &
#python run.py -r std -n 5000 -p 100000 -s Dynamics_Mixed_PHalf &
wait
echo "
#-> Dynamics_Mixed_PHalf_PDouble
#	-Period range: 0-30
#	-Prior: period - uniform
#	-Resample: every step
#	-Dynamics: static. Mixed dist. pi1*(oldP/2) + pi2*(oldP*2) * (1-pi1-pi2)*(t-dist scale=0.1)
"
python run.py -r noise -n 1000 -p 30 -s Dynamics_Mixed_PTwoSided &
#python run.py -r noise -n 5000 -p 100000 -s Dynamics_Mixed_PHalf_PDouble &
python run.py -r std -n 1000 -p 30 -s Dynamics_Mixed_PTwoSided &
#python run.py -r std -n 5000 -p 100000 -s Dynamics_Mixed_PHalf_PDouble &
wait
echo "
#-> Dynamics_ESS
#	-Period range: 0-30
#	-Prior: period - uniform
#	-Resample: every step
#	-Dynamics: static. t-dist scale=1/ESS
"
python run.py -r noise -n 1000 -p 30 -s Dynamics_ESS &
#python run.py -r noise -n 5000 -p 100000 -s Dynamics_ESS &
python run.py -r std -n 1000 -p 30 -s Dynamics_ESS &
#python run.py -r std -n 5000 -p 100000 -s Dynamics_ESS &
wait
echo "
#-> Dynamics_MW_W
#	-Period range: 0-30
#	-Prior: period - uniform
#	-Resample: every step
#	-Dynamics: static. dynamics only particles with (w < max_weight) with t-dist scale=0.1
"
python run.py -r noise -n 1000 -p 30 -s Dynamics_MW_W &
#python run.py -r noise -n 5000 -p 100000 -s Dynamics_MW_W &
python run.py -r std -n 1000 -p 30 -s Dynamics_MW_W &
#python run.py -r std -n 5000 -p 100000 -s Dynamics_MW_W &
wait
# Planned/remaining configurations (not yet scripted above):
#nParticle: 1000
#nSamples: 100
#nIterations: 10
#Period: 10
#Test Each with:
#1. Prior_Prange_uniform: Uniform with range 0-30
#2. Prior_Punbound_expon: expon scale=1, range 0-1e10
#3. NParticle_5000: 5000
#** Save configuration and numpy result with scheme name
| true |
863c6565f34167909f0d46e99f462775db75ce34 | Shell | qrvd/discord-unix-bot | /sbin/mention.user | UTF-8 | 165 | 3.21875 | 3 | [
"BSL-1.0"
] | permissive | #!/bin/bash
set -ueo pipefail

# A user ID argument is required; prompt and bail out when it is missing.
if (( $# == 0 )); then
    echo "Mention which user ID?"
    exit 1
fi

# Validate/normalise the ID via the helper, then emit a Discord
# nickname-mention tag for it.
uid="$(sbin/readnum "$0" 'user ID' "$1")"
printf '<@!%s>' "$uid"
| true |
b71c991516a4cd58b1f51e4b66bd18687a4ca08d | Shell | dineshondev/portal | /entrypoint.sh | UTF-8 | 2,948 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Resolve the node binary once; every service variant below is launched
# with it.  command -v is the POSIX replacement for `which`.
NODE=$(command -v node)
export NODE
export NODE_OPTIONS=--max_old_space_size=1024
export NODE_PG_FORCE_NATIVE=true
export NODE_ENV=${NODE_ENV:=production}
# FIX: https://github.com/typeorm/typeorm/blob/master/docs/migrations.md
# "Typically it is unsafe to use synchronize: true for schema synchronization on production
#  once you get data in your database. Here is where migrations come to help."
# Dispatch on the first argument.  ${1:-} stays safe when no argument is
# given, and single-comparison tests replace the obsolescent
# [ -n "$*" -a ... ] form (POSIX deprecates -a/-o; the -n "$*" half was
# redundant anyway, since $1 must be non-empty to match any branch).
if [ "${1:-}" = "test" ]; then
  NODE_ENV=test
  shift
  # Forward all remaining arguments; the original passed $2..$5 unquoted
  # and silently dropped anything beyond four extra arguments.
  node_modules/.bin/jest "$@"
elif [ "${1:-}" = "schema" ]; then
  "$NODE" ./node_modules/typeorm/cli.js -f .next/typeorm/ormconfig.js schema:sync
elif [ "${1:-}" = "start" ]; then
  "$NODE" .next/nest/main.js
elif [ "${1:-}" = "start:sync" ]; then
  "$NODE" ./node_modules/typeorm/cli.js -f .next/typeorm/ormconfig.js schema:sync
  "$NODE" dist/apps/sync/main.js
elif [ "${1:-}" = "start:syncJob" ]; then
  "$NODE" ./node_modules/typeorm/cli.js -f .next/typeorm/ormconfig.js schema:sync
  "$NODE" dist/apps/sync-job/main.js
elif [ "${1:-}" = "build" ]; then
  mkdir -p .local
  # Unquoted EOF: every ${VAR} below expands NOW, freezing the current
  # environment into the .env file.
  cat <<EOF > .local/.env
# App
PORT="${PORT}"
# PORT_DEBUG="${PORT_DEBUG}"
DOMAIN="${DOMAIN}"
DEVELOPMENT="${DEVELOPMENT}"
# Logging
LOG_LEVEL="${LOG_LEVEL}"
LOG_SERVER="${LOG_SERVER}"
# Database
DATABASE_URI="${DATABASE_URI}"
DATABASE_URI_RD="${DATABASE_URI_RD}"
DATABASE_SCHEMA="${DATABASE_SCHEMA}"
DATABASE_SYNCHRONIZE="${DATABASE_SYNCHRONIZE}"
DATABASE_DROP_SCHEMA="${DATABASE_DROP_SCHEMA}"
DATABASE_MIGRATIONS_RUN="${DATABASE_MIGRATIONS_RUN}"
DATABASE_LOGGING="${DATABASE_LOGGING}"
# Database Redis
DATABASE_REDIS_URI="${DATABASE_REDIS_URI}"
DATABASE_REDIS_TTL="${DATABASE_REDIS_TTL}"
# GraphQL Redis
GRAPHQL_REDIS_URI="${GRAPHQL_REDIS_URI}"
GRAPHQL_REDIS_TTL="${GRAPHQL_REDIS_TTL}"
# HTTP Redis
HTTP_REDIS_URI="${HTTP_REDIS_URI}"
HTTP_REDIS_TTL="${HTTP_REDIS_TTL}"
HTTP_REDIS_MAX_OBJECTS="${HTTP_REDIS_MAX_OBJECTS}"
# Session Redis
SESSION_NAME="${SESSION_NAME}"
SESSION_REDIS_URI="${SESSION_REDIS_URI}"
SESSION_COOKIE_TTL="${SESSION_COOKIE_TTL}"
SESSION_SECRET="${SESSION_SECRET}"
# LDAP
LDAP="${LDAP}"
# LDAP Redis
LDAP_REDIS_URI="${LDAP_REDIS_URI}"
LDAP_REDIS_TTL="${LDAP_REDIS_TTL}"
# MICROSERVICE
MICROSERVICE_URL="${MICROSERVICE_URL}"
# SOAP
TICKETS_URL="${TICKETS_URL}"
REPORTS_URL="${REPORTS_URL}"
DOCFLOW_URL="${DOCFLOW_URL}"
TICKETS_REDIS_URI="${TICKETS_REDIS_URI}"
TICKETS_REDIS_TTL="${TICKETS_REDIS_TTL}"
REPORTS_REDIS_URI="${REPORTS_REDIS_URI}"
REPORTS_REDIS_TTL="${REPORTS_REDIS_TTL}"
DOCFLOW_REDIS_URI="${DOCFLOW_REDIS_URI}"
DOCFLOW_REDIS_TTL="${DOCFLOW_REDIS_TTL}"
# OSTICKET
OSTICKET_URL="${OSTICKET_URL}"
# NEXTCLOUD
NEXTCLOUD_URL="${NEXTCLOUD_URL}"
NEXTCLOUD_REDIS_URI="${NEXTCLOUD_REDIS_URI}"
NEXTCLOUD_REDIS_TTL="${NEXTCLOUD_REDIS_TTL}"
MAX_FILE_SIZE="${MAX_FILE_SIZE}"
# NEWS
NEWS_URL="${NEWS_URL}"
NEWS_API_URL="${NEWS_API_URL}"
# MAIL
MAIL_URL="${MAIL_URL}"
MAIL_LOGIN_URL="${MAIL_LOGIN_URL}"
# MEETING
MEETING_URL="${MEETING_URL}"
EOF
fi
| true |
965732eb44a1d644bdd93861cd6f36176e4eebbd | Shell | wh5a/arch | /.zer0prompt | UTF-8 | 4,268 | 3.78125 | 4 | [] | no_license | #!/bin/bash
#
# zer0prompt
# By: Wes Brewer [zer0]
# Last updated: Nov 8, 2009
#
# Credit for ideas/info: Phil!'s ZSH Prompt, Bashish, TERMWIDE prompt
# Bash Prompt Howto
#
# Usage: Add the follwing lines to your ~/.bashrc file
# source ~/.zer0prompt
# zer0prompt
# unset zer0prompt
#### user config ####
## set colour theme
# options -- cyan, blue, green, red, purple, yellow, black, white, none
zpcl="cyan"
## set info colours
# colour ref -- http://www.gilesorr.com/bashprompt/howto/c333.html#AEN335
# The \[ \] wrappers tell readline the enclosed bytes are zero-width, so
# line editing does not miscount the prompt length.
zi1="\[\033[1;32m\]" # user@host:tty
zi2="\[\033[1;35m\]" # current path
zi3="\[\033[1;33m\]" # time
zi4="\[\033[1;31m\]" # exit status
zi5="\[\033[1;32m\]" # user identifier ($ or #)
## set time format
# options -- 12, 24, 12s, 24s
zptm="12"
## set line graphics to use
# Unicode box-drawing characters; the commented ASCII set below is a
# fallback for terminals without UTF-8.
zg1="─"; zg2="┌"; zg3="└"; zg4="┤"; zg5="├"; zg6=">"; zg7="|"
#zg1="-"; zg2="+"; zg3="+"; zg4="|"; zg5="|"; zg6=">"; zg7="|"
#### code ####
# Use bash builtin checkwinsize option for terminals which fail to properly
# set the $COLUMNS variable. (bug workaround)
shopt -s checkwinsize
# if root user then colour user@host info and user identifier red.
[ "${UID}" = 0 ] && zi1="\[\033[1;31m\]" && zi5="\[\033[1;31m\]"
# This function is run at every prompt update, keeping our variables updated.
# Bash's PROMPT_COMMAND option handles this (see end of this function).
# Runs before every prompt (via PROMPT_COMMAND below).  Recomputes the
# globals ZEXIT, ZPWD and ZFILL that zer0prompt's PS1 references lazily.
function pre_prompt {
  # show exit code of last failed command, must be the first line in the function
  ZEXIT="${?}"
  [ "$ZEXIT" = "0" ] && ZEXIT=""
  # Whenever displaying the prompt, write the previous line to disk;
  history -a
  ZPWD=${PWD/#$HOME/\~} # shorten home dir to ~
  # set length of our important info
  # (the variable holds the text itself; only ${#infolength} is used below)
  local infolength="$(whoami)@$(hostname):$(basename $(tty))$ZPWD"
  # set length of our graphics
  local gfxlength=23
  # construct ZFILL size to fill terminal width (minus info/gfx lengths).
  local fillsize
  let fillsize=${COLUMNS}-${gfxlength}-${#infolength}
  ZFILL=""
  while [ "$fillsize" -gt "0" ]; do
    ZFILL="$ZFILL$zg1"
    let fillsize=${fillsize}-1
  done
  # determine how much to truncate ZPWD, if ZFILL can't shrink anymore.
  if [ "$fillsize" -lt "0" ]; then
    # "cut" holds the string "3--N"; substring expansion evaluates it
    # arithmetically, so 3-(-N) becomes the positive offset 3+N.
    local cut=3-${fillsize} # some tricky math, 3-(-number)=+number
    ZPWD="...${ZPWD:${cut}}"
  fi
  # Required by /etc/profile.d/autojump.bash
  # It conflicts with our function so we have to copy over.
  (autojump -a "$(pwd -P)"&)>/dev/null 2>>${HOME}/.autojump_errors
}
# Bash calls this function right before printing each prompt.
PROMPT_COMMAND=pre_prompt
# This function tells bash how to draw our prompt
# Build PS1/PS2 from the user-configurable globals (zpcl, zptm, zg*, zi*).
# The colour-theme and time-format if/elif ladders are replaced by
# idiomatic case statements; behaviour is unchanged.
function zer0prompt {
  local zc0="\[\033[0m\]" # clear all colors
  local zc1="\[\033[1;37m\]"
  local zc2="\[\033[0;37m\]"
  local zc3 zc4
  # set colour theme
  case "$zpcl" in
    cyan)   zc3="\[\033[1;36m\]"; zc4="\[\033[0;36m\]" ;;
    blue)   zc3="\[\033[1;34m\]"; zc4="\[\033[0;34m\]" ;;
    green)  zc3="\[\033[1;32m\]"; zc4="\[\033[0;32m\]" ;;
    red)    zc3="\[\033[1;31m\]"; zc4="\[\033[0;31m\]" ;;
    purple) zc3="\[\033[1;35m\]"; zc4="\[\033[0;35m\]" ;;
    yellow) zc3="\[\033[1;33m\]"; zc4="\[\033[0;33m\]" ;;
    black)  zc3="\[\033[1;30m\]"; zc4="\[\033[0;30m\]" ;;
    white)  zc3="\[\033[1;37m\]"; zc4="\[\033[0;37m\]" ;;
    *) # no colour: blank every colour code, including the global info ones
      zc3=""; zc4=""; zc1=""; zc2=""
      zi1=""; zi2=""; zi3=""; zi4=""; zi5=""
      ;;
  esac
  # set time format
  # NOTE(review): ZTIME is computed but not referenced in PS1 below --
  # looks like the time segment was dropped from the prompt; confirm.
  local ZTIME
  case "$zptm" in
    24)  ZTIME="\A" ;;
    12s) ZTIME="\T" ;;
    24s) ZTIME="\t" ;;
    *)   ZTIME="\@" ;;
  esac
  # set titlebar info if xterm/rxvt
  case $TERM in
    xterm*|rxvt*)
      local TITLEBAR='\[\033]0;\u (\w) [${COLUMNS}x${LINES}]\007\]';;
    *)
      local TITLEBAR="";;
  esac
  # prompt (ZPWD/ZFILL/ZEXIT are escaped so they re-expand every prompt)
  PS1="${TITLEBAR}\
$zi1\u@\h:\l$zc4$zg5$zg1\
$zg4$zi1\$ZPWD$zc4$zg5$zg1$zc4\
\$ZFILL$zc3$zg1$zg1$zg1$zg1$zc1$zg1$zg1$zg1$zc3$zg1$zg1$zc4$zg1\
$zg1$zc2$zg1
$zi4\$ZEXIT$zi5\\\$$zc0 "
  # continuation prompt
  PS2="$zc3$zg3$zc4$zg1$zg4$zi5\\\$$zc4$zg5$zc2$zg1$zc3$zg6$zc0 "
}
| true |
bc13020924d9461882b71a6b0e563fa657333781 | Shell | kientv80/webtore_web | /hayhay/monitor.sh | UTF-8 | 2,035 | 2.96875 | 3 | [] | no_license | #!/bin/bash
echo "Start running `date`" >> /kientv/hayhay/monitorlog.txt
if [ $running == "true" ]; then
echo "Other cron is running" >> /kientv/hayhay/monitorlog.txt
else
running="true"
export $running
echo "Checking webserver ....." >> /kientv/hayhay/monitorlog.txt
result=`curl http://360hay.com/googleb15db46baf3c868f.html | grep googleb15db46baf3c868f.html`
ok="google-site-verification: googleb15db46baf3c868f.html"
if [ "$result" == "$ok" ]; then
echo "Webserver is ok" >> /kientv/hayhay/monitorlog.txt
else
echo "Restart server `date`....." >> /kientv/hayhay/monitorlog.txt
cd /kientv/hayhay
app=`jps | grep JettyServer`
echo "Found app $app " >> /kientv/hayhay/monitorlog.txt
pid=${app:0:5}
echo "Found PID $pid " >> /kientv/hayhay/monitorlog.txt
kill -9 $pid
echo "check if pid is killed" >> /kientv/hayhay/monitorlog.txt
jps >> /kientv/hayhay/monitorlog.txt
if [ "$app" != "" ] && [ "$pid" == "" ]; then
echo "pid not found correctly and is NOT killed" >> /kientv/hayhay/monitorlog.txt
else
echo "Start server " >> /kientv/hayhay/monitorlog.txt
nohup sh runservice start >> logfile.log &
fi
fi
#Check webcollector
cd /kientv/webcollector
echo "Ping collector `date`....." >> /kientv/hayhay/monitorlog.txt
ping=`ant ping | grep alive`
echo "Collector status $ping" >> /kientv/hayhay/monitorlog.txt
collector=`jps | grep WebCollector`
colpid=${collector:0:5}
echo "found collector pid $colpid" >> /kientv/hayhay/monitorlog.txt
if [ "$ping" != "" ]; then
echo "Collector still alive" >> /kientv/hayhay/monitorlog.txt
else
echo "Collector is die so restart" >> /kientv/hayhay/monitorlog.txt
if [ "$colpid" != "" ]; then
echo "Kill colpid $colpid" >> /kientv/hayhay/monitorlog.txt
kill -9 $colpid
echo "Check after kill" >> /kientv/hayhay/monitorlog.txt
jps >> /kientv/hayhay/monitorlog.txt
fi
nohup sh runservice start >> logfile.log &
fi
running="false"
export $running
echo "Finished" >> /kientv/hayhay/monitorlog.txt
fi
| true |
66aac2e44520028905c2e78e73ea014a08061f14 | Shell | ftakao2007/vagrant | /script/backup_redmine.sh | UTF-8 | 300 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Anchor the backup directory in $HOME so the SQL dump and the files
# archive land in the same place regardless of the caller's cwd.
# (Previously the dump went to ./redmine_backup while tar wrote to
# ~/redmine_backup -- they only matched when run from $HOME.)
BACKUP_DIR="${HOME}/redmine_backup"
NOW=$(date +%Y%m%d)
mkdir -p "${BACKUP_DIR}"
# NOTE(review): the password on the command line is visible in ps; a
# ~/.my.cnf credentials file would be safer -- confirm before changing.
mysqldump -uredmine -predmine redmine > "${BACKUP_DIR}/redmine.${NOW}.sql"
# Bail out rather than archiving whatever the current directory holds.
cd /var/www/redmine || exit 1
tar zcvf "${BACKUP_DIR}/files.${NOW}.tar.gz" files
#mv files.${NOW}.tar.gz ${BACKUP_DIR}
| true |
7b582f1ee2e1bdece19c6b3303f857960e697cf3 | Shell | lizehang/jsproxy | /i.sh | UTF-8 | 3,646 | 3.953125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Download mirror for prebuilt binaries and pinned component versions
# used by both the binary install and the source build paths.
CDN=https://cdn.jsdelivr.net/gh/etherdream/jsproxy-bin@master
JSPROXY_VER=0.0.9
PCRE_VER=8.43
ZLIB_VER=1.2.11
OPENSSL_VER=1.1.1b
OPENRESTY_VER=1.15.8.1
# Prebuilt binaries are published for this platform only ("uname"-"uname -m").
SUPPORTED_OS="Linux-x86_64"
OS="$(uname)-$(uname -m)"
NGX_DIR="$HOME/openresty"
# ANSI escape prefixes; interpreted by printf inside output().
COLOR_RESET="\033[0m"
COLOR_RED="\033[31m"
COLOR_GREEN="\033[32m"
COLOR_YELLOW="\033[33m"
# Print a timestamped, colourised log line: "[jsproxy HH:MM:SS] message".
# $1 is the colour escape prefix; remaining args form the message.
output() {
  local color=$1
  shift
  local stime
  stime=$(date "+%H:%M:%S")
  # The message goes through %s, never through the format string: the
  # original interpolated user text into printf's format, so a message
  # containing % (or backslashes) was mangled.  %b still interprets the
  # \033 escapes in the colour codes.
  printf '%b[jsproxy %s]%b %s\n' "$color" "$stime" "$COLOR_RESET" "$*"
}
# Green informational message.  Forwards every argument quoted -- the
# original passed only an unquoted $1, dropping extra words and
# re-splitting/globbing the first one.
log() {
  output "$COLOR_GREEN" "$@"
}
# Yellow warning message; forwards all arguments quoted (see log()).
warn() {
  output "$COLOR_YELLOW" "$@"
}
# Red error message; forwards all arguments quoted (see log()).
err() {
  output "$COLOR_RED" "$@"
}
# Verify that the installed nginx binary runs, then report its version
# and location.  Exits the script if the binary is missing or broken.
check_nginx() {
  local ngx_exe="$NGX_DIR/nginx/sbin/nginx"
  local ngx_ver
  # Assign separately from the declaration: "local v=$(cmd)" always
  # returns local's own (zero) status and would mask a failing binary.
  # nginx prints its version on stderr, hence 2>&1.
  ngx_ver=$("$ngx_exe" -v 2>&1)
  if [[ "$ngx_ver" != *"nginx version:"* ]]; then
    err "$ngx_exe 无法执行!尝试编译安装"
    exit 1
  fi
  log "$ngx_ver"
  log "nginx path: $NGX_DIR"
}
# Download the proxy service tarball, back up any existing deployment,
# unpack and start the new one.
install_jsproxy() {
  log "下载代理服务 ..."
  # -f turns HTTP errors into a non-zero exit instead of silently saving
  # the error page as the tarball; bail out so tar does not fail later.
  curl -fsS -O "$CDN/server-$JSPROXY_VER.tar.gz" || { err "下载失败"; exit 1; }
  if [ -x ./server/run.sh ]; then
    warn "尝试停止当前服务 ..."
    ./server/run.sh quit
  fi
  if [ -d "server" ]; then
    backup="$PWD/bak/$(date +%Y_%m_%d_%H_%M_%S)"
    warn "当前 server 目录备份到 $backup"
    # $backup embeds $PWD and may contain spaces -- keep it quoted.
    mkdir -p "$backup"
    mv server "$backup"
  fi
  tar zxf "server-$JSPROXY_VER.tar.gz"
  rm -f "server-$JSPROXY_VER.tar.gz"
  log "启动服务 ..."
  ./server/run.sh
  log "服务已开启。后续维护参考 https://github.com/EtherDream/jsproxy"
}
# Build OpenResty (nginx) from source with pinned, bundled dependencies.
# The steps are strictly ordered: fetch everything into a scratch dir,
# configure against the unpacked deps (relative ../ paths), build,
# install into $NGX_DIR, verify, then deploy the proxy service.
compile() {
  local tmp_dir="$PWD/__tmp__"
  mkdir -p $tmp_dir
  cd $tmp_dir
  log "下载 pcre 源码 ..."
  curl -O https://ftp.pcre.org/pub/pcre/pcre-$PCRE_VER.tar.gz
  tar zxf pcre-$PCRE_VER.tar.gz
  log "下载 zlib 源码 ..."
  curl -O https://zlib.net/zlib-$ZLIB_VER.tar.gz
  tar zxf zlib-$ZLIB_VER.tar.gz
  log "下载 openssl 源码 ..."
  curl -O https://www.openssl.org/source/openssl-$OPENSSL_VER.tar.gz
  tar zxf openssl-$OPENSSL_VER.tar.gz
  log "下载 nginx 源码 ..."
  curl -O https://openresty.org/download/openresty-$OPENRESTY_VER.tar.gz
  tar zxf openresty-$OPENRESTY_VER.tar.gz
  cd openresty-$OPENRESTY_VER
  # some distros keep ldconfig etc. in /sbin, which configure may need
  export PATH=$PATH:/sbin
  log "配置中 ..."
  ./configure \
    --with-openssl=../openssl-$OPENSSL_VER \
    --with-pcre=../pcre-$PCRE_VER \
    --with-zlib=../zlib-$ZLIB_VER \
    --with-http_v2_module \
    --with-http_ssl_module \
    --with-pcre-jit \
    --prefix=$NGX_DIR
  log "编译中 ..."
  make
  make install
  log "编译完成"
  rm -rf $tmp_dir
  check_nginx
  install_jsproxy
}
# Fetch the prebuilt OpenResty bundle for this platform, unpack it,
# verify nginx works, then deploy the proxy service.
install() {
  log "下载 nginx 程序 ..."
  # -f: treat HTTP errors as a failure instead of saving the error page.
  curl -f -O "$CDN/$OS/openresty-$OPENRESTY_VER.tar.gz" || exit 1
  tar zxf "openresty-$OPENRESTY_VER.tar.gz"
  rm -f "openresty-$OPENRESTY_VER.tar.gz"
  check_nginx
  install_jsproxy
}
# Re-download and restart the proxy service; nginx itself is untouched.
update() {
  install_jsproxy
}
# Package the openresty directory for publishing to the CDN repo.
pack() {
  log "压缩 openresty ..."
  # GZIP must be in tar's child environment to reach gzip: the original
  # assigned it on its own line without export, so -9 never took effect.
  # (gzip >= 1.6 deprecates the GZIP variable but still honours it.)
  GZIP=-9 tar cvzf openresty.tar.gz openresty
  log "done"
  ls -la
}
# Default action: verify platform and privileges, create the service
# user, move this script into its home and re-run it there as jsproxy.
main() {
  if [[ "$SUPPORTED_OS" != *"$OS"* ]]; then
    err "当前系统 $OS 不支持自动安装。尝试编译安装"
    exit 1
  fi
  # id -u is more reliable than $USER, which some environments leave
  # unset or stale (e.g. under su/sudo).
  if [[ "$(id -u)" != "0" ]]; then
    err "自动安装需要 root 权限。如果无法使用 root,尝试编译安装"
    exit 1
  fi
  if ! id -u jsproxy > /dev/null 2>&1 ; then
    log "创建用户 jsproxy ..."
    groupadd nobody > /dev/null 2>&1
    useradd jsproxy -g nobody --create-home
  fi
  local src=$0
  local dst=/home/jsproxy/i.sh
  warn "当前脚本移动到 $dst"
  # Quoting protects against spaces in the invocation path ($0).
  mv -f "$src" "$dst"
  chmod +x "$dst"
  log "切换到 jsproxy 用户,执行安装脚本 ..."
  su - jsproxy -c "$dst install"
}
# Subcommand dispatch: each recognised action runs and exits immediately;
# anything else (including no argument at all) falls through to main.
case "$1" in
  "install") install
    exit;;
  "compile") compile
    exit;;
  "update") update
    exit;;
  "pack") pack
    exit;;
  *) main
    exit;;
esac | true |
a0b46de591c9c077b9829ff4a5af56b4cd129641 | Shell | greenelab/ccc | /scripts/convert_ipynb_to_py.sh | UTF-8 | 754 | 3.96875 | 4 | [
"BSD-2-Clause-Patent",
"BSD-3-Clause"
] | permissive | #!/bin/bash
# show commands being executed (for debugging purposes)
#set -x
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# First argument: path to the .ipynb notebook to sync.
NOTEBOOK="${1:-}"
if [ -z "${NOTEBOOK}" ]; then
  echo "Provide the notebook path"
  exit 1
fi
# capture whether notebook has a python or R kernel
regex="\"file_extension\": \"(\.[a-zA-Z]+)\"\,"
# grep the file directly (no "cat |" needed) and quote the path so
# notebooks with spaces in their names work.
value=$(grep "file_extension" -- "${NOTEBOOK}")
if [[ $value =~ $regex ]]; then
    fext="${BASH_REMATCH[1]}"
else
    echo "ERROR: file extension not found"
    exit 1
fi
# select code formatter according to file extension
PIPE_CMD=("black {}")
if [ "$fext" = ".r" ] || [ "$fext" = ".R" ]; then
    PIPE_CMD=("${SCRIPT_DIR}/styler.r {}")
fi
jupytext \
    --sync \
    --pipe "${PIPE_CMD[@]}" \
    "${NOTEBOOK}"
| true |
eac429af874df62b37181a94dab097607c2aa904 | Shell | pipitone/datasets | /setup.sh | UTF-8 | 974 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
virtualenv env
. env/bin/activate
pip install pyyaml docopt

# Wrapper so "datasets" works as a command inside the virtualenv.
# The quoted 'EOF' keeps $(dirname $0) literal until run time.
cat <<'EOF' > env/bin/datasets
#!/bin/bash
source $(dirname $0)/activate
python $(dirname $0)/../../datasets.py "$@"
EOF
# The wrapper must be executable to be usable as a command (this was
# missing: the generated script could never actually be run).
chmod +x env/bin/datasets

echo Fetching sample data...
mkdir -p test/US_crime
echo -e "---\ndataset: true\ndescription: Sample US crime data\n---\n" > test/US_crime/README
echo -e "---\ndatasets:\n    - $PWD/test/US_crime\n" > datasets.yml
(cd test/US_crime;
 curl -O http://hci.stanford.edu/jheer/workshop/data/crime/CrimeStatebyState.csv;
 curl -O http://hci.stanford.edu/jheer/workshop/data/fec/fec96_10.csv;
 # the original ended this URL with a stray ':' (corrupting it) and then
 # re-downloaded fec96_10.csv as part of the same broken command
 curl -O http://hci.stanford.edu/jheer/workshop/data/fec/fec_codebook.txt)

cat <<EOF
---
'datasets' has been installed. To activate, run:
    source env/bin/activate
Then run 'datasets list' to see installed datasets, or run 'datasets --help'
for more information on how to use this tool.
EOF
| true |
149296bd82c2f5497452b506e86174250af43fe8 | Shell | NYULibraries/dlts_viewer_distro | /bin/link_build.sh | UTF-8 | 2,032 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# Print the failure reason on stderr and abort with status 1.
# (The original echoed an unquoted $1 to stdout, splitting the message
# and mixing diagnostics into normal output.)
die () {
	printf '%s\n' "$*" >&2
	exit 1
}
# Resolve the real directory of this script, following any chain of
# symlinks, so relative lookups work no matter how it was invoked.
SOURCE="${BASH_SOURCE[0]}"
# resolve $SOURCE until the file is no longer a symlink
while [ -h "$SOURCE" ]; do
  DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
  SOURCE="$(readlink "$SOURCE")"
  # if $SOURCE was a relative symlink, we need to resolve it relative to the path where
  # the symlink file was located
  [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
# DIR = physical (-P) directory containing the final, resolved script.
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
# Extra flags selected by -d; appended to commands by callers (if any).
DEBUG=""
# Parse options: -c <conf-file> (required downstream), -d (debug), -h (help).
while getopts ":c:hd" opt; do
  case $opt in
    c)
      # NOTE(review): $OPTARG is unquoted here; a config path containing
      # spaces would break the -f test -- confirm and quote.
      [ -f $OPTARG ] || die "Configuration file does not exist."
      CONF_FILE=$OPTARG
      ;;
    d)
      DEBUG="-d -v"
      ;;
    h)
      echo " "
      echo " Usage: ./migrate.sh -c example.conf"
      echo " "
      echo " Options:"
      echo "   -h  Show brief help"
      echo "   -c <file>  Specify the configuration file to use (e.g., -c example.conf)."
      echo " "
      exit 0
      ;;
  esac
done
# A configuration file is mandatory; ${CONF_FILE:-} keeps the test safe
# when -c was never given.
[ -n "${CONF_FILE:-}" ] || die "No configuration file provided."
# load configuration file
. "$CONF_FILE"
LIBRARY="$(dirname "$DIR")"/lib
[ -d "$LIBRARY" ] || die "Library directory ${LIBRARY} does not exist"
if [[ -f "$BUILD_DIR/$BUILD_BASE_NAME/index.php" ]]; then
  # check if this directory looks like Drupal 7
  MATCH=$(grep -c 'DRUPAL_ROOT' "$BUILD_DIR/$BUILD_BASE_NAME/index.php")
  if [ "$MATCH" -gt 0 ]; then
    echo "Linking build ${BUILD_DIR}/${BUILD_BASE_NAME}"
    site_dirs=(modules themes)
    # find modules/themes and symlink them to the repo code
    for site_dir in "${site_dirs[@]}"
    do
      for dir in "$LIBRARY/${site_dir}"/*
      do
        base=${dir##*/}
        if [ -d "${BUILD_DIR}/${BUILD_BASE_NAME}/sites/all/${site_dir}/${base}" ] && [ -d "${LIBRARY}/${site_dir}/${base}" ]
        then
          # Quoting matters here: an unquoted expansion feeding rm -rf
          # is dangerous if any path component contains spaces.
          rm -rf "${BUILD_DIR}/${BUILD_BASE_NAME}/sites/all/${site_dir}/${base}"
          ln -s "$LIBRARY/${site_dir}/${base}" "${BUILD_DIR}/${BUILD_BASE_NAME}/sites/all/${site_dir}/${base}"
        fi
      done
    done
  fi
fi
exit 0
| true |
f52348e8a11f0c51d9d91ed3cb493e4fc56a5ac8 | Shell | jovalle/.jsh | /ubuntu.sh | UTF-8 | 1,530 | 3.390625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Install prerequisites
sudo apt update && sudo apt upgrade -y
sudo apt install -y software-properties-common build-essential zlib1g-dev libncurses5-dev libgdbm-dev libnss3-dev libssl-dev libreadline-dev libffi-dev libsqlite3-dev wget libbz2-dev zsh

# Install Meslo NF
sudo apt install fontconfig
curl -Lo /tmp/Meslo.zip $(curl -s https://api.github.com/repos/ryanoasis/nerd-fonts/releases/latest | grep "browser_download_url.*Meslo.zip" | cut -d : -f 2,3 | tr -d \")
mkdir -p "$HOME/.local/share/fonts"
unzip /tmp/Meslo.zip -d "$HOME/.local/share/fonts"
rm "$HOME/.local/share/fonts/"*Windows*
rm /tmp/Meslo.zip
fc-cache -fv

# Install python3.10 unless a 3.10 interpreter is already present.
# Fix: the right-hand side of != must be UNQUOTED to act as a glob; the
# original compared against the literal string "*3.10*", which never
# matches, so Python was rebuilt from source on every run.
if [[ ! -f /usr/local/bin/python3.10 && $(python3 --version) != *3.10* ]]; then
  pushd /tmp
  curl -LO https://www.python.org/ftp/python/3.10.5/Python-3.10.5.tgz
  tar -xf Python-3.10.*.tgz
  pushd Python-3.10.*/
  ./configure --enable-optimizations
  make -j $(nproc)
  sudo make altinstall
  popd
  rm -rf /tmp/Python-3.10.*/
  popd
fi

# Install pip
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10

# Set up python3 symlink
common_paths=(
  /usr/bin/python
  /usr/bin/python3
  /usr/local/bin/python
  /usr/local/bin/python3
)
# NOTE(review): every path is unlinked, but only the /usr/local ones are
# recreated -- /usr/bin/python{,3} stay removed; confirm that is intended.
for path in "${common_paths[@]}"; do
  sudo unlink "$path" &>/dev/null
  if [[ $path == *local* ]]; then
    if [[ -f /usr/local/bin/python3.10 ]]; then
      sudo ln -s /usr/local/bin/python3.10 "$path"
    elif [[ -f /usr/bin/python3.10 ]]; then
      sudo ln -s /usr/bin/python3.10 "$path"
    fi
  fi
done

# Install jsh
./j.sh install
| true |
125b019537340a0e34989aa15444c5d2a0c4365b | Shell | bartonlp/site-class | /mk-html-gitlog.sh | UTF-8 | 1,206 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# !!! You need pandoc: sudo apt-get install pandoc
css='
<style>
div.sourceCode {
  background-color: #EEF3E2;
  border-left: 10px solid gray;
  padding-left: 5px;
}
code {
  background-color: #EEF3E2;
}
</style>
';

# Convert one GitHub-flavored-Markdown file into a styled HTML5 page.
#   $1 = page title, $2 = input .md, $3 = output .html
# (Replaces six near-identical pandoc invocations.)
render() {
  /usr/bin/pandoc -f gfm -t html5 -Vpagetitle="$1" -Vmath="$css" -s "$2" -o "$3"
}

# Make .html files from .md files
render "Main Readme file" README.md README.html

# Create 'git log >~/www/bartonlp.com/gitlog
git log --all > ~/www/bartonlp.com/gitlog

# now move into the docs directory and do those html files;
# bail out if it is missing rather than regenerating files in the wrong place
cd docs || exit 1
render "dbTables" dbTables.md dbTables.html
render "SiteClass Methods" siteclass.md siteclass.html
render "Additional Files" files.md files.html
render "Analysis" analysis.md analysis.html
render "examplereadme" examplereadme.md examplereadme.html
| true |
91d75bce13f3fb581784a93756bab626bfb2d2e9 | Shell | bmwant/dotfiles | /loadcfg.sh | UTF-8 | 353 | 3.375 | 3 | [] | no_license | #!/usr/bin/env bash
set -e

CYAN='\033[0;36m'
NC='\033[0m'

# Cyan status message; $* joins all arguments into one string.
function info() {
    echo -e "${CYAN}$*${NC}"
}

# The backup/source directory is required.
if [ -z "${1:-}" ]; then
    echo "Usage: $0 <backup-dir>" >&2
    exit 1
fi

info "Restoring $1/.bash_profile to ${HOME}/.bash_profile..."
cp "$1/.bash_profile" "${HOME}/.bash_profile"

info "Restoring $1/.zshrc to ${HOME}/.zshrc..."
cp "$1/.zshrc" "${HOME}/.zshrc"

# NOTE(review): the original printed a placeholder message ("sdafasfsa")
# and never copied .vimconfig; the copy below follows the established
# pattern -- confirm the intended target file name.
info "Restoring $1/.vimconfig to ${HOME}/.vimconfig..."
cp "$1/.vimconfig" "${HOME}/.vimconfig"
| true |
dfb4f293e97934c4c290f9e199c6c1fbc91c1146 | Shell | markj/tvos_scripts | /scripts/fix_image_alpha_channels.sh | UTF-8 | 2,786 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Apple TV stack images can not contain an alpha channel on the back layer.
# This script uses ImageMagic to detect and fix this problem as it can be hard to make apps not export the channel
# Install ImageMagic by `brew install ImageMagick`
#
# Homebrew installs identify/convert under /usr/local/bin.
export PATH=$PATH:/usr/local/bin
# First argument: root directory to scan for .brandassets bundles.
ROOT="$1"
# Run the alpha-channel check on every PNG directly below the given
# directory (NUL-delimited so file names with spaces survive).
fix_alpha_channels_in_folder() {
    local dir="$1"
    local png
    find "$dir" -name '*.png' -print0 | while IFS= read -r -d '' png; do
        echo "    $png"
        check_and_fix_alpha_channel "$png"
    done
}
# Walk one .brandassets bundle and fix every Back.imagestacklayer in it.
fix_alpha_channels() {
    local bundle="$1"
    local layer_dir
    echo "Processing: $bundle"
    find "$bundle" -name "Back.imagestacklayer" -print0 | while IFS= read -r -d '' layer_dir; do
        # echo "  Checking: $layer_dir"
        fix_alpha_channels_in_folder "$layer_dir"
    done
}
# Locate every brand-assets bundle under $ROOT and process it.
find_all_branded_assets() {
	# $ROOT comes from the command line and may contain spaces, so it
	# must be quoted (the original passed it unquoted and word-split it).
	find "$ROOT" -name "App Icon & Top Shelf Image.brandassets" -print0 | while IFS= read -r -d $'\0' asset; do
		fix_alpha_channels "$asset"
	done
}
# If the PNG's channel layout is anything other than plain "srgb"
# (e.g. "srgba"), strip the alpha channel in place.
check_and_fix_alpha_channel() {
	local file="$1"
	local channels
	# Split declaration and assignment so identify's exit status is not
	# masked by local's own (always zero) status.
	channels=$(identify -format '%[channels]' "$file")
	if [ "$channels" != "srgb" ]; then
		echo "    ***WARNING**** $file has invalid channels : $channels"
		# Write the stripped copy next to the source instead of to
		# ./output.png, which depended on the caller's cwd and could
		# collide between runs; mv -f replaces the rm+mv pair atomically.
		convert "$file" -alpha off "$file.tmp.png"
		mv -f "$file.tmp.png" "$file"
		echo "    Fixed.."
	else
		echo "    OK"
	fi
}
find_all_branded_assets
exit

# Everything below the unconditional "exit" above is unreachable scratch
# code.  NOTE(review): it also calls check_no_alpha_channel, which is not
# defined anywhere in this file (the function above is named
# check_and_fix_alpha_channel) -- it would fail even if re-enabled.
#check_no_alpha_channel "./MyPixPo/tvOS/Assets.xcassets/App Icon & Top Shelf Image.brandassets/App Icon - Large.imagestack/Back.imagestacklayer/Content.imageset/main.png"
#check_no_alpha_channel "./MyPixPo/tvOS//Assets.xcassets/App Icon & Top Shelf Image.brandassets/App Icon - Small.imagestack/Back.imagestacklayer/Content.imageset/main.png"
check_no_alpha_channel "./MyPixPo/tvOS//Assets.xcassets/App Icon & Top Shelf Image.brandassets/App Icon - App Store.imagestack/Back.imagestacklayer/Content.imageset/Background.png"
check_no_alpha_channel "./MyPixPo/tvOS//Assets.xcassets/App Icon & Top Shelf Image.brandassets/App Icon.imagestack/Back.imagestacklayer/Content.imageset/Background.png"
check_no_alpha_channel "./MyPixPo/tvOS//Assets.xcassets/App Icon & Top Shelf Image.brandassets/App Icon.imagestack/Back.imagestacklayer/Content.imageset/Background@2x.png"
check_no_alpha_channel "./MyPixPo/tvOS//Assets.xcassets/App Icon & Top Shelf Image.brandassets/Top Shelf Image Wide.imageset/Layer 0.png"
check_no_alpha_channel "./MyPixPo/tvOS//Assets.xcassets/App Icon & Top Shelf Image.brandassets/Top Shelf Image Wide.imageset/Layer 0@2x.png"
check_no_alpha_channel "./MyPixPo/tvOS//Assets.xcassets/App Icon & Top Shelf Image.brandassets/Top Shelf Image.imageset/Layer 0.png"
check_no_alpha_channel "./MyPixPo/tvOS//Assets.xcassets/App Icon & Top Shelf Image.brandassets/Top Shelf Image.imageset/Layer 0@2x.png"
| true |
01d35bc74463613da26f65ad0a87aa492402b017 | Shell | costa-group/installation-scripts | /saco/ei/common_config.sh | UTF-8 | 339 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# First argument: directory where the SACO distribution was extracted.
TOOL_PATH=$1
# Emit a banner explaining where the tool should come from.
echo -e "\n\
# Path to saco --- where you've extracted \n\
# \n\
#      http://costa.ls.fi.upm.es/download/saco.colab.zip \n\
# \n"
#/Systems/costa/costabs/dist/saco
# Print the environment block the caller appends to a shell profile.
# The \${...} escapes keep variable references literal so they are
# expanded later by the profile, not by this script (only $TOOL_PATH is
# expanded now).
echo -e "EC_SACOHOME=$TOOL_PATH \n\
export SACOHOME=\${EC_SACOHOME} \n\
export COSTABSHOME=\${SACOHOME} \n\
export PATH=\${PATH}:\${SACOHOME}/bin "
| true |
10595d059ed2197672c1e545bcddf4d31e0b3aa5 | Shell | green-pro/grdk-core | /services/install/lib/lib-install-cron.sh | UTF-8 | 5,428 | 3.828125 | 4 | [] | no_license | #!/bin/bash
### HELP
_install_cron_help()
{
    # Same usage text as before, emitted via a single here-doc instead of
    # a chain of echo calls.  Content must stay at column 0 for <<EOF.
    cat <<EOF
Usage: $prog_name install cron <subcommand> [options]
Subcommands:
    empty   (install main)
    start
    stop
    restart
    add     (add scripts)
    rm      (remove scripts)

For help with each subcommand run:
$prog_name install cron <subcommand> -h | --help

EOF
}
### COMMAND
# Entry point for "install cron".  Dispatches on the global $_subcommand
# (set by the surrounding CLI framework) by dynamically invoking
# _install_cron_<subcommand>.
_install_cron()
{
    case $_subcommand in
        "-h" | "--help")
            _install_cron_help
            ;;
        "")
            __install_cron_main
            ;;
        *)
            shift 1
            # NOTE(review): $@ is unquoted; arguments containing spaces
            # would be re-split before reaching the subcommand.
            _install_cron_${_subcommand} $@
            # 127 is bash's "command not found": the subcommand function
            # does not exist, so report an unknown subcommand.
            if [ $? = 127 ]; then
                echo "Error: '$_subcommand' is not a known subcommand." >&2
                echo "Run '$prog_name cron --help' for a list of known subcommands." >&2
                exit 1
            fi
            ;;
    esac
}
### MAIN
# Default action (no subcommand): rebuild all cron links from scratch --
# remove stale links, add current ones, then restart the cron daemon.
__install_cron_main()
{
    # RUN
    _install_cron_rm
    _install_cron_add
    _install_cron_restart
    return 0
}
### SUBCOMMANDS
# Start the system cron daemon.
_install_cron_start()
{
    # RUN
    echo "Start crontab service"
    service cron start
    return 0
}
# Stop the system cron daemon.
_install_cron_stop()
{
    # RUN
    echo "Stop crontab service"
    service cron stop
    return 0
}
# Restart the system cron daemon (picks up newly linked scripts).
_install_cron_restart()
{
    # RUN
    echo "Restart crontab service"
    service cron restart
    return 0
}
# Link every grdk-cron-daily-* / grdk-cron-hourly-* script found directly
# in "$1" into /etc/cron.daily or /etc/cron.hourly (file extension
# stripped from the link name).  Silently does nothing if the directory
# does not exist.  Internal helper for _install_cron_add.
__install_cron_link_dir()
{
    local dir="$1"
    local entry file file_name target
    [ -d "$dir" ] || return 0
    for entry in "$dir"/*; do
        [ -f "$entry" ] || continue
        file="${entry##*/}"
        file_name="${file%.*}"
        if [[ $file == grdk-cron-daily-* ]]; then
            target="/etc/cron.daily/${file_name}"
        elif [[ $file == grdk-cron-hourly-* ]]; then
            target="/etc/cron.hourly/${file_name}"
        else
            continue
        fi
        # Same log line as before, but the command itself is executed
        # with quoted arguments instead of word-splitting a string.
        echo "ln -s ${entry} ${target}"
        ln -s "$entry" "$target"
    done
    return 0
}

# Create cron links for the core scripts, the role-specific scripts, and
# every vendor/src service.  The four copy-pasted loop bodies of the
# original are collapsed into the helper above (which also drops the
# unused file_ext computation).
_install_cron_add()
{
    # RUN
    echo "Create link to cron scripts"
    local entry
    __install_cron_link_dir "${DK_INSTALL_PATH}/vendor/grdk-core/scripts/cron"
    __install_cron_link_dir "${DK_INSTALL_PATH}/vendor/grdk-core/scripts/cron/${DK_SERVER_NODE_ROLE}"
    for entry in "${DK_INSTALL_PATH}/vendor/grdk-core/services"/*; do
        if [ -d "$entry" ]; then
            __install_cron_link_dir "$entry/scripts/cron"
            __install_cron_link_dir "$entry/scripts/cron/${DK_SERVER_NODE_ROLE}"
        fi
    done
    for entry in "${DK_INSTALL_PATH}/src/services"/*; do
        if [ -d "$entry" ]; then
            __install_cron_link_dir "$entry/scripts/cron"
            __install_cron_link_dir "$entry/scripts/cron/${DK_SERVER_NODE_ROLE}"
        fi
    done
    return 0
}
# Remove all previously installed grdk cron links.
_install_cron_rm()
{
    # RUN
    echo "Remove link to cron scripts"
    # Glob tests replace the fragile "ls -la | grep | wc -l" pipeline,
    # which also matched the pattern anywhere in the ls output (e.g. in
    # a symlink's target), not only in the file names themselves.
    if compgen -G '/etc/cron.daily/grdk-cron-*' > /dev/null; then
        echo "Scripts cron.daily deleted"
        rm /etc/cron.daily/grdk-cron-*
    fi
    if compgen -G '/etc/cron.hourly/grdk-cron-*' > /dev/null; then
        echo "Scripts cron.hourly deleted"
        rm /etc/cron.hourly/grdk-cron-*
    fi
    return 0
}
| true |
d2a12a6505bbe2c7845af290bfd12d1beb62c49f | Shell | yanyingwang/shanghai-tools | /kit/the-bund-light.sh | UTF-8 | 1,983 | 3.34375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#set -x
#The MIT License (MIT)
#Copyright (c) 2015 Yanying Wang
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
# Poll interval in minutes; defaults to 4, overridable via the first CLI arg.
if [[ $# == 0 ]]
then
INTERVAL=4
else
INTERVAL=$1
fi
#DISPLAY=:0
# Full resolution of display :0 as reported by xdpyinfo, e.g. "1920x1080".
SCREEN=$(xdpyinfo -display :0 | grep -m1 dimensions | awk '{print $2}')
# Width and height, split on the "x" separator.
SCREENX=$(echo $SCREEN | awk -Fx '{print $1}')
SCREENY=$(echo $SCREEN | awk -Fx '{print $2}')
active_windows_size()
{
# Resolution of the currently focused window, formatted "WIDTHxHEIGHT"
# (same shape as $SCREEN so the two can be compared directly).
xwininfo -id "$(xdotool getactivewindow)" -stats \
    | awk '/Width:|Height:/ { dim[++n] = $NF }
           END { print dim[1] "x" dim[2] }'
}
fullscreen_app_info()
{
# One-line identification of the active window: the "xwininfo: Window id:"
# header produced by xwininfo, which includes the window title.
xwininfo -id "$(xdotool getactivewindow)" -stats | awk '/xwininfo:/'
}
the_bund_light()
{
# One poll cycle: if the focused window covers the whole screen, poke the
# X screensaver timer so the display stays on; otherwise nap for $INTERVAL
# minutes before the caller polls again.
local current_size
current_size=$(active_windows_size)
if [[ $SCREEN != $current_size ]]
then
echo "$(date): fullscreen app none: sleep $INTERVAL minutes"
sleep $((INTERVAL * 60))
else
xset s reset
echo "$(date): fullscreen app info: $(fullscreen_app_info)"
fi
}
# Main loop: poll forever. The function sleeps itself when no fullscreen
# app is focused. NOTE(review): when a fullscreen app IS active the function
# returns immediately, so this loop re-runs xwininfo/xdotool back to back --
# consider a short sleep here to avoid the busy spin.
while :
do
the_bund_light
done
| true |
7896dcf8c871343386f8147a74abab0b8ca7aa5f | Shell | davide125/dc-chef-utils | /chef_bootstrap.sh | UTF-8 | 2,383 | 3.84375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Abort on any unhandled error or reference to an unset variable.
set -eu

# Pinned versions: the Chef client release and the facebook/chef-utils
# revision that chefctl.rb is fetched from.
chef_version='12.21.26'
chef_utils_version='c08a9ff7aa176e09410ab6381380b9ae179293de'
omnitruck_url='https://omnitruck.chef.io/install.sh'
chefctl_url="https://raw.githubusercontent.com/facebook/chef-utils/${chef_utils_version}/chefctl/chefctl.rb"
chefctl_hooks_url='https://raw.githubusercontent.com/davide125/dc-chef-utils/master/chefctl_hooks.rb'
chefctl_config_url=''
chef_config_path='/etc/chef'

# Source config to override any default settings if needed
# shellcheck disable=SC1091
[ -r /etc/chef-bootstrap-config ] && . /etc/chef-bootstrap-config

# Install destinations for chefctl and its hook file.
chefctl='/usr/local/sbin/chefctl.rb'
chefctl_hooks='/etc/chef/chefctl_hooks.rb'
# Check whether a command exists - returns 0 if it does, 1 if it does not
# Predicate: is the named command available on PATH?
# Returns 0 when found, 1 otherwise.
exists() {
  command -v "$1" >/dev/null 2>&1
}
# Print a message to stdout and abort the whole script with status 1.
fail() {
  printf '%s\n' "$*"
  exit 1
}
# Detect the distro family and set two globals used by the installer:
#   platform    - 'debian' or 'centos'
#   ssh_package - the distro's package providing the OpenSSH client
# Aborts via fail() on unsupported platforms. Idempotent.
detect_platform() {
  if [ -f /etc/debian_version ]; then
    platform='debian'
    # Debian/Ubuntu name for the OpenSSH client.
    # (Was 'openssl-clients' -- an OpenSSL-flavoured name that exists in
    # neither distro, so the install step could never succeed.)
    ssh_package='openssh-client'
  elif [ -f /etc/centos-release ]; then
    platform='centos'
    # RHEL/CentOS name (was 'openssh_client', underscore and no 's').
    ssh_package='openssh-clients'
  else
    fail 'Platform not supported!'
  fi
}
# Install the given packages with the platform's native package manager.
# Arguments: package names, passed straight through to yum/apt-get.
# Side effects: calls detect_platform, which sets the $platform and
# $ssh_package globals.
install_packages() {
  detect_platform
  case "$platform" in
    centos)
      yum install -y "$@"
      ;;
    debian)
      # Refresh package metadata first; non-interactive to avoid prompts.
      apt-get update
      DEBIAN_FRONTEND=noninteractive apt-get install -y "$@"
      ;;
    *)
      # detect_platform only ever sets debian/centos, so this is
      # unreachable unless $platform was clobbered elsewhere.
      fail 'Unknown platform'
      ;;
  esac
}
[ "$USER" = root ] || fail 'chef_bootstrap has to be run as root!'

# Install missing dependencies
pkgs=''
exists curl || pkgs="$pkgs curl"
# $ssh_package is only defined by detect_platform; resolve it lazily here.
# (Previously the bare "$ssh_package" reference ran before detect_platform
# was ever called, so with `set -u` the script aborted whenever ssh was
# missing -- exactly the case this line is meant to handle.)
exists ssh || { detect_platform; pkgs="$pkgs $ssh_package"; }
exists git || pkgs="$pkgs git"
exists hg || pkgs="$pkgs mercurial"
# Intentionally unquoted: $pkgs is a space-separated list of package names.
# shellcheck disable=SC2086
[ -n "$pkgs" ] && install_packages $pkgs

# Install Chef only when the installed version differs from the pinned one.
installed_chef_version=$(/opt/chef/bin/chef-client --version 2> /dev/null | cut -f2 -d' ')
if [ "$installed_chef_version" != "$chef_version" ]; then
  echo 'Installing Chef'
  curl -s "$omnitruck_url" | bash -s -- -v "$chef_version"
  mkdir -p "$chef_config_path"
fi

# Install chefctl from the pinned upstream revision and expose it as
# /usr/local/sbin/chefctl.
echo 'Installing chefctl'
curl -so "${chefctl}" "$chefctl_url"
chmod +x "$chefctl"
ln -sf chefctl.rb /usr/local/sbin/chefctl

echo 'Installing chefctl_hooks'
curl -so "${chefctl_hooks}" "$chefctl_hooks_url"

# Optional site-specific chefctl configuration.
if [ -n "$chefctl_config_url" ]; then
  echo 'Installing chefctl config'
  curl -so /etc/chefctl-config.rb "$chefctl_config_url"
fi

echo "Run '$chefctl -i' to kick off the first Chef run"
exit 0
| true |
0e240dbe5a3914bf88d1d1a25cf20190b460c847 | Shell | swi-infra/jenkins-docker-encaps | /encaps-cleanup | UTF-8 | 1,352 | 3.828125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set +e
DOCKER_ENCAPS_ETC_PATH=${DOCKER_ENCAPS_ETC_PATH:-'/etc/encaps.d'}
DOCKER_ENCAPS_KILL_TIMEOUT=${DOCKER_ENCAPS_KILL_TIMEOUT:-30}
DOCKER_ENCAPS_RM_TIMEOUT=${DOCKER_ENCAPS_RM_TIMEOUT:-300}
if [ -z "$DOCKER_ENCAPS_NAME" ]; then
if [ -z "$BUILD_TAG" ]; then
echo "No DOCKER_ENCAPS_NAME or BUILD_TAG"
exit 1
fi
DOCKER_ENCAPS_NAME=$(echo "$BUILD_TAG" | sed 's/[^a-zA-Z0-9_.-]/_/g')
fi
# Run clean-up hooks
for hook in $(find "$DOCKER_ENCAPS_ETC_PATH" -name "cleanup-pre*" 2>/dev/null); do
$hook
done
# Try to kill the container
KILL_OK=1
docker kill "$DOCKER_ENCAPS_NAME" &
KILL_PID=$!
for (( i=0; i<$DOCKER_ENCAPS_KILL_TIMEOUT; i++ )); do
if [ "$(docker inspect --type=container --format='{{.State.Status}}' "$DOCKER_ENCAPS_NAME")" != "running" ]; then
KILL_OK=0
break
fi
sleep 2
done
kill $KILL_PID || true
set +e
# Force kill
if [ $KILL_OK -ne 0 ]; then
docker kill --signal=TERM "$DOCKER_ENCAPS_NAME"
sleep 5
fi
TIMEOUT_CMD=$(command -v timeout)
if [ -z "$TIMEOUT_CMD" ]; then
echo "timeout command not available"
unset DOCKER_ENCAPS_RM_TIMEOUT
fi
$TIMEOUT_CMD $DOCKER_ENCAPS_RM_TIMEOUT docker rm --force "$DOCKER_ENCAPS_NAME"
RM_RET=$?
for hook in $(find "$DOCKER_ENCAPS_ETC_PATH" -name "cleanup-post*" 2>/dev/null); do
$hook
done
exit $RM_RET
| true |
06863252f711d8399675eb8cc0e484e8d2bfe990 | Shell | agung56/SoalShiftSISOP20_modul1_T05 | /soal2/soal_2enkripsi.sh | UTF-8 | 165 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Rename every given *.txt file to a Caesar-shifted name, keyed by the
# hour of the file's last modification (so each hour yields a different
# shift). Requires the external `caesar` filter (bsdgames) and GNU
# `date -r`.
for tajuk in "$@"
do
    # Shift key: two-digit hour of the file's mtime.
    time=$(date +%H -r "$tajuk")
    # Base name with the .txt suffix stripped.
    tajuklama=$(basename "$tajuk" .txt)
    # Encrypt the name; quoting fixes the original's word-splitting on
    # file names containing spaces.
    tajukbaru=$(echo "$tajuklama" | caesar "$time")
    mv -- "$tajuk" "$tajukbaru.txt"
done
| true |
7e00c59d7c78886b20495c0943eebe99c6c192b9 | Shell | indrjo/minimal-texlive-installer | /scripts/uninstall-texlive | UTF-8 | 475 | 3.953125 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env sh
# NOTE(review): 'pipefail' is not available in every POSIX /bin/sh (dash
# rejects it); run this with bash or a 2024-POSIX shell.
set -e -o pipefail

# PARSING COMMANDLINE ARGUMENTS *******************************************

# Where all the installed TeX Live goes. It defaults to a directory in your
# home called "texlive".
prefix=~/texlive

# Parsing the commandline arguments.
while [ $# -gt 0 ]; do
  case $1 in
    --prefix=*)
      prefix=${1#*=}
      shift
      ;;
    *)
      echo ":: unknown option: $1"
      exit 1
      ;;
  esac
done

# Quoted so paths with spaces work, and guarded with :? so an empty
# prefix (e.g. a bare "--prefix=") aborts instead of silently doing the
# wrong removal.
rm -rfv "${prefix:?prefix must not be empty}"
rm -rfv ~/.texlive
7c37254224aee95ae7a5e0e10bcc28ed3c04ea86 | Shell | ceciliaYunbei/Traitement-et-analyse-des-mots-multilingues-et-visualisation-sur-le-web | /PROJET-MOT-SUR-WEB_CHINOIS/PROGRAMMES/concat.sh | UTF-8 | 157 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Read a directory name from stdin, then concatenate every regular file
# in it into $DOSSIER/concat.txt.
read -r DOSSIER
cd "$DOSSIER" || exit 1
# Truncate the output ONCE, before the loop. The original truncated it on
# every iteration (`echo "" > concat.txt` inside the loop), so only the
# last file ever survived in concat.txt.
: > concat.txt
# Iterate with a glob instead of parsing `ls` output (safe for names
# containing spaces).
for fichier in *
do
    # Skip directories and the output file itself.
    [ -f "$fichier" ] && [ "$fichier" != concat.txt ] || continue
    cat -- "$fichier" >> concat.txt
done
728481d1f8ba9b74a3d285405633291513d6758b | Shell | milvus-io/milvus | /tests/python_client/chaos/chaos_test.sh | UTF-8 | 3,787 | 3.703125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Abort on error and trace every command (noisy by design: this runs in CI).
set -e
set -x
echo "check os env"
# Platform detection drives the BSD-vs-GNU `sed -i` invocation below.
platform='Linux'
unamestr=$(uname)
if [[ "$unamestr" == 'Linux' ]]; then
    platform='Linux'
elif [[ "$unamestr" == 'Darwin' ]]; then
    platform='Mac'
fi
echo "platform: $platform"
ns="chaos-testing"
# switch namespace
# kubectl config set-context --current --namespace=${ns}
# kubectl get pod
# set parameters
# Positional parameters: target pod kind, chaos type, task, replica count.
pod=${1:-"querynode"}
chaos_type=${2:-"pod_kill"} #pod_kill or pod_failure
chaos_task=${3:-"chaos-test"} # chaos-test or data-consist-test
node_num=${4:-1} # cluster_1_node or cluster_n_nodes
# Helm release name is time-stamped unless RELEASE_NAME overrides it.
cur_time=$(date +%H-%M-%S)
release_name="test"-${pod}-${chaos_type/_/-}-${cur_time} # replace pod_kill to pod-kill
release=${RELEASE_NAME:-"${release_name}"}
# replace separator to default
chaos_type=${chaos_type/-/_} # default separator of chaos_type is _
chaos_task=${chaos_task/_/-} # default separator of chaos_task is -
echo "chaos_type: ${chaos_type}"
# install milvus cluster for chaos testing
pushd ./scripts
echo "uninstall milvus if exist"
bash uninstall_milvus.sh ${release} ${ns}|| true
# Map CLI pod names to the camelCase keys used in the Helm values files.
declare -A pod_map=(["querynode"]="queryNode" ["indexnode"]="indexNode" ["datanode"]="dataNode" ["proxy"]="proxy")
echo "install milvus"
if [[ ${pod} != *"standalone"* ]];
then
    # NOTE(review): "insatll" is a typo in this log message.
    echo "insatll cluster"
    helm install --wait --timeout 360s ${release} milvus/milvus --set ${pod_map[${pod}]}.replicas=$node_num -f ../cluster-values.yaml -n=${ns}
fi
if [[ ${pod} == *"standalone"* ]];
then
    echo "install standalone"
    helm install --wait --timeout 360s ${release} milvus/milvus -f ../standalone-values.yaml -n=${ns}
fi
# wait all pod ready
kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=${release} -n ${ns} --timeout=360s
kubectl wait --for=condition=Ready pod -l release=${release} -n ${ns} --timeout=360s
popd
# replace chaos object as defined
# Patch the test constants in place; BSD sed (Mac) needs the extra ""
# argument after -i.
if [ "$platform" == "Mac" ];
then
    sed -i "" "s/TESTS_CONFIG_LOCATION =.*/TESTS_CONFIG_LOCATION = \'chaos_objects\/${chaos_type}\/'/g" constants.py
    sed -i "" "s/ALL_CHAOS_YAMLS =.*/ALL_CHAOS_YAMLS = \'chaos_${pod}_${chaos_type}.yaml\'/g" constants.py
    sed -i "" "s/RELEASE_NAME =.*/RELEASE_NAME = \'${release}\'/g" constants.py
else
    sed -i "s/TESTS_CONFIG_LOCATION =.*/TESTS_CONFIG_LOCATION = \'chaos_objects\/${chaos_type}\/'/g" constants.py
    sed -i "s/ALL_CHAOS_YAMLS =.*/ALL_CHAOS_YAMLS = \'chaos_${pod}_${chaos_type}.yaml\'/g" constants.py
    sed -i "s/RELEASE_NAME =.*/RELEASE_NAME = \'${release}\'/g" constants.py
fi
# run chaos testing
echo "start running testcase ${pod}"
# Service name differs depending on whether the release name already
# contains "milvus"; resolve the cluster IP either way.
if [[ $release =~ "milvus" ]]
then
    host=$(kubectl get svc/${release} -o jsonpath="{.spec.clusterIP}")
else
    host=$(kubectl get svc/${release}-milvus -o jsonpath="{.spec.clusterIP}")
fi
# Sanity e2e pass before injecting chaos.
pytest -s -v ../testcases/test_e2e.py --host "$host" --log-cli-level=INFO --capture=no
python3 scripts/hello_milvus.py --host "$host"
# chaos test
if [ "$chaos_task" == "chaos-test" ];
then
    pytest -s -v test_chaos.py --host "$host" --log-cli-level=INFO --capture=no || echo "chaos test fail"
fi
# data consist test
if [ "$chaos_task" == "data-consist-test" ];
then
    pytest -s -v test_chaos_data_consist.py --host "$host" --log-cli-level=INFO --capture=no || echo "chaos test fail"
fi
sleep 30
echo "start running e2e test"
# Post-chaos recovery check: wait for pods, then re-run the e2e suite.
kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=${release} -n ${ns} --timeout=360s
kubectl wait --for=condition=Ready pod -l release=${release} -n ${ns} --timeout=360s
pytest -s -v ../testcases/test_e2e.py --host "$host" --log-cli-level=INFO --capture=no || echo "e2e test fail"
python3 scripts/hello_milvus.py --host "$host" || echo "e2e test fail"
# save logs
cur_time=$(date +%Y-%m-%d-%H-%M-%S)
bash ../../scripts/export_log_k8s.sh ${ns} ${release} k8s_log/${pod}-${chaos_type}-${chaos_task}-${cur_time}
14f569afb7988a592b746e081dc35f8da7b9794f | Shell | 0wu/onnc-umbrella | /build.sh | UTF-8 | 7,554 | 4.1875 | 4 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
# The ONNC Project
#
source ./scripts/library.sh
##===----------------------------------------------------------------------===##
# Print usage of this script
##===----------------------------------------------------------------------===##
# Print the command-line help for this build script to stdout.
# Uses show() from scripts/library.sh for the header line.
function usage
{
  show "Usage of $(basename $0)"
  echo
  echo "  $0 [mode] [install folder]"
  echo
  echo "mode"
  echo "  * normal: default"
  echo "  * dbg   : debugging mode"
  echo "  * rgn   : regression mode"
  echo "  * opt   : optimized mode"
  echo
  echo "install folder"
  echo "  * /opt/onnc: default"
}
# Print usage and terminate the script with a failure status.
# Any arguments passed by callers are ignored.
function usage_exit
{
  usage
  exit 1
}
##===----------------------------------------------------------------------===##
# Setup environment variables
##===----------------------------------------------------------------------===##
# Validate a build mode string.
# Returns 0 for one of the four known modes (normal/dbg/rgn/opt),
# 1 for anything else.
function check_mode
{
  local mode_arg=$1

  case "${mode_arg}" in
    normal|dbg|rgn|opt)
      return 0
      ;;
  esac
  return 1
}
# Parse the CLI arguments and export every ONNC_* variable the build
# needs: mode, source/build/install directories, tarball path, ONNX
# namespace and the make tool. Destroys any stale build/install dirs.
# Arguments: $1 = mode (default "normal"), $2 = optional install prefix.
function setup_environment
{
  # building mode
  export ONNC_MODE=${1:-normal}
  check_mode "${ONNC_MODE}"
  if [ $? -ne 0 ]; then
    usage_exit "$@"
  fi

  # root to the source & external source folder
  export ONNC_SRCDIR=$(getabs "src")
  export ONNC_EXTSRCDIR=$(getabs "external")

  # check out the submodules if we forget to use --recursive when cloning.
  if [ ! -d "${ONNC_SRCDIR}" ]; then
    show "clone onnc source tree"
    git clone https://github.com/ONNC/onnc.git src
  fi
  git submodule update --init --recursive

  # root to the installation place for external libraries
  export ONNC_EXTDIR=$(getabs "onncroot")

  # root to the building folder
  export ONNC_BUILDDIR=$(getabs "build-${ONNC_MODE}")
  if [ -d "${ONNC_BUILDDIR}" ]; then
    show "remove build directory"
    rm -rf "${ONNC_BUILDDIR}"
  fi

  # root to the destination folder
  export ONNC_DESTDIR=$(getabs "install-${ONNC_MODE}")
  if [ -d "${ONNC_DESTDIR}" ]; then
    show "remove destination directory"
    rm -rf "${ONNC_DESTDIR}"
  fi

  # root to the target installation place (PREFIX given when configuring)
  # use DESTDIR as PREFIX when $2 is not empty
  export IS_PREFIX_GIVEN="${2:+true}"
  export ONNC_PREFIX=$(getabs "${2:-"${ONNC_DESTDIR}"}")

  # Branch name derives the tarball file name below.
  local GIT_HEAD=$(cat .git/HEAD)
  export ONNC_BRANCH_NAME="${GIT_HEAD#ref: refs/heads/}"

  # root to the tarball of files inside ${DESTDIR}
  export ONNC_TARBALL=$(getabs "onnc-${ONNC_BRANCH_NAME}.tar.gz")
  if [ -f "${ONNC_TARBALL}" ]; then
    show "remove existing tarball"
    rm -rf "${ONNC_TARBALL}"
  fi

  # define ONNX namespace
  export ONNC_ONNX_NAMESPACE="onnx"

  # detect MAKE for specific platforms
  # FreeBSD's make is not GNU make; use gmake there.
  export MAKE=${MAKE:-make}
  case "$(platform)" in
    freebsd)
      MAKE=gmake
      ;;
  esac
}
##===----------------------------------------------------------------------===##
# Building functions
##===----------------------------------------------------------------------===##
# Build the third-party prerequisites (SkyPat, LLVM, ONNX) from
# ${ONNC_EXTSRCDIR} into ${ONNC_EXTDIR}. The build_* helpers and
# fail_panic come from scripts/library.sh.
function build_external
{
  show "building external libraries..."
  fail_panic "directory not found: ${ONNC_EXTSRCDIR}" test -d "${ONNC_EXTSRCDIR}"
  build_skypat "${ONNC_EXTSRCDIR}/SkyPat" "${ONNC_EXTDIR}"
  build_llvm "${ONNC_EXTSRCDIR}/llvm" "${ONNC_EXTDIR}"
  build_onnx "${ONNC_EXTSRCDIR}/onnx" "${ONNC_EXTDIR}" "${ONNC_ONNX_NAMESPACE}"
}
# Configure, build and install onnc itself inside ${ONNC_BUILDDIR}.
# The configure flags differ per ${ONNC_MODE}: normal (plain), dbg
# (unittests), rgn (debug + unittests + regression) and opt (optimized).
function build_onnc
{
  show "building onnc...${ONNC_ONNX_NAMESPACE}"
  fail_panic "directory not found: ${ONNC_SRCDIR}" test -d "${ONNC_SRCDIR}"
  show "create build directory at '${ONNC_BUILDDIR}'"
  mkdir -p "${ONNC_BUILDDIR}"
  pushd "${ONNC_BUILDDIR}" > /dev/null
  show "configuring ..."
  fail_panic "Autogen onnc failed." ${ONNC_SRCDIR}/autogen.sh
  case "${ONNC_MODE}" in
  normal)
    fail_panic "Configure onnc failed." ${ONNC_SRCDIR}/configure --prefix="${ONNC_PREFIX}" \
      --with-onnx="${ONNC_EXTDIR}" \
      --with-llvm="${ONNC_EXTDIR}" \
      --with-skypat="${ONNC_EXTDIR}" \
      --with-onnx-namespace="${ONNC_ONNX_NAMESPACE}"
    ;;
  dbg)
    fail_panic "Configure onnc failed." ${ONNC_SRCDIR}/configure --prefix="${ONNC_PREFIX}" \
      --with-skypat="${ONNC_EXTDIR}" \
      --with-onnx="${ONNC_EXTDIR}" \
      --with-llvm="${ONNC_EXTDIR}" \
      --with-onnx-namespace="${ONNC_ONNX_NAMESPACE}" \
      --enable-unittest
    ;;
  rgn)
    fail_panic "Configure onnc failed." ${ONNC_SRCDIR}/configure --prefix="${ONNC_PREFIX}" \
      --with-onnx="${ONNC_EXTDIR}" \
      --with-skypat="${ONNC_EXTDIR}" \
      --with-llvm="${ONNC_EXTDIR}" \
      --with-onnx-namespace="${ONNC_ONNX_NAMESPACE}" \
      --enable-debug \
      --enable-unittest \
      --enable-regression
    ;;
  opt)
    # NOTE(review): this mode reads onnx from "${ONNC_EXTDIR}/../external/install",
    # unlike the other modes -- confirm this asymmetry is intentional.
    fail_panic "Configure onnc failed." ${ONNC_SRCDIR}/configure --prefix="${ONNC_PREFIX}" \
      --with-onnx="${ONNC_EXTDIR}/../external/install" \
      --with-skypat="${ONNC_EXTDIR}" \
      --with-llvm="${ONNC_EXTDIR}" \
      --with-onnx-namespace="${ONNC_ONNX_NAMESPACE}" \
      --enable-optimize
    ;;
  *)
    fatal "unexpected error: unknown mode '${ONNC_MODE}'"
    ;;
  esac

  # Parallel build; MAX_MAKE_JOBS is optional and empty means no -j flag.
  local PARALLEL_BUILD_FLAG=${MAX_MAKE_JOBS:+"-j${MAX_MAKE_JOBS}"}
  show "making ... #jobs=${MAX_MAKE_JOBS}"
  # When an explicit prefix was given, stage into DESTDIR instead of
  # installing straight to the prefix.
  if [ "${IS_PREFIX_GIVEN}" = "true" ]; then
    fail_panic "Make onnc failed." ${MAKE} ${PARALLEL_BUILD_FLAG} DESTDIR="${ONNC_DESTDIR}" install
  else
    fail_panic "Make onnc failed." ${MAKE} ${PARALLEL_BUILD_FLAG} install
  fi
  show "leave"
  popd > /dev/null
}
##===----------------------------------------------------------------------===##
# Packaging functions
##===----------------------------------------------------------------------===##
# Pack the freshly installed tree into ${ONNC_TARBALL}. When a prefix was
# given, the payload lives under ${ONNC_DESTDIR}${ONNC_PREFIX}.
function package_tarball
{
  local INSTALLDIR=${ONNC_DESTDIR}
  if [ "${IS_PREFIX_GIVEN}" = "true" ]; then
    INSTALLDIR=${INSTALLDIR}${ONNC_PREFIX}
  fi
  # Tar from the parent so the archive contains a single top-level folder.
  pushd "$(dirname "${INSTALLDIR}")" > /dev/null
  show "packaging tarball '${ONNC_TARBALL}'"
  tar zcf "${ONNC_TARBALL}" "$(basename "${INSTALLDIR}")"
  popd > /dev/null
}
##===----------------------------------------------------------------------===##
# Post-build functions
##===----------------------------------------------------------------------===##
# Post-build summary: decide success/failure by checking that the tarball
# exists and is non-empty, and report it via show(). The per-mode case is
# currently a placeholder for mode-specific follow-up steps.
function post_build
{
  case "${ONNC_MODE}" in
  normal) ;;
  dbg) ;;
  rgn) ;; # TODO: execute ./script/regression.sh
  opt) ;;
  *) ;;
  esac

  local SUCCESS=success
  if [ ! -f "${ONNC_TARBALL}" ]; then
    SUCCESS=failed
  # An archive that lists zero entries is as bad as a missing one.
  elif [ "$(tar tvf "${ONNC_TARBALL}" | head -n 1 | wc -l)" -eq 0 ]; then
    SUCCESS=failed
  fi
  show "build ${ONNC_TARBALL} for installation on ${ONNC_PREFIX}: ${SUCCESS}"
}
##===----------------------------------------------------------------------===##
# Main
##===----------------------------------------------------------------------===##
# Entry point: require at least one argument, support -h/--help.
if [ $# -lt 1 ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
  usage_exit "$@"
fi

# Parse arguments and setup environment
setup_environment "$@"

# Build external libraries and libonnc
# BUILD_EXTERNAL/EXTERNAL_ONLY are optional env toggles for CI use.
if [ "${BUILD_EXTERNAL}" != "false" ]; then
  build_external
  if [ "${EXTERNAL_ONLY}" = "true" ]; then
    exit 0
  fi
fi
build_onnc

# Package the installer
package_tarball

# Do post-build actions, such as printing summary
post_build
| true |
9bd89e135f0905adf07ff1532b0c3196fb735710 | Shell | frolland04/tests | /qt/DEV-QT6/devel-docker/devel-docker-build.sh | UTF-8 | 304 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Build the "dev-qt6" development image, forwarding the invoking user's
# UID/GID as build args so files created inside the container match host
# ownership.
P_UID=$(id -u)
P_GID=$(id -g)
IMAGE_NAME="dev-qt6"
echo "Building Docker image $IMAGE_NAME ..."
# Resolve this script's own directory so the build context is correct no
# matter where the script is invoked from; quoted so paths with spaces
# survive (the original unquoted `cd $DIR_ROOT` broke on them).
DIR_ROOT=$(dirname "$(readlink -f "$0")")
cd "$DIR_ROOT" || exit 1
# Use '--no-cache' if needed.
docker build --no-cache --tag "$IMAGE_NAME" --build-arg UID="$P_UID" --build-arg GID="$P_GID" .
echo "FINISHED."
cd -
| true |
565df7b4b6abbbe8f7a09846cb77659421f14e98 | Shell | chenq182/snapshot | /mysql.sh | UTF-8 | 914 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# 幂等
PKG_LIST=("mysql-server")
##################################################
# Ubuntu 16.04 specific settings
##################################################
InstallCFGs() {
cp /etc/mysql/mysql.conf.d/mysqld.cnf /etc/mysql/my.cnf
sed -i '/\[mysqld\]/a\character_set_server=utf8\ncollation-server=utf8_general_ci' \
/etc/mysql/my.cnf
sed -i 's/^.*bind-address/# bind-address/g' /etc/mysql/my.cnf
service mysql restart
}
##################################################
# Script body
##################################################
cd $(dirname $0)
if [[ "$UID" != 0 ]];then
echo -e "\033[1;31mPlease use sudo.\033[0m"
exit 1
fi
apt-get install ${PKG_LIST[@]}
if [[ $? != 0 ]];then
echo -e "\033[1;31mInstallation failed!\033[0m"
exit 1
else
echo -e "\033[1;32mInstallation done.\033[0m"
fi
InstallCFGs
echo -e "\033[1mFINISHED.\033[0m"
| true |
cd8bcdac39bebfd3459fc2dabc46effd22b90882 | Shell | esauvisky/arch-install | /config/nautilus-scripts/Scripts Examples/File Processing/print_with_openoffice | UTF-8 | 371 | 3.21875 | 3 | [] | no_license | #!/bin/sh
#
# This script prints the selected files with openoffice.org
# especially useful for openoffice files ;-)
#
# the "quoted..." line handles multiple files with spaces
# (like shown on the g-scripts site)
#
# Build one double-quoted token per selected path (Nautilus supplies them
# newline-separated), then drop the trailing empty "" pair the awk loop
# leaves behind. NOTE(review): eval on file names is fragile/unsafe for
# names containing double quotes or shell metacharacters.
quoted=$(echo -e "$NAUTILUS_SCRIPT_SELECTED_FILE_PATHS" | awk 'BEGIN {FS = "\n" } { printf "\"%s\" ", $1 }' | sed -e s#\"\"##)
eval "ooffice -p $quoted"
| true |
eddc9979dad17dc61f18e62dd321a7d523f65c5f | Shell | alarcher/ksh93 | /src/cmd/ksh93/tests/vartree1.sh | UTF-8 | 6,592 | 3.4375 | 3 | [] | no_license | ########################################################################
# #
# This software is part of the ast package #
# Copyright (c) 1982-2011 AT&T Intellectual Property #
# and is licensed under the #
# Eclipse Public License, Version 1.0 #
# by AT&T Intellectual Property #
# #
# A copy of the License is available at #
# http://www.eclipse.org/org/documents/epl-v10.html #
# (with md5 checksum b35adb5213ca9657e911e9befb180842) #
# #
# Information and Software Systems Research #
# AT&T Research #
# Florham Park NJ #
# #
# David Korn <dgkorn@gmail.com> #
# #
########################################################################
#
# variable tree test #001
# Propose of this test is whether ksh93 handles global variable trees
# and function-local variable trees the same way, including "nameref"
# and "unset" handling.
#
# Record a test failure: print "<command>[<line>]: <message>" to stderr
# and bump the global Errors counter (which becomes the exit status).
function err_exit
{
	print -u2 -n "\t"
	print -u2 -r ${Command}[$1]: "${@:2}"
	(( Errors+=1 ))
}
# The alias injects the caller's $LINENO as the first argument.
alias err_exit='err_exit $LINENO'
# Build a 3-level variable tree from XLFD font strings.
# $1 (nameref) destination tree, $2 (nameref) source data with a
# .hashnodes associative array, $3 leaf mode ("leaf_name" or
# "leaf_compound"). Each XLFD is split on '-' and its first three fields
# become the l1/l2/l3 keys; empty fields fall back to '-'.
function build_tree
{
#set -o errexit -o xtrace
	typeset index
	typeset s
	typeset i
	typeset dummy
	typeset a b c d e f
	nameref dest_tree="$1" # destination tree
	nameref srcdata="$2" # source data
	typeset tree_mode="$3" # mode to define the type of leads
	typeset -A dest_tree.l1
	for index in "${!srcdata.hashnodes[@]}" ; do
		nameref node=srcdata.hashnodes["${index}"]
		for i in "${node.xlfd[@]}" ; do
			# XLFD strings start with '-', so $dummy catches the
			# empty field before the first separator.
			IFS='-' read dummy a b c d e f <<<"$i"
			if [[ "$a" == "" ]] ; then
				a="$dummy"
			fi
			[[ "$a" == "" ]] && a='-'
			[[ "$b" == "" ]] && b='-'
			[[ "$c" == "" ]] && c='-'
			# Create each tree level lazily on first use.
			if [[ "${dest_tree.l1["$a"]}" == "" ]] ; then
			#if ! (unset dest_tree.l1["$a"]) ; then
				typeset -A dest_tree.l1["$a"].l2
			fi
			if [[ "${dest_tree.l1["$a"].l2["$b"]}" == "" ]] ; then
			#if ! (unset dest_tree.l1["$a"].l2["$b"]) ; then
				typeset -A dest_tree.l1["$a"].l2["$b"].l3
			fi
			if [[ "${!dest_tree.l1["$a"].l2["$b"].l3["$c"].entries[*]}" == "" ]] ; then
				typeset -A dest_tree.l1["$a"].l2["$b"].l3["$c"].entries
			fi
			#dest_tree.l1["$a"].l2["$b"].l3["$c"].entries+=( "$index" )
			typeset new_index
			if [[ "${tree_mode}" == "leaf_name" ]] ; then
				new_index=$(( ${#dest_tree.l1["$a"].l2["$b"].l3["$c"].entries[@]}+1 ))
			else
				new_index="${node.name}"
				# skip if the leaf node already exists
				if [[ "${dest_tree.l1["$a"].l2["$b"].l3["$c"].entries[${new_index}]}" != "" ]] ; then
					continue
				fi
			fi
			add_tree_leaf dest_tree.l1["$a"].l2["$b"].l3["$c"].entries[${new_index}] "${index}" "${tree_mode}"
		done
	done
	return 0
}
# Attach one leaf to the tree. $1 (nameref) target leaf node, $2 key into
# srcdata.hashnodes, $3 mode: "leaf_name" stores just the name string,
# "leaf_compound" stores a full compound copy of the source node.
# Returns 1 on an unknown mode.
function add_tree_leaf
{
	nameref tree_leafnode="$1"
	nameref data_node=srcdata.hashnodes["$2"]
	typeset add_mode="$3"
	case "${add_mode}" in
		"leaf_name")
			tree_leafnode="${data_node.name}"
			return 0
			;;
		"leaf_compound")
			tree_leafnode=(
				typeset name="${data_node.name}"
				typeset -a filenames=( "${data_node.filenames[@]}" )
				typeset -a comments=( "${data_node.comments[@]}" )
				typeset -a xlfd=( "${data_node.xlfd[@]}" )
			)
			return 0
			;;
		*)
			print -u2 -f "ERROR: Unknown mode %s in add_tree_leaf\n" "${add_mode}"
			return 1
			;;
	esac
	# not reached
	return 1
}
# "mysrcdata_local" and "mysrcdata_global" must be identical
# Global fixture: one font record with three XLFD variants. main() holds
# a byte-identical local copy; the test compares trees built from each.
typeset mysrcdata_global=(
	typeset -A hashnodes=(
		[abcd]=(
			name='abcd'
			typeset -a xlfd=(
				'-urw-itc zapfchancery-medium-i-normal--0-0-0-0-p-0-iso8859-1'
				'-urw-itc zapfdingbats-medium-r-normal--0-0-0-0-p-0-adobe-fontspecific'
				'-urw-itc zapfdingbats-medium-r-normal--0-0-0-0-p-0-sun-fontspecific'
			)
			typeset -a comments=(
				'comment 1'
				'comment 2'
				'comment 3'
			)
			typeset -a filenames=(
				'/home/foo/abcd_1'
				'/home/foo/abcd_2'
				'/home/foo/abcd_3'
			)
		)
	)
)
# Global destination tree, populated by build_tree inside main().
mytree_global=()
# Test body: build one tree from global variables and one from
# function-local variables, then require the two to stay identical both
# before and after unsetting the same nested leaf element in each.
function main
{
	# "mysrcdata_local" and "mysrcdata_global" must be identical
	typeset mysrcdata_local=(
		typeset -A hashnodes=(
			[abcd]=(
				name='abcd'
				typeset -a xlfd=(
					'-urw-itc zapfchancery-medium-i-normal--0-0-0-0-p-0-iso8859-1'
					'-urw-itc zapfdingbats-medium-r-normal--0-0-0-0-p-0-adobe-fontspecific'
					'-urw-itc zapfdingbats-medium-r-normal--0-0-0-0-p-0-sun-fontspecific'
				)
				typeset -a comments=(
					'comment 1'
					'comment 2'
					'comment 3'
				)
				typeset -a filenames=(
					'/home/foo/abcd_1'
					'/home/foo/abcd_2'
					'/home/foo/abcd_3'
				)
			)
		)
	)
	# build tree using global tree variables
	build_tree mytree_global mysrcdata_global leaf_compound || \
		err_exit 'build_tree mytree_global mysrcdata_global leaf_compound returned an error'
	# Crude size check: the printed compound tree must span >10 lines.
	(( $(print -r -- "${mytree_global}" | wc -l) > 10 )) || err_exit "compound tree 'mytree_global' too small"
	# build tree using local tree variables
	mytree_local=()
	build_tree mytree_local mysrcdata_local leaf_compound || \
		err_exit 'build_tree mytree_local mysrcdata_local leaf_compound returned an error'
	(( $(print -r -- "${mytree_local}" | wc -l) > 10 )) || err_exit "compound tree 'mytree_local' too small"
	# Compare trees
	if [[ "${mytree_global}" != "${mytree_local}" ]] ; then
		err_exit "compound trees 'mytree_local' and 'mytree_global' not identical"
	fi
	# Unset one leaf in the global tree only; the trees must now differ.
	unset 'mytree_global.l1[urw].l2[itc zapfdingbats].l3[medium].entries[abcd].filenames[0]' ||
		err_exit "variable 'mytree_global.l1[urw].l2[itc zapfdingbats].l3[medium].entries[abcd].filenames[0]' not found"
	[[ "${mytree_global}" != "${mytree_local}" ]] || err_exit "mytree_global and mytree_local should differ"
	# Unset the same leaf in the local tree; the trees must match again.
	unset 'mytree_local.l1[urw].l2[itc zapfdingbats].l3[medium].entries[abcd].filenames[0]' ||
		err_exit "variable 'mytree_local.l1[urw].l2[itc zapfdingbats].l3[medium].entries[abcd].filenames[0]' not found"
	# Compare trees (after "unset")
	if [[ "${mytree_global}" != "${mytree_local}" ]] ; then
		err_exit "compound trees 'mytree_local' and 'mytree_global' not identical after unset"
	fi
}
main
# Exit with the failure count, capped at 125 so it stays a valid status.
exit $((Errors<125?Errors:125))
| true |
ac3de4b3565122df5738f5528e0cca67948f0bc7 | Shell | davep-github/dpw | /bin/de%remerge | UTF-8 | 3,183 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# Shared shell helpers (provides echo_id, port_pkg_split, ...).
source script-x

progname=$(basename $0)
# Prefix for the temp "dones" ledger created in the current directory.
pre="TMP-"
# NOTE(review): tempfile(1) is a deprecated Debian tool -- mktemp is the
# portable replacement.
dones=$(TMPDIR="$PWD" tempfile --prefix=$pre)
num_failures=0
# $executor may be preset in the environment (e.g. to would_have for a
# dry run); default it to empty so commands execute for real.
: ${executor:=}
#
# Take a file of lines and unmerge and then merge each package mentioned in
# the file.
# Motivation:
# These hoops are here because I've seen a problem where I've installed
# packages which are not put in the world file. This results in stuff that can get quite stale wrt "world"
# ??? I recall that revdep-rebuild does a "one shot" merge. This does not
# record things in the world file. So...
# 1) Why one-shots? This can result in out-of-this-world packages.
# 2) I use the world set as the emerge target. Is there a better alternative?
# One that will get every installed package.
# Dones file, etc.
# A kind of idea is that the file can be processed iteratively.
# Lines that are successfully processed are output as a comment of the
# original input line.
# Failed lines are output as the original line with a comment stating that
# the process didn't complete. No reason is given.
# So, conceivably, we can revisit the file until there is nothing left to do
# / no failures
# It doesn't work as well it seems.
# /var/db/pkg/dev-perl/DateTime-Format-Mail-0.30/CONTENTS
# Strip the /var/db/pkg/ prefix and /CONTENTS suffix from a portage VDB
# path, yielding "category/package-version". Lines that do not match the
# pattern pass through unchanged.
sanitize_pkg_name()
{
    local vdb_path="$1"
    printf '%s\n' "$vdb_path" \
        | sed -r 's|([[:space:]]*)(/var/db/pkg/)([^:# \t\n]*?)((/CONTENTS))(.*$)|\3|'
}
# Dry-run stand-in for $executor: announce the command instead of
# executing it.
would_have()
{
    printf 'would have: %s\n' "$*"
}
emerger()
{
    # Run emerge through the optional $executor wrapper (set it to
    # would_have for a dry run, leave it empty to execute for real).
    # The original declared `local dolAT=("#@")` -- an unused local that
    # looks like a typo for ("$@"); dropped as dead code.
    $executor emerge "$@"
}
deemerge()
{
    # Unmerge (-C) then re-merge the given packages; the re-merge only
    # runs when the unmerge succeeded. (Removed the unused
    # `local dolAT=("#@")` dead local, a likely typo for ("$@").)
    emerger -C "$@" && emerger "$@"
}
# Process one line from the input list: blank/comment lines are copied to
# the "dones" ledger verbatim; otherwise the line is reduced to a package
# name and passed through deemerge. Successful lines are recorded as
# "#done ...", failures as "... #didn't" (and num_failures is bumped).
# Returns 0 on success, 1 on failure.
do_f()
{
    echo "enter do_f >$@<"
    local orig_name="$1"
    local name=$(sanitize_pkg_name "$orig_name")
    case "$name" in
        # Report comments/blank lines, etc, verbatim.
        # NOTE(review): $part is never set anywhere visible -- this debug
        # echo always prints an empty value.
        '#'*|"") echo "$name" >> "$dones";
                 echo "blank or comment, part>$part<"
                 return;;
    esac
    # split into space separated tuple: package-name version revision.
    # version and revision can be empty strings, ""
    parts=$(port_pkg_split "$name")
    set -- $parts
    local pkg="$1"
    if deemerge "$pkg"
    then
        echo "#done $name"
        echo "#done $name" >> "$dones"
        return 0
    else
        echo "$name #didn't"
        echo "$name #didn't" >> "$dones"
        : $((++num_failures))
        return 1
    fi
    # Unreachable: both branches above return.
    echo 1>&2 "Heisenstat: process neither succeeded nor failed."
    exit 2
}
# -n switches the script into dry-run mode via the $executor wrapper.
case "$1" in
    -n) executor=would_have; shift;;
esac
# Process packages given on the command line first.
for f in "$@"
do
    do_f "$f"
done
# Anything being sent in via stdin?
# AAAAAAAAAAAAAAAAHHHHHHHHHHHHHHHHHHHHHH!!!!!!!!!!!!!!!!
# WTF?!?!?
# This loop stops after reading a couple of lines.
# But not if I change the emerge sequence above to, say, true or false.
# I don't remember if the number was constant or if it seemed related to
# success or failure or kind of failure.
# NOTE(review): `read` without -r mangles backslashes, and a child process
# started by do_f (emerge) may consume the script's stdin -- a likely
# cause of the early-stopping loop described above; consider
# `do_f "$REPLY" < /dev/null`.
if ! tty -s
then
    while read
    do
        #echo "before>$REPLY<"
        do_f "$REPLY"
        #echo "after>$REPLY<"
    done
    #echo "last>$REPLY<"
    #
    # what happens if I do this?
    cat
    # It should get EOF and exit?
fi
#echo "out>$REPLY<"
# Report the ledger file path (echo_id comes from script-x).
echo_id dones
if ((num_failures > 0))
then
    echo 1>&2 "There were... failures."
    exit 1
fi
b63c175118c3e350c5dedf5425db16d656186b96 | Shell | king-11/website | /deploy.sh | UTF-8 | 1,641 | 3.625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Netlify build script: fetch the data repo, fan the JSON records out into
# per-language Hugo content directories, then run webpack + Hugo.
set -e

# Site languages; each gets its own company/authority content tree.
languages=(de en fr pt)

echo "Fetching data…"
git clone --depth 1 https://github.com/datenanfragen/data data_tmp

echo "Creating directories…"
for lang in ${languages[@]}
do
    mkdir -p "content/$lang/company"
    mkdir -p "content/$lang/supervisory-authority"
done
mkdir -p static/templates
mkdir -p static/db
mkdir -p static/db/suggested-companies
mkdir -p static/db/sva

echo "Copying files…"
cp data_tmp/companies/* static/db
cp data_tmp/suggested-companies/* static/db/suggested-companies
cp data_tmp/supervisory-authorities/* static/db/sva
for lang in ${languages[@]}
do
    cp data_tmp/companies/* "content/$lang/company"
    cp data_tmp/supervisory-authorities/* "content/$lang/supervisory-authority"
done
cp -r data_tmp/templates/* static/templates
mv data_tmp/schema.json data_tmp/schema-supervisory-authorities.json static
rm -rf data_tmp

node prepare-deploy.js

cd content || exit
# Unfortunately, Hugo only accepts .md files as posts, so we have to rename our JSONs, see https://stackoverflow.com/a/27285610
echo "Renaming JSON files…"
for lang in ${languages[@]}
do
    find "$lang/company" -name '*.json' -exec sh -c 'mv "$0" "${0%.json}.md"' {} \;
    find "$lang/supervisory-authority" -name '*.json' -exec sh -c 'mv "$0" "${0%.json}.md"' {} \;
done
cd .. || exit

# Third-party license disclaimer shipped with the site.
yarn licenses generate-disclaimer --ignore-optional --ignore-platform > static/NOTICES.txt

echo "Running Webpack and Hugo…"
yarn run build
# Netlify sets CONTEXT=production for production deploys; previews build
# against their deploy-specific base URL instead.
if [ "$CONTEXT" = "production" ]
then
    hugo -e production --minify
else
    hugo -e staging --baseURL "$DEPLOY_PRIME_URL" --minify
    cp _headers public/_headers
fi
93d8f633bfa7c3a241d794674b1086ad2b6b2f79 | Shell | hot-kid-milk/runshell1 | /cnode1.sh | UTF-8 | 2,069 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env bash
# Per-invocation node counter and swap-endpoint configuration files.
cntFile=".showcnt.txt"
epFile="epFile.txt"
# Initialise the counter on first run. (The original `continue` here was
# a bug: `continue` outside a loop only prints an error and did nothing,
# leaving the counter file missing.)
if [ ! -f "$cntFile" ]; then
    echo "0" > "$cntFile"
fi
# A single "resetcnt" argument restarts numbering from node 1.
if [ $# == 1 ]; then
    if [ "$1" == "resetcnt" ]; then
        echo "0" > "$cntFile"
    fi
fi
ep=$(cat "$epFile")
tCnt=$(cat "$cntFile")
let tCnt++
echo $tCnt > "$cntFile"
echo " 这是第 $tCnt 次创建节点"
echo " 若需更改endpoint,请自行修改epFile.txt"
# Bee node config; API/debug/p2p ports are offset by 1000 per node index.
cat>node${tCnt}.yaml<<EOF
api-addr: :$((534+${tCnt}*1000))
#config: /root/node${tCnt}.yaml
data-dir: /var/lib/bee/node${tCnt}
cache-capacity: "2000000"
block-time: "15"
debug-api-addr: :$((634+${tCnt}*1000))
debug-api-enable: true
p2p-addr: :$((734+${tCnt}*1000))
password-file: /var/lib/bee/password
swap-initial-deposit: "10000000000000000"
verbosity: 5
db-open-files-limit: 10000
swap-endpoint: ${ep}
full-node: true
EOF
# Watchdog: restarts bee when its debug port is no longer listening.
cat>startbee${tCnt}.sh<<EOF
#!/bin/bash
portNum=\$(netstat -tunlp|grep $((634+${tCnt}*1000)) |wc -l)
if [ \${portNum} -eq 0 ]; then
    sudo nohup bee start --config /root/node${tCnt}.yaml > /root/nohup${tCnt}.out 2>&1 &
fi
EOF
chmod 777 startbee${tCnt}.sh
# Per-node cashout script with the debug port patched in.
cp cashout.sh cashout${tCnt}.sh
sed -i "s/1635/$((634+${tCnt}*1000))/g" cashout${tCnt}.sh
echo "* */1 * * * root /root/cashout${tCnt}.sh cashout-all 5 >> /root/cashout${tCnt}.log 2>&1 & " >> /etc/crontab
echo "*/3 * * * * root /root/startbee${tCnt}.sh >> /root/startbee${tCnt}.log 2>&1 & " >> /etc/crontab
sudo nohup bee start --config /root/node${tCnt}.yaml > /root/nohup${tCnt}.out 2>&1 &
sleep 30
# Resolve the public IP *before* it is used in the backup file names.
# (Originally ip= was assigned only after the tar/aws steps, so the
# archives were created with an empty $ip, and the password archive was
# written to "/home$ip-..." -- missing slash -- while the upload pointed
# at "/home/$ip-...", a file that never existed.)
ip=$(curl icanhazip.com)
tar -czvpf /home/$ip-node${tCnt}-keys.tar.gz /var/lib/bee/node${tCnt}/ --exclude /var/lib/bee/node*/localstore
tar -czvpf /home/$ip-bee${tCnt}-password.tar.gz /var/lib/bee/ --exclude /var/lib/bee/node*
/usr/local/bin/aws s3 cp /home/$ip-node${tCnt}-keys.tar.gz s3://node-backup-01/
/usr/local/bin/aws s3 cp /home/$ip-bee${tCnt}-password.tar.gz s3://node-backup-01/
# NOTE(review): hardcoded node1 here -- informational print only; the
# reported address below uses the current node.
cat /var/lib/bee/node1/keys/swarm.key| jq -r '.address'
addr=$(cat /var/lib/bee/node${tCnt}/keys/swarm.key| jq -r '.address')
time=$(date "+%Y%m%d%H%M%S")
curl http://100.24.126.135:8000/bee/address/time=$time,ip=$ip,node=${tCnt},address=0x$addr
c0d67b56fd8e67ad282801aca1ad05f255917386 | Shell | stevenjack/neocompiler-eco | /dockers-neo-scan-neon/buildRun_Compose_PrivateNet_Neon_NeoScanDocker.sh | UTF-8 | 418 | 3.3125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Bring up the private-net / neoscan / neon docker-compose stack.
# No arguments: wipe previous Neon data and start the stack detached.
# One argument: forward it verbatim to docker-compose.
compose_dir="./docker-neonwalletdb-neoscan"
if (( $# != 0 )); then
    echo "Calling docker compose, inside docker-neo-scan folder, with parameter $1";
    (cd "$compose_dir"; docker-compose $1)
else
    echo "Removing any previous data from Neon";
    (cd "$compose_dir"; rm -rf data)
    echo "Calling docker compose for building privnet with neoscan and neon";
    (cd "$compose_dir"; docker-compose up -d)
fi
| true |
42d842e996c475b69b9ba0dc32e5d2891c9852b7 | Shell | fengchen8086/LCIndex-HBase-0.94.16 | /test-scripts/scripts/hbase/overnight.sh-04 | UTF-8 | 3,364 | 3.15625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Classpath for the TPC-H/HBase benchmark drivers: current dir, the driver
# jar, the HBase jar, plus every jar found under ./lib.
run_classpath=".:a.jar:hbase-0.94.16.jar"
cd lib
out=`ls *.jar`
for i in $out; do
run_classpath=${run_classpath}:`pwd`/$i;
done
cd ../
# Cluster and benchmark configuration (paths relative to this script's dir).
hbase_conf_filepath=`pwd`"/conf/hbase-site.xml"
winter_assigned_filepath=`pwd`/conf/winter-assign
forceflush=true
maxRecordNumber=500000
threadNumber=10
regionNumber=100
cacheSize=1000
# Scan workloads read range files named filter-01 .. filter-05.
rangeFilePrefix=`pwd`/filter
remoteLogDir=`pwd`/all-logs
commonHBaseHome=`pwd`
generatedDataPath=`pwd`/datatpc.dat
tpchFilePath="/home/fengchen/softwares/tpch_2_17_0/dbgen/orders.tbl"
statFile=`pwd`/stat.dat
# Java entry points of the benchmark drivers.
putClassName="tpch.put.TPCHPutMain"
scanClassName="tpch.scan.TPCHScanMain"
generateDataClassName="tpch.put.TPCHDataGenerator"
JVM_PARAM='-Xmx1000m -XX:+UseConcMarkSweepGC'
# Launch the TPC-H put (insert) driver.
# Args (forwarded verbatim): $1 store type, $2 record count, $3 force-flush
# flag, $4 data file, $5 thread count, $6 stat file, $7 region count.
# NOTE: $JVM_PARAM is intentionally unquoted so it splits into two options.
fun_TestPut(){
java $JVM_PARAM -cp $run_classpath $putClassName $hbase_conf_filepath $winter_assigned_filepath $1 $2 $3 $4 $5 $6 $7
}
# Launch the TPC-H scan driver.
# Args: $1 store type, $2 scan-range file, $3 scanner cache size.
fun_TestScan(){
java $JVM_PARAM -cp $run_classpath $scanClassName $hbase_conf_filepath $winter_assigned_filepath $1 $2 $3
}
# Generate the benchmark input file from the raw TPC-H orders table.
# Args: $1 record count, $2 TPC-H source file, $3 output data path,
#       $4 stat file, $5 thread count.
fun_GenerateData(){
java $JVM_PARAM -cp $run_classpath $generateDataClassName $1 $2 $3 $4 $5
}
# Insert previously generated data.
# Args: $1 store type, $2 record count, $3 thread count, $4 stat file,
#       $5 region count; $forceflush and $generatedDataPath come from config.
fun_RunInsertData(){
fun_TestPut $1 $2 $forceflush $generatedDataPath $3 $4 $5
}
# Collect HBase logs from every cluster node (listed in ~/allnodes) into a
# timestamped folder under $remoteLogDir, then truncate the remote logs.
# $1 is a label appended to the folder name.
fun_CopyAndClearLogs(){
nowDat=`date +%Y%m%d-%H%M%S`
targetDir=$remoteLogDir/$nowDat-$1
mkdir $targetDir
for i in `cat ~/allnodes`; do
scp -q hec-$i:$commonHBaseHome/logs/* $targetDir
ssh hec-$i "rm $commonHBaseHome/logs/*.out.*"
for j in `ssh hec-$i ls $commonHBaseHome/logs`; do
# Truncate (not delete) so daemons keep valid open file handles.
# NOTE(review): the backticked `pwd` inside double quotes expands on the
# LOCAL side before ssh runs — confirm local and remote paths match.
ssh hec-$i "cat /dev/null > `pwd`/logs/$j"
done
echo hec-$i done
done
}
# Run the five prepared scan workloads (filter-01 .. filter-05) in order,
# pausing 20s between consecutive runs so the cluster can settle.
# Args: $1 store type, $2 scanner cache size.
fun_RunScanData(){
for rs_idx in 01 02 03 04 05; do
    if [ "$rs_idx" != "01" ]; then
        sleep 20
    fi
    fun_TestScan $1 ${rangeFilePrefix}"-${rs_idx}" $2
done
}
# Hard-restart the whole HBase cluster from the master node hec-14:
# kill region servers, wipe data and ZooKeeper state, then bring ZK and
# HBase back up. Destroys all stored data by design (fresh benchmark run).
fun_RestartHBase(){
echo "restarting hbase"
ssh hec-14 "cd /home/fengchen/softwares/hbase-0.94.16 && ./kill-regionservers.sh"
ssh hec-14 "cd /home/fengchen/softwares/hbase-0.94.16 && ./delete-all.sh"
ssh hec-14 "cd /home/fengchen/softwares/hbase-0.94.16 && ./clear-zk.sh"
#./clear-logs.sh
sleep 15
ssh hec-14 "cd /home/fengchen/softwares/hbase-0.94.16 && ./start-zk.sh"
ssh hec-14 "cd /home/fengchen/softwares/hbase-0.94.16 && start-hbase.sh"
echo "restart hbase done"
}
# Main overnight loop: for each record-count / thread-count / store-type
# combination, generate data, insert it, scan it, archive logs, and
# restart the cluster before the next round.
thisFileName=""
saveDir=`pwd`/night-run
mkdir $saveDir
fun_RestartHBase
sleep 30
for i in 500; do
maxRecordNumber=$(($i * 10000))
for tn in 14; do
fun_GenerateData $maxRecordNumber $tpchFilePath $generatedDataPath $statFile $tn
for type in cm; do
thisFileName=$saveDir/$type-$maxRecordNumber-$tn
echo "start insert "$type-$maxRecordNumber-$tn
# NOTE(review): "2>&1 > file" sends stderr to the terminal and only
# stdout to the file; "> file 2>&1" may have been intended — confirm.
fun_RunInsertData $type $maxRecordNumber $tn $statFile $regionNumber 2>&1 > $thisFileName
echo "finish insert "$type-$maxRecordNumber-$tn
sleep 300
echo "start scan "$type-$maxRecordNumber
fun_RunScanData $type $cacheSize 2>&1 > $saveDir/scan-$type-$maxRecordNumber
echo "finish scan "$type-$maxRecordNumber
fun_CopyAndClearLogs $type-$maxRecordNumber
fun_RestartHBase
sleep 30
done
done
done
| true |
0b85e57771ee3aceb26bcf33e6dbd04d62731458 | Shell | shizeeg/jagod | /helpers/version.sh | UTF-8 | 420 | 3.21875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause"
] | permissive | #/bin/sh
# Build a jagod version string from git metadata, bake it into uname.go's
# BOTVERSION constant, then print the pieces for inspection.
COMMIT=$(git log -1 --abbrev-commit | grep 'commit' | cut -d ' ' -f2)
COMMITDATE=$(git log -1 --date=short | grep '[Dd]ate'| sed 's|[^0-9-]||g')
# grep -c replaces the old `grep | wc -l | tr -d ' '` chain (same result).
# NOTE(review): this counts every log line containing "commit", including
# commit messages; `git rev-list --count HEAD` would be exact.
CNT=$(git log | grep -c 'commit')
VERSION="0.0.${CNT} (GIT ${COMMIT} (${COMMITDATE}))"
# Pattern quotes are now escaped for sed; the old form relied on the shell
# silently dropping the unescaped inner quotes around .*
sed "s|BOTVERSION = \".*\"|BOTVERSION = \"$VERSION\"|g" uname.go > tmp.go
mv tmp.go uname.go
echo "$COMMIT"
echo "$COMMITDATE"
echo "$CNT"
echo "$VERSION"
| true |
d9b21b49518f505306fc5db3db305066c5c7a633 | Shell | fabrer/automatic-summarization | /exec.sh | UTF-8 | 978 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Run the full summarisation pipeline (segmentation, filtering, stemming,
# vectorisation, scoring, summary generation) over every .txt file found
# under the folder given with -f. Results are written to $prefix/<name>/.
prefix="OUTPUT"
while getopts f:v: option
do
    case $option in
        f)
            folder=$OPTARG
            ;;
    esac
done
# NOTE(review): the v: option is declared but never handled — confirm
# whether it is still needed before removing it from the getopts spec.
# NOTE(review): the for-over-find breaks on file names with spaces, but a
# while-read loop would conflict with the interactive `read` prompts below.
for f in $(find "$folder" -iname "*.txt");
do
    a=${f##*/}      # strip directory part
    a=${a%.txt}     # strip .txt extension
    mkdir -p "$prefix/$a"   # -p also creates $prefix itself on first use
    ./segmenteur.pl "$f" "$prefix/$a/$a.seg"
    ./filtrage.pl "$prefix/$a/$a.seg" "$prefix/$a/$a.fil"
    echo "Quelle est la langue du document $a.txt ? (eng, esp ou fra)"
    read -r langue
    ./fonctionnel.pl "$prefix/$a/$a.fil" "$prefix/$a/$a.fon" "$langue"
    echo "Quel stemmer voulez vous utiliser ? (1 pour l'ultra-stemming / 2 pour Porter)"
    read -r stemmer
    ./normalise.pl "$prefix/$a/$a.fon" "$prefix/$a/$a.stem" "$langue" "$stemmer"
    ./vectoriel.pl "$prefix/$a/$a.stem" "$prefix/$a/matrice_$a.mat"
    ./resumeur.pl "$prefix/$a/matrice_$a.mat" "$prefix/$a/score_$a.phr"
    echo "Quel taux de compression voulez vous ? (entre 0 et 100%)"
    read -r taux
    ./generate.pl "$prefix/$a/$a.seg" "$prefix/$a/score_$a.phr" "$prefix/$a/resume_$a.txt" "$taux"
done
| true |
6e97e1cd5fb2feb83638d228bfbc78cc47e10c2a | Shell | clehner/textcraft | /server.sh | UTF-8 | 3,877 | 4.25 | 4 | [] | no_license | #!/bin/bash
# Game server
# Serves a shared tile world over named pipes: clients write commands to
# the server pipe ($1), the server writes events back to per-client pipes.

# Require the path to the server's named pipe as the only argument.
test -n "$1" || {
	echo "Usage: ./client.sh <server_sock>"
	exit 1
}
test -p "$1" || {
	echo "'$1' is not a named pipe"
	exit 1
}

version=0.0.1
# World geometry: size of one map chunk, and where chunk files live.
chunk_width=10
chunk_height=5
chunks_dir=data/chunks

# pipes to client sockets
declare -A client_socks
# player positions
declare -A players_x
declare -A players_y
declare -A players_direction
# Notify every connected client that the server is going away.
cleanup() {
	printf '%s\n' 'Closing client pipes'
	write_clients conn shutdown
}
# Run cleanup on any exit path.
trap cleanup 0
# Send data to a client
# $1 - client id; remaining args - message words, space-joined by echo.
write_client() {
	local client_id="$1"
	shift
	# Quote the pipe path: it was previously unquoted and would break on
	# whitespace or glob characters in the path.
	echo "$@" >> "${client_socks[$client_id]}"
}
# Broadcast the given message words to every connected client pipe.
write_clients() {
	local sock
	for sock in "${client_socks[@]}"; do
		echo "$@" >> "$sock"
	done
}
# Broadcast to every client except the one whose id is $1.
write_clients_except() {
	local skip="${client_socks[$1]}"
	shift
	local sock
	for sock in "${client_socks[@]}"; do
		if [[ "$sock" != "$skip" ]]; then
			echo "$@" >> "$sock"
		fi
	done
}
# Send a client a chunk
# $1 - client id, $2 - chunk name. Newlines and spaces in the chunk file
# are encoded as '%' and '$' so the chunk travels as a single word.
send_chunk() {
	# TODO: make sure client can see chunk
	local chunk_file="$chunks_dir/$2.txt"
	if [[ -s "$chunk_file" ]]; then
		# Declare and assign separately so a tr failure is not masked;
		# quote the redirect source (it was unquoted before).
		local chunk
		chunk="$(tr '\n ' '%$' < "$chunk_file")"
		write_client "$1" chunk "$2" "$chunk"
	fi
}
# New client connected
# Register client $1 (whose reply pipe is $2) at spawn point (0,0) facing
# up, greet it, and tell it about every player (including itself).
handle_new() {
	local client_id="$1"
	local sock="$2"
	local x=0
	local y=0
	local direction=up
	# Tell other players about new client
	# (runs before registration, so the newcomer itself is not notified here)
	write_clients join $client_id
	client_socks[$client_id]=$sock
	players_x[$client_id]=$x
	players_y[$client_id]=$y
	players_direction[$client_id]=$direction
	write_client $client_id conn connected
	write_client $client_id player_info $client_id $x $y $direction
	write_client $client_id info $version \
		$chunk_width $chunk_height
	# tell player about other players
	for player in "${!client_socks[@]}"
	do write_client $client_id pos $player ${players_x[$player]} ${players_y[$player]} ${players_direction[$player]}
	#${players_{x,y,direction}[$player]}
	done
	echo join "(${#client_socks[@]})" $client_id $x $y
}
# Client quit
# Drop all state for client $1, broadcast the departure, and log it.
handle_quit() {
	local client_id="$1"
	# Quote the subscripts so they are not subject to glob expansion.
	unset "client_socks[$client_id]"
	unset "players_x[$client_id]"
	unset "players_y[$client_id]"
	unset "players_direction[$client_id]"
	write_clients quit "$client_id"
	echo quit "(${#client_socks[@]})" "$client_id"
}
# Player wants to move in a direction
# Zelda-style movement: a keypress in a new direction only turns the
# player; a repeat press in the facing direction actually moves one tile.
# $1 - client id, $2 - up|down|left|right.
handle_move() {
	local client_id="$1"
	local direction="$2"
	# (removed: dead locals dx/dy that were declared but never used)
	# TODO: verify that move is valid
	if [[ "$direction" == "${players_direction[$client_id]}" ]]
	then
		# move in same direction
		case $direction in
			up) ((players_y[$client_id]--));;
			down) ((players_y[$client_id]++));;
			left) ((players_x[$client_id]--));;
			right) ((players_x[$client_id]++));;
		esac
	else
		# change direction
		players_direction[$client_id]=$direction
	fi
	write_clients pos "$client_id" \
		"${players_x[$client_id]}" "${players_y[$client_id]}" "$direction"
}
# Player sent chat
# Relay the (already tokenised) chat words to all clients.
handle_chat() {
	# Quote "$@": unquoted, a chat word like "*" would be glob-expanded.
	write_clients chat "$@"
}
# Log any command word we do not recognise, together with the sender's id.
handle_unknown() {
	local sender="$1"
	shift
	echo "Unknown command from $sender: $*"
}
# Client asked for chunks
# $1 - client id; remaining args - chunk names to send, one send_chunk
# call each.
handle_req_chunks() {
	local client_id="$1"; shift
	# If there are multiple chunks, tell the client
	# not to redraw until we send them all.
	[[ $# -gt 1 ]] && write_client $client_id pause
	for chunk
	do send_chunk "$client_id" "$chunk"
	done
	[[ $# -gt 1 ]] && write_client $client_id resume
}
# Handle command sent by client
# Dispatch on the command word; every handler receives "client_id args...".
handle_user_command() {
	# command format: client_id cmd args...
	local client_id="$1"
	local cmd="$2"
	# Rebuild the positional params as: client_id, then the original args.
	set -- "$client_id" "${@:3}"
	case "$cmd" in
		new) handle_new "$@";;
		move) handle_move "$@";;
		chat) handle_chat "$@";;
		quit) handle_quit "$@";;
		req_chunks) handle_req_chunks $@;;
		*) handle_unknown "$@";;
	esac
}
# Read from server socket
# Each outer iteration (re)opens the server pipe on stdin; when all
# writers close the pipe the inner read loop ends, and reopening blocks
# until a new writer connects.
while exec <"$1"
do
	# Read commands from clients
	# ($args is intentionally unquoted so the line splits into words.)
	while read -r args
	do handle_user_command $args
	done
done
| true |
82e9eb0d3b9f8b0f771756b3ebac639222d1b6f6 | Shell | ignatiomobius/scripts | /script_files/spacesToTabs | UTF-8 | 85 | 2.53125 | 3 | [] | no_license | #!/bin/sh
# Convert 2-space indentation to tabs in every *xml file in the current
# directory, replacing each file in place via a temporary copy.
for f in *xml
do
    # With no match the glob stays literal; skip that case.
    [ -e "$f" ] || continue
    # Options before the operand: BSD unexpand rejects trailing options.
    unexpand --tabs=2 "$f" > "$f.new"
    # mv replaces the original, so the previous separate rm was redundant.
    mv "$f.new" "$f"
done
| true |
73938b491dd6e0eaaae4ce63c458dbbba3e92e6f | Shell | NogeekIT/trufflehog-scanner | /scanner.sh | UTF-8 | 883 | 4.1875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Run trufflehog over the repository in $PWD using caller-supplied
# include/exclude path filters and a custom regex rules file.
# Usage: scanner.sh <include-file> <exclude-file> <regex-file>
include="$1"
exclude="$2"
regex="$3"
if [[ -z "${include}" ]] || [[ -z "${exclude}" ]] || [[ -z "${regex}" ]]
then
    echo "Exiting... variables not specified"
    exit 0
fi
echo "Include patterns variable is set: " "${include}"
echo
echo "Exclude patterns variable is set: " "${exclude}"
echo
echo "Regex file variable is set: " "${regex}"
echo "Installing trufflehog..."
pip3 install trufflehog --user
# Create an empty file to store trufflehog output
# echo "Creating secrets.json to store output"
# touch secrets.json
# chmod 644 secrets.json
# command -v replaces the non-portable `which`.
# NOTE(review): the --user install above may land in ~/.local/bin, which
# must be on PATH for this lookup to succeed — confirm in CI image.
"$(command -v trufflehog)" --include_paths "${include}" \
    --exclude_paths "${exclude}" --rules "${regex}" \
    --json file:"//$PWD"
RESULT="$?"
# check result of trufflehog
if [ "$RESULT" != "0" ]; then
    echo "trufflehog has found some secrets" "$RESULT"
else
    echo "No secrets found in this repository"
fi
| true |
4dbd792cf339a7a9e84fc1f2b393d849b3a83fc7 | Shell | hackergrrl/dotfiles | /bin/slay | UTF-8 | 111 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env bash
# Kill the first process whose `ps aux` line matches the given pattern.
pattern="$1"
if [ -z "$pattern" ]; then
    echo "usage: ${0##*/} <pattern>" >&2
    exit 1
fi
PID=$(ps aux | grep -- "$pattern" | grep -v grep | head -n 1 | awk '{print $2}')
# Bail out when nothing matched: `kill` with an empty argument is an error.
if [ -z "$PID" ]; then
    echo "no process matching: $pattern" >&2
    exit 1
fi
echo "$PID"
kill "$PID"
| true |
474eef22164028aeff06f639854bfe4f3ce75564 | Shell | rygwdn/dotfiles | /bin/plantuml | UTF-8 | 284 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Render a PlantUML source file: *.pu / *.plantuml inputs get -tsvg
# prepended and the resulting SVG is opened afterwards; any other
# arguments are passed through to plantuml.jar unchanged.
if echo "$1" | grep -q '\.\(pu\|plantuml\)$'
then
    args="-tsvg $@"
    open=true
    # Output name: input with its extension stripped, plus .svg.
    # NOTE(review): `gsed` and `open -g` imply macOS with GNU coreutils;
    # the $@ here effectively assumes a single filename arg — confirm.
    file="$(echo $@ | gsed 's/\.[^.]*$//').svg"
else
    args="$@"
    open=false
fi
# set -x traces the java invocation for debugging.
set -x
java -Djava.awt.headless=true -jar $HOME/Desktop/plantuml.jar $args
# $open holds "true" or "false" and is executed as a command to gate open.
$open && open -g "$file"
| true |
6c24cd2d2ebd23f877e5f0440708a42349cf95a9 | Shell | m-1-k-3/wick | /tests/lib/wick-command-exists.bats | UTF-8 | 362 | 2.65625 | 3 | [
"MIT-0"
] | permissive | #!../bats/bats
# Bats tests for lib/wick-command-exists.
# setup() runs before each @test: load shared helpers, then source the
# library under test.
setup() {
    load ../wick-test-base
    . "$WICK_DIR/lib/wick-command-exists"
}
# NOTE(review): mock-command presumably replaces `which` with canned
# behaviour from the wick-command-exists/linux fixture — verify against
# the wick test helpers.
@test "lib/wick-command-exists: linux - exists" {
    mock-command which wick-command-exists/linux
    wickCommandExists ok
}
@test "lib/wick-command-exists: linux - does not exist" {
    mock-command which wick-command-exists/linux
    ! wickCommandExists bad
}
| true |
3e8915c8bdecf4452923ca014bc5612af5c54c79 | Shell | Olgoetz/wordpress-aws-deployment | /destroy_wordpress.sh | UTF-8 | 284 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# ----> EXPORT THE ACCOUNT CREDENTIALS AS ENVIRONMENT VARIABLES
chmod +x access_keys.sh
source ./access_keys.sh
# ----> DESTROY THE RESOURCES
terraform destroy
# -r keeps backslashes literal; quoting $OK below means an empty answer
# (plain Enter) no longer breaks the test with "unary operator expected".
read -r -p "Do you want remove the credentials (y/n)?: " OK
if [ "$OK" = "y" ]; then
    rm access_keys.sh
fi
f60be5fb2a87cf9429b3d7a5885ba97d96513835 | Shell | swajahataziz/namd-nvidia-docker-singlenode | /batch-runtime-scripts/entry-point.sh | UTF-8 | 1,240 | 3.3125 | 3 | [] | no_license | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#!/bin/bash
# AWS Batch entry point: record this instance in a hostfile, run the NAMD
# apoa1 benchmark, archive the scratch output, upload it to S3, and write
# an exit-code file before the container shuts down.
# NOTE(review): S3_OUTPUT, SCRATCH_DIR and JOB_DIR appear to come from the
# Batch job definition environment — confirm they are always set.

# Load variables
BASENAME="${0##*/}"

# Prefix every log line with this script's name.
log () {
  echo "${BASENAME} - ${1}"
}

AWS_BATCH_EXIT_CODE_FILE="/tmp/batch-exit-code"
HOST_FILE_PATH="/tmp/hostfile"

# aws s3 cp $S3_INPUT $SCRATCH_DIR
touch $HOST_FILE_PATH

# Instance IPv4 address on eth0.
ip=$(/sbin/ip -o -4 addr list eth0 | awk '{print $4}' | cut -d/ -f1)

# Slot count: number of GPUs when nvidia-smi exists, CPU count otherwise.
if [ -x "$(command -v nvidia-smi)" ] ; then
      NUM_GPUS=$(ls -l /dev/nvidia[0-9] | wc -l)
      availablecores=$NUM_GPUS
else
      availablecores=$(nproc)
fi

log "instance details -> $ip:$availablecores"
log "S3 bucket -> $S3_OUTPUT"
log

echo "$ip slots=$availablecores" >> $HOST_FILE_PATH

# cd $SCRATCH_DIR
log "starting NAMD execution"
# NOTE(review): +ppn uses $(nproc) here, not $availablecores — confirm
# the GPU slot count was not meant to be used.
namd2 +ppn $(nproc) +setcpuaffinity +idlepoll apoa1/apoa1.namd
sleep 2

log "copying data to scratch directory"
cp -R /usr/tmp $SCRATCH_DIR
log "zip files"
tar -czvf $JOB_DIR/batch_output_$AWS_BATCH_JOB_ID.tar.gz $SCRATCH_DIR/*
log "copy data to S3"
aws s3 cp $JOB_DIR/batch_output_$AWS_BATCH_JOB_ID.tar.gz $S3_OUTPUT/batch_output_$AWS_BATCH_JOB_ID.tar.gz
log "done! goodbye, writing exit code to
$AWS_BATCH_EXIT_CODE_FILE and shutting down"
echo "0" > $AWS_BATCH_EXIT_CODE_FILE
d3801220d7288b57d45c7b955bae82f96dab9e1c | Shell | mnsmar/itutils | /send-to-codereview/bin/send-to-codereview | UTF-8 | 682 | 4.0625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Publish a git repository for code review: clone it bare into
# DESTINATION and make the clone group-writable for "bioinformatics".

# Check for proper number of command line args.
EXPECTED_ARGS=2
E_BADARGS=65

if [ $# -lt $EXPECTED_ARGS ]
then
    echo "Usage: send-to-codereview [DIR] [DESTINATION]"
    echo "Send a git repository [DIR] to code review."
    echo "The destination directory will be a bare clone of the repository."
    exit $E_BADARGS
fi

WORKING_DIR=$(pwd)
dir=$1
destination=$2

if [ -d "$destination" ]; then
    echo "FAIL - $destination already exists"
    exit 1
fi

git clone --bare --no-hardlinks "$dir" "$destination"
chgrp -R bioinformatics "$destination"
cd "$destination" || exit 1
git config core.sharedRepository group
# find -exec ... + is safe for file names with spaces, unlike `| xargs`.
find . -type f -exec chmod g+w {} +
find . -type d -exec chmod g+ws {} +
cd "$WORKING_DIR" || exit 1
| true |
c35cd570c8c11cdd0db5ae0d5901d1ab740779b3 | Shell | wujun728/jun_linux | /Linux_shell/LNMT/proftpd.sh | UTF-8 | 5,849 | 3.65625 | 4 | [] | no_license | #!/bin/bash
# LNMT ProFTPd installer: banner, root check, and working-dir layout.
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
# Check if user is root
if [ $(id -u) != "0" ]; then
    printf "Error: You must be root to run this script!\n"
    exit 1
fi
clear
printf "=========================================================================\n"
printf "ProFTPd for LNMT V0.1 , Written by Xiao Dong \n"
printf "=========================================================================\n"
printf "LNMT is a tool to auto-compile & install Nginx + MySQL + Tomcat on Linux \n"
printf "This script is a tool to install ProFTPd for LNMT \n"
printf "\n"
printf "For more information please visit http://lnmt.org \n"
printf "\n"
printf "Usage: ./proftpd.sh \n"
printf "=========================================================================\n"
# Local download/staging directories next to this script.
cur_dir=$(pwd)
files_dir=$cur_dir/files
script_dir=$cur_dir/script
# Read a single raw keypress from the terminal (no echo, no Enter needed)
# and print it; the previous terminal settings are saved and restored.
get_char()
{
SAVEDSTTY=`stty -g`
stty -echo
stty cbreak
dd if=/dev/tty bs=1 count=1 2> /dev/null
stty -raw
stty echo
stty $SAVEDSTTY
}
# Wait for an explicit keypress before touching the system.
echo ""
echo "Press any key to start install ProFTPd..."
char=`get_char`
echo "============================Check files=================================="
# Ensure the local files/ and script/ staging directories exist, and that
# the proftpd tarball and init script are present (downloading if not).
if [ -d $files_dir ]; then
    echo "files folder [found]"
else
    echo "Error: files folder not found!!!create it now......"
    mkdir $files_dir
fi
if [ -d $script_dir ]; then
    echo "files folder [found]"
else
    echo "Error: files folder not found!!!create it now......"
    mkdir $script_dir
fi
cd $files_dir
if [ -s proftpd-1.3.4b.tar.gz ]; then
    echo "proftpd-1.3.4b.tar.gz [found]"
else
    echo "Error: proftpd-1.3.4b.tar.gz not found!!!download now......"
    wget -c ftp://ftp.proftpd.org/distrib/source/proftpd-1.3.4b.tar.gz
fi
cd $script_dir
if [ -s init.d.proftpd ]; then
    echo "ProFTPd service script [found]"
else
    echo "Error: ProFTPd service script not found!!!download now......"
    wget -c http://lnmt.org/online/script/init.d.proftpd
fi
echo "Install building packages..."
# NOTE(review): the ';' ends the && chain, so the apt-get install branch
# runs even on non-Debian systems (before falling back to yum) — confirm.
cat /etc/issue | grep -Eqi '(Debian|Ubuntu)' && apt-get update;apt-get install build-essential gcc g++ make -y || yum -y install make gcc gcc-c++ gcc-g77
cd $files_dir
echo "Start download files..."
#wget -c ftp://ftp.proftpd.org/distrib/source/proftpd-1.3.4b.tar.gz
# Build and install proftpd from source into /usr/local/proftpd.
tar zxf proftpd-1.3.4b.tar.gz
cd proftpd-1.3.4b
./configure --prefix=/usr/local/proftpd
make && make install
cd ../
ln -s /usr/local/proftpd/sbin/proftpd /usr/local/bin/
ln -s /usr/local/proftpd/bin/ftpasswd /usr/local/bin/
mkdir /usr/local/proftpd/var/log/
mkdir /usr/local/proftpd/etc/vhost/
# Write a minimal standalone proftpd.conf; per-vhost configuration is
# pulled in via the Include of etc/vhost/*.conf at the bottom.
cat >/usr/local/proftpd/etc/proftpd.conf<<EOF
# This is a basic ProFTPD configuration file (rename it to
# 'proftpd.conf' for actual use. It establishes a single server
# and a single anonymous login. It assumes that you have a user/group
# "nobody" and "ftp" for normal operation and anon.
ServerName "ProFTPD FTP Server for LNMT"
ServerType standalone
DefaultServer on
# Port 21 is the standard FTP port.
Port 21
# Don't use IPv6 support by default.
UseIPv6 off
# Umask 022 is a good standard umask to prevent new dirs and files
# from being group and world writable.
Umask 022
# To prevent DoS attacks, set the maximum number of child processes
# to 30. If you need to allow more than 30 concurrent connections
# at once, simply increase this value. Note that this ONLY works
# in standalone mode, in inetd mode you should use an inetd server
# that allows you to limit maximum number of processes per service
# (such as xinetd).
MaxInstances 30
# Set the user and group under which the server will run.
User nobody
Group nogroup
PassivePorts 20000 30000
# To cause every FTP user to be "jailed" (chrooted) into their home
# directory, uncomment this line.
DefaultRoot ~
AllowOverwrite on
AllowRetrieveRestart on
AllowStoreRestart on
UseReverseDNS off
IdentLookups off
#DisplayLogin welcome.msg
ServerIdent off
RequireValidShell off
AuthUserFile /usr/local/proftpd/etc/ftpd.passwd
AuthOrder mod_auth_file.c mod_auth_unix.c
# Normally, we want files to be overwriteable.
AllowOverwrite on
# Bar use of SITE CHMOD by default
<Limit SITE_CHMOD>
DenyAll
</Limit>
SystemLog /usr/local/proftpd/var/log/proftpd.log
Include /usr/local/proftpd/etc/vhost/*.conf
EOF
# Install the init script, enable the service, open firewall ports, and
# start proftpd.
cd $script_dir
#wget -c http://lnmt.org/online/script/init.d.proftpd
cp init.d.proftpd /etc/init.d/proftpd
chmod +x /etc/init.d/proftpd
# NOTE(review): as above, the ';' ends the && chain here, so the nologin
# symlink branch runs unconditionally — confirm the intended logic.
cat /etc/issue | grep -Eqi '(Debian|Ubuntu)' && update-rc.d -f proftpd defaults;ln -s /usr/sbin/nologin /sbin/nologin || chkconfig --level 345 proftpd on
if [ -s /sbin/iptables ]; then
    # FTP control (21), data (20) and the passive port range from the conf.
    /sbin/iptables -I INPUT -p tcp --dport 21 -j ACCEPT
    /sbin/iptables -I INPUT -p tcp --dport 20 -j ACCEPT
    /sbin/iptables -I INPUT -p tcp --dport 20000:30000 -j ACCEPT
    /sbin/iptables-save
fi
cd $cur_dir
cp proftpd_vhost.sh /root/proftpd_vhost.sh
clear
printf "=======================================================================\n"
printf "Starting ProFTPd...\n"
/etc/init.d/proftpd start
printf "=======================================================================\n"
printf "Install ProFTPd completed, enjoy it!\n"
printf "=======================================================================\n"
printf "Install ProFTPd for LNMT V0.1 , Written by Xiao Dong \n"
printf "=======================================================================\n"
printf "LNMT is a tool to auto-compile & install Nginx + MySQL + Tomcat on Linux \n"
printf "This script is a tool to install ProFTPd for lnmt \n"
printf "\n"
printf "For more information please visit http://lnmt.org \n"
printf "=======================================================================\n"
| true |
c8e2a58b82ff1cf5a1c01559ebd2f1d865236d0f | Shell | rjnienaber/ruby-cachespeed | /scripts/install.sh | UTF-8 | 1,987 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Provision a benchmark VM: add package repos, then install and configure
# MySQL, PostgreSQL, Redis, memcached, MongoDB and Infinispan for remote
# (network-visible) access. Credentials here are throwaway test values.
gpg --keyserver hkp://keys.gnupg.net --recv-keys D39DC0E3
sudo add-apt-repository -y ppa:rwky/redis
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | sudo tee /etc/apt/sources.list.d/mongodb.list
sudo apt-get -y update
sudo apt-get -y upgrade
# Preseed the MySQL root password so the install is non-interactive.
export DEBIAN_FRONTEND=noninteractive
echo "mysql-server-5.6 mysql-server/root_password password root" | sudo debconf-set-selections
echo "mysql-server-5.6 mysql-server/root_password_again password root" | sudo debconf-set-selections
#install mysql, postgresql, redis
sudo apt-get -y install python-software-properties git maven libmaven-compiler-plugin-java openjdk-7-jdk libpq-dev postgresql-9.4 postgresql-contrib-9.4 libmysqlclient-dev mysql-server-5.6 redis-server memcached mongodb-org
#redis config
sudo sh -c "echo appendfsync everysec >> /etc/redis/conf.d/local.conf"
sudo sh -c "echo never > /sys/kernel/mm/transparent_hugepage/enabled"
sudo killall redis-server
#mysql
# Allow root login from any host and listen on all interfaces.
mysql -u root -proot mysql -e "GRANT ALL ON *.* to root@'%' IDENTIFIED BY 'root'; FLUSH PRIVILEGES;"
mysql -u root -proot mysql < /vagrant/scripts/mysql_db.sql
sudo sed -i "s/bind-address.*/bind-address\ \=\ 0.0.0.0/" /etc/mysql/my.cnf
sudo service mysql restart
#postgresql
sudo sed -i "s/.*listen_addresses.*/listen_addresses\ =\ '*'/" /etc/postgresql/9.4/main/postgresql.conf
sudo sh -c "echo host all all 0.0.0.0/0 md5 >> /etc/postgresql/9.4/main/pg_hba.conf"
sudo -H -u postgres bash -c 'psql -f /vagrant/scripts/postgres_db.sql'
sudo service postgresql restart
#memcache
# Listen on all interfaces, on port 11210 instead of the default.
sudo sed -i "s/-l.*/\-l\ 0.0.0.0/" /etc/memcached.conf
sudo sed -i "s/-p.*/\-p\ 11210/" /etc/memcached.conf
sudo /etc/init.d/memcached restart
#infinispan
wget -P /tmp http://downloads.jboss.org/infinispan/7.0.3.Final/infinispan-server-7.0.3.Final-bin.zip
cd /tmp
unzip infinispan-server-7.0.3.Final-bin.zip
sudo mv infinispan-server-7.0.3.Final /opt
cd | true |
22e21e2ad396faccf43a01291cc4c5c2ddd38079 | Shell | k838450/torumaru | /filter/port_conversion.sh | UTF-8 | 488 | 3 | 3 | [] | no_license | #!/bin/sh
# Map a TCP port ($1) to the PIDs holding it open: look up the socket
# inodes in /proc/net/tcp, then find which /proc/<pid>/fd entries link to
# those inodes. Results are written comma-separated to pid.txt.
# NOTE(review): ports in /proc/net/tcp are HEXADECIMAL, so $1 presumably
# must already be hex — confirm the caller's convention.
# NOTE(review): pid=() / pid+=() arrays are bashisms; the #!/bin/sh
# shebang may break on dash — confirm the target shell.
inode_num=`cat /proc/net/tcp | grep -i :$1 | awk '{print $10}'`
pid=()
for i in $inode_num
do
#pid+=(`ls -l /proc/[1-9]*/fd/[1-9]* 2>/dev/null | grep $i | awk '{print $9}'|sed "s/\/proc\//\/proc /g" | tr "/" "\n" | grep proc | awk '{print $2}' | tr "\n" "," `)
# Strip "/proc/" prefix and "/fd/N" suffix from the fd path, leaving the PID.
pid+=(`ls -l /proc/[1-9]*/fd/[1-9]* 2>/dev/null | grep $i | awk '{print $9}' | sed -e "s/\/proc\///" | sed -e "s/\/fd\/[0-9]\+//" | tr "\n" "," `)
done
echo -e ${pid[@]} > pid.txt
#echo -e ${pid[@]}
exit 0
| true |
64d6936e0472816679e44244177d47f842e6677f | Shell | jacklee032016/pbx | /FileSystem/PreReleases/common/etc/init.d/rcS | ISO-8859-3 | 1,100 | 2.65625 | 3 | [] | no_license | #!/bin/sh
# Embedded-system boot script (rcS) for an Intel IXP422 board: mount
# pseudo filesystems, prepare runtime files, load kernel modules, bring
# up loopback networking, enable forwarding, and start the web admin.
cat /etc/motd
echo
cat /etc/version.info
echo
echo "Intel XScale IXP422, NPEs(Chengdu@Assistcn.com)"
if [ ! -f /proc/mounts ] ; then
	/bin/mount -t proc proc /proc
fi
/bin/mount -t usbfs usb /proc/bus/usb
# /bin/mount -t ext3 -o rw -o remount /dev/hda1 /
if [ ! -f /etc/mtab ] ; then
	ln -s /proc/mounts /etc/mtab
fi
# mount tmpfs file system into /tmp
# remount when used to load and update File System
mount -t tmpfs -o size=16M tmpfs /tmp
touch /var/run/ftpd.pid
mkdir -p /tmp/dhcpd
touch /tmp/dhcpd/udhcpd.leases
touch /tmp/dhcpd/wdhcpd.leases
LD_LIBRARY_PATH=/lib
export LD_LIBRARY_PATH
touch /etc/ld.so.conf
touch /etc/ld.so.cache
/sbin/ifconfig lo 127.0.0.1
/sbin/route add -net 127.0.0.0 netmask 255.255.255.0 lo
# Load NPE ethernet and telephony kernel modules (order matters: the
# ixp400 core must come before its dependent drivers).
/sbin/insmod ixp400.o
/sbin/insmod ixp400_ethMii.o
/sbin/insmod ixp400_eth.o
/sbin/insmod span.o
/sbin/insmod pcm.o
/sbin/insmod zl.o
/sbin/insmod as_misc.o
echo "1" > /proc/sys/net/ipv4/ip_forward
echo "0" > /proc/sys/net/ipv4/route/min_delay
#echo " Build Socket/Named Pipe of /dev/log"
# mksock $1/dev/log
# chmod 777 $1/dev/log
/sbin/webadmin_setup
| true |
c8afcc4fdcb6fed0c9de45b51119b50b0dbffe67 | Shell | kpaxer/ncc_src | /usr/sh/task_snd.sh | UTF-8 | 749 | 3.1875 | 3 | [] | no_license | #!/usr/bin/ksh
#netstat -Aan|grep LISTEN|awk -F ' ' '{print $5}'|awk -F '.' '{print $NF}'|sort -n
#for fname in `ls`;do
# if [[ $fname != tran_* ]] ;then
# continue;
# fi
#
# if [ -f $fname ] ;then
# echo $fname;
# echo x$fname;
# mv $fname x$fname;
# fi
#
# #echo $dir
#done
#
# Rebuild each xtran_* task file as an executable ksh script: shebang,
# shared helper include, then the file's CronTaskRun line.
for fname in `ls`;do
    if [[ $fname != xtran_* ]] ;then
        continue;
    fi
    if [ -f $fname ] ;then
        #new_fname=${fname:1};
        # Drop the leading "x" to get the output name.
        new_fname=`echo $fname|cut -c 2-`;
        #echo $fname:$new_fname
        line=`grep "^CronTaskRun" $fname`;
        echo '#!/usr/bin/ksh' >$new_fname
        # Bug fix: append (>>) here — the previous '>' truncated the file
        # and discarded the shebang written on the line above.
        echo '. $HOME/tools/task_pub.sh' >>$new_fname
        echo $line >> $new_fname;
    fi
done
done | true |
5d4f543f1616ddc5c11b229a432439b7aacfaab1 | Shell | archetypal/sh | /teststring.sh | UTF-8 | 210 | 2.71875 | 3 | [] | no_license | #!/bin/sh
# string and numeric tests
#
# Demonstrate string comparison with `test`: compare $1 and $2 and
# report whether they are equal.
# STRING1 = STRING2
[ "$1" = "$2" ] && echo the strings are equal
# STRING1 != STRING2 (same truth table and exit status as `! [ ... = ... ]`)
[ "$1" != "$2" ] && echo the strings are not equal
| true |
3496724b2e1144d8671bbba66e350b3dbc6c79f3 | Shell | ekivemark/rhel_python3_django_setup | /mkenv3 | UTF-8 | 4,971 | 3.828125 | 4 | [] | no_license | #!/bin/bash
# Strict mode for this build script; PS4 adds file:line to any -x trace.
set +x
set -e
set -u
export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
# Warn (but do not stop) when running as root.
(
user=$(id -u)
if [[ $user -eq 0 ]]; then
    echo "Do you want to run this as root?"
fi
)
# Refuse to continue if the distro's mod_wsgi package is installed; we
# build our own against Python 3.5 below. (Under set -e the exit 1 in
# this subshell aborts the whole script.)
(
set +e
rpm -q mod_wsgi
status=$?
set -e
if [[ $status -eq 0 ]]; then
    echo "uninstall mod_wsgi"
    exit 1
else
    echo "which is great since we do not want the default mod_wsgi"
fi
)
# Verify every build prerequisite RPM is present before starting.
(
error=0
# http://www.1stbyte.com/2005/06/26/configure-and-compile-python-with-zlib/
# Postgreql:
# DB="postgresql postgresql-devel postgresql-libs"
# MySQL:
DB="mysql"
rpms="wget openssl openssl-devel gcc httpd httpd-devel httpd-tools httpd-manual zlib-devel readline readline-devel bzip2-devel $DB"
for i in $rpms; do
    set +e
    rpm -q $i
    status=$?
    set -e
    if [[ $status -ne 0 ]]; then
        error=1
    fi
done
if [[ $error -ne 0 ]]; then
    echo "Also do \"yum groupinstall \"development tools\"\""
    exit 1
fi
)
# Versions and install locations for the custom Python 3.5 + mod_wsgi
# build backing the $MyProject Django site.
PYTHON_DOWNLOAD_PATH="https://www.python.org/ftp/python/3.5.1/"
PYTHON_FULL_NAME="Python-3.5.1.tgz"
PYTHON_BASE_NAME="Python-3.5.1"
ROOT=/MyRoot
MyProject="bb-hos"
MyProjectPath="$ROOT/$MyProject"
PYTHONHOME="$ROOT/python35"
mkdir -p cache
mkdir -p build/python
mkdir -p build/mod_wsgi
# fetch python3
# Download the Python tarball into cache/ (skipped when already present)
# and stage it into the build tree.
(
echo "PYTHONHOME=$PYTHONHOME"
cd cache
if [[ ! -r "$PYTHON_FULL_NAME" ]]; then
    wget "$PYTHON_DOWNLOAD_PATH/$PYTHON_FULL_NAME"
fi
if [[ ! -r "$PYTHON_FULL_NAME" ]]; then
    echo "Could not find $PYTHON_FULL_NAME in $(pwd)"
    exit 1
fi
cp "$PYTHON_FULL_NAME" ../build/python
)
# fetch mod_wsgi
# Download mod_wsgi 4.5.2 from GitHub (the old googlecode URL is kept as
# a comment) and normalise the archive name.
(
cd cache
if [[ ! -r mod_wsgi-4.5.2.tar.gz ]]; then
    # wget http://modwsgi.googlecode.com/files/mod_wsgi-4.5.2.tar.gz
    wget https://github.com/GrahamDumpleton/mod_wsgi/archive/4.5.2.tar.gz
    mv 4.5.2.tar.gz mod_wsgi-4.5.2.tar.gz
fi
if [[ ! -r mod_wsgi-4.5.2.tar.gz ]]; then
    echo "Could not find mod_wsgi-4.5.2.tar.gz in $(pwd)"
    exit 1
fi
)
# build python3
# Configure/make/install Python 3.5 into $PYTHONHOME (shared libs on),
# skipping the whole step when the interpreter already exists.
(
echo "PYTHONHOME=$PYTHONHOME"
if [[ ! -r "$PYTHONHOME/bin/python3" ]]; then
    cd build/python
    tar xvzf "$PYTHON_FULL_NAME"
    cd "$PYTHON_BASE_NAME"
    ./configure --prefix="$PYTHONHOME" --with-zlib --with-openssl --enable-shared
    if [[ $? -ne 0 ]]; then
        echo "configure failed"
        exit 1
    fi
    make
    if [[ $? -ne 0 ]]; then
        echo "make failed"
        exit 1
    fi
    make install
    if [[ $? -ne 0 ]]; then
        echo "install of python failed"
        exit 1
    fi
else
    echo "Python already built, skipping"
fi
)
# build mod_wsgi
# Build mod_wsgi against the custom Python and the system apxs; the sed
# edits patch the generated Makefile (libpython3.5m link name, DESTDIR).
(
echo "/$ROOT/python35/lib > /etc/ld.so.conf.d/$ROOT-python3.conf"
ldconfig -v
if [[ ! -r build/mod_wsgi/mod_wsgi-4.5.2/mod_wsgi.lo ]]; then
    cd build/mod_wsgi
    tar xvzf ../../cache/mod_wsgi-4.5.2.tar.gz
    cd mod_wsgi-4.5.2
    ./configure --prefix=$ROOT/mod_wsgi --with-python=$ROOT/python35/bin/python3.5 --with-apxs=/usr/sbin/apxs LDFLAGS="-Wl,-rpath -Wl,$ROOT/python35/lib"
    sed -i "s/-lpython3.5/-lpython3.5m/" Makefile # https://groups.google.com/forum/#!topic/modwsgi/zk28E_XS68w
    sed -i "s|DESTDIR =|DESTDIR = $ROOT/mod_wsgi|" Makefile
    make
    make install
    libtool --finish $ROOT/mod_wsgi/usr/lib64/httpd/modules
else
    echo "mod_wsgi already built, skipping"
fi
)
# Create the project virtualenv (pyvenv) once, activate it, and show
# which python is now in use.
if [[ ! -r "$MyProjectPath/bin/activate" ]] ; then
    "$PYTHONHOME/bin/python3.5" "$PYTHONHOME/bin/pyvenv" "$MyProjectPath"
else
    echo "$MyProjectPath already there, skipping..."
fi
set +u # I think this is a bug with activate
source "$MyProjectPath/bin/activate"
set -u
echo
#echo "PYTHONHOME=$PYTHONHOME" # activate will unset $PYTHONHOME
echo "which python:"
which python
echo "python -V"
python -V
echo
# Wire the freshly built mod_wsgi into Apache and point a vhost fragment
# at the project's wsgi.py (the unquoted EOF lets $vars expand here).
echo "LoadModule wsgi_module $ROOT/mod_wsgi/usr/lib64/httpd/modules/mod_wsgi.so" > /etc/httpd/conf.d/$MyProject-wsgi.conf
cat << EOF > /etc/httpd/conf.d/$MyProject.conf
<Directory "/var/www/html/$MyProject/">
AllowOverride All
Order allow,deny
Allow from all
</Directory>
WSGIScriptAlias /$MyProject $ROOT/$MyProject-site/$MyProject/$MyProject/wsgi.py
WSGIPythonPath $ROOT/$MyProject-site/$MyProject:$MyProjectPath/lib/python3.5/site-packages
#WSGIApplicationGroup %{GLOBAL}
<Directory $ROOT/$MyProject-site/$MyProject/$MyProject>
<Files wsgi.py>
#Require all granted
</Files>
</Directory>
EOF
# use the fill path for pip else it installs in the global python installation instead of our venv
$ROOT/python35/bin/pip3.5 install https://www.djangoproject.com/download/1.8.13/tarball/
#$ROOT/python35/bin/pip3.5 install django
# MySQL
#$ROOT/python35/bin/pip3.5 install mysqlclient
# postgrSQL
#$ROOT/python35/bin/pip3.5 install psycopg2
# iPython
#$ROOT/python35/bin/pip3.5 install ipython
| true |
7cb13a42ec99e81e52545f70817947333f60da50 | Shell | alroman/log-server | /scripts/data/clean_sql.sh | UTF-8 | 1,028 | 3.921875 | 4 | [] | no_license | #!/bin/bash
#
# This script will take the mdl_log SQL file and filter it so that
# only the INSERT rows are left in a convenient format
# cat log.sql | grep -E '\([0-9]+,.*\)' | sed 's|),(|),\n(|g' > temp.sql
# $1 = input file  (default: <timestamp>.sql)
# $2 = output file (default: <timestamp>.out.sql)
# $3 = id of the last row already processed; everything up to and
#      including that row is skipped so repeated runs can resume
DATE=$(date +'%Y-%m-%d_%H%M')
FILE_IN="$DATE.sql"
OUT_FILE="$DATE.out.sql"
POINTER="$3"
if [ -n "$1" ]; then
	FILE_IN=$1
fi
# Check if file exists
if [[ ! -e "$FILE_IN" ]]; then
	echo "1"
	# exit $?
fi
if [ -n "$2" ]; then
	OUT_FILE=$2
fi
# Create preliminary
echo "INSERT INTO \`mdl_log\` VALUES " > "$OUT_FILE"
# Run command
# grep: filter out rows with pattern: (123, 1234567890, ...)
# sed: separate the rows with newline, so that we get one row per line
# last sed: resume after the pointer row. The pattern used to be
# single-quoted, so $POINTER was never expanded and (with GNU sed) the
# unmatched /addr2/ deleted every row to end of file. Double-quote the
# pattern and only apply the resume filter when a pointer was given.
if [ -n "$POINTER" ]; then
	grep -E '\([0-9]+,.*\)' "$FILE_IN" | sed 's|),(|),\n(|g' | sed -e "1,/($POINTER,/d" >> "$OUT_FILE"
else
	grep -E '\([0-9]+,.*\)' "$FILE_IN" | sed 's|),(|),\n(|g' >> "$OUT_FILE"
fi
# exit status
echo $?
# Don't exit like this.. python doesn't like it!
# exit $?
c1ac94153a9fbb3ed929e5c5d4d940007dd4fac5 | Shell | bridgecrew-perf7/postfacto-deployment | /tasks/cf-lib.sh | UTF-8 | 3,274 | 3.609375 | 4 | [
"Apache-2.0"
] | permissive | cf_auth() {
local opsman_domain_or_ip_address=${1:-127.0.0.1}
local opsman_username=${2:-user}
local opsman_password=${3:-pass}
om_cli="om-linux -k -t https://$opsman_domain_or_ip_address -u $opsman_username -p $opsman_password"
cf_guid=$(${om_cli} curl --path /api/v0/deployed/products | jq -r '.[] | select(.type == "cf") | .guid')
cf_sys_domain=$(${om_cli} curl --path /api/v0/staged/products/${cf_guid}/properties \
| jq -r '.properties[".cloud_controller.system_domain"].value')
cf_user=$($om_cli credentials -p cf -c .uaa.admin_credentials -f identity)
cf_password=$($om_cli credentials -p cf -c .uaa.admin_credentials -f password)
# Create org and space where nozzle is deployed
set +x
cf api api.${cf_sys_domain} --skip-ssl-validation
cf auth ${cf_user} ${cf_password}
cf target -o demo -s demo
}
# Print the guid of service instance $1 (stdout), suppressing stderr.
# NOTE: this definition is dead code — it is shadowed by the later
# cf_service_exists further down in this file.
# Fix: quote the expansion so instance names with spaces or glob
# characters are passed to `cf` as a single argument.
cf_service_exists() {
  local cf_service_name=${1}
  cf service "$cf_service_name" --guid 2>/dev/null
}
# Print the guid of service instance $1; prints nothing when the
# service cannot be looked up.
function cf_get_service_instance_guid() {
  local instance=${1:?service_instance null or not set}
  local guid
  # Discard stderr (and the "FAILED" chatter) from the lookup; only
  # emit the guid when the cf call actually succeeded.
  if guid=$(CF_TRACE=false cf service "$instance" --guid 2>/dev/null); then
    printf '%s\n' "$guid"
  fi
}
# Succeed (status 0) when service instance $1 exists, fail otherwise.
function cf_service_exists() {
  local instance=${1:?service_instance null or not set}
  local guid
  guid=$(cf_get_service_instance_guid "$instance")
  # A non-empty guid is the existence signal.
  test -n "$guid"
}
# Create a service instance: cf_create_service SERVICE PLAN NAME
# [BROKER] [JSON-CONFIG] [TAGS]. Optional arguments are appended as
# -b / -c / -t flags only when non-empty.
function cf_create_service() {
  local service=${1:?service null or not set}
  local plan=${2:?plan null or not set}
  local service_instance=${3:?service_instance null or not set}
  local broker=${4:-}
  local configuration=${5:-}
  local tags=${6:-}
  # Assemble the full command in an array so every argument survives
  # quoting intact.
  local cmd=(cf create-service "$service" "$plan" "$service_instance")
  [ -n "$broker" ] && cmd+=(-b "$broker")
  [ -n "$configuration" ] && cmd+=(-c "$configuration")
  [ -n "$tags" ] && cmd+=(-t "$tags")
  "${cmd[@]}"
}
# Block until service instance $1 finishes provisioning.
# $2 is an optional timeout in seconds (default 1200). Exits the shell
# with status 1 when the instance is missing, provisioning fails, or
# the timeout elapses.
function cf_wait_for_service_instance() {
  local service_instance=${1:?service_instance null or not set}
  local timeout=${2:-1200}
  local guid
  guid=$(cf_get_service_instance_guid "$service_instance")
  if [ -z "$guid" ]; then
    echo "Service instance does not exist: $service_instance"
    exit 1
  fi
  local started
  started=$(date +%s)
  echo "Waiting for service: $service_instance"
  while :; do
    # Ask CC for the async operation state of this instance.
    local state
    state=$(cf curl "/v2/service_instances/$guid" | jq -r .entity.last_operation.state)
    case "$state" in
      succeeded)
        echo "Service is ready: $service_instance"
        return
        ;;
      failed)
        local description
        description="$(cf curl "/v2/service_instances/$guid" | jq -r .entity.last_operation.description)"
        echo "Failed to provision service: $service_instance error: $description"
        exit 1
        ;;
    esac
    # Still in progress: enforce the deadline, then poll again.
    if (( $(date +%s) - started >= timeout )); then
      echo "Timed out waiting for service instance to provision: $service_instance"
      exit 1
    fi
    sleep 15
  done
}
c664e7bb9244eb9d732f74f8e0bd616e5cef6dba | Shell | ElvenMonky/papp | /scripts/publish.sh | UTF-8 | 371 | 2.75 | 3 | [] | no_license |
# Package the prebuilt binary, then publish (or republish) it only when
# the commit message carries the matching marker tag.
./node_modules/.bin/node-pre-gyp package ${NPM_FLAGS}
COMMIT_MESSAGE=$(git show -s --format=%B $TRAVIS_COMMIT | tr -d '\n')
# A quoted right-hand side of =~ is matched literally, so these are
# substring checks for the bracketed markers, not regexes.
if [[ ${COMMIT_MESSAGE} =~ "[publish binary]" ]]; then
./node_modules/.bin/node-pre-gyp publish ${NPM_FLAGS}
elif [[ ${COMMIT_MESSAGE} =~ "[republish binary]" ]]; then
./node_modules/.bin/node-pre-gyp unpublish publish ${NPM_FLAGS}
fi;
| true |
174d7efd9c4a797c195ff73b5e2ab52587198467 | Shell | sunu-nandakumar/shellProgramming | /sequence-statement/dice.sh | UTF-8 | 103 | 2.765625 | 3 | [] | no_license | #!/bin/bash -x
# Roll two six-sided dice; print the first roll, then the total.
roll_die() {
	echo $(( RANDOM % 6 + 1 ))
}
dice=$(roll_die)
echo "$dice"
dicee=$(roll_die)
add=$(( dice + dicee ))
echo "$add"
| true |
151e9633235f0a61c8b1619e9638aa2c3bec6279 | Shell | CosmicToast/toasty-zsh | /plugins/xterm-title | UTF-8 | 226 | 2.53125 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] | permissive | function xterm-title {
print -Pn '\e]0;%n@%m $(history $HISTCMD | cut -b8-)\a'
}
# Refresh the terminal title before each prompt, before each command,
# and whenever the working directory changes.
autoload -Uz add-zsh-hook
add-zsh-hook precmd xterm-title
add-zsh-hook preexec xterm-title
add-zsh-hook chpwd xterm-title
# vim: ft=zsh
| true |
1d5f8dcbea804295f6f2c25223b4410f7374951c | Shell | tekknikk/ansible-hypertable | /Library/aws_cli | UTF-8 | 1,302 | 4.25 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# Absolutely simple wrapper around the `awscli` Python module
# to avoid unnecessary JSON template manipulations
#
# Exit upon any error
set -e
#
# Ansible transports arguments to modules in a file. The
# path to the arguments file is in $1, and the file
# contains the module's arguments like this:
#
#   key1="value 1" key2=value2 ...
#
# Hence we simply source that file here
. "$1"
# There is just one required argument: the 'commandline' variable set
# by the sourced arguments file.
if [ -z "$commandline" ]; then
    # The message used to be assigned to an unused variable and never
    # shown; print it to stderr before bailing out.
    echo "'commandline' is a required argument" >&2
    exit 1
fi
PROG=${0##*/}
# Directory to store all temporary files in
TMPDIR="/tmp/$PROG.$$"
mkdir -p "$TMPDIR"
# Clean up after yourself on exit. The handler is defined below; bash
# resolves the trap target by name when the trap actually fires.
trap '_exit_trap' 0 HUP INT QUIT ABRT TERM
# Function to execute upon termination: drop the temp directory and
# propagate the original exit status.
_exit_trap()
{
    exit_status=$?
    # Clear the traps so the final `exit` does not re-enter this handler
    trap 0 HUP INT QUIT ABRT TERM
    if [ -d "$TMPDIR" ]; then
        rm -rf "$TMPDIR"
    fi
    exit $exit_status
}
# $commandline stays unquoted on purpose: it must word-split into the
# aws sub-command and its options.
echo aws $commandline > "$TMPDIR/commandline.$$"
# Run the generated command line and normalize its output:
#  - quote bare JSON booleans (true/false) so downstream parsing treats
#    them as strings
#  - emit an empty object {} when the command produced no output
{
    [ $? -eq 0 ] || exit 1
    perl -pne '{ s/:[ \t](false|true)/: "\1"/g;
                 $cnt++ if !/^\s+?$/; }
             END{ if ($cnt == 0) {
                     printf "{}\n";
                 }
             }'
}<<EOF
$(/bin/bash "$TMPDIR/commandline.$$")
EOF
d72ca3ef74908c93b5c9e29daf88b6f7ad216053 | Shell | jeremyss/cisco | /cisco-commands.sh | UTF-8 | 2,516 | 3.453125 | 3 | [] | no_license | #!/usr/bin/expect
# Expect script: telnet to every host in a hosts file, log in with the
# credentials typed by the operator, run each command from a commands
# file, and log the session to <host>-<fileID>-<date>.txt.
match_max 99999
if { [lindex $argv 0] == "-h" || [lindex $argv 0] == "--help" } {
puts "\nUsage:\n\tcisco-commands.expect \'host-file\' \'commands-file\' \'fileID\'\nEx:\n\tcisco-commands.expect hosts.txt commands.txt interfaces\n";
exit 1;
} elseif { [lindex $argv 0] == "" } {
puts "\nUsage:\n\tcisco-commands.expect \'host-file\' \'commands-file\' \'fileID\'\nEx:\n\tcisco-commands.expect hosts.txt commands.txt interfaces\n";
exit 1;
}
#Get username and password
send_user "Username: "
expect_user -re "(.*)\n" {set user $expect_out(1,string)}
send_user "Password: "
stty -echo
expect_user -re "(.*)\n" {set password $expect_out(1,string)}
send_user "\n"
stty echo
#End get username and password
set prompt "#" ;# -- main activity
set DATE [exec date +%0m-%0d-%0y-%0H-%0M-%0S]
# Run every command from the commands file against the spawned session;
# terminal length 0 disables paging so expect never stalls on --More--.
proc dostuff { query DATE host commands } {
;# do something with currenthost
send -- "\r"
expect "#"
send -- "terminal length 0\r"
expect "#"
#run commands in commands file
foreach commnd [split $commands "\n"] {
send -- "$commnd\r"
expect "#"
send -- "\r"
expect "#"
}
#End run commands in commands file
return} ;# -- start of task
#Open hosts file
set hostsFile [lindex $argv 0]
set fd [open $hostsFile r]
set hosts [read -nonewline $fd]
close $fd
#End open hosts file
#Open commands file
set commandsFile [lindex $argv 1]
set fd [open $commandsFile r]
set commands [read -nonewline $fd]
close $fd
#End open commands file
#Set file name
set query [lindex $argv 2]
#End set file name
# Per host: spawn telnet, answer login/password prompts (patterns drop
# the first letter so they match both upper- and lower-case prompts),
# then log the command run; a timeout leaves a *-timed-out-*.txt marker.
foreach host [split $hosts "\n" ] {
set timeout 5
spawn /usr/bin/telnet $host
while (1) {
expect {
timeout {
set tof [open $host-$query-timed-out-$DATE.txt w]
close $tof
break
}
"sername: " {
send -- "$user\r"
}
"assword: " {
send -- "$password\r"
}
"ogin: " {
send -- "$user\r"
}
"assword: " {
send -- "$password\r"
}
"$prompt" {
exp_log_file -a $host-$query-$DATE.txt
dostuff $query $DATE $host $commands
exp_log_file
break
}
}
}
expect "$prompt"
send -- "exit\r"
}
37dbd0ee3a1c9728f5b8e5006aec884f5d2ea50b | Shell | tobby-lie/Cyber-Security-Programming-Powershell-and-Bash-Scripting-for-Security-Administration | /Bash/NetworkChangeMonitor.sh | UTF-8 | 1,521 | 3.796875 | 4 | [] | no_license | #!/bin/bash
# Monitor 192.168.10.0/24: take a baseline nmap scan, rescan every five
# minutes, and log any port that is open now but was not in the baseline.
# initialize start time (epoch seconds, UTC)
start_time="$(date -u +%s)"
while true
do
# get all open ports and put in log file
nmap -vv 192.168.10.*/24 | grep -E "Discovered open port" > initial_output.txt
# put each line of the log file into an array; mapfile does this
# directly (the original also filled an unused myArray and then
# re-read the same file line by line)
init_arr=()
if [ -s initial_output.txt ]
then
mapfile -t init_arr < initial_output.txt
fi
# get elapsed time and print out
curr_time="$(date -u +%s)"
elapsed="$((curr_time - start_time))"
echo "Current time: $elapsed seconds"
sleep 300
# get all discovered ports in second log file
nmap -vv 192.168.10.*/24 | grep -E "Discovered open port" > new_output.txt
# get each line of log file into array
fin_arr=()
if [ -s new_output.txt ]
then
mapfile -t fin_arr < new_output.txt
fi
# for every element in the second array, check whether it existed in
# the baseline; if not, record the newly opened port in the log file,
# otherwise note that nothing new was seen
for i in "${fin_arr[@]}"
do
if [[ " ${init_arr[*]} " != *" $i "* ]]; then
curr_time="$(date -u +%s)"
elapsed="$((curr_time - start_time))"
echo "Time stamp: $elapsed seconds - $i" >> logfile.txt
else
curr_time="$(date -u +%s)"
elapsed="$((curr_time - start_time))"
echo "Time stamp: $elapsed seconds - no open ports open since last execution" >> logfile.txt
fi
done
done
| true |
e1eb6bf049466b273e32ef88a4fc05a6f089095c | Shell | mmehride/MatRox_RU | /scripts/testSTFlops | UTF-8 | 1,152 | 3.078125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#SBATCH -A mit162
#SBATCH --job-name="Strumpack"
#SBATCH --output="ml.%j.%N.out"
#SBATCH --partition=compute
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=24
#SBATCH --export=ALL
#SBATCH -t 01:30:00
## all SPD matrix files stored in dense column major format
declare -a filearray=(
"../data/letter20000.points.bin"
"../data/pen10992.points.bin"
"../data/Gas13910.points.bin"
"../data/unit32000.points.bin"
)
module load gnu/6.2.0
# Per-dataset dimensions, parallel to filearray. Bash array elements are
# whitespace-separated: the previous commas became part of the values
# ("16," etc.) and were passed verbatim to the executable.
array=(16 16 129 2)
if [ -f stout ]; then
    rm -rf stout
fi
executable="./artifact_strumpack.x"
echo "#@DATE"
date
i=0
# Run each dataset single-threaded and with 12 threads, appending the
# solver output to stout.
for filename in "${filearray[@]}"
do
    d=${array[$i]}
    echo "#dim=$d"
    export OMP_NUM_THREADS=1
    $executable "$filename" "$d" 5 | tee -a stout
    export OMP_NUM_THREADS=12
    $executable "$filename" "$d" 5 | tee -a stout
    i=$((i + 1))
done
# Extract the evaluation times and pair them up (1-thread, 12-thread).
grep "Evaluation time" stout | grep -Eo '[0-9]*+[.]+[0-9]*' > eval.csv
awk -F "\"*,\"*" '{print $1}' eval.csv | xargs -n2 -d'\n' > tstscal.csv
declare -a tfilearray=(
"letter"
"pen"
"gas"
"unit"
)
# -f: do not fail on the first run when data.csv does not exist yet
rm -f data.csv
for filename in "${tfilearray[@]}"
do
    echo "$filename," >> data.csv
done
paste data.csv tstscal.csv > stflops.csv
rm tstscal.csv
4440150dd8167cc030ecb0a3850c2024b14c7e53 | Shell | jartigag/dotfiles | /scripts/.bring_to_front.sh | UTF-8 | 679 | 3.609375 | 4 | [] | no_license | #!/bin/bash
#author: @jartigag
#date: 2022-03-02
#version: 1.0
#
# Associate keyboard shortcuts with this script + different arguments,
# so it brings the desired window to front when a shortcut is pressed.
# The resolved window id is cached in $widFile so later invocations
# skip the lookup.
#
#usage:
#    $ bash .bring_to_front.sh "Spotify" /tmp/spotify-winID.wid
#
#requirements:
#    $ sudo apt install wmctrl
windowTitle=$1
widFile=$2
if [ ! -f "$widFile" ]; then
    # Poll until a window whose title matches appears. Quoting the
    # expansions keeps multi-word titles and paths intact (they used
    # to be word-split).
    flagDone=$(wmctrl -l | grep "$windowTitle" | cut -d " " -f1)
    while [ -z "$flagDone" ]
    do
        flagDone=$(wmctrl -l | grep "$windowTitle" | cut -d " " -f1)
        sleep 1
    done
    echo "$flagDone" > "$widFile"
fi
# Activate the cached window; if the id is stale, drop the cache so the
# next run performs a fresh lookup.
wmctrl -ia "$(< "$widFile")" || rm "$widFile"
| true |
7e4a505af1bd1e186f918a94ce7730de3c40f79a | Shell | abrbon/LetsEncrypt_DuckDNS_script_toolset | /certbot_firstrun_script.sh | UTF-8 | 652 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env bash
#
# Start Certbot for the first time to generate all files and folders.
#
set -e
# Set global vars (LOGDIR, STAGING, DRY_RUN, ... used below).
source /usr/local/certbot_files/certbot_config_vars.sh
# Create log directory.
if [ ! -d "$LOGDIR" ]
then
	# The original chained `mkdir && echo; chmod || echo Error`, so under
	# set -e a mkdir failure aborted the script before the error message
	# could print. Use an explicit branch instead.
	if mkdir -p "$LOGDIR" >/dev/null 2>&1
	then
		echo "Directory $LOGDIR created."
		chmod o=,g=r "$LOGDIR"
	else
		echo "Error: Failed to create $LOGDIR directory." >&2
		exit 1
	fi
else
	echo "Directory $LOGDIR already exists."
fi
# $STAGING / $DRY_RUN / $REGISTER_EMAIL stay unquoted on purpose: they
# hold optional flags and must word-split (empty means "no flag").
certbot certonly $STAGING $DRY_RUN $REGISTER_EMAIL --manual --preferred-challenges=dns \
	--manual-auth-hook "$CERTBOT_WRKDIR/$AUTH_HOOK" \
	--manual-cleanup-hook "$CERTBOT_WRKDIR/$CLEANUP_HOOK" \
	-d "$SUB_DOMAIN_NAME.$DOMAIN"
| true |
406429a5301c9db5659e26bccd02e484b2328cfd | Shell | veltzer/demos-bash | /src/examples/core/glob/nullglob_for_a_piece_of_code.bash | UTF-8 | 817 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/bin/bash -u
# Demonstrates strict globbing with nullglob, enabled only around the
# code that needs it, plus a save/restore helper pair.
#
# NOTES:
# - Saving the nullglob state needs a subshell ($(shopt -p nullglob)),
#   which is slow; no better alternative is known.
#
# References:
# - https://stackoverflow.com/questions/9126060/is-there-an-easy-way-to-set-nullglob-for-one-glob
shopt -s nullglob
for match in /tmp/doesntexist*.foo; do
	echo "$match"
done
shopt -u nullglob
# To return nullglob to its former state we must capture it first:
# `shopt -p` prints the exact command that re-establishes the setting.
function save_nullglob() {
	nullglob=$(shopt -p nullglob)
}
function restore_nullglob() {
	$nullglob
}
save_nullglob
shopt -s nullglob
for match in /tmp/doesntexist*.foo; do
	echo "$match"
done
restore_nullglob
| true |
8eb5606d95420289157ef72a34f2539fd25b39db | Shell | hepyu/shell-script | /stat/lantency/manual-cal-lantency-byDay.sh | UTF-8 | 691 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | #command example:
#sh manual-cal-lantency-byDay.sh /data/logs/userspace/user-space-request.log 2014-01-01 8
#notice:
#the format of logfile name should be like 'user-space-request.log.2014-01-01-00.log', to be hour,notice the date format during the file name.
LOGFILE_PREFIX=$1
DAY=$2
TOP_N=$3
PWD_PATH=`pwd`"/"
if [ $LOGFILE_PREFIX = '-' ];
then
echo "LOGFILE_PREFIX must be appointed.";
exit 0;
fi;
if [ $DAY = '-' ];
then
echo "Day must be appointed.";
exit 0;
fi;
if [[ $TOP_N =~ ^[0-9]+$ ]]
then
echo "TOP_N is valid.";
else
echo "invalid TOP_N.";
exit 0;
fi
cd $PWD_PATH"base_code"
sh cal-lantency-topN-byDay.sh $LOGFILE_PREFIX $DAY $TOP_N $PWD_PATH
| true |
00301a90cdc8207f032d475c4435634f2dd532da | Shell | thiagoa/dotfiles | /linux/packages/setup_terminal_packages.sh | UTF-8 | 2,929 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# Install terminal tooling, build dependencies for Ruby and Emacs, and
# (optionally) Linuxbrew; set LINUXBREW=false to skip the brew install.
LINUXBREW=${LINUXBREW:-true}
sudo apt install curl
echo "Installing terminal deb packages"
# libnotify-bin -> notify-send command
# libwxbase3.0-0v5 and libwxgtk3.0-gtk3-0v5 -> Veracrypt deps
sudo apt install \
     silversearcher-ag \
     ripgrep \
     redis \
     nginx \
     rlwrap \
     zsh \
     tmux \
     python3-pip \
     neovim \
     awscli \
     tree \
     git \
     xdg-utils
# Ruby dependencies
sudo apt install \
     libsecret-1-dev \
     libssl-dev \
     libreadline-dev \
     libxslt-dev \
     libxml2-dev \
     libpq-dev
# Dependencies to compile emacs
echo "Installing Emacs dependencies. If this fails, uncomment the debian deb-src repository in /etc/apt/sources.list"
echo "Trying to do that automatically..."
apt_sources_file=/etc/apt/sources.list
deb_src_pattern='deb-src.+universe'
uncommented_deb_src=`egrep "^$deb_src_pattern" $apt_sources_file`
# If no deb-src line is active, uncomment the first commented one in
# place and refresh the package index so build-dep can work.
if [[ -z "$uncommented_deb_src" ]]; then
    commented_deb_src=`egrep "^# *${deb_src_pattern}" $apt_sources_file | head -1 | tr -d "\n"`
    if [[ -n "$commented_deb_src" ]]; then
        sudo sed -i "\;$commented_deb_src;s;^# *;;g" $apt_sources_file
        sudo apt update
    fi
fi
sudo apt install libgccjit0 libgccjit-10-dev
sudo add-apt-repository ppa:ubuntu-toolchain-r/ppa \
    && sudo apt-get update -y \
    && sudo apt-get install -y \
            gcc-10 \
            libgccjit0 \
            libgccjit-10-dev \
            gcc-10 \
            g++-10 \
            libjansson4 \
            libjansson-dev \
            build-essential \
            libgtk-3-dev \
            libgnutls28-dev \
            libtiff5-dev \
            libgif-dev \
            libjpeg-dev \
            libpng-dev \
            libxpm-dev \
            libncurses-dev \
            texinfo
sudo apt-get build-dep -y emacs
# Linuxbrew
if [[ "$LINUXBREW" == "true" ]] && [[ ! -d /home/linuxbrew ]]; then
    echo "Installing linuxbrew..."
    if ! /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"; then
        echo "Failed to install Linuxbrew. Are you on arm64? Try running LINUXBREW=false ~/.dotfiles/setup.sh"
    fi
    test -d ~/.linuxbrew && eval "$(~/.linuxbrew/bin/brew shellenv)"
    test -d /home/linuxbrew/.linuxbrew && eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"
fi
# WSL systemctl hack
# User systemctl services are located in ~/.config/systemd/user. That's where
# linuxbrew installs them. After installing this hack, you can install services
# with "brew service start ..." as normal
if uname -r | grep microsoft > /dev/null; then
    systemctl_file=/usr/local/bin/systemctl
    if [[ ! -f "$systemctl_file" ]]; then
        sudo wget https://raw.githubusercontent.com/gdraheim/docker-systemctl-replacement/master/files/docker/systemctl.py -O $systemctl_file
        sudo chmod +x $systemctl_file
    fi
fi
| true |
08d3f0e5720610ba5d02afc5d04a6e0ed824b015 | Shell | daoducnha/shell_script | /chap17/test13.sh | UTF-8 | 280 | 3.828125 | 4 | [] | no_license | #!/bin/bash
# using recursion
# Compute n! for a non-negative integer; prints the result on stdout.
# Call as: result=$(factorial n)
function factorial {
	if [ "$1" -le 1 ]
	then
		# 0! and 1! are both 1. Accepting 0 here also fixes the
		# previous unbounded recursion for input 0 (the old base
		# case only matched exactly 1).
		echo 1
	else
		local tmp=$(( $1 - 1 ))
		local result=$(factorial "$tmp")
		echo $(( result * $1 ))
	fi
}
# Prompt the user for a number and report its factorial.
read -p "Enter value: " value
result=$(factorial $value)
echo "The factorial of $value is: $result"
| true |
bdc98e7691df33c8b7b1ef3ae4630ed9efb64990 | Shell | timbertson/app-customisations | /bin/remove-unwanted-stereo-pictures | UTF-8 | 628 | 3.328125 | 3 | [] | no_license | #!/bin/bash
set -e
# Dry-run by default: only -f/--force actually trashes files. trash_cmd
# is a piep (Python pipeline) expression spliced into the query below.
if [ "$1" = '-f' -o "$1" = '--force' ]; then
	trash_cmd='sh("trash", file)'
else
	trash_cmd='True'
	echo "(printing; use -f or --force to really delete)" >&2
fi
set -u
# For each source directory, trash (or list) every file that has no
# counterpart in the sibling cross-eyed/ directory.
for dir in orig straight; do
	if [ -d "$dir" ]; then
		(cd $dir && \
			piep -i <(ls -1) \
				'file = p | path.join("..","cross-eyed", p) | not path.exists(p) | '"$trash_cmd"' | "removed", file')
		#piep -i <(ls -1) -e 'outname = lambda x: "-out".join(path.splitext(x)).replace("-out-out","-out")' \
		#'file = p | outname(p) | path.join("..","cross-eyed", p) | not path.exists(p) | sh("trash", file) | "removed", file')
	fi
done
| true |
36a92b8f8d1e4885ccacd5adf45982c606717323 | Shell | glynastill/slony1-engine | /tools/build_release.sh | UTF-8 | 1,969 | 3.84375 | 4 | [
"PostgreSQL"
] | permissive | #!/bin/sh
############################################################################
#
# Builds a slony release in an automated way.
#
# usage: build_release.sh git_branch version pg_configdir
############################################################################
GIT_TAG=$1
REL_VERSION=$2
PGCONFIG_DIR=$3
if [ "$GIT_TAG" = "" -o "$REL_VERSION" = "" \
-o "$PGCONFIG_DIR" = "" ]
then
echo "usage: build_release.sh GIT_TAG VERSION PGCONFIG_DIR"
exit -1
fi
DIR=`basename $PWD`
if [ "$DIR" != "slony1-engine" ]
then
echo "must be in slony1-engine directory"
exit -1
fi
# Tag the release: REL_VERSION "2.2.4" becomes tag "REL_2_2_4".
git checkout $GIT_TAG
REL_TAG=REL_`echo $REL_VERSION|sed -e 's|\.|_|g'|tr '[:lower:]' '[:upper:]' `
git tag -a -m "Tagging $REL_VERSION" $REL_TAG
if [ -f "/tmp/slony1-engine-$REL_VERSION.tar" ]
then
echo "/tmp/slony1-engine-$REL_VERSION.tar exists please delete first"
exit -1
fi
git archive -o /tmp/slony1-engine-$REL_VERSION.tar $REL_TAG
if [ $? -ne 0 ]
then
echo "git archive failed"
exit -1
fi
cd ..
if [ -f "slony1-$REL_VERSION" ]
then
echo "slony1-REL_VERSION directory exists. please delete first"
exit -1
fi
# Unpack the archive into a clean sibling directory and build it there.
mkdir slony1-$REL_VERSION
cd slony1-$REL_VERSION
tar -xf /tmp/slony1-engine-$REL_VERSION.tar
autoconf
./configure --with-pgconfigdir=$PGCONFIG_DIR --with-docs
make
cd doc/adminguide
make html
make html
make man
# build PDF. This requires dblatex
#
make slony.xml
make slony.pdf
cd ..
cd ..
sh tools/release_checklist.sh
# Interactive gate: require an explicit Y before packaging tarballs.
ANS=""
while [ "$ANS" != "Y" -a "$ANS" != "N" ]
do
echo "Does the release checklist look okay? (Y/N)"
read ANS
done
if [ "$ANS" != "Y" ]
then
exit -1;
fi
cd ..
tar -cjf slony1-$REL_VERSION-docs.tar.bz2 slony1-$REL_VERSION/doc/adminguide/*html slony1-$REL_VERSION/doc/adminguide/man[0-9] slony1-$REL_VERSION/doc/adminguide/*png slony1-$REL_VERSION/doc/adminguide/*css slony1-$REL_VERSION/doc/adminguide/slony.pdf
cd slony1-$REL_VERSION
make distclean
cd ..
tar -cjf slony1-$REL_VERSION.tar.bz2 slony1-$REL_VERSION
| true |
e18332f109f80b989bb0b8ffce6304b1b39c31f0 | Shell | jlsam/scripts-crypto-nodes | /Polis/setup-Polis-VPS.sh | UTF-8 | 8,713 | 3.984375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# assumes root login and requires pre-existing SSH key
# run script with dot/space (source): '. setup_polis.sh' or 'source setup_polis.sh' to preserve directory changes.
# This script will: 1) fix locale, 2) update system and install dependencies, 3) create a service user to run the node
# 4) create a sudo user, 5) set SSHd to use keys only, to not accept root login (only accepts the new sudo user) and set other security restrictions
# 6) configure UFW, 7) download wallet and place execs in /usr/local/bin, 8) create a complete wallet .conf
# 9) create logrotate rules for debug.log, 10) create a systemd service to run the node,
# 11) setup Sentinel, 12) disable root login and 13) reboot to apply changes and start the node
# Setup parameters // change default values - accounts and key - before running the script
new_NOlogin="nologin"
new_sudoer="sudoer"
wallet_genkey="---" # Needs to be a valid key, otherwise the node won't even run
# Get the latest download link from https://github.com/polispay/polis/releases
installer_url="https://something.tar.gz"
# Setting locale for en_US.UTF-8, but it should work with your prefered locale too.
# Depending on your location, you may need to add/modify locales here to avoid errors,
# ex. "en_GB.UTF-8 de_DE.UTF-8"
locs="en_US.UTF-8"
# !!! NO NEED FOR MORE EDITS BEYOND THIS POINT
# Check for existing SSH key; the grep exit status decides the branch
# (the matched key line is also echoed to the terminal).
if grep -P "ssh-rsa AAAA[0-9A-Za-z+\/]+[=]{0,3} [^@]+@?[^@]+$" ~/.ssh/authorized_keys; then
  printf "\e[93mSSH key detected. Script will proceed.\n"
  read -n1 -rsp "$(printf 'Press any key to continue or Ctrl+C to exit...\e[0m')"
  echo
else
  printf "\e[93mSSH key NOT detected. Script will terminate.\n"
  printf "You can run SSH-key-setup.sh in your local machine to create and upload a SSH key to the server\n"
  printf "and after run this script remotely again.\e[0m"
  echo
  exit 1
fi
# Other checks: refuse to run until the placeholder values above have
# been replaced with real ones.
if [ "${wallet_genkey}" = "---" ]; then
  printf "\nPlease set your masternode genkey from the cold wallet and run again.\n"
  exit 1
elif [ "${new_NOlogin}" = "nologin" ]; then
  printf "\nPlease set your own username for the service account (no login) and run again.\n"
  exit 1
elif [ "${new_sudoer}" = "sudoer" ]; then
  printf "\nPlease set your own username with sudo access and run again.\n"
  exit 1
elif [ "${installer_url}" = "https://something.tar.gz" ]; then
  printf "\nPlease set the URL for the current wallet version and run again.\n"
  exit 1
fi
# Fix locale. Particularly important for python Sentinel installation
locale-gen ${locs}
# During the next command interactive choices, it should be enough to OK everything
#dpkg-reconfigure locales
# Update system & install packages
printf "\n\e[93mUpgrading Ubuntu...\e[0m\n"
apt update && apt -y upgrade
apt install -y virtualenv python-pip
echo
read -n1 -rsp "$(printf '\e[93mPress any key to continue or Ctrl+C to exit...\e[0m\n')"
echo
# Create service account
useradd -r -m -s /usr/sbin/nologin -c "masternode service user" ${new_NOlogin}
# Create login account with sudo permission
adduser ${new_sudoer}
usermod -aG sudo ${new_sudoer}
# Move SSH key to new user
mv ~/.ssh /home/${new_sudoer}/
chown -R ${new_sudoer}:${new_sudoer} /home/${new_sudoer}/.ssh/
chmod -R 700 /home/${new_sudoer}/.ssh/
# Edit sshd_config: each substitution also writes the changed line to
# stdout (the `w /dev/stdout` flag) so the operator can review it.
printf "\n\e[93m/etc/ssh/sshd_config edits:\e[0m\n"
sed -i -r -e "s/^#?PermitRootLogin yes/PermitRootLogin no/w /dev/stdout" \
    -e "s/^#?PasswordAuthentication yes/PasswordAuthentication no/w /dev/stdout" \
    -e "s/^#?ChallengeResponseAuthentication yes/ChallengeResponseAuthentication no/w /dev/stdout" \
    -e "s/^HostKey \/etc\/ssh\/ssh_host_dsa_key/#HostKey \/etc\/ssh\/ssh_host_dsa_key/w /dev/stdout" \
    -e "s/^HostKey \/etc\/ssh\/ssh_host_ecdsa_key/#HostKey \/etc\/ssh\/ssh_host_ecdsa_key/w /dev/stdout" \
    -e "s/^X11Forwarding yes/X11Forwarding no/w /dev/stdout" \
    -e "s/^#?(AuthorizedKeysFile.*)/\1/w /dev/stdout" /etc/ssh/sshd_config
echo -e "
# Specify MACs, Ciphers, and Kex algos
KexAlgorithms curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256
Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr
MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
# List of users allowed to login using SSH
AllowUsers ${new_sudoer}
" | tee -a /etc/ssh/sshd_config
systemctl daemon-reload
read -n1 -rsp "$(printf '\e[93mPress any key to continue or Ctrl+C to exit...\e[0m\n')"
echo
# Setup UFW
ufw disable
ufw default deny incoming
ufw default allow outgoing
ufw allow ssh/tcp
ufw limit ssh/tcp
ufw allow 24126/tcp # some coin nodes may need tcp and udp, in that case remove /tcp
ufw logging on
ufw --force enable
ufw status
read -n1 -rsp "$(printf '\e[93mPress any key to continue or Ctrl+C to exit...\e[0m')"
echo
# Setup Polis Masternode
# Download and install node wallet
installer_file="$(basename ${installer_url})"
wget ${installer_url}
tar -xvf ${installer_file}
# First path component inside the tarball = extracted top-level dir.
top_lvl_dir="$(tar -tzf ${installer_file} | sed -e 's@/.*@@' | uniq)"
cp -v ${top_lvl_dir}/bin/polis{d,-cli} /usr/local/bin
rm -v ${installer_file}
rm -Rv ${top_lvl_dir}
# Setup polis.conf
# https://github.com/polispay/polis-doc/tree/master/masternode-setup
# RPC credentials are random throwaways; the external IP is resolved
# via OpenDNS.
random_user="$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
random_pass="$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 26)"
ext_IP_addr="$(dig +short myip.opendns.com @resolver1.opendns.com)"
echo
mkdir -pv /etc/polis
printf "\n\e[93m .conf settings:\e[0m\n"
echo -e "rpcuser=${random_user}
rpcpassword=${random_pass}
rpcallowip=127.0.0.1
listen=1
server=1
daemon=1
masternode=1
logtimestamps=1
maxconnections=256
externalip=${ext_IP_addr}
masternodeprivkey=${wallet_genkey}
addnode=polis.seeds.mn.zone
addnode=polis.mnseeds.com
addnode=polispay.org
addnode=node1.polispay.org
addnode=node2.polispay.org
addnode=insight.polispay.org
addnode=insight2.polispay.org
addnode=explorer.polispay.org
addnode=199.247.2.29:24126
addnode=46.101.32.72:24126
addnode=144.202.19.190:24126
addnode=207.148.5.135:24126
addnode=89.47.165.165:24126
addnode=62.75.139.140:24126
addnode=207.148.5.135:24126
addnode=209.250.245.66:24126
addnode=199.247.3.98:24126
addnode=199.247.29.65:24126
addnode=45.32.149.254:24126
" | tee /etc/polis/polis.conf
read -n1 -rsp "$(printf '\e[93mPress any key to continue or Ctrl+C to exit...\e[0m')"
echo
# Setup logrotate
# Break debug.log into weekly files, compress and keep at most 5 older log files
printf "\n\e[93mCreating logrotate rules...\e[0m\n"
echo -e "/home/${new_NOlogin}/.poliscore/debug.log {
  rotate 5
  copytruncate
  weekly
  missingok
  notifempty
  compress
  delaycompress
}" | tee /etc/logrotate.d/polis-debug
# Setup systemd service file
# https://github.com/bitcoin/bitcoin/blob/master/contrib/init/bitcoind.service
printf "\n\e[93mCreating systemd service file...\e[0m\n"
echo -e "[Unit]
Description=Polis Masternode
After=network.target
[Service]
User=${new_NOlogin}
Group=${new_NOlogin}
# Creates /run/polisd owned by ${new_NOlogin}
RuntimeDirectory=polisd
Type=forking
ExecStart=/usr/local/bin/polisd -pid=/run/polisd/polisd.pid -conf=/etc/polis/polis.conf
ExecStop=/usr/local/bin/polis-cli -conf=/etc/polis/polis.conf stop
PIDFile=/run/polisd/polisd.pid
Restart=on-failure
RestartSec=20
TimeoutStopSec=60s
TimeoutStartSec=15s
StartLimitInterval=120s
StartLimitBurst=5
# Hardening measures
# Provide a private /tmp and /var/tmp.
PrivateTmp=true
# Mount /usr, /boot/ and /etc read-only for the process.
ProtectSystem=full
# Disallow the process and all of its children to gain
# new privileges through execve().
NoNewPrivileges=true
# Use a new /dev namespace only populated with API pseudo devices
# such as /dev/null, /dev/zero and /dev/random.
PrivateDevices=true
# Deny the creation of writable and executable memory mappings.
MemoryDenyWriteExecute=true
[Install]
WantedBy=multi-user.target
" | tee /etc/systemd/system/polisd.service
systemctl enable polisd.service
read -n1 -rsp "$(printf '\e[93mPress any key to continue or Ctrl+C to exit...\e[0m\n')"
echo
# Setup Polis Sentinel: clone, build the venv, and install the
# once-a-minute cron entry, all as the unprivileged service user.
sudo -H -u $new_NOlogin sh <<EOF
cd /home/${new_NOlogin}/
git clone https://github.com/polispay/sentinel.git /home/${new_NOlogin}/sentinel
cd sentinel/
virtualenv ./venv
./venv/bin/pip install -r requirements.txt
echo "* * * * * cd /home/${new_NOlogin}/sentinel && ./venv/bin/python bin/sentinel.py >/dev/null 2>&1" >> tmpcron
crontab tmpcron
rm tmpcron
EOF
# Disable root login
printf "\n\e[93mDisabling root login:\e[0m\n"
passwd -l root
# Reboot
printf "\n\e[93mScript completed.\n"
read -n1 -rsp "$(printf 'Press any key to reboot or Ctrl+C to exit...\e[0m\n')"
reboot
| true |
f785099aa02d8a6df5c020e705ae97bd74e87abf | Shell | nathanmyles/minikube-local-infrastructure | /enter_regcreds.sh | UTF-8 | 785 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env bash
# Interactively collect docker-registry credentials and create the
# corresponding Kubernetes imagePullSecret.
set -e
read -r -p "Secret name [regcred]: " name
name="${name:-regcred}"
read -r -p "Registry URL: " registry_url
read -r -p "Email: " email
read -r -p "Username [${email}]: " username
username="${username:-${email}}"
# -s keeps the password off the terminal.
read -r -p "Password: " -s password
# Echo the command being run, with the password masked.
echo "
kubectl create secret docker-registry ${name}
  --docker-server=${registry_url}
  --docker-password=********
  --docker-email=${email}
  --docker-username=${username}"
kubectl create secret docker-registry "${name}" \
  --docker-server="${registry_url}" \
  --docker-password="${password}" \
  --docker-email="${email}" \
  --docker-username="${username}"
# Final hint for the user. (This echo was previously piped into `true`,
# which silently discarded the whole message.)
echo "
Add this to pull a container from the repo:
spec:
  template:
    spec:
      imagePullSecrets:
      - name: ${name}"
2494c9eaebd0aa2b3cfa13e8f70705cfc915605d | Shell | ppvastar/jigg | /.checker/scripts/install-knp.sh | UTF-8 | 275 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e

# Remember where we started so we can return after the build.
# ($(...) replaces the legacy backtick form; the old "pwd ./" argument was
# ignored by bash's pwd builtin anyway.)
home_dir=$(pwd)

# KNP 4.19 source tarball from Kyoto University's NLP resource page.
url=http://nlp.ist.i.kyoto-u.ac.jp/nl-resource/knp/knp-4.19.tar.bz2
file=knp-4.19.tar.bz2
dir=knp-4.19

# download
wget "${url}"
# unpack bz2 file
tar -jxvf "${file}"
# build and install (install step needs root)
cd "${dir}"
./configure
make
sudo make install
cd "${home_dir}"
| true |
a980b3e72bb57ec85dee290e9416de3da9b6c8a0 | Shell | JiriHusak-lab/POC-T-REF-NODE2DB2 | /tests/tests.sh | UTF-8 | 452 | 2.78125 | 3 | [] | no_license | #!/bin/sh
# Wait until the "vote" service accepts TCP connections on port 80.
# (/dev/tcp is a bash feature, hence the explicit bash -c even under #!/bin/sh.)
while ! timeout 1 bash -c "echo > /dev/tcp/vote/80"; do
  sleep 1
done

# Cast one vote for option "b", then give the pipeline time to propagate it.
curl -sS -X POST --data "vote=b" http://vote > /dev/null
sleep 10

# Render the results page headlessly and check the vote was counted.
if phantomjs render.js http://result | grep -q '1 vote'; then
  echo -e "\\e[42m------------"
  echo -e "\\e[92mTests passed"
  echo -e "\\e[42m------------"
  exit 0
else
  echo -e "\\e[41m------------"
  echo -e "\\e[91mTests failed"
  echo -e "\\e[41m------------"
  exit 1
fi
| true |
4eeb5746b54dfcd9bb3652f65efbb2fe8daff40e | Shell | invernizzie/fiuba-tp-grupal-intro-2011 | /Servicios/Instalacion/raw_service.sh | UTF-8 | 354 | 3.6875 | 4 | [] | no_license | #! /bin/bash
# The first positional parameter names the package/service to check.
SERVICE=$1

# Fail early with a usage message when no service name was given;
# the old script would have queried dpkg with an empty pattern.
if [ -z "$SERVICE" ]; then
	echo "Usage: $0 <service>" >&2
	exit 1
fi

# "dpkg --get-selections <name>" prints "<name>\t<state>"; strip the tab and
# look for "<name>install", which marks the package as selected for install.
SERVICE_INSTALL=$(dpkg --get-selections "$SERVICE" | sed "s/\t//g" | grep "${SERVICE}install")

if [ -n "$SERVICE_INSTALL" ];
then
	echo "$SERVICE service is already installed.";
else
	echo "$SERVICE service is not installed.";
	apt-get install "$SERVICE"
fi
| true |
7ab90d03def97aac65148e381c73da08dd6451f0 | Shell | Silva97/cli-tools | /shellcoder | UTF-8 | 8,133 | 4.1875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Developed by Luiz Felipe <felipe.silva337@yahoo.com>
# GitHub: https://github.com/Silva97
# Ensure the NASM toolchain is available: both nasm (assembler) and ndisasm
# (disassembler) are used throughout this tool.
# command -v is the POSIX builtin replacement for the external `which`.
if ! command -v nasm > /dev/null; then
  echo "This script uses nasm and ndisasm - https://nasm.us/"
  echo "Please, install it:"
  echo "  $ sudo apt install nasm"
  exit 1
fi
# Default assembly bit-mode (16, 32 or 64; changed with /bits).
bits=32
# Scratch files used to round-trip instructions through nasm/ndisasm.
tempasm=$(mktemp "/tmp/XXXXXX.asm")
tempbin=$(mktemp "/tmp/XXXXXX.bin")
# Space-separated hex byte pairs considered invalid for the shellcode.
blacklist="00"
# Accumulated shellcode as "XX XX XX " hex-pair text (3 chars per byte).
shellcode=""
# Print $2 to stdout (no trailing newline) wrapped in the ANSI color named
# by $1. Only "red" is recognized; any other name emits the text followed by
# the reset sequence only.
pcolor() {
  local esc=""

  if [[ "$1" == "red" ]]; then
    esc="\x1b[31;1m"
  fi

  echo -ne "${esc}${2}\x1b[0m"
}
# Emit hex byte $1 followed by a space; bytes found in the global $blacklist
# are highlighted in red via pcolor, everything else is printed plain.
showbyte() {
  if grep -q "$1" <<<"$blacklist"; then
    pcolor red "$1 "
  else
    echo -n "$1 "
  fi
}
# Startup banner shown once before the interactive prompt.
echo "Developed by Luiz Felipe <felipe.silva337@yahoo.com>"
echo "GitHub: https://github.com/Silva97"
echo
echo "For see help: /help"
# Main REPL: each line starting with "/" is a shellcoder command; anything
# else is assembled with nasm and, if it contains no blacklisted bytes,
# appended to the accumulated shellcode.
while true; do
  read -ep "shellcoder> " command
  history -s "$command"

  if [ "${command:0:1}" == "/" ]; then
    # Word-split the command line into an array: list[0] is the command
    # (with leading "/"), list[1..] are its arguments.
    list=($command)

    case "${list[0]:1}" in
      help)
        case "${list[1]}" in
          e|exit)
            echo "Exit from the shellcoder."
            ;;
          b|bits)
            echo "Sets the bit-mode for assembly the instructions."
            echo "If value is not defined, just print the current bit-mode."
            ;;
          black|blacklist)
            echo "Sets the list of invalid bytes to the shellcode."
            echo "Any byte on this list is printed in another color."
            echo "Example: /blacklist 00 0A 0D 20 09"
            echo
            echo "If value is not defined, just print the current blacklist."
            echo "For set a void blacklist, run: /blacklist -"
            ;;
          erase)
            echo "Erases the current shellcode."
            ;;
          show)
            echo "Shows the current shellcode."
            echo "Formats:"
            echo " asm Shows the disassembly of the shellcode."
            echo " hexa Shows the shellcode in hexadecimal."
            ;;
          export)
            echo "Writes the shellcode in a file."
            echo " format The format to export."
            echo " filename The output file."
            echo
            echo "Valid formats:"
            echo " hexa Byte sequence in hexadecimal: 0A 0B 0C"
            echo ' string C string | char shellcode[] = "\x0A\x0B\x0C";'
            echo " array C array | char shellcode[] = {0x0A, 0x0B, 0x0C};"
            echo ' python Python string | shellcode = "\x0A\x0B\x0C"'
            echo " nasm NASM syntax | shellcode: db 0x0A, 0x0B, 0x0C"
            echo " raw Raw binary code"
            ;;
          sys|syscall)
            echo "Shows information about a syscall with the current bit-mode."
            echo "Example: /syscall write"
            ;;
          *)
            echo "/e, /exit Exit."
            echo "/b, /bits [bits] Sets or shows bits mode."
            echo "/black, /blacklist [XX XX..] Sets or shows bytes blacklist."
            echo "/erase Erases the shellcode."
            echo "/show <format> Shows the shellcode."
            echo "/export <format> [filename] Exports the shellcode."
            echo "/sys, /syscall <name> Shows syscall information."
            echo
            echo "For detailed help: /help command-name"
            ;;
        esac
        ;;
      e|exit)
        exit 0
        ;;
      b|bits)
        # With no argument, print the current mode; otherwise accept
        # only 16, 32 or 64.
        if [ -z "${list[1]}" ]; then
          echo "$bits"
        elif echo "${list[1]}" | grep -qE "^(16|32|64)$"; then
          bits=${list[1]}
        else
          echo "Error: Bits must be 16, 32 or 64."
        fi
        ;;
      black|blacklist)
        # The sed deletes every well-formed "XX " hex pair; an empty result
        # means the whole argument list was valid uppercase hex pairs.
        if [ -z "${list[1]}" ]; then
          echo "$blacklist"
        elif [ "${list[1]}" == "-" ]; then
          blacklist=""
        elif [ -z "$(echo "${list[*]:1}" | sed -E 's/([A-F0-9]{2}(\s|$))+//')" ]; then
          blacklist=${list[*]:1}
        else
          echo "Error: The format of the bytes is invalid."
          echo "See help: /help blacklist"
        fi
        ;;
      erase)
        shellcode=""
        ;;
      show)
        case "${list[1]}" in
          asm)
            # Rebuild the raw binary from the "XX " text, then disassemble.
            echo -n > "$tempbin"
            for ((i=0; i<${#shellcode}; i+=3)); do
              echo -ne "\x${shellcode:$i:2}" >> "$tempbin"
            done
            ndisasm -b$bits "$tempbin"
            ;;
          hexa)
            for ((i=0; i<${#shellcode}; i+=3)); do
              showbyte "${shellcode:$i:2}"
            done
            echo
            ;;
          *)
            echo "Error: Invalid format. See help: /help show"
            ;;
        esac
        ;;
      export)
        # Default output name: shell-<format><random>.txt (.bin for raw).
        if [ ! -z "${list[2]}" ]; then
          output="${list[2]}"
        else
          if [ "${list[1]}" == "raw" ]; then
            output="shell-raw${RANDOM}.bin"
          else
            output="shell-${list[1]}${RANDOM}.txt"
          fi
        fi

        case "${list[1]}" in
          hexa)
            echo "$shellcode" >> "$output"
            ;;
          string)
            echo -n "char shellcode[] = \"" >> "$output"
            for ((i=0; i<${#shellcode}; i+=3)); do
              echo -n "\x${shellcode:$i:2}" >> "$output"
            done
            echo "\";" >> "$output"
            ;;
          python)
            echo -n "shellcode = \"" >> "$output"
            for ((i=0; i<${#shellcode}; i+=3)); do
              echo -n "\x${shellcode:$i:2}" >> "$output"
            done
            echo "\"" >> "$output"
            ;;
          array)
            # Last element is written without the trailing ", ".
            echo -n "char shellcode[] = {" >> "$output"
            for ((i=0; i<${#shellcode}; i+=3)); do
              if [ $i -ge $((${#shellcode}-3)) ]; then
                echo -n "0x${shellcode:$i:2}" >> "$output"
              else
                echo -n "0x${shellcode:$i:2}, " >> "$output"
              fi
            done
            echo "};" >> "$output"
            ;;
          nasm)
            echo -n "shellcode: db " >> "$output"
            for ((i=0; i<${#shellcode}; i+=3)); do
              if [ $i -ge $((${#shellcode}-3)) ]; then
                echo -n "0x${shellcode:$i:2}" >> "$output"
              else
                echo -n "0x${shellcode:$i:2}, " >> "$output"
              fi
            done
            echo >> "$output"
            ;;
          raw)
            echo -n > "$output"
            for ((i=0; i<${#shellcode}; i+=3)); do
              echo -ne "\x${shellcode:$i:2}" >> "$output"
            done
            ;;
          *)
            echo "Format not valid. See help: /help export"
            continue
            ;;
        esac
        echo "Shellcode exported to '$output' file."
        ;;
      sys|syscall)
        if [ "$bits" == "16" ]; then
          echo "Error: Syscall is not valid to 16-bit mode."
          continue
        fi

        # Linux syscall numbers come from the kernel's unistd header for
        # the current bit-mode.
        path="/usr/include/asm/unistd_${bits}.h"
        if [ ! -f "$path" ]; then
          echo "Error: Sorry but i am not found '$path'"
          continue
        fi

        if [ "$bits" == "64" ]; then
          echo "Instruction: syscall"
          echo "RAX | RDI | RSI | RDX | R10 | R8 | R9"
          echo -n "RAX = "
        else
          echo "Instruction: int 0x80"
          echo "EAX | EBX | ECX | EDX | ESI | EDI"
          echo -n "EAX = "
        fi

        # Print the syscall number, then the first lines of its man page.
        cat "$path" | grep "NR_${list[1]} " | grep -oE "\w+$"
        man "${list[1]}.2" | grep -A1 "${list[1]}" | head -n5 | grep -vE "\--|^$"
        ;;
      *)
        echo "Command '${list[0]}' not exists."
        echo "See help: /help"
        ;;
    esac
  else
    # Not a command: assemble the line with nasm, disassemble it back to get
    # the canonical hex bytes, and show them (red = blacklisted).
    (
      echo "bits $bits"
      echo "$command"
    ) > "$tempasm"
    nasm "$tempasm" -o "$tempbin"

    code=$(ndisasm -b$bits "$tempbin" | sed -E 's/\w+\s+(\w+)\s+(.+)/\1/')
    endcode=$(echo "$code" | sed -E 's/[A-F0-9]{2}/& /g')
    insert=1

    for ((i=0; i<${#code}; i+=2)); do
      byte=${code:$i:2}
      showbyte "$byte"

      # Instructions containing any blacklisted byte are shown but not kept.
      if echo "$blacklist" | grep -q "$byte"; then
        insert=0
      fi
    done

    if [ $insert -eq 1 ]; then
      shellcode+="$endcode"
    fi
    echo
  fi
done
| true |
b6e0746f1e8f519427d44303316b2731d7821f2d | Shell | finmily/mac-init | /sh/set_ssh.sh | UTF-8 | 701 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Interactively collect remote host details (prompts are in Chinese):
# server IP/domain, SSH port, login user, and a name for the new key.
read -p '输入远程服务器IP或者域名' DOMAIN
read -p '输入远程服务器端口(默认请输入22)' PORT
# NOTE(review): this clobbers the shell's USER environment variable.
read -p '输入用户名(默认请输入root)' USER
read -p '请输入密钥的名称(如github),然后一路回车' ENAME

# Generate an RSA key pair named $ENAME in the current directory.
ssh-keygen -t rsa -f $ENAME

# Append a Host entry for this key to ~/.ssh/config.
echo '' >> ~/.ssh/config
echo 'Host '$ENAME >> ~/.ssh/config
echo ' IdentityFile ~/.ssh/'$ENAME >> ~/.ssh/config
echo ' Hostname '$DOMAIN >> ~/.ssh/config
echo ' Port '$PORT >> ~/.ssh/config
echo ' User '$USER >> ~/.ssh/config
echo '' >> ~/.ssh/config

# Move the private key into ~/.ssh; the public key ($ENAME.pub) stays in the
# current directory (the final message tells the user to copy it to the server).
mv $ENAME ~/.ssh/
echo '已经生成秘钥文件到.ssh下面,config文件已配置好,在当前目录下还有一个公钥需要弄到服务器上。' | true |
ecf6366ad470897052300438c84e1ffe2c5a275c | Shell | teddyhwang/base16-tmux-powerline | /themepack.tmux | UTF-8 | 258 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Absolute directory of this script, resolved even when invoked via a
# relative path or a symlinked tmux run-shell.
CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Load the base theme; additionally load the "spin" variant when $SPIN is set.
main() {
  tmux source-file "$CURRENT_DIR/base16-powerline.tmuxtheme"

  # Quote the expansion: the old unquoted `[ $SPIN ]` broke when SPIN
  # contained whitespace and relied on implicit single-argument test.
  if [ -n "${SPIN:-}" ]; then
    tmux source-file "$CURRENT_DIR/base16-powerline-spin.tmuxtheme"
  fi
}

main
| true |
f178d952cad77e1d7f3540ba79a066a3cf49894b | Shell | urmi-21/orphan-prediction | /plots_publication/Figure5/data-source/gc_hist.sh | UTF-8 | 458 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Build a histogram of GC-content values taken from column 3 of the input file.
file="$1"
# Sample label: filename part before the first underscore.
base=$(basename $file |cut -f 1 -d "_")
# Count records with GC fraction below 0.30 (rows containing "GC" — the
# header — are excluded).
f=$(cut -f 3 $file |grep -v "GC" |awk '$NF<0.3' |wc -l)
echo -e "<0.30\t$base\t$f"
# One bin of width 0.01 for each step between 0.30 and 0.65.
for i in $(seq 0.30 0.01 0.65); do
  g=$(cut -f 3 $file |grep -v "GC" |awk -v x=$i '$NF>=x && $NF<x+0.01' |wc -l)
  echo -e "$i\t$base\t$g"
  #echo -e "$i-$(echo "$i +0.01" |bc |awk '{printf "%.3f\n", $0}')\t$base\t$g"
done
# NOTE(review): this last bin filters header rows matching "len" instead of
# "GC" like the bins above — confirm whether that difference is intentional.
h=$(cut -f 3 $file |grep -v "len" |awk '$NF>=0.65 && $NF<1' |wc -l)
echo -e ">0.65\t$base\t$h"
| true |
ece5b247d24d93c3e96c2b1c2425b902b5d57bf8 | Shell | golangtech-id/gin-mvc | /_deploy/docker/mongo/replicaset/0-prepare.sh | UTF-8 | 372 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Run from the script's own directory so the relative paths below resolve;
# NODE.variables supplies REPLICA_SET_NAME / NODE_BEGIN / NODE_END.
cd "$(dirname "$0")" && source ./NODE.variables
../network-setup.sh

set_name=${REPLICA_SET_NAME}
node_name="${set_name}-${NODE_BEGIN}"

# If the first node's build directory already exists, a previous run has
# prepared everything — nothing to do.
if [ -d "./build/${node_name}" ]; then
  exit 0
fi

# Create a world-writable data directory for every node in the replica set.
for node in $(seq "$NODE_BEGIN" "$NODE_END"); do
  node_name="${set_name}-${node}"
  mkdir -p "./build/${node_name}" \
    && chmod a+w "./build/${node_name}"
done
| true |
bf214915596d2380d06444d0fc0daf7fafcc241c | Shell | narcher7/rack | /script/build | UTF-8 | 268 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Build the rack binary into $GOPATH/bin, embedding the current git commit
# into util/commit.go so the binary can report which revision it was built from.
if [ -n "$GOPATH" ]; then
  echo "Building rack!"
  COMMIT=$(git rev-parse --verify HEAD)
  # printf is more predictable than `echo -e` for generating source text.
  printf 'package util\n\nvar Commit = "%s"\n' "$COMMIT" > util/commit.go
  go build -o "$GOPATH/bin/rack"
else
  # Diagnostic goes to stderr so it is not mistaken for build output.
  echo '$GOPATH must be defined. Do you have go setup?' >&2
fi
| true |
e710924365d27e6274a060023095fa217e96e58f | Shell | Nephyrin/NephScripts | /bin/ghup | UTF-8 | 1,616 | 4.15625 | 4 | [] | no_license | #!/bin/bash
# Update the git repo, syncing its map file with any local exports
# and fixing missing tags
set -e
cd "$MOZPATH/moz-git"
# Helper library: provides estat/cmd/die/parse_args/num_args/get_arg/get_option.
. "$(dirname "$0")"/lib/util.sh

#
# Functions
#

# List keys under some prefix, e.g. | hg_config_list paths |
hg_config_list() {
for config in $(hg -R "$MOZPATH/$MOZHG" showconfig "$1"); do
[[ ! "$config" =~ ^"$1."(.+)'=' ]] || echo "${BASH_REMATCH[1]}";
done
}
# Sync one branch/path via ghsync; logs progress through estat.
syncbranch() {
  estat "Syncing $1"
  # $update is intentionally unquoted: it is either empty (expands to no
  # argument at all) or the literal "-u" flag.
  cmd ghsync $update "$1"
}
#
# Args
#
parse_args ghup ua "all" "$@"
[ $(num_args) -le 1 ] || die "Unexpected argument"
branch="$(get_arg 1)"
all="$(get_option all)$(get_option a)"
[[ -z $all || -z $branch ]] || die "-a/--all doesn't make sense with a specific branch name"
update="$(get_option u)"
[ -z "$update" ] || update="-u"

#
# Update git
#
estat "Updating git"
git fetch bangles
estat "Updating git map"
# Subshell so the cd does not affect the rest of the script.
(
  cd "$MOZPATH/moz-git-map"
  git pull --no-edit
)

#
# Call ghsync on given branches or everything in the config file with -a.
# -a/--all ignores keys under no-git-sync-paths in hgrc
#
[ -z "$branch" ] || syncbranch "$branch"

if [ -n "$all" ]; then
  paths=($(hg_config_list paths))
  skip=($(hg_config_list no-git-sync-paths))
  for path in "${paths[@]}"; do
    unset skipme
    # BUG FIX: the loop variable was previously named "skip", which assigned
    # to skip[0] on every iteration and silently dropped the first skip entry
    # for every path after the first one.
    for skip_path in "${skip[@]}"; do
      if [[ $path = $skip_path ]]; then
        skipme=1
        break
      fi
    done
    [[ -z $skipme ]] || continue
    syncbranch "$path"
  done
fi

# "git tag" emits "error: <tag> does not ..." lines on stderr for tags whose
# objects are missing; collect those tag names and fetch them explicitly.
missing="$((git tag >/dev/null ) 2>&1 | sed -r 's/error: (.*) does.*/\1/g;tx;d;:x')"
if [ ! -z "$missing" ]; then
  estat "Fixing missing tags"
  git fetch bangles $missing
fi
| true |
f8a94eb88f747cba0f0638d72b14c3b4d19483fc | Shell | phrynchanka/db-query-benchmarking-service | /run.sh | UTF-8 | 2,934 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env bash
#Build application.
# Tear down any previous stack and its data volumes so every run starts clean.
echo "Building app"
docker-compose down
docker volume rm db-query-benchmarking-service_postgres-remote-1-data
docker volume rm db-query-benchmarking-service_postgres-remote-2-data
docker volume rm db-query-benchmarking-service_postgres-querydb-data
docker volume rm db-query-benchmarking-service_mysql-remote-1-data
mvn clean package -DskipTests

#Run services
echo "Starting services"
docker-compose up --build -d

# Poll the Spring actuator health endpoint until the app is up.
echo "Waiting for app staring"
until curl localhost:8081/actuator/health; do
  echo "App is unavailable - sleeping"
  sleep 5
done

#Post queries for processing
# Print the request payload for the log, then POST it for real below.
echo "Post query processing"
echo "[
{
"metrics": ["EXECUTION_TIME"],
"queryName": "select_all_posts",
"sql": "SELECT * from perf",
"version" : "v1" ,
"databases" : ["mysql-remote-1","postgres-remote-1","postgres-remote-2"]
},
{
"metrics": ["EXECUTION_TIME"],
"queryName": "select_all_posts",
"sql": "SELECT author2 from perf",
"version" : "v2" ,
"databases" : ["mysql-remote-1","postgres-remote-1","postgres-remote-2"]
},
{
"metrics": ["EXECUTION_TIME"],
"queryName": "select_all_posts",
"sql": "SELECT content from perf",
"version" : "v3" ,
"databases" : ["mysql-remote-1","postgres-remote-2"]
}
]"
curl -X POST \
http://localhost:8081/measure-performance \
-H 'Content-Type: application/json' \
-d '[
{
"metrics": ["EXECUTION_TIME"],
"queryName": "select_all_posts",
"sql": "SELECT * from perf",
"version" : "v1" ,
"databases" : ["mysql-remote-1","postgres-remote-1","postgres-remote-2"]
},
{
"metrics": ["EXECUTION_TIME"],
"queryName": "select_all_posts",
"sql": "SELECT author2 from perf",
"version" : "v2" ,
"databases" : ["mysql-remote-1","postgres-remote-1","postgres-remote-2"]
},
{
"metrics": ["EXECUTION_TIME"],
"queryName": "select_all_posts",
"sql": "SELECT content from perf",
"version" : "v3" ,
"databases" : ["mysql-remote-1","postgres-remote-2"]
}
]'

#Wait for processing requests
echo "Waiting 15s for processing"
sleep 15

echo "Request report"
#Request report by query name
curl -X GET http://localhost:8081/report/select_all_posts | json_pp

# Clean up the stack and all data volumes again.
echo "Destroying app"
docker-compose down
docker volume rm db-query-benchmarking-service_postgres-remote-1-data
docker volume rm db-query-benchmarking-service_postgres-remote-2-data
docker volume rm db-query-benchmarking-service_postgres-querydb-data
docker volume rm db-query-benchmarking-service_mysql-remote-1-data
| true |
661c6b851b364e1d9b5155707db4f4e467964e7e | Shell | nandu90/STH_Dos | /HW1/q8/run.sh | UTF-8 | 177 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Usage: run.sh <num_procs> <arg1> <arg2>
PROCS=$1
ARG1=$2
ARG2=$3

# Start from a clean output directory.
rm -rf RUN

echo "Running Job on $PROCS processors"
echo "See RUN folder"
mkdir RUN
# Guard the cd: if mkdir failed we must not run mpirun in the wrong directory.
cd RUN || exit 1
mpirun -np "$PROCS" ../bin/system1PH "$ARG1" "$ARG2"
| true |
b003da29cbc4d995b60463e9849c611b72eb08c8 | Shell | jasoncodes/dotfiles | /shell/aliases/ruby.sh | UTF-8 | 981 | 3.171875 | 3 | [] | no_license | alias b='bundle check || bundle'
# Bundler shortcuts.
alias bo='bundle open'
alias be='bundle exec'
alias cap='bundle exec cap'
# Run rake through the most specific runner available, in priority order:
# zeus socket > project binstub (bin/rake) > bundler (Gemfile) > system rake.
function rake
{
  local runner
  if [ -S .zeus.sock ]; then
    runner=zeus
  elif [ -e bin/rake ]; then
    runner=binstub
  elif [ -f Gemfile ]; then
    runner=bundler
  else
    runner=system
  fi

  case "$runner" in
    zeus)    zeus rake "$@" ;;
    binstub) bin/rake "$@" ;;
    bundler) bundle exec rake "$@" ;;
    system)  command rake "$@" ;;
  esac
}
# Print the gem names from the "specs:" section of ./Gemfile.lock, one per
# line (feeds the completion for `bundle open`). The \$ escapes keep $1 and
# the regex anchors literal for ruby, since the heredoc is unquoted.
function _bundle_spec_names() {
  ruby <<-RUBY
NAME_VERSION = '(?! )(.*?)(?: \(([^-]*)(?:-(.*))?\))?'
File.open 'Gemfile.lock' do |io|
  in_specs = false
  io.lines.each do |line|
    line.chomp!
    case
    when in_specs && line == ''
      in_specs = false
    when line =~ /^ +specs:\$/
      in_specs = true
    when in_specs && line =~ %r{^ +#{NAME_VERSION}\$}
      puts \$1
    end
  end
end
RUBY
}
# Bash completion function: offer gem names from Gemfile.lock for the
# current word.
function _bundle_open() {
  local curw
  COMPREPLY=()
  curw=${COMP_WORDS[COMP_CWORD]}
  # The -W list is single-quoted on purpose: compgen evaluates the command
  # substitution itself, lazily.
  COMPREPLY=($(compgen -W '$(_bundle_spec_names)' -- $curw));
  return 0
}
# Register completion for "bo" only in shells that provide `complete`.
if type complete > /dev/null 2>&1; then
  complete -F _bundle_open bo
fi
| true |
f7efc13b968e2f015188d14d5af952817c9c6aa0 | Shell | daniiel/cursos | /Oracle 11g - Administration workshop I/courselabs/labs/lab_11_01_04.sh | UTF-8 | 820 | 2.734375 | 3 | [] | no_license | # Oracle Database 10g: Administration Workshop I
# Oracle Server Technologies - Curriculum Development
#
# ***Training purposes only***
#***Not appropriate for production use***
#
# Start this script as OS user: oracle

# This script creates DML on HR.JOBS as 2 different users
cd ~/labs

# Source the lab's database environment settings.
. set_db.sh

# One sqlplus session: create a throwaway AUDIT_USER, run offsetting updates
# on HR.JOBS as AUDIT_USER (x10) and HR (/10), then drop the user again.
sqlplus / as sysdba << EOF
drop user audit_user cascade;
create user audit_user identified by oracle_4U;
grant connect to audit_user;
grant all on hr.jobs to audit_user;
connect audit_user/oracle_4U
PROMPT As Audit_user
select * from hr.jobs;
update hr.jobs
set max_salary = max_salary * 10;
commit;
select * from hr.jobs;
connect hr/oracle_4U
PROMPT as HR user
update hr.jobs
set max_salary = max_salary / 10;
commit;
select * from hr.jobs;
connect / as sysdba
drop user audit_user cascade;
exit;
EOF
| true |
6a76ecdb6e8ed055957aab045967c06d0d983a7a | Shell | venki-tech/aep-terraform-create-aws | /createInventory.sh | UTF-8 | 3,053 | 3.671875 | 4 | [] | no_license | #!/bin/bash
echo "-------------------------------------"
# Fresh scratch area; WORKSPACE comes from the Jenkins environment.
rm -rf ${WORKSPACE}/temp_repo_ws
cd ${WORKSPACE}/aep-terraform-create-aws/
git remote set-url origin git@github.com:venki-tech/aep-terraform-create-aws.git
# Commit the terraform state back to the repo (|| true: nothing-to-commit is ok).
git add terraform.tfstate*
git commit -m "Added terraform state files to repo" || true
git push origin HEAD:master

# supply_hosts.txt defines $keyname and $hosts_exists used below.
. ./supply_hosts.txt
echo "Creating inventory file for current run for deploy"
inv_file_deploy="${keyname}_deploy_inventory.txt"
cp inventory.txt ${inv_file_deploy}

echo "Creating hosts file to be copied to the newly provisioned servers"
hosts_file="${keyname}_hosts"
# Extract "<ip>\t<name>" pairs from the ansible inventory lines.
perl -0777 -nle 'print "$2\t$1\n" while m/(.*) ansible_host=(.*)ansible_connection/g' ${inv_file_deploy} > ${hosts_file}
# Append a "db" alias to any host whose name ends in db<number>.
perl -i -pe 's/^(.*db[0-9]*)/$1 db/g' ${hosts_file}

mkdir -p ${WORKSPACE}/temp_repo_ws
echo "Copying files into temp_repo_ws"
cp terraform.tfstate* inventory.txt supply_hosts.txt ${inv_file_deploy} ${hosts_file} ${WORKSPACE}/temp_repo_ws/
rm -rf ${WORKSPACE}/aep-terraform-create-aws
echo "-------------------------------------"

cd ${WORKSPACE}
git clone git@github.com:venki-tech/aep-ansible-provision.git
cd ${WORKSPACE}/aep-ansible-provision
echo "Copy file ${WORKSPACE}/temp_repo_ws/${inv_file_deploy} to current location"
cp ${WORKSPACE}/temp_repo_ws/${inv_file_deploy} .
echo "Update within temp location contents of hosts.template from ${WORKSPACE}/temp_repo_ws/${hosts_file}"
cp ${WORKSPACE}/aep-ansible-provision/hosts.template ${WORKSPACE}/temp_repo_ws/
cat ${WORKSPACE}/temp_repo_ws/${hosts_file} >> ${WORKSPACE}/temp_repo_ws/hosts.template
echo "Showing conents of ${WORKSPACE}/temp_repo_ws/hosts.template:"
cat ${WORKSPACE}/temp_repo_ws/hosts.template
echo
echo

if [[ -f runninginventory.txt ]];then
  echo "Check if the servers already exists, if yes do not add it to runninginventory.txt"
  cp ${WORKSPACE}/temp_repo_ws/supply_hosts.txt .
  # hosts_exists=no means the hosts were newly provisioned this run.
  exists=$(grep hosts_exists supply_hosts.txt|cut -f2 -d'=')
  if [[ $exists == "no" ]];then
    echo "runninginventory.txt file exists and the hosts are newly provisioned. Will add the new hosts into it."
    cp ${WORKSPACE}/temp_repo_ws/inventory.txt .
    # Interleave the [all] and [aws_instances] sections of the old and new
    # inventories into a merged file, then swap it into place.
    perl -0777 -nle 'print $1 if m/(\[all\](.|\n|\r)*)\[aws_instances/g' runninginventory.txt >> newrunninginventory.txt
    perl -0777 -nle 'print $1 if m/all\]((.|\n|\r)*)\[aws_instances/g' inventory.txt >> newrunninginventory.txt
    perl -0777 -nle 'print $1 if m/(\[aws_instances\](.|\n|\r)*)/g' runninginventory.txt >> newrunninginventory.txt
    perl -0777 -nle 'print $1 if m/aws_instances\]((.|\n|\r)*)/g' inventory.txt >> newrunninginventory.txt
    mv -f newrunninginventory.txt runninginventory.txt
    rm -f inventory.txt
  fi
else
  echo "Running inventory file doesnt exist. Will just rename inventory file as runninginventory file"
  cp ${WORKSPACE}/temp_repo_ws/inventory.txt .
  mv inventory.txt runninginventory.txt
fi

git add runninginventory.txt ${inv_file_deploy}
git commit -m "Added inventory files to repo" || true
git push origin HEAD:master
rm -rf ${WORKSPACE}/aep-ansible-provision
| true |
67ce27d507db8853dbe999176924a7f521838c1c | Shell | RobertAudi/tsm | /tools/generate_command.zsh | UTF-8 | 1,297 | 3.515625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env zsh
emulate -LR zsh
setopt noclobber

# Trim leading and trailing whitespace from the arguments.
function __tsm_tools::trim() {
  local str="$(builtin printf "%s" "${@#"${@%%[![:space:]]*}"}")"
  builtin printf "%s" "${str%"${str##*[![:space:]]}"}"
}

# Anonymous function, run immediately with the script's arguments:
# scaffold a new command stub under <repo>/src/commands/<name>.zsh.
function {
  # Repo root: two directories up from this script's resolved path.
  local __tsm_root="${${(%):-%x}:A:h:h}"
  local __tsm_commands_dir="${__tsm_root}/src/commands"
  local __tsm_command_name="$(__tsm_tools::trim "$1")"

  # Prompt for a name when none was passed on the command line.
  if [[ -z "$__tsm_command_name" ]]; then
    __tsm_command_name="$(__tsm_tools::trim $(builtin read -e "?Command name: "))"
    if [[ -z "$__tsm_command_name" ]]; then
      builtin print -P -- "[%F{196}ERROR%f] Command name required" >&2
      return 1
    fi
  fi

  # Refuse to overwrite an existing command file (:r drops any extension).
  local __tsm_command_filename="${__tsm_commands_dir}/${__tsm_command_name:r}.zsh"
  if [[ -f "$__tsm_command_filename" ]]; then
    builtin print -P -- "[%F{196}ERROR%f] Command already exist: %B${__tsm_command_name}%b" >&2
    return 1
  fi

  # Write the boilerplate function body, then report success.
  {
    builtin print -l -- \
      "function __tsm::commands::${__tsm_command_name}() {" \
      " # TODO" \
      " __tsm::utils::log error \"Not implemented\"" \
      " return 1" \
      "}" > "${__tsm_command_filename}"
  } && builtin print -P -- "[%F{034}SUCCESS%f] New command generated: %B${__tsm_command_name}%b in %F{245}${__tsm_command_filename}%f"
} "$@"

unfunction __tsm_tools::trim &>/dev/null
| true |
2d0f25205fad14414b2bbf0779f9b01e60de3a19 | Shell | sayem-eee-kuet/High-Performence-Scientific-Computing | /Home Works/HW_4/Task 1/task1_a.sh | UTF-8 | 451 | 2.6875 | 3 | [] | no_license | #!/bin/bash
# Files that collect compiler error logs (see the commented redirections below).
file_2="cpp_compile_error.dat"
file_3="c_compile_error.dat"

# Remove stale error logs from a previous run.
if [ -f "$file_2" ] ; then
  rm "$file_2"
fi

if [ -f "$file_3" ] ; then
  rm "$file_3"
fi

# Compile the C++ MPI program (the C variant is kept for reference, disabled).
mpiCC secret_function.o -Wall -o task1_a_cpp task1_a.cpp -lm -no-pie #&>> $file_2
#mpicc secret_function.o -Wall -std=c99 -o task1_a_c task1_a.c -lm &>> $file_3

# Remove the previous results file; the run below appends to it.
file_1="task1_a.dat"
if [ -f "$file_1" ] ; then
  rm "$file_1"
fi

# Number of MPI processes and the problem size passed to the program.
threadNo=4
N=4
mpirun -np $threadNo task1_a_cpp $N >> $file_1 | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.