blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
c9b6748221f77533e81710ad21ab19e4c520d5c8 | Shell | zmousm/git-stdio-push-fetch | /git_stdio_client.sh | UTF-8 | 6,586 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# Push refs from one remote host to another by bridging git's pack protocol
# through the local machine: two ssh sessions are joined via a local FIFO so
# the two remotes never need direct network access to each other.
# Usage: git-pipe-push sending-host:[path] receiving-host:[path] [git-send-pack options]
function git-pipe-push () {
    local shost spath dhost dpath opts=() args=() justafifo rc
    # Parse the two host:path arguments with a perl one-liner that emits
    # shell assignments (shost/spath, dhost/dpath) which are eval'ed here.
    # IPv6 literals in [brackets] are accepted; an empty path defaults to ".".
    if [ $# -ge 2 ]; then
        eval $(perl -e \
         'my @d = qw(s d);
          for (my $i=0; $i<=1; $i++) {
            if ($ARGV[$i] =~ /^(\[[0-9A-Fa-f:]+\]|[^:]+):(.*)$/) {
              printf "%1\$shost=\"%2\$s\" %1\$spath=\"%3\$s\" ", $d[$i], $1, $2 || ".";
            }
          }' "${@:1:2}")
    fi
    # Either host missing (bad arguments) or explicit -h: print usage.
    if [ -z "$shost" -o -z "$dhost" -o "$1" = "-h" ]; then
        echo "Usage: ${FUNCNAME[0]} sending-host:[path] receiving-host:[path] [git-send-pack options]" >&2
        [ "$1" = "-h" ] && return 0 || return 1
    else
        shift 2
    fi
    # Split remaining arguments: --options first, bare args (refspecs) after.
    while [ -n "$1" ]; do
        case "$1" in
            --*)
                opts+=("$1")
                ;;
            *)
                args+=("$1")
        esac
        shift
    done
    # "." is the repo argument for git send-pack (we cd on the remote first);
    # refspecs follow it.
    opts+=(".")
    opts+=("${args[@]}")
    # A FIFO carries the reverse direction of the otherwise one-way pipe.
    # NOTE(review): mktemp -u is race-prone by design; acceptable for a FIFO
    # created immediately after, but mkfifo will fail if the name was taken.
    justafifo=$(mktemp -u /tmp/gitpipe.XXXXXX)
    mkfifo "$justafifo"
    rc=$?
    if [ $rc -eq 0 ]; then
        # Clean the FIFO up on interruption ("KILL" cannot actually be
        # trapped; listed here harmlessly).
        trap 'rm -f "$justafifo"' HUP INT QUIT TERM KILL
    else
        return $rc
    fi
    # Sending side: socat runs git send-pack whose "--receive-pack" is a
    # socat shim relaying fd 5 (our stdin from the FIFO) as the transport.
    # ':' in options is escaped because socat treats ':' as a separator.
    ssh $shost \
        "cd \"$spath\";"\
        "socat - EXEC:'git send-pack --receive-pack=\\\"socat - 5 #\\\" ${opts[@]//:/\:}',fdin=5"\
        <"$justafifo" | \
        ssh $dhost \
            "git receive-pack \"$dpath\"" \
            >"$justafifo"
    rc=$?
    rm -f "$justafifo"
    return $rc
}
# Fetch refs from one remote host into another by bridging git's pack
# protocol through the local machine (see git-pipe-push for the FIFO trick).
# After fetching, remote-tracking refs are created on the receiving side via
# an awk-generated series of "git update-ref" commands.
# Usage: git-pipe-fetch receiving-host:[path] sending-host:[path] [--remote=name] [git-fetch-pack options]
function git-pipe-fetch () {
    local shost spath dhost dpath remote opts=() args=() justafifo rc
    # Parse host:path pairs exactly as in git-pipe-push.
    if [ $# -ge 2 ]; then
        eval $(perl -e \
         'my @d = qw(s d);
          for (my $i=0; $i<=1; $i++) {
            if ($ARGV[$i] =~ /^(\[[0-9A-Fa-f:]+\]|[^:]+):(.*)$/) {
              printf "%1\$shost=\"%2\$s\" %1\$spath=\"%3\$s\" ", $d[$i], $1, $2 || ".";
            }
          }' "${@:1:2}")
    fi
    if [ -z "$shost" -o -z "$dhost" -o "$1" = "-h" ]; then
        echo "Usage: ${FUNCNAME[0]} receiving-host:[path] sending-host:[path] [--remote=remote_name] [git-fetch-pack options]" >&2
        [ "$1" = "-h" ] && return 0 || return 1
    else
        shift 2
    fi
    # Optional --remote=NAME selects the remote-tracking namespace; the
    # sending host name is used as the default further below.
    if test -n "$1" && echo "$1" | grep -q "^--remote="; then
        remote="${1#--remote=}"
        shift
    fi
    # Split remaining arguments into --options and bare refs.
    while [ -n "$1" ]; do
        case "$1" in
            --*)
                opts+=("$1")
                ;;
            *)
                args+=("$1")
        esac
        shift
    done
    # fetch-pack apparently needs refs
    if test -z "${args[*]}" && echo "${opts[*]}" | grep -qv -- "--all"; then
        opts+=("--all")
    fi
    opts+=(".")
    opts+=("${args[@]}")
    # Local FIFO for the return channel between the two ssh sessions.
    justafifo=$(mktemp -u /tmp/gitpipe.XXXXXX)
    mkfifo "$justafifo"
    rc=$?
    if [ $rc -eq 0 ]; then
        trap 'rm -f "$justafifo"' HUP INT QUIT TERM KILL
    else
        return $rc
    fi
    # Receiving side: run git fetch-pack with a socat shim as upload-pack,
    # capture the "sha ref" lines it prints into $refstmp, then turn them
    # into "git update-ref refs/remotes/<remote>/... <sha>" via awk|xargs.
    # NOTE(review): $(mktemp -u ...) runs LOCALLY here, producing the name
    # used for the remote temp file — assumed acceptable; verify intent.
    ssh $shost \
        "cd \"$spath\";"\
        "export refstmp=$(mktemp -u /tmp/gitfetchpack.XXXXXXXX);"\
        "socat - SYSTEM:'git fetch-pack --upload-pack=\\\"socat - 5 #\\\" ${opts[@]//:/\:} >\$refstmp',fdin=5;"\
        "awk -v remote=\"${remote:-$dhost}\""\
        "'\$1 ~ /^[0-9a-f]+$/ && length(\$1) == 40 {
            if (\$2 == \"HEAD\") { sref = \"refs/remotes/\" remote \"/\" \$2; }
            else { sref = \$2; sub(/heads/, \"remotes/\" remote, sref); };
            print \"update-ref\", sref, \$1;
        }' \${refstmp} | xargs -L 1 git;"\
        "rm \${refstmp}"\
        <"$justafifo" | \
        ssh $dhost \
            "git upload-pack \"$dpath\"" \
            >"$justafifo"
    rc=$?
    rm -f "$justafifo"
    return $rc
}
# Variant of git-pipe-push that relies on a "git pipe-send-pack" helper being
# installed on the sending host, and optionally wraps both remote commands in
# sudo (--sudo or --sudo=user; user defaults to root).
function git-pipe-push2 () {
    local shost spath dhost dpath opts=() sudo justafifo rc
    # Parse host:path pairs (same perl helper as git-pipe-push).
    if [ $# -ge 2 ]; then
        eval $(perl -e \
         'my @d = qw(s d);
          for (my $i=0; $i<=1; $i++) {
            if ($ARGV[$i] =~ /^(\[[0-9A-Fa-f:]+\]|[^:]+):(.*)$/) {
              printf "%1\$shost=\"%2\$s\" %1\$spath=\"%3\$s\" ", $d[$i], $1, $2 || ".";
            }
          }' "${@:1:2}")
    fi
    if [ -z "$shost" -o -z "$dhost" -o "$1" = "-h" ]; then
        echo "Usage: ${FUNCNAME[0]} sending-host:[path] receiving-host:[path] [--sudo[=user]] [git-send-pack options]" >&2
        [ "$1" = "-h" ] && return 0 || return 1
    else
        shift 2
    fi
    # Extract --sudo[=user]; everything else is forwarded to pipe-send-pack.
    while [ -n "$1" ]; do
        case "$1" in
            --sudo*)
                if [ -z "${1#--sudo}" ]; then
                    sudo=root
                else
                    sudo="${1#--sudo=}"
                fi
                ;;
            *)
                opts+=("$1")
                ;;
        esac
        shift
    done
    # FIFO for the return channel (see git-pipe-push).
    justafifo=$(mktemp -u /tmp/gitpipe.XXXXXX)
    mkfifo "$justafifo"
    rc=$?
    if [ $rc -eq 0 ]; then
        trap 'rm -f "$justafifo"' HUP INT QUIT TERM KILL
    else
        return $rc
    fi
    # Same ssh|ssh bridge, with or without the sudo wrapper; the remote
    # command strings are concatenated from the quoted fragments below.
    if [ -n "$sudo" ]; then
        ssh $shost \
            "sudo -u \"$sudo\" -- sh -c '"\
            "cd \"$spath\";"\
            "git pipe-send-pack ${opts[@]};"\
            "'"\
            <"$justafifo" | \
            ssh $dhost \
                "sudo -u \"$sudo\" -- sh -c '"\
                "git receive-pack \"$dpath\"" \
                "'"\
                >"$justafifo"
    else
        ssh $shost \
            "cd \"$spath\";"\
            "git pipe-send-pack ${opts[@]};"\
            <"$justafifo" | \
            ssh $dhost \
                "git receive-pack \"$dpath\"" \
                >"$justafifo"
    fi
    rc=$?
    rm -f "$justafifo"
    return $rc
}
# Variant of git-pipe-fetch that relies on "git pipe-fetch-pack" and a
# "gfp2gur.awk" helper being installed on the receiving host, with optional
# sudo wrapping (--sudo[=user]) and --remote=NAME for the tracking namespace.
function git-pipe-fetch2 () {
    local shost spath dhost dpath opts=() remote sudo justafifo rc
    # Parse host:path pairs (same perl helper as the other functions).
    if [ $# -ge 2 ]; then
        eval $(perl -e \
         'my @d = qw(s d);
          for (my $i=0; $i<=1; $i++) {
            if ($ARGV[$i] =~ /^(\[[0-9A-Fa-f:]+\]|[^:]+):(.*)$/) {
              printf "%1\$shost=\"%2\$s\" %1\$spath=\"%3\$s\" ", $d[$i], $1, $2 || ".";
            }
          }' "${@:1:2}")
    fi
    if [ -z "$shost" -o -z "$dhost" -o "$1" = "-h" ]; then
        echo "Usage: ${FUNCNAME[0]} receiving-host:[path] sending-host:[path] [--remote=remote_name] [--sudo[=user]] [git-fetch-pack options]" >&2
        [ "$1" = "-h" ] && return 0 || return 1
    else
        shift 2
    fi
    # Extract --remote=NAME (argument required) and --sudo[=user]; anything
    # else is forwarded to pipe-fetch-pack.
    while [ -n "$1" ]; do
        case "$1" in
            --remote*)
                if [ -z "${1#--remote}" ]; then
                    echo "missing required argument to --remote" >&2
                    return 1
                else
                    remote="${1#--remote=}"
                fi
                ;;
            --sudo*)
                if [ -z "${1#--sudo}" ]; then
                    sudo=root
                else
                    sudo="${1#--sudo=}"
                fi
                ;;
            *)
                opts+=("$1")
                ;;
        esac
        shift
    done
    # FIFO for the return channel (see git-pipe-push).
    justafifo=$(mktemp -u /tmp/gitpipe.XXXXXX)
    mkfifo "$justafifo"
    rc=$?
    if [ $rc -eq 0 ]; then
        trap 'rm -f "$justafifo"' HUP INT QUIT TERM KILL
    else
        return $rc
    fi
    # Fetch over the bridge, write the resulting refs to a temp file, then
    # convert them to update-ref commands with the remote gfp2gur.awk script.
    # NOTE(review): $(mktemp -u ...) runs locally to name the remote temp
    # file — same assumption as git-pipe-fetch; verify intent.
    if [ -n "$sudo" ]; then
        ssh $shost \
            "sudo -u \"$sudo\" -- sh -c '"\
            "cd \"$spath\";"\
            "export refstmp=$(mktemp -u /tmp/gitfetchpack.XXXXXXXX);"\
            "git pipe-fetch-pack --write-refs=\${refstmp} ${opts[@]};"\
            "gfp2gur.awk -v remote=\"${remote:-$dhost}\" \${refstmp} | xargs -L 1 git;"\
            "rm \${refstmp}"\
            "'"\
            <"$justafifo" | \
            ssh $dhost \
                "sudo -u \"$sudo\" -- sh -c '"\
                "git upload-pack \"$dpath\"" \
                "'"\
                >"$justafifo"
    else
        ssh $shost \
            "cd \"$spath\";"\
            "export refstmp=$(mktemp -u /tmp/gitfetchpack.XXXXXXXX);"\
            "git pipe-fetch-pack --write-refs=\${refstmp} ${opts[@]};"\
            "gfp2gur.awk -v remote=\"${remote:-$dhost}\" \${refstmp} | xargs -L 1 git;"\
            "rm \${refstmp}"\
            <"$justafifo" | \
            ssh $dhost \
                "git upload-pack \"$dpath\"" \
                >"$justafifo"
    fi
    rc=$?
    rm -f "$justafifo"
    return $rc
}
| true |
363ee8fa97ae7af9fa678b78be5b77ceca0d307b | Shell | nodchip/icfpc2018 | /pack.sh | UTF-8 | 732 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Build the solvers, refresh hand-made results, run every engine listed in
# engines.txt, then zip all produced .nbt traces into a timestamped archive.
export WORKSPACE=`pwd`
export ENGINES=`cat engines.txt`
git submodule init
git submodule update
# Build the C++ sources; NUMBER_OF_PROCESSORS is expected in the environment
# (Windows-style variable — presumably set by the CI host; verify).
cd ${WORKSPACE}/src || exit 1
make -j ${NUMBER_OF_PROCESSORS} || exit 1
# Merge hand-crafted traces/info into the result directories.
cd ${WORKSPACE}/scripts || exit 1
python3 update_result.py --temp_trace_directory_path ../data/by_hand/trace \
    --temp_info_directory_path ../data/by_hand/info \
    --result_trace_directory_path ../resultF/trace \
    --result_info_directory_path ../resultF/info || exit 1
cd ${WORKSPACE}
# Run each engine binary via the shared driver; BINARY_FILE_NAME tells the
# driver which engine to execute.
for engine in ${ENGINES}
do
    export BINARY_FILE_NAME=${engine}
    ./execute_engine.sh || exit 1
done
# Package all traces and print the submission checksum.
cd resultF/trace
FILENAME=sanma`date +%Y%m%d%H%M%S`.zip
zip ${FILENAME} *.nbt
cd ${WORKSPACE}
mv resultF/trace/${FILENAME} .
sha256sum ${FILENAME}
| true |
9a53177437eb73707d4655db650b778e18b98373 | Shell | an-dr-eas-k/myDesktopProfiles | /etc/profile.d/acd_func.sh | UTF-8 | 2,487 | 3.796875 | 4 | [] | no_license | # do ". acd_func.sh"
# acd_func 1.0.5, 10-nov-2004
# petar marinov, http:/geocities.com/h2428, this is public domain
# File listing directories used to pre-seed the directory stack (-i option).
init_file=${HOME}/.plan

# Enhanced "cd": maintains a deduplicated pushd/popd stack (max ~31 entries)
# and supports -- (show stack), -N (jump to stack entry N), -i (reload stack
# from $init_file), -d (clear stack), -h (help).
cd_func ()
{
    local x2 the_new_dir adir index
    local -i cnt

    if [[ $1 == "-h" ]]; then
        echo
        echo "cd_func is an advanced version to change directory"
        echo
        echo "usage: cd_func [<option>|dir]"
        echo "where 'dir' specifies a directory to change to or"
        echo "options are"
        echo "--"
        echo " shows the directory stack"
        echo "-i"
        echo " reinitializes the directory stack with the directories from"
        echo " ${init_file}"
        echo "-d"
        echo " deletes the directory stack"
        echo "-h"
        echo " print this help"
        return 0
    fi

    # -i: clear the stack, then visit every existing directory listed in
    # $init_file (pushing each onto the stack) and return to where we were.
    if [[ $1 == "-i" ]]; then
        wd=$(pwd)
        cd_func -d
        for i in $(cat ${init_file}); do
            if [ -d $i ]; then
                cd_func $i > /dev/null;
            fi;
        done;
        cd_func ${wd}
        return 0
    fi

    # -d: pop entries until the stack is empty.
    if [[ $1 == "-d" ]]; then
        while popd -0 &>/dev/null; do true; done
        # cd_func -i
        return 0
    fi

    # --: just display the stack.
    if [[ $1 == "--" ]]; then
        echo "Directory Stack:"
        dirs -v
        return 0
    fi

    the_new_dir=$1
    [[ -z $1 ]] && the_new_dir=$HOME

    if [[ ${the_new_dir:0:1} == '-' ]]; then
        #
        # Extract dir N from dirs
        index=${the_new_dir:1}
        [[ -z $index ]] && index=1
        adir=$(dirs +$index)
        [[ -z $adir ]] && return 1
        the_new_dir=$adir
    fi

    #
    # '~' has to be substituted by ${HOME}
    [[ ${the_new_dir:0:1} == '~' ]] && the_new_dir=${HOME}${the_new_dir:1}

    #
    # Now change to the new dir and add to the top of the stack
    pushd "${the_new_dir}" > /dev/null
    [[ $? -ne 0 ]] && return 1
    the_new_dir=$(pwd)

    #
    # Trim down everything beyond 31th entry
    popd -n +31 2>/dev/null 1>/dev/null

    #
    # Remove any other occurence of this dir, skipping the top of the stack
    for ((cnt=1; cnt <= 30; cnt++)); do
        x2=$(dirs +${cnt} 2>/dev/null)
        [[ $? -ne 0 ]] && return 0
        [[ ${x2:0:1} == '~' ]] && x2=${HOME}${x2:1}
        if [[ "${x2}" == "${the_new_dir}" ]]; then
            # Drop the duplicate and re-examine the same index, since the
            # entries below it shifted up by one.
            popd -n +$cnt 2>/dev/null 1>/dev/null
            cnt=cnt-1
        fi
    done

    return 0
}
# Route every interactive "cd" through the stack-aware function above.
alias cd=cd_func

# On shell startup, pre-seed the directory stack from $init_file: cd into
# each listed directory that exists (newline-delimited to allow spaces in
# paths), then return to the original working directory.
if [ -e "${init_file}" ]; then
    IFS_TMP=${IFS}
    IFS=$'\n'
    currWd=`pwd`;
    for i in `cat ${init_file}`; do
        if [ -d "$i" ]; then
            cd "$i" > /dev/null;
        fi;
    done;
    IFS=${IFS_TMP}
    cd $currWd;
fi

# Bind Alt-w to display the stack on bash versions that support bind -x.
if [[ $BASH_VERSION > "2.05a" ]]; then
    # ctrl+w shows the menu
    bind -x '"\ew"':"cd_func --" 2>/dev/null
    # bind -x '"\ew\":cd_func -- ;"
fi
| true |
fe70f3717ab8d130bca82512df058bf123aa46d1 | Shell | kborchers/vertxbustest | /server.sh | UTF-8 | 518 | 3.75 | 4 | [] | no_license | #!/bin/sh
# Start (default) or stop ("stop" argument) a vert.x event-bus test server.
VERTX_VERSION="vert.x-2.0.0-final"

# "stop": kill every process whose ps line mentions vertx.
# NOTE(review): "==" inside [ ] is a bashism — this breaks under dash when
# run as /bin/sh; also ps|grep can match unrelated processes.
if [ "$1" == "stop" ]; then
    echo "Server stopped!"
    for i in `ps -ef | grep -i vertx | awk '{print $2}'`
    do
        kill -9 $i 2> /dev/null
    done
    exit 0
fi

# Re-extract a fresh copy of the vert.x distribution from the tarball.
if [ -d "$VERTX_VERSION" ]; then
    rm -rf $VERTX_VERSION/
fi

if [ -f "$VERTX_VERSION.tar.gz" ]; then
    tar xzvf $VERTX_VERSION.tar.gz
else
    echo "The path does not contain a vertx distribution"
fi

# Launch the server in the background, detached from the terminal.
echo "Server started!"
nohup $VERTX_VERSION/bin/vertx run diy/server.js -conf conf/config.json &
| true |
9c07d1421ad38bd199fbbe346d171f6325bc47b0 | Shell | will-mei/hw_check | /hwinfo/functions | UTF-8 | 872 | 3.625 | 4 | [] | no_license | #!/bin/bash
# Preserve the caller's IFS so helpers below can restore it after
# temporarily splitting on '|'.
OLD_IFS=$IFS
# Capture the full hardware inventory once; abort if lshw is unavailable.
tmpfile=$(mktemp)
lshw > $tmpfile || exit 1
# $report_file is expected to be set by the sourcing script.
#report_file="report.csv"
# Recreate the global $report_file as an empty file, discarding any
# previous contents.
function _refresh_report_file(){
    # rm -f is a no-op when the file does not exist, so the separate
    # existence test the original performed is unnecessary; expansions
    # are quoted so paths with spaces or glob characters are safe.
    rm -f -- "$report_file"
    touch -- "$report_file"
}
# Append a field to line $1 of the global $report_file (CSV-style).
# If the file currently has fewer than $1 lines, the field starts a new
# line; otherwise ",<field>" is appended to the end of line $1.
function _append_field(){
    local line_num=$1
    shift
    local field_content="$*"
    local line_max
    line_max=$(wc -l < "$report_file")
    if [[ $line_max -lt ${line_num} ]] ;then
        echo "$field_content" >> "$report_file"
    else
        # Escape every sed-replacement metacharacter (backslash, the '/'
        # delimiter and '&') so arbitrary field text is inserted literally.
        # The original escaped only '/', so fields containing '&' or '\'
        # were corrupted by sed's replacement expansion.
        local escaped=${field_content//\\/\\\\}
        escaped=${escaped//\//\\/}
        escaped=${escaped//&/\\&}
        sed -i "${line_num}s/\$/,${escaped}/" "$report_file"
    fi
}
# Split a '|'-delimited record into fields and append field N to line N of
# the report via _append_field (building the report column by column).
function _update_info(){
    local info=$@
    # IFS='|' is in effect when the for-list below is expanded, so $info is
    # split on '|'; it is restored immediately inside the loop (splitting
    # has already happened by then) and again after the loop.
    IFS="|"
    local nu=1
    for field in $info;do
        IFS="$OLD_IFS"
        _append_field $nu $field
        nu=$((nu + 1))
    done
    IFS="$OLD_IFS"
}
# Join all input lines into a single '|'-separated line.
# Reads from the file named in $1 when given, otherwise from stdin.
function _change_delimiter(){
    local opened_file=0
    if [[ $# -gt 0 ]]; then
        exec 0<"$1"
        opened_file=1
    fi
    # N appends the next line to the pattern space; the label loop folds
    # every embedded newline into '|'.
    sed ':label;N;s/\n/|/g;b label'
    # Close fd 0 only when we redirected it ourselves.  The original closed
    # it unconditionally, silently destroying the caller's stdin when the
    # function was invoked without a filename.
    if [[ $opened_file -eq 1 ]]; then
        exec 0<&-
    fi
}
| true |
664fca75783a9bd804c356e3fde031a6f89be935 | Shell | drewkhoury/docker-datacenter | /scripts/docker2.sh | UTF-8 | 1,160 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Join this host to an existing Docker UCP controller as a replica, then
# print access instructions and the elapsed wall-clock time.
echo `date`
start=`date +%s`

# paths
export SCRIPT_PATH=/home/vagrant/sync

# common scripts
# Provides DOCKER1_IP / DOCKER2_IP / UCP_HTTPS_PORT used below
# (presumably — defined in common.sh, not visible here).
source ${SCRIPT_PATH}/scripts/supporting/common.sh

# ucp
# Join the controller on docker1; the fingerprint is fetched from a file
# the controller publishes over HTTP during provisioning.
docker run --rm -t --name ucp \
    -e "UCP_ADMIN_USER=admin" -e "UCP_ADMIN_PASSWORD=orca" \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -v /etc/hosts:/etc/hosts \
    docker/ucp join \
    --debug \
    --url https://${DOCKER1_IP}:8443 \
    --fingerprint `curl -s http://${DOCKER1_IP}:8000/fingerprint.log` \
    --replica --host-address ${DOCKER2_IP} \
    --controller-port 8443

echo
echo
echo '=============================================================='
echo '=================== Docker Datacenter ========================'
echo
echo "Try the following in your favourite browser:"
echo
echo "Universal Control Plane (UCP) :: https://docker2:${UCP_HTTPS_PORT}"
echo
echo '=================== Docker Datacenter ========================'
echo '=============================================================='
echo

echo `date`
end=`date +%s`

# Break the elapsed seconds into h:m:s for display.
let deltatime=end-start
let hours=deltatime/3600
let minutes=(deltatime/60)%60
let seconds=deltatime%60
printf "Time spent: %d:%02d:%02d\n" $hours $minutes $seconds
8eb375d4b674af369c09e8b415459313f004c496 | Shell | fluent/fluent-bit | /packaging/windows-checksums.sh | UTF-8 | 264 | 3.546875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -eu

# Generate SHA-256 checksums for local Windows binaries.
# SOURCE_DIR must point at the directory holding the artifacts; the :?
# expansion aborts with an error if it is unset or empty.
SOURCE_DIR=${SOURCE_DIR:?}

pushd "$SOURCE_DIR"

# One loop covers both artifact types; the original duplicated the exact
# same loop body for *.exe and *.zip.  Glob order is unchanged (all .exe
# first, then all .zip), so output ordering is identical.
for i in *.exe *.zip
do
    echo "$i"
    sha256sum "$i" > "$i".sha256
done

popd
8efe3f4cf91b7262307b1ecd665a3cbe877d0c6c | Shell | vintagegamingsystems/Zabbix | /zabbixRecovery.sh | UTF-8 | 7,359 | 4.125 | 4 | [] | no_license | #!/bin/bash
#
# zabbix_recovery.sh
#
# Author: Joshua Cagle
# Organization: University of Oregon
# Restores Zabbix MySQL database, MIBs, and puppet modules
# Restores the following files:
# MySQL Database
# SNMP MIBs
#
# Puppet modules including:
# zabbixserver
# iptables
#
#
###########################################
# Backup directory location
###########################################
backupDir="<path to backup directory>"
###########################################
# Temporary directory location
###########################################
temporaryDir="/tmp/"
###############################################
# Backup locations
###############################################
mibsLocation="/usr/share/snmp/mibs"
mysqlLocation="/var/lib/mysql"
###############################################
# Declaration of restoration variables
###############################################
mibRestore=0
mysqlRestore=0
###########################################
# Function declaration
###########################################
# List the backup directories under $backupDir, prompt the operator to pick
# one by number, and store the chosen name in the global
# $inputBackupDirectory for the caller.
function functionInput ()
{
    echo "Below is a list of backups"
    echo ""
    declare -a files
    # Retrieves an array of file names in the $backupDir
    # NOTE(review): 'local files=$(...)' stores one newline-joined string,
    # not an array; the loop below re-populates files[1..N] by index, and
    # index 0 keeps the joined string.
    local files=$(ls -l $backupDir | grep '^d' | awk '{print $9}')
    # Function reads input from user.
    local y=1
    for file in ${files[@]}
    do
        echo "[$y] $file"
        #For loop puts directory names into $files array again
        files[$y]=$file
        ((y++))
    done
    echo ""
    # Read user input for backup filename
    echo "Enter the number of the backup you would like to restore."
    read inputBackupDirectoryNum
    # Sanitizes variable to only accept numeric data
    inputBackupDirectoryNum=${inputBackupDirectoryNum//[^0-9]/}
    # y overshot by one in the loop; bring it back to the highest index.
    ((y--))
    # Re-prompt until the input is a number within [1, y].
    while [[ ! $inputBackupDirectoryNum =~ ^[0-9]+$ || $inputBackupDirectoryNum -lt 1 || $inputBackupDirectoryNum -gt $y ]]
    do
        echo "Enter the "number" of the backup listed above, the number must be between 1 and $y."
        read inputBackupDirectoryNum
        inputBackupDirectoryNum=${inputBackupDirectoryNum//[^0-9]/}
    done
    # Pull backup directory name from files array index number using user input
    inputBackupDirectory=${files[$inputBackupDirectoryNum]}
}
# --- Interactive driver -----------------------------------------------------
clear
echo "###########################################"
echo " Zabbix Recovery Script"
echo "###########################################"
# Calls function functionInput
functionInput
# Confirmation loop for the chosen backup.  NOTE(review): '[ $x==1 ]' tests
# a single non-empty string and is therefore always true — the loop is only
# ever left via 'break'; presumably intentional, but '[ $x -eq 1 ]' was
# likely meant.
x=1
while [ $x==1 ]
do
    echo ""
    echo "Would you like to restore $inputBackupDirectory?"
    echo ""
    echo -e "Press '\e[1;31my\e[0m' key and Enter key, if this is true."
    echo -e "If this is '\e[1;34mNOT\e[0m' true, press any other key to be prompted again for name of backup directory."
    read answer
    # Strip anything that is not a letter before comparing.
    answer=${answer//[^a-zA-Z]/}
    if [[ "$answer" == "y" || "$answer" == "Y" ]]
    then
        echo "Restoring from...$inputBackupDirectory"
        break
    else
        clear
        functionInput
    fi
done
# Ask whether the SNMP MIB files should be restored.
echo "Would you like to restore the MIB files to $mibsLocation?"
echo -e "Enter '\e[1;31my\e[0m' and press Enter to '\e[1;31mRESTORE\e[0m' MIBs."
echo -e "Enter '\e[05;34mn\e[0m' and press Enter to '\e[1;34mNOT RESTORE\e[0m' MIBs."
read answer
answer=${answer//[^a-zA-Z]/}
x=1
while [[ $x==1 ]]
do
    if [[ $answer == "y" || $answer == "Y" ]]
    then
        mibRestore=1
        break
    elif [[ $answer == "n" || $answer == "N" ]]
    then
        echo "User chose to not restore MIBs."
        break
    else
        echo "Would you like to restore the MIBs to $mibsLocation?"
        echo -e "Enter '\e[1;31my\e[0m' and press Enter to '\e[1;31mRESTORE\e[0m' MIBs."
        echo -e "Enter '\e[05;34mn\e[0m' and press Enter to '\e[1;34mNOT RESTORE\e[0m' MIBs."
        read answer
        answer=${answer//[^a-zA-Z]/}
    fi
done
# Ask whether the MySQL database should be restored.
echo "Would you like to restore the MySQL database to $mysqlLocation?"
echo -e "Enter '\e[1;31my\e[0m' and press Enter to '\e[1;31mRESTORE\e[0m' the MySQL database."
echo -e "Enter '\e[05;34mn\e[0m' and press Enter to '\e[1;34mNOT RESTORE\e[0m' the MySQL database."
read answer
answer=${answer//[^a-zA-Z]/}
x=1
while [[ $x==1 ]]
do
    if [[ $answer == "y" || $answer == "Y" ]]
    then
        mysqlRestore=1
        break
    elif [[ $answer == "n" || $answer == "N" ]]
    then
        echo "User chose to not restore MySQL database."
        break
    else
        echo "Would you like to restore the MySQL database to $mysqlLocation?"
        echo -e "Enter '\e[1;31my\e[0m' and press Enter to '\e[1;31mRESTORE\e[0m' the MySQL database."
        echo -e "Enter '\e[05;34mn\e[0m' and press Enter to '\e[1;34mNOT RESTORE\e[0m' the MySQL database."
        read answer
        answer=${answer//[^a-zA-Z]/}
    fi
done
# Start time variable for runtime
res1=$(date +%s.%N)
##################################################
# Decompresses the backup file
##################################################
if [[ $mibRestore == 0 && $mysqlRestore == 0 ]]
then
    echo "No files choose to be restored"
    exit 1
else
    # The chosen backup directory is expected to contain exactly one
    # compressed archive, extracted into $temporaryDir (/tmp/backup/...).
    compressedBackupFileLocation="${backupDir}/${inputBackupDirectory}"
    backupFileName=$(ls ${compressedBackupFileLocation})
    backupFile=$compressedBackupFileLocation/$backupFileName
    # Decompresses user chosen backup directory to
    echo "Decompressing backup directory"
    tar -xjvf $backupFile -C $temporaryDir
    ###################################################
    # Restoration of MIBs, MySQL database directories
    ###################################################
    if [[ $mibRestore == 1 ]]
    then
        echo "Copying MIBs to $mibsLocation..."
        cp -rpfv /tmp/backup/mibs/* $mibsLocation/
        chown -R root:root $mibsLocation
    fi
    if [[ $mysqlRestore == 1 ]]
    then
        cd /root
        # Stop every service that holds the database open before copying
        # raw InnoDB files into place, then fix ownership/permissions.
        echo "Stopping apache daemon."
        service httpd stop
        echo "Stopping Zabbix daemon."
        service zabbix-server stop
        echo "Stopping mysqld daemon."
        service mysqld stop
        echo "Restoring MySQL database..."
        cp -rpfv /tmp/backup/mysqlBackup/* $mysqlLocation
        echo "Changing permissions of files in $mysqlLocation"
        chown -R mysql:mysql $mysqlLocation
        chmod 660 $mysqlLocation/ib*
        chmod 640 $mysqlLocation/backup-my.cnf
        chmod 660 $mysqlLocation/zabbix/*
        chmod 700 $mysqlLocation/zabbix/
        echo "Starting mysql daemon.."
        service mysqld start
        echo "Starting Zabbix server daemon."
        service zabbix-server start
        echo "Starting apache daemon."
        service httpd start
    fi
    # Safety net: make sure zabbix-server is running even if only MIBs were
    # restored.
    zabbixStatus=$(service zabbix-server status | awk '{print $3}')
    if [[ $zabbixStatus == "stopped" ]]
    then
        service zabbix-server start
    fi
    # remove temporary backup directory located in $backupDir
    rm -rfv /tmp/backup
    # Statements below returns runtime of current script
    # These values are used in conjunction with the res1 variable above.
    res2=$(date +%s.%N)
    dt=$(echo "$res2 - $res1" | bc)
    dd=$(echo "$dt/86400" | bc)
    dt2=$(echo "$dt-86400*$dd" | bc)
    dh=$(echo "$dt2/3600" | bc)
    dt3=$(echo "$dt2-3600*$dh" | bc)
    dm=$(echo "$dt3/60" | bc)
    ds=$(echo "$dt3-60*$dm" | bc)
    printf "Total runtime: %d:%02d:%02d:%02.4f\n" $dd $dh $dm $ds
fi
d771c47fd7f40d5267159ef69bbed00048908a7a | Shell | ricofehr/nextdeploy | /vagrant/modules/pm/files/scripts/puma-restart | UTF-8 | 142 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Restart the puma application server as the unprivileged user by chaining
# the companion stop/start scripts with a settle delay in between.
# if we are root, exit
(( $UID == 0 )) && echo "please execute with modem user, not root" && exit 1
puma-stop
# Give the old workers time to release sockets before starting again.
sleep 5
puma-start
794256be239ba1b4783137fa861ddedb1d90d336 | Shell | mmockus/scripts | /updatehosts.sh | UTF-8 | 897 | 2.953125 | 3 | [] | no_license | ## update hostname
## sudo hostname new-server-name-here
## -- On localhost
## scp .scriptlocation username@serverip:updatehosts.sh
## remote host usage
## sudo ./updatehosts.sh $HOSTNAME <<newname>>
## https://git.io/JWQWt
## curl https://raw.githubusercontent.com/mmockus/scripts/main/updatehosts.sh --output updatehosts.sh
## curl https://raw.githubusercontent.com/mmockus/scripts/main/updatehosts.sh | sh -s - oldhost newhost
oldname=$1
newname=$2
if [ -z "$oldname" ]
then
exit N
fi
if [ -z "$newname" ]
then
exit N
fi
sudo hostnamectl set-hostname $newname
## update the hosts
sudo sed -i "s/$oldname/$newname/g" /etc/hosts
## Reset machineId
sudo rm -f /etc/machine-id /var/lib/dbus/machine-id
sudo dbus-uuidgen --ensure=/etc/machine-id
sudo dbus-uuidgen --ensure
## regen SSH
## regen ssh keys
sudo rm /etc/ssh/ssh_host_*
sudo dpkg-reconfigure openssh-server
##
sudo reboot
| true |
7dbb05aaa3bc663214326246ce1e2f27e7dc0c16 | Shell | crawley/qcif-mailout | /bin/rolling-outage | UTF-8 | 1,805 | 3.71875 | 4 | [] | no_license | #!/bin/bash
# Send a rolling-outage notification e-mail (notice / start / end phases)
# to the owners of instances on the named compute nodes, via the mailout
# tool in ~/git/qcif-mailout.
if [ $# -eq 0 ] ; then
    echo usage "bin/rolling-outage [<mailout opts>] <type> <opts> <CN>"
    exit 1
fi

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Consumes leading mailout options; presumably sets MARGS and ARTICLE.
. $DIR/process-options.sh

# Select the message template and subject line from the outage phase.
TYPE=$1
case $TYPE in
    -*)
        echo misplaced option $ARG
        exit 1
        ;;
    notice)
        # "notice" takes a free-form timeframe phrase as its next argument.
        WHEN="$2"
#        if [ expr match "$WHEN" " " == 0 ] ; then
#            echo "Invalid '<when>' phrase following 'notice'"
#            exit 1
#        fi
        shift 2
        TEMPLATE=rolling-maint-notice
        SUBJECT="QRIScloud instance alert: rolling outage $WHEN"
        ;;
    start)
        TEMPLATE=rolling-maint-start
        SUBJECT="QRIScloud instance alert: rolling outage starting now"
        shift 1
        ;;
    end)
        TEMPLATE=rolling-maint-end
        SUBJECT="QRIScloud instance alert: rolling outage ended"
        shift 1
        ;;
    *)
        echo "Unknown type"
        exit 1
        ;;
esac

# Remaining arguments are short compute-node names; expand each to a fully
# qualified --host option for mailout.
while [ $# -gt 0 ] ; do
    case $1 in
        -*)
            echo misplaced option $ARG
            exit 1
            ;;
        *)
            ARGS="$ARGS --host $1.qld.nectar.org.au"
            shift
            ;;
    esac
done

if [ "$ARGS" == "" ] ; then
    echo "No compute node names supplied"
    exit 1
fi

# The mail references a pre-created Zendesk help-centre article.
if [ "$ARTICLE" == "" ] ; then
    echo "Missing --article option. The rolling-update script assumes that"
    echo "you have created an article in the Zendesk help centre."
    exit 1
fi

URL=https://support.qriscloud.org.au/hc/en-us/articles/$ARTICLE

cd ~/git/qcif-mailout
. venv/bin/activate

mailout/mailout.py $MARGS \
    -s "$SUBJECT" \
    -p "timeframe=$WHEN" -p "url=$URL" \
    --template $TEMPLATE --by-group instances $ARGS \
    --managers --members
exit
| true |
b9d655c91f8651aaca11747010d4876699ecbf53 | Shell | huileizhan227/untitled | /qa-tools/follow/check_topic_name.sh | UTF-8 | 183 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env bash
# Report duplicated topic names: filter CSV rows ($1) to the given operator
# id ($2), take the topic-name column (4th field), and count duplicates.
# (The echoed banner below is user-facing output, left as-is.)
echo "发现重复的topic名字"
echo arg1: file, arg2: oper id
cat $1 | sort | uniq | sort | awk -F "," -v oper=$2 '$1==oper{print $4}' | sort | uniq -c | sort
| true |
a42363754239b017e447bba9b4f9d26f6301eee3 | Shell | DruidBSD/druid83 | /src/freebsd/repos/8.3-RELEASE-amd64/base/install.sh | UTF-8 | 442 | 3.78125 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
#
# $FreeBSD: src/release/scripts/base-install.sh,v 1.6.36.1.8.1 2012/03/03 06:15:13 kensmith Exp $
#
# Extract the FreeBSD base distribution split archive (base.aa, base.ab, …)
# over the running system (or $DESTDIR).  Requires root and an explicit
# interactive confirmation, since it overwrites installed files.
if [ "`id -u`" != "0" ]; then
    echo "Sorry, this must be done as root."
    exit 1
fi
echo "You are about to extract the base distribution into ${DESTDIR:-/} - are you SURE"
echo -n "you want to do this over your installed system (y/n)? "
read ans
if [ "$ans" = "y" ]; then
    # --unlink removes existing files before extracting replacements.
    cat base.?? | tar --unlink -xpzf - -C ${DESTDIR:-/}
fi
| true |
79353df2737ed7bbd225ad349376dc04eb0837d3 | Shell | futurewei-cloud/fksdr | /dorado_poc/backup_cleanup | UTF-8 | 297 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
kubectl -n velero exec -ti `kubectl -n velero get pod | grep "^velero" | awk '{print $1}'` -- /bin/bash -c 'rm -rf /velero-pvc/backups/*'
for BACKUP in `../bin/velero backup get | grep vbackup | awk '{print $1}'`;do echo cleanup: $BACKUP;kubectl -n velero delete backup $BACKUP;done
| true |
0de95b12d0f7cd2c062ec00eb44f4b11a747f817 | Shell | bytedance/fedlearner | /deploy/scripts/aliyun/install-add-on.sh | UTF-8 | 6,300 | 3.1875 | 3 | [
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
IMAGE_HUB_URL=$1
IMAGE_HUB_USERNAME=$2
IMAGE_HUB_PASSWORD=$3
EXTERNAL_NAME=$4
GRPC_SSL_NAME=$5
DB_PASSWORD=$6
ES_PASSWORD=$7
DOMAIN_URL=$8
REGION="cn-beijing"
ZONE_ID="cn-beijing-h"
GENERATER_NAME="fedlearnerwins"
# Print the given error message and abort the script with status 1.
function echo_exit {
    # Quote the expansion so the message is printed verbatim; the original
    # unquoted form word-split the text (collapsing whitespace) and
    # glob-expanded any wildcard characters in it.
    echo "$1"
    exit 1
}
# Deploy the fedlearner add-on chart, then upgrade fedlearner-stack and
# fedlearner with connection details discovered from Aliyun (NAS mount
# target, Elasticsearch instance, RDS endpoint).  Uses ./config as the
# kubeconfig and aborts via echo_exit when a required resource is missing.
function install {
    # Append the gRPC ingress snippets only once (idempotent re-runs).
    cat ../../charts/fedlearner-add-on/configuration-snippet.txt | grep grpc_set_header >/dev/null 2>&1
    if [ $? -ne 0 ]
    then
        echo "grpc_set_header Host $GRPC_SSL_NAME;" >> ../../charts/fedlearner-add-on/configuration-snippet.txt
    fi
    cat ../../charts/fedlearner-add-on/server-snippet.txt | grep grpc_ssl_name >/dev/null 2>&1
    if [ $? -ne 0 ]
    then
        echo "grpc_ssl_name $GRPC_SSL_NAME;" >> ../../charts/fedlearner-add-on/server-snippet.txt
    fi
    CURRENT_DIR=`pwd`
    export KUBECONFIG="$CURRENT_DIR/config"
    helm install fedlearner-add-on ../../charts/fedlearner-add-on \
        --set imageCredentials.registry=$IMAGE_HUB_URL \
        --set imageCredentials.username=$IMAGE_HUB_USERNAME \
        --set imageCredentials.password=$IMAGE_HUB_PASSWORD \
        --set service.externalName=$EXTERNAL_NAME
    # Look up the NAS file system created earlier under $GENERATER_NAME and
    # wire its mount target into the nfs-client provisioner.
    FILE_SYSTEM_ID=`aliyun nas DescribeFileSystems --Description $GENERATER_NAME | grep FileSystemId | awk -F "\"" '{print $4}'`
    if [ -n "$FILE_SYSTEM_ID" ]
    then
        MOUNT_TARGET_DOMAIN=`aliyun nas DescribeMountTargets --FileSystemId $FILE_SYSTEM_ID | grep MountTargetDomain | awk -F "\"" '{print $4}'`
        ES_INSTANCE_ID=`aliyun elasticsearch ListInstance --description $GENERATER_NAME | grep instanceId | awk -F "\"" '{print $4}' | head -1`
        helm upgrade fedlearner-stack ../../charts/fedlearner-stack --set nfs-server-provisioner.enabled=false \
            --set nfs-client-provisioner.enabled=true \
            --set nfs-client-provisioner.nfs.server=$MOUNT_TARGET_DOMAIN \
            --set mariadb.enabled=false \
            --set 'elastic-stack.filebeat.indexTemplateLoad[0]'="$ES_INSTANCE_ID.elasticsearch.aliyuncs.com:9200" \
            --set 'ingress-nginx.controller.extraVolumeMounts[0].name=fedlearner-proxy-client' \
            --set 'ingress-nginx.controller.extraVolumeMounts[0].mountPath=/etc/ingress-nginx/client/' \
            --set 'ingress-nginx.controller.extraVolumes[0].name=fedlearner-proxy-client' \
            --set 'ingress-nginx.controller.extraVolumes[0].secret.secretName=fedlearner-proxy-client'
    else
        echo_exit "Failed to update fedlearner-stack since missing MOUNT_TARGET_DOMAIN."
    fi
    # Find the RDS MySQL endpoint inside the project VPC and point the web
    # console at it, the managed Elasticsearch, and the external domain.
    VPC_ID=`aliyun vpc DescribeVpcs --VpcName $GENERATER_NAME | grep VpcId | awk -F "\"" '{print $4}'`
    if [[ $VPC_ID == "vpc"* ]]
    then
        DB_INSTANCE_ID=`aliyun rds DescribeDBInstances --VpcId $VPC_ID | grep \"DBInstanceId\" | awk -F "\"" '{print $4}' | head -1`
        if [ -n "$DB_INSTANCE_ID" ]
        then
            DB_URL=`aliyun rds DescribeDBInstanceNetInfo --DBInstanceId $DB_INSTANCE_ID | grep ConnectionString\" | awk -F "\"" '{print $4}'`
            helm upgrade fedlearner ../../charts/fedlearner \
                --set fedlearner-web-console.cluster.env.DB_USERNAME=fedlearner \
                --set fedlearner-web-console.cluster.env.DB_PASSWORD=$DB_PASSWORD \
                --set fedlearner-web-console.cluster.env.DB_HOST=$DB_URL \
                --set fedlearner-web-console.cluster.env.DB_PORT=3306 \
                --set fedlearner-web-console.cluster.env.ES_HOST="$ES_INSTANCE_ID.elasticsearch.aliyuncs.com" \
                --set fedlearner-web-console.cluster.env.ES_PASSWORD="$ES_PASSWORD" \
                --set fedlearner-web-console.ingress.host="fedlearner-webconsole$DOMAIN_URL" \
                --set fedlearner-operator.extraArgs.ingress-extra-host-suffix=$DOMAIN_URL \
                --set fedlearner-operator.extraArgs.ingress-client-auth-secret-name="default/ca-secret" \
                --set fedlearner-operator.extraArgs.ingress-enabled-client-auth=true \
                --set fedlearner-operator.extraArgs.ingress-secret-name=fedlearner-proxy-server \
                --set fedlearner-operator.ingress.host="fedlearner-operator$DOMAIN_URL"
        else
            echo_exit "Failed to update fedlearner-stack since missing DB_INSTANCE_ID."
        fi
    else
        echo_exit "Failed to update fedlearner-stack since missing VPC_ID."
    fi
    # Reload the filebeat config secret (if a local filebeat.yml exists) and
    # bounce the filebeat pods so they pick it up.
    if [ -f "filebeat.yml" ]; then
        kubectl delete secret fedlearner-stack-filebeat
        kubectl create secret generic fedlearner-stack-filebeat --from-file=./filebeat.yml
    fi
    rm -rf filebeat.yml
    kubectl get pod | grep fedlearner-stack-filebeat | awk -F " " '{print $1}' | xargs kubectl delete pod
}
# Print command-line help for this installer.
function usage {
    # Identical text to the original echo-per-line version, emitted through
    # a single quoted here-doc (no expansions occur in the body).
    cat <<'USAGE_EOF'
Usage: 
  ./install-add-on.sh image_hub_url image_hub_username image_hub_password external_name grpc_ssl_name db_password domain_url

Params:

  image_hub_url: the docker image hub url, required
  image_hub_username: the docker image hub username, required
  image_hub_password: the docker image hub password, required
  external_name: the ip address for external service, required
  grpc_ssl_name: the grpc ssl name, required
  db_password: the database password, required
  es_password: the elasticsearch password, required
  domain_url: the domain url, required
USAGE_EOF
}
# Entry point: run the installation only when every required parameter was
# supplied (they are bound from positional arguments at the top of the
# script); otherwise show usage and fail.
if [[ -z $IMAGE_HUB_URL ]] || [[ -z $IMAGE_HUB_USERNAME ]] || [[ -z $IMAGE_HUB_PASSWORD ]] || [[ -z $EXTERNAL_NAME ]] || [[ -z $GRPC_SSL_NAME ]] || [[ -z $DOMAIN_URL ]] || [[ -z $DB_PASSWORD ]] || [[ -z $ES_PASSWORD ]]
then
    usage
    exit 1
else
    install
fi
| true |
89fa673dce542917c9825745af674f42f15e3d0b | Shell | Rafid013/OS-Assignments--before-NACHOS- | /Offline 1/1405013.sh | UTF-8 | 610 | 3.734375 | 4 | [] | no_license | #!/bin/bash
a=1
file_struct_traverse(){
cd "$1"
for file in *
do
if [ -d "$file" ]
then
b=`expr $a - 1`
for((i=0;i<$b;i++))
do
echo -n '| '
done
echo "|--$file"
a=`expr $a + 1`
file_struct_traverse "$file"
elif [ -f "$file" ]; then
b=`expr $a - 1`
for((i=0;i<$b;i++))
do
echo -n '| '
done
echo "|--$file"
fi
done
a=`expr $a - 1`
cd ../
}
# With no arguments, print the tree of the current directory; otherwise
# print one tree per existing argument, reporting the ones that don't exist.
if [ $# -eq 0 ]
then
    echo .
    file_struct_traverse .
else
    for i in $*
    do
        if [ -e "$i" ]
        then
            echo "$i"
            file_struct_traverse "$i"
        else
            echo "$i does not exist"
        fi
    done
fi
| true |
64fd85a4da0e76f8b76d5ca41551993a5f2795f6 | Shell | lordsha0/calc | /calc | UTF-8 | 320 | 3.328125 | 3 | [] | no_license | #!/usr/bin/sh
# Thin wrapper around calc.py: -a/-d/-m/-s select add/divide/multiply/
# subtract.  NOTE(review): each option is declared as taking an argument
# (including h, so a bare -h errors), but the handlers ignore OPTARG and
# read the two operands positionally as $2 and $3 — so only a single
# "calc -X num1 num2" invocation works as intended; verify before reuse.
while getopts a:d:m:s:h: option
do
case "${option}"
in
a) /home/sha0/documents/calc/calc.py a $2 $3;;
d) /home/sha0/documents/calc/calc.py d $2 $3;;
m) /home/sha0/documents/calc/calc.py m $2 $3;;
s) /home/sha0/documents/calc/calc.py s $2 $3;;
h) echo 'Please specify an operator and numbers.';;
esac
done
| true |
a4123c0fcc0eeffc27118ca61cd99db7cb974ed7 | Shell | dse/git-scripts | /bin/git-push-from | UTF-8 | 968 | 3.6875 | 4 | [] | no_license | #!/usr/bin/env bash
set -o errexit
set -o pipefail
progname="$(basename "$0")"
dirname="$(dirname "$0")"
. "${dirname}/../share/bash-getlongopts/getlongopts.sh"
. "${dirname}/../share/git-scripts/git-scripts.sh"
# Push the given branch without leaving the current one: check it out,
# push, then check the original branch back out.  croak/sendoff come from
# the sourced git-scripts.sh helpers; each heredoc is the message shown on
# that step's failure (croak restores the original branch).
main () {
    if (( $# < 1 )) ; then
        >&2 echo "usage: git push-from <branch>"
        exit 1
    fi
    local dest source
    dest="$1"
    # Remember the branch we started on so we can return to it.
    source="$(git rev-parse --abbrev-ref HEAD)"
    git checkout "${dest}" || croak $? "${source}" <<EOF
|
|*** \`git checkout ${dest}\` failed.
EOF
    git push || croak $? "${source}" <<EOF
|
|*** checkout successful, but \`git push\` from ${dest} failed.
EOF
    git checkout "${source}" || croak $? "${source}" <<EOF
|
|*** checkout and push successful, but \`git checkout ${source}\` failed.
EOF
    sendoff <<EOF
|
|*** ALL DONE. :-)
EOF
}

###############################################################################

main "$@"
| true |
a63bd9c717c8cb9429af8867cf2d051886f78459 | Shell | scottwedge/OpenStack-Stein | /solum-6.0.0/contrib/infra/build-farm/images/guestagent/build-ubuntu12_04-docker.sh | UTF-8 | 210 | 2.984375 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
set -e
DOCKERFILE_PATH="."
if [[ ! -z "$1" ]]; then
DOCKERFILE_PATH=$1
fi
sudo docker build -t solum/guestagent:u1304 $DOCKERFILE_PATH
sudo docker tag solum/guestagent:u1304 solum/guestagent
| true |
f1e65d437476c1d4878e4ef70ba84f5bcce0d34b | Shell | enterprise001/enigma2-plugins-sh4 | /epanel/src/script/Backup_Image.sh | UTF-8 | 1,327 | 3.421875 | 3 | [] | no_license | #!/bin/sh
#DIRECTORY=$1
# Destination for the backup: a disk/stick mounted (or mountable) here.
DIRECTORY="/media/hdd"
# MTD partition numbers for boot and root filesystems.
# NOTE(review): MTDBOOT is defined but never used below -- confirm intent.
MTDBOOT=5
MTDROOT=6
ISAVAILABLE=`mount | grep hdd`
if [ -z "$ISAVAILABLE" ]; then
    echo "Try to mount sda1 to /media/hdd"
    mount /dev/sda1 /media/hdd
    ISAVAILABLE=`mount | grep sda1`
fi
if [ ! -z "$ISAVAILABLE" ]; then
    # Only the "spark" receiver model is supported.
    if grep -qs 'spark' /proc/stb/info/model ; then
        BOXTYPE=spark
        # jffs2 image options: 128 KiB erase size, no cleanmarkers.
        OPTIONS="-e 0x20000 -n"
    else
        echo "Box not found !!!"
        exit 0
    fi
    echo $BOXTYPE " found"
    DATE=`date +%Y%m%d`
    MKFS=/sbin/mkfs.jffs2
    BACKUPIMAGE="e2jffs2.img"
    if [ ! -f $MKFS ] ; then
        echo $MKFS" not found"
        exit 0
    fi
    # Mount the live root MTD partition under a scratch directory and pack
    # it into a fresh jffs2 image next to a copy of the kernel.
    rm -rf "$DIRECTORY/tmp/root"
    mkdir -p "$DIRECTORY/tmp/root"
    if [ ! -e "$DIRECTORY/enigma2-$DATE" ]; then
        mkdir -p "$DIRECTORY/enigma2-$DATE"
    fi
    mount -t jffs2 /dev/mtdblock$MTDROOT "$DIRECTORY/tmp/root"
    echo "Copying uImage"
    cp /boot/uImage "$DIRECTORY/enigma2-$DATE/uImage"
    echo "Create root.jffs2, please wait..."
    $MKFS --root="$DIRECTORY/tmp/root" --faketime --output="$DIRECTORY/tmp/$BACKUPIMAGE" $OPTIONS
    mv "$DIRECTORY/tmp/$BACKUPIMAGE" "$DIRECTORY/enigma2-$DATE/"
    # Report success only if the image actually landed in the dated folder.
    if [ -f "$DIRECTORY/enigma2-$DATE/$BACKUPIMAGE" ] ; then
        echo "$BACKUPIMAGE can be found in: $DIRECTORY/enigma2-$DATE"
    else
        echo "Error"
    fi
    sync
    umount "$DIRECTORY/tmp/root"
    rm -rf "$DIRECTORY/tmp/root"
else
    echo "No hdd or usb-stick found!"
    exit 0
fi
exit 0
| true |
4896a65b0fb2c22f894f990e862793d4ec778a11 | Shell | msundara1/test | /container-scripts/release/run-ready-check.sh | UTF-8 | 1,261 | 3.84375 | 4 | [] | no_license |
# Readiness probe: wait for the app to start, curl its health endpoint,
# and create $READYFILE_LOCATION on success (a companion process watches
# for that file). Exits 0 when ready, 1 otherwise.

echo "=== Ready Check Starting ($HOSTNAME)"

if [ -z "$READYFILE_LOCATION" ]; then
    # Default Readyfile location
    READYFILE_LOCATION="/Readyfile"
fi

# Remove any stale marker; -f keeps this quiet when the file is absent
# (the bare `rm` used before printed a spurious error on every fresh run).
rm -f -- "$READYFILE_LOCATION"

if [ "$FORCE_READY" ]; then
    echo "=== FORCE_READY environment variable present, skipping ready check"
    touch "$READYFILE_LOCATION"
    exit 0
fi

echo "=== Waiting 5s for app to start... ($HOSTNAME)"
# This script should have been started in the background just before the app starts
sleep 5

# Curl the health endpoint to make sure the service is healthy before
# reporting ready; --retry gives the app some extra grace time.
echo "=== Starting curl ($HOSTNAME)"
response=$(curl -k https://127.0.0.1:443/v1/health --write-out %{http_code} --silent --output ./curl.out --retry 3 --retry-delay 5)
echo "=== response code from curl request: $response ($HOSTNAME)"
echo "== Curl output: ($HOSTNAME)"
cat ./curl.out
echo "" # above output likely doesn't contain ending newline

# String comparison: $response can be empty or non-numeric when curl
# itself fails, which made the old unquoted `[ $response -eq 200 ]` error.
if [ "$response" = "200" ]; then
    echo "=== curl health test result: SUCCESS ($HOSTNAME)"
    echo "Creating ready file"
    touch "$READYFILE_LOCATION"
    exit 0
else
    echo "=== curl health test result: FAILURE ($HOSTNAME)"
    echo "Ready check Failed!" > /NotReady
    exit 1
fi | true |
0cf6f78877673c4169b2c67d3e0d6bf8871346fd | Shell | daniel-dona/mirascreen-sg20-nor-dump | /etc/fds/profile | UTF-8 | 273 | 2.640625 | 3 | [] | no_license | PATH=/bin:/sbin:/usr/bin:/usr/sbin
# Interactive prompt: [user@host cwd]$  (# when root).
PS1='[\u@\h \W]\$ '
# NOTE(review): this stores the literal path string "/bin/hostname", not
# the host's name -- confirm downstream consumers expect that.
HOSTNAME='/bin/hostname'
export PATH HOSTNAME PS1

# Convenience/safety aliases for interactive shells.
alias l.='ls -d .[a-zA-Z]* --color=tty'
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'

# Put the /usr/local trees ahead of the stock search path.
export PATH="/usr/local/sbin:/usr/local/bin:$PATH"
| true |
e0a7d4de054d1c087d5b628b4ac763b5e19a514b | Shell | localmed/heroku-buildpack-nginx-extras | /support/nginx-prep.sh | UTF-8 | 2,544 | 3.5 | 4 | [] | no_license | #!/bin/bash
# nginx source tarball to build.
NGINX_URL='http://nginx.org/download/nginx-1.4.1.tar.gz'
# Optional features, passed to ./configure as --with-<name>.
NGINX_INCLUDES=(
  "ipv6"
  "http_addition_module"
  "http_degradation_module"
  "http_flv_module"
  "http_gzip_static_module"
  "http_mp4_module"
  "http_random_index_module"
  "http_realip_module"
  "http_secure_link_module"
  "http_ssl_module"
  "http_stub_status_module"
  "http_sub_module"
  "http_dav_module"
  "http_xslt_module"
  "http_spdy_module"
)
# Bundled dependencies as "name|url" pairs; rendered below as
# --with-<name>=<unpacked dir>.
NGINX_DEPENDENCIES=("pcre|https://s3.amazonaws.com/heroku-nginx-extras/pcre-8.33.tar.gz")
# Third-party module tarballs, rendered below as --add-module=<dir>.
NGINX_MODULES=(
  "https://s3.amazonaws.com/heroku-nginx-extras/ngx_devel_kit-0.2.18.tar.gz"
  "https://s3.amazonaws.com/heroku-nginx-extras/ngx_http_auth_request_module-a29d74804ff1.tar.gz"
  "https://s3.amazonaws.com/heroku-nginx-extras/set-misc-nginx-module-0.22rc8.tar.gz"
)
# Directory inside the nginx tree where deps and modules are unpacked.
NGINX_CONTRIB_DIR="contrib"
# Download and unpack the nginx source tree plus all third-party modules
# and bundled dependencies, write the build script into it, and print the
# resulting directory name on stdout (callers capture it with $(...)).
function create_nginx_build_directory () {
  local nginx_dir="$(basename $NGINX_URL '.tar.gz')"
  local contrib_dir="$nginx_dir/$NGINX_CONTRIB_DIR"
  _get_file $NGINX_URL "./"
  # Unpack each third-party module under the contrib directory.
  for mod in "${NGINX_MODULES[@]}"; do
    _get_file $mod $contrib_dir
  done
  # Dependencies are "name|url" pairs; only the url part is fetched here.
  for dep in "${NGINX_DEPENDENCIES[@]}"; do
    local url="${dep##*|}"
    _get_file $url $contrib_dir
  done
  create_build_script "$nginx_dir/full_build.sh"
  echo "$nginx_dir"
}
# Generate an executable build script inside the source tree.
# $1 - path of the script to create (appended to if it exists).
# The heredoc is unquoted on purpose: $configure_cmd is expanded now,
# baking the full configure invocation into the generated file.
function create_build_script () {
  local script_path="$1"
  # Single quotes keep a literal $PREFIX_DIR reference, so the generated
  # script resolves it at build time from the PREFIX_DIR= line below.
  local prefix='--prefix=$PREFIX_DIR'
  local includes="$(create_includes_config)"
  local dependencies="$(create_dependencies_config)"
  local modules="$(create_modules_config)"
  local configure_cmd="./configure $prefix $includes $dependencies $modules"
  cat >> $script_path <<EOF
PREFIX_DIR=$PREFIX_DIR
$configure_cmd
make
EOF
  chmod +x $script_path
}
# Render the "name|url" entries of NGINX_DEPENDENCIES as configure flags,
# e.g. "--with-pcre=contrib/pcre-8.33", joined by single spaces.
function create_dependencies_config () {
  local flags=""
  local entry
  for entry in "${NGINX_DEPENDENCIES[@]}"; do
    local lib_name="${entry%%|*}"
    local tarball_url="${entry##*|}"
    local unpacked_dir="$NGINX_CONTRIB_DIR/$(basename "$tarball_url" '.tar.gz')"
    flags="$flags --with-$lib_name=$unpacked_dir"
  done
  echo "${flags# }"
}
# Emit one space-terminated "--with-<feature>" flag per NGINX_INCLUDES entry.
function create_includes_config () {
  local rendered
  rendered=$(printf -- "--with-%s " "${NGINX_INCLUDES[@]}")
  echo "$rendered"
}
# Build space-joined "--add-module=<dir>" flags, one per third-party
# module tarball URL in NGINX_MODULES.
function create_modules_config () {
  local -a flags=()
  local tarball
  for tarball in "${NGINX_MODULES[@]}"; do
    flags+=("--add-module=$NGINX_CONTRIB_DIR/$(basename "$tarball" '.tar.gz')")
  done
  echo "${flags[*]}"
}
# Download a .tar.gz from $1 and unpack it into directory $2 by streaming
# wget's stdout straight into tar.
# NOTE(review): -P is ignored when combined with -O -; the extraction
# directory is actually controlled by tar's -C flag.
function _get_file () {
  local url="$1"
  local dir="$2"
  wget -O - -P $dir $url | tar xzf - -C $dir
} | true |
a66728f36f95d8cbc4f01c9b409598b613fd02db | Shell | 2dpodcast/baids-daedalus | /00-init | UTF-8 | 3,280 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# Detect the host OS and run the matching per-platform setup, leaving
# CARDANO_NODE_SOCKET_PATH / CARDANO_NODE_PORT exported and OS set to
# "darwin" or "linux" for the command wrappers below.
function daedalus-cli-init() {
    UNAME=$(uname -s)
    case "${UNAME}" in
        Darwin*)
            daedalus-cli-init-darwin
            OS=darwin
            ;;
        Linux*)
            daedalus-cli-init-linux
            OS=linux
            ;;
    esac
}

# macOS: scrape the running cardano-wallet / cardano-node processes for
# the node socket path and port, then add the Daedalus app bundle's
# binaries to PATH. WORKING_DIR distinguishes a "Flight" install from
# "Mainnet" based on the running wallet process name.
function daedalus-cli-init-darwin() {
    export CARDANO_NODE_SOCKET_PATH=$(ps ax|grep -v "grep\|sed" | grep cardano-wallet | sed 's|\(.*cardano-wallet serve.*\)--node-socket \(.*cardano-node.socket\).*|\2|g')
    export CARDANO_NODE_PORT=$(pgrep -fl cardano-node.run | sed 's|\(.*\)--port \(.*\) --\(.*\)|\2|g')
    if [ ! -z "$(pgrep -fl 'cardano-wallet.*tls' | grep -i flight)" ]
    then
        WORKING_DIR="Flight"
    else
        WORKING_DIR="Mainnet"
    fi
    PATH=${PATH}:"/Applications/Daedalus ${WORKING_DIR}.app/Contents/MacOS"
}

# Linux: same process-scraping approach against the proot/nix-store based
# Daedalus install.
function daedalus-cli-init-linux() {
    # Get cardano-node's socket file
    export CARDANO_NODE_SOCKET_PATH=$(pgrep -fa cardano-wallet.*tls | sed -e 's|.*\(--node-socket\) \(.*cardano-node.socket\).*|\2|g')
    export CARDANO_NODE_PORT=$(pgrep -fa cardano-node.run | sed 's|\(.*\)--port \(.*\) --\(.*\)|\2|g')
    # Make Daedalus' binaries available to our shell
    CARDANO_BRIDGE_NIX_STORE_BINDIR="/$(dirname $(lsof -np $(pgrep -af cardano-wallet.*tls | grep -v "grep" | awk '{print $1}') 2>/dev/null | grep bin.cardano-wallet | awk '{print $NF}' | sed -e 's|\(^/tmp.*\)/\(nix.*\)|\2|'))"
    # NOTE(review): SOME_BASH is assigned but not used by the functions in
    # this file -- possibly kept for interactive use; confirm.
    SOME_BASH="/nix/store/$(ls -1 ~/.daedalus/nix/store | grep bash | head -n1)/bin/bash"
}
# Run cardano-cli (with the caller's arguments) inside a proot that maps
# Daedalus' private nix store to /nix/store so its binaries resolve.
function daedalus-cli-cardano-cli() {
    daedalus-cli-init
    # NOTE(review): "$@" is collapsed into one string here; arguments that
    # contain spaces will not survive -- confirm acceptable for this CLI.
    ARGS="$@"
    proot \
        -b ${HOME}/.daedalus/nix/store:/nix/store \
        bash -c "\
CARDANO_NODE_SOCKET_PATH=${CARDANO_NODE_SOCKET_PATH}; \
PATH=${PATH}:${CARDANO_BRIDGE_NIX_STORE_BINDIR}; \
cardano-cli ${ARGS}
"
}

# Launch cardano-wallet the way Daedalus does: natively on macOS (testnet
# vs mainnet/flight chosen from the already-running wallet process), or
# under proot on Linux by replaying the running wallet's own command line
# with the TLS/metadata options stripped by sed.
function daedalus-cli-run-cardano-wallet() {
    daedalus-cli-init
    case "${OS}" in
        darwin)
            if [ ! -z $(pgrep -fl 'cardano-wallet.*tls' | grep testnet) ]
            then
                cardano-wallet serve \
                    --testnet \
                    --shutdown-handler \
                    --database "${HOME}/Library/Application Support/Daedalus Testnet/wallets" \
                    --node-socket "${HOME}/Library/Application Support/Daedalus Testnet/cardano-node.socket"
            else
                cardano-wallet serve \
                    --mainnet \
                    --shutdown-handler \
                    --database "${HOME}/Library/Application Support/Daedalus ${WORKING_DIR}/wallets" \
                    --node-socket "${HOME}/Library/Application Support/Daedalus ${WORKING_DIR}/cardano-node.socket"
            fi
            ;;
        linux)
            proot \
                -b ${HOME}/.daedalus/nix/store:/nix/store \
                bash -c "\
CARDANO_NODE_SOCKET_PATH=${CARDANO_NODE_SOCKET_PATH}; \
PATH=${PATH}:${CARDANO_BRIDGE_NIX_STORE_BINDIR}; \
$(pgrep -fa cardano-wallet.*tls | sed -e 's|--shutdown.*--database|--database|' -e 's|--tls.*metadata.*.iohk.*io ||' -e 's|^\([0-9]\+\) ||' -e "s|/nix/store|$HOME/.daedalus/nix/store|")
"
            ;;
    esac
}
# FIXME: cardano-node: fdType: unsupported operation (unknown file type)
#function daedalus-cli-run-cardano-node() {
#
# daedalus-cli-init
#
# $(pgrep -a cardano-node | sed -e 's|^\([0-9]\+\) ||' -e "s|/nix/store|$HOME/.daedalus/nix/store|g" -e "s|--database-path chain|--database-path $(dirname $CARDANO_NODE_SOCKET_PATH)/chain|")
#
#
#}
| true |
b3c9bf708547472d2e2b920d9d439d31a9755371 | Shell | scottbri/pcftools | /pks-uaac-create-user.sh | UTF-8 | 1,132 | 3.640625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Create a PKS API user in UAA and grant it the requested cluster role.
if [ $# -lt 5 ] ; then
    echo "Usage: $0 <FQDN> {admin|manage} <username> <email> <password>"
    echo ""
    echo "where:"
    echo "   FQDN: the domain name or IP of PKS API server"
    echo "   admin or manage admin is able to manage any K8s cluster, manage only able to manage own"
    echo "   username: the user name to be created for PKS API access"
    echo "   email: the user's email address"
    echo "   password: the password for the new user"
    echo ""
    echo "Note: the script will prompt for the PKS UAA Management Admin Client Credential from PKS Tile"
    exit 1
fi

PKS_API_FQDN=$1
ROLE=$2
USERNAME=$3
EMAIL=$4
PASSWORD=$5

echo "Targeting $PKS_API_FQDN"
uaac target "$PKS_API_FQDN:8443" --skip-ssl-validation

echo ""
echo "Please provide the PKS UAA Management Admin Client Credential from the PKS Tile:"
# Bug fix: the original `read $PKS_CREDENTIAL` expanded the (empty)
# variable, so input went to $REPLY and an empty secret was passed to
# uaac below. -r keeps backslashes in the credential literal.
read -r PKS_CREDENTIAL

echo "Getting the administrative access token"
uaac token client get admin -s "$PKS_CREDENTIAL"
echo "Adding user $USERNAME to uaa"
uaac user add "$USERNAME" --emails "$EMAIL" -p "$PASSWORD"
echo "Assigning the user $USERNAME to the pks.clusters.$ROLE role"
uaac member add "pks.clusters.$ROLE" "$USERNAME"
| true |
110ffab60a68a08b2e93a81916e9ba79f49cbd15 | Shell | mrseanryan/ubuntu-scripts | /useful-scripts/yarn-install-version-x.sh | UTF-8 | 308 | 3.4375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Install a specific Yarn version system-wide from the official GitHub
# release .deb package.
if [ "$#" -ne 1 ]; then
    echo "USAGE: $0 <Yarn version like 1.15.2>"
    # Fixed: the old example printed "./$0", doubling the "./" prefix.
    echo "EXAMPLE: $0 1.15.2"
    exit 1
fi

VERSION=$1
DEB="yarn_${VERSION}_all.deb"

# Abort before dpkg runs if the download fails -- previously dpkg was
# invoked even after a failed fetch.
wget "https://github.com/yarnpkg/yarn/releases/download/v${VERSION}/${DEB}" || exit 1
sudo dpkg -i "$DEB"
yarn --version
| true |
18dae508596a975daed3ad276c79cfb6c3dea6f2 | Shell | udaykurien/bash-scripts-for-cloud-model | /verification_scripts_for_collision_efficiency_master_bidisperse/parent_loop_bulk_alternate_version.sh | UTF-8 | 8,980 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# The script contains the loops to cycle through all of the simulation files.
# External verification scripts are called from within the loops in this script.
# Path to base simulation directory
pathBaseSimulation="/home/ukurien/projects/def-yaumanko/ukurien/Clones_2"
# Path to base log directory
pathBaseLog="/home/ukurien/projects/def-yaumanko/ukurien/Clones_2/Logs/Parameter_Verification_Logs"
# Name of the log file
# nameLogFile="R30r30-R40r40-R50r50-gomic2ihydro0-gomic2ihydro1-collision.log"
# nameLogFile="R30-50_r30-50_rbyR0.2-0.2-1.0_edr0.002-0.050_microphysics_spinup.log"
# Next sequential log number = count of existing numbered log files.
# NOTE(review): parsing `ls | wc -l` miscounts when no file matches the
# glob (the literal pattern is passed through) -- confirm acceptable.
tempLogNo=$(ls -l $pathBaseLog/parameter_verification_log*[0-9].log | wc -l)
nameLogFile=parameter_verification_logs$tempLogNo.log
# Final path to log file
pathLogFile="$pathBaseLog/$nameLogFile"
# Creation of log file; refuse to clobber an existing log.
if [ -f "$pathLogFile" ]
then
    echo "Conflict: Log file already exists!"
    echo "Exiting script"
    exit 1
else
    touch $pathLogFile
fi
# Path to subscripts
pathSubScripts="/home/ukurien/projects/def-yaumanko/ukurien/Github/Scripts/verification_scripts_for_collision_efficiency_master_bidisperse"
# Define run number to be included in the log file (interactive prompt).
echo "Enter run number"
read runNumber
# Loop variables: drop-size sweep bounds/step and r/R ratio sweep.
dropSizeLB=30
dropSizeUB=50
dropSizeInc=10
lbrbyR=0.1
ubrbyR=1.0
rbyRInc=0.1
# Eddy dissipation rates; edr1 is currently not used by any stage below.
edr1=0.000
edr2=0.002
edr3=0.005
edr4=0.010
edr5=0.020
edr6=0.050
counter=1
# Simulations stages to be verified ("Yes" enables the matching stage).
choice_gomic0="Yes"
choice_gomic1="Yes"
choice_gomic2ihydro0="Yes"
choice_gomic2ihydro1="Yes"
# Write run number to log file before writing other data
echo "--------------" >> $pathLogFile
echo "Run Number: $runNumber" >> $pathLogFile
date >> $pathLogFile
echo "--------------" >> $pathLogFile
# Labelling columns
# printf "SNo \t R \t r \t EDR \t nstop \t wall \t \t gomic \t ihydro \t Col. \t Run Time \t\n" >> $pathLogFile
printf "%-5s %-8s %-8s %-8s %-7s %-9s %-13s %-7s %-8s %-7s %-10s\n" "SNo" "R" "r" "TND" "EDR" "nstop" "Wall" "Gomic" "iHydro" "Col." "Run time" >> $pathLogFile
# Stage: scan the gomic0 output directories and append one log row per
# simulation directory that still exists on disk.
if [ $choice_gomic0 == "Yes" ]
then
    for EDR in $edr2 $edr3 $edr4 $edr5 $edr6
    do
        for (( dropSize=$dropSizeLB; dropSize<=$dropSizeUB; dropSize=$dropSize+$dropSizeInc ))
        do
            for rbyR in $(seq $lbrbyR $rbyRInc $ubrbyR)
            do
                # Calculating size of collected drop based on dropSize and rbyR
                # (bc with scale=0 truncates to an integer).
                colDropSize=$(echo "scale=0;($dropSize*$rbyR)/1" | bc)
                # ----------------------------------
                # Extracting data from gomic0
                # ----------------------------------
                # Final directory to scan
                finalDir="gomic0"
                # Constructing path to the directory
                pathSimulation="$pathBaseSimulation/$EDR/R$dropSize/R"$dropSize"r"$colDropSize"/$finalDir"
                # Check if pathSimulation exists before extracting data (this is necessary since completed simulations maybe shifted out of this directory)
                if test -d $pathSimulation
                then
                    # Subscripts may be sourced directly beneath this
                    # NOTE(review): they are expected to set the *_ret
                    # variables plus colNumber and runTime used below.
                    source $pathSubScripts/check_col_no_and_run_time.sh
                    source $pathSubScripts/check_simulation_parameters.sh
                    # Shell instructions can be included directly beneath this
                    # Recording data to file
                    printf "%-5s %-8s %-8s %-8s %-7s %-9s %-13s %-7s %-8s %-7s %-10s\n" ""$counter"a." "$dropSizeR_ret" "$dropSizer_ret" "$tnd_ret" "$edr_ret" "$nstop_ret" "$wall_ret" "$gomic_ret" "$ihydro_ret" "$colNumber" "$runTime" >> $pathLogFile
                    # Incrementing counter
                    counter=$(echo "$counter+1" | bc)
                fi
            done
        done
    done
fi
echo "------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- " >> $pathLogFile
# Reset the row counter, then scan the gomic1 output directories.
counter=1
if [ $choice_gomic1 == "Yes" ]
then
    for EDR in $edr2 $edr3 $edr4 $edr5 $edr6
    do
        for (( dropSize=$dropSizeLB; dropSize<=$dropSizeUB; dropSize=$dropSize+$dropSizeInc ))
        do
            for rbyR in $(seq $lbrbyR $rbyRInc $ubrbyR)
            do
                # Calculating size of collected drop based on dropSize and rbyR
                colDropSize=$(echo "scale=0;($dropSize*$rbyR)/1" | bc)
                # ----------------------------------
                # Extracting data from gomic1
                # ----------------------------------
                # Final directory to scan
                finalDir="gomic1"
                # Constructing path to the directory
                pathSimulation="$pathBaseSimulation/$EDR/R$dropSize/R"$dropSize"r"$colDropSize"/$finalDir"
                # Check if pathSimulation exists before extracting data (this is necessary since completed simulations maybe shifted out of this directory)
                if test -d $pathSimulation
                then
                    # Subscripts may be sourced directly beneath this
                    source $pathSubScripts/check_col_no_and_run_time.sh
                    source $pathSubScripts/check_simulation_parameters.sh
                    # Shell instructions can be included directly beneath this
                    # Recording data to file
                    printf "%-5s %-8s %-8s %-8s %-7s %-9s %-13s %-7s %-8s %-7s %-10s\n" ""$counter"a." "$dropSizeR_ret" "$dropSizer_ret" "$tnd_ret" "$edr_ret" "$nstop_ret" "$wall_ret" "$gomic_ret" "$ihydro_ret" "$colNumber" "$runTime" >> $pathLogFile
                    # Incrementing counter
                    counter=$(echo "$counter+1" | bc)
                fi
            done
        done
    done
fi
echo "------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- " >> $pathLogFile
# NOTE(review): this stage starts numbering at 0 while the first two
# stages start at 1 -- possibly unintentional; confirm.
counter=0
if [ $choice_gomic2ihydro0 == "Yes" ]
then
    for EDR in $edr2 $edr3 $edr4 $edr5 $edr6
    do
        for (( dropSize=$dropSizeLB; dropSize<=$dropSizeUB; dropSize=$dropSize+$dropSizeInc ))
        do
            for rbyR in $(seq $lbrbyR $rbyRInc $ubrbyR)
            do
                # Calculating size of collected drop based on dropSize and rbyR
                colDropSize=$(echo "scale=0;($dropSize*$rbyR)/1" | bc)
                # ----------------------------------
                # Extracting data from gomic2ihydro0
                # ----------------------------------
                # Final directory to scan
                finalDir="gomic2ihydro0"
                # Constructing path to the directory
                pathSimulation="$pathBaseSimulation/$EDR/R$dropSize/R"$dropSize"r"$colDropSize"/$finalDir"
                # Check if pathSimulation exists before extracting data (this is necessary since completed simulations maybe shifted out of this directory)
                if test -d $pathSimulation
                then
                    # Subscripts may be sourced directly beneath this
                    source $pathSubScripts/check_col_no_and_run_time.sh
                    source $pathSubScripts/check_simulation_parameters.sh
                    # Shell instructions can be included directly beneath this
                    # Recording data to file
                    printf "%-5s %-8s %-8s %-8s %-7s %-9s %-13s %-7s %-8s %-7s %-10s\n" ""$counter"a." "$dropSizeR_ret" "$dropSizer_ret" "$tnd_ret" "$edr_ret" "$nstop_ret" "$wall_ret" "$gomic_ret" "$ihydro_ret" "$colNumber" "$runTime" >> $pathLogFile
                    # Incrementing counter
                    counter=$(echo "$counter+1" | bc)
                fi
            done
        done
    done
fi
echo "------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- " >> $pathLogFile
# Final stage: gomic2ihydro1. Rows here use the "b." label suffix (the
# previous stages use "a.").
counter=0
if [ $choice_gomic2ihydro1 == "Yes" ]
then
    for EDR in $edr2 $edr3 $edr4 $edr5 $edr6
    do
        for (( dropSize=$dropSizeLB; dropSize<=$dropSizeUB; dropSize=$dropSize+$dropSizeInc ))
        do
            for rbyR in $(seq $lbrbyR $rbyRInc $ubrbyR)
            do
                # Calculating size of collected drop based on dropSize and rbyR
                colDropSize=$(echo "scale=0;($dropSize*$rbyR)/1" | bc)
                # ----------------------------------
                # Extracting data from gomic2ihydro1
                # ----------------------------------
                # Final directory to scan
                finalDir="gomic2ihydro1"
                # Constructing path to the directory
                pathSimulation="$pathBaseSimulation/$EDR/R$dropSize/R"$dropSize"r"$colDropSize"/$finalDir"
                # Check if pathSimulation exists before extracting data (this is necessary since completed simulations maybe shifted out of this directory)
                if test -d $pathSimulation
                then
                    # Subscripts may be sourced directly beneath this
                    source $pathSubScripts/check_col_no_and_run_time.sh
                    source $pathSubScripts/check_simulation_parameters.sh
                    # Shell instructions can be included directly beneath this
                    # Recording data to file
                    printf "%-5s %-8s %-8s %-8s %-7s %-9s %-13s %-7s %-8s %-7s %-10s\n" ""$counter"b." "$dropSizeR_ret" "$dropSizer_ret" "$tnd_ret" "$edr_ret" "$nstop_ret" "$wall_ret" "$gomic_ret" "$ihydro_ret" "$colNumber" "$runTime" >> $pathLogFile
                    # Incrementing counter
                    counter=$(echo "$counter+1" | bc)
                fi
            done
        done
    done
fi
echo "------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- " >> $pathLogFile
# Preview the completed log file on stdout.
cat $pathLogFile
| true |
85961f5d426be5138fb42f482fb876022c448b25 | Shell | delkyd/alfheim_linux-PKGBUILDS | /geekbench227/PKGBUILD | UTF-8 | 1,142 | 2.640625 | 3 | [] | no_license | # Contributor: Roman Ajsin <aysin (dot) roman [at] gmail (dot) com>
# Maintainer: jose <jose1711 [at] gmail (dot) com>
# Arch PKGBUILD metadata for the (prebuilt, proprietary) Geekbench 2.2.7.
pkgname=geekbench227
_pkgname=geekbench
pkgver=2.2.7
pkgrel=1
pkgdesc="A cross-platform benchmark that measures processor and memory performance (last version to not require online access)"
arch=('i686' 'x86_64')
url="http://www.primatelabs.ca/geekbench/"
depends=('zlib' 'gcc-libs')
license=("custom")
conflicts=("geekbench")
source=("http://s3.amazonaws.com/geekbench/Geekbench-${pkgver}-Linux.tar.gz")
md5sums=('e62e986ca1424aedbe4ddedb52c95707')
# Prebuilt binaries: do not strip them.
options=('!strip')
# Install both architecture binaries and the data archive under /opt, then
# symlink the arch-appropriate binary into /usr/bin.
package() {
  install -D -m755 "$srcdir/dist/Geekbench-${pkgver}-Linux/${_pkgname}_x86_32" "$pkgdir/opt/${_pkgname}/${_pkgname}_x86_32"
  install -D -m755 "$srcdir/dist/Geekbench-${pkgver}-Linux/${_pkgname}_x86_64" "$pkgdir/opt/${_pkgname}/${_pkgname}_x86_64"
  install -D -m644 "$srcdir/dist/Geekbench-${pkgver}-Linux/geekbench.plar" "$pkgdir/opt/${_pkgname}/geekbench.plar"
  mkdir -p "$pkgdir/usr/bin"
  # Explicit if/else: the previous `[ ... ] && ln ... || ln ...` chain also
  # ran the x86_64 branch whenever the i686 symlink command itself failed.
  if [ "${CARCH}" = "i686" ]; then
    ln -s "/opt/${_pkgname}/${_pkgname}_x86_32" "$pkgdir/usr/bin/${_pkgname}"
  else
    ln -s "/opt/${_pkgname}/${_pkgname}_x86_64" "$pkgdir/usr/bin/${_pkgname}"
  fi
}
| true |
98f961cfac2dc189edfee19429fbda37a1fa4fe9 | Shell | VirgilSecurity/virgil-messenger-qt | /platforms/macos/tools/dmg-notarization.sh | UTF-8 | 4,123 | 3.671875 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Abort on the first failing command (xcrun/jq calls below).
set -e
#
# Global variables
#
# Directory containing this script (currently informational).
SCRIPT_FOLDER="$(cd $(dirname "$0") && pwd)"
#***************************************************************************************
# Print a blank spacer line, then the given text framed by banner rules.
print_message() {
    local rule="===================================="
    echo
    echo "$rule"
    echo "=== ${1}"
    echo "$rule"
}
# Pretty-print the notarization status pulled from the global INFO_OUTPUT
# (JSON produced by `xcrun altool --notarization-info` in notarize_dmg).
print_notarize_status() {
    echo "============================================"
    echo "=== Get status state: $(echo "${INFO_OUTPUT}" | jq -r '."success-message"')"
    echo "=== Status message: $(echo "${INFO_OUTPUT}" | jq -r '."notarization-info"."Status Message"')"
    echo "=== Status URL: $(echo "${INFO_OUTPUT}" | jq -r '."notarization-info"."LogFileURL"')"
    echo "=== Date: $(echo "${INFO_OUTPUT}" | jq -r '."notarization-info"."Date"')"
    echo "=== Status: $(echo "${INFO_OUTPUT}" | jq -r '."notarization-info"."Status"')"
    echo "=== Status code: $(echo "${INFO_OUTPUT}" | jq -r '."notarization-info"."Status Code"')"
    echo "============================================"
}
############################################################################################
# Print CLI usage and terminate the script successfully.
print_usage() {
    echo
    echo "$(basename ${0})"
    echo
    echo " -u < User name > - User name"
    echo " -p < Password > - Password"
    echo " -i < PKG identify > - Pkg bundle identify"
    echo " -f < Path to dmg file > - DMG file path"
    echo " -h - Print help"
    exit 0
}
############################################################################################
#
# Script parameters
#
############################################################################################
# Hand-rolled option parser; fills the globals used by notarize_dmg
# (USER_NAME, PASS, PKG_IDENTIFIER, DMG_FILE).
while [ -n "$1" ]
do
    case "$1" in
        -h) print_usage
            exit 0
            ;;
        -u) USER_NAME="$2"
            shift
            ;;
        -p) PASS="$2"
            shift
            ;;
        -i) PKG_IDENTIFIER="$2"
            shift
            ;;
        -f) DMG_FILE="$2"
            shift
            ;;
        *) print_usage;;
    esac
    shift
done
#***************************************************************************************
# Submit the DMG to Apple's notarization service, poll for the result
# (up to 300 tries, ~10 s apart), and staple the ticket on success.
# Exit codes: 0 when stapled OK, 127 on notarization/staple failure,
# 1 on polling timeout. Relies on the globals set by the parser above:
# DMG_FILE, PKG_IDENTIFIER, USER_NAME, PASS.
function notarize_dmg() {
    print_message "Send Application for Apple's notarization"
    NOTARIZE_OUTPUT=$(xcrun altool -t osx -f "${DMG_FILE}" --output-format json --primary-bundle-id "${PKG_IDENTIFIER}" --notarize-app --username "${USER_NAME}" -p "${PASS}")
    NOTARIZE_ID="$(echo "${NOTARIZE_OUTPUT}" | jq -r '."notarization-upload"."RequestUUID"')"
    echo "============================================"
    echo "=== Tool version: $(echo "${NOTARIZE_OUTPUT}" | jq -r '."tool-version"')"
    echo "=== Upload result: $(echo "${NOTARIZE_OUTPUT}" | jq -r '."success-message"')"
    echo "=== OS version: $(echo "${NOTARIZE_OUTPUT}" | jq -r '."os-version"')"
    echo "=== Notarization ID: ${NOTARIZE_ID}"
    echo "============================================"
    if [ "${NOTARIZE_ID}" == "" ] || [ "${NOTARIZE_ID}" == "null" ]; then
        echo "Error notarization"
        exit 127
    fi

    print_message "Get result of notarization"
    for count in $(seq 1 300); do
        echo "Wait .. ${count} of 300"
        # NOTE(review): the "10s" suffix requires GNU sleep; BSD/macOS
        # sleep only accepts plain numbers -- confirm which is on PATH.
        sleep 10s
        INFO_OUTPUT=$(xcrun altool --output-format json --notarization-info "${NOTARIZE_ID}" --username "${USER_NAME}" -p "${PASS}")
        NOTARIZE_STATUS_CODE="$(echo "${INFO_OUTPUT}" | jq -r '."notarization-info"."Status Code"')"
        NOTARIZE_STATUS="$(echo "${INFO_OUTPUT}" | jq -r '."notarization-info"."Status"')"
        print_notarize_status
        # Keep polling while Apple has not produced a final status yet.
        if [ "${NOTARIZE_STATUS_CODE}" == "null" ] || [ "${NOTARIZE_STATUS}" == "in progress" ]; then
            continue
        fi
        if [ "${NOTARIZE_STATUS_CODE}" != "0" ]; then
            exit 127
        else
            print_message "Staple result of the notarization"
            STAMPLE_OUTPUT=$(xcrun stapler staple -v "${DMG_FILE}" 2>&1 | tr -d "\n")
            if echo "${STAMPLE_OUTPUT}" | grep -q -F 'The staple and validate action worked!'; then
                echo ""
                exit 0
            else
                echo "${STAMPLE_OUTPUT}"
                exit 127
            fi
        fi
    done
    print_message "Notarization time out"
    # Bug fix: the original fell off the end of the loop here and the
    # script finished with status 0, reporting success after a timeout.
    exit 1
}
#***************************************************************************************
notarize_dmg
| true |
ef2633ff25f10bb8fa7774573d61632f60cdbd71 | Shell | ickc/toast | /wheels/test_local_cibuildwheel.sh | UTF-8 | 841 | 3.03125 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Before running this from the toast git checkout directory,
# you should pip install cibuildwheel
# Build a CPython 3.8 manylinux2010 x86_64 wheel with verbose output.
export CIBW_BUILD="cp38-manylinux_x86_64"
export CIBW_MANYLINUX_X86_64_IMAGE="manylinux2010"
export CIBW_BUILD_VERBOSITY=3
# Build-time configuration forwarded into the build container: link BLAS /
# LAPACK against OpenBLAS and make CMake verbose.
export CIBW_ENVIRONMENT_LINUX="TOAST_BUILD_BLAS_LIBRARIES='-lopenblas -fopenmp -lm -lgfortran' TOAST_BUILD_LAPACK_LIBRARIES='-lopenblas -fopenmp -lm -lgfortran' TOAST_BUILD_CMAKE_VERBOSE_MAKEFILE=ON"
export CIBW_BEFORE_BUILD_LINUX=./wheels/install_deps_linux.sh
export CIBW_BEFORE_TEST="pip3 install numpy && pip3 install mpi4py"
# Run the package's own test suite (2 OpenMP threads) against the built wheel.
export CIBW_TEST_COMMAND="export OMP_NUM_THREADS=2; python -c 'import toast.tests; toast.tests.run()'"
# Get the current date for logging
now=$(date "+%Y-%m-%d_%H:%M:%S")
# Run it, teeing all output to a timestamped log file.
cibuildwheel --platform linux --archs x86_64 --output-dir wheelhouse . 2>&1 | tee log_${now}
| true |
78b28c984a2f48fc26853a09170e2a56d30a06c2 | Shell | YMC-GitHub/setup-centos-in-window-with-virtualbox | /dist/uses-static-ip.sh | UTF-8 | 5,718 | 3.46875 | 3 | [] | no_license |
#!/bin/sh
###
# Built-in variables (defaults; overridable via the long options below)
###
# network interface name
NET_CARD_NAME=eth0
# machine (VM) IP address
VM_IPADDR=192.168.2.2
# network mask
VM_NETMASK=255.255.255.0
# gateway address
VM_GATEWAY=192.168.2.1
# an example network (not referenced elsewhere in this script)
ONE_NETWORK=192.168.2.0/24
# command action
ACTION="SET" #SET|REVOCER
# whether to restart networking after editing the config ("true"/"false")
RESTART_NETWORK=
# absolute directory of this script
# NOTE(review): the file uses bash features ([[ ]], arrays, `function`)
# despite the /bin/sh shebang -- it must actually run under bash; confirm.
THIS_FILE_PATH=$(cd `dirname $0`; pwd)
###
# Built-in functions
###
# Print a debug message depending on a debug switch value:
#   - switch containing "false"   -> suppressed
#   - any other non-empty switch  -> printed
#   - empty/absent switch         -> suppressed
# $1 - message, $2 - debug switch.
function ouput_debug_msg() {
    local debug_msg=$1
    local debug_swith=$2
    if [[ "$debug_swith" =~ "false" ]]; then
        # Deliberately discarded; keeps call sites symmetrical.
        echo "$debug_msg" >/dev/null 2>&1
    elif [ -n "$debug_swith" ]; then
        # Quoted so embedded whitespace in the message is preserved.
        echo "$debug_msg"
    fi
    # The original had a third `elif [[ ... =~ "true" ]]` branch; it was
    # unreachable because any value matching "true" is non-empty and had
    # already been handled by the branch above.
}
# Join a base path ($1) with a relative path ($2), resolving ".."
# components. Optional $3/$4 override the separator used to split each
# path (default "/"). The result is printed with a leading "/".
function path_resolve_for_relative() {
    local str1="${1}"
    local str2="${2}"
    local slpit_char1=/
    local slpit_char2=/
    if [[ -n ${3} ]]; then
        slpit_char1=${3}
    fi
    if [[ -n ${4} ]]; then
        slpit_char2=${4}
    fi
    # Split both paths into arrays on their separators.
    local arr1=(${str1//$slpit_char1/ })
    local arr2=(${str2//$slpit_char2/ })
    # Resolve the relative components:
    #  - iterate over the second array
    #  - ".." removes the last element of the first array
    #  - any other component is appended to the first array
    for val2 in ${arr2[@]}; do
        length=${#arr1[@]}
        if [ $val2 = ".." ]; then
            index=$(($length - 1))
            if [ $index -le 0 ]; then index=0; fi
            unset arr1[$index]
            #echo ${arr1[*]}
            #echo $index
        else
            index=$length
            arr1[$index]=$val2
            #echo ${arr1[*]}
        fi
    done
    # Join the components back into a "/"-prefixed string.
    local str3=''
    for i in ${arr1[@]}; do
        str3=$str3/$i
    done
    if [ -z $str3 ]; then str3="/"; fi
    echo $str3
}
# Resolve $2 against base path $1: absolute paths (leading "/") are
# returned as-is (minus any trailing slash); relative paths are delegated
# to path_resolve_for_relative. Optional $3/$4 override the separators.
function path_resolve() {
    local str1="${1}"
    local str2="${2}"
    local slpit_char1=/
    local slpit_char2=/
    if [[ -n ${3} ]]; then
        slpit_char1=${3}
    fi
    if [[ -n ${4} ]]; then
        slpit_char2=${4}
    fi
    # FIX: when passed an absolute path, an earlier version did not return
    # the absolute path itself.
    #str2="/d/"
    local str3=""
    # Drop a trailing "/" so "/d/" and "/d" resolve identically.
    str2=$(echo $str2 | sed "s#/\$##")
    ABSOLUTE_PATH_REG_PATTERN="^/"
    if [[ $str2 =~ $ABSOLUTE_PATH_REG_PATTERN ]]; then
        str3=$str2
    else
        str3=$(path_resolve_for_relative $str1 $str2 $slpit_char1 $slpit_char2)
    fi
    echo $str3
}
# Return a usage/help message, preferring an explicit message, then a help
# file's contents, then a fixed placeholder.
# $1 - explicit help text (may be empty)
# $2 - path to a file containing the help text (optional)
function get_help_msg() {
    local USAGE_MSG=$1
    local USAGE_MSG_FILE=$2
    # Quoted test: the original's unquoted `[ -z $USAGE_MSG ]` raised a
    # "too many arguments" error whenever the message contained spaces.
    if [ -z "$USAGE_MSG" ]; then
        if [[ -n "$USAGE_MSG_FILE" && -e "$USAGE_MSG_FILE" ]]; then
            USAGE_MSG=$(cat "$USAGE_MSG_FILE")
        else
            USAGE_MSG="no help msg and file"
        fi
    fi
    echo "$USAGE_MSG"
}
# Pull in related project directories (resolved relative to this script).
PROJECT_PATH=$(path_resolve $THIS_FILE_PATH "../")
HELP_DIR=$(path_resolve $THIS_FILE_PATH "../help")
SRC_DIR=$(path_resolve $THIS_FILE_PATH "../src")
TEST_DIR=$(path_resolve $THIS_FILE_PATH "../test")
DIST_DIR=$(path_resolve $THIS_FILE_PATH "../dist")
DOCS_DIR=$(path_resolve $THIS_FILE_PATH "../docs")
TOOL_DIR=$(path_resolve $THIS_FILE_PATH "../tool")
# Usage/help message, loaded from help/uses-static-ip.txt when present.
USAGE_MSG=
USAGE_MSG_PATH=$(path_resolve $THIS_FILE_PATH "../help")
USAGE_MSG_FILE=${USAGE_MSG_PATH}/uses-static-ip.txt
USAGE_MSG=$(get_help_msg "$USAGE_MSG" "$USAGE_MSG_FILE")
###
# Option grammar
###
GETOPT_ARGS_SHORT_RULE="--options h,d,"
GETOPT_ARGS_LONG_RULE="--long help,debug,net-card-name:,action:,restart-network:,vm-ipaddr:,vm-netmask:,vm-gateway:"
###
# Normalise the command line with getopt(1)
###
GETOPT_ARGS=$(
    getopt $GETOPT_ARGS_SHORT_RULE \
        $GETOPT_ARGS_LONG_RULE -- "$@"
)
###
# Re-set the positional parameters to the normalised list
###
eval set -- "$GETOPT_ARGS"
# below generated by write-sources.sh
# Walk the getopt-normalised argument list; option values land in ARG_*
# variables and are merged over the defaults further below.
# NOTE(review): -d/-h take no value yet -d does `shift 2`, and the *)
# fallback never shifts past "--" markers -- verify against real input.
while [ -n "$1" ]
do
    case $1 in
        --net-card-name)
            ARG_NET_CARD_NAME=$2
            shift 2
            ;;
        --action)
            ARG_ACTION=$2
            shift 2
            ;;
        --restart-network)
            ARG_RESTART_NETWORK=$2
            shift 2
            ;;
        --vm-ipaddr)
            ARG_VM_IPADDR=$2
            shift 2
            ;;
        --vm-netmask)
            ARG_VM_NETMASK=$2
            shift 2
            ;;
        --vm-gateway)
            ARG_VM_GATEWAY=$2
            shift 2
            ;;
        -h|--help)
            echo "$USAGE_MSG"
            exit 1
            ;;
        -d|--debug)
            IS_DEBUG_MODE=true
            shift 2
            ;;
        --)
            break
            ;;
        *)
            printf "$USAGE_MSG"
            ;;
    esac
done
###
# Handle remaining (positional) arguments
###
# optional
###
# Merge parsed ARG_* values over the built-in defaults
###
# below generated by write-sources.sh
if [ -n "$ARG_NET_CARD_NAME" ]
then
    NET_CARD_NAME=$ARG_NET_CARD_NAME
fi
if [ -n "$ARG_ACTION" ]
then
    ACTION=$ARG_ACTION
fi
if [ -n "$ARG_RESTART_NETWORK" ]
then
    RESTART_NETWORK=$ARG_RESTART_NETWORK
fi
if [ -n "$ARG_VM_IPADDR" ]
then
    VM_IPADDR=$ARG_VM_IPADDR
fi
if [ -n "$ARG_VM_NETMASK" ]
then
    VM_NETMASK=$ARG_VM_NETMASK
fi
if [ -n "$ARG_VM_GATEWAY" ]
then
    VM_GATEWAY=$ARG_VM_GATEWAY
fi
###
# Main: rewrite the interface config for static addressing
###
# Show the current IPADDR line (if any) before editing.
cat /etc/sysconfig/network-scripts/ifcfg-${NET_CARD_NAME} | grep --extended-regexp "IPADDR.*(.*.)\..*"
# Strip any existing settings we are about to re-add, then drop the blank
# lines the substitutions leave behind.
sed -i 's/BOOTPROTO=.*//g' /etc/sysconfig/network-scripts/ifcfg-${NET_CARD_NAME}
sed -i 's/ONBOOT=.*//g' /etc/sysconfig/network-scripts/ifcfg-${NET_CARD_NAME}
sed -i 's/IPADDR=.*//g' /etc/sysconfig/network-scripts/ifcfg-${NET_CARD_NAME}
sed -i 's/NETMASK=.*//g' /etc/sysconfig/network-scripts/ifcfg-${NET_CARD_NAME}
sed -i 's/GATEWAY=.*//g' /etc/sysconfig/network-scripts/ifcfg-${NET_CARD_NAME}
sed -i '/^\s*$/d' /etc/sysconfig/network-scripts/ifcfg-${NET_CARD_NAME} # delete blank lines
# Append the static configuration (the heredoc expands the VM_* vars now).
cat >>/etc/sysconfig/network-scripts/ifcfg-${NET_CARD_NAME} <<centos-set-static-ip-address
IPADDR=${VM_IPADDR}
NETMASK=${VM_NETMASK}
GATEWAY=${VM_GATEWAY}
BOOTPROTO=static
ONBOOT=yes
centos-set-static-ip-address
# Restart networking only when --restart-network true was requested.
#systemctl restart network
RESTART_NETWORK=$(echo "$RESTART_NETWORK" | tr "[:upper:]" "[:lower:]")
if [[ $RESTART_NETWORK == "true" ]]; then
    echo "the machine restart network service"
    service network restart
fi
#### 参考文献
: <<reference
Linux下网络配置、查看ip地址、网关信息,DNS信息(以centos7为例)
https://blog.csdn.net/qq_15304853/article/details/78700197
reference
| true |
db77500eba71d7dc17bfe4b85091c0223f5b7736 | Shell | quangdinh/kubernetes | /Debian/setup.sh | UTF-8 | 1,391 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env sh
# Bootstrap a fresh Debian host: install the admin user's SSH keys,
# grant sudo, switch to the legacy iptables backend and install a
# restrictive persistent firewall.
USERNAME=qdtc
GITHUBUSER=quangdinh
set -e
apt-get update
apt-get install -y --no-install-recommends vim sudo curl
mkdir -p "/home/$USERNAME/.ssh"
# BUGFIX: -f makes curl fail on HTTP errors, so a 404/error page is
# never written into authorized_keys (previously the error body was
# silently saved as a "key").
curl -f "https://github.com/$GITHUBUSER.keys" > "/home/$USERNAME/.ssh/authorized_keys"
chown -R "$USERNAME:$USERNAME" "/home/$USERNAME/.ssh"
usermod -aG sudo "$USERNAME"
apt-get install -y --no-install-recommends iptables arptables ebtables iptables-persistent
# switch to legacy versions
update-alternatives --set iptables /usr/sbin/iptables-legacy
update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
update-alternatives --set arptables /usr/sbin/arptables-legacy
update-alternatives --set ebtables /usr/sbin/ebtables-legacy
cat <<EOF | tee /etc/iptables/rules.v4
*filter
# Allows all loopback (lo0) traffic and drop all traffic to 127/8 that doesn't use lo0
-A INPUT -i lo -j ACCEPT
-A INPUT ! -i lo -d 127.0.0.0/8 -j REJECT
# Accepts all established inbound connections
-A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# Allows all outbound traffic
-A OUTPUT -j ACCEPT
# Allow SSH
-A INPUT -p tcp -m state --state NEW --dport 22 -j ACCEPT
# Allow ping
-A INPUT -p icmp -m icmp --icmp-type 8 -j ACCEPT
# Reject all other inbound - default deny unless explicitly allowed policy:
-A INPUT -j REJECT
-A FORWARD -j REJECT
COMMIT
EOF
iptables-restore < /etc/iptables/rules.v4
service netfilter-persistent save
84127f226ca4438cded55da0c4f9c836d249a9df | Shell | jlesage/docker-baseimage-gui | /rootfs/etc/services.d/certsmonitor/run | UTF-8 | 1,949 | 3.71875 | 4 | [] | no_license | #!/bin/sh
set -u # Treat unset variables as an error.
# Certificate/key file sets whose changes should trigger a restart of
# the service that consumes them.
WEB_CERTS="/config/certs/web-privkey.pem /config/certs/web-fullchain.pem"
VNC_CERTS="/config/certs/vnc-privkey.pem /config/certs/vnc-fullchain.pem"
DH_PARAMS="/config/certs/dhparam.pem"
# State directory: remembers the hash of each file set from the last run.
RUN_DIR=/var/run/certsmonitor
WEB_CERTS_HASH_FILE="$RUN_DIR"/web_certs_hash
VNC_CERTS_HASH_FILE="$RUN_DIR"/vnc_certs_hash
DH_PARAMS_HASH_FILE="$RUN_DIR"/dh_params_hash
hash() {
    # Fingerprint the given files: name, size and mtime of each file,
    # folded into a single md5 digest on stdout.
    stat -c '%n %s %Y' "$@" | md5sum | awk '{ print $1 }'
}
restart_nginx() {
    # Ask the running nginx to reload its configuration so it picks up
    # the renewed web certificates.
    printf '%s\n' "Restarting nginx..."
    /usr/sbin/nginx -s reload
}
restart_xvnc() {
    # Kill Xvnc so it restarts with the renewed VNC certificates
    # (presumably the container's supervisor respawns it — TODO confirm).
    printf '%s\n' "Restarting Xvnc..."
    killall Xvnc
}
mkdir -p "$RUN_DIR"
# Get previous hashes.
WEB_CERTS_HASH="$(cat "$WEB_CERTS_HASH_FILE" 2>/dev/null || true)"
VNC_CERTS_HASH="$(cat "$VNC_CERTS_HASH_FILE" 2>/dev/null || true)"
DH_PARAMS_HASH="$(cat "$DH_PARAMS_HASH_FILE" 2>/dev/null || true)"
# Get new hashes.
# NOTE: $WEB_CERTS / $VNC_CERTS are intentionally unquoted — each holds
# two space-separated paths that must word-split into hash's arguments.
WEB_CERTS_NEW_HASH="$(hash $WEB_CERTS)"
VNC_CERTS_NEW_HASH="$(hash $VNC_CERTS)"
DH_PARAMS_NEW_HASH="$(hash $DH_PARAMS)"
UPDATE_HASHES=0
# Only compare when all previous hashes exist; on the first run the
# hashes are recorded without restarting anything.
if [ -n "$WEB_CERTS_HASH" ] && [ -n "$VNC_CERTS_HASH" ] && [ -n "$DH_PARAMS_HASH" ]; then
    # Restart nginx if certificates changed.
    if [ "$WEB_CERTS_NEW_HASH" != "$WEB_CERTS_HASH" ]; then
        echo "Web certificates changed."
        UPDATE_HASHES=1
        restart_nginx
    elif [ "$DH_PARAMS_NEW_HASH" != "$DH_PARAMS_HASH" ]; then
        echo "DH parameters changed."
        UPDATE_HASHES=1
        restart_nginx
    fi
    # Restart xvnc if certificates changed.
    if [ "$VNC_CERTS_NEW_HASH" != "$VNC_CERTS_HASH" ]; then
        echo "VNC certificates changed."
        UPDATE_HASHES=1
        restart_xvnc
    fi
else
    UPDATE_HASHES=1
fi
# Save new hashes.
if [ "$UPDATE_HASHES" -eq 1 ]; then
    echo "$WEB_CERTS_NEW_HASH" > "$WEB_CERTS_HASH_FILE"
    echo "$VNC_CERTS_NEW_HASH" > "$VNC_CERTS_HASH_FILE"
    echo "$DH_PARAMS_NEW_HASH" > "$DH_PARAMS_HASH_FILE"
fi
| true |
4517da297cc2912c89343ed911dab7b2901ce4f1 | Shell | CLOVIS-AI/Dotfiles | /scripts/ensure_kscript | UTF-8 | 2,171 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env bash
# Source the interactive menu helpers used at the bottom of this script.
. announce-menu --import
# Progress message on stderr; \r keeps rewriting the same line.
echo -en "Checking dependencies...\r" >&2
_in_path() {
	# True when $1 resolves to a command available in PATH.
	command -v "$1" >/dev/null 2>&1
}
_sdkman_path="$HOME/.sdkman/bin/sdkman-init.sh"
# Load SDKMAN into this shell if it is already installed.
[[ -f $_sdkman_path ]] && . "$_sdkman_path"
# Install kscript (and its toolchain: java, kotlin, gradle) through
# SDKMAN, installing SDKMAN itself first when needed. Progress is
# printed to stderr as a single rewritten line.
# NOTE(review): `packager` appears to be a helper from the surrounding
# dotfiles repo — confirm it is in PATH when this runs.
function _install_sdkman_kscript() {
	if declare -f sdk >/dev/null; then
		echo -en "[-> ] SDKMAN is installed... \r" >&2
	else
		echo -en "[> ] Checking dependencies... \r" >&2
		if ! _in_path curl; then
			echo "'curl' is not installed... "
			packager install curl || exit 1
		fi
		if ! _in_path zip; then
			echo "'zip' is not installed..."
			packager install zip || exit 1
		fi
		if ! _in_path unzip; then
			echo "'unzip' is not installed..."
			packager install unzip || exit 1
		fi
		echo "[-> ] Installing SDKMAN..." >&2
		curl -L "https://get.sdkman.io" | bash >/dev/null 2>/dev/null
		source "$_sdkman_path"
	fi
	if ! _in_path java; then
		echo -en "[--> ] Installing java... \r" >&2
		sdk install java >/dev/null 2>/dev/null
	fi
	if ! _in_path kotlin; then
		echo -en "[---> ] Installing kotlin... \r" >&2
		sdk install kotlin >/dev/null 2>/dev/null
	fi
	if ! _in_path gradle; then
		echo -en "[----> ] Installing gradle... \r" >&2
		sdk install gradle >/dev/null 2>/dev/null
	fi
	if ! _in_path kscript; then
		echo -en "[----->] Installing kscript... \r" >&2
		sdk install kscript >/dev/null 2>/dev/null
	fi
	source "$_sdkman_path"
}
# If kscript is missing, install it: interactively (menu) when stdin
# is a terminal, otherwise silently via SDKMAN.
if ! _in_path kscript; then
	if [[ -t 0 ]]; then # stdin is a terminal, the user can input data
		announce-menu --title "This script uses 'kscript', which is missing from your system." \
			"sdkman:Install kscript with SDKMAN, a non-root packager manager" \
			"packager:Use BrainDot's Packager tool to install kscript with your favorite package manager" \
			"quit:Stop and exit without doing anything"
		case "$(announce-menu-get)" in
		sdkman)
			_install_sdkman_kscript
			;;
		packager)
			packager install kscript
			;;
		quit)
			exit 0
			;;
		esac
	else
		_install_sdkman_kscript
	fi
fi
echo -en "Working... \r" >&2
# Replace this shell with kscript, forwarding all arguments.
exec kscript "$@"
| true |
44380ec34a024e7f004f7eaf23bae88f4899709d | Shell | dydra/http-api-tests | /extensions/sparql-protocol/revisions/previous.sh | UTF-8 | 412 | 2.90625 | 3 | [
"Unlicense"
] | permissive | #! /bin/bash
# exercise the revision mechanism
if ( repository_has_revisions )
then
curl_sparql_request 'revision-id=HEAD~1' <<EOF \
| tee $ECHO_OUTPUT | jq '.results.bindings[] | .[].value' | fgrep -q "1"
SELECT (count(*) as ?count)
WHERE {
#{ ?s ?p ?o }
#union
{ ?s ?p ?o }
union
{ graph ?g { ?s ?p ?o } }
}
EOF
else
echo "${STORE_ACCOUNT}/${STORE_REPOSITORY} has just one revision"
fi
| true |
d3faab3287f1446ee54640ea550b9ce4b387521c | Shell | cysk003/ServerStatus | /status.sh | UTF-8 | 41,770 | 3.4375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
sh_ver="1.0.0"
filepath=$(
cd "$(dirname "$0")" || exit
pwd
)
file_1=$(echo -e "${filepath}" | awk -F "$0" '{print $1}')
file="/usr/local/ServerStatus"
web_file="/usr/local/ServerStatus/web"
server_file="/usr/local/ServerStatus/server"
server_conf="/usr/local/ServerStatus/server/config.json"
server_conf_1="/usr/local/ServerStatus/server/config.conf"
plugin_file="/usr/local/ServerStatus/plugin"
client_file="/usr/local/ServerStatus/clients"
service="/usr/lib/systemd/system"
jq_file="${file}/jq"
[[ ! -e ${jq_file} ]] && jq_file="/usr/bin/jq"
github_prefix="https://raw.githubusercontent.com/cppla/ServerStatus/master"
NAME="ServerStatus"
Green_font_prefix="\033[32m" && Red_font_prefix="\033[31m" && Red_background_prefix="\033[41;37m" && Font_color_suffix="\033[0m"
Info="${Green_font_prefix}[信息]${Font_color_suffix}"
Error="${Red_font_prefix}[错误]${Font_color_suffix}"
Tip="${Green_font_prefix}[注意]${Font_color_suffix}"
# Detect the Linux distribution family and architecture; results are
# stored in the globals $release (centos/debian/archlinux) and $bit.
check_sys() {
	if [[ -f /etc/redhat-release ]]; then
		release="centos"
	elif grep -q -E -i "debian|ubuntu" /etc/issue; then
		release="debian"
	elif grep -q -E -i "centos|red hat|redhat" /etc/issue; then
		# BUGFIX: the pattern previously read "centonetstat -tunlps"
		# (a stray paste), so CentOS could never match via /etc/issue.
		release="centos"
	elif grep -q -E -i "Arch|Manjaro" /etc/issue; then
		release="archlinux"
	elif grep -q -E -i "debian|ubuntu" /proc/version; then
		release="debian"
	elif grep -q -E -i "centos|red hat|redhat" /proc/version; then
		release="centos"
	else
		echo -e "ServerStatus 暂不支持该Linux发行版"
	fi
	bit=$(uname -m)
}
check_installed_server_status() {
	# Abort with an error unless the server binary is present.
	if [[ ! -e "${server_file}/sergate" ]]; then
		echo -e "${Error} $NAME 服务端没有安装,请检查 !"
		exit 1
	fi
}
check_installed_client_status() {
	# Abort with an error unless the client script is present.
	if [[ ! -e "${client_file}/client-linux.py" ]]; then
		echo -e "${Error} $NAME 客户端没有安装,请检查 !"
		exit 1
	fi
}
# Download the upstream source zip, compile the server binary with
# make, then move server/, web/ and plugin/ into ${file}.
Download_Server_Status_server() {
	cd "/tmp" || exit 1
	wget -N --no-check-certificate https://github.com/cppla/ServerStatus/archive/refs/heads/master.zip
	[[ ! -e "master.zip" ]] && echo -e "${Error} ServerStatus 服务端下载失败 !" && exit 1
	unzip master.zip
	rm -rf master.zip
	[[ ! -d "/tmp/ServerStatus-master" ]] && echo -e "${Error} ServerStatus 服务端解压失败 !" && exit 1
	cd "/tmp/ServerStatus-master/server" || exit 1
	make
	[[ ! -e "sergate" ]] && echo -e "${Error} ServerStatus 服务端编译失败 !" && cd "${file_1}" && rm -rf "/tmp//ServerStatus-master" && exit 1
	cd "${file_1}" || exit 1
	mkdir -p "${server_file}"
	mv "/tmp/ServerStatus-master/server" "${file}"
	mv "/tmp/ServerStatus-master/web" "${file}"
	mv "/tmp/ServerStatus-master/plugin" "${file}"
	rm -rf "/tmp/ServerStatus-master"
	if [[ ! -e "${server_file}/sergate" ]]; then
		echo -e "${Error} ServerStatus 服务端移动重命名失败 !"
		# Roll back: restore the backed-up binary if one exists.
		[[ -e "${server_file}/sergate1" ]] && mv "${server_file}/sergate1" "${server_file}/sergate"
		exit 1
	else
		[[ -e "${server_file}/sergate1" ]] && rm -rf "${server_file}/sergate1"
	fi
}
Download_Server_Status_client() {
	# Fetch the latest Linux client script into the client directory.
	mkdir -p "${client_file}"
	wget -N --no-check-certificate -P "${client_file}" "${github_prefix}/clients/client-linux.py"
}
# Download and enable the systemd unit for the given side.
# $1: "server" or "client" (defaults to "server").
Download_Server_Status_Service() {
	# IMPROVEMENT: mode is now local (it previously leaked into the
	# global scope) and defaults idiomatically.
	local mode=${1:-server}
	local service_note="服务端"
	[[ ${mode} == "client" ]] && service_note="客户端"
	wget --no-check-certificate "${github_prefix}/service/status-${mode}.service" -O "${service}/status-${mode}.service" ||
		{
			echo -e "${Error} $NAME ${service_note}服务管理脚本下载失败 !"
			exit 1
		}
	systemctl enable "status-${mode}.service"
	echo -e "${Info} $NAME ${service_note}服务管理脚本下载完成 !"
}
# Thin wrappers so install/update code reads declaratively.
Service_Server_Status_server() { Download_Server_Status_Service "server"; }
Service_Server_Status_client() { Download_Server_Status_Service "client"; }
# Install runtime dependencies for the given side ($1: server|client).
# The server additionally needs a C/C++ build toolchain for `make`.
Installation_dependency() {
	mode=$1
	if [[ ${release} == "centos" ]]; then
		yum makecache
		yum -y install unzip
		yum -y install python3 >/dev/null 2>&1 || yum -y install python
		[[ ${mode} == "server" ]] && yum -y groupinstall "Development Tools"
	elif [[ ${release} == "debian" ]]; then
		apt -y update
		apt -y install unzip
		apt -y install python3 >/dev/null 2>&1 || apt -y install python
		[[ ${mode} == "server" ]] && apt -y install build-essential
	elif [[ ${release} == "archlinux" ]]; then
		pacman -Sy python python-pip unzip --noconfirm
		[[ ${mode} == "server" ]] && pacman -Sy base-devel --noconfirm
	fi
	# Make sure a `python` binary exists (the client uses it).
	[[ ! -e /usr/bin/python ]] && ln -s /usr/bin/python3 /usr/bin/python
}
# Write the initial config.json with a single placeholder node.
# (Heredoc uses <<- so the leading tab is stripped from each line.)
Write_server_config() {
	cat >${server_conf} <<-EOF
	{
	    "servers": [
	        {
	            "username": "s01",
	            "password": "password",
	            "name": "vps-1",
	            "type": "KVM",
	            "host": "azure",
	            "location": "Hong Kong",
	            "monthstart": 1
	        }
	    ]
	}
	EOF
}
# Write config.conf holding the listen port chosen by the user.
Write_server_config_conf() {
	cat >${server_conf_1} <<-EOF
	PORT = ${server_port_s}
	EOF
}
# Parse the current SERVER/PORT/USER/PASSWORD values out of the
# installed client script into the client_* globals.
Read_config_client() {
	client_text="$(sed 's/\"//g;s/,//g;s/ //g' "${client_file}/client-linux.py") "
	client_server="$(echo -e "${client_text}" | grep "SERVER=" | awk -F "=" '{print $2;exit}')"
	client_port="$(echo -e "${client_text}" | grep "PORT=" | awk -F "=" '{print $2;exit}')"
	client_user="$(echo -e "${client_text}" | grep "USER=" | awk -F "=" '{print $2;exit}')"
	client_password="$(echo -e "${client_text}" | grep "PASSWORD=" | awk -F "=" '{print $2;exit}')"
}
# Read the server listen port from config.conf, creating the file
# with the default port 35601 when it does not exist yet.
Read_config_server() {
	if [[ ! -e "${server_conf_1}" ]]; then
		server_port_s="35601"
		Write_server_config_conf
		server_port="35601"
	else
		server_port="$(grep "PORT = " ${server_conf_1} | awk '{print $3}')"
	fi
}
# Interactively ask for the server IP/domain; stores it in $server_s.
# $1: "server" (website domain, default = this host's IP) or "client"
# (address of the server to report to, default 127.0.0.1).
Set_server() {
	mode=$1
	[[ -z ${mode} ]] && mode="server"
	if [[ ${mode} == "server" ]]; then
		echo -e "请输入 $NAME 服务端中网站要设置的 域名[server]
默认为本机IP为域名,例如输入: toyoo.pw ,如果要使用本机IP,请留空直接回车"
		read -erp "(默认: 本机IP):" server_s
		[[ -z "$server_s" ]] && server_s=""
	else
		echo -e "请输入 $NAME 服务端的 IP/域名[server],请注意,如果你的域名使用了CDN,请直接填写IP"
		read -erp "(默认: 127.0.0.1):" server_s
		[[ -z "$server_s" ]] && server_s="127.0.0.1"
	fi
	echo && echo " ================================================"
	echo -e "	IP/域名[server]: ${Red_background_prefix} ${server_s} ${Font_color_suffix}"
	echo " ================================================" && echo
}
# Ask for the website HTTP port (1-65535); stores $server_http_port_s.
Set_server_http_port() {
	while true; do
		echo -e "请输入 $NAME 服务端中网站要设置的 域名/IP的端口[1-65535](如果是域名的话,一般用 80 端口)"
		read -erp "(默认: 8888):" server_http_port_s
		[[ -z "$server_http_port_s" ]] && server_http_port_s="8888"
		if [[ "$server_http_port_s" =~ ^[0-9]*$ ]]; then
			if [[ ${server_http_port_s} -ge 1 ]] && [[ ${server_http_port_s} -le 65535 ]]; then
				echo && echo " ================================================"
				echo -e "	端口: ${Red_background_prefix} ${server_http_port_s} ${Font_color_suffix}"
				echo " ================================================" && echo
				break
			else
				echo "输入错误, 请输入正确的端口。"
			fi
		else
			echo "输入错误, 请输入正确的端口。"
		fi
	done
}
# Ask for the server listen port (1-65535); stores $server_port_s.
Set_server_port() {
	while true; do
		echo -e "请输入 $NAME 服务端监听的端口[1-65535](用于服务端接收客户端消息的端口,客户端要填写这个端口)"
		read -erp "(默认: 35601):" server_port_s
		[[ -z "$server_port_s" ]] && server_port_s="35601"
		if [[ "$server_port_s" =~ ^[0-9]*$ ]]; then
			if [[ ${server_port_s} -ge 1 ]] && [[ ${server_port_s} -le 65535 ]]; then
				echo && echo " ================================================"
				echo -e "	端口: ${Red_background_prefix} ${server_port_s} ${Font_color_suffix}"
				echo " ================================================" && echo
				break
			else
				echo "输入错误, 请输入正确的端口。"
			fi
		else
			echo "输入错误, 请输入正确的端口。"
		fi
	done
}
# Ask for the node username; empty input aborts. Stores $username_s.
Set_username() {
	mode=$1
	[[ -z ${mode} ]] && mode="server"
	if [[ ${mode} == "server" ]]; then
		echo -e "请输入 $NAME 服务端要设置的用户名[username](字母/数字,不可与其他账号重复)"
	else
		echo -e "请输入 $NAME 服务端中对应配置的用户名[username](字母/数字,不可与其他账号重复)"
	fi
	read -erp "(默认: 取消):" username_s
	[[ -z "$username_s" ]] && echo "已取消..." && exit 0
	echo && echo " ================================================"
	echo -e "	账号[username]: ${Red_background_prefix} ${username_s} ${Font_color_suffix}"
	echo " ================================================" && echo
}
# Ask for the node password (default "serverstatus"); stores $password_s.
Set_password() {
	mode=$1
	[[ -z ${mode} ]] && mode="server"
	if [[ ${mode} == "server" ]]; then
		echo -e "请输入 $NAME 服务端要设置的密码[password](字母/数字,可重复)"
	else
		echo -e "请输入 $NAME 服务端中对应配置的密码[password](字母/数字)"
	fi
	read -erp "(默认: serverstatus):" password_s
	[[ -z "$password_s" ]] && password_s="serverstatus"
	echo && echo " ================================================"
	echo -e "	密码[password]: ${Red_background_prefix} ${password_s} ${Font_color_suffix}"
	echo " ================================================" && echo
}
# Ask for the display name of the node; stores $name_s.
Set_name() {
	echo -e "请输入 $NAME 服务端要设置的节点名称[name](支持中文,前提是你的系统和SSH工具支持中文输入,仅仅是个名字)"
	read -erp "(默认: Server 01):" name_s
	[[ -z "$name_s" ]] && name_s="Server 01"
	echo && echo " ================================================"
	echo -e "	节点名称[name]: ${Red_background_prefix} ${name_s} ${Font_color_suffix}"
	echo " ================================================" && echo
}
# Ask for the virtualization type (e.g. OpenVZ/KVM); stores $type_s.
Set_type() {
	echo -e "请输入 $NAME 服务端要设置的节点虚拟化类型[type](例如 OpenVZ / KVM)"
	read -erp "(默认: KVM):" type_s
	[[ -z "$type_s" ]] && type_s="KVM"
	echo && echo " ================================================"
	echo -e "	虚拟化类型[type]: ${Red_background_prefix} ${type_s} ${Font_color_suffix}"
	echo " ================================================" && echo
}
# Ask for the node location label; stores $location_s.
Set_location() {
	echo -e "请输入 $NAME 服务端要设置的节点位置[location](支持中文,前提是你的系统和SSH工具支持中文输入)"
	read -erp "(默认: Hong Kong):" location_s
	[[ -z "$location_s" ]] && location_s="Hong Kong"
	echo && echo " ================================================"
	echo -e "	节点位置[location]: ${Red_background_prefix} ${location_s} ${Font_color_suffix}"
	echo " ================================================" && echo
}
# Ask for the monthly traffic reset day (1-28); stores $monthstart_s.
Set_monthstart() {
	echo -e "请输入 $NAME 服务端要设置的节点月重置流量日[monthstart](每月流量归零的日期(1~28),默认为1(即每月1日))"
	read -erp "(默认: 1):" monthstart_s
	[[ -z "$monthstart_s" ]] && monthstart_s="1"
	echo && echo " ================================================"
	echo -e "	月流量重置日[monthstart]: ${Red_background_prefix} ${monthstart_s} ${Font_color_suffix}"
	echo " ================================================" && echo
}
# Collect all fields for a new/updated node entry on the server side.
Set_config_server() {
	Set_username "server"
	Set_password "server"
	Set_name
	Set_type
	Set_location
	Set_monthstart
}
# Collect the connection settings for the client side.
Set_config_client() {
	Set_server "client"
	Set_server_port
	Set_username "client"
	Set_password "client"
}
# Interactive menu for managing the server-side node configuration;
# every branch ends by restarting the server to apply the change.
Set_ServerStatus_server() {
	check_installed_server_status
	echo && echo -e " 你要做什么?
	${Green_font_prefix} 1.${Font_color_suffix} 添加 节点配置
	${Green_font_prefix} 2.${Font_color_suffix} 删除 节点配置
————————
	${Green_font_prefix} 3.${Font_color_suffix} 修改 节点配置 - 节点用户名
	${Green_font_prefix} 4.${Font_color_suffix} 修改 节点配置 - 节点密码
	${Green_font_prefix} 5.${Font_color_suffix} 修改 节点配置 - 节点名称
	${Green_font_prefix} 6.${Font_color_suffix} 修改 节点配置 - 节点虚拟化
	${Green_font_prefix} 7.${Font_color_suffix} 修改 节点配置 - 节点位置
	${Green_font_prefix} 8.${Font_color_suffix} 修改 节点配置 - 月流量重置日
	${Green_font_prefix} 9.${Font_color_suffix} 修改 节点配置 - 全部参数
————————
	${Green_font_prefix}10.${Font_color_suffix} 修改 服务端监听端口" && echo
	read -erp "(默认: 取消):" server_num
	[[ -z "${server_num}" ]] && echo "已取消..." && exit 1
	if [[ ${server_num} == "1" ]]; then
		Add_ServerStatus_server
	elif [[ ${server_num} == "2" ]]; then
		Del_ServerStatus_server
	elif [[ ${server_num} == "3" ]]; then
		Modify_ServerStatus_server_username
	elif [[ ${server_num} == "4" ]]; then
		Modify_ServerStatus_server_password
	elif [[ ${server_num} == "5" ]]; then
		Modify_ServerStatus_server_name
	elif [[ ${server_num} == "6" ]]; then
		Modify_ServerStatus_server_type
	elif [[ ${server_num} == "7" ]]; then
		Modify_ServerStatus_server_location
	elif [[ ${server_num} == "8" ]]; then
		Modify_ServerStatus_server_monthstart
	elif [[ ${server_num} == "9" ]]; then
		Modify_ServerStatus_server_all
	elif [[ ${server_num} == "10" ]]; then
		Read_config_server
		Set_server_port
		Write_server_config_conf
	else
		echo -e "${Error} 请输入正确的数字[1-10]" && exit 1
	fi
	Restart_ServerStatus_server
}
# Print a summary line for every node entry in config.json and set the
# global $conf_text_total (node count) used by the Del_/Modify_ menus.
List_ServerStatus_server() {
	conf_text=$(${jq_file} '.servers' ${server_conf} | ${jq_file} ".[]|.username" | sed 's/\"//g')
	conf_text_total=$(echo -e "${conf_text}" | wc -l)
	[[ ${conf_text_total} == "0" ]] && echo -e "${Error} 没有发现 一个节点配置,请检查 !" && exit 1
	conf_text_total_a=$((conf_text_total - 1))
	conf_list_all=""
	for ((integer = 0; integer <= conf_text_total_a; integer++)); do
		# Entry N with quotes/trailing commas stripped and the
		# surrounding braces removed.
		now_text=$(${jq_file} '.servers' ${server_conf} | ${jq_file} ".[${integer}]" | sed 's/\"//g;s/,$//g' | sed '$d;1d')
		now_text_username=$(echo -e "${now_text}" | grep "username" | awk -F ": " '{print $2}')
		now_text_password=$(echo -e "${now_text}" | grep "password" | awk -F ": " '{print $2}')
		now_text_name=$(echo -e "${now_text}" | grep "name" | grep -v "username" | awk -F ": " '{print $2}')
		now_text_type=$(echo -e "${now_text}" | grep "type" | awk -F ": " '{print $2}')
		now_text_location=$(echo -e "${now_text}" | grep "location" | awk -F ": " '{print $2}')
		now_text_monthstart=$(echo -e "${now_text}" | grep "monthstart" | awk -F ": " '{print $2}')
		# CLEANUP: the previous version branched on ${now_text_disabled},
		# a variable that was never assigned, to build a status string
		# that was never printed; that dead code was removed — the
		# displayed output is unchanged.
		conf_list_all=${conf_list_all}"用户名: ${Green_font_prefix}${now_text_username}${Font_color_suffix} 密码: ${Green_font_prefix}${now_text_password}${Font_color_suffix} 节点名: ${Green_font_prefix}${now_text_name}${Font_color_suffix} 虚拟化: ${Green_font_prefix}${now_text_type}${Font_color_suffix} 位置: ${Green_font_prefix}${now_text_location}${Font_color_suffix} 月流量重置日: ${Green_font_prefix}${now_text_monthstart}${Font_color_suffix}\n"
	done
	echo && echo -e "节点总数 ${Green_font_prefix}${conf_text_total}${Font_color_suffix}"
	echo -e "${conf_list_all}"
}
# Append a new node entry to config.json. The lines are inserted one
# by one at line 3 (just inside the "servers" array), in reverse order
# so the finished entry reads top-down.
Add_ServerStatus_server() {
	Set_config_server
	Set_username_ch=$(grep '"username": "'"${username_s}"'"' ${server_conf})
	[[ -n "${Set_username_ch}" ]] && echo -e "${Error} 用户名已被使用 !" && exit 1
	sed -i '3i\ },' ${server_conf}
	sed -i '3i\ "monthstart": '"${monthstart_s}"'' ${server_conf}
	sed -i '3i\ "location": "'"${location_s}"'",' ${server_conf}
	sed -i '3i\ "host": "'"None"'",' ${server_conf}
	sed -i '3i\ "type": "'"${type_s}"'",' ${server_conf}
	sed -i '3i\ "name": "'"${name_s}"'",' ${server_conf}
	sed -i '3i\ "password": "'"${password_s}"'",' ${server_conf}
	sed -i '3i\ "username": "'"${username_s}"'",' ${server_conf}
	sed -i '3i\ {' ${server_conf}
	echo -e "${Info} 添加节点成功 ${Green_font_prefix}[ 节点名称: ${name_s}, 节点用户名: ${username_s}, 节点密码: ${password_s} ]${Font_color_suffix} !"
}
# Delete a node entry by username. Relies on the fixed 9-line layout
# of each entry ("{" one line above the username, "}," seven below).
Del_ServerStatus_server() {
	List_ServerStatus_server
	[[ "${conf_text_total}" == "1" ]] && echo -e "${Error} 节点配置仅剩 1个,不能删除 !" && exit 1
	echo -e "请输入要删除的节点用户名"
	read -erp "(默认: 取消):" del_server_username
	[[ -z "${del_server_username}" ]] && echo -e "已取消..." && exit 1
	del_username=$(cat -n ${server_conf} | grep '"username": "'"${del_server_username}"'"' | awk '{print $1}')
	if [[ -n ${del_username} ]]; then
		del_username_min=$((del_username - 1))
		del_username_max=$((del_username + 7))
		del_username_max_text=$(sed -n "${del_username_max}p" ${server_conf})
		del_username_max_text_last=${del_username_max_text:((${#del_username_max_text} - 1))}
		# If the deleted entry was the last in the array, strip the
		# now-trailing comma from the entry before it.
		if [[ ${del_username_max_text_last} != "," ]]; then
			del_list_num=$((del_username_min - 1))
			sed -i "${del_list_num}s/,$//g" ${server_conf}
		fi
		sed -i "${del_username_min},${del_username_max}d" ${server_conf}
		echo -e "${Info} 节点删除成功 ${Green_font_prefix}[ 节点用户名: ${del_server_username} ]${Font_color_suffix} "
	else
		echo -e "${Error} 请输入正确的节点用户名 !" && exit 1
	fi
}
# Each Modify_* function below asks for a username, locates its line
# number in config.json and edits a field at a fixed offset from it:
# password +1, name +2, type +3, location +5, monthstart +6.
Modify_ServerStatus_server_username() {
	List_ServerStatus_server
	echo -e "请输入要修改的节点用户名"
	read -erp "(默认: 取消):" manually_username
	[[ -z "${manually_username}" ]] && echo -e "已取消..." && exit 1
	Set_username_num=$(cat -n ${server_conf} | grep '"username": "'"${manually_username}"'"' | awk '{print $1}')
	if [[ -n ${Set_username_num} ]]; then
		Set_username
		Set_username_ch=$(grep '"username": "'"${username_s}"'"' ${server_conf})
		[[ -n "${Set_username_ch}" ]] && echo -e "${Error} 用户名已被使用 !" && exit 1
		sed -i "${Set_username_num}"'s/"username": "'"${manually_username}"'"/"username": "'"${username_s}"'"/g' ${server_conf}
		echo -e "${Info} 修改成功 [ 原节点用户名: ${manually_username}, 新节点用户名: ${username_s} ]"
	else
		echo -e "${Error} 请输入正确的节点用户名 !" && exit 1
	fi
}
# Change a node's password (line at username offset +1).
Modify_ServerStatus_server_password() {
	List_ServerStatus_server
	echo -e "请输入要修改的节点用户名"
	read -erp "(默认: 取消):" manually_username
	[[ -z "${manually_username}" ]] && echo -e "已取消..." && exit 1
	Set_username_num=$(cat -n ${server_conf} | grep '"username": "'"${manually_username}"'"' | awk '{print $1}')
	if [[ -n ${Set_username_num} ]]; then
		Set_password
		Set_password_num_a=$((Set_username_num + 1))
		Set_password_num_text=$(sed -n "${Set_password_num_a}p" ${server_conf} | sed 's/\"//g;s/,$//g' | awk -F ": " '{print $2}')
		sed -i "${Set_password_num_a}"'s/"password": "'"${Set_password_num_text}"'"/"password": "'"${password_s}"'"/g' ${server_conf}
		echo -e "${Info} 修改成功 [ 原节点密码: ${Set_password_num_text}, 新节点密码: ${password_s} ]"
	else
		echo -e "${Error} 请输入正确的节点用户名 !" && exit 1
	fi
}
# Change a node's display name (offset +2).
Modify_ServerStatus_server_name() {
	List_ServerStatus_server
	echo -e "请输入要修改的节点用户名"
	read -erp "(默认: 取消):" manually_username
	[[ -z "${manually_username}" ]] && echo -e "已取消..." && exit 1
	Set_username_num=$(cat -n ${server_conf} | grep '"username": "'"${manually_username}"'"' | awk '{print $1}')
	if [[ -n ${Set_username_num} ]]; then
		Set_name
		Set_name_num_a=$((Set_username_num + 2))
		Set_name_num_a_text=$(sed -n "${Set_name_num_a}p" ${server_conf} | sed 's/\"//g;s/,$//g' | awk -F ": " '{print $2}')
		sed -i "${Set_name_num_a}"'s/"name": "'"${Set_name_num_a_text}"'"/"name": "'"${name_s}"'"/g' ${server_conf}
		echo -e "${Info} 修改成功 [ 原节点名称: ${Set_name_num_a_text}, 新节点名称: ${name_s} ]"
	else
		echo -e "${Error} 请输入正确的节点用户名 !" && exit 1
	fi
}
# Change a node's virtualization type (offset +3).
Modify_ServerStatus_server_type() {
	List_ServerStatus_server
	echo -e "请输入要修改的节点用户名"
	read -erp "(默认: 取消):" manually_username
	[[ -z "${manually_username}" ]] && echo -e "已取消..." && exit 1
	Set_username_num=$(cat -n ${server_conf} | grep '"username": "'"${manually_username}"'"' | awk '{print $1}')
	if [[ -n ${Set_username_num} ]]; then
		Set_type
		Set_type_num_a=$((Set_username_num + 3))
		Set_type_num_a_text=$(sed -n "${Set_type_num_a}p" ${server_conf} | sed 's/\"//g;s/,$//g' | awk -F ": " '{print $2}')
		sed -i "${Set_type_num_a}"'s/"type": "'"${Set_type_num_a_text}"'"/"type": "'"${type_s}"'"/g' ${server_conf}
		echo -e "${Info} 修改成功 [ 原节点虚拟化: ${Set_type_num_a_text}, 新节点虚拟化: ${type_s} ]"
	else
		echo -e "${Error} 请输入正确的节点用户名 !" && exit 1
	fi
}
# Change a node's location label (offset +5, skipping "host" at +4).
Modify_ServerStatus_server_location() {
	List_ServerStatus_server
	echo -e "请输入要修改的节点用户名"
	read -erp "(默认: 取消):" manually_username
	[[ -z "${manually_username}" ]] && echo -e "已取消..." && exit 1
	Set_username_num=$(cat -n ${server_conf} | grep '"username": "'"${manually_username}"'"' | awk '{print $1}')
	if [[ -n ${Set_username_num} ]]; then
		Set_location
		Set_location_num_a=$((Set_username_num + 5))
		Set_location_num_a_text=$(sed -n "${Set_location_num_a}p" ${server_conf} | sed 's/\"//g;s/,$//g' | awk -F ": " '{print $2}')
		sed -i "${Set_location_num_a}"'s/"location": "'"${Set_location_num_a_text}"'"/"location": "'"${location_s}"'"/g' ${server_conf}
		echo -e "${Info} 修改成功 [ 原节点位置: ${Set_location_num_a_text}, 新节点位置: ${location_s} ]"
	else
		echo -e "${Error} 请输入正确的节点用户名 !" && exit 1
	fi
}
# Change a node's monthly traffic reset day (offset +6, numeric).
Modify_ServerStatus_server_monthstart() {
	List_ServerStatus_server
	echo -e "请输入要修改的节点用户名"
	read -erp "(默认: 取消):" manually_username
	[[ -z "${manually_username}" ]] && echo -e "已取消..." && exit 1
	Set_username_num=$(cat -n ${server_conf} | grep '"username": "'"${manually_username}"'"' | awk '{print $1}')
	if [[ -n ${Set_username_num} ]]; then
		Set_monthstart
		Set_monthstart_num_a=$((Set_username_num + 6))
		Set_monthstart_num_text=$(sed -n "${Set_monthstart_num_a}p" ${server_conf} | sed 's/\"//g;s/,$//g' | awk -F ": " '{print $2}')
		sed -i "${Set_monthstart_num_a}"'s/"monthstart": '"${Set_monthstart_num_text}"'/"monthstart": '"${monthstart_s}"'/g' ${server_conf}
		echo -e "${Info} 修改成功 [ 原月流量重置日: ${Set_monthstart_num_text}, 新月流量重置日: ${monthstart_s} ]"
	else
		echo -e "${Error} 请输入正确的节点用户名 !" && exit 1
	fi
}
# Re-ask every field of an entry and rewrite them all in place.
Modify_ServerStatus_server_all() {
	List_ServerStatus_server
	echo -e "请输入要修改的节点用户名"
	read -erp "(默认: 取消):" manually_username
	[[ -z "${manually_username}" ]] && echo -e "已取消..." && exit 1
	Set_username_num=$(cat -n ${server_conf} | grep '"username": "'"${manually_username}"'"' | awk '{print $1}')
	if [[ -n ${Set_username_num} ]]; then
		Set_username
		Set_password
		Set_name
		Set_type
		Set_location
		Set_monthstart
		sed -i "${Set_username_num}"'s/"username": "'"${manually_username}"'"/"username": "'"${username_s}"'"/g' ${server_conf}
		Set_password_num_a=$((Set_username_num + 1))
		Set_password_num_text=$(sed -n "${Set_password_num_a}p" ${server_conf} | sed 's/\"//g;s/,$//g' | awk -F ": " '{print $2}')
		sed -i "${Set_password_num_a}"'s/"password": "'"${Set_password_num_text}"'"/"password": "'"${password_s}"'"/g' ${server_conf}
		Set_name_num_a=$((Set_username_num + 2))
		Set_name_num_a_text=$(sed -n "${Set_name_num_a}p" ${server_conf} | sed 's/\"//g;s/,$//g' | awk -F ": " '{print $2}')
		sed -i "${Set_name_num_a}"'s/"name": "'"${Set_name_num_a_text}"'"/"name": "'"${name_s}"'"/g' ${server_conf}
		Set_type_num_a=$((Set_username_num + 3))
		Set_type_num_a_text=$(sed -n "${Set_type_num_a}p" ${server_conf} | sed 's/\"//g;s/,$//g' | awk -F ": " '{print $2}')
		sed -i "${Set_type_num_a}"'s/"type": "'"${Set_type_num_a_text}"'"/"type": "'"${type_s}"'"/g' ${server_conf}
		Set_location_num_a=$((Set_username_num + 5))
		Set_location_num_a_text=$(sed -n "${Set_location_num_a}p" ${server_conf} | sed 's/\"//g;s/,$//g' | awk -F ": " '{print $2}')
		sed -i "${Set_location_num_a}"'s/"location": "'"${Set_location_num_a_text}"'"/"location": "'"${location_s}"'"/g' ${server_conf}
		Set_monthstart_num_a=$((Set_username_num + 6))
		Set_monthstart_num_a_text=$(sed -n "${Set_monthstart_num_a}p" ${server_conf} | sed 's/\"//g;s/,$//g' | awk -F ": " '{print $2}')
		sed -i "${Set_monthstart_num_a}"'s/"monthstart": '"${Set_monthstart_num_a_text}"'/"monthstart": '"${monthstart_s}"'/g' ${server_conf}
		echo -e "${Info} 修改成功。"
	else
		echo -e "${Error} 请输入正确的节点用户名 !" && exit 1
	fi
}
# Toggle a "disabled" flag at offset +6.
# NOTE(review): offset +6 is the "monthstart" line in the entry layout
# written by Write_server_config/Add_ServerStatus_server, and no
# "disabled" field is ever written there; this function is also not
# reachable from the menu — confirm whether it matches a different
# config layout before relying on it.
Modify_ServerStatus_server_disabled() {
	List_ServerStatus_server
	echo -e "请输入要修改的节点用户名"
	read -erp "(默认: 取消):" manually_username
	[[ -z "${manually_username}" ]] && echo -e "已取消..." && exit 1
	Set_username_num=$(cat -n ${server_conf} | grep '"username": "'"${manually_username}"'"' | awk '{print $1}')
	if [[ -n ${Set_username_num} ]]; then
		Set_disabled_num_a=$((Set_username_num + 6))
		Set_disabled_num_a_text=$(sed -n "${Set_disabled_num_a}p" ${server_conf} | sed 's/\"//g;s/,$//g' | awk -F ": " '{print $2}')
		if [[ ${Set_disabled_num_a_text} == "false" ]]; then
			disabled_s="true"
		else
			disabled_s="false"
		fi
		sed -i "${Set_disabled_num_a}"'s/"disabled": '"${Set_disabled_num_a_text}"'/"disabled": '"${disabled_s}"'/g' ${server_conf}
		echo -e "${Info} 修改成功 [ 原禁用状态: ${Set_disabled_num_a_text}, 新禁用状态: ${disabled_s} ]"
	else
		echo -e "${Error} 请输入正确的节点用户名 !" && exit 1
	fi
}
# Re-configure the installed client: prompt for new connection
# settings, rewrite client-linux.py and restart the client service.
Set_ServerStatus_client() {
	check_installed_client_status
	Set_config_client
	Read_config_client
	Modify_config_client
	Restart_ServerStatus_client
}
# Substitute the user-supplied connection settings into the first
# occurrence of each config line in client-linux.py.
# Reads globals: client_file; current values client_server/client_port/
# client_user/client_password (from Read_config_client); new values
# server_s/server_port_s/username_s/password_s.
Modify_config_client() {
	sed -i '0,/SERVER = "'"${client_server}"'"/s//SERVER = "'"${server_s}"'"/' "${client_file}/client-linux.py"
	# BUGFIX: this sed was entirely single-quoted, so ${client_port} and
	# ${server_port_s} were matched/written as literal text and the port
	# was never actually updated; expand the variables like the other
	# substitutions do.
	sed -i '0,/PORT = '"${client_port}"'/s//PORT = '"${server_port_s}"'/' "${client_file}/client-linux.py"
	sed -i '0,/USER = "'"${client_user}"'"/s//USER = "'"${username_s}"'"/' "${client_file}/client-linux.py"
	sed -i '0,/PASSWORD = "'"${client_password}"'"/s//PASSWORD = "'"${password_s}"'"/' "${client_file}/client-linux.py"
}
# Install the jq JSON parser used to read config.json: a prebuilt
# binary for x86, or the distro package on other architectures.
# NOTE(review): $mirror_num is read here but not set anywhere in this
# chunk — presumably chosen by a mirror-selection menu elsewhere.
# $raw_link is assigned but never used afterwards.
Install_jq() {
	[[ ${mirror_num} == 2 ]] && {
		github_link="https://hub.fastgit.org"
		raw_link="https://raw.fastgit.org"
	} || {
		github_link="https://github.com"
		raw_link="https://raw.githubusercontent.com"
	}
	if [[ ! -e ${jq_file} ]]; then
		if [[ ${bit} == "x86_64" ]]; then
			jq_file="${file}/jq"
			wget --no-check-certificate "${github_link}/stedolan/jq/releases/download/jq-1.5/jq-linux64" -O ${jq_file}
		elif [[ ${bit} == "i386" ]]; then
			jq_file="${file}/jq"
			wget --no-check-certificate "${github_link}/stedolan/jq/releases/download/jq-1.5/jq-linux32" -O ${jq_file}
		else
			# ARM fallback to package manager
			[[ ${release} == "archlinux" ]] && pacman -Sy jq --noconfirm
			[[ ${release} == "centos" ]] && yum -y install jq
			[[ ${release} == "debian" ]] && apt -y install jq
			jq_file="/usr/bin/jq"
		fi
		[[ ! -e ${jq_file} ]] && echo -e "${Error} JQ解析器 下载失败,请检查 !" && exit 1
		chmod +x ${jq_file}
		echo -e "${Info} JQ解析器 安装完成,继续..."
	else
		echo -e "${Info} JQ解析器 已安装,继续..."
	fi
}
# Optionally install Caddy and append a site block serving the web UI
# from ${web_file}; the user may decline and configure another HTTP
# server manually.
Install_caddy() {
	echo
	echo -e "${Info} 是否由脚本自动配置HTTP服务(服务端的在线监控网站),如果选择 N,则请在其他HTTP服务中配置网站根目录为:${Green_font_prefix}${web_file}${Font_color_suffix} [Y/n]"
	read -erp "(默认: Y 自动部署):" caddy_yn
	[[ -z "$caddy_yn" ]] && caddy_yn="y"
	if [[ "${caddy_yn}" == [Yy] ]]; then
		caddy_file="/etc/caddy/Caddyfile" # Where is the default Caddyfile specified in Archlinux?
		# Install Caddy from the official repos only when missing.
		[[ ! -e /usr/bin/caddy ]] && {
			if [[ ${release} == "debian" ]]; then
				apt install -y debian-keyring debian-archive-keyring apt-transport-https curl
				curl -1sLf "https://dl.cloudsmith.io/public/caddy/stable/gpg.key" | tee /etc/apt/trusted.gpg.d/caddy-stable.asc
				curl -1sLf "https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt" | tee /etc/apt/sources.list.d/caddy-stable.list
				apt update && apt install caddy
			elif [[ ${release} == "centos" ]]; then
				yum install yum-plugin-copr -y
				yum copr enable @caddy/caddy -y
				yum install caddy -y
			elif [[ ${release} == "archlinux" ]]; then
				pacman -Sy caddy --noconfirm
			fi
			[[ ! -e "/usr/bin/caddy" ]] && echo -e "${Error} Caddy安装失败,请手动部署,Web网页文件位置:${web_file}" && exit 1
			systemctl enable caddy
			echo "" >${caddy_file}
		}
		Set_server "server"
		Set_server_http_port
		cat >>${caddy_file} <<-EOF
		http://${server_s}:${server_http_port_s} {
		    root * ${web_file}
		    encode gzip
		    file_server
		}
		EOF
		systemctl restart caddy
	else
		echo -e "${Info} 跳过 HTTP服务部署,请手动部署,Web网页文件位置:${web_file} ,如果位置改变,请注意修改服务脚本文件 /etc/init.d/status-server 中的 WEB_BIN 变量 !"
	fi
}
# End-to-end server-side installation: dependencies -> Caddy -> binaries ->
# jq -> service unit -> config files -> start. Aborts if the sergate binary
# is already present (server already installed).
Install_ServerStatus_server() {
	[[ -e "${server_file}/sergate" ]] && echo -e "${Error} 检测到 $NAME 服务端已安装 !" && exit 1
	Set_server_port
	echo -e "${Info} 开始安装/配置 依赖..."
	Installation_dependency "server"
	Install_caddy
	echo -e "${Info} 开始下载/安装..."
	Download_Server_Status_server
	Install_jq
	echo -e "${Info} 开始下载/安装 服务脚本..."
	Service_Server_Status_server
	echo -e "${Info} 开始写入 配置文件..."
	Write_server_config
	Write_server_config_conf
	echo -e "${Info} 所有步骤 安装完毕,开始启动..."
	Start_ServerStatus_server
}
# End-to-end client-side installation: user config -> dependencies ->
# download -> service unit -> write config -> start. Aborts if the client
# script is already present.
Install_ServerStatus_client() {
	[[ -e "${client_file}/client-linux.py" ]] && echo -e "${Error} 检测到 $NAME 客户端已安装 !" && exit 1
	check_sys
	echo -e "${Info} 开始设置 用户配置..."
	Set_config_client
	echo -e "${Info} 开始安装/配置 依赖..."
	Installation_dependency "client"
	echo -e "${Info} 开始下载/安装..."
	Download_Server_Status_client
	echo -e "${Info} 开始下载/安装 服务脚本..."
	Service_Server_Status_client
	echo -e "${Info} 开始写入 配置..."
	Read_config_client
	Modify_config_client
	echo -e "${Info} 所有步骤 安装完毕,开始启动..."
	Start_ServerStatus_client
}
# Update the server binaries and reinstall the (legacy init.d) service
# script, then restart. Requires an existing installation.
Update_ServerStatus_server() {
	check_installed_server_status
	Download_Server_Status_server
	# Remove the old SysV script so Service_Server_Status_server can recreate it.
	rm -rf /etc/init.d/status-server
	Service_Server_Status_server
	Start_ServerStatus_server
}
# Update the client: preserve the current SERVER/PORT/USER/PASSWORD settings
# by scraping them out of the existing client-linux.py, re-download, re-apply
# the config and recreate the systemd unit.
Update_ServerStatus_client() {
	check_installed_client_status
	systemctl stop status-client
	# Strip quotes, commas and spaces so each setting becomes KEY=VALUE on its own line.
	client_text="$(sed 's/\"//g;s/,//g;s/ //g' "${client_file}/client-linux.py")"
	server_s="$(echo -e "${client_text}" | grep "SERVER=" | awk -F "=" '{print $2;exit}')"
	server_port_s="$(echo -e "${client_text}" | grep "PORT=" | awk -F "=" '{print $2;exit}')"
	username_s="$(echo -e "${client_text}" | grep "USER=" | awk -F "=" '{print $2;exit}')"
	password_s="$(echo -e "${client_text}" | grep "PASSWORD=" | awk -F "=" '{print $2;exit}')"
	Download_Server_Status_client
	Read_config_client
	Modify_config_client
	rm -rf ${service}/status-client.service
	Service_Server_Status_client
	Start_ServerStatus_client
}
# Start the server service and report the listening port, which is scraped
# from the C++ source (m_Port in src/main.cpp) rather than a config file.
# Refuses to start when the unit is already active.
Start_ServerStatus_server() {
	port="$(grep "m_Port = " ${server_file}/src/main.cpp | awk '{print $3}' | sed '{s/;$//}')"
	check_installed_server_status
	systemctl -q is-active status-server && echo -e "${Error} $NAME 正在运行,请检查 !" && exit 1
	systemctl start status-server
	if (systemctl -q is-active status-server) then
		echo -e "${Info} $NAME 服务端启动成功[监听端口:${port}] !"
	else
		echo -e "${Error} $NAME 服务端启动失败 !"
	fi
}
# Stop the server service; exits with an error if it was not running, and
# re-checks afterwards to report whether the stop actually took effect.
Stop_ServerStatus_server() {
	check_installed_server_status
	if (systemctl -q is-active status-server)
	then
		systemctl stop status-server
	else
		echo -e "${Error} $NAME 没有运行,请检查 !" && exit 1
	fi
	if (systemctl -q is-active status-server) then
		echo -e "${Error} $NAME 服务端停止失败 !"
	else
		echo -e "${Info} $NAME 服务端停止成功 !"
	fi
}
# Restart the server service and report success or failure based on whether
# the unit is active afterwards.
Restart_ServerStatus_server() {
	check_installed_server_status
	systemctl restart status-server
	if systemctl -q is-active status-server; then
		echo -e "${Info} $NAME 服务端重启成功 !"
	else
		echo -e "${Error} $NAME 服务端重启失败 !" && exit 1
	fi
}
# Interactively uninstall the server side. Keeps the client files when a
# client installation is also present; otherwise removes the whole install
# directory. Also removes Caddy if it was installed.
Uninstall_ServerStatus_server() {
	check_installed_server_status
	echo "确定要卸载 $NAME 服务端(如果同时安装了客户端,则只会删除服务端) ? [y/N]"
	echo
	read -erp "(默认: n):" unyn
	[[ -z ${unyn} ]] && unyn="n"
	if [[ ${unyn} == [Yy] ]]; then
		systemctl stop status-server
		systemctl disable status-server
		rm ${service}/status-server.service -f
		# If the client coexists, delete only server-owned directories.
		if [[ -e "${client_file}/client-linux.py" ]]; then
			rm -rf "${server_file}"
			rm -rf "${web_file}"
			rm -rf "${plugin_file}"
		else
			rm -rf "${file}"
		fi
		if [[ -e "/usr/bin/caddy" ]]; then
			systemctl stop caddy
			systemctl disable caddy
			[[ ${release} == "debian" ]] && apt purge -y caddy
			[[ ${release} == "centos" ]] && yum -y remove caddy
			[[ ${release} == "archlinux" ]] && pacman -R caddy --noconfirm
		fi
		systemctl daemon-reload
		systemctl reset-failed
		echo && echo "ServerStatus 卸载完成 !" && echo
	else
		echo && echo "卸载已取消..." && echo
	fi
}
# Start the client service; refuses when it is already active and reports
# whether the start succeeded.
Start_ServerStatus_client() {
	check_installed_client_status
	if systemctl -q is-active status-client; then
		echo -e "${Error} $NAME 客户端正在运行,请检查 !" && exit 1
	fi
	systemctl start status-client
	if systemctl -q is-active status-client; then
		echo -e "${Info} $NAME 客户端启动成功 !"
	else
		echo -e "${Error} $NAME 客户端启动失败 !"
	fi
}
# Stop the client service; exits with an error when it was not running, and
# re-checks afterwards to report whether the stop took effect.
# BUG FIX: the failure message printed a stray closing brace because of a
# "${Error}}" typo; corrected to "${Error}".
Stop_ServerStatus_client() {
	check_installed_client_status
	if (systemctl -q is-active status-client) then
		systemctl stop status-client
		if (systemctl -q is-active status-client) then
			echo -e "${Error} $NAME 停止失败 !"
		else
			echo -e "${Info} $NAME 停止成功 !"
		fi
	else
		echo -e "${Error} $NAME 没有运行,请检查 !" && exit 1
	fi
}
# Restart the client service and report the outcome.
# CONSISTENCY FIX: every other client operation (start/stop/uninstall/view)
# first verifies the installation; add the same guard here, matching
# Restart_ServerStatus_server on the server side.
Restart_ServerStatus_client() {
	check_installed_client_status
	systemctl restart status-client
	if (systemctl -q is-active status-client) then
		echo -e "${Info} $NAME 重启成功 !"
	else
		echo -e "${Error} $NAME 重启失败 !" && exit 1
	fi
}
# Interactively uninstall the client side only: stop/disable the unit,
# remove the unit file and the client directory.
Uninstall_ServerStatus_client() {
	check_installed_client_status
	echo "确定要卸载 $NAME 客户端(如果同时安装了服务端,则只会删除客户端) ? [y/N]"
	echo
	read -erp "(默认: n):" unyn
	[[ -z ${unyn} ]] && unyn="n"
	if [[ ${unyn} == [Yy] ]]; then
		systemctl stop status-client
		systemctl disable status-client
		rm ${service}/status-client.service -f
		systemctl daemon-reload
		systemctl reset-failed
		rm -rf "${client_file}"
		echo && echo "ServerStatus 卸载完成 !" && echo
	else
		echo && echo "卸载已取消..." && echo
	fi
}
# Print the client's current connection settings (read by Read_config_client
# into client_server/client_port/client_user/client_password).
View_ServerStatus_client() {
	check_installed_client_status
	Read_config_client
	clear && echo "————————————————————" && echo
	echo -e " $NAME 客户端配置信息:
 IP \t: ${Green_font_prefix}${client_server}${Font_color_suffix}
 端口 \t: ${Green_font_prefix}${client_port}${Font_color_suffix}
 账号 \t: ${Green_font_prefix}${client_user}${Font_color_suffix}
 密码 \t: ${Green_font_prefix}${client_password}${Font_color_suffix}
————————————————————"
}
# Follow the client service journal; when invoked without arguments
# (interactive menu mode) return to the menu afterwards.
View_client_Log() {
	journalctl -u status-client.service --no-pager -f
	if (( $# == 0 )); then
		before_show_menu
	fi
}
# Follow the server service journal; when invoked without arguments
# (interactive menu mode) return to the menu afterwards.
View_server_Log() {
	journalctl -u status-server.service --no-pager -f
	if (( $# == 0 )); then
		before_show_menu
	fi
}
# Self-update: fetch the latest script version tag from GitHub, regenerate
# any installed systemd units, then overwrite this script in place. The
# overwrite of the running script may produce harmless errors (noted in the
# final message).
Update_Shell() {
	sh_new_ver=$(wget --no-check-certificate -qO- -t1 -T3 "${github_prefix}/status.sh" | grep 'sh_ver="' | awk -F "=" '{print $NF}' | sed 's/\"//g' | head -1)
	[[ -z ${sh_new_ver} ]] && echo -e "${Error} 无法链接到 Github !" && exit 0
	if [[ -e "${service}/status-client.service" ]]; then
		rm -rf ${service}/status-client.service
		Service_Server_Status_client
	fi
	if [[ -e "${service}/status-server.service" ]]; then
		rm -rf ${service}/status-server.service
		Service_Server_Status_server
	fi
	wget -N --no-check-certificate "${github_prefix}/status.sh"
	echo -e "脚本已更新为最新版本[ ${sh_new_ver} ] !(注意:因为更新方式为直接覆盖当前运行的脚本,所以可能下面会提示一些报错,无视即可)" && exit 0
}
# Interactive client menu: prints the option list and current install/run
# state, then dispatches on the user's numeric choice. Option 10 switches
# to the server menu.
menu_client() {
	echo && echo -e " $NAME 一键安装管理脚本 ${Red_font_prefix}[v${sh_ver}]${Font_color_suffix}
  ${Green_font_prefix} 0.${Font_color_suffix} 升级脚本
————————————
  ${Green_font_prefix} 1.${Font_color_suffix} 安装 客户端
  ${Green_font_prefix} 2.${Font_color_suffix} 更新 客户端
  ${Green_font_prefix} 3.${Font_color_suffix} 卸载 客户端
————————————
  ${Green_font_prefix} 4.${Font_color_suffix} 启动 客户端
  ${Green_font_prefix} 5.${Font_color_suffix} 停止 客户端
  ${Green_font_prefix} 6.${Font_color_suffix} 重启 客户端
————————————
  ${Green_font_prefix} 7.${Font_color_suffix} 设置 客户端配置
  ${Green_font_prefix} 8.${Font_color_suffix} 查看 客户端信息
  ${Green_font_prefix} 9.${Font_color_suffix} 查看 客户端日志
————————————
  ${Green_font_prefix}10.${Font_color_suffix} 切换为 服务端菜单" && echo
	# Installed = client-linux.py exists; started = systemd unit is active.
	if [[ -e "${client_file}/client-linux.py" ]]; then
		if (systemctl -q is-active status-client); then
			echo -e " 当前状态: 客户端 ${Green_font_prefix}已安装${Font_color_suffix} 并 ${Green_font_prefix}已启动${Font_color_suffix}"
		else
			echo -e " 当前状态: 客户端 ${Green_font_prefix}已安装${Font_color_suffix} 但 ${Red_font_prefix}未启动${Font_color_suffix}"
		fi
	else
		echo -e " 当前状态: 客户端 ${Red_font_prefix}未安装${Font_color_suffix}"
	fi
	echo
	read -erp " 请输入数字 [0-10]:" num
	case "$num" in
	0)
		Update_Shell
		;;
	1)
		Install_ServerStatus_client
		;;
	2)
		Update_ServerStatus_client
		;;
	3)
		Uninstall_ServerStatus_client
		;;
	4)
		Start_ServerStatus_client
		;;
	5)
		Stop_ServerStatus_client
		;;
	6)
		Restart_ServerStatus_client
		;;
	7)
		Set_ServerStatus_client
		;;
	8)
		View_ServerStatus_client
		;;
	9)
		View_client_Log
		;;
	10)
		menu_server
		;;
	*)
		echo "请输入正确数字 [0-10]"
		;;
	esac
}
# Interactive server menu: mirrors menu_client for the server-side
# operations. Option 10 switches back to the client menu.
menu_server() {
	echo && echo -e " $NAME 一键安装管理脚本 ${Red_font_prefix}[v${sh_ver}]${Font_color_suffix}
  ${Green_font_prefix} 0.${Font_color_suffix} 升级脚本
————————————
  ${Green_font_prefix} 1.${Font_color_suffix} 安装 服务端
  ${Green_font_prefix} 2.${Font_color_suffix} 更新 服务端
  ${Green_font_prefix} 3.${Font_color_suffix} 卸载 服务端
————————————
  ${Green_font_prefix} 4.${Font_color_suffix} 启动 服务端
  ${Green_font_prefix} 5.${Font_color_suffix} 停止 服务端
  ${Green_font_prefix} 6.${Font_color_suffix} 重启 服务端
————————————
  ${Green_font_prefix} 7.${Font_color_suffix} 设置 服务端配置
  ${Green_font_prefix} 8.${Font_color_suffix} 查看 服务端信息
  ${Green_font_prefix} 9.${Font_color_suffix} 查看 服务端日志
————————————
  ${Green_font_prefix}10.${Font_color_suffix} 切换为 客户端菜单" && echo
	# Installed = sergate binary exists; started = systemd unit is active.
	if [[ -e "${server_file}/sergate" ]]; then
		if (systemctl -q is-active status-server) then
			echo -e " 当前状态: 服务端 ${Green_font_prefix}已安装${Font_color_suffix} 并 ${Green_font_prefix}已启动${Font_color_suffix}"
		else
			echo -e " 当前状态: 服务端 ${Green_font_prefix}已安装${Font_color_suffix} 但 ${Red_font_prefix}未启动${Font_color_suffix}"
		fi
	else
		echo -e " 当前状态: 服务端 ${Red_font_prefix}未安装${Font_color_suffix}"
	fi
	echo
	read -erp " 请输入数字 [0-10]:" num
	case "$num" in
	0)
		Update_Shell
		;;
	1)
		Install_ServerStatus_server
		;;
	2)
		Update_ServerStatus_server
		;;
	3)
		Uninstall_ServerStatus_server
		;;
	4)
		Start_ServerStatus_server
		;;
	5)
		Stop_ServerStatus_server
		;;
	6)
		Restart_ServerStatus_server
		;;
	7)
		Set_ServerStatus_server
		;;
	8)
		List_ServerStatus_server
		;;
	9)
		View_server_Log
		;;
	10)
		menu_client
		;;
	*)
		echo "请输入正确数字 [0-10]"
		;;
	esac
}
# Entry point: detect the OS, then pick a menu from the optional first
# argument ("s" -> server menu, "c" -> client menu, no argument -> client
# menu; any other value is a no-op, matching the original behavior).
check_sys
action="${1:-}"
case "$action" in
s)
	menu_server
	;;
c | "")
	menu_client
	;;
*)
	:
	;;
esac
| true |
a31336c96372499fa8c0f754fc003ece9484acc4 | Shell | fpezzini/Basic_Hyb_Seq_Assembly | /vcf_metrics.sh | UTF-8 | 1,097 | 3.421875 | 3 | [] | no_license | #! /bin/bash -x
# Gather a per-sample table of variant quality/depth statistics from VCFs.
# Assumes the list of samples is in the text file "List" and each sample has
# a VCF named ACC.vcf.gz in the current directory.
# Usage: ./vcf_metrics.sh ref_vcf_file
# Catherine Kidner 9 Dec 2018
#
# FIXES: quote $1/$ref_vcf (paths with spaces), use `read -r` so backslashes
# in sample names survive, and collapse the triplicated per-class extraction
# into a loop over the three bcftools isec output classes.

ref_vcf="$1"

echo "Hello world"

while IFS= read -r f; do
	echo "$f"
	# Intersect the reference VCF with this sample's VCF; bcftools writes
	# dir/0000.vcf (ref-only), dir/0001.vcf (sample-only), dir/0002.vcf (shared).
	bcftools isec -p dir "$ref_vcf" "$f".vcf.gz
	# For each class, pull QUAL (col 6) and the DP= depth from INFO (col 8),
	# skipping indel records.
	for i in 0 1 2; do
		grep "GT:PL" "dir/000${i}.vcf" | grep -v "INDEL" | cut -f6 > "qual_${i}"
		grep "GT:PL" "dir/000${i}.vcf" | grep -v "INDEL" | cut -f8 | cut -f1 -d ";" | sed 's/DP=//g' > "dep_${i}"
	done
	# Join the columns into a single table with a header row.
	paste dep_* > all_dep
	paste qual_* > all_qual
	paste all_dep all_qual > data
	echo "Ref_dep Sample_dep Both_dep Ref_qual Sample_qual Both_qual" > row1
	cat row1 data > "$f"_data
	rm -r dir
done < List

exit 0
| true |
f34fdc2ecafd1d7f5bd6de6f026f57411f2d5d80 | Shell | reisdebora/mongodatamodels | /mongoshard.sh | UTF-8 | 3,866 | 3.453125 | 3 | [] | no_license | #!/bin/bash
### BEGIN INIT INFO
# Provides:          mongoshard
# Required-Start:    $local_fs $network $named $time $syslog
# Required-Stop:     $local_fs $network $named $time $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Description:       Start script for a mongo sharded cluster
### END INIT INFO
# Runtime configuration: the docker-compose project lives next to this
# script; container data directories are created under $DATA_DIR.
SETUPDIR=`pwd -P`
export DATA_DIR=$SETUPDIR/data
# Compose command lines used by start/stop/status below.
SCRIPTSTART="docker-compose -f $SETUPDIR/docker-compose.yml up"
SCRIPTSTOP="docker-compose -f $SETUPDIR/docker-compose.yml down"
SCRIPTSTATUS="docker-compose -f $SETUPDIR/docker-compose.yml ps"
#RUNAS=<USERNAME>
PIDFILE=/var/run/mongoshard.pid
LOGFILE=/var/log/mongoshard.log
# Start the compose cluster in the background, record its PID, and wait for
# the mongos router to answer a ping before reporting success.
# BUG FIX: the original tested /var/run/$PIDNAME, but PIDNAME is never
# defined anywhere in this script; use the configured $PIDFILE instead
# (matching stop()).
start() {
  if [ -f "$PIDFILE" ] && kill -0 "$(cat "$PIDFILE")"; then
    echo 'Service already running' >&2
    return 1
  fi
  echo 'Starting service…' >&2
  local CMD="$SCRIPTSTART &> \"$LOGFILE\" & echo \$!"
  su -c "$CMD" > "$PIDFILE"
  wait_up mongos1 27017
  echo 'Service started' >&2
}
# Like start(), but used during first-time initialization: launches the
# cluster without waiting for mongos to come up (init() does its own waits).
# BUG FIX: same undefined-PIDNAME problem as start(); test $PIDFILE instead.
start_init() {
  if [ -f "$PIDFILE" ] && kill -0 "$(cat "$PIDFILE")"; then
    echo 'Service already running' >&2
    return 1
  fi
  echo 'Starting service…' >&2
  local CMD="$SCRIPTSTART &> \"$LOGFILE\" & echo \$!"
  su -c "$CMD" > "$PIDFILE"
}
# Stop the compose cluster, wait for the recorded PID to exit, then remove
# the pidfile.
stop() {
  if [ ! -f "$PIDFILE" ] || ! kill -0 $(cat "$PIDFILE"); then
    echo 'Service not running' >&2
    return 1
  fi
  echo 'Stopping service…' >&2
  local CMD="$SCRIPTSTOP &> \"$LOGFILE\" & echo \$!"
  su -c "$CMD"
  # Poll until the original compose process is gone.
  while kill -0 $(cat "$PIDFILE") >/dev/null 2>&1
  do
    printf '.'
    sleep 1
  done
  rm -f "$PIDFILE"
  echo 'Service stopped' >&2
}
# Poll until the mongo instance inside container $1 answers a ping on port
# $2; prints a dot per attempt, retrying every second.
wait_up(){
  until docker exec -it $1 bash -c "mongo --host $1:$2 --eval 'quit(db.runCommand({ ping: 1 }).ok ? 0 : 2)'"&>/dev/null; do
    printf '.'
    sleep 1
  done
}
# Wipe every container's data directory (requires sudo) and recreate it
# empty. Destroys all cluster data.
reset(){
  dirs=( mongocfg1 mongocfg2 mongocfg3 mongors1n1 mongors1n2 mongors1n3 mongors2n1 mongors2n2 mongors2n3 mongocli )
  for d in ${dirs[@]}; do
    echo "Resetting data/$d"
    sudo rm -rf data/$d
    mkdir data/$d
    #touch data/$d/.gitkeep
  done
}
# First-time setup: create (or recreate) each container's data directory.
# Same directory list as reset(); kept in sync manually.
mkdir_init(){
  echo "Creating data folders"
  dirs=( mongocfg1 mongocfg2 mongocfg3 mongors1n1 mongors1n2 mongors1n3 mongors2n1 mongors2n2 mongors2n3 mongocli )
  for d in ${dirs[@]}; do
    echo "Creating data/$d"
    sudo rm -rf data/$d
    mkdir data/$d -p
    #touch data/$d/.gitkeep
  done
}
# Show compose service status; fails fast when no live PID is recorded.
status(){
  if [ ! -f "$PIDFILE" ] || ! kill -0 $(cat "$PIDFILE"); then
    echo 'Service not running' >&2
    return 1
  fi
  echo 'Service status…' >&2
  local CMD="$SCRIPTSTATUS"
  su -c "$CMD"
}
# One-time cluster bootstrap: create data dirs, start containers, initiate
# the config-server replica set and both shard replica sets (the JS snippets
# are executed inside the containers via mongo), then register the shards
# with the mongos router.
init(){
  mkdir_init
  start_init
  #sleep 15
  echo "Waiting for config containers"
  wait_up mongocfg1 27019
  echo "Started.."
  echo "Intializing replicas config set"
  # rs.initiate() on the first config node, then fix its host name and add
  # the other two config nodes.
  replicate="rs.initiate(); sleep(1000); cfg = rs.conf(); cfg.members[0].host = \"mongocfg1:27019\"; rs.reconfig(cfg); rs.add(\"mongocfg2:27019\"); rs.add(\"mongocfg3:27019\"); rs.status();"
  docker exec -it mongocfg1 bash -c "echo '${replicate}' | mongo --port 27019"
  echo "Waiting for shard containers"
  wait_up mongors1n1 27018
  echo "Started.."
  # Initialize replica sets mongors1 and mongors2 the same way.
  for (( rs = 1; rs < 3; rs++ )); do
    echo "Intializing replica ${rs} set"
    replicate="rs.initiate(); sleep(1000); cfg = rs.conf(); cfg.members[0].host = \"mongors${rs}n1:27018\"; rs.reconfig(cfg); rs.add(\"mongors${rs}n2:27018\"); rs.add(\"mongors${rs}n3:27018\"); rs.status();"
    docker exec -it mongors${rs}n1 bash -c "echo '${replicate}' | mongo --port 27018"
  done
  echo "Waiting for router containers"
  #sleep 2
  wait_up mongos1 27017
  docker exec -it mongos1 bash -c "echo \"sh.addShard('mongors1/mongors1n1:27018'); sh.addShard('mongors2/mongors2n1:27018');\" | mongo "
}
# Dispatch on the first CLI argument.
# BUG FIX: the usage line advertises "restart" but the original case arm was
# misspelled "retart", so the documented command silently printed usage.
# Accept both spellings for backward compatibility.
case "$1" in
  start)
    start
    ;;
  reset)
    reset
    ;;
  stop)
    stop
    ;;
  init)
    init
    ;;
  status)
    status
    ;;
  restart | retart)
    stop
    start
    ;;
  *)
    echo "Usage: $0 {start|stop|restart|reset|init|status}"
esac
| true |
c51abb7451b4fbbfd1ae5244c3ae2945cabf7dfc | Shell | pymir3/pymir3 | /scripts/score_eval/transcribe_betabase_lower_pitchclass_entropy.sh | UTF-8 | 3,715 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Abort on the first failing command anywhere in this pipeline script.
set -o errexit
if [ $# -lt 1 ]
then
    echo "Usage: $0 <dataset directory>"
    exit 1
fi
if [ ! -d "$1" ]
then
    echo "Invalid directory \"$1\"!"
    exit 1
fi
database="$1"
# Analysis parameters: spectrogram frequency band (Hz), STFT window/DFT
# sizes (presumably in samples — TODO confirm against pymir3 docs),
# beta-divergence for NMF, number of detection thresholds to try, and the
# minimum note duration (seconds) kept when trimming scores.
min_freq=10
max_freq=5000
window_length=2048
dft_length=4096
beta=0.5
n_tests=10
minimum_note_length=0.05
echo 'Converting wav to spectrograms...'
# Compute a trimmed spectrogram next to every .wav that lacks one.
# BUG FIX: `for name in $(find …)` word-splits paths, breaking on filenames
# with whitespace; iterate over NUL-delimited find output instead.
while IFS= read -r -d '' name
do
    target_name="${name%.wav}.spec"
    if [ ! -e "$target_name" ]
    then
        echo "$name"
        # Two-stage: full spectrogram into a temp file, then trim to the band.
        ./pymir3-cl.py tool wav2spectrogram -l $window_length -L $dft_length "$name" /tmp/$$
        ./pymir3-cl.py tool trim_spectrogram -f $min_freq -F $max_freq /tmp/$$ "$target_name"
        rm /tmp/$$
    fi
done < <(find "$database" -name '*.wav' -print0)
echo 'Converting samples spectrogram to basis...'
# Build a one-component NMF basis (.beta.dec) from each piano sample
# spectrogram, skipping samples already converted.
for name in `find "$database"/Samples/Audio/Piano -name '*.spec'`
do
    target_name="${name%.spec}.beta.dec"
    if [ ! -e "$target_name" ]
    then
        echo "$name"
        # Note label: drop the first 4 filename characters and the .spec suffix.
        note=`basename "${name}" | sed 's/^.\{4\}//;s/\.spec$//'`
        ./pymir3-cl.py supervised linear decomposer beta_nmf -s 1 piano "$note" "$name" /tmp/$$
        # Keep only the left (basis) factor of the decomposition.
        ./pymir3-cl.py supervised linear extract left /tmp/$$ "$target_name"
        rm /tmp/$$
    fi
done
echo 'Merging basis...'
# Combine all per-note bases into one piano dictionary (only once).
merged_basis="$database"/Samples/Audio/piano.beta.dec
if [ ! -e "$merged_basis" ]
then
    ./pymir3-cl.py supervised linear merge `find "$database"/Samples/Audio/Piano -name '*.beta.dec'` "$merged_basis"
fi
echo 'Converting labels...'
# Convert each ground-truth label .txt into a .score file (skip existing).
for name in `find "$database"/Pieces/Labels/Piano -name '*.txt'`
do
    target_name="${name%.txt}.score"
    if [ ! -e "$target_name" ]
    then
        echo "$name"
        ./pymir3-cl.py tool label2score --instrument piano "$name" "$target_name"
    fi
done
echo 'Processing each individual piece...'
# For every piece spectrogram: compute NMF activations against the merged
# piano basis, sweep n_tests detection thresholds, transcribe each into a
# score, pick the score with the lowest pitch-class entropy, and evaluate it
# against the ground truth with the MIREX symbolic metrics.
for name in `find "$database"/Pieces/Audio -name '*.spec'`
do
    echo $name
    echo "Computing activation"
    basename="${name%.spec}"
    target_name="${name%.spec}.beta.dec"
    if [ ! -e "$target_name" ]
    then
        # Decompose against the fixed basis; keep only the right (activation) factor.
        ./pymir3-cl.py supervised linear decomposer beta_nmf --beta $beta --basis "$database"/Samples/Audio/piano.beta.dec "$name" /tmp/$$
        ./pymir3-cl.py supervised linear extract right /tmp/$$ "$target_name"
        rm /tmp/$$
    fi
    echo 'Computing threshold values to test...'
    thresholds=`./pymir3-cl.py unsupervised detection threshold tests -n $n_tests "$basename"*.beta.dec`
    echo $thresholds
    echo 'Applying thresholds'
    for th in $thresholds
    do
        th_name="${basename%.beta.dec}_th_${th}.beta"
        echo $target_name $th
        target_name1="${th_name}.bdec"
        target_name2="${th_name}.bdec.score"
        if [ ! -e "$target_name1" ]
        then
            ./pymir3-cl.py unsupervised detection threshold detect $th "$target_name" "$target_name1"
        fi
        if [ ! -e "$target_name2" ]
        then
            # Transcribe the binarized activations, dropping notes shorter
            # than minimum_note_length.
            ./pymir3-cl.py unsupervised detection score piano "$target_name1" /tmp/$$
            ./pymir3-cl.py tool trim_score -d $minimum_note_length /tmp/$$ "$target_name2"
        fi
    done
    scorenames=`ls $basename*.score`
    # Model selection: keep the candidate score with the lowest pitch-class entropy.
    best_score=`./pymir3-cl.py unsupervised detection threshold lower_pitchclass_entropy $scorenames`
    final_target="${best_score}.selected"
    cp $best_score $final_target
    target_name3="${basename}.beta.lower_pitchclass_entropy.symbolic.eval"
    score_name=`echo "${name%.spec}.score" | sed 's,/Audio/,/Labels/Piano/,'`
    #if [ ! -e "$target_name3" ]
    #then
    ./pymir3-cl.py evaluation mirex_symbolic "$final_target" "$score_name" "$target_name3" --id $th
    #fi
    echo $target_name3
done
# Aggregate every per-piece evaluation into a CSV and summary statistics.
evaluations=`find "$database"/Pieces/Audio/ -name "*beta.lower_pitchclass_entropy.symbolic.eval"`
./pymir3-cl.py info evaluation_csv $evaluations
./pymir3-cl.py info evaluation_statistics $evaluations
| true |
077b6dde44942ee09af1dc3bc6d2117d913d9aa9 | Shell | evilon/sandbox | /codified-exercises/rebase-exercise.sh | UTF-8 | 2,565 | 3.5625 | 4 | [] | no_license | #!/bin/sh
# Print an error message to stderr and abort the script.
# BUG FIX: $* was unquoted, so messages were word-split and glob-expanded;
# pass the arguments through verbatim with "$@".
myerr(){
echo "ERR:" "$@" 1>&2
exit 1
}
# Work relative to the script's own directory; refuse to run from inside the
# repo checkout ("codified-exercises") so the exercise repos are created
# outside it.
cd $(dirname $0) || myerr "Failed to move to working directory"
mydir=$(pwd)
[ $(basename $(pwd)) != "codified-exercises" ]|| myerr "Move this script away from repo"
# Paths for the simulated remote ("origin") and the two clones used below.
EXERCISE="${mydir}/rebase-exercise"
origin="${EXERCISE}/origin"
mylocalrepo="${EXERCISE}/rebaserepo-my"
somebodyelse="${EXERCISE}/rebaserepo-somebodyelse"
mkdir ${EXERCISE}
cd ${EXERCISE} || myerr "Failed to move to exercise dir"
# First we'll create the repo which simulates the "origin".
# This could be your github site in real-world
git init ${origin} || myerr "Repo init failed"
cd ${origin}
echo "First commit to master..."
echo "master of puppets" > worked-in-master.txt
git add worked-in-master.txt
git commit -m "1st commit to master"
git checkout -b "null" #to allow pushes, origin/master can not be checked out
# Now, we will clone the origin for ourselves.
# After clone, we already start working in our own branch.
git clone ${origin} ${mylocalrepo}
cd ${mylocalrepo}
echo "Branch and commit"
git checkout -b lagger
echo "slave new world" > worked-in-local.txt
git add worked-in-local.txt
git commit -m "1st commit to local 'topic' branch"
# While we are working, somebody else commits to origin/master
# a bunch of commits (we will use another clone to simulate this)
cd ${EXERCISE}
git clone ${origin} ${somebodyelse}
cd ${somebodyelse} || myerr "failed to move to repo"
git checkout master
echo "2nd commit to master..."
echo "is pulling your strings" > worked-in-master.txt
git add worked-in-master.txt
git commit -m "2nd commit to master"
echo "3rd commit to master..."
echo "blinded by me you can't see a thing" > worked-in-master.txt
git add worked-in-master.txt
git commit -m "3rd commit to master"
git push --set-upstream origin master
# You continue to work with your own branch
cd ${mylocalrepo} || myerr "Moving to ${mylocalrepo} failed"
git checkout lagger
echo "Face the enemy" > worked-in-local.txt
git add worked-in-local.txt
git commit -m "2nd commit to local 'topic' branch"
# Now you realise, that you are behind the origin/master quite a lot.
# You think you should not duplicate a bunch of commits made by others,
# when you commit/push to origin. Instead of merge, you decide to rebase.
git fetch origin || myerr "Git fetch origin failed"
git rebase origin/master || myerr "Rebase failed"
# Merge your branch to master (In github, this could happen via pull request)
git checkout master
git merge lagger master
git push
# You are done, time to get rid of your branch
git branch -d lagger
ba6a04f0f8fab7a03d214d7de38220214ebdf0f0 | Shell | fukusaka/dotfiles | /bin/update-multipass-docker-context | UTF-8 | 475 | 3 | 3 | [] | no_license | #!/bin/bash
# Point a Docker CLI context at the dockerd running inside a multipass VM.
# MULTIPASS_NAME and DOCKER_CONTEXT can be overridden from the environment.
MULTIPASS_NAME=${MULTIPASS_NAME=workspace}
DOCKER_CONTEXT=${DOCKER_CONTEXT=docker-vm}

# First IPv4 address of the VM, taken from `multipass info` JSON output.
DOCKER_HOST="$(multipass info "${MULTIPASS_NAME}" --format json | jq -r ".info[\"${MULTIPASS_NAME}\"].ipv4[0]")"

# ROBUSTNESS FIX: the original compared "x$(... | grep ^name$)" against an
# unquoted expansion, which is fragile for empty names and treats the name
# as a regex; use a quiet fixed-string whole-line match instead.
if docker context list -q | grep -Fxq -- "${DOCKER_CONTEXT}"; then
    CONTEXT_MODE=update
else
    CONTEXT_MODE=create
fi

docker context \
    "${CONTEXT_MODE}" "${DOCKER_CONTEXT}" \
    --docker "host=tcp://${DOCKER_HOST}:2375"
| true |
82d07f2349b64a24d821bd67b7680d9eb021509a | Shell | 976112643/vtools | /app/src/main/assets/custom/switchs/mi8status_set.sh | UTF-8 | 1,112 | 3.046875 | 3 | [] | no_license | #!/system/bin/sh
# Toggle MIUI's notch flag (ro.miui.notch) in build.prop. Requires an
# unlocked, writable /system; MIUI's bundled root cannot remount it.
# NOTE(review): $BUSYBOX and $state are presumably injected by the vtools
# host app before this snippet runs — confirm against the caller.
echo '使用本功能,需要解锁system分区,否则修改无效!'
echo 'MIUI自带的ROOT无法使用本功能'
echo '1.挂载/system为读写'
# Try several remount variants; at least one should succeed depending on the
# device's partition layout.
$BUSYBOX mount -o rw,remount /system
mount -o rw,remount /system
$BUSYBOX mount -o remount,rw /dev/block/bootdevice/by-name/system /system
mount -o remount,rw /dev/block/bootdevice/by-name/system /system 2> /dev/null
busybox mount -o rw,remount /vendor 2> /dev/null
mount -o rw,remount /vendor 2> /dev/null
# Some devices keep ro.miui.notch in /vendor/build.prop instead of /system.
path="/system/build.prop"
if [[ -f /vendor/build.prop ]] && [[ -n `cat /vendor/build.prop | grep ro\.miui\.notch=` ]]
then
	path="/vendor/build.prop"
fi
# Drop any existing ro.miui.notch line, then append the requested value.
$BUSYBOX sed '/ro.miui.notch=/'d $path > /cache/build.prop
if [ $state == 1 ];then
	$BUSYBOX sed -i '$aro.miui.notch=1' /cache/build.prop
	echo '2.修改ro.miui.notch=1'
else
	$BUSYBOX sed -i '$aro.miui.notch=0' /cache/build.prop
	echo '2.修改ro.miui.notch=0'
fi
echo '3.覆盖/system/build.prop'
cp /cache/build.prop $path
echo '4.修正读写权限'
chmod 0755 $path
echo '5.删除临时文件'
rm /cache/build.prop
sync
echo ''
echo '重启后生效!'
| true |
639b4a3ac3ff17e6a715d4d6e9bc3d837a4c008a | Shell | felix/archlinux | /spawn-fcgi/PKGBUILD | UTF-8 | 540 | 2.65625 | 3 | [] | no_license | # Contributor: Felix Hanley <felix@seconddrawer.com.au>
# PKGBUILD metadata for the spawn-fcgi package (Fast CGI process spawner).
pkgname=spawn-fcgi
pkgver=1.6.3
pkgrel=2
pkgdesc="Fast CGI process spawner"
arch=('i686' 'x86_64')
url="http://redmine.lighttpd.net/projects/spawn-fcgi"
license=('GPL')
depends=(glibc)
source=(http://www.lighttpd.net/download/$pkgname-$pkgver.tar.gz)
md5sums=(6d75f9e9435056fa1e574d836d823cd0)
# makepkg build step: configure with FHS paths and install straight into
# $pkgdir (this legacy PKGBUILD does the install here rather than in a
# separate package() function).
build() {
  cd "$srcdir/$pkgname-$pkgver"
  ./configure --prefix=/usr --sysconfdir=/etc --libexecdir=/usr/lib --localstatedir=/var
  make || return 1
  make DESTDIR="$pkgdir/" install
}
# vim:set ts=2 sw=2 et:
| true |
173ba77b5c480549171221cf5cffde623d148828 | Shell | koraynilay/linux-custom-scripts | /degoo_upload_script.sh | UTF-8 | 339 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Split every entry in the current directory into 500 MB chunks under
# "<dirname>_degoo/<name>.folder/". Dry-run by default: unless the user
# types exactly "yes", each command is only echoed instead of executed
# (the $echo prefix trick below).
echo pwd:`pwd`. Are you sure?
echo=1;
read res
if [ "$res" == "yes" ]; then
echo=0;
fi
foldername="`basename "$PWD"`_degoo";
# echo=1 -> prefix every command with "echo" (print only); echo=0 -> run it.
if [ $echo -eq 1 ];then
echo="echo";
else
echo="";
fi
# NOTE(review): `split` on a directory entry will fail; presumably the
# current directory is expected to contain only regular files — confirm.
for a in *; do
$echo mkdir -vp "${foldername}/${a}.folder";
$echo split --verbose -b 500M -d -a 3 "$a" "${foldername}/${a}.folder/${a}.";
done
| true |
821d66bfd3bb7630cb243746d91937ba32904399 | Shell | delkyd/alfheim_linux-PKGBUILDS | /arch-wiki-man/PKGBUILD | UTF-8 | 638 | 2.6875 | 3 | [] | no_license | # Maintainer: Gregory Scheerlinck <gregory dot scheerlinck at gmail dot com>
# PKGBUILD metadata for the arch-wiki-man npm package; _npmname/_npmver feed
# both the source URL and the npm install in package().
_npmname=arch-wiki-man
_npmver=1.3.0
pkgname=arch-wiki-man # All lowercase
pkgver=1.3.0
pkgrel=1
pkgdesc="The Arch Wiki as linux man pages"
arch=(any)
url="https://github.com/greg-js/arch-wiki-man#readme"
license=()
depends=('nodejs' 'npm' )
optdepends=()
source=(http://registry.npmjs.org/$_npmname/-/$_npmname-$_npmver.tgz)
noextract=($_npmname-$_npmver.tgz)
sha1sums=(5d12b7248a80d15e1fd4bc71405cc0d65ba38aad)
# makepkg package step: install the npm tarball globally into the staged
# $pkgdir prefix so pacman owns the node_modules tree.
package() {
  cd $srcdir
  local _npmdir="$pkgdir/usr/lib/node_modules/"
  mkdir -p $_npmdir
  cd $_npmdir
  npm install -g --prefix "$pkgdir/usr" $_npmname@$_npmver
}
# vim:set ts=2 sw=2 et:
| true |
b1e46dcb4899caa08c2b2926f26bb759a1d1b509 | Shell | morgothB/OSprac | /lab2/task3 | UTF-8 | 256 | 2.53125 | 3 | [] | no_license | #! /bin/bash
# Recursively scan /etc for email-address-looking strings, trim each match
# down to the bare address with sed, join them with commas into emails.lst,
# then display the result.
# NOTE(review): the tr '\n' ' ,' only translates newlines to spaces (extra
# chars in set2 are ignored); the following sed turns those spaces into
# ", " separators — presumably the intended behavior, confirm.
sudo grep -sarhE '^.*(([a-z1-9\-_]\.?)+)([^.])@([a-zA-Z1-9]+\.)+([[:alpha:]]{2,5})' /etc/* | sed -e 's/^.*[^a-zA-Z1-9]\([a-zA-Z1-9]*@[a-zA-Z1-9]*.[a-zA-Z]*\).*$/\1/' | sudo tr '\n' ' ,' | sudo sed -e 's/\s/, /g' > emails.lst
cat emails.lst
| true |
0d1258e35238b467fbe5a20198737910d4cefe69 | Shell | protosaditya21/aws-zabbix | /user-data.sh | UTF-8 | 4,422 | 3.296875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
echo "=== User Data start ==="
# https://www.zabbix.com/documentation/5.0/manual/installation/install_from_packages/debian_ubuntu
###########################################################
# VARIABLES -- CHANGE THINGS HERE
###########################################################
# Zabbix 5.0 release package for Ubuntu 18.04 (bionic).
ZABBIX_PKG_NAME="zabbix-release_5.0-1+bionic_all.deb"
ZABBIX_REPO_URL="https://repo.zabbix.com/zabbix/5.0/ubuntu/pool/main/z/zabbix-release"
# NOTE(review): DB credentials are plaintext here and end up in a SQL file
# on disk below — rotate/override for anything beyond a demo.
DB_HOST="localhost"
DB_PORT=3306
DB_USER="zabbix" # change your zabbix database username as needed
DB_PASS="zabbix" # change your zabbix database password as needed
DB_NAME="zabbix" # change your zabbix database name as needed
# Derived aliases kept for compatibility with zabbix's own variable names.
ZBX_SERVER_HOST="localhost"
DB_SERVER_HOST=${DB_HOST}
DB_SERVER_PORT=${DB_PORT}
DB_SERVER_DBNAME=${DB_NAME}
MYSQL_USER=${DB_USER}
MYSQL_PASSWORD=${DB_PASS}
MYSQL_DATABASE=${DB_NAME}
ZBX_LOADMODULE=""
ZBX_DEBUGLEVEL=5
ZBX_TIMEOUT=10
# ***** THERE IS NO NEED TO CHANGE ANYTHING AFTER THIS POINT **** #
###########################################################
# COMMON
###########################################################
# Instance metadata (IMDSv1) and a scratch directory for downloads.
AWS_INSTANCE_ID=`curl -s http://169.254.169.254/latest/meta-data/instance-id`
TEMP_INSTALL_DIR="/root/install"
mkdir ${TEMP_INSTALL_DIR}
cd ${TEMP_INSTALL_DIR}
# Register the Zabbix apt repository.
wget ${ZABBIX_REPO_URL}/${ZABBIX_PKG_NAME}
dpkg -i ${ZABBIX_PKG_NAME}
# update OS
mv /boot/grub/menu.lst /tmp/
update-grub-legacy-ec2 -y
apt-get dist-upgrade -qq --force-yes
apt update
apt full-upgrade -y
###########################################################
# MySQL INSTALLATION AND CONFIGURATION FOR ZABBIX
###########################################################
apt install zabbix-server-mysql -y
# Keep a pristine copy of the stock server config before any edits.
cp -pd /etc/zabbix/zabbix_server.conf /etc/zabbix/zabbix_server.conf.orig
service zabbix-server start
update-rc.d zabbix-server enable
###########################################################
# ZABBIX FRONTEND
###########################################################
apt install apache2 -y
apt install php libapache2-mod-php -y
update-rc.d apache2 enable
service apache2 start
apt install zabbix-frontend-php -y
service apache2 restart
###########################################################
# ZABBIX DATA
###########################################################
cd ${TEMP_INSTALL_DIR}
apt install mysql-server -y
service mysql start
update-rc.d mysql enable
# Build the bootstrap SQL (database + zabbix user) in the scratch dir.
# NOTE(review): GRANT ALL ON *.* is broader than Zabbix needs, and the
# password lands in a file on disk — tighten for production.
echo "CREATE DATABASE IF NOT EXISTS ${DB_NAME} CHARACTER SET utf8 COLLATE utf8_bin;" > ${TEMP_INSTALL_DIR}/create_zabbix.sql
echo "GRANT ALL ON *.* TO '${DB_USER}'@'localhost' IDENTIFIED BY '${DB_PASS}';" >> ${TEMP_INSTALL_DIR}/create_zabbix.sql
echo "FLUSH PRIVILEGES;" >> ${TEMP_INSTALL_DIR}/create_zabbix.sql
mysql -u root < ${TEMP_INSTALL_DIR}/create_zabbix.sql
# Load the schema shipped with the zabbix-server-mysql package.
zcat /usr/share/doc/zabbix-server-mysql/create.sql.gz | mysql -u root ${DB_NAME}
###########################################################
# ZABBIX AGENT
###########################################################
apt install zabbix-agent -y
service zabbix-agent start
###########################################################
# ZABBIX CONFIG
###########################################################
# Write the Apache site for the Zabbix frontend. The heredoc delimiter is
# unquoted, but the content contains no shell expansions, so it is emitted
# verbatim.
cat > /etc/apache2/conf-available/zabbix.conf <<EOF
#
# Zabbix monitoring system php web frontend
#
Alias /zabbix /usr/share/zabbix
<Directory "/usr/share/zabbix">
Options FollowSymLinks
AllowOverride None
Require all granted
<IfModule mod_php7.c>
php_value max_execution_time 300
php_value memory_limit 512M
php_value post_max_size 128M
php_value upload_max_filesize 128M
php_value max_input_time 300
php_value max_input_vars 10000
php_value always_populate_raw_post_data -1
php_value date.timezone America/Toronto
</IfModule>
</Directory>
<Directory "/usr/share/zabbix/conf">
Require all denied
</Directory>
<Directory "/usr/share/zabbix/app">
Require all denied
</Directory>
<Directory "/usr/share/zabbix/include">
Require all denied
</Directory>
<Directory "/usr/share/zabbix/local">
Require all denied
</Directory>
EOF
# Enable the site (manual symlink instead of a2enconf) and restart everything.
ln -s /etc/apache2/conf-available/zabbix.conf /etc/apache2/conf-enabled/zabbix.conf
###########################################################
# RESTART ZABBIX AND APACHE
###########################################################
service zabbix-server restart
service apache2 restart
service zabbix-agent restart
echo "=== User Data end ==="
# End; | true |
ce8911b2243fc94d2b599bfc9a5f4fe1a996e807 | Shell | pwyoung/computer-setup | /python/python.sh | UTF-8 | 931 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# Interpreter version to build via pyenv and set as the global default.
PYTHON_MAJOR_VERSION="3.11"
PYTHON_VERSION="${PYTHON_MAJOR_VERSION}.4"
# Install pyenv if it is not already on PATH: git clone on Linux (after
# installing CPython build dependencies via apt), Homebrew on macOS.
# BUG FIX: the original wrote `PKGS=make build-essential ... \` without
# quotes, which makes "PKGS=make" a temporary environment assignment for a
# bogus `build-essential` command — the command fails and the later
# `apt-get install -y $PKGS` sees an empty PKGS. Quote the whole list.
install_pyenv(){
    if command -v pyenv; then
        echo "pyenv is already installed"
    else
        echo "Installing pyenv"
        if uname -a | grep Linux >/dev/null; then
            # POPOS 22.04
            PKGS="make build-essential libssl-dev zlib1g-dev \
libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm libncurses5-dev \
libncursesw5-dev xz-utils tk-dev libffi-dev liblzma-dev python3-openssl"
            # $PKGS intentionally unquoted: one argument per package name.
            sudo apt-get install -y $PKGS
            git clone https://github.com/pyenv/pyenv.git ~/.pyenv
        elif uname -a | grep Darwin >/dev/null; then
            brew install pyenv
        fi
    fi
}
# Build the pinned CPython release and make it pyenv's global default,
# then list the installed versions for confirmation.
install_pyenv_global_version(){
    pyenv install -v "${PYTHON_VERSION}"
    pyenv global "${PYTHON_VERSION}"
    pyenv versions
}
# Entry point: install pyenv (if needed) then the pinned global version.
install_pyenv
install_pyenv_global_version
| true |
e38e3163b0ad07a461b1852cc62c9d7674688a27 | Shell | justintoo/rose-sh | /applications/xen/xen.sh | UTF-8 | 6,901 | 3.546875 | 4 | [] | no_license | : ${XEN_DEPENDENCIES:=dev86 iasl libuuid libaio yajl pixman}
# Default ./configure flags for Xen; overridable from the environment
# (the := expansion only assigns when the variable is unset or empty).
: ${XEN_CONFIGURE_OPTIONS:=
      --enable-githttp
      --disable-stubdom
    }
#-------------------------------------------------------------------------------
download_xen()
#-------------------------------------------------------------------------------
{
  info "Downloading source code"

  # Probe for LLNL's captive portal: if fetching an external site returns the
  # portal page, the user must authenticate in a browser before we can clone.
  wget -O- --no-check-certificate www.google.com | grep "LLNL User Identification Portal"
  if test $? -eq 0; then
    echo "[FATAL] Xen requires access to the Internet."
    echo "[FATAL] Please open a browser and authenticate through the LLNL USER Identification Portal"
    exit 1
  fi

  set -x
  clone_repository "${application}" "${application}-src" || exit 1
  cd "${application}-src/" || exit 1
  # Earlier tarball-based download path, kept for reference:
  #if [ ! -d "xen-src" ]; then
  #  wget --no-check-certificate http://www.xenproject.org/downloads/xen-archives/xen-44-series/xen-440/299-xen-project-440/file.html || exit 1
  #  tar xzvf file.html || exit 1
  #  mv xen-4.4.0/ xen-src || exit 1
  #fi
  #cd xen-src || exit 1
  set +x
}
#-------------------------------------------------------------------------------
install_deps_xen()
#-------------------------------------------------------------------------------
{
install_deps ${XEN_DEPENDENCIES} || fail "Could not install dependencies"
}
#-------------------------------------------------------------------------------
patch_xen()
#-------------------------------------------------------------------------------
{
info "Patching not required"
}
#-------------------------------------------------------------------------------
# Configure Xen for a build that will later be replayed with the ROSE
# compiler. PREPEND_INCLUDES/PREPEND_LIB point Xen's build system at the
# locally installed dependencies.
configure_xen__rose()
#-------------------------------------------------------------------------------
{
info "Configuring application for ROSE compiler='${ROSE_CC}'"
#-----------------------------------------------------------------------------
set -x
#-----------------------------------------------------------------------------
PREPEND_INCLUDES="${ROSE_SH_DEPS_PREFIX}/include" \
PREPEND_LIB="${ROSE_SH_DEPS_PREFIX}/lib" \
./configure \
--prefix="$(pwd)/install_tree" \
${XEN_CONFIGURE_OPTIONS} || fail "An error occurred during application configuration"
#-----------------------------------------------------------------------------
set +x
#-----------------------------------------------------------------------------
}
#-------------------------------------------------------------------------------
# Same configuration, but for the default system compiler. NOTE(review):
# the body is identical to configure_xen__rose except for the info message.
configure_xen__gcc()
#-------------------------------------------------------------------------------
{
info "Configuring application for default compiler='${CC}'"
#-----------------------------------------------------------------------------
set -x
#-----------------------------------------------------------------------------
PREPEND_INCLUDES="${ROSE_SH_DEPS_PREFIX}/include" \
PREPEND_LIB="${ROSE_SH_DEPS_PREFIX}/lib" \
./configure \
--prefix="$(pwd)/install_tree" \
${XEN_CONFIGURE_OPTIONS} || fail "An error occurred during application configuration"
#-----------------------------------------------------------------------------
set +x
#-----------------------------------------------------------------------------
}
#-------------------------------------------------------------------------------
# Compile Xen once with GCC while recording the verbose make output, then
# rewrite that log into a replayable script whose compile lines invoke
# ${ROSE_CC} instead, run it, and collect pass/fail results from sqlite.
compile_xen()
#-------------------------------------------------------------------------------
{
info "Compiling application"
#-----------------------------------------------------------------------------
set -x
#-----------------------------------------------------------------------------
export ROSE_CC
# (1) Compile with GCC and save output for replay with ROSE
#
# Must compile serially in order to replay with ROSE... Actually, I
# guess it may not be necessary since all dependencies are already
# available... I guess for now we'll play it safe
# Must run with verbose mode to get *all* compile lines
SEABIOS_UPSTREAM_URL="rose-dev@rosecompiler1.llnl.gov:rose/c/xen/seabios.git" \
SEABIOS_UPSTREAM_TAG="master" \
ROSE_IPXE_GIT_URL="rose-dev@rosecompiler1.llnl.gov:rose/c/xen/ipxe.git" \
make dist -j1 V=1 2>&1 | tee output-make-gcc.txt || exit 1
# "|| exit 1" above only checks tee; PIPESTATUS[0] holds make's status.
if [ "${PIPESTATUS[0]}" -ne 0 ]; then
echo "[FATAL] GCC compilation failed. Terminating..."
exit 1
fi
# (2) Replace Make Entering/Leaving directory changes with pushd/popd
cat output-make-gcc.txt | \
sed "s/^make.*Entering directory \`\(.*\)'$/pushd \1/" | \
sed "s/^make.*Leaving directory.*/popd/" | \
sed 's/^make.*//' \
> make-handle_directory_changes.txt || exit 1
# (3) Replace gcc, cc, libtool cc compile lines with ${ROSE_CC} variable
cat make-handle_directory_changes.txt | \
sed 's/^cc/\${ROSE_CC}/' | \
sed 's/^gcc/\${ROSE_CC}/' | \
sed 's/^libtool.*--mode=compile.*--tag=CC cc/\${ROSE_CC}/' \
> make-rose-0.txt || exit 1
# (4) Save replay commands: ROSE_CC, pushd, popd
cat make-rose-0.txt | \
grep "^\${ROSE_CC}\|^pushd\|^popd" \
> make-rose.txt || exit 1
#
# Prepend a small header (shebang + environment echo) to the replay list.
cat <<EOF | cat - make-rose.txt > make-rose.sh
#!/bin/bash -x
which \${ROSE_CC}
echo \$LD_LIBRARY_PATH
echo \$PATH
EOF
# (5) Remove -Werror commandline options
perl \
-pi.bak \
-e 's/-Werror//g' \
"make-rose.sh" || exit 1
# Debug
# When ROSE_DEBUG is set, prepend an ERR/DEBUG trap that reports the
# failing line and the current pass/fail counters after each command.
if test -n "${ROSE_DEBUG:+yes}"; then
cat <<EOF | cat - make-rose.sh > make-rose-debug.sh
function rose_trap_handler()
{
CMD="\$0" # equals to my script name
LASTLINE="\$1" # argument 1: line of command
LASTERR="\$2" # argument 2: error code of last command
echo "[DEBUG] \${CMD}: line \${LASTLINE}: exit status of last command: \${LASTERR}"
echo "[DEBUG] ROSE P=\$(cat "${application_abs_srcdir}/rose-passes.txt" | wc -l) F=\$(cat "${application_abs_srcdir}/rose-failures.txt" | wc -l)"
}
# trap all simple commands
trap 'rose_trap_handler \${LINENO} \$?' ERR DEBUG
EOF
fi
# (6) Execute ROSE commandlines; $ROSE_CC must be set
chmod +x make-rose.sh
time ./make-rose${ROSE_DEBUG:+-debug}.sh || exit 1
# Extract results from Sqlite database and save to files:
#
# rose-passes.txt
# rose-failures.txt
#
# NOTE(review): rose-results.db is presumably produced by the ROSE_CC
# wrapper during the replay -- confirm in the framework.
sqlite3 rose-results.db > rose-passes.txt <<SQL
SELECT filename FROM results WHERE passed=1;
SQL
sqlite3 rose-results.db > rose-failures.txt <<SQL
SELECT filename FROM results WHERE passed=0;
SQL
#-----------------------------------------------------------------------------
set +x
#-----------------------------------------------------------------------------
}
| true |
937f91f87a9f913e97151b3a6445c188fc5c0e5d | Shell | sherpya/mplayer-be | /packages/libvpx/build.sh | UTF-8 | 938 | 3.359375 | 3 | [
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Build script for Google VP8/9 Video Codec
# Copyright (c) 2013-2021 Gianluigi Tiesi <sherpya@netfarm.it>
# See LICENSE for licensing informations
# Upstream repository; consumed by the shared build framework sourced below.
GIT_REPO=https://chromium.googlesource.com/webm/libvpx.git
# Shared helpers (git_clean, pkg_build, gen_ld_script, ...) live next to
# this script's parent directory.
. $(dirname $0)/../functions.sh
# Map the framework's ARCH to libvpx's MinGW cross-compile target triple.
case ${ARCH} in
x86) VPX_TARGET=x86-win32-gcc ;;
x86_64) VPX_TARGET=x86_64-win64-gcc ;;
esac
BUILDDIR="libvpx"
# -fno-asynchronous-unwind-tables ??
# package configure fails on unknown options
# Configure libvpx for a static cross-build. HOST/PREFIX/MBE_CFLAGS/
# MBE_CXXFLAGS are expected to be exported by the framework; CROSS tells
# libvpx's configure which toolchain prefix to use.
pkg_configure()
{
CROSS=${HOST}- ./configure \
--prefix=${PREFIX} \
--target=${VPX_TARGET} \
--enable-static \
--disable-shared \
--disable-unit-tests \
--disable-examples \
--extra-cflags="${MBE_CFLAGS}" \
--extra-cxxflags="${MBE_CXXFLAGS}"
}
# After install, wrap libvpx.a in an ld script that also pulls in pthread
# (gen_ld_script comes from functions.sh).
post_install_hook()
{
gen_ld_script libvpx.a "-lpthread"
}
# Clean the tree, run the framework's build pipeline, clean again.
git_clean && pkg_build && git_clean
| true |
edc98111bba4b6e4cf31ea0af33f5c045ee3c978 | Shell | drsaikirant88/tf-dlpack | /tests/scripts/build_in_docker.sh | UTF-8 | 1,128 | 3.5 | 4 | [
"Apache-2.0"
] | permissive | #/bin/bash
set -e
# $1: git branch/ref to build; $2: "ON"/"OFF" passed to CMake's USE_CUDA.
BRANCH=$1
USE_CUDA=$2
pushd /tmp
git clone https://github.com/VoVAllen/tf-dlpack.git --recursive
pushd tf-dlpack
git checkout $BRANCH
# Use the miniconda environments baked into the image; the python package
# and freshly built native libs are picked up via PYTHONPATH/
# TFDLPACK_LIBRARY_PATH.
CONDA_PREFIX=$HOME/miniconda3/bin
export PATH=$CONDA_PREFIX:$PATH
export PYTHONPATH=$PWD/python:$PYTHONPATH
export TFDLPACK_LIBRARY_PATH=$PWD/libs
# Build the native library once per supported Python x TensorFlow combo.
for PY_VER in 3.5 3.6 3.7; do
echo "Build for python $PY_VER"
source activate $PY_VER
# clean & build
rm -rf libs
mkdir libs
for TF_VER in "2.1.0" "2.2.0rc2"; do
pip uninstall -y tensorflow
# Python 3.5 ships an incompatible numpy in these envs; reinstall it.
if [ $PY_VER = "3.5" ]; then
pip uninstall -y numpy
pip install numpy
fi
# Wheels are pre-staged per Python version under $HOME/<py_ver>/.
pip install $HOME/$PY_VER/tensorflow*${TF_VER}*.whl
rm -rf build
mkdir build
cd build; cmake -DUSE_CUDA=$USE_CUDA ..; make -j; cd ..
# Collect the per-TF .so files so one wheel can bundle them all.
mv build/*.so libs
done
# test
# pytest only runs for CUDA builds; the suffix distinguishes the -gpu wheel.
if [ $USE_CUDA = "ON" ]; then
python -m pytest tests
export TFDLPACK_PACKAGE_SUFFIX=-gpu
else
export TFDLPACK_PACKAGE_SUFFIX=
fi
# build wheel
pushd python
python setup.py clean
python setup.py bdist_wheel --plat-name manylinux1_x86_64
popd
source deactivate
done
# /workspace is the bind-mounted host directory (see the calling CI job).
cp python/dist/*.whl /workspace
popd
popd
| true |
4c40db3b7a784fa25c1d666f0a17c78f29ba7c3f | Shell | buildkite/agent | /.buildkite/steps/publish-docker-images.sh | UTF-8 | 1,232 | 3.671875 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Abort on errors, unset variables, and failures anywhere in a pipeline.
set -euo pipefail
# Run the given command only when DRY_RUN is explicitly "false"; in every
# other case (unset, empty, or any other value) just print what would run.
dry_run() {
  case "${DRY_RUN:-}" in
    false) "$@" ;;
    *)     echo "[dry-run] $*" ;;
  esac
}
# CODENAME selects the release channel and must be provided by the pipeline.
if [[ "$CODENAME" == "" ]]; then
echo "Error: Missing \$CODENAME (stable, experimental or unstable)"
exit 1
fi
echo "--- Logging in to Docker Hub"
# Credentials come from SSM; the password is piped straight into docker
# login so it never appears on a command line.
dockerhub_user="$(aws ssm get-parameter --name /pipelines/agent/DOCKER_HUB_USER --with-decryption --output text --query Parameter.Value --region us-east-1)"
aws ssm get-parameter --name /pipelines/agent/DOCKER_HUB_PASSWORD --with-decryption --output text --query Parameter.Value --region us-east-1 | docker login --username="${dockerhub_user}" --password-stdin
# Version/build metadata is produced by an earlier pipeline step.
version=$(buildkite-agent meta-data get "agent-version")
build=$(buildkite-agent meta-data get "agent-version-build")
# Publish each image variant; the per-variant source tag was stored in
# build meta-data by the image-build step.
for variant in "alpine" "alpine-k8s" "ubuntu-18.04" "ubuntu-20.04" "ubuntu-22.04" "sidecar" ; do
echo "--- Getting docker image tag for $variant from build meta data"
source_image=$(buildkite-agent meta-data get "agent-docker-image-$variant")
echo "Docker Image Tag for $variant: $source_image"
echo "--- :docker: Publishing images for $variant"
.buildkite/steps/publish-docker-image.sh "$variant" "$source_image" "$CODENAME" "$version" "$build"
done
| true |
d66c4e03b500699e0d44cc7d8cb40e3a03d7fa18 | Shell | Efreak/dotfiles | /.bash_aliases | UTF-8 | 2,444 | 2.953125 | 3 | [] | no_license | # some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
# Add an "alert" alias for long running commands. Use like so:
# sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
alias nodejs='node'
# Screen/byobu shortcuts.
alias sr='screen -r'
alias sdms='screen -dmS'
alias sls='screen -ls'
alias s='screen'
alias byobu='byobu-screen'
alias less='less -r'
# Service-control shortcuts (SysV "service" wrappers).
alias nginx='sudo service nginx'
alias php-fpm='sudo service php5-fpm'
alias php5-fpm='sudo service php5-fpm'
alias phpfpm='sudo service php5-fpm'
alias php5fpm='sudo service php5-fpm'
alias php='sudo service php5-fpm'
alias php5='sudo service php5-fpm'
# NOTE(review): this shadows the ssh *client*; `\ssh` or `command ssh`
# is needed to reach the real binary while this alias is active.
alias ssh='sudo service openssh'
alias plex='sudo service plexmediaserver'
alias calibre='sudo service calibre-server'
alias calibre-server='sudo service calibre-server'
alias bubbleupnpserver='sudo service bubbleupnpserver'
alias bubbleupnp='sudo service bubbleupnpserver'
alias bubble='sudo service bubbleupnpserver'
# Random 30-character password from /dev/urandom (printable chars only).
alias genpasswd="strings /dev/urandom | grep -o '[[:graph:]]' | head -n 30 | tr -d '\n'; echo"
alias get='sudo apt-get install'
alias nyan='telnet -e ^c nyancat.dakko.us'
alias reload='. /home/paul/.bashrc'
alias remove='sudo apt-get --purge remove'
alias root='sudo -i'
alias update='sudo apt-get update && sudo apt-get upgrade'
alias nethack='telnet nethack.alt.org'
alias apt-get="sudo apt-get"
## get rid of command not found ##
alias cd..='cd ..'
alias ..='cd ..'
alias ...='cd ../../../'
alias ....='cd ../../../../'
# NOTE(review): "....." expands to only four levels, same as "....".
alias .....='cd ../../../../'
alias .4='cd ../../../../'
alias .5='cd ../../../../..'
alias mkdir='mkdir -pv'
alias diff='colordiff'
alias mount='acoc colmount'
alias h='history'
alias j='jobs -l'
# do not delete / or prompt if deleting more than 3 files at a time #
alias rm='rm -I --preserve-root'
# Parenting changing perms on / #
alias chown='chown --preserve-root'
alias chmod='chmod --preserve-root'
alias chgrp='chgrp --preserve-root'
alias iftop='iftop -i eth0'
alias meminfo='free -m -l -t'
# Top memory / CPU consumers via ps.
alias psmem='ps auxf | sort -nr -k 4'
alias psmem10='ps auxf | sort -nr -k 4 | head -10'
alias pscpu='ps auxf | sort -nr -k 3'
alias pscpu10='ps auxf | sort -nr -k 3 | head -10'
alias mountall='sudo mount -a'
alias back='cd "$OLDPWD"'
# NOTE(review): redefines the `less` alias from above; this later one wins.
alias less='less -FSR'
alias du='cdu -d'
alias df="dfc -t vfat,ext4,ext3,ntfs -WT && dfc -c always -fWT|grep -E 'drive$'|head -n 2"
| true |
b6978bfdb5da5dff2455d85ef1a8d807b7600e34 | Shell | khalavak/ansible-minecraft-demo | /bin/aws-auth.sh | UTF-8 | 1,430 | 3.296875 | 3 | [
"CC-BY-3.0"
] | permissive | #!/usr/bin/env bash
# Setup AWS authentication for Ansible and ec2.py inventory scripts using
# environment variables. Meant to be sourced so the exports reach the
# calling shell; the placeholder keys below must be replaced.
# Access Key ID:
# Secret Access Key:

# Setup environment variables
echo "Setting up EC2_ACCESS_KEY..."
export EC2_ACCESS_KEY="xxxxxxx"
echo

echo "Setting up EC2_SECRET_KEY..."
export EC2_SECRET_KEY="yyyyyyyyyyy"
echo

echo "Setting up AWS_ACCESS_KEY_ID..."
export AWS_ACCESS_KEY_ID=${EC2_ACCESS_KEY}
echo "export AWS_ACCESS_KEY_ID=${EC2_ACCESS_KEY}"
echo

echo "Setting up ec2.py:s AWS_SECRET_ACCESS_KEY..."
export AWS_SECRET_ACCESS_KEY=${EC2_SECRET_KEY}
echo "export AWS_SECRET_ACCESS_KEY=${EC2_SECRET_KEY}"
echo

echo "Setting up Ansible inventry with EC2.py..."
export ANSIBLE_HOSTS=/Users/khalavak/Projects/ansible-repo/ansible-minecraft-demo/plugins/inventory/ec2.py
export EC2_INI_PATH=/Users/khalavak/Projects/ansible-repo/ansible-minecraft-demo/plugins/inventory/ec2.ini
echo

echo "Environment variables setup!"
echo

# Print copy/paste-able export commands.
# BUG FIX: the first two lines originally had unbalanced double quotes
# (echo "export EC2_ACCESS_KEY="${EC2_ACCESS_KEY}") which made the quoting
# spill across lines and garbled the output; the last two echoed stale
# "crosskey-minecraft-demo" paths that disagreed with the values actually
# exported above -- they now echo the exported variables themselves.
echo "BASH Environment export commands:"
echo "export EC2_ACCESS_KEY=${EC2_ACCESS_KEY}"
echo "export EC2_SECRET_KEY=${EC2_SECRET_KEY}"
echo "export AWS_ACCESS_KEY_ID=${EC2_ACCESS_KEY}"
echo "export AWS_SECRET_ACCESS_KEY=${EC2_SECRET_KEY}"
echo "export ANSIBLE_HOSTS=${ANSIBLE_HOSTS}"
echo "export EC2_INI_PATH=${EC2_INI_PATH}"
echo
| true |
1242e3757181438fd2fbf5b1472027b4d75e6a02 | Shell | m040601/handlers | /other/someguy | UTF-8 | 3,359 | 3.671875 | 4 | [] | no_license | #!/bin/sh
# https://efe.kim/files/scripts/linkhandler
# Author: Efe Vural <efe@efe.kim>
# give -d flag for manual selection
# In my setup $TERMINAL=st, $PLAYER=mpv, $BROWSERCLI=w3m
# Resolve the URL to act on: prefer $1, then the X primary selection, then
# the clipboard; only magnet:/git:/http(s): links are accepted. Notifies
# and exits when nothing usable is found.
geturl(){
primary=$(xclip -o 2>/dev/null | sed '1!d')
clipboard=$(xclip -sel c -o 2>/dev/null | sed '1!d')
url=$(printf "%s\n%s\n%s" "$1" "$primary" "$clipboard"\
| grep -E "^ *magnet:|^ *git://|^ *https?://" | sed '1!d;s/ *//g')
[ -z "$url" ] && notify-send "Not a link" && exit
}
# Download $url into a fresh temp file ($tmpfile, global) via curl in a
# terminal window; "$1" tags the temp-file name (pic/pdf/vid/aud).
tmpdl(){ tmpfile=$(mktemp /tmp/linkhandler-"$1".XXXXXX)
setsid "$TERMINAL" -n download -e curl -o "$tmpfile" "$url" ;}
torrentdl(){ setsid "$TORRENT" "$url" >/dev/null 2>&1 ;}
gitdl(){ setsid "$TERMINAL" -n download\ -e git clone "$url" ;}
# Viewers: fetch to a temp file, open it, then remove the file.
imgview(){ tmpdl pic && setsid sxiv -qfabsf "$tmpfile" >/dev/null 2>&1
rm "$tmpfile" ;}
pdfview(){ tmpdl pdf && setsid zathura "$tmpfile" >/dev/null 2>&1
rm "$tmpfile" ;}
htmlview(){ setsid "$TERMINAL" -e "$BROWSERCLI" "$url" >/dev/null 2>&1 ;}
# NOTE(review): the redirection below sits on its own line, so it does NOT
# apply to the setsid command above it -- looks like a lost "\" line
# continuation; confirm against upstream.
torview(){ setsid torsocks "$TERMINAL" -e "$BROWSERCLI" "$url"
>/dev/null 2>&1 ;}
videoplay(){ tmpdl vid &&
setsid "$PLAYER" --no-terminal "$tmpfile" >/dev/null 2>&1
rm "$tmpfile" ;}
audioplay(){ tmpdl aud &&
setsid "$TERMINAL" -e "$PLAYER" --no-video "$tmpfile" >/dev/null 2>&1
rm "$tmpfile" ;}
# Hand the URL straight to the player (it resolves streams itself).
streamplay(){ notify-send "Starting stream" &\
setsid "$PLAYER" --no-terminal "$url" >/dev/null 2>&1 ||
notify-send "Stream failed" ;}
# Pick a handler from the URL's extension/scheme/host; each handler is
# backgrounded so this returns immediately.
autohandler() {
case "$url" in
(*.pdf) pdfview & ;;
(*.mkv|*.webm|*.mp4|*.gifv) videoplay & ;;
(*.png|*.jpg|*.jpeg|*.jpe|*.gif) imgview & ;;
(*.mp3|*.flac|*.opus|*.ogg|*.mp3?source) audioplay & ;;
(*youtube.com*|*m.youtube.com*|*youtu.be*|*twitch.tv*|*hooktube.com*\
|*bitchute.com*|*liveleak.com*|*crunchyroll.com*) streamplay & ;;
(git://*) gitdl & ;;
(magnet:*) torrentdl & ;;
(http://*.onion|http://*.onion/*) torview & ;;
(http://*|https://*) htmlview & ;;
esac
}
# Interactive variant: shorten long URLs for display, let the user pick an
# action in dmenu, then dispatch. An unknown selection is executed as a
# command with the URL as its argument (last case).
# NOTE(review): the menu offers "pdf" and "browser" but the case arms are
# "document" and browsergui/browsercli -- selecting those menu entries
# falls through to the catch-all; confirm against upstream.
dmenuhandler(){
visual=$(echo "$url" | sed -E '/^.{30}/s/^(.{20}).*(.{7})$/\1...\2/')
options="video\naudio\ndownload\nimage\npdf\nbrowser\nbrowsergui\
\nbrowsercli\ntorrent\nbookmark\nfeed\ncomments\ntor\nytinfo\
\nsubscribe\ncopy\nstream\ngitclone"
selection=$(echo "$options" | dmenu -i -p "Open '$visual':")
case "$selection" in
(video) videoplay & ;;
(audio) audioplay & ;;
(download) cd ~/Downloads/
setsid "$TERMINAL" -n download -e curl -OL "$url" ||
setsid "$TERMINAL" -n download -e curl -o\
"$(echo "$url" | sed 's_.*/\(.*\)/*$_\1.html_')" "$url" & ;;
(image) imgview & ;;
(document) pdfview & ;;
(browsergui) setsid "$BROWSERGUI" "$url" & ;;
(browsercli) htmlview & ;;
(torrent) torrentdl & ;;
(bookmark) echo "$url" >> "$HOME/.bookmarks" & ;;
(feed) echo "$url" >> "$HOME/.newsbeuter/urls" & ;;
(tor) torview & ;;
(comments) setsid "$TERMINAL" youtube-viewer -c "$url" & ;;
(ytinfo) setsid "$TERMINAL" youtube-viewer -i "$url" & ;;
(subscribe) chanid=$(youtube-viewer --no-interactive --extract\
'*CHANNELID* #*AUTHOR*' "$url")
[ "$(echo "$chanid" | sed -n $=)" = "1" ] || exit
echo "https://www.youtube.com/feeds/videos.xml?channel_id=$chanid"\
>> "$HOME/.newsbeuter/urls" & ;;
(copy) echo "$url" | xclip -i -f | xclip -sel c -i ;;
(stream) streamplay & ;;
(gitclone) gitdl & ;;
(*) $selection "$url" ;;
esac
}
# Entry point: with -d, ask the user via dmenu how to open the link that
# follows; otherwise pick a handler automatically from the URL's form.
# Both paths background the handler so the caller is not blocked.
if [ "$1" = "-d" ]
then shift 1 ; geturl "$1" ; dmenuhandler >/dev/null 2>&1 &
else geturl "$1" ; autohandler >/dev/null 2>&1 &
fi
| true |
0277786b15cd3f870bf1467194c6f81ebf1c0e8c | Shell | delkyd/alfheim_linux-PKGBUILDS | /werken-xpath/PKGBUILD | UTF-8 | 1,411 | 2.78125 | 3 | [] | no_license | # Maintainer: Xiao-Long Chen <chenxiaolong@cxl.epac.to>
# Based on Fedora's packaging
# Fedora release string embedded in the upstream src.rpm filename.
_fedora_rel=13.beta.12.6.fc21
pkgname=werken-xpath
pkgver=0.9.4
pkgrel=4
pkgdesc="XPath implementation using JDOM"
arch=(any)
url="http://sourceforge.net/projects/werken-xpath/"
license=(Apache)
depends=(java-jdom1)
makedepends=(antlr2 apache-ant java-xml-commons-external xerces2-java)
# Sources and patches are taken from Fedora's src.rpm for this release.
source=("http://kojipkgs.fedoraproject.org//packages/werken-xpath/${pkgver}/${_fedora_rel}/src/werken-xpath-${pkgver}-${_fedora_rel}.src.rpm")
sha512sums=('ea58195dd2ae7b3601df61f3df6268348f3e3f6fc65e974d4db0fe55cbba04415672674cc7d6ba013e8ab897978342f25cbfd957bfb2ebb6c9028f0a65ab1514')
build() {
  # Unpack the inner source tarball extracted from the src.rpm.
  tar xvf werken-xpath-${pkgver}.tar.xz
  cd werken-xpath-${pkgver}

  # Apply Fedora's patches
  for i in $(grep ^Patch ../werken-xpath.spec | \
    awk -F '%{name}' '{print $2}'); do
    patch -p0 -i "../${pkgname}${i}"
  done

  # BUG FIX: the implicit "-a" binds tighter than "-o", so the original
  # `find . -name '*.jar' -o -name '*.class' -delete` applied -delete only
  # to *.class and left prebuilt *.jar files in the tree. Grouping the
  # name tests makes -delete apply to both.
  find . \( -name '*.jar' -o -name '*.class' \) -delete

  cp ../werken-xpath-${pkgver}.pom .

  # Build against the system jdom/antlr jars.
  export CLASSPATH="/usr/share/java/jdom/jdom.jar:/usr/share/java/antlr2.jar"
  ant -Dbuild.compiler=modern package compile-test
  #ant package
}
# Run the upstream test script against the freshly built jar.
check() {
cd werken-xpath-${pkgver}
export CLASSPATH="${CLASSPATH}:./build/werken.xpath.jar:build/test/classes"
sh runtests.sh
}
# Install only the built jar into the standard Java path.
package() {
cd werken-xpath-${pkgver}
install -dm755 "${pkgdir}/usr/share/java/"
install -m644 build/werken.xpath.jar "${pkgdir}/usr/share/java/"
}
| true |
7ec170aa9624d85c3c4122b54f0b3b40b1c80f9c | Shell | staylor14/capi-workspace | /setup/fly.sh | UTF-8 | 203 | 2.9375 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #! /bin/bash
set -e

# Install the Concourse `fly` CLI from the CAPI CI server when it is not
# already on PATH. Uses the portable `command -v` instead of the external
# `which`, and quotes the destination path.
if ! command -v fly > /dev/null ; then
  destination=/usr/local/bin/fly
  wget "https://capi.ci.cf-app.com/api/v1/cli?arch=amd64&platform=darwin" -O "$destination"
  chmod +x "$destination"
fi
| true |
312bbd697d6b3d7238270e308c00b6a50cd308cd | Shell | v-fox/live_opensuse_hsf | /source/root/etc/init.d/after.local | UTF-8 | 725 | 3.109375 | 3 | [] | no_license | #! /bin/sh
#
# Copyright (c) 2010 SuSE LINUX Products GmbH, Germany. All rights reserved.
#
# Author: Werner Fink, 2010
#
# /etc/init.d/after.local
#
# script with local commands to be executed from init after all scripts
# of a runlevel have been executed.
#
# Here you should add things, that should happen directly after
# runlevel has been reached.
#
# Please note that the use of this script is deprecated and should be
# avoided for starting commands. You should consider creating a dedicated
# systemd service instead.
#
## update custom meta-theme to account for changes in system themes
# Run the local icon-cache updater only if it exists and is executable;
# output is discarded, and we return to $HOME afterwards. `cd ~` runs even
# if the updater fails, because "&&" binds only the cd/update pair.
if [ -x /usr/local/share/icons/update-icon-cache ]; then
cd /usr/local/share/icons && \
./update-icon-cache > /dev/null
cd ~
fi
| true |
eba0ea0aba0d49969ef9b1582eae90062b728119 | Shell | slysonway/Projet-Cloud | /deployment/push_mysql_data.sh | UTF-8 | 472 | 2.84375 | 3 | [] | no_license | #!/bin/bash
# Read the connection details that Terraform exported for the Azure MySQL
# instance, then import database.sql into it.
# NOTE(review): assumes `terraform output` prints bare values; Terraform
# 0.14+ quotes string outputs unless `-raw` is used -- confirm the
# Terraform version in use.
database_name=$(terraform output database_name)
database_admin_login=$(terraform output database_admin_login)
database_admin_password=$(terraform output database_admin_password)
mysql_server_name=$(terraform output mysql_server_name)

# Azure MySQL requires the login in user@servername form.
database_admin_login="${database_admin_login}@${mysql_server_name}"

# Quote every expansion so logins/passwords containing shell
# metacharacters or spaces survive intact (they were unquoted before).
mysql -h "${mysql_server_name}.mysql.database.azure.com" \
    -u "$database_admin_login" \
    --password="$database_admin_password" "$database_name" < database.sql
| true |
98297db8c53d9bca7e4694870d22afd627764e0f | Shell | Pshock13/heelo-world | /contacts/phil/projects/bash-beginner-series/scripts/filetype.sh | UTF-8 | 299 | 4.0625 | 4 | [] | no_license | #!/bin/bash
# Classify the path given as the sole argument as a soft link, regular
# file, or directory.
if [ "$#" -ne 1 ]; then
    echo "Error: Invalid number of arguments"
    exit 1
fi

file="$1"

# BUG FIX: test -L before -f. "-f" follows symlinks, so a symlink that
# points at a regular file was previously misreported as a regular file.
# All expansions are quoted so paths with spaces work.
if [ -L "$file" ]; then
    echo "$file is a soft link."
elif [ -f "$file" ]; then
    echo "$file is a regular file."
elif [ -d "$file" ]; then
    echo "$file is a directory."
else
    echo "$file does not exist"
fi
92385435c3de8c7cb9a438edea66ebc8c6e6cb9d | Shell | gadmyth/unix-config | /sh-extends/proxy.sh | UTF-8 | 419 | 2.5625 | 3 | [] | no_license | function turn-on-proxy-privoxy() {
# Privoxy's default listen address; route both HTTP and HTTPS through it.
export https_proxy=127.0.0.1:8118
export http_proxy=127.0.0.1:8118
}
# Route HTTP(S) traffic through a local SOCKS5 endpoint (e.g. ss-local).
function turn-on-proxy-socks5() {
    local endpoint="socks5://127.0.0.1:1080"
    export https_proxy="$endpoint" http_proxy="$endpoint"
}
# Disable proxying: the variables stay exported but become empty.
function turn-off-proxy() {
    export https_proxy="" http_proxy=""
}
# Print the current proxy configuration, one variable per line.
function show-proxy() {
    printf '%s\n' "https_proxy: $https_proxy" "http_proxy: $http_proxy"
}
| true |
f0adb7518fbf7d0857fd3f4dfd4e5801e193700c | Shell | shioyadan/Konata | /install.sh | UTF-8 | 152 | 2.640625 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | npm install
# $? here is the exit status of the `npm install` run just above; a
# non-zero status usually means node/npm are missing or the install
# failed, so point the user at the node.js download page.
if [ $? -gt 0 ]; then
echo Installation failed. If you have not installed node.js, please install node.js from "https://nodejs.org"
fi
| true |
36b350f0797f25494f5dee438ce4b64efcb262bf | Shell | NIAGroup/PyStockAnalyze-2019 | /tools/clean_env.sh | UTF-8 | 369 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/bin/bash -e
# Resolve project paths relative to the git checkout root.
PROJ_ROOT_DIR=$(git rev-parse --show-toplevel)
PROJ_BUILD_PATH=$PROJ_ROOT_DIR/build
PROJ_SRC_PATH=$PROJ_ROOT_DIR/src
PROJ_TOOL_PATH=$PROJ_ROOT_DIR/tools
# Leave any active Python virtualenv.
# NOTE(review): `deactivate` is a shell function defined by venv activation,
# so it only exists if this script is *sourced* from such a shell -- in a
# fresh `bash -e` process this line would fail; confirm intended usage.
if [ ! -z "$VIRTUAL_ENV" ]; then
echo "[clean] Exiting virtual environment"
deactivate
fi
# Drop the build tree entirely.
if [ -d "$PROJ_BUILD_PATH" ]; then
echo "[clean] Removing build directory"
rm -rf $PROJ_BUILD_PATH/
fi
| true |
165be009f5482b0ee8b850d781fb4c36314ace8c | Shell | cristianojmiranda/x123 | /bin/env.sh | UTF-8 | 507 | 2.53125 | 3 | [] | no_license | #!/bin/bash
# Working/config locations for the x123 tooling (~ expands because the
# assignments are unquoted).
_WORKDIR=~/.x123
_CONFIG=~/.x123config
_RESOURCES=$_WORKDIR/resources
_HELM_RESOURCES=$_RESOURCES/helm
_K3S_DOCKER_COMPOSE=$_RESOURCES/k3s-docker-compose.yaml
# Scratch locations used by the k3s deployment.
_K3S_TMP=/tmp/k3s
_K3S_DATA=$_K3S_TMP/data
_K3S_MANIFESTS=$_K3S_TMP/manifests
_K3S_IMAGES=$_K3S_TMP/images
# colors
# https://misc.flogisoft.com/bash/tip_colors_and_formatting
# ANSI escape sequences (for use with echo -e / printf).
F_RED="\e[31m"
F_GREEN="\e[32m"
F_YELLOW="\e[33m"
F_LIGHT_RED="\e[91m"
F_LIGHT_GRAY="\e[37m"
F_DARK_GRAY="\e[90m"
RESET="\e[0m"
BOLD="\e[1m"
BLINK="\e[5m"
| true |
d1e371051163f1698c7e5e4a5a92e8553efb70eb | Shell | HIPERFIT/vectorprogramming | /surveytools/setup/install-hsenv | UTF-8 | 5,281 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Make sure you have executed the following commands before running
# this script:
# cabal update
# cabal install c2hs alex happy
# These path variables should be set in your .profile:
# PATH=~/.cabal/bin/:/usr/local/cuda/bin/:$PATH
# LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
# LD_LIBRARY_PATH=/usr/local/cuda/lib:$LD_LIBRARY_PATH
set -e
# cabal-installed tools and the CUDA 4.2 toolchain must be reachable.
export PATH=~/.cabal/bin/:/usr/local/cuda-4.2/bin/:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda-4.2/lib64 #:$LD_LIBRARY_PATH
# Locate this script and derive the repo root / build area from it.
SCRIPTFILE=`readlink -f $0`
SCRIPTROOT=`dirname $SCRIPTFILE`
ROOT=$(readlink -f ${SCRIPTROOT}/../..)
BUILDROOT=$ROOT/build
if [ ! -d "$BUILDROOT" ]; then
mkdir "$BUILDROOT"
fi
cd $BUILDROOT
# Remember the pristine PATH and the system GHC version (last word of
# `ghc --version`).
BASEPATH=$PATH
BASEGHC=`ghc --version | awk -F' ' '{print $NF}'`
# The shared build-tools hsenv (created by installBuildTools below).
BUILDTOOLSNAME=buildtools
BUILDTOOLSVERSION=7.4.2
BUILDTOOLSBIN=$BUILDROOT/hsenvs/$BUILDTOOLSNAME-GHC$BUILDTOOLSVERSION/.hsenv_$BUILDTOOLSNAME-GHC$BUILDTOOLSVERSION/cabal/bin/
# deactivate current hsenv if possible
deactivate_hsenv 2> /dev/null || true
# Download the GHC binary tarball for $GHC_VERSION into $GHC_DIR unless it
# is already cached. Both variables are set by init_hsenv before calling.
# NOTE: changes the current directory to $GHC_DIR as a side effect.
fetchGHC() {
mkdir -p $GHC_DIR
cd $GHC_DIR
if [ ! -f ghc-$GHC_VERSION-x86_64-unknown-linux.tar.bz2 ]; then
wget http://www.haskell.org/ghc/dist/$GHC_VERSION/ghc-$GHC_VERSION-x86_64-unknown-linux.tar.bz2
fi
}
# Creates a hsenv directory for the selected project, with the
# selected GHC version. Enters the directory and activates hsenv.
# $1 = environment name, $2 = GHC version (e.g. "7.4.2"). Reuses an
# existing environment when present; otherwise downloads GHC, builds a
# fresh hsenv and bootstraps cabal-install inside it. Leaves the
# environment activated and the cwd inside it.
init_hsenv() {
NAME=$1
GHC_VERSION=$2
GHC_DIR=$BUILDROOT/ghc/ghc-$GHC_VERSION
INSTALL_DIR=$BUILDROOT/hsenvs/$NAME-GHC$GHC_VERSION/
if [ -d $INSTALL_DIR ]; then
echo "$INSTALL_DIR already exists, using old installation"
cd $INSTALL_DIR
source .hsenv_$NAME-GHC$GHC_VERSION/bin/activate
else
# fetchGHC leaves us in $GHC_DIR with the tarball cached.
fetchGHC
mkdir -p $INSTALL_DIR
cd $INSTALL_DIR
hsenv --ghc=$GHC_DIR/ghc-$GHC_VERSION-x86_64-unknown-linux.tar.bz2
source .hsenv_$NAME-GHC$GHC_VERSION/bin/activate
cabal install cabal-install
fi
}
# Build every tool directory under $SCRIPTROOT into the shared
# "buildtools" hsenv, skipping tools that ghc-pkg already reports as
# installed (exactly one line of --simple-output).
installBuildTools () {
init_hsenv $BUILDTOOLSNAME $BUILDTOOLSVERSION
cd $SCRIPTROOT
for tool in */
do
if [ ! $(ghc-pkg --simple-output list $(basename $tool) | wc -l) = 1 ]; then
cd $tool
cabal clean
cabal install --reinstall
echo $(basename $tool)
ghc-pkg --simple-output list $(basename $tool)
cd ..
fi
done
deactivate_hsenv
}
# Unpack the Hackage `cuda` bindings, patch an import clash in
# Foreign/CUDA/Driver.hs (hide `Max` from the Exec module), regenerate the
# configure script and install against the CUDA 4.2 toolkit. Expects to be
# run inside an already-activated hsenv.
installCUDAFromHackage (){
# Fix and install cuda bindings
cabal unpack cuda || true
cd cuda-*
sed -i -e "s/import Foreign.CUDA.Driver.Exec$/import Foreign.CUDA.Driver.Exec hiding \(Max\)/" Foreign/CUDA/Driver.hs
autoconf
cabal install --extra-include-dirs=/usr/local/cuda-4.2/include/ --extra-lib-dirs=/usr/local/cuda-4.2/lib64/
cd ..
}
# Each installer below creates/activates a dedicated hsenv pinned to the
# GHC version that language is known to build with, installs the packages,
# and deactivates again.
installAccelerateFromHackage(){
init_hsenv "accelerate-hackage" "7.4.1"
cabal install accelerate accelerate-io
installCUDAFromHackage
cabal install accelerate-cuda --extra-include-dirs=/usr/local/cuda/include/
cabal install accelerate-examples
deactivate_hsenv
}
# This doesn't work currently
installAccelerateFromGithub() {
init_hsenv "accelerate-github" "7.6.1"
$BUILDTOOLSBIN/shaketool --accelerate
deactivate_hsenv
}
installNikola() {
init_hsenv "nikola" "7.4.2"
$BUILDTOOLSBIN/shaketool --nikola
deactivate_hsenv
}
installFeldspar() {
init_hsenv "feldspar" "7.0.2"
cabal install feldspar-language
cabal unpack feldspar-compiler
cd feldspar-compiler*
# Upstream typo: the extension is spelled ExplicitForAll.
sed -i -e "s/ExplicitForall/ExplicitForAll/" Feldspar/NameExtractor.hs
cabal install
cd ..
deactivate_hsenv
}
installObsidian() {
init_hsenv "obsidian" "7.0.3"
git clone git://github.com/svenssonjoel/GCDObsidian.git obsidian
cd obsidian
cabal install
deactivate_hsenv
}
installDPH() {
init_hsenv "dph" "7.4.2"
cabal install dph-examples
deactivate_hsenv
}
# $1 = GHC version, $2 = environment name (note the swapped order in the
# init_hsenv call: init_hsenv takes name first).
installVanilla() {
init_hsenv $2 $1
deactivate_hsenv
}
# Print the command synopsis: a one-line description followed by the list
# of supported flags, one per line.
show_usage() {
    local usage_lines=(
        "This command installs GHC environments for different Haskell vector languages"
        ""
        "Invoke with one or more of the following flags:"
        " --nikola"
        " --obsidian"
        " --feldspar"
        " --dph"
        " --accelerate-github"
        " --accelerate-hackage"
        " --vanilla <GHC versionnumber, e.g. \"7.4.2\"> <name>"
    )
    printf '%s\n' "${usage_lines[@]}"
}
# Require at least one flag, otherwise print usage and abort.
if [ $# -lt 1 ]; then
echo "No arguments given. Aborting."
echo ""
show_usage
exit 1
fi
# The shared build tools are needed by several installers (shaketool).
installBuildTools
# Consume flags one at a time; --vanilla additionally consumes a GHC
# version and an environment name.
while [ $# -gt 0 ]; do
case $1 in
--nikola)
installNikola
shift
continue
;;
--feldspar)
installFeldspar
shift
continue
;;
--accelerate-github)
installAccelerateFromGithub
shift
continue
;;
--accelerate-hackage)
installAccelerateFromHackage
shift
continue
;;
--obsidian)
installObsidian
shift
continue
;;
--vanilla)
shift
if [ $# -lt 2 ]; then
echo "Missing argument to --vanilla."
echo ""
show_usage
exit 1
fi
# Version must look like X.Y.Z.
# NOTE(review): the `exit 1` below runs inside a ( ) subshell, so on its
# own it would not terminate the script; with `set -e` the failing "||"
# list still aborts the shell -- confirm this is the intended mechanism.
(echo $1 | grep -P "[0-9]+\.[0-9]+\.[0-9]+" > /dev/null) ||
(
echo "Parameter to --vanilla ($1) not formatted correctly."
echo ""
show_usage
exit 1
)
installVanilla $1 $2
shift; shift
continue
;;
--dph)
installDPH
shift
continue
;;
*)
echo "Unknown parameter $1"
shift
continue
;;
esac
break
done
| true |
dded12df40bca2b1402d231305f49b1e8f1ab908 | Shell | ElmWizard/unix-linux | /shellscript/if.sh | UTF-8 | 84 | 3.328125 | 3 | [] | no_license |
x=5
if [ $x = 5 ]; then
echo "x equals 5. "
else
echo "x does not equal 5 "
fi
| true |
7f9197b48f82426f1800d3110ac94182415d45a9 | Shell | ory/ci | /src/scripts/docs/cli.sh | UTF-8 | 530 | 3.34375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -Eeuox pipefail
if [ -z ${CIRCLE_TAG+x} ]; then
if [[ "$(git show -s --format=%B | head -n 1)" == "autogen"* ]]; then
echo "Skipping task because previous commit was generated by it"
circleci-agent step halt
exit 0
fi
fi
bash <(curl -s https://raw.githubusercontent.com/ory/ci/master/src/scripts/install/git.sh)
make .bin/clidoc
.bin/clidoc .
(
cd docs && make format
)
(git add -A; git commit -a -m "autogen(docs): generate cli docs" && /
git push origin HEAD:"$CIRCLE_BRANCH") || true
| true |
ff9ea85486d81d8e19489a550269f2a2517e6569 | Shell | adrsh18/cuda-edge-detector | /scripts/run_eval.sh | UTF-8 | 826 | 3.703125 | 4 | [] | no_license | #!/bin/bash
BASE_DIR=$(dirname $0)
FILES=${BASE_DIR}/../eval_images/*
OUT_DIRECTORY=${BASE_DIR}/../eval_out
PARALLEL=${BASE_DIR}/../edge_detector
SERIAL=${BASE_DIR}/../edge_detector_serial
HIGH_THRESHOLD=100
LOW_THRESHOLD=50
if [ -d "${OUT_DIRECTORY}" ]; then
rm -fr ${OUT_DIRECTORY}
fi
mkdir ${OUT_DIRECTORY}
for f in $FILES
do
OUT_FILE=$(basename "$f")
echo ""
echo "Processing ${OUT_FILE} in Parallel.."
echo "------------------------------------"
${PARALLEL} $f "${OUT_DIRECTORY}/p_out_${OUT_FILE}" ${HIGH_THRESHOLD} ${LOW_THRESHOLD}
echo "------------------------------------"
echo ""
echo "Processing ${OUT_FILE} in Serial.."
${SERIAL} $f "${OUT_DIRECTORY}/s_out_${OUT_FILE}" ${HIGH_THRESHOLD} ${LOW_THRESHOLD}
echo "------------------------------------"
echo ""
done
| true |
00fb4d1c31327c2904fe930586af6fa8a4ba7602 | Shell | zhuxiaofengwww/config | /.bash_profile | UTF-8 | 1,232 | 2.8125 | 3 | [] | no_license | export CLICOLOR=1
export LSCOLORS=ExFxCxDxBxegedabagacad
#Powerline Settings — only source it when actually installed so new
#machines don't get an error on every shell start.
[ -f ~/.vim/bundle/powerline/powerline/bindings/bash/powerline.sh ] && \
    . ~/.vim/bundle/powerline/powerline/bindings/bash/powerline.sh
alias ll='ls -alhF'
alias df='df -h'
alias grep='grep --color=auto'
alias mkdir='mkdir -p'
alias du='du -c -h'
alias duh='du -h -d 1'
alias ..='cd ..'
alias pht='python -m SimpleHTTPServer'
alias p8='ping 8.8.8.8'
alias gst='git status'
# Platform specific aliases (GNU vs BSD userland).
if [[ "$OSTYPE" == "linux-gnu"* ]]; then
    alias ls='ls -F --color=auto'
    alias listen='netstat -tulnp'
elif [[ "$OSTYPE" == "darwin"* ]]; then
    alias ls='ls -F -G'
    alias listen='lsof -i -P |grep LISTEN'
fi
alias cdd='cd ~/Downloads/'
alias cdb='cd ~/Dropbox/'
alias cdw='cd ~/Dropbox/Work'
alias cds='cd ~/Dropbox/Study'
alias sshm='ssh -p 11024 -4 root@micbase.com'
alias sshp='ssh pi@192.168.1.148'
# fp FILE — print the absolute path of FILE relative to the current dir.
function fp() {
    echo "$(pwd)/$1"
}
if [[ "$OSTYPE" == "darwin"* ]]; then
    alias cr='~/Documents/conf/cr.sh'
    alias cdp='cd ~/Documents/projects/'
    export VG_PATH="/Users/zqy/Documents/projects/brewer/scripts/vagrant/"
    # vg [ARGS...] — run vagrant from its project dir; with no args just
    # pushd there.  All expansions quoted so paths/args with spaces work.
    function vg() {
        if [ -n "$1" ]; then
            pushd "$VG_PATH" > /dev/null
            vagrant "$@"
            popd > /dev/null
        else
            pushd "$VG_PATH" > /dev/null
        fi
    }
fi
| true |
1786e7b397950699d6b84cd602d2f85d34269323 | Shell | bsandrow/hacks | /fb-friends-list | UTF-8 | 522 | 3.25 | 3 | [] | no_license | #!/bin/sh
#
# SOURCE
# http://www.commandlinefu.com/commands/view/4726/view-facebook-friend-list-hidden-or-not-hidden
determine_fetcher() {
    # Pick an available page fetcher, preferring lynx over wget; prints
    # the command line to stdout, returns 1 when neither is installed.
    if which lynx >/dev/null 2>&1; then
        echo "lynx -useragent=Opera -dump"
    elif which wget >/dev/null 2>&1; then
        echo "wget -O - --useragent='Opera'"
    else
        return 1
    fi
}
# Facebook's typeahead endpoint for the profile id given in $1; __a=1
# requests the AJAX/JSON payload rather than an HTML page.
URL="http://www.facebook.com/ajax/typeahead_friends.php?u=$1&__a=1"
FETCHER=`determine_fetcher`
# Fetch the payload and extract friend names: split records on '",',
# print the value following '"t":"', and drop '"n":"' entries.
# NOTE(review): relies on a private Facebook endpoint that has likely
# changed or been removed — verify before depending on this.
exec $FETCHER "$URL" |gawk -F'\"t\":\"' -v RS='\",' 'RT{print $NF}' |grep -v '\"n\":\"' |cut -d, -f2
| true |
5d61d4a38f7c23038854aafcacb127f9962ec55a | Shell | Colle11/Referee-Assignment-problem | /Techniques/Backtracking/Backtracking_test.sh | UTF-8 | 279 | 2.9375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
mkdir -p Backtracking_test
# Run the backtracking solver on every instance file, with a 3 minute cap
# per instance, capturing stdout+stderr into one result file each.
for f in ../../RefAssign-Instances/Instances/*.txt
do
    # Instance name = file name without directories or the .txt suffix
    # (replaces the fragile `cut -d'/' -f 5 | cut -d'.' -f 1` chain that
    # depended on the exact path depth).
    s=$(basename "$f" .txt)
    timeout 3m ./TestRABacktracking.exe "$f" 1 1 1 1 1 1 1 &> "./Backtracking_test/$s-result.txt"
    echo "Backtracking: $s-result.txt"
done
| true |
f5e4f8187f2a0a81185812ca69fc373e36bee418 | Shell | OMNIALowCode/omnia3-deployment | /omnia-update.sh | UTF-8 | 1,916 | 3.890625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Copyright (c) NumbersBelieve. All rights reserved.
#
# Strict mode: abort on errors, unset variables and pipeline failures.
set -e
set -u
set -o pipefail
# Duplicate stdout on fd 3 so say() can always reach the original
# terminal even when a function's stdout is being captured.
exec 3>&1
# Enable ANSI colours only on a capable interactive terminal.
if [ -t 1 ] && command -v tput > /dev/null; then
    ncolors=$(tput colors)
    if [ -n "$ncolors" ] && [ $ncolors -ge 8 ]; then
        bold="$(tput bold || echo)"
        normal="$(tput sgr0 || echo)"
        black="$(tput setaf 0 || echo)"
        red="$(tput setaf 1 || echo)"
        green="$(tput setaf 2 || echo)"
        yellow="$(tput setaf 3 || echo)"
        blue="$(tput setaf 4 || echo)"
        magenta="$(tput setaf 5 || echo)"
        cyan="$(tput setaf 6 || echo)"
        white="$(tput setaf 7 || echo)"
    fi
fi
# Trace snippet: functions run `eval $invocation` to log their own name
# and arguments via say().
invocation='say "Calling: ${yellow:-}${FUNCNAME[0]} ${green:-}$*${normal:-}"'
say() {
    # Emit a tagged status line on fd 3 (the original stdout); %b lets
    # colour escape codes in the message expand.
    local message=$1
    printf '%b\n' "${cyan:-}omnia-install:${normal:-} ${message}" >&3
}
read_dom() {
    # Minimal XML pull-parser: read up to the next '<' and split the
    # buffered text on '>', so ENTITY holds the tag (with attributes)
    # and CONTENT the text that preceded it.  Results are published via
    # the globals ENTITY, CONTENT, TAG_NAME and ATTRIBUTES.
    local IFS=\>
    read -d \< ENTITY CONTENT
    # Preserve read's status: non-zero signals end of input to callers
    # looping `while read_dom; do ...`.
    local ret=$?
    TAG_NAME=${ENTITY%% *}      # first word = tag name
    ATTRIBUTES=${ENTITY#* }     # remainder = raw attribute string
    return $ret
}
# Download a zip archive from $1 and unpack its contents into $2,
# replacing whatever was there.  Returns non-zero on any failure.
unzip-from-link() {
    eval $invocation
    local download_link=$1; shift || return 1
    local destination_dir=${1:-}
    local temporary_dir
    # Refuse to run without a destination: with an empty value the
    # cleanup below would expand to 'rm -rf /*'.
    [ -n "$destination_dir" ] || return 1
    rm -rf -- "${destination_dir:?}"/*
    # Download to a fixed name inside the temp dir.  The original used
    # `curl -LO` plus `rm -rf \*.zip`; rm does not glob an escaped
    # pattern, so the downloaded archive was never cleaned up and was
    # left in the caller's working directory.
    temporary_dir=$(mktemp -d) \
      && curl -L -o "$temporary_dir/package.zip" "${download_link:-}" \
      && unzip -o -d "$temporary_dir" "$temporary_dir/package.zip" \
      && rm -f -- "$temporary_dir/package.zip" \
      && mv "$temporary_dir"/* "$destination_dir" \
      && rm -rf -- "$temporary_dir"
}
download_latest_omnia_version() {
    # Fetch the OMNIA platform update feed (XML) and print it to stdout.
    # Note: curl's exit status is masked by the `local x=$(...)` form,
    # so under `set -e` a failed download does not abort; the function's
    # return value is effectively echo's status.
    eval $invocation
    local omnia_feed=$(curl -sSL https://mymiswebdeploy.blob.core.windows.net/omnia3/platform/updateFeed.xml)
    echo "$omnia_feed"
    return $?
}
update_omnia() {
    # Download the update feed, locate the first <Version ...> element,
    # expose its XML attributes as local shell variables, then fetch the
    # referenced package and restart the service.
    eval $invocation
    local temp_file=$(mktemp)
    download_latest_omnia_version > "$temp_file"
    while read_dom; do
        if [[ $TAG_NAME = "Version" ]]; then
            # Turns attribute="value" pairs into locals; the code below
            # expects a PackageBinaries attribute to be among them.
            # NOTE(review): eval of remote feed content is effectively
            # remote code execution if the feed host is compromised —
            # confirm the feed is trusted/authenticated.
            eval local $ATTRIBUTES
            break;
        fi
    done < "$temp_file"
    # NOTE(review): $temp_file is never removed on any path.
    say "Download package"
    unzip-from-link "$PackageBinaries" "/home/omnia/bin"
    systemctl restart omnia
    return $?
}
update_omnia | true |
69d95f5d0a6c4eddb9ce775acb566380f616e420 | Shell | privateport/openssl-utils | /bin/createCA | UTF-8 | 2,147 | 4.21875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Print the usage banner to stdout.  Callers handle exiting themselves.
function print_help {
	cat <<EOF
OPENVPN CA CERT and KEY GENERATION
===============================================
Usage:
  docker run sneakyscampi/createCA (OPTIONS)
Options:
  -d | --domain       DomainName
  -c | --configpath   Configuration Path ie /opt/ssl
If you write the password in /tmp/pw.txt, it will be used instead of stdin
_______________________________________________
by SneakyScampi
EOF
}
# ---- main: parse options, then generate the CA key and certificate ----
if [ $# -eq 0 ]; then
    print_help; exit 0
fi
DEBUG=false
# Long options must be comma separated: the original 'domain:configpath:'
# defined one bogus option, so --configpath was never recognised.
OPTS=$(getopt -o d:c: --long domain:,configpath: -n 'parse-options' -- "$@")
if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi
eval set -- "$OPTS"
while true; do
    case "$1" in
        -h | --help ) print_help; exit 0; shift ;;
        -d | --domain ) DOMAINNAME="$2"; shift; shift ;;
        -c | --configpath ) CONFIGPATH="$2"; shift; shift ;;
        -- ) shift; break ;;
        * ) break ;;
    esac
done
# Both options are mandatory.
if [ -z "$CONFIGPATH" ] || [ -z "$DOMAINNAME" ]; then
    print_help
    exit 1
fi
# Create the working directory when missing.
if [ ! -d "$CONFIGPATH" ]; then
    echo "Creating DIR: $CONFIGPATH"
    mkdir -p "$CONFIGPATH"
fi
cd "$CONFIGPATH" || exit 1
# Start from a clean slate; ':?' guards against an empty CONFIGPATH
# turning this into 'rm -rf /*'.
rm -rf -- "${CONFIGPATH:?}"/*
mkdir -p ca server && cd ca && touch index.txt
cp -pr /usr/local/share/openssl-templates/* "$CONFIGPATH"
CAPATH="$CONFIGPATH/ca"
echo "CA PATH: $CAPATH"
# Substitute the CA directory into the signing config ('|' delimiter
# because the path contains slashes).
sed -i.bak 's|$CA_DIR|'"$CAPATH"'|g' "$CONFIGPATH/ca/ca-sign.cnf"
cd "$CONFIGPATH/ca" || exit 1
sed -e "s/\$DOMAIN/$DOMAINNAME/g" ca.cnf.template > ca.cnf
# Use the password file when provided so the run is non-interactive.
if [ -f "/tmp/pw.txt" ]; then
    openssl req -new -config ca.cnf -keyout ca.key -out ca.req -passout file:/tmp/pw.txt
    openssl ca -batch -config ca-sign.cnf -extensions X509_ca -days 3650 -create_serial -selfsign -updatedb -keyfile ca.key -in ca.req -out ca.crt -passin file:/tmp/pw.txt
else
    openssl req -new -config ca.cnf -keyout ca.key -out ca.req
    openssl ca -batch -config ca-sign.cnf -extensions X509_ca -days 3650 -create_serial -selfsign -updatedb -keyfile ca.key -in ca.req -out ca.crt
fi
# Lock down the key; the certificate stays world readable.
chmod 400 ca.key
chmod 444 ca.crt
cp "$CONFIGPATH/ca/ca.crt" "$CONFIGPATH/server"
cp "$CONFIGPATH/ca/ca.crt" "$CONFIGPATH/"
echo "$DOMAINNAME" > "$CONFIGPATH/ca/DOMAIN.txt"
| true |
12083dc47bc78399cd944c944b69ece57e3711ab | Shell | liechangtian/pythonTest | /ws/kmeans/test.sh | UTF-8 | 136 | 3 | 3 | [] | no_license | #!/usr/bin/env bash
# Iterate over files in the current directory whose names start with
# "da".  A glob replaces the original `ls | awk '/^da/'` parse, which
# broke on names containing whitespace.
for file in da*; do
    [ -e "$file" ] || continue   # skip the literal pattern when nothing matches
    echo "$file"
    # Plain assignment instead of `newFile=\`echo $file\`` (which
    # word-split and re-joined the name).
    newFile=$file
    echo "$newFile"
done
| true |
c94ecf15852f78239fc085f203c08236b9d1b661 | Shell | prayther/uteeg | /ks/add-host.sh | UTF-8 | 3,251 | 3.125 | 3 | [] | no_license | #!/bin/bash -x
#Usage: ./script.sh hostname
export PATH=$PATH:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin
export HOME=/root
cd "${BASH_SOURCE%/*}"
# bsfl are bash libs used in scripts in uteeg
ls -d ~/bsfl || git clone https://github.com/SkypLabs/bsfl.git /root/bsfl
# read configuration (needs to be adopted!)
#source etc/virt-inst.cfg
source ../etc/virthost.cfg
source ../etc/rhel.cfg
source ../bsfl/lib/bsfl.sh || exit 1
DEBUG=no
LOG_ENABLED="yes"
SYSLOG_ENABLED="yes"
#if [ -z "${1}" ]; [ -z "${2}" ]; [ -z "${3}" ]; [ -z "${4}" ];then
if [ -z "${1}" ];then
echo ""
#echo " ./virt-install.sh <vmname> <disc in GB> <vcpus> <ram>"
echo " ./virt-install.sh <vmname>
echo ""
echo "Ex: ./virt-install.sh testvm
#echo "Ex: ./virt-install.sh testvm 10 2 2048"
echo ""
echo "Make sure you have an entry in uteeg/etc/hosts for your vmname"
echo "Only run one of these at a time. Building multiple"
echo "VM's gets all wacky with the libvirtd restart and "
echo "starting and stopping the network"
echo ""
echo "All the starting and stopping is to get dhcp leases straight"
echo ""
echo ""
exit 1
fi
# make sure your your libvirt host has sw needed for virt-inst.sh
#for sw in ansible virt-manager virt-install virt-viewer nfs-utils httpd;
# do
# if [[ $(rpm -q "${sw}") ]];then
# echo ""${sw}" installed"
# else
# echo ""${sw}" not installed..."
# echo "yum install -y "${sw}" # run this and try again"
# exit 1
# fi
#done
#this set vars per vm from hosts file based on $1, vmname used to launch this script
#use ^ in search to make sure you're not getting comments #
inputfile=../etc/hosts
VMNAME=$(awk /"^${1}"/'{print $1}' "${inputfile}")
DISC_SIZE=$(awk /"^${1}"/'{print $2}' "${inputfile}")
VCPUS=$(awk /"^${1}"/'{print $3}' "${inputfile}")
RAM=$(awk /"^${1}"/'{print $4}' "${inputfile}")
IP=$(awk /"^${1}"/'{print $5}' "${inputfile}")
OS=$(awk /"^${1}"/'{print $6}' "${inputfile}")
RHVER=$(awk /"^${1}"/'{print $7}' "${inputfile}")
OSVARIANT=$(awk /"^${1}"/'{print $8}' "${inputfile}")
VIRTHOST=$(awk /"^${1}"/'{print $9}' "${inputfile}")
DOMAIN=$(awk /"^${1}"/'{print $10}' "${inputfile}")
DISC=$(awk /"^${1}"/'{print $11}' "${inputfile}")
NIC=$(awk /"^${1}"/'{print $12}' "${inputfile}")
MASK=$(awk /"^${1}"/'{print $13}' "${inputfile}")
ISO=$(awk /"^${1}"/'{print $14}' "${inputfile}")
MEDIA=$(awk /"^${1}"/'{print $15}' "${inputfile}")
NETWORK=$(awk /"^${1}"/'{print $16}' "${inputfile}")
cmd has_value VMNAME
cmd has_value DISC_SIZE
cmd has_value VCPUS
cmd has_value RAM
cmd has_value IP
cmd has_value OS
cmd has_value RHVER
cmd has_value OSVARIANT
cmd has_value VIRTHOST
cmd has_value DISC
cmd has_value NIC
cmd has_value MASK
cmd has_value ISO
cmd has_value MEDIA
cmd has_value NETWORK
cp packages/template.packages packages/${VMNAME}.packages
cp partitions/template.partitions partitions/${VMNAME}.partitions
cp post/template.post post/${VMNAME}.post
cp network/template.network network/${VMNAME}.network
# sed search replace &: refer to that portion of the pattern space which matched
sed -i "s/<IP>/${IP}/g" network/${VMNAME}.network
sed -i "s/<VMNAME>/${VMNAME}/g" network/${VMNAME}.network
sed -i "s/<DOMAIN>/${DOMAIN}/g" network/${VMNAME}.network
| true |
c4ff73eaca3a646aa481ab845086c6a4410f82f6 | Shell | rancher/rancher | /tests/v2/codecoverage/scripts/build_docker_images.sh | UTF-8 | 1,182 | 2.953125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
# Build the Rancher server and agent test images and push them to Docker
# Hub.  Requires DOCKERHUB_USERNAME / DOCKERHUB_PASSWORD in the env.
ARCH=${ARCH:-"amd64"}
REPO=ranchertest
TAG=v2.7-head
SYSTEM_CHART_REPO_DIR=build/system-charts
SYSTEM_CHART_DEFAULT_BRANCH=${SYSTEM_CHART_DEFAULT_BRANCH:-"dev-v2.7"}
CHART_REPO_DIR=build/charts
CHART_DEFAULT_BRANCH=${CHART_DEFAULT_BRANCH:-"dev-v2.7"}
cd "$(dirname "$0")/../package"
../scripts/k3s-images.sh
cp ../bin/rancher ../bin/agent ../bin/data.json ../bin/k3s-airgap-images.tar .
# Make sure the used data.json is a release artifact
cp ../bin/data.json ../bin/rancher-data.json
IMAGE=${REPO}/rancher:${TAG}
AGENT_IMAGE=${REPO}/rancher-agent:${TAG}
echo "building rancher test docker image"
docker build --build-arg VERSION="${TAG}" --build-arg ARCH="${ARCH}" --build-arg IMAGE_REPO="${REPO}" -t "${IMAGE}" -f Dockerfile . --no-cache
echo "building agent test docker image"
docker build --build-arg VERSION="${TAG}" --build-arg ARCH="${ARCH}" --build-arg RANCHER_TAG="${TAG}" --build-arg RANCHER_REPO="${REPO}" -t "${AGENT_IMAGE}" -f Dockerfile.agent . --no-cache
# printf '%s' avoids echo's trailing newline and can never interpret the
# secret as an option.
printf '%s' "${DOCKERHUB_PASSWORD}" | docker login --username "${DOCKERHUB_USERNAME}" --password-stdin
echo "docker push rancher"
docker image push "${IMAGE}"
echo "docker push agent"
docker image push "${AGENT_IMAGE}"
| true |
4cc1826c3b62b6ebe96e4f42b4790e4660f78fc8 | Shell | YuuichiHosomi/dotfiles | /.bashrc.d/local/hurricane.post.sh | UTF-8 | 368 | 2.84375 | 3 | [] | no_license | backup() {
( set -e
grep -q /mnt/backups /proc/mounts || {
echo "Mounting backup storage"
sudo mount -t nfs 10.42.1.2:/volume1/backups /mnt/backups/
}
echo "Backing up to /mnt/backups/$(hostname -s)"
sudo rsync -av --exclude-from=/home/dennis/.config/backup_exclude --delete --delete-excluded / /mnt/backups/$(hostname -s)/
)
}
| true |
b29ba2d06ebb01a57b4d639181f87c6bd358f186 | Shell | Anshnrag02/competitive-programming | /scripts/mark-problem-as-solved.sh | UTF-8 | 277 | 3.078125 | 3 | [
"MIT"
] | permissive | # usage: ./mark-problem-as-solved.sh JUDGE-FOLDER-PATH PROBLEM-NAME-WITH-EXTENSION
# Stage the new solution, rebuild the solution index, and stage the
# regenerated files.
file_md="./scripts/index-solutions/runner.py"
git add $1/$2
# NOTE(review): 'file_csv' is never assigned anywhere in this script, so
# "${file_csv%.cpp}" is always empty, the -e test is always false (making
# the negated condition always true), and g++ runs with empty arguments.
# This looks like a leftover from a template — confirm the intended
# variable (probably the solution file) before relying on this branch.
if [[ ! -e "${file_csv%.cpp}" ]]
then
    g++ -std=gnu++14 ${file_csv} -o ${file_csv%.cpp}
fi
python3 ${file_md}
git add --update
| true |
a6f3f6be647cb1f852311812cd306899a9690007 | Shell | maxned/Random-Bash-Scripts | /Insurance.sh | UTF-8 | 385 | 3.59375 | 4 | [] | no_license | #!/bin/sh
# Insurance.sh
#
#
# Created by Max Nedorezov on 6/22/15.
#
function calculate () #Arguments: cost and insurance amount
{
local cost=$(bc <<< "$1 * $2")
echo "$cost"
}
# Prompt for the property value and recommend insuring at least 80% of it.
echo "Enter the cost of your house or building and then press [ENTER]:"
# -r keeps backslashes literal; quote the value when passing it on.
read -r cost
insuranceCost=$(calculate "$cost" .8)
echo "You should insure your house or building for at least $insuranceCost dollars"
fa6d54feb64941eb183d1c52413de1685f74c9c1 | Shell | m-sobieszek/fonline | /ThirdParty/mono/repo/scripts/mono-package-runtime | UTF-8 | 1,373 | 3.46875 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-unknown",
"CC-BY-4.0",
"CC-BY-2.5",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-info-zip-2009-01",
"GPL-1.0-or-later",
"BSD-3-Clause",
"MPL-1.1",
"MS-PL",
"LicenseRef-scancode-proprietary-license"
] | permissive | #!/bin/sh
extension=".dylib"
# Pick the shared-library suffix: .so on GNU/Linux, .dylib elsewhere.
# The substitution is quoted (and uname silenced) so the test stays
# well-formed on systems where 'uname -o' is unsupported — unquoted, an
# empty result made `test` malformed.
if test "$(uname -o 2>/dev/null)" = GNU/Linux; then
    extension=".so"
fi

# Both positional arguments are required.
if test -z "$2"; then
    echo "usage is: mono-package-runtime MONO_INSTALL_PREFIX LABEL"
    echo "The file will be created in the current directory"
    exit 1
fi

prefix=$1
output=$2

if test ! -d "$prefix"; then
    echo "the specified path is not a directory: $prefix"
    exit 1
fi

# Never clobber an existing package.
if test -e "$output.zip"; then
    echo "The output file already exists, refusing to overwrite: $output.zip"
    exit 1
fi

# Sanity-check that the prefix really is a Mono install.
if test ! -e "$prefix/bin/mono"; then
    echo "The $prefix does not contains a bin/mono"
    exit 1
fi

if test ! -d "$prefix/lib/mono/4.5"; then
    echo "The $prefix does not contains a lib/mono/4.5"
    exit 1
fi

# Absolute output path, resolved before we cd into the prefix.
o="$(pwd)/$output"
cd "$prefix" || exit 1
# Glob arguments (System*dll etc.) stay unquoted on purpose; only the
# output path and the suffix-bearing names are quoted.
(zip -u "$o.zip" bin/mono lib/mono/4.5/mscorlib.dll lib/mono/4.5/System*dll lib/mono/4.5/Mono.CSharp.dll lib/mono/4.5/Microsoft*dll lib/mono/4.5/FSharp*.dll lib/mono/4.5/I18N*dll lib/mono/4.5/Accessibility.dll lib/mono/4.5/RabbitMQ.Client.dll lib/mono/4.5/ICSharpCode.SharpZipLib.dll lib/mono/4.5/CustomMarshalers.dll etc/mono/config etc/mono/4.5/machine.config etc/mono/4.5/web.config lib/mono/4.5/Mono.Cairo.dll lib/mono/4.5/Mono.Data.Sqlite.dll lib/mono/4.5/Mono.Posix.dll lib/mono/4.5/Mono.Security.*dll lib/mono/4.5/Mono.Simd.dll "lib/libMonoSupportW$extension" "lib/libMonoPosixHelper$extension" "lib/libmono-btls-shared$extension")
echo "Created file $o.zip"
| true |
0d934c9728b232fa5aa4e98168bafff00c4a8fae | Shell | hygull/unix-ssc | /scripts/19_escape_sequence_interpretation.sh | UTF-8 | 1,091 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#------------- Description ------------------------
# *Date of creation : 22 Feb 2017
# *Aim of script    : To use few escape sequences in script
# *Created by       : Rishikesh Agrawani
# Note: `echo -e` (escape interpretation) is a bash extension; the
# transcript of the expected output is kept below the script.
#------------- Script -----------------------------
#\n (newline)
echo -e "\nMy name is Monty.\nI like programming."
#\n \t (newline + horizontal tab)
echo -e "\nHow will you utilize your time?\nA.\t:\tBy reading books\nB.\t:\tBy watching programming videos\n"
#\f (form feed)
echo -e "Apple\fBanana\fCarrot\fMango"
#\\ (literal backslash)
echo -e "\\ is slash."
#\v (vertical tab)
echo -e "Hello Dear.\vHow are you?"
#\b (backspace erases one of the three p's)
echo -e "This is an appp\ble." #3 p's
#\r (carriage return: ABCDE overwrites 12345)
echo -e "12345\rABCDE"
#\c (suppress further output, including the trailing newline)
echo -e "hfffggf\c"
# ------------ Ouptut -----------------------------
# admins-MacBook-Pro-3:scripts admin$ ./19_escape_sequence_interpretation.sh
# My name is Monty.
# I like programming.
# How will you utilize your time?
# A. : By reading books
# B. : By watching programming videos
# Apple
# Banana
# Carrot
# Mango
# \ is slash.
# Hello Dear.
# How are you?
# This is an apple.
# ABCDE
# hfffggfadmins-MacBook-Pro-3:scripts admin$
| true |
620b40606b1e5c9fedd0ddf75f44644f5b881d5d | Shell | yasker/kstat | /kstat | UTF-8 | 1,161 | 3.859375 | 4 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Namespace and label selector used to locate the KStat pod.
NS="kstat-system"
SELECTOR="app=kstat"
print_usage() {
    # Print the help text and terminate successfully.
    cat <<USAGE
Usage: ${0} [|-h|--help] <arguments>

Examples:
  ${0}
  ${0} --show-devices --top

Note: Must have KStat installed in kstat-system namespace and have access to kubectl and the namespace

USAGE
    exit 0
}
exec_command() {
    # Run the kstat CLI inside the first pod matching $SELECTOR in $NS,
    # forwarding any arguments to `kstat stat`.
    # NOTE(review): assigning "${@}" to a scalar collapses every argument
    # into one space-joined string, and that string is then interpolated
    # unescaped into the remote `sh -c` command — arguments containing
    # spaces or shell metacharacters will not survive the trip.  Confirm
    # only simple flags are ever passed here.
    COMMAND_ARGS="${@}"
    KSTAT_POD_NAME=`kubectl -n ${NS} get pod -l ${SELECTOR} --output=jsonpath={.items[0].metadata.name}`
    kubectl -n ${NS} exec -it ${KSTAT_POD_NAME} -- sh -c \
        "kstat stat \
        --metrics-format /etc/kstat/metrics-format.yaml \
        --header-template /etc/kstat/header.tmpl \
        --output-template /etc/kstat/output.tmpl \
        ${COMMAND_ARGS}"
}
# Dispatch on the first argument; anything unrecognised is forwarded to
# the in-pod kstat CLI.
ARG=$1
case "$ARG" in
    "-h" | "--help")
        print_usage
        ;;
    install)
        kubectl apply -f https://raw.githubusercontent.com/yasker/kstat/main/kstat.yaml
        ;;
    uninstall)
        kubectl delete -f https://raw.githubusercontent.com/yasker/kstat/main/kstat.yaml
        ;;
    top)
        shift
        # "$@" keeps each remaining argument as a separate word (the
        # original unquoted ${@} re-split them).
        exec_command --top "$@"
        ;;
    *)
        exec_command "$@"
        ;;
esac
| true |
cc59f8908930f3da5a8b19c57be243a9b84ee91a | Shell | marcello201/JammaPi | /pixel_script/center_screen_script/set_video.sh | UTF-8 | 14,414 | 2.796875 | 3 | [] | no_license | # *************************************
# Video timings generator script
# Version 2.0 Frank Skilton - 19th April 2017
# Version 3.0 Jochen Zurborg 04.03.2018
# added integration in
# /opt/retropie/configs/all/
# *************************************
CURRENT_VERSION=2.0
# User definable values
TEST_IMAGE="align.png"
# hdmi_timings field order: h_active 1 h_fp h_sync h_bp v_active 1 v_fp
# v_sync v_bp 0 0 0 frame_rate 0 pixel_clock 1 (matches the vcgencmd
# call in generate_timings).
SAFE_TIMINGS="320 1 20 20 44 240 1 6 7 10 0 0 0 60 0 6400000 1"
SAVE_FILE="timings_save.txt"
LOAD_FILE="timings_load.txt"
SAVE_RES_320_FILE="/opt/retropie/configs/all/timing_320.txt"
BOOT_CONFIG="/boot/config.txt"
# Stuff to output to boot/config.txt ($BOOT_CONFIG)
NEW_LINE=""
LINE1="dtparam=audio=on"
LINE2="gpu_mem_256=128"
LINE3="gpu_mem_512=256"
LINE4="gpu_mem_1024=256"
LINE5="gpu_mem_320"
LINE6="overscan_scale=1"
LINE7="dtparam=i2c_vc=on"
LINE8="dtoverlay=pwm-2chan,pin=18,func=2,pin2=19,func2=2"
LINE9="dtoverlay=vga666-6"
LINE10="enable_dpi_lcd=1"
LINE11="display_default_lcd=1"
LINE12="dpi_output_format=6"
LINE13="dpi_group=2"
LINE14="dpi_mode=87"
LINE15="hdmi_timings=450 1 50 30 90 270 1 10 1 21 0 0 0 50 0 9600000 1"
LINE16="framebuffer_width=360"
# NOTE(review): the key below is doubled ("framebuffer_heightframe...")
# — almost certainly a typo for framebuffer_height=260; confirm before
# writing it to /boot/config.txt.
LINE17="framebuffer_heightframebuffer_height=260"
LINE18="pi2jamma=no"
LINE19="# set to no to display vertical games on horizontal screen"
LINE20="pi2scart_tate_mode=no"
LINE21="# set to no to keep your own settings"
LINE22="pi2scart_overwrite_rom_cfg=yes"
LINE23="# uncomment or set to arcade for arcade crt or crt with geometry settings"
LINE24="pi2scart_crt_profile=arcade"
# Generate hdmi_timings | Main menu / loop
# Main loop: optionally (re)apply the current timing globals to the
# display hardware, optionally print the menu, then block reading single
# keypresses and dispatch to the other functions.  Most handlers recurse
# back into this function rather than returning.
generate_timings ()
{
# Only generate timings / menu where necessary
# @1 generates timings and displays menu
# @2 does not generate timings and displays menu
# @3 generates timings and does not display menu
# @ not declared does not generate timings or menu
# NOTE(review): `[ $@ ]` only behaves when zero or one argument is
# passed; with multiple arguments the test becomes malformed.
if [ $@ ]; then
	if [ $@ != "2" ]; then
		# Push the timing globals to the GPU and reinitialise the
		# framebuffer at the new resolution.
		vcgencmd hdmi_timings $h_active_pixels 1 $h_front_porch $h_sync_pulse $h_back_porch $v_active_lines 1 $v_front_porch $v_sync_pulse $v_back_porch 0 0 0 $frame_rate 0 $pixel_freq 1
		tvservice -e "DMT 87"
		fbset -depth 8 && fbset -depth 16 -xres $h_active_pixels -yres $v_active_lines
	fi
	if [ $@ != "3" ]; then
		echo ""
		echo "-Use ARROW keys to position screen"
		echo "-Press Q to toggle test image"
		echo "-Press I to input timings"
		echo "-Press L to load timings from" $LOAD_FILE
		echo "-Press B to load timings from" $BOOT_CONFIG
		echo "-Press S to save timings to " $SAVE_FILE
		echo "-Press 1 to save timings to " $SAVE_RES_320_FILE
		echo "-Press C to save timings to " $BOOT_CONFIG
		echo "-Press D to display current timings"
		echo "-Press F to calculate frequencies"
		echo "-Press SPACE or ENTER to reset timings"
		echo "-Press M to display this menu"
		echo "-Press X to exit to shell"
	fi
fi
# Detect cursor/arrow keys (final byte of the ANSI escape sequence).
# NOTE(review): on EOF `read` returns with an empty $input, which lands
# in the reset_timings branch — the loop never terminates without a
# real keyboard; only X exits.
while true
do
read -r -sn1 input
case $input in
	A) move_up ;; # up arrow pressed
	B) move_down ;; # down arrow pressed
	C) move_right ;; # right arrow pressed
	D) move_left ;; # left arrow pressed
esac
	if [ "$input" == "q" ]; then
		clear; fbv $TEST_IMAGE; generate_timings 2
	elif [ "$input" == "i" ]; then
		clear; input_timings
	elif [ "$input" == "l" ]; then
		load_timings
	elif [ "$input" == "b" ]; then
		load_boot_config 1
	elif [ "$input" == "s" ]; then
		save_timings
	elif [ "$input" == "1" ]; then
		save_1_timings
	elif [ "$input" == "c" ]; then
		save_boot_config
	elif [ "$input" == "d" ]; then
		display_timings 2
	elif [ "$input" == "f" ]; then
		calculate_frequencies 2
	elif [ "$input" == "" ]; then
		reset_timings
	elif [ "$input" == "m" ]; then
		clear; generate_timings 2
	elif [ "$input" == "x" ]; then
		clear; exit
	else
		generate_timings
	fi
done
}
# Obtain hdmi_timings values from user and store as variables
# Interactively prompt for every timing value, storing each answer in
# the corresponding global, then show the resulting scan/refresh rates
# and hand off to confirm_input.
input_timings ()
{
	echo "(Safe/example timings in brackets)"; echo ""
	read -p "-Enter horiozontal resolution (320): " h_active_pixels
	read -p "-Enter horiozontal front porch (22): " h_front_porch
	read -p "-Enter horiozontal sync pulse (20): " h_sync_pulse
	read -p "-Enter horiozontal back porch (42): " h_back_porch
	read -p "-Enter vertical resolution (240): " v_active_lines
	read -p "-Enter vertical front porch (8): " v_front_porch
	read -p "-Enter vertical sync pulse (7): " v_sync_pulse
	read -p "-Enter vertical back porch (8): " v_back_porch
	read -p "-Enter vertical refresh rate (60): " frame_rate
	read -p "-Enter pixel clock frequency (6400000): " pixel_freq
	# Totals include blanking; `$[ ]` is deprecated bash arithmetic.
	TOTAL_HORIZONTAL_LINES=$[$h_active_pixels + $h_front_porch + $h_sync_pulse + $h_back_porch]
	TOTAL_VERTICAL_LINES=$[$v_active_lines + $v_front_porch + $v_sync_pulse + $v_back_porch]
	echo "";
	# Calculate horizontal and vertical frequencies and round down to 2 decimal places
	echo -n "Horizontal scan rate (kHz) = "; awk 'BEGIN { value = sprintf ("%.2f", '"$pixel_freq / $TOTAL_HORIZONTAL_LINES / 1000"' ); print value}'
	echo -n "Vertical refresh rate (Hz) = "; awk 'BEGIN { value = sprintf ("%.2f", '"$pixel_freq / $TOTAL_HORIZONTAL_LINES / $TOTAL_VERTICAL_LINES"' ); print value}'
	confirm_input 1
}
# Ask the user to confirm the values entered in input_timings.  Called
# with an argument to (re)print the summary; recurses with no argument
# until a y/n keypress arrives.
confirm_input ()
{
	if [ $@ ]; then
		echo ""
		echo "-You have entered:" $h_active_pixels 1 $h_front_porch $h_sync_pulse $h_back_porch $v_active_lines 1 $v_front_porch $v_sync_pulse $v_back_porch 0 0 0 $frame_rate 0 $pixel_freq 1
		echo "-Is this correct (Y/N)?"
	fi
	read -sn1 correct
	if [ "$correct" == "y" ]; then
		clear; generate_timings 1
	elif [ "$correct" == "n" ]; then
		clear; input_timings
	else
		confirm_input
	fi
}
# Load hdmi_timings from $LOAD_FILE
# Load hdmi_timings from $LOAD_FILE into the timing globals and apply
# them; create the file with safe defaults when it does not exist.
load_timings ()
{
	# File exists, load the timings
	if [ -f $LOAD_FILE ]; then
		# Grab hdmi_timings string from $LOAD_FILE
		LOAD_VALUES=$( cat $LOAD_FILE | grep -m1 hdmi_timings )
		# Remove the '=' sign from the string
		LOAD_VALUES=$( echo $LOAD_VALUES | cut -d "=" -f2 )
		# Split the string into individual values.  Fields 2, 7 and
		# 11-13/15/17 are constants in the timing string and are skipped.
		h_active_pixels=$( echo $LOAD_VALUES | cut -d " " -f1 )
		h_front_porch=$( echo $LOAD_VALUES | cut -d " " -f3 )
		h_sync_pulse=$( echo $LOAD_VALUES | cut -d " " -f4 )
		h_back_porch=$( echo $LOAD_VALUES | cut -d " " -f5 )
		v_active_lines=$( echo $LOAD_VALUES | cut -d " " -f6 )
		v_front_porch=$( echo $LOAD_VALUES | cut -d " " -f8 )
		v_sync_pulse=$( echo $LOAD_VALUES | cut -d " " -f9 )
		v_back_porch=$( echo $LOAD_VALUES | cut -d " " -f10 )
		frame_rate=$( echo $LOAD_VALUES | cut -d " " -f14 )
		pixel_freq=$( echo $LOAD_VALUES | cut -d " " -f16 )
		echo ""; echo "...timings loaded from $LOAD_FILE"
		#echo "hdmi_timings="$LOAD_VALUES
		generate_timings 3
	# File doesn't exist
	# Generate the file and populate with timings
	else
		echo ""; echo "...file not found, creating file with safe timings"
		echo "hdmi_timings="$SAFE_TIMINGS > $LOAD_FILE
		generate_timings
	fi
}
# Load hdmi_timings from /boot/config.txt ($BOOT_CONFIG)
# Load hdmi_timings from /boot/config.txt ($BOOT_CONFIG) into the timing
# globals.  Mode "1" also applies them; mode "2" just stores/echoes them
# (used on first start and as a fallback).
load_boot_config ()
{
	# Grab hdmi_timings string from $BOOT_CONFIG
	BOOT_RES=$( cat $BOOT_CONFIG | grep -m1 hdmi_timings )
	# Remove the '=' sign from the string
	BOOT_RES=$( echo $BOOT_RES | cut -d "=" -f2 )
	# Split the string into individual values (same field layout as
	# SAFE_TIMINGS; constant fields are skipped).
	h_active_pixels=$( echo $BOOT_RES | cut -d " " -f1 )
	h_front_porch=$( echo $BOOT_RES | cut -d " " -f3 )
	h_sync_pulse=$( echo $BOOT_RES | cut -d " " -f4 )
	h_back_porch=$( echo $BOOT_RES | cut -d " " -f5 )
	v_active_lines=$( echo $BOOT_RES | cut -d " " -f6 )
	v_front_porch=$( echo $BOOT_RES | cut -d " " -f8 )
	v_sync_pulse=$( echo $BOOT_RES | cut -d " " -f9 )
	v_back_porch=$( echo $BOOT_RES | cut -d " " -f10 )
	frame_rate=$( echo $BOOT_RES | cut -d " " -f14 )
	pixel_freq=$( echo $BOOT_RES | cut -d " " -f16 )
	if [ $@ == "1" ]; then
		echo ""; echo "...timings loaded from $BOOT_CONFIG"
		generate_timings 3
	# Below is called on first load of script & if no values are found via
	# display_timings function
	# Purpose is to store the timings variables to be used by other functions
	elif [ $@ == "2" ]; then
		echo "hdmi_timings="$BOOT_RES
		generate_timings 2
	fi
}
# Save hdmi_timings to $SAVE_FILE
save_timings ()
{
	# Compose the current timing string once, then write it out twice:
	#  - appended to $SAVE_FILE (running history of saved timings)
	#  - overwriting $LOAD_FILE (the single set reloaded with 'L')
	local timings="$h_active_pixels 1 $h_front_porch $h_sync_pulse $h_back_porch $v_active_lines 1 $v_front_porch $v_sync_pulse $v_back_porch 0 0 0 $frame_rate 0 $pixel_freq 1"
	echo "hdmi_timings=$timings" >> $SAVE_FILE
	echo "hdmi_timings=$timings" > $LOAD_FILE
	echo ""; echo "...timings saved to $SAVE_FILE & $LOAD_FILE and retroarch"
	echo "hdmi_timings=$timings"
}
# Save hdmi_timings to $SAVE_RES_320_FILE
# Write the current timings to $SAVE_RES_320_FILE for the companion
# RetroPie integration script.
save_1_timings ()
{
# added jzu, 03.03.2018
# carry timings to michaels script
# First delete the existing /opt/retropie/configs/all/timing_320.txt ($SAVE_RES_320_FILE)
sudo rm $SAVE_RES_320_FILE
# Note the deliberate "hdmi_timings " (space, no '=') format expected by
# the consuming script — different from the config.txt format.
echo "hdmi_timings "$h_active_pixels 1 $h_front_porch $h_sync_pulse $h_back_porch $v_active_lines 1 $v_front_porch $v_sync_pulse $v_back_porch 0 0 0 $frame_rate 0 $pixel_freq 1 >> $SAVE_RES_320_FILE;
echo ""; echo "...timings saved to $SAVE_RES_320_FILE retroarch"
echo "hdmi_timings="$h_active_pixels 1 $h_front_porch $h_sync_pulse $h_back_porch $v_active_lines 1 $v_front_porch $v_sync_pulse $v_back_porch 0 0 0 $frame_rate 0 $pixel_freq 1
}
# Save hdmi_timings to /boot/config.txt ($BOOT_CONFIG)
save_boot_config ()
{
	# Regenerate /boot/config.txt from the LINE1..LINE24 template.
	# First delete the existing /boot/config.txt ($BOOT_CONFIG); -f so a
	# missing file is not an error.
	sudo rm -f "$BOOT_CONFIG"
	BOOT_CONFIG_TMP=config.txt
	rm -f "$BOOT_CONFIG_TMP"
	# Build the temp file with one grouped redirection.  The original
	# used 26 separate 'sudo echo ... >> file' commands; sudo on echo is
	# pointless there because the '>>' redirection is still performed by
	# the unprivileged shell — only the final cp needs root.
	{
		echo "# Generated from set_video.sh"
		echo "$NEW_LINE"
		local i varname
		for i in {1..24}; do
			varname="LINE$i"
			echo "${!varname}"    # indirect expansion of LINE<i>
		done
	} > "$BOOT_CONFIG_TMP"
	sudo cp "$BOOT_CONFIG_TMP" "$BOOT_CONFIG"
	echo ""; echo "...timings saved to $BOOT_CONFIG"
	echo "hdmi_timings="450 1 50 30 90 270 1 10 1 21 0 0 0 50 0 9600000 1
}
# Display currently applied hdmi_timings
# Print the currently loaded timing globals; fall back to reading them
# from /boot/config.txt when nothing is loaded.
display_timings ()
{
	# If we already have some values loaded into the variables, display them
	if [ $h_active_pixels ]; then
		echo ""
		echo "hdmi_timings="$h_active_pixels 1 $h_front_porch $h_sync_pulse $h_back_porch $v_active_lines 1 $v_front_porch $v_sync_pulse $v_back_porch 0 0 0 $frame_rate 0 $pixel_freq 1
		generate_timings
	# No values loaded (this shouldn't occur but handle it just in case)
	# In this case grab timings from boot/config.txt
	# NOTE(review): `[ !$h_active_pixels ]` concatenates '!' with the
	# value, making this a non-empty-string test that is always true —
	# it only works because the first branch already caught the
	# non-empty case.
	elif [ !$h_active_pixels ]; then
		echo ""
		echo "...Displaying timings from" $BOOT_CONFIG
		load_boot_config 2
	fi
}
# Derive the horizontal scan rate and vertical refresh rate from the
# loaded timing globals and print them (2 decimal places via awk).
calculate_frequencies ()
{
	# Horizontal freq = pixel clock / HTOTAL / 1000
	# Vertical freq = pixel clock / HTOTAL / VTOTAL
	# If values are loaded
	if [ $h_active_pixels ]; then
		# Calculate total horizontal and vertical lines
		TOTAL_HORIZONTAL_LINES=$[$h_active_pixels + $h_front_porch + $h_sync_pulse + $h_back_porch]
		TOTAL_VERTICAL_LINES=$[$v_active_lines + $v_front_porch + $v_sync_pulse + $v_back_porch]
		echo "";
		# Calculate horizontal and vertical frequencies and round down to 2 decimal # places
		echo -n "Horizontal scan rate (kHz) = "; awk 'BEGIN { value = sprintf ("%.2f", '"$pixel_freq / $TOTAL_HORIZONTAL_LINES / 1000"' ); print value}'
		echo -n "Vertical refresh rate (Hz) = "; awk 'BEGIN { value = sprintf ("%.2f", '"$pixel_freq / $TOTAL_HORIZONTAL_LINES / $TOTAL_VERTICAL_LINES"' ); print value}'
		generate_timings
	# No values loaded failsafe
	elif [ !$h_active_pixels ]; then
		echo ""; echo "...No timings loaded"
		generate_timings
	fi
}
# Reset to known working timings in the event of a blank/corrupt screen
# caused by invalid timings
reset_timings ()
{
	# Framebuffer X/Y resolution from the safe timing string.
	X=$( echo $SAFE_TIMINGS | cut -d " " -f1 )
	Y=$( echo $SAFE_TIMINGS | cut -d " " -f6 )
	# Split the string into individual values (same field layout as the
	# load_* functions) so the globals match what was applied.
	h_active_pixels=$( echo $SAFE_TIMINGS | cut -d " " -f1 )
	h_front_porch=$( echo $SAFE_TIMINGS | cut -d " " -f3 )
	h_sync_pulse=$( echo $SAFE_TIMINGS | cut -d " " -f4 )
	h_back_porch=$( echo $SAFE_TIMINGS | cut -d " " -f5 )
	v_active_lines=$( echo $SAFE_TIMINGS | cut -d " " -f6 )
	v_front_porch=$( echo $SAFE_TIMINGS | cut -d " " -f8 )
	v_sync_pulse=$( echo $SAFE_TIMINGS | cut -d " " -f9 )
	v_back_porch=$( echo $SAFE_TIMINGS | cut -d " " -f10 )
	frame_rate=$( echo $SAFE_TIMINGS | cut -d " " -f14 )
	pixel_freq=$( echo $SAFE_TIMINGS | cut -d " " -f16 )
	# Apply the known-good timings and reinitialise the framebuffer.
	vcgencmd hdmi_timings $SAFE_TIMINGS
	tvservice -e "DMT 87"
	fbset -depth 8 && fbset -depth 16 -xres $X -yres $Y
	clear
	echo "...safe timings restored"; echo ""
	generate_timings 1
}
# Increase horizontal front porch while decreasing back porch
move_left ()
{
	# Shift the picture left: grow the front porch, shrink the back porch.
	h_front_porch=$((h_front_porch + 1))
	h_back_porch=$((h_back_porch - 1))
	generate_timings 3
}
# Decrease horizontal front porch while increasing back porch
move_right ()
{
	# Shift the picture right: shrink the front porch, grow the back porch.
	h_front_porch=$((h_front_porch - 1))
	h_back_porch=$((h_back_porch + 1))
	generate_timings 3
}
# Increase vertical front porch while decreasing back porch
move_up ()
{
	# Shift the picture up: grow the vertical front porch, shrink the back porch.
	v_front_porch=$((v_front_porch + 1))
	v_back_porch=$((v_back_porch - 1))
	generate_timings 3
}
# Decrease vertical front porch while increasing back porch
move_down ()
{
	# Shift the picture down: shrink the vertical front porch, grow the back porch.
	v_front_porch=$((v_front_porch - 1))
	v_back_porch=$((v_back_porch + 1))
	generate_timings 3
}
# Start of program
clear
echo "*********************"
echo "*Video timings script"
echo "*Version - $CURRENT_VERSION"
echo "*********************"
# First load in timings from boot/config.txt and store for later use
# Then jump to main loop (generate_timings)
load_boot_config 2
| true |
3b697cbc7ad1db84025769bd8ba05972ee1846f9 | Shell | JCSmillie/GSDMosyleAPI_Scripts | /Misc_Scripts/TestGetClasses.sh | UTF-8 | 1,777 | 4.15625 | 4 | [] | no_license | #!/bin/bash
#
# Script to query Mosyle and return a list of class names and the student usernames
# who are in those classes.
#
#The source file is a local file which holds a variable containing
#our MosyleAPI key. Should look like:
# MOSYLE_API_key="<<<<<<<<OUR-KEY>>>>>>>>"
# This file should have rights on it as secure as possible. Runner
# of our scripts needs to read it but no one else.
# Pull MOSYLE_API_key from the (tightly-permissioned) local secrets file.
source /tmp/Someplace/.MosyleAPI
APIKey="$MOSYLE_API_key"
# Tab-delimited output accumulates here; rebuilt on every run.
TEMPOUTPUTFILE="/tmp/tmp.txt"
LOG=/dev/null
log_line() {
    # Trivial logger: write the message to stdout.
    printf '%s\n' "$1"
}
ParseIt() {
    # Split the global $line (tab separated: id, name, students JSON
    # array) into the globals ClassID, ClassName and Students.
    ClassID=$(cut -f 1 -d$'\t' <<< "$line")
    ClassName=$(cut -f 2 -d$'\t' <<< "$line")
    # One tr call strips quotes and brackets, leaving a comma list.
    Students=$(cut -f 3 -d$'\t' <<< "$line" | tr -d '"[]')
}
#Clear out our files
rm -Rf "$TEMPOUTPUTFILE"
THECOUNT=0
# Connect to Mosyle API multiple times (for each page) so we
# get all of the available data.
while true; do
    THECOUNT=$((THECOUNT + 1))
    THEPAGE="$THECOUNT"
    content="{\"accessToken\":\"$APIKey\",\"options\":{\"specific_columns\":[\"id\",\"class_name\",\"students\"],\"page\":$THEPAGE}}"
    # Bail out instead of looping forever when the API is unreachable —
    # the original only stopped on a NO_CLASSES_FOUND body, so any curl
    # failure made this an infinite loop.
    output=$(curl -s -k -X POST -d 'content='$content 'https://managerapi.mosyle.com/v2/listclasses') || {
        log_line "curl to Mosyle failed on page $THEPAGE - stopping"
        break
    }
    #Detect we just loaded a page with no content and stop.
    LASTPAGE=$(echo "$output" | grep NO_CLASSES_FOUND)
    if [ -n "$LASTPAGE" ]; then
        THECOUNT=$((THECOUNT - 1))
        log_line "Yo we are at the end of the list (Last good page was $THECOUNT)"
        break
    fi
    echo " "
    echo "Page $THEPAGE data."
    echo "-----------------------"
    #Now take the JSON data we received and parse it into tab
    #delimited output.
    echo "$output" | awk 'BEGIN{FS=",";RS="},{"}{print $0}' | grep id | perl -pe 's/.*"id":"(.*?)","class_name":"?(.*)","students":"?(.*)"*.*/\1\t\2\t\3\t\4/' | sed 's/"//' >> "$TEMPOUTPUTFILE"
done
| true |
c6e6678b042993c832a77237c0f822dc6b48f91c | Shell | UCLALibrary/ucladl-project-scripts | /misc/packer.sh | UTF-8 | 3,098 | 4.375 | 4 | [] | no_license | #! /bin/bash
#
# A Bash script to run a Packer.io basebox-derived build
#
# Written by: Kevin S. Clarke <ksclarke@ksclarke.io>
#
# Some default variables used by the script (ANSI color escapes and a
# regex used later to validate the artifact version number).
GREEN='\033[1;32m'
RED='\033[0;31m'
NC='\033[0m'
INT_REGEXP='^[0-9]+$'
# The location of Atlas artifacts API
ATLAS_API="https://atlas.hashicorp.com/api/v1/artifacts"
# Check to make sure our script dependencies are installed before we proceed
hash jq 2>/dev/null || { printf "${RED}Error: I require jq but it's not installed${NC}\n" >&2; exit 1; }
# Assumes the only JSON file in our project directory is the Packer.io config file.
# $1 is the builder name whose source_path/type we extract.
SOURCE_PATH=$(jq -r ".[\"builders\"][] | select(.name==\"$1\") | .source_path" *.json)
SOURCE_TYPE=$(jq -r ".[\"builders\"][] | select(.name==\"$1\") | .type" *.json)
# clean_fail MESSAGE FILE
# Remove the stale/partial artifact FILE from the cache, then abort the
# script via fail() with MESSAGE (never returns).
clean_fail() {
  rm "$2"
  fail "$1"
}
# fail MESSAGE
# Print MESSAGE in red to stderr and terminate the script with status 1.
# FIX: the message is now passed as a printf *argument* rather than being
# interpolated into the format string, so a message containing '%' can no
# longer be reinterpreted (or rejected) by printf.
fail() {
  printf "${RED}%s${NC}\n" "$1" >&2
  exit 1
}
# Download the OVA artifact source from Atlas.
# $1 - "ATLAS_USER/ARTIFACT_ID/ARTIFACT_VERSION" triple
# $2 - builder type ("virtualbox-ovf" or "vmware-vmx")
# On success the gunzipped OVA ends up at packer_cache/atlas/$1.ova.
download_source_from_atlas() {
ARTIFACT_CACHE="packer_cache/atlas/${1}.ova"
# A valid spec has exactly two slashes (user/id/version).
SLASHES=$(grep -o "/" <<< "$1" | wc -l)
if [ "$SLASHES" != "2" ]; then
fail "Error: Expected packer_cache/atlas/[ATLAS_USER]/[ARTIFACT_ID]/[ARTIFACT_VERSION] but found: ${1}"
fi
ATLAS_USER=$(echo $1 | cut -d "/" -f 1)
ARTIFACT_ID=$(echo $1 | cut -d "/" -f 2)
SOURCE_VERSION=$(echo $1 | cut -d "/" -f 3)
# Confirm our version number is an integer
if [[ ! $SOURCE_VERSION =~ $INT_REGEXP ]]; then fail "Error: Version is not an integer: $SOURCE_VERSION"; fi
# Construct correct Atlas API call depending on source type
if [[ "$2" == "virtualbox-ovf" ]]; then
URL="${ATLAS_API}/${ATLAS_USER}/${ARTIFACT_ID}/virtualbox.image/${SOURCE_VERSION}/file"
elif [[ "$2" == "vmware-vmx" ]]; then
URL="${ATLAS_API}/${ATLAS_USER}/${ARTIFACT_ID}/vmware.image/${SOURCE_VERSION}/file"
else
fail "Error: Unexpected artifact download type: ${2}"
fi
# Make sure directory structure is set up for our downloaded file.
# mkdir -p creates the parent dirs; the leaf "dir" is then removed so
# the same path can be used as a plain file below.
mkdir -p "$ARTIFACT_CACHE"
if [ -d "$ARTIFACT_CACHE" ]; then rmdir "$ARTIFACT_CACHE"; else exit 0; fi
# Download artifact file
printf "Downloading \"${URL}\" to: packer_cache/atlas/${1}.ova\n"
curl -L# "$URL" > "$ARTIFACT_CACHE"
# Check if downloaded file is empty
if [ ! -s "$ARTIFACT_CACHE" ]; then
clean_fail "Error: Downloaded file is empty" "$ARTIFACT_CACHE"
fi
# Check if file contains an error message (a JSON body starting with
# '{' and carrying "success": false instead of the gzipped image).
if [[ $(head -c 1 "$ARTIFACT_CACHE") == "{" ]] && [[ $(jq '.success' "$ARTIFACT_CACHE") == "false" ]]; then
MESSAGE=$(jq '.errors[0]' "$ARTIFACT_CACHE")
clean_fail "Error: Download of artifact failed: ${MESSAGE}" "$ARTIFACT_CACHE"
else
# The payload is gzip-compressed: rename to .gz, then gunzip restores
# the original .ova name (gunzip strips the .gz suffix).
mv "$ARTIFACT_CACHE" "${ARTIFACT_CACHE}.gz"
gunzip "$ARTIFACT_CACHE"
fi
}
# If our source path is expecting a downloaded file and we do not have
# it cached yet, fetch it from Atlas first. The argument strips the
# "packer_cache/atlas/" prefix and the ".ova" extension to recover the
# user/id/version triple.
if [[ "$SOURCE_PATH" == "packer_cache/atlas/"* ]] && [ ! -e "$SOURCE_PATH" ]; then
download_source_from_atlas "$(echo ${SOURCE_PATH#*atlas/} | rev | cut -f 2- -d '.' | rev)" "$SOURCE_TYPE"
fi
# Run Packer.io for the single builder named in $1.
packer build -only "$1" *.json
| true |
da6e5795134174a3cc3d6682cfbda3e9e5119174 | Shell | Lytro/cucumber-chef | /lib/cucumber/chef/templates/bootstrap/ubuntu-precise-omnibus.erb | UTF-8 | 1,983 | 3.203125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/env bash
#
# Cucumber-Chef 'Chef >= 11.0.0' Bootstrap Script
#
# Generated <%= Time.now.utc %>
#
# ERB template rendered by cucumber-chef before being shipped to the
# target host; <%= ... %> placeholders are filled in at render time.
set -x
# Sentinel file: its presence means this host was already bootstrapped.
CUCUMBER_CHEF_BOOTSTRAP_DONE="/.cucumber-chef-bootstrap-finished"
[ -f ${CUCUMBER_CHEF_BOOTSTRAP_DONE} ] && echo "Already bootstrapped!" && exit
export DEBIAN_FRONTEND=noninteractive
# Register the rendered hostname locally and persist it.
echo "127.0.0.1 <%= @hostname_full %> <%= @hostname_short %>" | tee -a /etc/hosts
echo "<%= @hostname_full %>" | tee /etc/hostname
hostname <%= @hostname_full %>
# Install the requested Chef version via the Opscode installer.
wget http://www.opscode.com/chef/install.sh
bash install.sh -v <%= @chef_version %>
# Stand up a Chef server on this host using the chef-server cookbook.
mkdir -p /var/chef/cache /var/chef/cookbooks/chef-server
wget -qO- https://github.com/opscode-cookbooks/chef-server/archive/master.tar.gz | tar xvzC /var/chef/cookbooks/chef-server --strip-components=1
chef-solo -o 'recipe[chef-server::default]'
# Poll until the freshly started server has written its key material.
echo -n "Waiting on validation.pem and webui.pem to appear..."
until [ -f /etc/chef/validation.pem ] && [ -f /etc/chef/webui.pem ]; do
echo -n "."
sleep 1
done
echo "done."
# Configure knife and create the client key for the template user.
mkdir -p ~/.chef
cp /etc/chef/validation.pem /etc/chef/webui.pem ~/.chef
knife configure -i --server-url http://127.0.0.1:4000 -u ${SUDO_USER} -r '' --defaults --disable-editing --yes -VV
knife client create <%= @user %> -a -f ${HOME}/.chef/<%= @user %>.pem --disable-editing --yes -VV
chown -R ${SUDO_USER}:${SUDO_USER} ${HOME}
# Wait for chef-client registration, nudging the daemon every 60s.
if [ ! -f /etc/chef/client.pem ]; then
/etc/init.d/chef-client restart
echo -n "Waiting on client.pem to appear..."
i="0"
until [ -f /etc/chef/client.pem ]; do
i=$[$i+1]
sleep 1
echo -n "."
if [ $i -gt 60 ]; then
echo -n "restart-chef-client"
/etc/init.d/chef-client restart
i="0"
fi
done
echo "done."
fi
# Seed the server with the cookbooks/roles staged by chef-solo.
knife cookbook upload --all --cookbook-path /tmp/chef-solo/cookbooks --force --yes -VV
knife role from file /tmp/chef-solo/roles/*.rb --yes -VV
cat <<EOF > /etc/chef/bootstrap-chef-client.json
<%= @chef_client_attributes.to_json %>
EOF
# First converge with the rendered attributes, then mark completion.
chef-client -j /etc/chef/bootstrap-chef-client.json
touch ${CUCUMBER_CHEF_BOOTSTRAP_DONE}
| true |
408f63cdb830f5a361cbac1f79237967cb6d0c97 | Shell | seanbermejo/dotfiles | /.scripts/set_ssh_banner | UTF-8 | 1,780 | 3.71875 | 4 | [] | no_license | #!/bin/bash
# set up a ssh banner (motd) with neofetch and available updates
#
# Fixes over the original version:
#   * the pacman branch tested /usr/bin/apt instead of /usr/bin/pacman,
#     so the Arch install path was unreachable;
#   * the 30/40/50 snippets were written from "$ua" instead of their own
#     variables, so three files received the wrong contents;
#   * every here-doc delimiter is now quoted ('EOF') so the $variables
#     and $(commands) they contain are written literally into the
#     generated scripts instead of being expanded (mostly to empty
#     strings) at generation time.

# check if the motd fragment directory exists on this system
if [ ! -e /etc/update-motd.d/ ]; then
    echo "/etc/update-motd.d/ is not available on this system"
    exit
fi

# remove /etc/motd and existing entries in update-motd.d
rm -rf /etc/motd
rm -rf /etc/update-motd.d/*

# install neofetch if necessary
if [ ! -e /usr/bin/neofetch ]; then
    # debian/ubuntu
    if [ -e /usr/bin/apt ]; then
        sudo apt update && sudo apt install -y neofetch
    # arch
    elif [ -e /usr/bin/pacman ]; then
        sudo pacman -S neofetch
    fi
fi

# 10-neofetch
nf=$(cat <<'EOF'
#!/bin/sh
neofetch
EOF
)
echo "$nf" > /etc/update-motd.d/10-neofetch

# 20-updates-available
ua=$(cat <<'EOF'
#!/bin/sh
stamp="/var/lib/update-notifier/updates-available"
[ ! -r "$stamp" ] || cat "$stamp"
EOF
)
echo "$ua" > /etc/update-motd.d/20-updates-available

# 30-release-upgrade
ru=$(cat <<'EOF'
#!/bin/sh
# if the current release is under development there won't be a new one
if [ "$(lsb_release -sd | cut -d' ' -f4)" = "(development" ]; then
    exit 0
fi
if [ -x /usr/lib/ubuntu-release-upgrader/release-upgrade-motd ]; then
    exec /usr/lib/ubuntu-release-upgrader/release-upgrade-motd
fi
EOF
)
echo "$ru" > /etc/update-motd.d/30-release-upgrade

# 40-unattended-upgrades
uu=$(cat <<'EOF'
#!/bin/sh
if [ -x /usr/share/unattended-upgrades/update-motd-unattended-upgrades ]; then
    exec /usr/share/unattended-upgrades/update-motd-unattended-upgrades
fi
EOF
)
echo "$uu" > /etc/update-motd.d/40-unattended-upgrades

# 50-reboot-required
rr=$(cat <<'EOF'
#!/bin/sh
if [ -x /usr/lib/update-notifier/update-motd-reboot-required ]; then
    exec /usr/lib/update-notifier/update-motd-reboot-required
fi
EOF
)
echo "$rr" > /etc/update-motd.d/50-reboot-required

chmod +x /etc/update-motd.d/*
| true |
f7c588293d72805a61920d48fb599a3004bf201b | Shell | cyrilvj94/ShellScriptExamples | /2_For_While_funcs/func_2.sh | UTF-8 | 371 | 3.8125 | 4 | [] | no_license | #! /bin/bash
# Check whether the decimal number $1 is a palindrome by reversing its
# digits and comparing against the original value.
# Prints "Paliendrom" for a palindrome, "Not Paliendrome" otherwise
# (output spellings kept identical to the original script).
isPaliendrome()
{
	local n=$1
	local reversed=0
	while (( n > 0 )); do
		reversed=$(( reversed * 10 + n % 10 ))
		n=$(( n / 10 ))
	done
	if (( $1 == reversed )); then
		echo Paliendrom
	else
		echo Not Paliendrome
	fi
}
# Prompt for two numbers on one line and report each verdict.
read -p"Enter Number" num1 num2
echo $num1 ":" $(isPaliendrome $num1)
echo $num2 ":" $(isPaliendrome $num2)
| true |
01b679c63006c9d0f72d1f0879250e70ae43b60c | Shell | petronny/aur3-mirror | /nephilim-git/PKGBUILD | UTF-8 | 903 | 2.609375 | 3 | [] | no_license | # Contributor: zajca <zajcaa at gmail dot com>
# Arch Linux PKGBUILD metadata for the git-snapshot package of the
# "nephilim" MPD client (pkgver is the snapshot date, YYYYMMDD).
pkgname=nephilim-git
pkgver=20090819
pkgrel=1
pkgdesc="Experimental mpd client using Qt4 and writen in python with animelyrics support."
arch=('i686')
url="http://repo.or.cz/w/nephilim.git"
license=('GPL')
depends=('mpd' 'mpc' 'zsi' 'python' 'qt' 'pyqt' 'python-lxml')
makedepends=('git')
# Local launcher script shipped alongside the checkout (see build()).
source=(script_nephilim)
md5sums=('40055b14dfc27dcb4a9b9101c88c2b2a')
# Upstream git repository and the directory name it clones into.
_gitroot=http://repo.or.cz/r/nephilim.git
_gitname=nephilim
# makepkg build step: clone (or update) the upstream git checkout, then
# stage the python package under pkg/usr/share and install the launcher.
build() {
cd $startdir/src
msg "Connecting to GIT server...."
# Reuse an existing clone when present, otherwise clone fresh.
if [ -d $startdir/src/$_gitname ] ; then
cd $_gitname && git pull origin
msg "The local files are updated."
else
git clone $_gitroot
cd $_gitname
fi
# Stage the files into the package root ($startdir/pkg).
mkdir -p $startdir/pkg/usr/{bin,share}
cp -r $startdir/src/nephilim $startdir/pkg/usr/share/nephilim
install -D -m755 $startdir/src/script_nephilim $startdir/pkg/usr/bin/nephilim
}
| true |
d608d18e9204b6b8e03b99679b79dbb1a86a1be5 | Shell | wing-888/MOR_X5_FROM_VM | /test/scripts/information/hdd_space_in_root.sh | UTF-8 | 2,496 | 3.9375 | 4 | [] | no_license | #! /bin/sh
# Author: Mindaugas Mardosas, Nerijus Sapola
# Year: 2012
# About: This script checks for available disk space in / dir. If free space is less than 10 GB script asks user a confirmation to ignore and continue if FIRST_INSTALL parameter is passed to this script
# Arguments:
#	$1 - "FIRST_INSTALL"
# Examples:
#	./hdd_space_in_root.sh FIRST_INSTALL	# Asks for confirmation
#	./hdd_space_in_root.sh			# Just reports status {OK, FAILED}, no confirmation
FIRST_INSTALL="$1" #taking parameters
# Shared helpers; provides report() used in the MAIN section below.
. /usr/src/mor/test/framework/bash_functions.sh
hdd_space_in_dir()
{
	# Author: Mindaugas Mardosas
	# Year: 2010
	# About: Checks whether directory/mount point $1 has at least $2
	#        kilobytes of free space.
	#
	# Arguments:
	#	$1 - dir to check free space
	#	$2 - required free space in K
	# Returns:
	#	0 - OK, free space >= $2
	#	1 - FAILED, free space < $2
	# Example:
	#	hdd_space_in_dir / "10485760"	#10485760 - 10 GB
	DEBUG=0	# 0 - off, 1 - on

	# BUG FIX: the original ran "df -P /" and ignored $1, so every caller
	# silently measured the root filesystem regardless of the directory
	# passed in. The 4th column of df -P's data line is available space.
	FREE_SPACE=$(df -P "$1" 2>/dev/null | awk 'NR>1 {print $4}')
	if [ "$DEBUG" = "1" ]; then echo "FREE SPACE: $FREE_SPACE"; fi

	if [ "$FREE_SPACE" -lt "$2" ]; then
		if [ "$DEBUG" = "1" ]; then echo "RETURN: 1"; fi
		return 1;	#FAILED there is less space than specified in the second parameter
	else
		if [ "$DEBUG" = "1" ]; then echo "RETURN: 0"; fi
		return 0;	#OK - there is more space than specified in 2nd parameter
	fi
}
#--------MAIN -------------
# FIRST_INSTALL mode: warn below 45 GB but let the operator override.
# Normal mode: report pass/fail against a hard 10 GB floor via report().
if [ "$FIRST_INSTALL" == "FIRST_INSTALL" ]; then
hdd_space_in_dir / "47185920" #check if less than 45 GB
if [ "$?" == "1" ]; then #failed
echo "There is less space than 45GB in / dir, it is very important to have more DISK SPACE than that. If you REALLY know what you are doing - please type 'CONTINUE!' and press ENTER"
read INPUT;
# NOTE(review): the prompt asks for 'CONTINUE!' but the comparison
# accepts 'CONTINUE' (no '!') — confirm which token is intended.
if [ "$INPUT" != "CONTINUE" ]; then
echo "Go and fix DISK space problem, after that you can try to INSTALL AGAIN";
exit 1;
else
echo "You have been warned!"
fi
echo
fi
else
hdd_space_in_dir / "10485760" #check if less than 10 GB
if [ "$?" == "1" ]; then #failed
echo
df -h
echo
# report() comes from the sourced bash_functions.sh; 1 = FAILED.
report "HDD DISK SPACE IN / dir is less than 10 GB !!! Please add another disk, check your partition table or delete uneccessary files, or you will get in trouble!" 1
exit 1
else
report "HDD DISK SPACE IN / dir" 0
exit 0
fi
fi
| true |
3f3a759f736b1d3d5d7d128aaa36ae1dab35b546 | Shell | Splintr255/devvm | /src/scripts/install.sh | UTF-8 | 398 | 3.53125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Installs dependencies and the Devvm

# Install the VM tooling (Vagrant + VirtualBox) via Homebrew casks.
install_dependencies() {
  brew install --cask vagrant
  brew install --cask virtualbox
}

# Bring the Vagrant box up from the src/ directory.
setup_vagrant() {
  cd src || exit 1
  vagrant up
}

# Entry point: install what we need, then boot the Devvm.
main() {
  echo "Installing dependencies..."
  install_dependencies
  echo "Dependencies installed!"
  setup_vagrant
  echo "Devvm now running! Use \"vagrant ssh\" to connect."
}

main
| true |
d610b5aa3b614b4351674f0e2134d0c165e3b688 | Shell | YukiYuki0508/study-typescript | /mkzip.sh | UTF-8 | 818 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env bash
# Export the public/ tree from git into a time-stamped zip on the
# user's Desktop: either the whole tree, or only files added since a
# given commit.
echo '全てのファイル(public)をzipにまとめますか? / All files? [y/n]'
read all
# Full export case.
# FIX: "$all" is now quoted — an empty answer (plain ENTER) previously
# made the test expand to `[ = 'y' ]`, a syntax error.
if [ "$all" = 'y' ]; then
  git archive --format=zip --prefix=public/ HEAD:public -o ~/Desktop/public.$(date +"%y%m%d%H%M").zip
# Differential export case: archive only the files selected by git diff
# against the given commit.
elif [ "$all" = 'n' ]; then
  echo '差分元のコミットIDを入力してください。 / Enter commit ID'
  read commitID
  # NOTE(review): --diff-filter=D against HEAD:public selects paths
  # absent in the old commit; word-splitting of $(git diff ...) is
  # relied upon here and breaks on filenames with spaces — confirm.
  git archive --format=zip --prefix=public/ HEAD:public $(git diff --diff-filter=D --name-only HEAD:public ${commitID}) -o ~/Desktop/public.$(date +"%y%m%d%H%M").zip
# Any other answer is rejected.
else
  echo '不正な値が入力されました。Invalid values.'
  exit 1
fi
echo 'デスクトップに圧縮したファイルを保存しました。 / Check your desktop'
| true |
14b7f0f7c98304a475d0339e0a6db4d0373639fe | Shell | motion-ai/open-horizon | /sh/travis-build.sh | UTF-8 | 1,518 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
###
### THIS SCRIPT PROVIDES AN AUTOMATED MULTI-ARCH BUILD
###
### DO __NOT__ CALL INTERACTIVELY
###
# Login into docker (credentials come from CI environment variables).
docker login --username $DOCKER_USER --password $DOCKER_PASSWORD
# Build for amd64 and push
buildctl build --frontend dockerfile.v0 \
--local dockerfile=. \
--local context=. \
--exporter image \
--exporter-opt name=docker.io/zeerorg/cron-connector:test-build-amd64 \
--exporter-opt push=true \
--frontend-opt platform=linux/amd64 \
--frontend-opt filename=./Dockerfile
# Build for armhf and push (uses the arch-specific Dockerfile.armhf)
buildctl build --frontend dockerfile.v0 \
--local dockerfile=. \
--local context=. \
--exporter image \
--exporter-opt name=docker.io/zeerorg/cron-connector:test-build-armhf \
--exporter-opt push=true \
--frontend-opt platform=linux/armhf \
--frontend-opt filename=./Dockerfile.armhf
# "docker manifest" is an experimental CLI feature and must be enabled.
export DOCKER_CLI_EXPERIMENTAL=enabled
# Create manifest list and push that
docker manifest create zeerorg/cron-connector:test-build \
zeerorg/cron-connector:test-build-amd64 \
zeerorg/cron-connector:test-build-armhf
docker manifest annotate zeerorg/cron-connector:test-build zeerorg/cron-connector:test-build-armhf --arch arm
docker manifest annotate zeerorg/cron-connector:test-build zeerorg/cron-connector:test-build-amd64 --arch amd64
docker manifest push zeerorg/cron-connector:test-build
| true |
9465946e2629f743d995b8d7457812bc0e0dc1ec | Shell | quivalen/openshift-moodle-quickstart | /.openshift/action_hooks/build | UTF-8 | 728 | 3.75 | 4 | [] | no_license | #!/bin/bash
# To update the version shipped in this quickstart, bump this variable:
#
install_version="2.7"

# Download and install Moodle into the gear's build-dependencies dir.
install_dir=${OPENSHIFT_BUILD_DEPENDENCIES_DIR}${install_version}

# Used in this script only
current_version_dir=${OPENSHIFT_DATA_DIR}current

#
# If Moodle is already installed in the current gear, there
# is nothing to build :-)
#
[ -d "${current_version_dir}" ] && exit 0

mkdir -p $install_dir

pushd ${install_dir} >/dev/null

# BUG FIX: the URL previously hard-coded "stable27/moodle-2.7.tgz", so
# bumping $install_version above had no effect. Derive both the branch
# ("stable" + version without the dot) and the tarball name from it.
curl -Ls "http://download.moodle.org/stable${install_version//./}/moodle-${install_version}.tgz" > moodle.tar.gz

# Install Moodle
#
tar --strip-components=1 -xzf moodle.tar.gz
rm -rf moodle.tar.gz

# Record which version is now staged, for later action hooks.
echo $install_version > ${OPENSHIFT_BUILD_DEPENDENCIES_DIR}.current_version

popd >/dev/null
| true |
78cd59170e3c63be10087b3e16a7d8977290c0d7 | Shell | bharambeakshay/shell-scripts | /stage-2-assignments/Day-6/for-loop/4forPrimeRange.sh | UTF-8 | 592 | 3.90625 | 4 | [] | no_license | #!/bin/bash
# Print every prime strictly between two user-supplied bounds
# (the bounds themselves are excluded, as in the original script).

# is_prime NUM -> status 0 when NUM is prime, 1 otherwise.
# FIX: 0 and 1 (and negatives) are rejected — the original trial loop
# reported them as prime. Trial division now stops at sqrt(NUM) instead
# of NUM-1, and uses bash arithmetic instead of spawning expr per step.
is_prime()
{
	local n=$1 j
	if (( n < 2 )); then
		return 1
	fi
	for (( j = 2; j * j <= n; j++ )); do
		if (( n % j == 0 )); then
			return 1
		fi
	done
	return 0
}

# print_primes_in_range START END -> primes p with START < p < END,
# one per line (exclusive bounds, matching the original loop).
print_primes_in_range()
{
	local lo=$1 hi=$2 i
	for (( i = lo + 1; i <= hi - 1; i++ )); do
		if is_prime "$i"; then
			echo "$i"
		fi
	done
}

read -p "Enter starting number of range " starting_number
read -p "Enter ending number of range " ending_number
echo "Prime numbers are in range of $starting_number and $ending_number are: "
print_primes_in_range "$starting_number" "$ending_number"
| true |
62a1cb0d36a118d2ed5cacac65fa3779c431abcf | Shell | ehtshamulhassan-rs/symbiflow-arch-defs | /quicklogic/common/toolchain_wrappers/conda_build_install_package.sh | UTF-8 | 1,837 | 3 | 3 | [
"ISC"
] | permissive | #!/bin/bash
# Announce the installer; \e[1;3Xm sequences are ANSI colors.
echo -e "\e[1;34mInstallation starting for conda based symbiflow\e[0m"
echo -e "\e[1;34mQuickLogic Corporation\e[0m"
# Validate $INSTALL_DIR: must be set and must not already hold a conda tree.
if [ -z "$INSTALL_DIR" ]
then
echo -e "\e[1;31m\$INSTALL_DIR is not set, please set and then proceed!\e[0m"
echo -e "\e[1;31mExample: \"export INSTALL_DIR=/<custom_location>\". \e[0m"
exit 0
elif [ -d "$INSTALL_DIR/conda" ]; then
echo -e "\e[1;32m $INSTALL_DIR/conda already exists, please clean up and re-install ! \e[0m"
exit 0
else
echo -e "\e[1;32m\$INSTALL_DIR is set to $INSTALL_DIR ! \e[0m"
fi
# Bootstrap a private Miniconda under $INSTALL_DIR/conda.
mkdir -p $INSTALL_DIR
wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O conda_installer.sh
bash conda_installer.sh -b -p $INSTALL_DIR/conda && rm conda_installer.sh
source "$INSTALL_DIR/conda/etc/profile.d/conda.sh"
# Isolate the env from the system's site-packages.
echo "include-system-site-packages=false" >> $INSTALL_DIR/conda/pyvenv.cfg
CONDA_FLAGS="-y --override-channels -c defaults -c conda-forge"
conda update $CONDA_FLAGS -q conda
# Fetch the prebuilt QuickLogic arch-defs tarball into $INSTALL_DIR.
curl https://storage.googleapis.com/symbiflow-arch-defs-install/quicklogic-arch-defs-63c3d8f9.tar.gz --output arch.tar.gz
tar -C $INSTALL_DIR -xvf arch.tar.gz && rm arch.tar.gz
# Pinned toolchain packages from the litex-hub channel.
conda install $CONDA_FLAGS -c litex-hub/label/main yosys="0.9_5266_g0fb4224e 20210301_104249_py37"
conda install $CONDA_FLAGS -c litex-hub/label/main symbiflow-yosys-plugins="1.0.0_7_313_g5a87bf8 20210507_125510"
conda install $CONDA_FLAGS -c litex-hub/label/main vtr-optimized="8.0.0_3614_gb3b34e77a 20210507_125510"
conda install $CONDA_FLAGS -c litex-hub iverilog
conda install $CONDA_FLAGS -c tfors gtkwave
conda install $CONDA_FLAGS make lxml simplejson intervaltree git pip
# pip installs go into the (activated) base environment.
conda activate
pip install python-constraint
pip install serial
pip install git+https://github.com/QuickLogic-Corp/quicklogic-fasm@318abca
pip install git+https://github.com/QuickLogic-Corp/ql_fasm@e5d0915
conda deactivate
748408176bd02eb16c46c8da53a696547b3d4100 | Shell | hyeshik/tailseeker | /conda/build.sh | UTF-8 | 711 | 3.109375 | 3 | [
"BSD-3-Clause",
"MIT"
] | permissive | export PKG_CONFIG_PATH=$PREFIX/lib/pkgconfig
# conda build script for tailseeker: compile the native sources, stage
# the package tree under $PREFIX/share/tailseeker, and install the
# "tseek" entrypoint. $PREFIX/$SRC_DIR/$PKG_* are provided by conda-build.
make -C "$SRC_DIR/src"

# Copy tailseeker dir
INSTALL_SUBDIRS="bin conf docs refdb scripts tailseeker templates"
TARGET_DIR="$PREFIX/share/tailseeker"

# BUG FIX: was "mkdir -p m 755 $TARGET_DIR", which created stray
# directories named "m" and "755" instead of passing the mode flag.
mkdir -p -m 755 "$TARGET_DIR"
# $INSTALL_SUBDIRS is intentionally unquoted: word-splitting yields
# one subdirectory name per iteration.
for subdir in $INSTALL_SUBDIRS; do
cp -Rp "$SRC_DIR/$subdir" "$TARGET_DIR/$subdir"
done
# The docker wrapper is not shipped in the conda package.
rm -f "$TARGET_DIR/bin/tailseq-docker-wrap"

# Install entrypoint script, substituting install-time placeholders.
sed -e "s|%%PREFIX%%|$PREFIX|g" \
-e "s|%%VERSION%%|$PKG_VERSION conda:$PKG_VERSION-$PKG_BUILDNUM|g" \
"$SRC_DIR/conda/tseek.in" > \
"$PREFIX/bin/tseek"
chmod 755 "$PREFIX/bin/tseek"

# Set paths
sed -e "s|%%PREFIX%%|$PREFIX|g" "$SRC_DIR/conda/paths.conf.in" > \
"$TARGET_DIR/conf/paths.conf"
chmod 644 "$TARGET_DIR/conf/paths.conf"
| true |
2bb9bf491666943cfad87d791245fe648645e155 | Shell | tobeycarman/ngee_dhs_code | /quickNdirty.sh | UTF-8 | 1,146 | 3.375 | 3 | [] | no_license | #!/bin/bash
# T. Carman, March 2019
#
# 'cust_model2netcdf.R' is a copy of:
#     modex.bnl.gov:/data/software/pecan_dev/models/dvmdostem/R/model2netcdf.dvmdostem.R
# with VegC added to the outputs to translate.
#
# Two-stage helper: stage 1 (commented out) builds ok/missing file
# lists; stage 2 runs as a qsub array job over the missing list.
OKF="oklist.txt"
MF="missinglist.txt"
###### FIRST, UNCOMMENT THIS AND MAKE FILE LISTS
# echo -n "" > $OKF
# echo -n "" > $MF
# for END_PATH in $(find . -type d -name "yearly_runs" -prune -o -path "*/out/*" -type d -print)
# do
# FULL_PATH="/data/tcarman/ngee_dhs_runs/$END_PATH"
# if [[ $(ncdump -h $FULL_PATH/2015.nc | grep "double VegC" | wc -l) -lt 1 ]]
# then
# echo "$FULL_PATH" >> $MF
# else
# echo "$FULL_PATH" >> $OKF
# fi
# done
###### THEN RE-COMMENT ABOVE AND USE THIS PART
# This is a qsub array job. First, count the lines in the
# missing list so we know how many tasks we'll need. Then
# submit this script like this:
# $ qsub -t 1-$(wc -l missinglist.txt)%100
# The %100 tells qsub not to have more than 100 concurrent tasks.
cd /data/tcarman/ngee_dhs_runs
echo "PBS_ARRAYID is: $PBS_ARRAYID"
# Each array task picks exactly one line (its own index) from the list.
FFP=$(tail -n+"$PBS_ARRAYID" $MF | head -n 1)
echo "Will call qd with $FFP"
./qd.sh "$FFP"
| true |
d591c2536ebb2556a5ce7f5986f2e43c1d7adaa9 | Shell | GuilleOr/iam-service | /run-full-test-suite.sh | UTF-8 | 4,136 | 3.265625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
NOCOLOR='\033[0m'
RED='\033[0;31m'
GREEN='\033[0;32m'
START_TIME=$(date +%s.%N)
BUILD_RESULT="${RED}FAILED${NOCOLOR}"
DOCKER_RESULT="${RED}FAILED${NOCOLOR}"
TEST_METHOD_SECURITY_RESULT="${RED}FAILED${NOCOLOR}"
TEST_RESOURCE_SERVER_RESULT="${RED}FAILED${NOCOLOR}"
CLEANUP_RESULT="${RED}FAILED${NOCOLOR}"
CUMULATIVE_RESULT="${RED}FAILED${NOCOLOR}"
RESULT_COUNTER=0
echo ".___ _____ _____ _________ .__ "
echo "| | / _ \ / \ / _____/ ______________ _|__| ____ ____ "
echo "| |/ /_\ \ / \ / \ ______ \_____ \_/ __ \_ __ \ \/ / |/ ___\/ __ \ "
echo "| / | \/ Y \ /_____/ / \ ___/| | \/\ /| \ \__\ ___/ "
echo "|___\____|__ /\____|__ / /_______ /\___ >__| \_/ |__|\___ >___ >"
echo " \/ \/ \/ \/ \/ \/ "
echo "Full Build & Integration Tests"
echo ""
#0. Check system dependencies.
# require_tool COMMAND LABEL
# Verify COMMAND is on PATH; print "<LABEL> OK" in green, or a red
# "<COMMAND> not installed." error and exit 1.
# FIX: uses the portable `command -v` instead of `which` and tests the
# command's status directly instead of inspecting $? afterwards; the
# four copy-pasted check blocks are collapsed into one helper.
require_tool() {
    if command -v "$1" >/dev/null 2>&1; then
        echo -e "$2 ${GREEN}OK${NOCOLOR}"
    else
        echo -e "${RED}ERROR: $1 not installed.${NOCOLOR}"
        exit 1
    fi
}
require_tool java Java
require_tool gradle Gradle
require_tool docker Docker
require_tool docker-compose docker-compose
#1. Build project and run JUnit tests
gradle clean build test
if [ $? -eq 0 ]; then
BUILD_RESULT="${GREEN}OK${NOCOLOR}"
else
# Stage failed: bump the counter but keep going so later stages report.
RESULT_COUNTER=$((RESULT_COUNTER+1))
fi
#2. Create and deploy docker images (detached, rebuilt from source)
docker-compose up --build -d
if [ $? -eq 0 ]; then
DOCKER_RESULT="${GREEN}OK${NOCOLOR}"
else
RESULT_COUNTER=$((RESULT_COUNTER+1))
fi
# Integration stages only run when build + compose both succeeded.
if [ $RESULT_COUNTER -eq 0 ]; then
#3. Wait for all REST services to start.
# NOTE(review): `until $(curl ...)` executes curl's (empty) output as a
# command; it works here because an empty command succeeds only after
# curl itself wrote nothing — confirm this matches the intent of
# polling until the JWKS endpoint responds.
until $(curl --silent --output /dev/null -f http://127.0.0.1:8080/services/authentication/iam-admins/iam-admins/.well-known/jwks.json ); do
echo "Waiting for iam-service to start ..."
sleep 1
done
# method-security demo is up once its public info endpoint returns 200.
until [ $(curl --silent --output /dev/null -f http://127.0.0.1:8082/services/public/info -w '%{http_code}\n') -eq "200" ]; do
echo "Waiting for spring-method-security to start ..."
sleep 1
done
# resource-server is up once it answers 401 (secured but responding).
until [ $(curl --silent --output /dev/null -f http://127.0.0.1:8081/services/public/info -w '%{http_code}\n') -eq "401" ]; do
echo "Waiting for spring-resource-server to start ..."
sleep 1
done
#4. Run Integration tests for 'spring-method-security' demo
gradle :spring-method-security:clean :spring-method-security:test -Dtest.profile=integration
if [ $? -eq 0 ]; then
TEST_METHOD_SECURITY_RESULT="${GREEN}OK${NOCOLOR}"
else
RESULT_COUNTER=$((RESULT_COUNTER+1))
fi
#5. Run Integration tests for 'spring-resource-server' demo
gradle :spring-resource-server:clean :spring-resource-server:test -Dtest.profile=integration
if [ $? -eq 0 ]; then
TEST_RESOURCE_SERVER_RESULT="${GREEN}OK${NOCOLOR}"
else
RESULT_COUNTER=$((RESULT_COUNTER+1))
fi
fi
#6. Shutdown and Cleanup docker (containers, volumes, images, orphans)
docker-compose down -v --rmi all --remove-orphans
if [ $? -eq 0 ]; then
CLEANUP_RESULT="${GREEN}OK${NOCOLOR}"
else
RESULT_COUNTER=$((RESULT_COUNTER+1))
fi
#7. Report results
END_TIME=$(date +%s.%N)
# bc handles the fractional-seconds subtraction.
DIFF_TIME=$(echo "$END_TIME - $START_TIME" | bc)
if [ $RESULT_COUNTER -eq 0 ]; then
CUMULATIVE_RESULT="${GREEN}OK${NOCOLOR}"
fi
echo -e ""
echo -e "Full Test Suite Results    : $CUMULATIVE_RESULT"
echo -e "============================"
echo -e "gradle build and test      : $BUILD_RESULT"
echo -e "docker compose             : $DOCKER_RESULT"
echo -e "IT Tests (method security) : $TEST_METHOD_SECURITY_RESULT"
echo -e "IT Tests (resource server) : $TEST_RESOURCE_SERVER_RESULT"
# NOTE(review): "docket" below is a typo for "docker" in a runtime
# string; left unchanged here since this edit is documentation-only.
echo -e "docket stop and cleanup    : $CLEANUP_RESULT"
echo -e "done in $DIFF_TIME s"
# Exit status = number of failed stages (0 means full success).
exit $RESULT_COUNTER
| true |
163ea956f944f4f78100565dbba50272edea0031 | Shell | ytz12345/2019_ICPC_Trainings | /OtherTrains/2019沈阳网预/diff.sh | UTF-8 | 148 | 2.84375 | 3 | [] | no_license | for((;;))
do
# Stress-test driver: ./EEE generates a random test case; ./E and ./EE
# are two candidate solutions whose outputs are diffed. Loop forever
# until a mismatch is found, then stop with the failing case left in
# test.txt / 1.txt / 11.txt for inspection.
./EEE > test.txt
./E < test.txt > 1.txt
./EE < test.txt > 11.txt
if diff 1.txt 11.txt;then
echo OK
else
# Mismatch found — announce and bail out, keeping the artifacts.
echo !
exit
fi
done
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.