blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
cdc948e3a28d78d406e7616852cf66d2813deca5 | Shell | slpcat/docker-images | /oracle/OracleDatabase/SingleInstance/extensions/buildExtensions.sh | UTF-8 | 3,263 | 4.1875 | 4 | [
"UPL-1.0",
"MIT",
"BSD-3-Clause"
] | permissive | #!/bin/bash -e
#
# Since: Mar, 2020
# Author: mohammed.qureshi@oracle.com
# Description: Build script for building Docker Image Extensions
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
#
# Copyright (c) 2020 Oracle and/or its affiliates. All rights reserved.
#
SCRIPT_DIR=$(dirname $0)
SCRIPT_NAME=$(basename $0)
usage() {
cat << EOF
Usage: $SCRIPT_NAME -a -x [extensions] -b [base image] -t [image name] [-o] [Docker build option]
Builds one of more Docker Image Extensions.
Parameters:
-a: Build all extensions
-x: Space separated extensions to build. Defaults to all
Choose from : $(for i in $(cd "$SCRIPT_DIR" && ls -d */); do echo -n "${i%%/} "; done)
-b: Base image to use
-t: name:tag for the extended image
-o: passes on Docker build option
LICENSE UPL 1.0
Copyright (c) 2020 Oracle and/or its affiliates. All rights reserved.
EOF
}
##############
#### MAIN ####
##############
# Parameters
DOCKEROPS=""
DOCKERFILE="Dockerfile"
BASE_IMAGE="oracle/database:19.3.0-ee"
IMAGE_NAME="oracle/database:ext"
if [ "$#" -eq 0 ]; then
usage;
exit 1;
fi
while getopts "ax:b:t:o:h" optname; do
case "$optname" in
a)
EXTENSIONS=$(for i in $(cd "$SCRIPT_DIR" && ls -d */); do echo -n "${i%%/} "; done)
;;
x)
EXTENSIONS="$OPTARG"
;;
b)
BASE_IMAGE="$OPTARG"
;;
t)
IMAGE_NAME="$OPTARG"
;;
o)
DOCKEROPS="$OPTARG"
;;
h|?)
usage;
exit 1;
;;
*)
# Should not occur
echo "Unknown error while processing options inside buildDockerImage.sh"
;;
esac
done
echo "=========================="
echo "DOCKER info:"
docker info
echo "=========================="
# Proxy settings
PROXY_SETTINGS=""
if [ "${http_proxy}" != "" ]; then
PROXY_SETTINGS="$PROXY_SETTINGS --build-arg http_proxy=${http_proxy}"
fi
if [ "${https_proxy}" != "" ]; then
PROXY_SETTINGS="$PROXY_SETTINGS --build-arg https_proxy=${https_proxy}"
fi
if [ "${ftp_proxy}" != "" ]; then
PROXY_SETTINGS="$PROXY_SETTINGS --build-arg ftp_proxy=${ftp_proxy}"
fi
if [ "${no_proxy}" != "" ]; then
PROXY_SETTINGS="$PROXY_SETTINGS --build-arg no_proxy=${no_proxy}"
fi
if [ "$PROXY_SETTINGS" != "" ]; then
echo "Proxy settings were found and will be used during the build."
fi
# ################## #
# BUILDING THE IMAGE #
# ################## #
BUILD_START=$(date '+%s')
cd "$SCRIPT_DIR"
for x in $EXTENSIONS; do
echo "Building extension $x..."
# Go into version folder
cd "$x" || {
echo "Could not find extension directory '$x'";
exit 1;
}
docker build --force-rm=true --build-arg BASE_IMAGE="$BASE_IMAGE" \
$DOCKEROPS $PROXY_SETTINGS -t $IMAGE_NAME -f $DOCKERFILE . || {
echo ""
echo "ERROR: Oracle Database Docker Image was NOT successfully created."
echo "ERROR: Check the output and correct any reported problems with the docker build operation."
exit 1
}
BASE_IMAGE="$IMAGE_NAME"
cd ..
done
# Remove dangling images (intermitten images with tag <none>)
docker image prune -f > /dev/null
BUILD_END=$(date '+%s')
BUILD_ELAPSED=`expr $BUILD_END - $BUILD_START`
echo ""
echo ""
cat<<EOF
Oracle Database Docker Image extended:
--> $IMAGE_NAME
Build completed in $BUILD_ELAPSED seconds.
EOF
| true |
2a50a1339aadd4d5db429aa8381fe28be7d3037c | Shell | hybitz/tax_jp | /lib/build_tasks/download_address.sh | UTF-8 | 396 | 3.3125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
ZIP_FILE=ken_all.zip
if [ -e tmp/${ZIP_FILE} ]; then
echo "tmp/${ZIP_FILE} が存在するので、再ダウンロードは行いません。"
else
mkdir tmp
curl -# -o tmp/${ZIP_FILE} -LO http://www.post.japanpost.jp/zipcode/dl/kogaki/zip/ken_all.zip
fi
pushd tmp
rm -f KEN_ALL.CSV
unzip ${ZIP_FILE}
iconv -f SJIS -t UTF8 KEN_ALL.CSV > ../data/住所/addresses.csv
popd
| true |
5cfb215ae15cdf4e687e8da8fc03548ef7f0bc32 | Shell | tbronchain/perso | /scripts/android_project.sh | UTF-8 | 654 | 3.59375 | 4 | [] | no_license | #!/bin/bash
CAT="test"
TYPE="MainActivity"
if [ "$1" == "" ]; then
if [ "$ANDROID_PROJECT" != "" ]; then
unset ANDROID_PROJECT
else
echo -e "syntax error.\nUsage: . $0 <project name>"
fi
fi
if [ ! -f "/Users/thibaultbronchain/Sources/android/$1/build.xml" ]; then
mkdir -p /Users/thibaultbronchain/Sources/android/$1
cd /Users/thibaultbronchain/Sources/android/$1
android list targets
echo -n "Please input the target [1]: "
read TARGET
if [ "$TARGET" == "" ]; then
TARGET=1
fi
android create project -t $TARGET -n $1 -p . -a ${TYPE} -k com.${CAT}.${1}
fi
export ANDROID_PROJECT=$1
| true |
2b2673f9132ba58da842de0da239bb7fbae9ffd2 | Shell | Vinotha16/WIN_ROLLBACK | /templates/linux_actualfacts/ubuntu20.04/firewallpolicy_35321_actual.fact | UTF-8 | 503 | 2.984375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
cmd=$(iptables -L | egrep 'policy DROP|policy REJECT' | paste -sd "," | expand -t 1 )
if [ $(sudo iptables -L | egrep 'INPUT.*policy DROP|INPUT.*policy REJECT' | wc -l) -eq 0 ] ||[ $(sudo iptables -L | egrep 'FORWARD.*policy DROP|FORWARD.*policy REJECT' | wc -l) -eq 0 ] || [ $(sudo iptables -L | egrep 'OUTPUT.*policy DROP|OUTPUT.*policy REJECT' | wc -l) -eq 0 ]; then
echo "{ \"firewallpolicy_35321_actual\" : \"\" }"
else
echo "{ \"firewallpolicy_35321_actual\" : \"$cmd\" }"
fi
| true |
550db583d58eaa18bc03784ccbce99f0c71c4824 | Shell | FingerLeakers/EquationGroupLeak | /Linux/bin/telnet.sh | UTF-8 | 1,590 | 3.4375 | 3 | [] | no_license | #!/bin/sh
# Wrapper script for spawn.
#v1.0.1.1
#export SU HIDEME HIDECON LD_PRELOAD
echo -e "\n==============="
echo $0: The spawn/telnet wrapper v1.0.1.1
echo -e "\n==============="
echo -e "\n\nENVIRONMENT:"
echo -e "env | egrep \"(RA|RP|CMD|SU|HIDE|LD_PRELOAD|NOPEN).*=\""
env | egrep "(RA|RP|CMD|SU|HIDE|LD_PRELOAD|NOPEN).*="
echo -e "\n==============="
unset SPAWN FTSHELL TELNETMODE FORCEDPORT
type spawn && [ ! "$NOSPAWN" ] && SPAWN=spawn
type ftshell && FTSHELL=ftshell
[ "$2" = "23" ] && [ "$3" = "" ] && TELNETMODE=telnet
BASE=`basename $0`
[ "$BASE" = "telnet" ] && [ "$2" = "23" -o "$2" = "" ] && TELNETMODE=telnet
if [ "x$SPAWN" = "x" ] ; then
[ -x /usr/bin/telnet ] && SPAWN=/usr/bin/telnet
fi
[ "$TELNETMODE" ] && [ ! "$2" ] && FORCEDPORT=23
[ "$NOTELNETMODE" ] && unset TELNETMODE
[ "$SPAWN" = "/usr/bin/telnet" ] && unset TELNETMODE
[ "$3" = "telnet" ] && unset TELNETMODE
if [ ! "${WINTYPE:0:7}" = "JACKPOP" -a ! "$BASE" = "spawn" ] ; then
echo -e "\n\n\nWARNING: You are about to use $SPAWN as your $BASE client via:\n\n\t\t$SPAWN $* $FORCEDPORT $TELNETMODE
\n"
echo -en "\a Are you sure you want to? [N] "
read ans
[ "x$ans" = "x" -o "${ans:0:1}" = "n" -o "${ans:0:1}" = "N" ] && \
echo -e "\n\nUse /usr/bin/telnet if you do not want spawn.\n\n" && \
exit 1
fi
# never mind---ftshell doesn't seem to play nice inside scripme windows
unset FTSHELL
echo -e "\n==============="
echo -e "\ndate -u\n"`date -u`"\n===============\n"
echo $0 wrapper execing: $FTSHELL $SPAWN $* $TELNETMODE
exec $FTSHELL $SPAWN $* $FORCEDPORT $TELNETMODE
| true |
519614ce4808c419c3c540f823c6e168169674a8 | Shell | Azure/azure-sdk-for-go | /eng/scripts/mgmt-auto-release.sh | UTF-8 | 1,114 | 3.453125 | 3 | [
"LGPL-2.1-or-later",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #!/bin/bash
set -ex
today=`date "+%Y%m%d"`
firstDay=`date -d "${today}" +%Y%m01`
week=`date -d "$firstDay" +%w`
secondSaturday=$((firstDay+(12 - week) % 7 + 8))
if [ $today -gt $secondSaturday ]
then
echo "The PR generation time of the current month is: [$firstDay-$secondSaturday]"
exit 0
fi
export PATH=$PATH:$HOME/go/bin
git config --global user.email "ReleaseHelper"
git config --global user.name "ReleaseHelper"
cd ../
git clone https://github.com/Azure/azure-sdk-for-go.git
git clone https://github.com/Azure/azure-rest-api-specs.git
cd azure-sdk-for-go
git remote add fork https://Azure:"$1"@github.com/Azure/azure-sdk-for-go.git
cd ../
go install github.com/Azure/azure-sdk-for-go/eng/tools/generator@latest
generator issue -t $1 > sdk-release.json
cat sdk-release.json
file_size=`du -b ./sdk-release.json |awk '{print $1}'`
echo "sdk-release.json file size:" ${file_size}
if [ ${file_size} -le 70 ]; then
echo "There are no services that need to be released"
else
echo "run generator release-v2..."
generator release-v2 ./azure-sdk-for-go ./azure-rest-api-specs ./sdk-release.json -t $1
fi | true |
903782df4dbac93a25266334a7fb16c75beeccc0 | Shell | citrix-openstack/qa | /jenkins/jobs/run-transfervm-tests.sh | UTF-8 | 771 | 3.625 | 4 | [] | no_license | #!/bin/bash
set -eu
REMOTELIB=$(cd $(dirname $(readlink -f "$0")) && cd remote && pwd)
XSLIB=$(cd $(dirname $(readlink -f "$0")) && cd xslib && pwd)
function print_usage_and_die
{
cat >&2 << EOF
usage: $0 XENSERVERNAME
Run transfervm build on a new slave
positional arguments:
XENSERVERNAME The name of the XenServer
EOF
exit 1
}
XENSERVERNAME="${1-$(print_usage_and_die)}"
set -x
SLAVE_IP=$(cat $XSLIB/start-slave.sh | "$REMOTELIB/bash.sh" "root@$XENSERVERNAME")
"$REMOTELIB/bash.sh" "ubuntu@$SLAVE_IP" << END_OF_TVM_TESTS
set -eux
export DEBIAN_FRONTEND=noninteractive
sudo apt-get -qy update
sudo apt-get -qy dist-upgrade
sudo apt-get -qy install git make
git clone https://github.com/matelakat/transfervm transfervm
END_OF_TVM_TESTS
echo "$SLAVE_IP"
| true |
c377692a72323ef2db2695a1ff34c8e92e036699 | Shell | thamaji/typescript-electron-on-vscode | /.devcontainer/entrypoint.sh | UTF-8 | 631 | 2.8125 | 3 | [] | no_license | #!/bin/bash
set -eu
# docker.sock から gid を取得して、docker グループの gid を変更
docker_group_id=$(ls -n /var/run/docker.sock | cut -d ' ' -f 4)
sudo groupmod --gid ${docker_group_id} docker
sudo /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
sudo su --login vscode <<EOS
export PATH=${PATH}
export LANG=${LANG}
unset SESSION_MANAGER
unset DBUS_SESSION_BUS_ADDRESS
export GTK_IM_MODULE=fcitx
export QT_IM_MODULE=fcitx
export XMODIFIERS=@im=fcitx
export DefaultIMModule=fcitx
/usr/bin/fcitx-autostart
while true; do
/usr/bin/startxfce4
done
EOS
| true |
06e17ae0705c8c09789d60fcc255e4e399119b59 | Shell | mstange22/Exercism | /bash/darts/darts.sh | UTF-8 | 429 | 3.453125 | 3 | [] | no_license | #!/usr/bin/env bash
throw() { exho "error"; exit 1; }
main () {
(( $# == 2 )) || throw
([[ $1 =~ [-.0-9] ]] && [[ $2 =~ [-.0-9] ]]) || throw
radius=$(bc <<< "scale=2; sqrt( $1*$1 + $2*$2 )")
if [[ $(bc <<< "$radius > 10") -eq 1 ]]; then
echo "0"
elif [[ $(bc <<< "$radius > 5" ) -eq 1 ]]; then
echo "1"
elif [[ $(bc <<< "$radius > 1" ) -eq 1 ]]; then
echo "5"
else
echo "10"
fi
}
main "$@" | true |
7752e08d9764b6fe030cc9cfae0b01f7791273bd | Shell | dchandran/researchcode | /seq_pipe/get_blast_dbs.sh | UTF-8 | 521 | 3.0625 | 3 | [] | no_license | #!/bin/bash
for i in {0..9}
do
cmd="wget ftp://ftp.ncbi.nlm.nih.gov/blast/db/env_nr.0$i.tar.gz"
eval $cmd
cmd="tar xzf env_nr.0$i.tar.gz"
eval $cmd
done
wget ftp://ftp.ncbi.nlm.nih.gov/blast/db/env_nr.10.tar.gz
tar xzf env_nr.10.tar.gz
for i in {0..3}
do
for j in {0..9}
do
cmd="wget ftp://ftp.ncbi.nlm.nih.gov/blast/db/nr.$i$j.tar.gz"
eval $cmd
if [ -f "nr.$i$j.tar.gz" ]
then
cmd="tar xzf nr.$i$j.tar.gz"
eval $cmd
fi
done
done
| true |
dd6d072a4911f8f191866971e0a31216c4a6b283 | Shell | rockkb/xf_tag | /bin/main.sh | UTF-8 | 436 | 2.8125 | 3 | [] | no_license | #!/bin/bash
cd "$(dirname "$0")"
cd ..
##for i in {1..10};
#for i in $(seq 0 $1)
#if not input $1, default value is 100
for i in $(seq 0 ${1:-100})
do
for fold in {0..4};
do
python -u ./core/bert.py --max_bin=1 --fold=${fold} train_base >> bin_1_fold_${fold}_"$(hostname)".log 2>&1
python -u ./core/bert.py --max_bin=2 --fold=${fold} train_base >> bin_2_fold_${fold}_"$(hostname)".log 2>&1
done
done
| true |
d44443ac800db3aee6397a8254e7a7b03d641ea1 | Shell | holyhan/ShellPractice | /expr.sh | UTF-8 | 2,745 | 3.796875 | 4 | [] | no_license | #!/bin/bash
# 两数相加, 其中两数之间必须有空格
val=`expr 2 + 2`
echo "两数之和为 :$val"
a=10
b=20
val=`expr $a + $b`
echo "a + b : $val"
val=`expr $a - $b`
echo "a - b : $val"
val=`expr $a \* $b`
echo "a * b : $val"
val=`expr $b / $a`
echo "b / a : $val"
val=`expr $b % $a`
echo "b % a : $val"
if [[ $a == $b ]]; then
echo "a 等于 b"
fi
if [[ $a != $b ]]; then
echo "a 不等于 b"
fi
if [[ $a -eq $b ]]; then
echo "$a -eq $b : a 等于 b"
else
echo "$a -eq $b : a 不等于 b"
fi
if [[ $a -ne $b ]]; then
echo "$a -ne $b : a 不等于 b"
else
echo "$a -ne $b : a 等于 b"
fi
if [[ $a -gt $b ]]; then
echo "$a -gt $b : a 大于 b"
else
echo "$a -gt $b : a 不大于 b"
fi
if [[ $a -lt $b ]]; then
echo "$a -lt $b : a 小于 b"
else
echo "$a -lt $b : a 不小于 b"
fi
if [[ $a -ge $b ]]; then
echo "$a -ge $b : a 大于或等于 b"
else
echo "$a -ge $b : a 小于 b"
fi
if [[ $a -le $b ]]; then
echo "$a -le $b : a 小于或等于 b"
else
echo "$a -le $b : a 大于 b"
fi
# 布尔表达式
if [ $a -lt 100 -a $b -gt 15 ]; then
echo "$a 小于 100 且 $b 大于 15 : 返回 true"
else
echo "$a 小于 100 且 $b 大于 15 : 返回false"
fi
if [ $a -lt 100 -o $b -gt 100 ]; then
echo "$a 小于 100 或 $b 大于 100 : 返回 true"
else
echo "$a 小于 100 或 $b 大于 100 : 返回false"
fi
if [ $a -lt 5 -o $b -gt 100 ]; then
echo "$a 小于 5 或 $b 大于 100 : 返回 true"
else
echo "$a 小于 5 且 $b 大于 100 : 返回false"
fi
a="abc"
b="efg"
if [ $a = $b ]; then
echo "$a = $b : a 等于 b"
else
echo "$a = $b : a 不等于 b"
fi
if [ $a != $b ]; then
echo "$a != $b : a 不等于 b"
else
echo "$a != $b : a 等于 b"
fi
if [ -z $a ]; then
echo "-z $a : 字符串长度为0"
else
echo "-z $a : 字符串长度不为0"
fi
if [ -n $a ]; then
echo "-n $a : 字符串长度不为0"
else
echo "-n $a : 字符串长度为0"
fi
if [ $a ]; then
echo "$a : 字符串不为空"
else
echo "$a : 字符串为空"
fi
# 文件测试运算符
file="./expr.sh"
if [ -r $file ]; then
echo "文件可读"
else
echo "文件不可读"
fi
if [ -w $file ]; then
echo "文件可写"
else
echo "文件不可写"
fi
if [ -x $file ]; then
echo "文件可执行"
else
echo "文件不可执行"
fi
if [ -f $file ]; then
echo "文件为普通文件"
else
echo "文件为特殊文件"
fi
if [ -d $file ]; then
echo "文件是个目录"
else
echo "文件不是个目录"
fi
if [ -s $file ]; then
echo "文件不为空"
else
echo "文件为空"
fi
if [ -e $file ]; then
echo "文件存在"
else
echo "文件不存在"
fi
| true |
c8757a942f58b459d44b7976f69dbb347fa0d2c2 | Shell | smartcoop/demo-templating | /start.sh | UTF-8 | 781 | 2.765625 | 3 | [] | no_license | #!/bin/sh
# Build json-server image
docker build -t demo-templating-json-server -f DemoTemplating.JsonServer/Dockerfile ./DemoTemplating.JsonServer/
# Build api image
docker build -t demo-templating-api -f DemoTemplating.Api/Dockerfile .
# Build the benchmark image
docker build -t demo-templating-benchmark -f DemoTemplating.Benchmark/Dockerfile .
# Create docker network if needed
docker network create templating
# Remove existing containers if any
docker rm -f demo-templating-json-server
docker rm -f demo-templating-api
# Run json-server container
docker run -d -p 8080:80 --network=templating --name demo-templating-json-server demo-templating-json-server
# Run api container
docker run -d -p 80:80 --network=templating --name demo-templating-api demo-templating-api | true |
e57b7d8e2d480228cb35df70eceb8fdd4e80f5fb | Shell | alexanderfefelov/scripts | /install/dev/install-soapui.sh | UTF-8 | 727 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Exit immediately if a pipeline, which may consist of a single simple command,
# a list, or a compound command returns a non-zero status
set -e
readonly MONIKER=soapui
readonly VERSION=5.6.0
readonly STUFF=SoapUI-x64-$VERSION.sh
readonly TARGET_DIR=$HOME/dev/$MONIKER
if [ -d "$TARGET_DIR" ]; then
echo Directory exists: $TARGET_DIR >&2
exit 1
fi
mkdir --parents $TARGET_DIR
readonly TEMP_DIR=$(mktemp --directory -t delete-me-XXXXXXXXXX)
(
cd $TEMP_DIR
echo -n Downloading...
wget --quiet https://s3.amazonaws.com/downloads.eviware/soapuios/$VERSION/$STUFF
echo done
echo -n Installing...
chmod +x $STUFF
./$STUFF -q -dir $TARGET_DIR
echo done
)
rm --recursive --force $TEMP_DIR
| true |
a4dd65e12a9565f88cac02e7d0279a20926ba584 | Shell | Akendo/vagrant-skel | /scripts/install.sh | UTF-8 | 2,072 | 3.296875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
HOSTNAME=$(hostname -f)
function run_puppet_agent (){
puppet agent -v -t
}
function connect_to_puppetmaster () {
cat << EOF > /etc/puppet/puppet.conf
[main]
logdir=/var/log/puppet
vardir=/var/lib/puppet
ssldir=/var/lib/puppet/ssl
rundir=/var/run/puppet
factpath=$vardir/lib/facter
templatedir=$confdir/templates
[master]
# These are needed when the puppetmaster is run by passenger
# and can safely be removed if webrick is used.
ssl_client_header = SSL_CLIENT_S_DN
ssl_client_verify_header = SSL_CLIENT_VERIFY
[agent]
certname = $HOSTNAME
server = puppetmaster.vbox.net
EOF
}
function puppetlabs_repo () {
wget -q -c http://apt.puppetlabs.com/puppetlabs-release-precise.deb
dpkg -i puppetlabs-release-precise.deb
}
function apt_update () {
apt-get update
}
function apt_install_puppetmaster {
apt-get install -y \
puppetmaster\
puppetmaster-common \
puppet-common \
puppet \
puppetdb \
puppetdb-terminus
apt-get install -y git git-man liberror-perl patch
}
function apt_install_puppetagent {
apt-get install -y puppet puppet-common
}
function setup_puppetmaster {
cat << EOF > /etc/puppet/puppet.conf
[main]
logdir=/var/log/puppet
vardir=/var/lib/puppet
ssldir=/var/lib/puppet/ssl
rundir=/var/run/puppet
factpath=$vardir/lib/facter
templatedir=$confdir/templates
[master]
# These are needed when the puppetmaster is run by passenger
# and can safely be removed if webrick is used.
ssl_client_header = SSL_CLIENT_S_DN
ssl_client_verify_header = SSL_CLIENT_VERIFY
storeconfigs = true
storeconfigs_backend = puppetdb
autosign = true
[agent]
certname = $HOSTNAME
server = $HOSTNAME
EOF
cat << EOF > /etc/puppet/puppetdb.conf
[main]
server = $HOSTNAME
port = 8081
EOF
service puppetdb restart
service puppetmaster restart
}
puppetlabs_repo
apt_update
if [ $HOSTNAME = 'puppetmaster.vbox.net' ]; then
apt_install_puppetmaster
setup_puppetmaster
else
apt_install_puppetagent
connect_to_puppetmaster
fi;
sleep 20 # Bad solution. PuppetDB need some time to get running.
run_puppet_agent
exit 0
| true |
c9595d60f6e898a081a110f9e661b631821dd7cf | Shell | a-takumi/get-sh-k8s-pod | /bin/sh-k8s | UTF-8 | 707 | 3.703125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
CTX=""
NS=""
PD=""
while getopts c:n:p: OPT
do
case $OPT in
"c" ) CTX="$OPTARG";;
"n" ) NS="$OPTARG";;
"p" ) PD="$OPTARG";;
esac
done
# TODO Ctr-C
set -e
# Select context
if [ -n "$CTX" ]; then
kubectx "$CTX"
else
kubectx
fi
# Select name space
if [ -n "$NS" ]; then
kubens "$NS"
else
kubens
fi
if [ -n "$PD" ]; then
kubectl exec -it "$PD" sh
else
# Get pod names
PODS=(`kubectl get pods --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`)
# Show interface for selecting pods
PS3='Please select the target pod: '
select POD in "${PODS[@]}"
do
# Start attaching
kubectl exec -it "$POD" sh
break
done
fi
exit 0
| true |
a5969d5186e3e205855e6f8aca221e8e4c82258b | Shell | 4nd3r/tiny-matrix-bot | /scripts-available/ping | UTF-8 | 93 | 2.5625 | 3 | [
"WTFPL"
] | permissive | #!/bin/sh -e
if [ -n "$CONFIG" ]
then
echo '^!?ping(!|\?)?$'
else
echo 'P O N G'
fi
| true |
a7a0846ce3b6aaebcd154b789bbbe1406feb602d | Shell | mfthomps/Labtainers | /labs/radius/radius/_bin/prestop | UTF-8 | 196 | 3.34375 | 3 | [] | no_license | #!/bin/bash
#
# intended for use to advise user who uses checkwork
#
run=$(ps -aux | grep [r]adius)
if [[ ! -z "$run" ]]; then
echo "radiusd running"
else
echo "radiusd is not running"
fi
| true |
606b082bf62f66287d53bee9f1519b84b36eb3fe | Shell | jahed/node-terraform | /scripts/dependency-check.sh | UTF-8 | 1,118 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -euo pipefail
if [[ "${CI-}" == "true" ]]; then
git config user.email "${GIT_EMAIL}"
git config user.name "${GITHUB_ACTOR}"
fi
echo "INSTALLING DEPENDENCIES"
npm ci
echo
echo "UPGRADING MINOR DEPENDENCIES"
npx ncu --target minor --upgrade
npm install
git add package-lock.json package.json
if ! git diff --quiet --cached --exit-code; then
git commit -m 'build(deps): upgrade minor dependencies'
fi
echo
echo "UPGRADING TRANSITIVE DEPENDENCIES"
npm upgrade
npm install
git add package-lock.json package.json
if ! git diff --quiet --cached --exit-code; then
git commit -m 'build(deps): upgrade transitive dependencies'
fi
echo
echo "CHECKING MAJOR DEPENDENCIES"
npx ncu --target latest
echo
if git diff --quiet --exit-code master origin/master; then
cat <<EOF
RESULT:
No dependencies upgraded.
Done.
EOF
exit 0
fi
echo "AUDITING DEPENDENCIES"
npm audit --audit-level=critical
echo
echo "LINTING"
npm run lint
echo
echo "BUILDING"
npm run build
echo
echo "TESTING"
npm test
echo
echo "PUSHING CHANGES"
git push
echo
cat <<EOF
RESULT:
Dependencies upgraded.
Done.
EOF
| true |
854275d8c14ac58f9d0704756cc54fd4760f0852 | Shell | vstavrinov/progeny | /progeny | UTF-8 | 230 | 3.546875 | 4 | [] | no_license | #!/bin/bash
#set -x -v
[ -z "$1" ] && exit
childs ()
{
CHILDS="$(ps ho pid --ppid $@)"
for SIBLING in $CHILDS; do
if [ -n "$SIBLING" ]; then
echo -n "$SIBLING "
childs $SIBLING;
fi
done
}
echo -n "$@ "
childs $@
| true |
6bba4bbdc6cca6147a25c371bc2b366d88454c96 | Shell | bigdotsoftware/ingenicoserver | /tests/status.sh | UTF-8 | 266 | 2.859375 | 3 | [] | no_license | #!/bin/bash
echo "###> Starting sttaus checker"
i=1
while [ $i -le 50 ]
do
#echo Iteration: $i
out=`curl -s -XGET 'http://127.0.0.1:3020/v1/ingenico_status' -H 'Content-Type: application/json'`
echo $out
let "i+=1"
sleep 1
done
echo "###> Done"
| true |
71c7a47db565b38074f6e3407d16a0af615d5028 | Shell | dslm4515/BMLFS | /build-scripts/vulkan-validation-layers.build | UTF-8 | 2,098 | 2.875 | 3 | [] | no_license | #! /bin/bash
# Vulkan Validation Layers
# Source: https://github.com/KhronosGroup/Vulkan-ValidationLayers/archive/refs/tags/v1.3.253.tar.gz
#
# $BUILD = Directory to temporarily install
# $PKGS = Directory to store built packages
#
# DEPS
# Required: vulkan-headers spirv-tools glslang spirv-headers wayland
# Recommended: NONE
# Optional: robin_hood vulkanRegistry
export CARGS="-DCMAKE_INSTALL_LIBDIR=lib "
export CARGS+="-DCMAKE_INSTALL_SYSCONFDIR=/etc "
export CARGS+="-DCMAKE_INSTALL_DATADIR=/share "
export CARGS+="-DBUILD_LAYER_SUPPORT_FILES=ON "
export CARGS+="-DCMAKE_SKIP_RPATH=True "
export CARGS+="-DBUILD_TESTS=Off "
export CARGS+="-DBUILD_WSI_XCB_SUPPORT=On "
export CARGS+="-DBUILD_WSI_XLIB_SUPPORT=On "
export CARGS+="-DBUILD_WSI_WAYLAND_SUPPORT=On "
export CARGS+="-DCMAKE_BUILD_TYPE=Release "
export CARGS+="-DUSE_ROBIN_HOOD_HASHING=OFF "
cmake -B OUT -DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_INSTALL_INCLUDEDIR="/usr/include/" \
$CARGS -Wno-dev && \
read -p "Compile?" && make -j2 -C OUT &&
sudo -S make DESTDIR=$BUILD -C OUT install &&
unset CARGS
cd $BUILD && sudo -S mkdir -v ${BUILD}/install &&
cat > /tmp/slack-desc << "EOF"
# HOW TO EDIT THIS FILE:
# The "handy ruler" below makes it easier to edit a package description. Line
# up the first '|' above the ':' following the base package name, and the '|'
# on the right side marks the last column you can put a character in. You must
# make exactly 11 lines for the formatting to be correct. It's also
# customary to leave one space after the ':' except on otherwise blank lines.
|-----handy-ruler------------------------------------------------------|
vulkan-validation-layers: vulkan-validation-layers
vulkan-validation-layers:
vulkan-validation-layers: Vulkan Validation Layers.
vulkan-validation-layers:
vulkan-validation-layers: https://github.com/KhronosGroup/Vulkan-ValidationLayers
vulkan-validation-layers:
EOF
sudo -S mv -v /tmp/slack-desc install/ &&
sudo -S makepkg -l y -c n $PKGS/vulkan-validation-layers-1.3.253-$PSUFFIX &&
sudo -S rm -rf ${BUILD}/*
| true |
b525b166cdec833f6fe6d329a5c5ecb400b7e343 | Shell | tzvetkoff/wireguard-template | /script/fixperm | UTF-8 | 410 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env bash
# vim:ft=sh:ts=2:sts=2:sw=2:et
set -e
#
# Variables
#
script_path="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
root_path="$(cd "${script_path}/.." && pwd)"
#
# Fix permissions
#
find "${root_path}" -type f -a \! -wholename "${root_path}/.git*" -print -exec chmod o-rwx,g-rwx {} +
find "${root_path}" -type d -a \! -wholename "${root_path}/.git*" -print -exec chmod o-rwx,g-rwx {} +
| true |
fbb66718338725f7d89be205dd207dc14c35d1f7 | Shell | MRCIEU/handedness-ewas | /alspac/extract-genotypes.sh | UTF-8 | 1,908 | 3.53125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
## extracts genotypes for ALSPAC children and mothers along with SNP info
##
## creates children.{bed,fam,bim}
## mother.{bed,fam,bim} files
## snps.bim.gz
##
## to be run on bluecrystalp3 where the data is located
## (I used an interactive session qsub -l nodes=1:ppn=8,walltime=6:00:00 -I)
ALSPACDIR=/panfs/panasas01/shared/alspac/studies/latest/alspac/genetic/variants/arrays/gwas/imputed/1000genomes/released/2015-06-25/data
module load apps/plink-1.90
## 1. Obtain SNP frequencies, rsid and coordinates: 'all.bim.gz'
## These files provide, for each SNP, the rsid, chromosomal position, and the first and second allele genotypes. For example:
## 1 rs58108140 0 10583 A G
## 1 rs189107123 0 10611 G C
## 1 rs180734498 0 13302 T C
## 1 rs144762171 0 13327 C G
## They are concatenated into a single file:
cat $ALSPACDIR/genotypes/bestguess/*.bim | gzip -c > snps.bim.gz
## 2. Make a list of genotype files (data is split by chromosome)
touch mergefile.txt
for i in $ALSPACDIR/genotypes/bestguess/data_chr*.bed; do
BASE=`dirname $i`/`basename $i .bed`
echo "$BASE.bed $BASE.bim $BASE.fam" >> mergefile.txt
done
## remove the files for chr01
tail -n +2 "mergefile.txt" > "mergefile.tmp" && mv "mergefile.tmp" "mergefile.txt"
## 3. Extract child genotypes
plink --bfile $ALSPACDIR/genotypes/bestguess/data_chr01 \
--merge-list mergefile.txt \
--keep $ALSPACDIR/derived/unrelated_ids/children_unrelated.txt \
--remove $ALSPACDIR/derived/unrelated_ids/children_exclusion_list.txt \
--make-bed \
--out children
## 4. Extract mother genotypes
plink --bfile $ALSPACDIR/genotypes/bestguess/data_chr01 \
--merge-list mergefile.txt \
--keep $ALSPACDIR/derived/unrelated_ids/mothers_unrelated.txt \
--remove $ALSPACDIR/derived/unrelated_ids/mothers_exclusion_list.txt \
--make-bed \
--out mothers
| true |
1abd3d5b3a2265a9183b00b9cfd259f5f6809b24 | Shell | mknyszek/pacer-model | /tools/gen-all-plots.bash | UTF-8 | 285 | 2.9375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
for file in ./data/scenarios/*; do
case=$(echo $(basename $file) | cut -f 1 -d '.')
for sim in $(go run ./cmd/pacer-sim -l | xargs); do
echo "processing: $sim-$case"
go run ./cmd/pacer-sim $sim $file | python3 $(dirname $0)/gen-plots.py $1/$sim-$case.svg
done
done
| true |
4652d22d24bd09029ffc9f8fb1735becbc265e5e | Shell | joeytwiddle/code | /c/phd/projprof/simall | UTF-8 | 6,584 | 2.6875 | 3 | [] | no_license | #!/bin/sh
del *.results
# export PATH="$PATH:."
######## Parameters
# DOCUMENTS="doc-cen doc-cen-more dar-crpage2 leaflet-cen"
# DOCUMENTS="left cen right full"
# DOCUMENTS="left" # test "dar-crpage2-left30" # left cen right full"
# DOCUMENTS="full cen dar-crpage2-left30"
# DOCUMENTS="cen"
DOCUMENTS="leafletleft leafletfull leafletcen"
# DOCUMENTS="dar-crpage2-left30"
# DOCUMENTS="dummy" # when using LINES
# LINESTODO=`seq -w 30 -2 20`
# LINESTODO=`seq -w 30 -1 02`
# LINESTODO=30
LINESTODO=11
PPSCANMETHODS="hier" # full"
# PPMEASURES="square deriv"
# PPMEASURES="willbederived"
PPMEASURES="deriv"
# VVPMETHODS="spacings simple margins"
VVPMETHODS="spacings margins"
# NOISES=`seq -w 0 03 50`
# NOISES=`seq -w 51 03 60`
# NOISES=`seq -w 51 03 60`
NOISES=0
# ANGLES=`seq -w 20 03 50`
# ANGLES="20 40 60 80"
# ANGLES="60"
# FOCALS=`seq -w 0.5 0.3 3.0`
# FOCALS="0.5"
# ANGLES=`seq 30 10 60`
# ANGLES=`seq 5 5 85`
# ANGLES=`seq 5 5 85`
# ANGLES="50"
# ANGLES=`seq 5 6 85`
# ANGLES=`seq 30 10 60`
# ANGLES=`seq 5 3 85`
# ANGLES=`seq 10 3 80`
ANGLES=`seq 30 20 70`
# ANGLES="30"
FOCALS="4.0"
######## end params
touch start
showprogress () {
D="$1"
shift
X="$1"
shift
echo "$D: $*" | sed "s/\<$X\>/[$X]/g"
}
export FOCAL;
for FOCAL in $FOCALS; do
for DOCUMENT in $DOCUMENTS; do
SRCNAME="simgs/$DOCUMENT"
for PPSCANMETHOD in $PPSCANMETHODS; do
for PPMEASURE in $PPMEASURES; do
for VVPMETHOD in $VVPMETHODS; do
# Do measure inline with scan method
# if test "$PPSCANMETHOD" = hier; then
# PPMEASURE=deriv
# else
# PPMEASURE=deriv # actually keep the same cos square can't handle noise
# # PPMEASURE=square
# fi
PPPARAMS="
-hvpcheat
-res 180 -adapt 0.03 -maxpixels 2000
-$PPMEASURE
-gamma 0.0 -recwid 200 -light -oth 12
-badx 35 -bady 21
"
# show bad point -badx 100 -bady 65"
if test "$PPSCANMETHOD" = hier; then
# Hierarchical scan
PPPARAMS="$PPPARAMS -dolowresscan -lowres 60 -lowsmooth 1"
# else
# Full PP scan
# PPPARAMS="$PPPARAMS"
fi
case "$VVPMETHOD" in
"margins")
# Margins method
PPPARAMS="$PPPARAMS -usetwolines"
;;
"spacings")
# Fitting with best method
PPPARAMS="$PPPARAMS -spacings"
;;
esac
PPCOM="./pp $PPPARAMS"
export PPCOM;
for NOISE in $NOISES; do
for LINES in $LINESTODO; do
if "$DOCUMENTS" = "dummy"; then
SRCNAME="simgs/dar-crpage2-left$LINES"
fi
IFILE="$SRCNAME.bmp"
RES="results.txt"
FOCALRES="focalres.txt"
rm -rf res
mkdir -p res
echo "0" > res/.nosync
rm -f "$RES"
echo "START" > "$RES"
rm -f "$FOCALRES"
echo "" > "$FOCALRES"
# for roll in `listnums -6 6`; do
roll="90";
for yaw in $ANGLES; do
for pitch in $ANGLES; do
(
showprogress "focal length " $FOCAL $FOCALS
showprogress "document " $DOCUMENT $DOCUMENTS
showprogress "pp scan method" $PPSCANMETHOD $PPSCANMETHODS
showprogress "pp measure " $PPMEASURE $PPMEASURES
showprogress "vvp method " $VVPMETHOD $VVPMETHODS
showprogress "% noise " $NOISE $NOISES
showprogress "# lines " $LINES $LINESTODO
showprogress "yaw " $yaw $ANGLES
showprogress "pitch " $pitch $ANGLES
) > current
echo
echo "::::::::::::::::::::: $roll $yaw $pitch :::::::::::::::::::::"
echo
# simone "$IFILE" $roll $yaw $pitch -imgnoise $NOISE
# simone
OFILE="tmp.bmp"
INVFILE="tmp-inv.bmp"
SIMPARAMS="
-qnd
-centralise -focal $FOCAL -noise 0.0 -lines $LINES -yoff 0.001
-roll $roll -yaw $yaw -pitch $pitch
-overlay -image $IFILE $OFILE -size 0.7
-gnuplot
-imgnoise $NOISE
"
if test "$VVPMETHOD" = spacings; then
SIMPARAMS="$SIMPARAMS -spacings"
fi
curseyellow
echo ../gentestimg/simgen $SIMPARAMS
cursegrey
../gentestimg/simgen $SIMPARAMS | tee genres.txt
mv gplsolve.txt gplsimsolve.txt
mv gpldata.txt gplsimdata.txt
mv gplfit.ps gplsimfit.ps
curseyellow
echo invert -i "$OFILE" -o "$INVFILE"
cursegrey
invert -i "$OFILE" -o "$INVFILE"
echo ./testsim "$INVFILE" "$OFILE"
rm recover.bmp
# time ./testsim "$INVFILE" "$OFILE" > ppres.txt
time ./testsim "$INVFILE" "$OFILE" | tee ppres.txt
# end simone
FOCALEST=`grep "Got focal length" ppres.txt | sed "s/Got focal length //"`
echo "$yaw $pitch $FOCALEST" >> "$FOCALRES"
cp genres.txt res/genres-r$roll-y$yaw-p$pitch.txt
cp ppres.txt res/ppres-r$roll-y$yaw-p$pitch.txt
./extractone $roll $yaw $pitch >> "$RES"
gzip res/genres-r$roll-y$yaw-p$pitch.txt
gzip res/ppres-r$roll-y$yaw-p$pitch.txt
convert "lowresmap.bmp" -geometry 100 "res/r$roll-y$yaw-p$pitch-0lowresmap.jpg"
convert "ppmap.bmp" -geometry 100 "res/r$roll-y$yaw-p$pitch-1ppmap.jpg"
# convert "overlay.bmp" -geometry 100 "res/r$roll-y$yaw-p$pitch-3origsim.jpg"
# convert "gpl0fit.ps" -geometry 100 "res/r$roll-y$yaw-p$pitch-2desfit.jpg"
# convert "gplfit.ps" -geometry 100 "res/r$roll-y$yaw-p$pitch-3fit.jpg"
convert "origover.bmp" -geometry 100 "res/r$roll-y$yaw-p$pitch-4ovr.jpg"
if test -f "recover.bmp"; then
convert "recover.bmp" -geometry 100 "res/r$roll-y$yaw-p$pitch-5rec.jpg"
fi
# ./showdesiredspacings.sh
# convert "gpl1fit.ps" -geometry 300 "res/r$roll-y$yaw-p$pitch-6desfit.jpg"
rm -f fit.log
# echo "Press a key"
# waitforkeypress
xv origover.bmp overlay.bmp
gv gplfit.ps
done # pitch
done # yaw
# done # roll
gunzip res/*.txt.gz
tar cfz "$DOCUMENT-$PPSCANMETHOD-$VVPMETHOD-$NOISE-ppreg.tgz" res/*.txt
# mv "$RES" "lines-$LINES.results"
# mv "$RES" "$DOCUMENT-$PPSCANMETHOD-$VVPMETHOD.results"
# mv "$RES" "$DOCUMENT-$PPSCANMETHOD-$PPMEASURE-$VVPMETHOD-l$LINES-n$NOISE.results"
mv "$RES" "$DOCUMENT-$PPSCANMETHOD-$PPMEASURE-$VVPMETHOD-l$LINES-n$NOISE-f$FOCAL.results"
done # lines
done # noise
done # vvp method
done # hvp pp measure
done # hvp searchspace scan method
done # document
done # focal
touch ./done
| true |
8fb46d03dc52b175a8758542f008d2d0b345c7b4 | Shell | loveshenshen/swoft-base | /tracker/deploy_env.sh | UTF-8 | 12,549 | 3.46875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# swoole Enterprise installer rewritten by dixyes
PORTS_TO_DETECT='53456,60286,60986,55332,44624,51449';
# for zsh compatiable
[ -n "$ZSH_VERSION" ] && emulate -L ksh;
# log utils
logi(){
printf "[0;1;32m[IFO][0;1m $*[0m\n"
}
logw(){
printf "[0;1;33m[WRN][0;1m $*[0m\n"
}
loge(){
printf "[0;1;31m[ERR][0;1m $*[0m\n"
}
logi "Swoole Tracker NodeAgent installer"
[ "`uname -s 2>&1`"x != 'Linuxx' ] && loge "Swoole Enterprise NodeAgent only support linux OS (not UNIX or macOS)." && exit 22 # 22 for EINVAL
[ "`id -u 2>&1`"x != '0x' ] && loge "This install script should be run as root." && exit 13 # 13 for EACCES
# this regex will match 99.999.9999.9999 or something like this, however, sometimes you can bind such a string as domain.(i.e. link in docker)
if [ ! -f "./app_deps/node-agent/userid" ]
then
if [ "`echo $1 | grep -E '^([a-zA-Z0-9][a-zA-Z0-9\-]*?\.)*[a-zA-Z0-9][a-zA-Z0-9\-]*?\.?$'`"x = "x" ]
then
logi "Usage: $0 <remote>\n\twhich \"remote\" should be ipv4 address or domain of swoole-admin host\n" &&
exit 22 # 22 for EINVAL
else
ADMIN_ADDR=$1
fi
else
ADMIN_ADDR="www.swoole-cloud.com"
fi
# install files
logi 'Start Installing node-agent files'
# 2>&- closes stderr, silencing "already exists"-style noise from mkdir.
mkdir -p /opt/swoole/node-agent /opt/swoole/logs /opt/swoole/public /opt/swoole/config 2>&-
# World-writable so the agent can log here regardless of which user it runs as.
chmod 777 /opt/swoole/logs
logi ' Installing files at /opt/swoole/node-agent'
# Drop the old script symlink before re-creating it below.
rm -rf /opt/swoole/script
cp -rf ./app_deps/node-agent/* /opt/swoole/node-agent/
ln -s /opt/swoole/node-agent/script/ /opt/swoole/script
chmod +x /opt/swoole/script/*.sh
chmod +x /opt/swoole/script/php/swoole_php
logi ' Installing files at /opt/swoole/public'
cp -rf ./app_deps/public/framework /opt/swoole/public/
# clean cache
logi 'Clean caches at /tmp/mostats'
rm -rf /tmp/mostats
# backup config file: yet no use.
logi 'Backing up config file at /opt/swoole/config to /tmp/swconfigback'
if [ -d "/opt/swoole/config/" ];then
cp -rf /opt/swoole/config /tmp/swconfigback
rm -rf /opt/swoole/config
fi
cp -rf ./app_deps/public/config /opt/swoole/
chmod -R 777 /opt/swoole/config
chmod -R 777 /opt/swoole
# remove legacy system-side supervisor nodeagent
if [ -f /etc/supervisor/conf.d/node-agent.conf ]
then
logw 'Removing legacy system-wide supervisor files for nodeagent.'
supervisorctl -c /etc/supervisor/supervisord.conf stop node-agent >>/tmp/na_installer.log 2>&1
rm -f /etc/supervisor/conf.d/node-agent.conf >>/tmp/na_installer.log 2>&1
supervisorctl -c /etc/supervisor/supervisord.conf update >>/tmp/na_installer.log 2>&1
fi
# remove legacy supervisor in /opt/swoole/pysandbox
if [ -e /opt/swoole/pysandbox ]
then
logw 'Removing legacy supervisor files at /opt/swoole'
# A live socket means the old venv supervisor is still running; shut it
# down before deleting its files.
if [ -S /opt/swoole/supervisor/supervisor.sock ]
then
logw ' Stopping legacy python venv supervisor'
/opt/swoole/pysandbox/bin/supervisorctl -c /opt/swoole/supervisor/supervisord.conf shutdown
fi
rm -rf /opt/swoole/pysandbox
rm -rf /opt/swoole/supervisor
fi
# (Deprecated) use this to disable supervisor check
#logi "Workaround for supervisor dir check"
#echo "{\"supervisor\":{\"config_dir\":[\"/opt/swoole/config\"]}}" > /opt/swoole/config/config.json
# NOTE(review): ">" truncates the installer log here while every other step
# appends with ">>" — confirm whether that is intentional.
mv /opt/swoole/node-agent/userid /opt/swoole/config/ >/tmp/na_installer.log 2>&1
# Install the external commands the agent needs (ps/kill etc.); skip the
# package-manager work entirely when both are already present.
if type kill ps >>/tmp/na_installer.log 2>&1
then
logi 'All dependencies are ok, skipping dependencies installion.'
else
# find package manager ,then install dependencies
# use varibles for future use.(may be removed if it takes no use at all.)
# TODO: dnf, yast
if type apt-get >>/tmp/na_installer.log 2>&1
then
logi 'super moo power detected, using apt-get to install dependencies.'
logi ' Updating apt cache.'
apt-get update -y >>/tmp/na_installer.log 2>&1
type ps kill >>/tmp/na_installer.log 2>&1 || {
logi ' Installing procps for commands: ps, kill.'
apt-get install -y --no-install-recommends procps >>/tmp/na_installer.log 2>&1
}
elif type yum >>/tmp/na_installer.log 2>&1
then
logi 'yellow dog detected, using yum to install dependencies.'
logi ' Updating yum cache.'
yum makecache >>/tmp/na_installer.log 2>&1
# rpm distros' coreutils have ps, kill things in it, we needn't expclit install it : when script go here, it's installed
logi ' Installing coreutils ( you should not see this unless your ps or kill command broken ).'
yum install -y coreutils >>/tmp/na_installer.log 2>&1
fi
fi
# Alpine-style systems: apk is checked unconditionally (outside the chain
# above) because the musl workaround below must run even if ps/kill exist.
if type apk >>/tmp/na_installer.log 2>&1
then
logi 'coffe-making-able package manager detected, using apk to install dependencies.'
logi ' Updating apk cache.'
apk update >>/tmp/na_installer.log 2>&1
# ldd is for determining libc type.
type ldd >>/tmp/na_installer.log 2>&1 || {
logi ' Installing libc-utils for command: ldd.' &&
apk add -q libc-utils >>/tmp/na_installer.log 2>&1
}
# musl workaround TODO: use AWSL rebuild binaries.
if [ "`ldd 2>&1 | grep -i musl`x" != "x" ]
then
logw 'You are using musl as libc, and you have apk,'
logw ' assuming you are using alpine 3+, preparing dynamic libraries for running shared libraries.'
# TODO:assuming using alpine, will be modified to work with all musl distros.
apk add --allow-untrusted musl-compat/glibc-2.29-r0.apk >>/tmp/na_installer.log 2>&1
cp musl-compat/libgcc_s.so.1 /usr/glibc-compat/lib
fi
fi
# if something still not installed ,error out
lack_deps=""
type kill >>/tmp/na_installer.log 2>&1 || lack_deps="${lack_deps} kill(from coreutils or psproc(at debian variants)),"
type ps >>/tmp/na_installer.log 2>&1 || lack_deps="${lack_deps} ps(from coreutils or psproc(at debian variants)),"
type df >>/tmp/na_installer.log 2>&1 || lack_deps="${lack_deps} df(from coreutils),"
type rm >>/tmp/na_installer.log 2>&1 || lack_deps="${lack_deps} rm(from coreutils),"
type cat >>/tmp/na_installer.log 2>&1 || lack_deps="${lack_deps} cat(from coreutils),"
type chmod >>/tmp/na_installer.log 2>&1 || lack_deps="${lack_deps} chmod(from coreutils),"
# ${lack_deps%,} strips the trailing comma from the accumulated list.
[ "x" != "x$lack_deps" ] && loge "for command(s) ${lack_deps%,}: Dependency installation failed, you need install dependencies mannually, then execute $0." && exit 1
# check remote state and write config.
# Ports probed on the admin host; overridable via the environment.
PORTS_TO_DETECT=${PORTS_TO_DETECT-'9903,9904,9907,9981,9990,8995'}
logi 'Checking if admin host is up (and accessible), then generate config.'
# Inline PHP probe: synchronously TCP-connect to every detection port and
# exit non-zero on the first unreachable one.  $PORTS_TO_DETECT and
# $ADMIN_ADDR are expanded by the shell before PHP sees the script; the
# escaped \$ variables belong to PHP.
/opt/swoole/script/php/swoole_php << EOF
<?php
foreach([$PORTS_TO_DETECT] as \$port){
\$cli = new Swoole\\Client(SWOOLE_SOCK_TCP, SWOOLE_SOCK_SYNC);
if (!@\$cli->connect("$ADMIN_ADDR", \$port)){
exit(1);
}
\$cli->close();
}
exit(0);
EOF
if [ "$?" != "0" ]
then
loge "Remote host $ADMIN_ADDR not accessable, check network connection or remote swoole-admin state." >&2 &&
exit 11 # 11 for EAGAIN
fi
# save config json
echo '{"ip":{"product":"'$ADMIN_ADDR'","local":"127.0.0.1"}}' > /opt/swoole/config/config_ip.conf
#echo '' > /opt/swoole/config/config_port.conf
# add startup scripts for openrc / sysvinit / systemd
# Branch 1: OpenRC (rc-status identifies itself).  In every heredoc below the
# \$ and \` sequences defer expansion to service run time; unescaped $ and `
# are expanded now, at install time.
if [ "x`rc-status -V 2>&1 | grep -i openrc`" != "x" ]
then
# openrc init
logi 'Installing node-agent startup script for OpenRC.'
cat > /etc/init.d/node-agent << EOF
#!`if [ -e '/sbin/openrc-run' ]; then echo /sbin/openrc-run; else echo /sbin/runscript; fi;`
name="Swoole Enterprise NodeAgent"
command="/opt/swoole/script/php/swoole_php"
pidfile="/var/run/node-agent.pid"
command_args="/opt/swoole/node-agent/src/node.php"
command_user="root"
command_background="yes"
start_stop_daemon_args="--make-pidfile --stdout /opt/swoole/logs/node-agent_stdout.log --stderr /opt/swoole/logs/node-agent_stderr.log"
depend() {
need net
}
stop() {
[ ! -e \${pidfile} ] && return
ebegin "Stopping \${name}"
/opt/swoole/script/php/swoole_php /opt/swoole/script/killtree.php \`cat \${pidfile}\`
retval=\$?
[ "0" = \$retval ] && rm \${pidfile}
eend \$retval
}
EOF
chmod 755 /etc/init.d/node-agent
# (Re)start through OpenRC; output goes to the installer log.
rc-service node-agent stop >>/tmp/na_installer.log 2>&1
rc-service node-agent start >>/tmp/na_installer.log 2>&1
# Branch 2: systemd (PID 1 reports systemd, or systemctl exists).
elif [ "x`/proc/1/exe --version 2>&1 | grep -i systemd`" != "x" ] || type systemctl >>/tmp/na_installer.log 2>&1
then
logi 'Installing node-agent startup script for systemd.'
cat > /etc/systemd/system/node-agent.service << EOF
[Unit]
Description=Swoole Enterprise NodeAgent
After=network.target
[Service]
Type=simple
PIDFile=/var/run/node-agent.pid
ExecStart=/opt/swoole/script/php/swoole_php /opt/swoole/node-agent/src/node.php
ExecStop=/opt/swoole/script/php/swoole_php /opt/swoole/script/killtree.php \$MAINPID
Restart=on-failure
RestartSec=60s
[Install]
WantedBy=multi-user.target
EOF
chmod 664 /etc/systemd/system/node-agent.service
# this may fail in docker 'cause can't access dbus-daemon, thus execute it here.
systemctl daemon-reload >>/tmp/na_installer.log 2>&1
systemctl stop node-agent.service >>/tmp/na_installer.log 2>&1
systemctl restart node-agent.service >>/tmp/na_installer.log 2>&1
if [ x`systemctl show node-agent.service -p ActiveState` = x'ActiveState=active' ]
then
logi ' Done restart systemd service.'
else
logw ' (Re)start systemd service failed (maybe in docker?).'
fi
# Branch 3: upstart or a sysvinit-like system (chkconfig present).
elif [ "x`/proc/1/exe --version 2>&1 | grep -i upstart`" != "x" ] || type chkconfig >>/tmp/na_installer.log 2>&1
then
# upstart / sysvlike init
logi 'Installing node-agent startup script for sysvinit-like systems.'
cat > /etc/init.d/node-agent << EOF
#!/bin/bash
#
# node-agent Swoole Enterprise NodeAgent
#
# chkconfig: 345 99 04
# description: Swoole Enterprise NodeAgent
#
# processname: swoole_php
# pidfile: /var/run/node-agent.pid
#
### BEGIN INIT INFO
# Provides: node-agent
# Required-Start: \$all
# Required-Stop: \$all
# Short-Description: node-agent
# Description: Swoole Enterprise NodeAgent
### END INIT INFO
# Source function library
. /etc/rc.d/init.d/functions 2>&- ||
. /etc/init.d/functions 2>&- ||
. /lib/lsb/init-functions 2>&1
# Path to the supervisorctl script, server binary,
# and short-form for messages.
prog=node-agent
pidfile="/var/run/node-agent.pid"
lockfile="/var/lock/subsys/node-agent"
STOP_TIMEOUT=60
RETVAL=0
start() {
echo -n "Starting \$prog... "
[ -e \${lockfile} ] && echo already started && exit 1
if [ "\`which start-stop-daemon 2>&- \`x" != "x" ]
then
start-stop-daemon --pidfile \${pidfile} --start --startas /bin/bash -- -c '/opt/swoole/script/php/swoole_php /opt/swoole/node-agent/src/node.php >>/opt/swoole/logs/node-agent_stdout.log 2>>/opt/swoole/logs/node-agent_stderr.log & echo -n \$! > '\${pidfile}
RETVAL=\$?
else
daemon --pidfile \${pidfile} '/opt/swoole/script/php/swoole_php /opt/swoole/node-agent/src/node.php >>/opt/swoole/logs/node-agent_stdout.log 2>>/opt/swoole/logs/node-agent_stderr.log & echo -n \$! > '\${pidfile};
RETVAL=\$?
fi
echo
[ -d /var/lock/subsys ] && touch \${lockfile}
return \$RETVAL
}
stop() {
echo -n "Stopping \$prog... "
[ -e \${pidfile} ] && /opt/swoole/script/php/swoole_php /opt/swoole/script/killtree.php \`cat \${pidfile}\`
RETVAL=\$?
echo
[ \$RETVAL -eq 0 ] && rm -rf \${lockfile} \${pidfile}
}
restart() {
stop
start
}
case "\$1" in
start)
start
;;
stop)
stop
;;
status)
status -p \${pidfile} /opt/swoole/script/php/swoole_php
;;
restart)
restart
;;
condrestart|try-restart)
if status -p \${pidfile} /opt/swoole/script/php/swoole_php >&-; then
restart
fi
;;
*)
echo \$"Usage: \$prog {start|stop|restart|condrestart|try-restart}"
RETVAL=2
esac
exit \$RETVAL
EOF
chmod 755 /etc/init.d/node-agent
/etc/init.d/node-agent stop >>/tmp/na_installer.log 2>&1
/etc/init.d/node-agent restart
else
logw 'Unable to determine init system type (maybe in docker?).'
logw 'You can mannually add nodeagent into your init system (or docker entrypoint).'
fi
# Final operator hints; the agent is installed but not enabled at boot.
logi "Note: if you are using node-agent in docker,"
logi "\tmannually add \`/opt/swoole/script/php/swoole_php /opt/swoole/node-agent/src/node.php\` into your entrypoint."
logi "Note: this script won't enable init script automatically,"
logi "\tuse \`systemctl enable node-agent\`(on systemd systems)"
logi "\tor \`rc-update add node-agent\`(on openrc systems) to enable it."
logi Done
| true |
25e31840b4e8945f242e486539ad05fd7d0ef6fa | Shell | Magpol/hashFilesFromImages | /hashFilesFromImages.sh | UTF-8 | 1,886 | 3.953125 | 4 | [] | no_license | #!/bin/bash
# https://github.com/Magpol/
# Hash algorithm passed to shasum's -a flag (256 -> SHA-256).
HASHTYPE="256"

# Need at least <mode> and <path-to-images>.
# NOTE: the original test was [[ $# < 2 ]]; inside [[ ]] the "<" operator
# compares lexicographically (e.g. [[ 10 < 2 ]] is true), so an arithmetic
# comparison is used instead.
if (( $# < 2 )) ; then
echo 'hashFilesFromImages.sh w||r <PATH TO IMAGES> <HASHFILE>'
echo 'hashFilesFromImages.sh w testImages/ <-- hashes all images in testImages and writes output to testImages/<IMAGENAME>.txt'
echo 'hashFilesFromImages.sh r testImages/ hashes.txt <-- check if hash(es) specified in hashes.txt matches in testImages/'
exit 0
fi
if [[ $1 == 'w' ]];then
    # ${2%/} strips one trailing slash, matching the original echo|sed
    # pipeline without spawning extra processes; the glob expands to every
    # entry in the image directory.
    for f in "${2%/}"/*
    do
        COUNTER=0
        # Skip the report files this script writes next to the images.
        if [[ $f == *.txt ]];then
            continue
        fi
        echo "Processing $f"
        #LINUX
        # Byte offset of the first partition whose sector count has 10 digits.
        OFFSET=$(mmls -aM "$f"|grep -m 1 -P "\d{10}"|awk '{print $3}')
        if [ -f "$f.txt" ]; then
            echo "$f.txt already exist."
            continue
        fi
        # fls lists inode numbers; awk strips the trailing ":" and "*" markers.
        while IFS= read -r result
        do
            if [ ! -z "$result" ]; then
                NAME=$(ffind -o "$OFFSET" "$f" "$result")
                HASH=$(icat -o "$OFFSET" "$f" "$result" | shasum -a "$HASHTYPE" | cut -d' ' -f1)
                # Quote expansions so file names containing spaces stay intact.
                printf "%s;%s\n" "$NAME" "$HASH" >> "$f.txt"
                COUNTER=$((COUNTER + 1))
            fi
        done < <(fls -o "$OFFSET" -F -r "$f"| awk '{sub(/:|\*/, "", $2);print $2}')
        printf "%s is done! Total files processed: %d\n" "$f" "$COUNTER"
    done
elif [[ $1 == 'r' ]];then
    for f in "${2%/}"/*
    do
        COUNTER=0
        if [[ $f == *.txt ]];then
            continue
        fi
        echo "Processing $f"
        #LINUX
        OFFSET=$(mmls -aM "$f"|grep -m 1 -P "\d{10}"|awk '{print $3}')
        while IFS= read -r result
        do
            if [ ! -z "$result" ]; then
                NAME=$(ffind -o "$OFFSET" "$f" "$result")
                HASH=$(icat -o "$OFFSET" "$f" "$result" | shasum -a "$HASHTYPE" | cut -d' ' -f1)
                # Report only hashes present in the supplied hash list ($3);
                # "--" protects against hashes that could look like options.
                if grep -q -- "$HASH" "$3"; then
                    printf "[*] MATCH -- %s;%s\n" "$NAME" "$HASH"
                fi
                COUNTER=$((COUNTER + 1))
            fi
        done < <(fls -o "$OFFSET" -F -r "$f"| awk '{sub(/:|\*/, "", $2);print $2}')
        printf "%s is done! Total files processed: %d\n" "$f" "$COUNTER"
    done
fi
| true |
1dcda9257304beef881dec0ec3da1520ee238e00 | Shell | pixelpox/dotfiles | /.bash_aliases | UTF-8 | 569 | 3.125 | 3 | [] | no_license | alias ll='ls -lah --color=auto'
# Listing / navigation shortcuts.
alias llt='ls -laht --color=auto'
alias c='clear'
alias h='history'
alias g='git'
# Colorize grep matches by default.
alias grep='grep --color=auto'
# wget: resume partial downloads automatically.
alias wget='wget -c '
alias untar='tar -zxvf '
# NOTE(review): remaps nano to vi — looks like a deliberate preference.
alias nano='vi'
# Quick connectivity check against Google's public DNS.
alias ping8="ping 8.8.8.8"
# if user is not root, pass all commands via sudo #
if [ $UID -ne 0 ]; then
alias reboot='sudo reboot'
# NOTE(review): despite the name, this runs "apt-get upgrade", not "update".
alias update='sudo apt-get upgrade'
fi
# Make unified diff syntax the default
alias diff="diff -u"
# Docker-compose shorthand
alias dc='docker-compose'
alias dcu='docker-compose up -d'
alias dcd='docker-compose down'
| true |
a09cad168a3326ae3cf7ef6c024425bec9c7e15a | Shell | gnocchixyz/gnocchi | /run-upgrade-tests.sh | UTF-8 | 4,254 | 3.71875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
# Select the indexer/storage pair to test from the single CLI argument and
# spawn the backing daemons as throwaway instances via pifpaf; the eval'd
# output exports connection variables (INDEXER_URL, STORAGE_* ...).
if [ "$1" == "postgresql-file" ]; then
eval $(pifpaf --env-prefix INDEXER run postgresql)
elif [ "$1" == "mysql-ceph" ]; then
eval $(pifpaf --env-prefix INDEXER run mysql)
eval $(pifpaf --env-prefix STORAGE run ceph)
else
echo "error: unsupported upgrade type"
exit 1
fi
# Scratch directory used for file storage and the before/after dumps.
export GNOCCHI_DATA=$(mktemp -d -t gnocchi.XXXX)
echo "* Installing Gnocchi from ${GNOCCHI_VERSION_FROM}"
pip install -q --force-reinstall git+https://github.com/gnocchixyz/gnocchi.git@${GNOCCHI_VERSION_FROM}#egg=gnocchi[${GNOCCHI_VARIANT}]
# Resources exercised by the upgrade test; the last entry covers non-UUID
# original resource ids.
RESOURCE_IDS=(
"5a301761-aaaa-46e2-8900-8b4f6fe6675a"
"5a301761-bbbb-46e2-8900-8b4f6fe6675a"
"5a301761-cccc-46e2-8900-8b4f6fe6675a"
"non-uuid"
)
dump_data(){
# Snapshot the resource listing and the min/max/mean/sum aggregates of the
# "metric" metric for every known resource into the directory given as $1.
# Runs once before and once after the upgrade so the outputs can be diffed.
dir="$1"
mkdir -p $dir
echo "* Dumping measures aggregations to $dir"
gnocchi resource list -c id -c type -c project_id -c user_id -c original_resource_id -c started_at -c ended_at -c revision_start -c revision_end | tee $dir/resources.list
for rid in ${RESOURCE_IDS[@]} $RESOURCE_ID_EXT; do
for method in min max mean sum ; do
gnocchi measures show --aggregation $method --resource-id $rid metric -f json > $dir/${method}.json
done
done
}
inject_data() {
# Create the test resources, push ~28800 random measures per resource in a
# single batch call, then wait until the processing backlog drains.
echo "* Injecting measures in Gnocchi"
# TODO(sileht): Generate better data that ensure we have enought split that cover all
# situation
for resource_id in ${RESOURCE_IDS[@]}; do
gnocchi resource create generic --attribute id:$resource_id -n metric:high > /dev/null
done
# Create a resource with an history
gnocchi resource-type create ext --attribute someattr:string:false:max_length=32 > /dev/null
gnocchi resource create --type ext --attribute someattr:foobar -n metric:high historized_resource > /dev/null
gnocchi resource update --type ext --attribute someattr:foobaz historized_resource > /dev/null
# Build one JSON payload {"<resource>": {"metric": [measures...]}, ...} in a
# command group and stream it to the batch endpoint on stdin.
{
measures_sep=""
MEASURES=$(python -c 'import datetime, random, json; now = datetime.datetime.utcnow(); print(json.dumps([{"timestamp": (now - datetime.timedelta(seconds=i)).isoformat(), "value": random.uniform(-100000, 100000)} for i in range(0, 288000, 10)]))')
echo -n '{'
resource_sep=""
for resource_id in ${RESOURCE_IDS[@]} $RESOURCE_ID_EXT; do
echo -n "$resource_sep \"$resource_id\": { \"metric\": $MEASURES }"
resource_sep=","
done
echo -n '}'
} | gnocchi measures batch-resources-metrics -
echo "* Waiting for measures computation"
# Poll until metricd reports no pending measures.
while [ $(gnocchi status -f value -c "storage/total number of measures to process") -gt 0 ]; do sleep 1 ; done
}
# Placeholder; presumably replaced by the environment exported from
# "eval $(pifpaf run gnocchi ...)" below — TODO confirm.
pifpaf_stop(){
:
}
# EXIT hook: stop gnocchi, drop the scratch data and tear down the
# indexer/storage daemons; "|| true" keeps cleanup going under set -e.
cleanup(){
pifpaf_stop
rm -rf $GNOCCHI_DATA
indexer_stop || true
[ "$STORAGE_DAEMON" == "ceph" ] && storage_stop || true
}
trap cleanup EXIT
# Pick the storage backend URL matching the daemons started earlier.
if [ "$STORAGE_DAEMON" == "ceph" ]; then
ceph -c $STORAGE_CEPH_CONF osd pool create gnocchi 16 16 replicated
STORAGE_URL=ceph://$STORAGE_CEPH_CONF
else
STORAGE_URL=file://$GNOCCHI_DATA
fi
# --- Old version: start gnocchi, inject data, snapshot the aggregates. ---
eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL)
export OS_AUTH_TYPE=gnocchi-basic
export GNOCCHI_USER=$GNOCCHI_USER_ID
original_statsd_resource_id=$GNOCCHI_STATSD_RESOURCE_ID
inject_data $GNOCCHI_DATA
dump_data $GNOCCHI_DATA/old
pifpaf_stop
# --- Upgrade to the checked-out version and snapshot again. ---
new_version=$(python setup.py --version)
echo "* Upgrading Gnocchi from $GNOCCHI_VERSION_FROM to $new_version"
pip install -v -U .[${GNOCCHI_VARIANT}]
eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL)
# Gnocchi 3.1 uses basic auth by default
export OS_AUTH_TYPE=gnocchi-basic
export GNOCCHI_USER=$GNOCCHI_USER_ID
# pifpaf creates a new statsd resource on each start
gnocchi resource delete $GNOCCHI_STATSD_RESOURCE_ID
dump_data $GNOCCHI_DATA/new
echo "* Checking output difference between Gnocchi $GNOCCHI_VERSION_FROM and $new_version"
# This asserts we find the new measures in the old ones. Gnocchi > 4.1 will
# store less points because it uses the timespan and not the points of the
# archive policy
for old in $GNOCCHI_DATA/old/*.json; do
new=$GNOCCHI_DATA/new/$(basename $old)
python -c "import json; old = json.load(open('$old')); new = json.load(open('$new')); assert all(i in old for i in new)"
done
| true |
98aa533eb9ceb7ae6aaf3f0d48a6b1f3b2105be9 | Shell | autopkg/foigus-recipes | /GMCSoftware/Reference Scripts 12 CC 2018/Original GMC Software/postinstall | UTF-8 | 1,329 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Locate sibling support files shipped next to this installer script.
scriptDir=$(dirname "$0")
. "$scriptDir/common.sh"
. "$scriptDir/installer.data"
# Staged plugin payload and handoff files written by an earlier install step.
src="/tmp/InspireDesignerIn.InDesignPlugin"
dest="/tmp/com.quadient.InDesignPlugin.destination"
licenseCFG="/tmp/com.quadient.InDesignPlugin.config"
licenseLIC="/tmp/com.quadient.PNetT.lic"
# $dest contains the chosen InDesign application folder; without it there is
# nothing to install into.
if [ -f "$dest" ]; then
pluginDest="$(cat "$dest")/Plug-Ins"
iniFileDest="$pluginDest/InspireDesignerIn.ini"
if [ -d "$pluginDest" ]; then
# Copy the plugin bundle (ditto --rsrc preserves resource forks) and, only
# on success, remove the handoff file and the staged payload.
ditto --rsrc "$src" "$pluginDest/InspireDesignerIn.InDesignPlugin" && rm -f "$dest" && rm -rf "$src"
echo "LINESPACE=EXACT,GADIALOG=YES,OUTPUT=FLOWAREA">"$iniFileDest" && chmod 777 "$iniFileDest"
mkdir -p "$pluginDest/InspireDesignerIn.InDesignPlugin/Frameworks/License"
# Optional license configuration captured earlier by the installer.
if [ -f "$licenseCFG" ]; then
cp -f "$licenseCFG" "$pluginDest/InspireDesignerIn.InDesignPlugin/Frameworks/License/PrintNetTExporter.config" && rm -f "$licenseCFG"
fi
# $licenseLIC holds the *path* of the real .lic file to copy in.
if [ -f "$licenseLIC" ]; then
cp -f "$(cat "$licenseLIC")" "$pluginDest/InspireDesignerIn.InDesignPlugin/Frameworks/License/" && rm -f "$licenseLIC"
fi
# Create a top-level "License" symlink inside the plugin bundle.
pushd "$pluginDest/InspireDesignerIn.InDesignPlugin/"
ln -s "./Frameworks/License"
popd
fi
fi
# Cleanup temporary folder dirty from previous versions
rm -rf "$HOME/Library/Application Support/Quadient/InspireExportInd"
# Forget the pkg receipt in the background (helper from the sourced files).
ForgetPackageWithDelay 10 "$INDD_PLUGIN_IDENTIFIER" &
exit 0
a66d95a9cbb51eba7ed878d19a396dfb7d9baa09 | Shell | fultonj/oooq | /over/debug-ceph.sh | UTF-8 | 648 | 3.375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# speed up this process:
# https://docs.openstack.org/tripleo-docs/latest/install/advanced_deployment/ansible_config_download.html#tags
source ~/stackrc
DIR=/var/lib/mistral/overcloud
if [[ ! -e $DIR ]]; then
echo "$DIR does not exist"
exit 1
fi
# With an argument: only "clean" is recognized; anything else is a no-op.
if [ -n "$1" ]; then
if [[ $1 == "clean" ]]; then
echo "Deleting all files in $DIR owned by $USER so mistral will not have permissions issues"
find $DIR -user $USER -exec rm -rf {} \;
fi
else
# No argument: grant ourselves access to mistral's config-download files,
# then replay only the external deploy steps (ceph etc.) of the playbook.
sudo setfacl -R -m u:$USER:rwx /var/lib/mistral
ls -l external_deploy_steps_tasks.yaml
bash ansible-playbook-command.sh --tags external_deploy_steps
fi
| true |
d0baf20c6ce780c970620fdae5a22df9382ce23e | Shell | giorgos1987/automation_workflow | /step600indexing.sh | UTF-8 | 439 | 2.53125 | 3 | [] | no_license | #!/bin/bash
# SSH identity used to reach the remote indexing host.
KEY=/root/.ssh/wrkrootuserrsa
LOGS=/home/workflow/dataflows/010ingestion/scripts/OEORGANICEPRINTS/CRON_scripts/logs
echo "start indexing for ORGEPRINTS at $(date)" >> ${LOGS}/cron_ingest.log
#Run the remotely indexing script
ssh -i $KEY user@83.212.115.164 '/home/user/scripts_cron/index_eprints.sh'
echo "indexing Done for ORGEPRINTS !Step step600indexing completed at $(date)!" >> ${LOGS}/cron_ingest.log
exit;
| true |
e141d6bf767ef85ff779a81b3f63835a30a61296 | Shell | xuerongCode/software-developer-coding-challenge | /server/run.sh | UTF-8 | 9,616 | 2.796875 | 3 | [] | no_license | #!/bin/sh
# GET /user — list all users (prints the request, then the response).
function showAllUsers() {
    echo "curl -X GET http://localhost:8080/user"
    # $() replaces legacy backticks; quoting the substitution prints the JSON
    # response verbatim instead of word-splitting and glob-expanding it.
    echo "$(curl -X GET http://localhost:8080/user)"
}

# GET /vehicle — list all vehicles.
function showAllVehicles() {
    echo "curl -X GET http://localhost:8080/vehicle"
    echo "$(curl -X GET http://localhost:8080/vehicle)"
}

# GET /auction — list all auctions.
function showAllAuctions() {
    echo "curl -X GET http://localhost:8080/auction"
    echo "$(curl -X GET http://localhost:8080/auction)"
}

# GET /bid — list all bids.
function showAllBids() {
    echo "curl -X GET http://localhost:8080/bid"
    echo "$(curl -X GET http://localhost:8080/bid)"
}
# GET /user/<$1> — fetch one user by id.
function showUserById() {
    echo "curl -X GET http://localhost:8080/user/$1"
    # Quote the URL and the substitution so ids containing shell
    # metacharacters and the JSON response are handled verbatim.
    echo "$(curl -X GET "http://localhost:8080/user/$1")"
}

# GET /vehicle/<$1> — fetch one vehicle by id.
function showVehicleById() {
    echo "curl -X GET http://localhost:8080/vehicle/$1"
    echo "$(curl -X GET "http://localhost:8080/vehicle/$1")"
}

# GET /auction/<$1> — fetch one auction by id.
function showAuctionById() {
    echo "curl -X GET http://localhost:8080/auction/$1"
    echo "$(curl -X GET "http://localhost:8080/auction/$1")"
}

# GET /bid/<$1> — fetch one bid by id.
function showBidById() {
    echo "curl -X GET http://localhost:8080/bid/$1"
    echo "$(curl -X GET "http://localhost:8080/bid/$1")"
}
# GET /user/<$1>/bid — all bids placed by a user.
function showBidsByUser() {
    echo "curl -X GET http://localhost:8080/user/$1/bid"
    # $() over backticks; the quoted substitution keeps the JSON verbatim.
    echo "$(curl -X GET "http://localhost:8080/user/$1/bid")"
}

# GET /auction/<$1>/bid — all bids on an auction.
function showBidsByAuction() {
    echo "curl -X GET http://localhost:8080/auction/$1/bid"
    echo "$(curl -X GET "http://localhost:8080/auction/$1/bid")"
}

# GET /vehicle/<$1>/bid — all bids on a vehicle.
function showBidsByVehicle() {
    echo "curl -X GET http://localhost:8080/vehicle/$1/bid"
    echo "$(curl -X GET "http://localhost:8080/vehicle/$1/bid")"
}

# GET /user/<$1>/vehicle/<$2>/bid — one user's bids on one vehicle.
function showUserBidsOnVehicle() {
    echo "curl -X GET http://localhost:8080/user/$1/vehicle/$2/bid"
    echo "$(curl -X GET "http://localhost:8080/user/$1/vehicle/$2/bid")"
}

# GET /auction/<$1>/currentWinUser — current winning user of an auction.
function showCurrentWinUserOfAuction() {
    echo "curl -X GET http://localhost:8080/auction/$1/currentWinUser"
    echo "$(curl -X GET "http://localhost:8080/auction/$1/currentWinUser")"
}

# GET /auction/<$1>/currentWinBid — current winning bid of an auction.
function showCurrentWinBidOfAuction() {
    echo "curl -X GET http://localhost:8080/auction/$1/currentWinBid"
    echo "$(curl -X GET "http://localhost:8080/auction/$1/currentWinBid")"
}

# GET /vehicle/<$1>/currentWinBid — current winning bid on a vehicle.
function showCurrentWinBidOfVehicle() {
    echo "curl -X GET http://localhost:8080/vehicle/$1/currentWinBid"
    echo "$(curl -X GET "http://localhost:8080/vehicle/$1/currentWinBid")"
}
# POST /auction/<$1> — place a bid of amount $3 in currency $4, authenticated
# as user $2 via the userAuth header.
function applyBid() {
    # Show the request about to be sent.  The original used plain echo with
    # literal "\n" sequences, which printed as-is; printf renders each part
    # on its own line.
    printf '%s\n' \
        "curl -X POST http://localhost:8080/auction/$1" \
        "  -H 'Cache-Control: no-cache'" \
        "  -H 'Content-Type: application/json'" \
        "  -H 'userAuth: $2'" \
        "  -d '{" \
        "    \"amount\": $3," \
        "    \"currency\": \"$4\"" \
        "  }'"
    # $() over backticks; quoting the substitution prints the response
    # verbatim instead of word-splitting it.
    echo "$(curl -X POST "http://localhost:8080/auction/$1" \
        -H 'Cache-Control: no-cache' \
        -H 'Content-Type: application/json' \
        -H "userAuth: $2" \
        -d '{
        "amount": '"$3"',
        "currency": "'"$4"'"
        }')"
}
# Print the CLI help text (every sub-command with a usage example).
function usage {
echo "The CLI requires curl."
echo "Options are:"
echo " --users : Get all users."
echo " eg: sh run.sh --users"
echo " --vehicles : Get all vehicles."
echo " eg: sh run.sh --vehicles"
echo " --auctions : Get all auctions."
echo " eg: sh run.sh --auctions"
echo " --bids : Get all bids."
echo " eg: sh run.sh --bids"
echo " --userById {userId} : Find User by userId."
echo " eg: sh run.sh --userById 1"
echo " --vehicleById {vehicleId} : Find vehicle by vehicleId."
echo " eg: sh run.sh --vehicleById 1"
echo " --auctionById {auctionId} : Find auction by auctionId."
echo " eg: sh run.sh --auctionById 1"
echo " --bidById {bidId} : Find bid by bidId."
echo " eg: sh run.sh --bidById 1"
echo " --bidsOfUser {userId} : Find all bids on a user."
echo " eg: sh run.sh --bidsOfUser 1"
echo " --bidsOfVehicle {vehicleId} : Find all bids on a vehicle."
echo " eg: sh run.sh --bidsOfVehicle 1"
echo " --bidsOfAuction {auctionId} : Find all bids on an auction."
echo " eg: sh run.sh --bidsOfAuction 1"
echo " --userBidsOnVehicle {userId} {vehicleId} : Find a user's bids on a Vehicle."
echo " eg: sh run.sh --userBidsOnVehicle 1 1"
echo " --currentWinUserOfAuction {auctionId} : Find current win user of a auction."
echo " eg: sh run.sh --currentWinUserOfAuction 1"
echo " --currentWinBidOfAuction {auctionId} : Find current win bid of a auction."
echo " eg: sh run.sh --currentWinBidOfAuction 1"
echo " --currentWinBidOfVehicle {vehicleId} : Find current win bid of a vehicle."
echo " eg: sh run.sh --currentWinBidOfVehicle 1"
echo " --applyBid {userId} {auctionId} {amount} {currency} : Post bid for an auction."
echo " eg: sh run.sh --applyBid 1 1 11.11 CAD"
}
# Validation patterns for CLI arguments:
#   re         - positive integer ids
#   doubleRe   - bid amounts (integer or decimal)
#   currencyRe - supported currencies
# NOTE(review): in doubleRe the "|" alternation binds loosely, so the
# "[0-9]+$" branch is only end-anchored and the decimal branch only
# start-anchored — confirm the intended strictness.
re='^[0-9]+$'
doubleRe='^[0-9]*\.[0-9]+|[0-9]+$'
currencyRe='^(CAD|USD)$'
# Parsed argument values (0 = not supplied).
userId=0
auctionId=0
bidId=0
vehicleId=0
amount=0
currency=0
# One flag per sub-command; exactly one is raised by the option parser.
SHOW_ALL_USERS=0
SHOW_ALL_VEHICLE=0
SHOW_ALL_AUCTION=0
SHOW_ALL_BID=0
USER_BY_ID=0
VEHICLE_BY_ID=0
AUCTION_BY_ID=0
BID_BY_ID=0
SHOW_BIDS_BY_USER=0
SHOW_BIDS_BY_AUCTION=0
SHOW_BIDS_BY_VEHICLE=0
SHOW_USER_BIDS_ON_VEHICLE=0
SHOW_CURRENT_WIN_USER_OF_AUCTION=0
SHOW_CURRENT_WIN_BID_OF_AUCTION=0
SHOW_CURRENT_WIN_USER_OF_VEHICLE=0
APPLY_BID=0
# Parse the single sub-command in $1, validate its positional id arguments,
# and raise the matching flag for the dispatch block below.  Every arm ends
# in "break", so exactly one sub-command is processed per invocation.
while true ; do
case "$1" in
--users)
SHOW_ALL_USERS=1
break ;;
--vehicles)
SHOW_ALL_VEHICLE=1
break ;;
--auctions)
SHOW_ALL_AUCTION=1
break ;;
--bids)
SHOW_ALL_BID=1
break ;;
--userById)
if [[ $2 =~ $re && $2 != 0 ]] ; then
userId=$2
USER_BY_ID=1
else
echo "error: Not a number that is greater than zero" >&2; exit 1
fi
break ;;
--vehicleById)
if [[ $2 =~ $re && $2 != 0 ]] ; then
vehicleId=$2
VEHICLE_BY_ID=1
else
echo "error: Not a number that is greater than zero" >&2; exit 1
fi
break ;;
--auctionById)
if [[ $2 =~ $re && $2 != 0 ]] ; then
auctionId=$2
AUCTION_BY_ID=1
else
echo "error: Not a number that is greater than zero" >&2; exit 1
fi
break ;;
--bidById)
if [[ $2 =~ $re && $2 != 0 ]] ; then
bidId=$2
BID_BY_ID=1
else
echo "error: Not a number that is greater than zero" >&2; exit 1
fi
break ;;
--bidsOfUser)
if [[ $2 =~ $re && $2 != 0 ]] ; then
userId=$2
SHOW_BIDS_BY_USER=1
else
echo "error: Not a number that is greater than zero" >&2; exit 1
fi
break ;;
--bidsOfVehicle)
if [[ $2 =~ $re && $2 != 0 ]] ; then
vehicleId=$2
SHOW_BIDS_BY_VEHICLE=1
else
echo "error: Not a number that is greater than zero" >&2; exit 1
fi
break ;;
--bidsOfAuction)
if [[ $2 =~ $re && $2 != 0 ]] ; then
auctionId=$2
SHOW_BIDS_BY_AUCTION=1
else
echo "error: Not a number that is greater than zero" >&2; exit 1
fi
break ;;
# NOTE(review): the second value (the vehicle id per the usage text) is
# stored in auctionId here and forwarded as-is by the dispatch — the URL
# is built correctly, only the variable name is misleading.
--userBidsOnVehicle)
if [[ $2 =~ $re && $2 != 0 ]] ; then
userId=$2
else
echo "error: Not a number that is greater than zero" >&2; exit 1
fi
if [[ $3 =~ $re && $3 != 0 ]] ; then
auctionId=$3
else
echo "error: Not a number that is greater than zero" >&2; exit 1
fi
if [[ $userId != 0 && $auctionId != 0 ]] ; then
SHOW_USER_BIDS_ON_VEHICLE=1
else
echo "error: miss argument" >&2; exit 1
fi
break ;;
--currentWinUserOfAuction)
if [[ $2 =~ $re && $2 != 0 ]] ; then
SHOW_CURRENT_WIN_USER_OF_AUCTION=1
auctionId=$2
else
echo "error: Not a number that is greater than zero" >&2; exit 1
fi
break ;;
--currentWinBidOfAuction)
if [[ $2 =~ $re && $2 != 0 ]] ; then
SHOW_CURRENT_WIN_BID_OF_AUCTION=1
auctionId=$2
else
echo "error: Not a number that is greater than zero" >&2; exit 1
fi
break ;;
--currentWinBidOfVehicle)
if [[ $2 =~ $re && $2 != 0 ]] ; then
SHOW_CURRENT_WIN_USER_OF_VEHICLE=1
vehicleId=$2
else
echo "error: Not a number that is greater than zero" >&2; exit 1
fi
break ;;
# --applyBid takes userId, auctionId, amount, currency — all validated.
--applyBid)
if [[ $2 =~ $re && $2 != 0 ]] ; then
userId=$2
else
echo "error: Not a number that is greater than zero" >&2; exit 1
fi
if [[ $3 =~ $re && $3 != 0 ]] ; then
auctionId=$3
else
echo "error: Not a number that is greater than zero" >&2; exit 1
fi
if [[ ( $4 =~ $re && $4 != 0 ) || ( $4 =~ $doubleRe ) ]] ; then
amount=$4
else
echo "error: Amount must be a double." >&2; exit 1
fi
if [[ $5 =~ $currencyRe ]] ; then
currency=$5
else
echo "error: Currency must be CAD or USD" >&2; exit 1
fi
if [[ $userId != 0 && $auctionId != 0 && $amount != 0 && $currency != 0 ]] ; then
APPLY_BID=1
else
echo "error: miss argument" >&2; exit 1
fi
break ;;
--help)
usage
break ;;
--) break ;;
*) echo "Internal error! use --help to find option" ; exit 1 ;;
esac
done
# Dispatch on whichever flag the parser raised and invoke the matching
# curl-wrapper function with the validated arguments.
if [ "$SHOW_ALL_USERS" -eq 1 ]; then
showAllUsers
elif [ "$SHOW_ALL_VEHICLE" -eq 1 ]; then
showAllVehicles
elif [ "$SHOW_ALL_AUCTION" -eq 1 ]; then
showAllAuctions
elif [ "$SHOW_ALL_BID" -eq 1 ]; then
showAllBids
elif [ "$USER_BY_ID" -eq 1 ]; then
showUserById $userId
elif [ "$VEHICLE_BY_ID" -eq 1 ]; then
showVehicleById $vehicleId
elif [ "$AUCTION_BY_ID" -eq 1 ]; then
showAuctionById $auctionId
elif [ "$BID_BY_ID" -eq 1 ]; then
showBidById $bidId
elif [ "$SHOW_BIDS_BY_USER" -eq 1 ]; then
showBidsByUser $userId
elif [ "$SHOW_BIDS_BY_VEHICLE" -eq 1 ]; then
showBidsByVehicle $vehicleId
elif [ "$SHOW_BIDS_BY_AUCTION" -eq 1 ]; then
showBidsByAuction $auctionId
# NOTE(review): the second argument below is the value the parser stored in
# auctionId; it is used as the *vehicle* id in the URL — works, but the
# variable name is misleading.
elif [ "$SHOW_USER_BIDS_ON_VEHICLE" -eq 1 ]; then
showUserBidsOnVehicle $userId $auctionId
elif [ "$SHOW_CURRENT_WIN_USER_OF_AUCTION" -eq 1 ]; then
showCurrentWinUserOfAuction $auctionId
elif [ "$SHOW_CURRENT_WIN_BID_OF_AUCTION" -eq 1 ]; then
showCurrentWinBidOfAuction $auctionId
elif [ "$SHOW_CURRENT_WIN_USER_OF_VEHICLE" -eq 1 ]; then
showCurrentWinBidOfVehicle $vehicleId
elif [ "$APPLY_BID" -eq 1 ]; then
applyBid $auctionId $userId $amount $currency
fi
1aa180b3859458e054d6f1de851f512e40dc02a6 | Shell | niefan/STA5_pre | /sources/st-etal/latest-etal/applications/radio_if/loop_radio_if.bash | UTF-8 | 179 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# Remove the scratch file left behind by radio_if runs.
cleanup(){
rm -f /tmp/tempfile
return $?
}
# SIGINT handler: announce shutdown, clean up, then exit with the cleanup
# status so failures to remove the temp file are visible.
ctrl_c(){
echo -en "\n*** Exiting radio_if ***\n"
cleanup
exit $?
}
trap ctrl_c SIGINT
# Restart radio_if forever; the only exit path is Ctrl-C (handled above).
while true; do radio_if; done
837dacb0392bf6830414160eaaafb7c9de9a4dfc | Shell | nchepov/Exercise11 | /run.sh | UTF-8 | 941 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Remember the starting directory so we can return to it at the end.
directory=$(pwd)
cd ~/ubung*/src
echo "Directory: $(pwd)"
echo 'rm */*.class'
# Remove previously compiled classes so every run starts from a clean build.
rm */*.class
echo -e 'Index\tJava File'
# First pass: print an indexed menu of every demo program.
index=0
for file in exercise*/*Demo.java; do
echo -e "$index\t\t$file"
index=$((index + 1))
done
read -p 'Enter index: ' enteredIndex
# Second pass over the same glob: compile and run the selected entry.
index=0
for file in exercise*/*Demo.java; do
if [[ "$index" == "$enteredIndex" ]]; then
echo 'javac exercise11/Node.java exercise11/NodeUtils.java exercise11/LinkedQueue.java exercise11/LinkedQueueDemo.java exercise11/LinkedStack.java exercise11/LinkedStackDemo.java'
javac exercise11/Node.java exercise11/NodeUtils.java exercise11/LinkedQueue.java exercise11/LinkedQueueDemo.java exercise11/LinkedStack.java exercise11/LinkedStackDemo.java
# Turn "dir/Name.java" into the fully-qualified class name "dir.Name".
class=$(echo $file | sed 's/.java//' | sed 's/\//./')
echo "java -ea $class"
echo -e "\U0001f680 \U0001f648 \U0001f649 \U0001f64A \U0001f680"
# -ea enables Java assertions for the demo run.
java -ea $class
else
# Not the chosen index: explicit no-op.
:
fi
index=$((index + 1))
done
cd $directory
| true |
f15775b0c4ce8ac870cf0a4a3d8d9c4ef4f34b5d | Shell | mcclurmc/xcp-storage-managers | /drivers/mpathHBA | UTF-8 | 1,321 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Copyright (C) 2006-2007 XenSource Ltd.
# Copyright (C) 2008-2009 Citrix Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# Timeouts (seconds) written to the HBA device-loss tunables: the short
# value while multipathing is enabled (fast path failover), the longer
# default otherwise.
DEFAULT_TIMEOUT=30
MPATH_TIMEOUT=10

# Decide the timeout once from the requested mode.  Quoting "$1" avoids the
# "[: ==: unary operator expected" error the original unquoted test produced
# whenever the script ran without arguments; "=" is the portable operator.
if [ "$1" = "enable" ]; then
    TIMEOUT=${MPATH_TIMEOUT}
else
    TIMEOUT=${DEFAULT_TIMEOUT}
fi

# Check for QLogic Ports
if [ -e "/sys/class/fc_remote_ports" ]; then
    for port_path in /sys/class/fc_remote_ports/*; do
        # An empty directory leaves the literal glob pattern behind; skip it.
        [ -e "$port_path" ] || continue
        i=$(basename "$port_path")
        echo ${TIMEOUT} > "$port_path/dev_loss_tmo"
        echo "Adjusted Qlogic port ${i} dev_loss_tmo"
    done
fi

# Now check for Emulex
for host_path in /sys/class/scsi_host/*; do
    # Only lpfc (Emulex) hosts expose this attribute.
    if [ -e "$host_path/lpfc_fcp_class" ]; then
        i=$(basename "$host_path")
        echo ${TIMEOUT} > "$host_path/lpfc_nodev_tmo"
        echo "Adjusted Emulex port ${i} lpfc_nodev_tmo"
    fi
done
| true |
57520fae92182f9b28f0b5399b1eecebb13eac2c | Shell | jg71/ansitest | /VIRTBOX/vb-virtmedia-cleanup.sh | UTF-8 | 460 | 2.84375 | 3 | [] | no_license | #!/bin/bash
# cleanup inaccessible disks in VMM
# Keep a full snapshot of the media registry for later reference.
vboxmanage list hdds > /tmp/vbox-hds-all.txt
# Extract the lines two above each "State: inaccessible" (the UUID lines),
# dropping the State/Parent lines and grep's "--" context separators.
vboxmanage list hdds |grep -B 2 'State: inaccessible' \
|grep -E -v 'Parent|State|--' >/tmp/infile-vboxdel.txt
#zfs-SAS-T5-12*.vdi > /tmp/infile.txt
#set -x
# IFS= read -r preserves leading whitespace and backslashes in each line
# (the original bare "read line" mangled both).
while IFS= read -r line; do
    uuid=$(awk '{print $2}' <<<"$line")
    echo "$uuid" >> /tmp/vbox-hd-inacc-del.log
    vboxmanage closemedium disk "$uuid" --delete
done < /tmp/infile-vboxdel.txt
date
f65e6e1cc9657f1a0b2c9de0f3972f088a933dfc | Shell | luoxz-ai/mmo-server | /server-code/bin/shell/run/stop_zone_separat.sh | UTF-8 | 2,053 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Run from two directories above this script's location.  Quoting the
# expansions lets the script work from paths containing spaces (the original
# unquoted `dirname $0` broke there).
cd "$(dirname "$0")"
cd ../..
DIR_file=$(pwd)
# World id: first CLI argument, defaulting to 1 when absent or empty — the
# same effect as the original [ ! -n "$1" ] dance, written idiomatically.
serverid=${1:-1}
# Point the sanitizer runtimes at their per-deployment option files.
export ASAN_OPTIONS=include_if_exists=${DIR_file}/asan_cfg/options_asan
export LSAN_OPTIONS=include_if_exists=${DIR_file}/asan_cfg/options_lsan
export UBSAN_OPTIONS=include_if_exists=${DIR_file}/asan_cfg/options_ubsan
export TSAN_OPTIONS=include_if_exists=${DIR_file}/asan_cfg/options_tsan
world()
{
${DIR_file}/binary/z${serverid}_world --worldid=${serverid} --stop=WORLD_SERVICE-0,MARKET_SERVICE-0,GM_SERVICE-0
}
route()
{
${DIR_file}/binary/z${serverid}_route --worldid=${serverid} --stop=ROUTE_SERVICE-0
}
scene1()
{
${DIR_file}/binary/z${serverid}_scene1 --worldid=${serverid} --stop=SCENE_SERVICE-1,AI_SERVICE-1
}
scene2()
{
${DIR_file}/binary/z${serverid}_scene2 --worldid=${serverid} --stop=SCENE_SERVICE-2,AI_SERVICE-2
}
scene3()
{
${DIR_file}/binary/z${serverid}_scene3 --worldid=${serverid} --stop=SCENE_SERVICE-3,AI_SERVICE-3
}
scene4()
{
${DIR_file}/binary/z${serverid}_scene4 --worldid=${serverid} --stop=SCENE_SERVICE-4,AI_SERVICE-4
}
scene5()
{
${DIR_file}/binary/z${serverid}_scene5 --worldid=${serverid} --stop=SCENE_SERVICE-5,AI_SERVICE-5
}
socket1()
{
${DIR_file}/binary/z${serverid}_socket1 --worldid=${serverid} --stop=SOCKET_SERVICE-1,AUTH_SERVICE-1
}
socket2()
{
${DIR_file}/binary/z${serverid}_socket2 --worldid=${serverid} --stop=SOCKET_SERVICE-2,AUTH_SERVICE-2
}
socket3()
{
${DIR_file}/binary/z${serverid}_socket3 --worldid=${serverid} --stop=SOCKET_SERVICE-3,AUTH_SERVICE-3
}
socket4()
{
${DIR_file}/binary/z${serverid}_socket4 --worldid=${serverid} --stop=SOCKET_SERVICE-4,AUTH_SERVICE-4
}
socket5()
{
${DIR_file}/binary/z${serverid}_socket5 --worldid=${serverid} --stop=SOCKET_SERVICE-5,AUTH_SERVICE-5
}
scene_all()
{
scene1;
scene2;
scene3;
scene4;
scene5;
}
socket_all()
{
socket1;
socket2;
socket3;
socket4;
socket5;
}
if [ $2 ];
then
$2;
else
world;
route;
scene_all;
socket_all;
fi | true |
ed5a3aa38a23df273645dec93a14fad8b25db16d | Shell | blajos/maas-rc-ha | /profiles/p_postgres_cluster/templates/pg_initial_sync.sh.erb | UTF-8 | 1,230 | 3.078125 | 3 | [] | no_license | #!/bin/sh
# ERB template: one-shot initial sync of a PostgreSQL streaming-replication
# standby from the primary. The <%= ... %> placeholders are substituted by
# the configuration tool before this script ever runs.
set -e
set -x
# The primary never syncs from itself.
if [ "`hostname --fqdn`" = "<%= @primary_fqdn -%>" ];then
    exit 0
fi
# As root: stop pgpool for the duration of the sync, then re-run this same
# script ($0) as the postgres user to do the actual work.
if [ "`id -u`" = "0" ];then
    service pgpool2 stop
    su postgres -c "$0"
    service pgpool2 start
else
    # Running as postgres: stop the local cluster, put the primary into
    # backup mode and pull a base copy of its data directory over rsync.
    /usr/bin/pg_ctlcluster <%= @postgres_version -%> main stop
    ssh <%= @primary_fqdn -%> "psql -U postgres -c \"select pg_start_backup('clone',true);\""
    # pg_xlog is copied separately below; postgresql.conf and the promotion
    # trigger file must stay local to this node.
    rsync -av --exclude pg_xlog --exclude postgresql.conf --exclude trigger <%= @primary_fqdn -%>:/var/lib/postgresql/<%= @postgres_version -%>/main/ /var/lib/postgresql/<%= @postgres_version -%>/main/
    # recovery.conf puts the cluster into standby mode, streaming from the
    # primary; creating the 'trigger' file later promotes this node.
    cat <<EOF >/var/lib/postgresql/<%= @postgres_version -%>/main/recovery.conf
standby_mode = 'on'
primary_conninfo = 'host=<%= @primary_fqdn -%> user=postgresrepl password=<%= @cluster_pw -%>'
trigger_file = 'trigger'
EOF
    # recovery.conf contains the replication password - keep it private.
    chmod 600 /var/lib/postgresql/<%= @postgres_version -%>/main/recovery.conf
    ssh <%= @primary_fqdn -%> "psql -U postgres -c \"select pg_stop_backup();\""
    # Copy the WAL segments written during the backup window.
    rsync -av <%= @primary_fqdn -%>:/var/lib/postgresql/<%= @postgres_version -%>/main/pg_xlog /var/lib/postgresql/<%= @postgres_version -%>/main/
    /usr/bin/pg_ctlcluster <%= @postgres_version -%> main start
    touch /var/lib/postgresql/<%= @postgres_version -%>/main/.stamp-pg_initial_sync
fi
| true |
41c133575c9eb775e275a91bf99d1f9b3e7d049d | Shell | markveligod/minishell | /sh_test/sh_sq/diff_sq.sh | UTF-8 | 1,782 | 3.0625 | 3 | [] | no_license | #!/bin/bash
#COLOR;
RED='\033[1;31m'
GREEN='\033[1;32m'
CYAN='\033[1;36m'
RESET='\033[0m'

# Compare one pair of result files and clean them up on success.
#   $1 - test number N: compares or_sq_N.res against no_sq_N.res
# Prints [OK] (and removes both files) when they match, otherwise prints
# [FAIL] followed by the diff and keeps the files. Returns 0/1 accordingly.
check_sq_test()
{
    if cmp "or_sq_$1.res" "no_sq_$1.res" &> /dev/null
    then
        echo -en "$GREEN[OK]$RESET"
        echo ""
        rm "or_sq_$1.res" "no_sq_$1.res"
        return 0
    else
        echo -en "$RED[FAIL]$RESET"
        echo ""
        diff --normal "or_sq_$1.res" "no_sq_$1.res"
        return 1
    fi
}

# The label lines below are kept byte-for-byte from the original so the
# displayed test descriptions (with their intricate quoting) do not change.
echo -en "$CYAN[TEST #1] => 'echo "\'" '$RESET | "
check_sq_test 1
echo -en "$CYAN[TEST #2] => 'echo '\"''$RESET | "
check_sq_test 2
echo -en "$CYAN[TEST #3] => 'echo '\"' "\'"'$RESET | "
check_sq_test 3
echo -en "$CYAN[TEST #4] => 'echo "''\\'"'''""' '\\"' "\'"'$RESET | "
check_sq_test 4
echo -en "$CYAN[TEST #5] => 'echo "$LANG '" '$TOTO "...'$RESET | "
check_sq_test 5
da7c78a3e4182a16a666733013e0c1dcb04bee1c | Shell | mddub/dotfiles | /.bashrc | UTF-8 | 880 | 2.859375 | 3 | [] | no_license | # Includes
# Includes - pull in completion helpers and site-wide aliases when present.
[ -f ~/.git-completion.bash ] && source ~/.git-completion.bash
[ -f /nail/scripts/aliases.sh ] && . /nail/scripts/aliases.sh

# Paths
export PATH=$PATH:~/local/bin:~/pg/loc/aws/bin
export PYTHONPATH=~/local/py-lib:~/local/bin

# Aliases
alias te='eval `~/local/bin/tmux-env`'
alias modifieds='git status -s | grep "^ M " | sed "s/^ M //g"'
# Only define the "jump to dev20" alias when not already on dev20.
if [ "$(hostname)" != "dev20" ]; then
    alias d='~/local/bin/growl_irc.sh; ssh -A mwilson@dev20'
fi

# Make ls output better (BSD ls on macOS vs GNU ls elsewhere).
# Strip digits/dots so e.g. OSTYPE=darwin21.6.0 compares as "darwin".
if [ "${OSTYPE//[0-9.]/}" = "darwin" ]; then
    alias ls='ls -FGh'
else
    alias ls='ls -Fh --color'
fi

if [ -f ~/.bash_aliases ]; then
    . ~/.bash_aliases
fi

# Make prompt a colorful display of pwd and git branch
export PS1='[ \[\e[1;34m\]\u\[\e[0;39m\]@\[\e[1;32m\]\h\[\e[0;39m\]:\[\e[1;33m\]\w\[\e[0;39m\] ]$(__git_ps1 " \[\e[1;36m\](%s)\[\e[0;39m\] ")\$ '

# Start in working directory
if [ -d ~/pg/yelp-main ]; then
    cd ~/pg/yelp-main
fi
| true |
a2e1f65f8eab1aea3888d06a8515f9283739f09f | Shell | nickybu/tsp_simulated_annealing | /run-experiment.sh | UTF-8 | 1,326 | 3.25 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Compile code
# NOTE(review): this script uses bash arrays and {1..5} ranges, so it must
# be run with bash even though the stored shebang says /bin/sh.
javac *.java

# Program arguments:
# numNodes, startNode, maxCoordinate, startTemp, minTemp, maxIters,
# coolingRate, numUnchangedDist, numUnchangedRoute, seed, timeout
NUM_NODES=(100 1000)

# Run serial code: 5 runs per node count; maxCoordinate/maxIters scale with n.
echo "Running serial tests..."
for nodes in "${NUM_NODES[@]}"; do
    echo "Running test with $nodes nodes..."
    x=$((10 * nodes))
    for run in {1..5}; do
        java RunSequentialTSP "$nodes" 42 "$x" 100 0.0005 "$x" 0.01 75 15 42 25 > "experiments/results/serial/serial_${nodes}_run_${run}.txt"
    done
done
echo "Completed serial tests..."

# Run parallel code: sweep thread counts for each node count.
threads=(2 3 4 6 8)
echo "Running parallel tests..."
for nodes in "${NUM_NODES[@]}"; do
    echo "Running test with $nodes nodes..."
    for thread in "${threads[@]}"; do
        echo "Running test with $thread threads..."
        x=$((10 * nodes))
        for run in {1..5}; do
            java RunParallelTSP "$thread" "$nodes" 42 "$x" 100 0.0005 "$x" 0.01 75 15 42 25 > "experiments/results/parallel/parallel_threads_${thread}_${nodes}_run_${run}.txt"
        done
    done
done
echo "Completed parallel tests..."

# Parse output files and convert to .csv
echo "Parsing output files to CSV..."
for filename in experiments/results/serial/*.txt; do
    python parser_serial.py "$filename"
done
for filename in experiments/results/parallel/*.txt; do
    python parser_parallel.py "$filename"
done
| true |
b3868aca3386482b765c88c124de47bb032584ed | Shell | davep-github/dpw | /bin/my--their-args | UTF-8 | 392 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# I have many utils that are wrappers around other programs, like *grep.
# E.g. rcgrep. This greps all of my currently active bash rc files.
# 99+% of the time, options are for the grepper, esp -i.
# I'd like to be able to specify args to the wrapper and to the wrapped.
# My feeling is that most of the times the args are for the wrapped.
#
dat=("$@")
case "$@" in
*--*)
| true |
fe73cca7522039b6c46df0a2e1cc09c4415b7feb | Shell | krk1729/nse | /bhav/process_file.sh | UTF-8 | 450 | 2.796875 | 3 | [] | no_license | #NSE_PATH="/cygdrive/d/workShop/NSE/"
#SRC_PATH=$NSE_PATH"/src"
#DOWNLOADS_PATH=$NSE_PATH"/downloads"
#PROCESSED_FILES_PATH=$NSE_PATH"/processed"
# Move freshly downloaded NSE bhavcopy archives aside, drop truncated
# downloads, then touch the database once per remaining file.
mv cm*bhav.csv.zip ../downloads
cd ../downloads || exit 1
# Anything under 4k is an aborted/failed download.
find . -name "cm*bhav.csv.zip" -size -4k -delete
# Export, otherwise child psql processes never see the password.
export PGPASSWORD="postgres"
for file in cm*bhav.csv.zip
do
    [ -e "$file" ] || continue
    echo "Processing $file ..."
    # -U (uppercase) is psql's user flag; the original lowercase -u is not a
    # valid option on current psql. The captured timestamp is unused beyond
    # acting as a connectivity check.
    time=$(psql -h localhost -d postgres -p 5432 -U postgres -c "select now();")
    #mv $file ../processed
done
64f13e1e1ff360aa0a41c9a75973c5fb7e9c8f07 | Shell | admiral0/pacbuild | /pacbuild/testsuite/testAbs/base/diffutils/PKGBUILD | UTF-8 | 449 | 2.765625 | 3 | [] | no_license | # $Id: PKGBUILD,v 1.11 2004/04/12 23:54:38 judd Exp $
# Maintainer: judd <jvinet@zeroflux.org>
pkgname=diffutils
pkgver=2.8.1
pkgrel=2
pkgdesc="Utility programs used for creating patch files"
url="http://www.gnu.org/software/diffutils"
depends=('glibc' 'bash')
source=(ftp://ftp.gnu.org/gnu/$pkgname/$pkgname-$pkgver.tar.gz)

build() {
  # Quote paths and bail out if the source dir is missing: $startdir may
  # contain spaces.
  cd "$startdir/src/$pkgname-$pkgver" || return 1
  ./configure --prefix=/usr
  make || return 1
  make prefix="$startdir/pkg/usr" install
}
| true |
4757b7d09f04387c0282b01e69528618e5cbb54f | Shell | zarlo/gameservers | /scripts/ci.sh | UTF-8 | 3,438 | 4.1875 | 4 | [
"GPL-3.0-only",
"MIT"
] | permissive | #!/usr/bin/env bash
# by invaderctf and sappho.io
# Resolve the directory this script lives in, regardless of caller cwd.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
# Helper functions (presumably defines debug/info/warn/error/important/hook
# used below - confirm against helpers.sh). Quoted so a path containing
# spaces cannot word-split.
source "${SCRIPT_DIR}/helpers.sh"

PULL_SH="1-pull.sh"
BUILD_SH="2-build.sh"
# Print CLI usage to stdout and terminate with a non-zero status.
usage()
{
    printf '%s\n' \
        "Usage, assuming you are running this as a ci script, which you should be" \
        "  ./scripts/ci.sh pull|build <arguments>" \
        "      pull: Cleans and pulls the repo (if applicable)" \
        "      build: Build unbuilt and updated plugins" \
        "      <arguments>: All arguments are passed down to the command, for more info check" \
        "          ./scripts/${PULL_SH} usage" \
        "          ./scripts/${BUILD_SH} usage"
    exit 1
}
# [[ ${CI} ]] || { error "This script is only to be executed in GitLab CI"; exit 1; }
# Input check
[[ "$#" == 0 ]] && usage
# Variable initialisation
# get first arg, pass it as command to run after iterating
COMMAND=${1}
# shift args down, deleting first arg as we just set it to a var
shift 1
# dirs to check for possible gameserver folders
TARGET_DIRS=(
    /srv/daemon-data
    /var/lib/pterodactyl/volumes
)
# this is clever and infinitely smarter than what it was before, good job
# (du -s sizes each candidate dir; sort -n | tail -n1 picks the biggest one,
# i.e. the directory that actually holds the gameserver volumes)
WORK_DIR=$(du -s "${TARGET_DIRS[@]}" 2> /dev/null | sort -n | tail -n1 | cut -f2)
debug "working dir: ${WORK_DIR}"
# go to our directory with (presumably) gameservers in it or die trying
cd "${WORK_DIR}" || { error "can't cd to workdir ${WORK_DIR}!!!"; hook "can't cd to workdir ${WORK_DIR}"; exit 1; }
# kill any git operations that are running and don't fail if we don't find any
# PROBABLY BAD PRACTICE LOL
# killall -s SIGKILL -q git || true
# iterate thru directories in our work dir which we just cd'd to
for dir in ./*/ ; do
    # we didn't find a git folder
    if [ ! -d "${dir}/.git" ]; then
        warn "${dir} has no .git folder! skipping"
        hook "${dir} has no .git folder!"
        # maybe remove these in the future
        continue
    fi
    # we did find a git folder! print out our current folder
    important "Operating on: ${dir}"
    # go to our server dir or die trying
    cd "${dir}" || { error "can't cd to ${dir}"; continue; }
    # branches and remotes
    CI_COMMIT_HEAD=$(git rev-parse --abbrev-ref HEAD)
    CI_LOCAL_REMOTE=$(git remote get-url origin)
    # Normalize the local remote URL so it can be compared against
    # "<server-host>/<project-path>": strip everything up to a user@,
    # turn the scp-style host:path separator into host/path, drop .git.
    CI_LOCAL_REMOTE="${CI_LOCAL_REMOTE##*@}"
    CI_LOCAL_REMOTE="${CI_LOCAL_REMOTE/://}"
    CI_LOCAL_REMOTE="${CI_LOCAL_REMOTE%.git*}"
    CI_REMOTE_REMOTE="${CI_SERVER_HOST}/${CI_PROJECT_PATH}"
    info "Comparing branches ${CI_COMMIT_HEAD} and ${CI_COMMIT_REF_NAME}."
    info "Comparing local ${CI_LOCAL_REMOTE} and remote ${CI_REMOTE_REMOTE}."
    # Only act when this checkout tracks the branch and repo the CI job is for.
    if [[ "${CI_COMMIT_HEAD}" == "${CI_COMMIT_REF_NAME}" ]] && [[ "${CI_LOCAL_REMOTE}" == "${CI_REMOTE_REMOTE}" ]]; then
        debug "branches match"
        case "${COMMAND}" in
            pull)
                info "Pulling git repo"
                # DON'T QUOTE THIS
                bash ${SCRIPT_DIR}/${PULL_SH} $*
                ;;
            build)
                COMMIT_OLD=$(git rev-parse HEAD~1)
                info "Building updated and uncompiled .sp files"
                # DON'T QUOTE THIS EITHER
                bash ${SCRIPT_DIR}/${BUILD_SH} ${COMMIT_OLD}
                ;;
            *)
                error "${COMMAND} is not supported"
                exit 1
                ;;
        esac
    else
        important "Branches do not match, doing nothing"
    fi
    # back up to the work dir before visiting the next server folder
    cd ..
done
| true |
5e8eb9074cf1c49917bc5d9e5b066bc20bb6921d | Shell | vvbatura/airtasker | /deploy.sh | UTF-8 | 1,479 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# ANSI escape codes for the colored section banners below.
# NOTE(review): the banners rely on this shell's echo interpreting the
# \033/\n escapes (true of dash's echo under #!/bin/sh, not of bash's
# echo without -e) - confirm the interpreter before changing the shebang.
ESC_SEQ="\033["
COL_RESET=$ESC_SEQ"39;49;00m"
COL_MAGENTA=$ESC_SEQ"35;01m"
COL_GREEN=$ESC_SEQ"32;01m"
CELL_LINE="========================================="
CELL_BEGIN="\n$COL_MAGENTA $CELL_LINE \n"
CELL_DONE_BEGIN="\n$COL_GREEN $CELL_LINE \n"
CELL_END="\n $CELL_LINE $COL_RESET \n"

# Fetch the latest master (remote add fails harmlessly if origin exists).
echo "$CELL_BEGIN ============= Pull master ============== $CELL_END"
git remote add origin https://github.com/vvbatura/airtasker.git
git pull origin master

echo "$CELL_BEGIN =========== Composer install ============ $CELL_END"
composer install

# NOTE(review): composer install runs a second time here with production
# flags; the plain run above looks redundant - confirm before removing.
echo "$CELL_BEGIN =========== Install/update composer dependecies ============ $CELL_END"
composer install --no-interaction --prefer-dist --optimize-autoloader

# The .env file must exist before the artisan commands below can run.
echo "$CELL_BEGIN =========== Create .env ============ $CELL_END"
cp .env.host .env

echo "$CELL_BEGIN =========== Generate JWT ============ $CELL_END"
php artisan jwt:secret

echo "$CELL_BEGIN =========== Run database migrations ============ $CELL_END"
php artisan migrate --seed

echo "$CELL_BEGIN =========== Create symbolic link for storage ============ $CELL_END"
php artisan storage:link

# Rebuild route and application caches from scratch.
echo "$CELL_BEGIN =========== Clear and cache routes ============ $CELL_END"
php artisan clear-compiled
php artisan route:clear
php artisan route:cache
php artisan cache:clear

echo "$CELL_BEGIN =========== Clear and cache config ============ $CELL_END"
php artisan config:clear
php artisan config:cache

echo "$CELL_DONE_BEGIN ================= DONE ================== $CELL_END"
| true |
44bdfb8a913f2fb8650c9c062f112dcc163bc40a | Shell | somesh-ballia/mcms | /MCMS/Main/AutoTestScripts/Tools/auto-test-produce-list.sh | UTF-8 | 1,557 | 3.953125 | 4 | [] | no_license | #!/bin/sh
#
# name  : auto-test-produce-list.sh
# author: david rabkin
# date  : 20100613
#
# produces xml entries for auto-test build forge project file, looks as
#
# <var mode="N" value="PSTNConf" position="2" action="S" name="test001"></var>
# <var mode="N" value="ISDN_DialInDialOut" position="3" action="S" name="test002"></var>
#
# NOTE(review): uses bash arrays and (( )) loops, so it must be run with
# bash even though the stored shebang says /bin/sh.

# extract test names from file owners.py
SRCFILE='owners.py'
# check file existence
if [ ! -f "$SRCFILE" ]; then
    echo "error: unable to find $SRCFILE"
    exit 1
fi

# owners.py carries the test list between the __BEGIN_TESTS__ and
# __END_TESTS__ markers as lines like:
#   "GW_1":"Eitan",
#   "ISDNConf":"Olga S.",
# Take only that section (a sed address range generalizes the old
# grep -B/-A 500-line window, which silently broke past 500 lines),
# drop comment lines, keep the first quoted field and sort it.
ARR=($(sed -n '/__BEGIN_TESTS__/,/__END_TESTS__/p' "$SRCFILE" | sed '/^#/d' | cut -d'"' -f 2 | sort))

LEN=${#ARR[@]}
for ((ii = 0; ii < LEN; ii++)); do
    # positions start at 2, test numbers at 1
    POS=$((ii + 2))
    NAME=${ARR[ii]}
    # zero-pad the test number to at least 3 digits: test001, test002, ...
    # ($(( )) replaces the obsolete $[ ] and let forms)
    TAG=$(printf 'test%.3d' $((POS - 1)))
    # leading spaces for xml formatting; the dead "strip last character"
    # sed on a never-assigned variable was removed.
    printf '    <var mode="N" value="%s" position="%s" action="S" name="%s"></var>\n' "$NAME" "$POS" "$TAG"
done
| true |
cc0b85825421cf553de7c4e310328040924fff04 | Shell | LuxorInteractive/scisim-tools | /bin/update-cfg.sh | UTF-8 | 6,144 | 3.71875 | 4 | [
"Intel"
] | permissive | #!/bin/bash
# Copyright (c) Intel Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# -- Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# -- Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# -- Neither the name of the Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL OR ITS
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------
# This script generates the OpenSim.ini, OpenSim.exe.config, and Regions.ini
# files for all of the simulators in a plaza. It assumes that configuration
# files for the estate are in $F_HOME/$F_CFGDIR which defaults to
# /home/opensim/scisim-conf-git
# -----------------------------------------------------------------
# Default settings; several can be pre-seeded through OPENSIM* environment
# variables and overridden again by command line options below.
# Double quotes so $0 actually expands in the usage text (the original
# single-quoted string printed a literal "$0").
F_USAGE="Usage: $0 -b -c cfgdir -f inifile -h rootdir -p proxy -v|--verbose plaza"

F_HOME='/home/opensim/'
if [ -n "$OPENSIM" ]; then
    F_HOME=$OPENSIM
else
    echo "OPENSIM not set; using $F_HOME as root"
fi

# Set up the default parameters
F_VERBOSE=''
F_LOGLEVEL='WARN'
F_INIFILE='OpenSimDefaults.ini'
F_BLDFLAG='no'
F_CLEANFLAG='no'
F_HOSTNAME="$(hostname)"
if [ -n "$OPENSIMCONFHOST" ]; then
    F_HOSTNAME=$OPENSIMCONFHOST
fi
F_CONFDIR='etc'
if [ -n "$OPENSIMCONFDIR" ]; then
    F_CONFDIR=$OPENSIMCONFDIR
fi
F_PROXY=''
if [ -n "$OPENSIMPROXY" ]; then
    F_PROXY=$OPENSIMPROXY
fi
# -----------------------------------------------------------------
# Process command line arguments
# -----------------------------------------------------------------
# Enhanced (util-linux) getopt canonicalizes the short and long options;
# the rewritten argument list is then re-installed with 'eval set --'.
TEMP=`getopt -o bc:f:h:p:v --long build,clean,config:,file:,home:,help,host:,loglevel:,proxy:,verbose \
     -n 'update-cfg.sh' -- "$@"`
if [ $? != 0 ] ; then echo "Terminating..." >&2 ; exit 1 ; fi
eval set -- "$TEMP"

while true ; do
    case "$1" in
        --help) echo $F_USAGE ; exit 1 ;;
        -b|--build) F_BLDFLAG=yes ; shift 1 ;;
        # --clean implies a rebuild of the per-simulator directories
        --clean) F_CLEANFLAG=yes; F_BLDFLAG=yes; shift 1 ;;
        -c|--config) F_CONFDIR=$2 ; shift 2 ;;
        -f|--file) F_INIFILE=$2 ; shift 2 ;;
        -h|--home) F_HOME=$2 ; shift 2 ;;
        --host) F_HOSTNAME=$2 ; shift 2 ;;
        --loglevel) F_LOGLEVEL=$2 ; shift 2 ;;
        -p|--proxy) F_PROXY=$2 ; shift 2 ;;
        -v|--verbose) F_VERBOSE='--verbose' ; shift ;;
        --) shift ; break ;;   # end of options; plaza names remain in "$@"
        *) echo "Internal error!" ; exit 1 ;;
    esac
done

# A config dir given as an absolute path is used as-is; otherwise it is
# taken relative to the OpenSim root.
if [[ $F_CONFDIR == /* ]] ; then
    CONFROOT=$F_CONFDIR
else
    CONFROOT=$F_HOME/$F_CONFDIR
fi
# Per-category configuration locations under the estate config tree.
ECONFDIR=$CONFROOT/plaza/
RCONFDIR=$CONFROOT/region/
SCONFDIR=$CONFROOT/simulator/
# -----------------------------------------------------------------
# Define some functions for cross-platform operation
# -----------------------------------------------------------------
export PLATFORM=$(uname -o)

# SafePath <path>
# On Cygwin, convert a POSIX path to a mixed (forward-slash Windows) path;
# elsewhere echo it unchanged. Quoting preserves spaces in the path.
function SafePath()
{
    if [ "$PLATFORM" = 'Cygwin' ]; then
        cygpath -m "$1"
    else
        echo "$1"
    fi
}

# SafeExe <program>
# Resolve a program name to something directly executable on this platform.
function SafeExe()
{
    if [ "$PLATFORM" != 'Cygwin' ]; then
        echo "$1"
        return 0
    fi
    # must use the short path version to avoid
    # the ' ' in many windows paths
    newPath=$(cygpath -sm "$(which "$1").exe")
    echo "$newPath"
}
# -----------------------------------------------------------------
# Configure each of the plazas
# -----------------------------------------------------------------
# Each remaining argument names a plaza; its simulator list lives in
# $ECONFDIR/<plaza>.inc as whitespace-separated entries.
for PLAZA in $@ ; do
    echo Updating $PLAZA in $F_HOME
    if [ -e $ECONFDIR/$PLAZA.inc ]; then
        if [ "$F_CLEANFLAG" == "yes" ]; then
            rm -rf $F_HOME/plaza.$PLAZA
        fi
        SIMS=`cat $ECONFDIR/$PLAZA.inc`
        for SIM in $SIMS ; do
            # supports the host name conventions for simulators
            # An entry of the form NAME@HOST is only processed when HOST
            # matches this machine's F_HOSTNAME (compared case-insensitively).
            if [[ "$SIM" =~ (.*)@(.*) ]]; then
                SIM="${BASH_REMATCH[1]}"
                SIMHOST="${BASH_REMATCH[2]}"
                if [ `echo $SIMHOST | tr [:upper:] [:lower:]` != `echo $F_HOSTNAME | tr [:upper:] [:lower:]` ]; then
                    continue
                fi
            fi
            SROOT=$F_HOME/plaza.$PLAZA/run.$SIM
            if [ "$F_BLDFLAG" == "yes" ]; then
                mkdir -p $SROOT/Regions
            fi
            if [ -d $SROOT ]; then
                echo Configure simulator $PLAZA:$SIM
                # Regenerate Regions.ini from this plaza's region config.
                rm -f $SROOT/Regions/Regions.ini
                "$(SafeExe php)" "$(SafePath $OPENSIM/bin/conf-region.php)" \
                    -r "$(SafePath $RCONFDIR/$PLAZA.inc)" -s $SIM \
                    > $SROOT/Regions/Regions.ini
                # Regenerate the simulator ini from the plaza's php template.
                rm -f $SROOT/$F_INIFILE
                "$(SafeExe php)" "$(SafePath $OPENSIM/bin/conf-sim.php)" \
                    -l "@$(SafeExe php) $(SafePath $CONFROOT/conflist.php)" \
                    -o "$(SafePath $SROOT/$F_INIFILE)" \
                    -p "$F_PROXY" -- \
                    "@$(SafeExe php) \"$(SafePath $SCONFDIR/$PLAZA.php)\" $SIM"
                # Regenerate the log configuration for both launcher variants.
                rm -f $SROOT/OpenSim.exe.config
                "$(SafeExe php)" "$(SafePath $OPENSIM/bin/conf-logs.php)" \
                    $PLAZA:$SIM $F_LOGLEVEL \
                    > $SROOT/OpenSim.exe.config
                cp $SROOT/OpenSim.exe.config \
                    $SROOT/OpenSim.32BitLaunch.exe.config
                "$(SafePath $OPENSIM/bin/conf-scripts.pl)" -p $SROOT --skip
            else
                echo "Missing directory for simulator $PLAZA:$SIM"
            fi
        done
    fi
done
| true |
8786ee79dab23c46f9e87d1f415d5f29f2fe3044 | Shell | srizzling/build-repository | /java7/rootfs/run | UTF-8 | 319 | 3.421875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Container entrypoint: run the bundled application either by explicit main
# class ($1 plus remaining args) or, with no arguments, via the jar manifest.
MAIN_CLASS=$1
WORKDIR=/app
MAIN_JAR=/app/app.jar
JAVA_ARGS=""
CP=""
if [[ -n "${MAIN_CLASS}" ]]; then
    shift
    # Explicit main class: put the app jar and its libs on the classpath.
    CP=${MAIN_JAR}
    CP=${CP}:"/app/lib/*"
    JAVA_ARGS="${JAVA_ARGS} ${MAIN_CLASS}"
else
    # No main class given: rely on the jar's Main-Class manifest entry.
    CP="/app/lib/*"
    JAVA_ARGS="${JAVA_ARGS} -jar ${MAIN_JAR}"
fi
cd "${WORKDIR}" || exit 1
# exec so the JVM replaces the shell and receives signals directly
# (clean container shutdown). JAVA_ARGS is deliberately unquoted: it must
# word-split into separate arguments; "$@" keeps user args intact.
exec /java -cp "${CP}" ${JAVA_ARGS} "$@"
| true |
0559747753c3ddc5cc616c743128ba4e4087af2c | Shell | toru173/SecureBoot-on-AVR-Microcontrollers | /Research/avrnacl/run_test.sh | UTF-8 | 1,470 | 3.953125 | 4 | [] | no_license | #!/bin/bash
# Flash avrnacl test firmware onto the AVR target and collect its output
# over the serial device.
#   no args : test every avrnacl_* implementation found under $TOPDIR
#   $1      : test every primitive of implementation $1
#   $1 $2   : test primitive $2 of implementation $1
TOPDIR=$(dirname "$0")
PRIMITIVES=$(cat "$TOPDIR/PRIMITIVES")
# config is expected to define TARGET_DEVICE, DEVICE_FILE and TESTLOGFILE
# used below - confirm against the repository's config file.
. "$TOPDIR/config"

runtest()
{
    IMPLEMENTATION=avrnacl_$1
    # exit 1 instead of the non-portable "exit -1" (which wrapped to 255)
    if ! [ -d "$TOPDIR/$IMPLEMENTATION" ]; then
        echo "Error: implementation $1 (directory $TOPDIR/$IMPLEMENTATION) does not exist"
        exit 1
    fi
    PRIMITIVE=$2
    # NOTE(review): the original '2>&1 > /dev/null' only silenced stdout;
    # '> /dev/null 2>&1' silences both, as evidently intended.
    if ! grep "$PRIMITIVE" "$TOPDIR/PRIMITIVES" > /dev/null 2>&1; then
        echo "Error: primitive $PRIMITIVE does not exist (see file $TOPDIR/PRIMITIVES for a list of supported primitives)"
        exit 1
    fi
    # Flash the matching test image, set up the serial line, then read the
    # output the target prints while running the test.
    avrdude -cstk500v2 -p "$TARGET_DEVICE" -P "$DEVICE_FILE" -U "flash:w:$TOPDIR/${IMPLEMENTATION}/test/test_${PRIMITIVE}.hex" -v
    stty -F "$DEVICE_FILE" raw icanon eof \^d 38400
    echo "===== Starting test of ${IMPLEMENTATION}/${PRIMITIVE} =====" >> "$TESTLOGFILE"
    TESTOUT=$(cat < "$DEVICE_FILE")
    echo "$TESTOUT" >> "$TESTLOGFILE"
    # When the target reports a checksum, compare it to the stored reference.
    if echo "$TESTOUT" | grep -q Checksum; then
        CHECKSUM=$(echo "$TESTOUT" | grep Checksum | sed "s/Checksum:\ //g")
        if echo "$CHECKSUM" | diff - "$TOPDIR/test/checksums/${PRIMITIVE}" >/dev/null; then
            printf "Checksum OK\n" >> "$TESTLOGFILE"
        else
            echo "ERROR: wrong checksum" >> "$TESTLOGFILE"
        fi
    fi
}

# Run every primitive of implementation $1.
testimpl()
{
    for i in $PRIMITIVES; do
        runtest "$1" "$i"
    done
}

# Start from a clean log.
if [ -e "$TESTLOGFILE" ]; then
    rm "$TESTLOGFILE"
fi

if [ -n "$1" ]; then
    if [ -n "$2" ]; then
        runtest "$1" "$2"
    else
        testimpl "$1"
    fi
else
    # Derive implementation names from the avrnacl_* directories instead of
    # parsing 'ls -l' output.
    for d in "$TOPDIR"/avrnacl_*/; do
        [ -d "$d" ] || continue
        d=${d%/}
        testimpl "${d##*_}"
    done
fi
| true |
be93ad819095b69b38714064ecfce9d72e602291 | Shell | alex3kv/UbuntuAndAws | /Lesson8/Task1-3.sh | WINDOWS-1251 | 813 | 3.515625 | 4 | [] | no_license | ### 1. 3 , .
function run() {
$@
for n in {1..3}
do
whoami
done
}
run echo " !"
### 2. while 0 100 .
i=0
while [ $i -lt 101 ]
do
echo $i
i=$(( $i + 2 ))
done
### 3. nano test.txt. 10 test.txt.bak cron.
nano test.txt
crontab -e
#
# */10 * * * * cp /home/alex/test.txt /home/alex/test.txt.bak | true |
9581f98cc48a4c5b7010cf542f4cc0df5221e700 | Shell | agrbin/v8boinc-remote | /scripts/build-doc.sh | UTF-8 | 344 | 3.390625 | 3 | [] | no_license | #!/bin/bash
rm -rf man
for section in {1..7}; do
num=$(ls doc/*.$section.md 2> /dev/null | wc -l)
[ $num -eq 0 ] && continue;
mkdir -p man/man$section
for file in $(ls doc/*.$section.md); do
./node_modules/ronn/bin/ronn.js --build --roff $file
base=$(basename $file .md)
mv doc/$base.roff man/man$section/$base
done
done
| true |
a96b1bfc9e73c3fb9fddd068b267abbce7fb84e3 | Shell | ARGANS/WEB-SOFRESH | /misc/start_https.sh | UTF-8 | 231 | 2.65625 | 3 | [] | no_license | #!/bin/bash
CERT="server.pem"
if [[ ! -f "$CERT" ]]; then
openssl req -new -x509 -keyout server.pem -out "$CERT" -days 365 -nodes -subj "/C=FR/ST=AM/L=SophiaAntipolis/O=ARGANS/OU=R&D/CN=smos.argans.co.uk"
fi
python3 server.py
| true |
0a67436d415964887c8dcc524113d90594a396c4 | Shell | beards/dotfiles | /scripts/installer/mac/python | UTF-8 | 406 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env bash
set -euo pipefail
# install pyenv
brew install pyenv
# install python3
pyenv install 3:latest
PY_VER=$(pyenv versions | grep --color=no 3\. | tail -1 | tr -d '[:space:]')
pyenv global $PY_VER
# enable python command
export PYENV_ROOT="$HOME/.pyenv"
if [ -d "$PYENV_ROOT" ]; then
source ~/scripts/util_funcs.sh
pathprepend $PYENV_ROOT/bin
eval "$(pyenv init --path)"
fi
| true |
83c05d83034235208f76e3d5b1acda08e9c1fe36 | Shell | G2OR/OneStepUnblockNeteaseMusic | /unblockmusic.sh | UTF-8 | 3,741 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env bash
red='\033[0;31m'
green='\033[0;32m'
yellow='\033[0;33m'
plain='\033[0m'
cur_dir=$(pwd)
# check root
[[ $EUID -ne 0 ]] && echo -e "${red}错误:${plain} 必须使用root用户运行此脚本!\n" && exit 1
# check os
if [[ -f /etc/redhat-release ]]; then
release="centos"
elif cat /etc/issue | grep -Eqi "debian"; then
release="debian"
elif cat /etc/issue | grep -Eqi "ubuntu"; then
release="ubuntu"
elif cat /etc/issue | grep -Eqi "centos|red hat|redhat"; then
release="centos"
elif cat /proc/version | grep -Eqi "debian"; then
release="debian"
elif cat /proc/version | grep -Eqi "ubuntu"; then
release="ubuntu"
elif cat /proc/version | grep -Eqi "centos|red hat|redhat"; then
release="centos"
else
echo -e "${red}未检测到系统版本,请联系脚本作者!${plain}\n" && exit 1
fi
if [ $(getconf WORD_BIT) != '32' ] && [ $(getconf LONG_BIT) != '64' ] ; then
echo "本软件不支持 32 位系统(x86),请使用 64 位系统(x86_64),如果检测有误,请联系作者"
exit -1
fi
os_version=""
# os version
if [[ -f /etc/os-release ]]; then
os_version=$(awk -F'[= ."]' '/VERSION_ID/{print $3}' /etc/os-release)
fi
if [[ -z "$os_version" && -f /etc/lsb-release ]]; then
os_version=$(awk -F'[= ."]+' '/DISTRIB_RELEASE/{print $2}' /etc/lsb-release)
fi
if [[ x"${release}" == x"centos" ]]; then
if [[ ${os_version} -le 6 ]]; then
echo -e "${red}请使用 CentOS 7 或更高版本的系统!${plain}\n" && exit 1
fi
elif [[ x"${release}" == x"ubuntu" ]]; then
if [[ ${os_version} -lt 16 ]]; then
echo -e "${red}请使用 Ubuntu 16 或更高版本的系统!${plain}\n" && exit 1
fi
elif [[ x"${release}" == x"debian" ]]; then
if [[ ${os_version} -lt 8 ]]; then
echo -e "${red}请使用 Debian 8 或更高版本的系统!${plain}\n" && exit 1
fi
fi
install_base() {
if [[ x"${release}" == x"centos" ]]; then
yum install wget curl tar unzip -y
else
apt install wget curl tar unzip -y
fi
}
close_firewall() {
if [[ x"${release}" == x"centos" ]]; then
systemctl stop firewalld
systemctl disable firewalld
elif [[ x"${release}" == x"ubuntu" ]]; then
ufw disable
elif [[ x"${release}" == x"debian" ]]; then
iptables -P INPUT ACCEPT
iptables -P OUTPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -F
fi
}
install_node(){
if [[ x"${release}" == x"centos" ]]; then
curl -sL https://rpm.nodesource.com/setup_10.x | bash -
yum install nodejs git -y
elif [[ x"${release}" == x"ubuntu" ]]; then
curl -sL https://deb.nodesource.com/setup_10.x | bash -
apt install -y nodejs git
elif [[ x"${release}" == x"debian" ]]; then
curl -sL https://deb.nodesource.com/setup_10.x | bash -
apt install -y nodejs git
fi
}
start_blockmuaicservice(){
cd /root
git clone https://github.com/nondanee/UnblockNeteaseMusic.git
cd UnblockNeteaseMusic
#node app.js -s -e https://music.163.com -p 8080:8081
}
install_system(){
cat > /etc/systemd/system/UnblockNeteaseMusic.service <<EOF
[Unit]
Description=UnblockNeteaseMusic
After=network.target
Wants=network.target
[Service]
Type=simple
PIDFile=/var/run/UnblockNeteaseMusic.pid
WorkingDirectory=/root/UnblockNeteaseMusic
ExecStart=/usr/bin/node app.js -s -e https://music.163.com -p 19980:8081
RestartPreventExitStatus=23
Restart=always
[Install]
WantedBy=multi-user.target
EOF
systemctl start UnblockNeteaseMusic
systemctl enable UnblockNeteaseMusic
}
echo -e "${green}开始安装${plain}"
install_base
close_firewall
install_node
start_blockmuaicservice
install_system
echo -e "${green}安装完成端口19980${plain}"
| true |
49271cf5d6235aa36387dc3f9c13d04950f1d361 | Shell | TommyClausner/laminarfMRI | /quickStart/template_session/template_scripts/do_EEGsplitFreqOnVirtChan.sh | UTF-8 | 675 | 3.0625 | 3 | [] | no_license | #!/bin/bash
### BEGIN HEADER ###
### Scriptinator ###
# General information #
# label=do_EEGsplitFreqOnVirtChan
# file=do_EEGsplitFreqOnVirtChan.sh
# useqsub=false
# shortLabel=spFreq
### Script ###
# Input Variables and Paths #
blocks=4
filters=2
rois=2
# Output Variables and Paths #
OutputVarName=none
# Qsub information #
jobtype=batch
walltime="24:00:00"
memory=32gb
# Misc Variables #
MiscVarName=none
### END HEADER ###
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $DIR
for roi in `seq 1 $rois`
do
for filter in `seq 1 $filters`
do
for block in `seq 1 $blocks`
do
$DIR/do_EEGfreqOnVirtChan.sh $block $filter $roi &
sleep 2s
done
done
done
| true |
4c7681e146f20886160dcce096de1a11383d592e | Shell | aa3025/centos7hpc | /scripts/slurm-install/03_slurm_create_config.sh | UTF-8 | 453 | 2.859375 | 3 | [] | no_license | #!/bin/bash
echo "Now you have to create slurm.conf file and put it in this folder as slurm.conf, example provoded, you can edit it, e.g. change your comnpute nodes specs in the last sections."
echo "As a workaround slurmd pid problem, make sure to replace the respective line in slurm.conf with this: \"SlurmdPidFile=/var/log/slurm/slurmd.pid\""
echo "then run ./05_update_slurm.sh"
master=$(hostname)
sed -i 's|=master|='"${master}"'|g' ./slurm.conf
| true |
3ffc3c548433bba3bb8372ea64ac55625b828d84 | Shell | gentoo/gnome | /scripts/find-keywordreq.sh | UTF-8 | 945 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# Wrapper script for find-keywordreq.py.
# Builds a temporary overlay and check for dependencies with repoman
cd $(dirname $0)
#PORTDIR=$(portageq portdir)
PORTDIR=/home/eva/devel/gentoo-x86
rm -rf /tmp/tmptree
rm -rf /tmp/step1.list
python find-keywordreq.py $@ > /tmp/step1.list
while read line
do
ATOM=$(echo $line | cut -f1 -d: | sed -nr 's/(.*)-[0-9.]+.*/\1/p'| xargs echo)
PN=$(echo $ATOM | cut -f2 -d/ | xargs echo)
VERSION=$(echo $line | cut -f1 -d: | sed -nr 's/.*-([0-9.]+.*)/\1/p' |xargs echo)
KEYWORDS=$(echo $line | cut -f2 -d:)
mkdir -p /tmp/tmptree/$ATOM
cp $PORTDIR/$ATOM/$PN-$VERSION.ebuild \
/tmp/tmptree/$ATOM
for keyword in $(echo $KEYWORDS)
do
ekeyword ~$keyword /tmp/tmptree/$ATOM/$PN-$VERSION.ebuild > /dev/null
done
done < /tmp/step1.list
cd /tmp/tmptree
PORTDIR="${PORTDIR}" PORTDIR_OVERLAY="/tmp/tmptree" repoman manifest
PORTDIR="${PORTDIR}" PORTDIR_OVERLAY="/tmp/tmptree" repoman full
| true |
431844b9ca0387b1eb3ccf80c6d9a7ccfa15c2b8 | Shell | mgonzales-cxp/Linux | /check_ext_range.sh | UTF-8 | 622 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# The workstation's extension number is the last four characters of its
# hostname (tail -c 5 returns 4 characters plus the trailing newline, which
# the command substitution strips).
formatted_ext=$(hostname | tail -c 5)
echo "$formatted_ext"

# Seed Chrome bookmarks/preferences for this workstation and register the
# extension with the chosen SIP server.
#   $1 - SIP server number, forwarded to aet-zoip.sh
function CopySeeds() {
    hostname > /home/tagent/.ext
    sip=$1
    cp /usr/src/IT_Files/Linux/new_defaults/aet/web2/Default/Bookmarks /home/tagent/.config/google-chrome/Default
    cp /usr/src/IT_Files/Linux/new_defaults/aet/web2/Default/Preferences /home/tagent/.config/google-chrome/Default
    /usr/src/IT_Files/Linux/xfce-uni/preferences/aet-zoip.sh tagent "$formatted_ext" "$sip"
}

# Pick the SIP server based on the extension range.
# The original used [[ > ]], which compares lexicographically, not
# numerically; use arithmetic comparison instead, and only when the suffix
# really is numeric. The 10# prefix forces base 10 so a suffix with a
# leading zero is not parsed as octal.
if [[ $formatted_ext =~ ^[0-9]+$ ]]; then
    if (( 10#$formatted_ext > 4999 && 10#$formatted_ext < 6000 )); then
        CopySeeds 1
    elif (( 10#$formatted_ext > 5999 && 10#$formatted_ext < 7000 )); then
        CopySeeds 2
    fi
fi
| true |
1daafe7ed3621fffd3be5fffacddcf6578ec616d | Shell | BelfordZ/dotfiles | /setup/docker.sh | UTF-8 | 176 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Install Docker via the platform's package manager.
# NOTE: on macOS $OSTYPE is versioned, e.g. "darwin21.6.0", so it must be
# matched with a glob; the original exact comparison == "darwin" never
# matched and the brew branch was dead code.
if [[ $OSTYPE == darwin* ]]; then
  brew install docker
  brew install docker-machine
fi

if [[ $OSTYPE == linux-gnu* ]]; then
  sudo apt-get install docker
fi
| true |
ea6ee2917bbadb505fd88aa7f48c1873f0a3a8cd | Shell | jsoref/RuslanMahotkin-zabbix | /Scripts/io_stat.sh | UTF-8 | 1,515 | 4.0625 | 4 | [] | no_license | #!/bin/bash
# Sending disk I/O statistics to Zabbix server.
# Usage:
#   io_stat.sh        - collect one iostat sample and push it via zabbix_sender
#   io_stat.sh disks  - print Zabbix low-level-discovery JSON of block devices
# There are no parameters in the command line - sending data
if [ -z $1 ]; then
# Getting the statistics line. Iostat options:
# -d device usage statistics;
# -k statistics in kilobytes per second;
# -x extended statistics;
# -y skip the first statistics (from the moment of loading);
# 5 time in seconds between reports;
# 1 number of reports
RespStr=$(/usr/bin/iostat -dkxy 5 1 2>/dev/null)
# No statistics available - returning service status - 'does not work'
[ $? != 0 ] && echo 0 && exit 1
# Filtering, formatting and sending statistics data to Zabbix server.
# Only whole-disk rows (hdX/sdX/vdX) with all 12 columns are kept; gsub
# turns locale decimal commas into dots; each metric becomes a
# zabbix_sender line of the form "- iostat.<metric>[<disk>] <value>".
(cat <<EOF
$RespStr
EOF
) | awk 'BEGIN {split("disk rrqm_s wrqm_s r_s w_s rkB_s wkB_s avgrq-sz avgqu-sz await svctm util", aParNames)}
$1 ~ /^[hsv]d[a-z]$/ {
gsub(",", ".", $0);
if(NF == 12)
for(i = 2; i <= 12; i++) print "- iostat."aParNames[i]"["$1"]", $i
}' | /usr/bin/zabbix_sender --config /etc/zabbix/zabbix_agentd.conf --host=`hostname` --input-file - >/dev/null 2>&1
# Returning the status of the service - 'works'
echo 1
exit 0
# Disk detection (Zabbix low-level discovery), invoked as "io_stat.sh disks"
elif [ "$1" = 'disks' ]; then
# Disk list string (whole-disk device names only)
DiskStr=`/usr/bin/iostat -d | awk '$1 ~ /^[hsv]d[a-z]$/ {print $1}'`
# Separator for JSON list of names (empty before the first element)
es=''
# Processing the list of disks
for disk in $DiskStr; do
# JSON formatting of the drive name in the output string
OutStr="$OutStr$es{\"{#DISKNAME}\":\"$disk\"}"
es=","
done
# List of disks in JSON format
echo -e "{\"data\":[$OutStr]}"
fi
| true |
40ed80ac578b7edafd0fbe19ae367da5aa18d793 | Shell | gisaia/DockerUtils | /maven-3.5-jdk8-alpine/library.bash | UTF-8 | 890 | 2.703125 | 3 | [] | no_license | Maven::Docker_build () {
local DOCKER_DIRECTORY="$PROJECT_ROOT_DIRECTORY/maven-3.5-jdk8-alpine/Docker"
local REPOSITORY_NAME="gisaia/maven-3.5-jdk8-alpine"
local DOCKER_IMAGE_VERSION="$(cat "$PROJECT_ROOT_DIRECTORY/maven-3.5-jdk8-alpine/docker_image_version")"
cp "$PROJECT_ROOT_DIRECTORY/common/Docker/configure_user_and_group.bash" "$DOCKER_DIRECTORY/"
Docker::build
rm -f "$DOCKER_DIRECTORY/configure_user_and_group.bash"
}
# Publish the maven-3.5-jdk8-alpine image.
# The locals below are read by Docker::publish through bash's dynamic
# scoping, so their names are part of the contract and must not change.
Maven::Docker_publish () {
    local REPOSITORY_NAME='gisaia/maven-3.5-jdk8-alpine'
    local DOCKER_DIRECTORY="$PROJECT_ROOT_DIRECTORY/maven-3.5-jdk8-alpine/Docker"
    local DOCKER_IMAGE_VERSION="$(< "$PROJECT_ROOT_DIRECTORY/maven-3.5-jdk8-alpine/docker_image_version")"
    # Stage the shared user/group helper next to the Dockerfile for the
    # duration of the publish, then clean it up again.
    cp "$PROJECT_ROOT_DIRECTORY/common/Docker/configure_user_and_group.bash" "$DOCKER_DIRECTORY/"
    Docker::publish
    rm -f "$DOCKER_DIRECTORY/configure_user_and_group.bash"
}
84f376f8418d4113cead36a9e1551c166eb8a2f0 | Shell | m0zgen/install-elk | /install-elk.sh | UTF-8 | 5,041 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# --- Base packages, Oracle JDK, Elasticsearch and Kibana -------------------
# $HOSTNAME is used later for the nginx server_name and the TLS cert CN.
HOSTNAME=`hostname`
cd /tmp
yum -y install wget firewalld epel-release
yum -y install nginx httpd-tools unzip
systemctl start firewalld
systemctl enable firewalld
# Oracle requires a license-acceptance cookie to download the JDK rpm.
wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/8u65-b17/jdk-8u65-linux-x64.rpm"
yum -y install jdk-8u65-linux-x64.rpm
#rm jdk-8u65-linux-x64.rpm
# yum install java-openjdk -y
# Trust the Elastic package signing key, then add the Elasticsearch 2.x repo.
rpm --import http://packages.elastic.co/GPG-KEY-elasticsearch
cat > /etc/yum.repos.d/elasticsearch.repo <<EOF
[elasticsearch-2.x]
name=Elasticsearch repository for 2.x packages
baseurl=http://packages.elastic.co/elasticsearch/2.x/centos
gpgcheck=1
gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1
EOF
yum -y install elasticsearch
# Keep the shipped config as .old and bind Elasticsearch to localhost only.
mv /etc/elasticsearch/elasticsearch.yml /etc/elasticsearch/elasticsearch.yml.old
echo 'network.host: localhost' > /etc/elasticsearch/elasticsearch.yml
systemctl start elasticsearch
systemctl enable elasticsearch
# Kibana 4.4 repo; Kibana is also bound to localhost (nginx will proxy it).
cat > /etc/yum.repos.d/kibana.repo <<EOF
[kibana-4.4]
name=Kibana repository for 4.4.x packages
baseurl=http://packages.elastic.co/kibana/4.4/centos
gpgcheck=1
gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1
EOF
yum -y install kibana
mv /opt/kibana/config/kibana.yml /opt/kibana/config/kibana.yml.old
echo 'server.host: "localhost"' > /opt/kibana/config/kibana.yml
systemctl start kibana
systemctl enable kibana.service
# Interactive: prompts for the kibanauser password used by nginx basic auth.
htpasswd -c /etc/nginx/htpasswd.users kibanauser
# Allow nginx (httpd) to open outbound connections to the Kibana backend.
setsebool -P httpd_can_network_connect 1
mv /etc/nginx/nginx.conf /etc/nginx/nginx.conf.old
# Write a fresh nginx.conf. The heredoc delimiter is quoted ('EOF') so the
# shell does NOT expand nginx's own runtime variables ($remote_addr,
# $remote_user, $request, ...); with the original unquoted heredoc they were
# substituted with empty strings, producing a broken log_format directive.
nginx_conf=$(cat <<'EOF'
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
include /etc/nginx/conf.d/*.conf;
}
EOF
)
printf '%s\n' "$nginx_conf" > /etc/nginx/nginx.conf
# Reverse-proxy vhost: nginx on port 80 fronts Kibana on localhost:5601,
# protected by basic auth. The \$-escapes keep nginx's own variables out of
# shell expansion; $HOSTNAME is expanded deliberately.
cat > /etc/nginx/conf.d/kibana.conf <<EOF
server {
listen 80;
server_name $HOSTNAME;
auth_basic "Restricted Access";
auth_basic_user_file /etc/nginx/htpasswd.users;
location / {
proxy_pass http://localhost:5601;
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host \$host;
proxy_cache_bypass \$http_upgrade;
}
}
EOF
systemctl start nginx
systemctl enable nginx
systemctl start kibana
systemctl restart nginx
# Open HTTP in firewalld and apply the change.
firewall-cmd --zone=public --add-port=80/tcp --perm
firewall-cmd --reload
# Logstash 2.2 yum repository, then install Logstash itself.
cat > /etc/yum.repos.d/logstash.repo <<EOF
[logstash-2.2]
name=logstash repository for 2.2 packages
baseurl=http://packages.elasticsearch.org/logstash/2.2/centos
gpgcheck=1
gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch
enabled=1
EOF
yum -y install logstash
# Generate a self-signed certificate for the beats -> logstash TLS link.
# -subj must be double-quoted: with the original single quotes the
# certificate CN was the literal string '$HOSTNAME' instead of this
# machine's hostname, so clients validating the certificate against the
# server name would fail.
cd /etc/pki/tls/
openssl req -subj "/CN=${HOSTNAME}/" -x509 -days 3650 -batch -nodes -newkey rsa:2048 -keyout private/logstash-forwarder.key -out certs/logstash-forwarder.crt
# Logstash pipeline: beats input over TLS on 5044 ...
cat > /etc/logstash/conf.d/02-beats-input.conf <<EOF
input {
beats {
port => 5044
ssl => true
ssl_certificate => "/etc/pki/tls/certs/logstash-forwarder.crt"
ssl_key => "/etc/pki/tls/private/logstash-forwarder.key"
}
}
EOF
# ... grok/date parsing for syslog events ...
cat > /etc/logstash/conf.d/10-syslog-filter.conf <<EOF
filter {
if [type] == "syslog" {
grok {
match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
add_field => [ "received_at", "%{@timestamp}" ]
add_field => [ "received_from", "%{host}" ]
}
syslog_pri { }
date {
match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
}
}
}
EOF
# ... and output into the local Elasticsearch, one index per beat per day.
cat > /etc/logstash/conf.d/30-elasticsearch-output.conf <<EOF
output {
elasticsearch {
hosts => ["localhost:9200"]
sniffing => true
manage_template => false
index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
document_type => "%{[@metadata][type]}"
}
}
EOF
# Validate the pipeline config before (re)starting Logstash.
service logstash configtest
systemctl restart logstash
systemctl enable logstash
# Load the sample Kibana dashboards for beats.
cd /tmp
curl -L -O https://download.elastic.co/beats/dashboards/beats-dashboards-1.1.0.zip
unzip beats-dashboards-*.zip
cd beats-dashboards-1.1.0
./load.sh
# Install the filebeat index template into Elasticsearch.
cd /tmp
curl -O https://raw.githubusercontent.com/elastic/filebeat/master/etc/filebeat.template.json
curl -XPUT 'http://localhost:9200/_template/filebeat?pretty' -d@filebeat.template.json
# Open the beats input port and apply.
firewall-cmd --zone=public --add-port=5044/tcp --perm
firewall-cmd --reload
systemctl restart logstash
f66a0db6e5326fd464d84db74dba2d1ae3d21ac9 | Shell | gaybro8777/indigokepler | /workflows/fg-setup-scripts/kepler-batch/setup.sh | UTF-8 | 234 | 3.03125 | 3 | [
"MIT-Modern-Variant"
] | permissive | #!/bin/bash
# Register the application described by ./application.json with the REST API.
#   $1 - base URI of the API server
#   $2 - bearer token used for authentication
if [ "$#" -ne 2 ]; then
    echo 'Usage: setup URI TOKEN'
    exit 1
fi

payload="$(cat application.json)"
curl -X POST \
     -H "Authorization: Bearer $2" \
     -H 'Content-Type: application/json' \
     -d "$payload" \
     "$1/v1.0/applications"
| true |
0c8c1e9f178f00d9994cdf88d48307fb3150c3c8 | Shell | isaureCdB/Scripts | /score-perbead-paral.sh | UTF-8 | 676 | 2.921875 | 3 | [] | no_license | rec=$1
lig=$2
nlig=`cat $lig|wc -l`
nrec=`cat $rec|wc -l`
x=$RANDOM
$SCRIPTS/splitlines.py $rec /tmp/rec-$x- > rec-$x.list
$SCRIPTS/splitlines.py $lig /tmp/lig-$x- > lig-$x.list
$ATTRACTTOOLS/ensemblize.py $ATTRACTDIR/../structure-single.dat $nlig 2 all > lig.dat
for i in `seq $nrec`; do
#for coarse-grain:
$ATTRACTDIR/attract lig.dat $ATTRACTDIR/../attract.par /tmp/rec-$x-$i /tmp/lig-$x-1.pdb --ens 2 lig-$x.list > /tmp/$x.scores
#for all atom:
#$ATTRACTDIR/attract lig.dat $ATTRACTDIR/../allatoms/allatom.par /tmp/rec-$x-$i /tmp/lig-$x-1.pdb --ens 2 lig-$x.list > /tmp/$x.scores
awk -v i=$i 'BEGIN{j=0}$2=="Energy:"{j+=1; print i, j, $3}' /tmp/$x.scores
done
| true |
1ac1ab39569a8327dca558ada1860a915fd8546e | Shell | AllanWang/DST-Dedicated-Server | /scripts/dst_setup.sh | UTF-8 | 2,876 | 4.1875 | 4 | [] | no_license | #!/bin/bash
# Exposed args
# server_zip - server zip path

# argparse; see https://stackoverflow.com/a/14203146/4407321
# Non-option arguments are collected here and restored after parsing.
POSITIONAL=()
# NOTE(review): 'update' is set here but never read in this script —
# confirm whether it is still needed.
update=true
# Print CLI usage for the setup command to stdout.
print_help () {
    cat <<'USAGE'
usage: dst setup server_zip
--help                show this page
server_zip            path to server zip (create from https://accounts.klei.com/account/game/servers?game=DontStarveTogether)
USAGE
}
# Walk the argument list: -h/--help prints usage and exits, any other
# option is rejected, everything else is kept as a positional argument.
while [[ $# -gt 0 ]]; do
key="$1"
case $key in
    -h|--help)
    echo "Setup DST server"
    print_help
    exit 0
    ;;
    -*) # unknown option
    echo "Invalid option $1"
    print_help
    exit 1
    ;;
    *)
    POSITIONAL+=("$1")
    shift
    ;;
esac
done
set -- "${POSITIONAL[@]}" # restore positional parameters

# Exactly one positional argument (the server zip) is required.
if [[ $# -ne 1 ]]; then
    echo "Missing server zip path; see TODO"
    print_help
    exit 1
fi

# Normalise the zip path to an absolute one (abs_path is a project helper).
server_zip="$1"
server_zip="$(abs_path "$server_zip")"

########################################################################
# Read config
# Overrides at config.cfg
# Defaults at config.cfg.defaults
source scripts/read_config.sh;

# Destination root for installed DST servers, from the project config.
dst_dir="$(config_get dst_dir)"
dst_dir="$(abs_path "$dst_dir")"
echo_header "DST Server Setup"
echo "Validating $(basename -- "$server_zip")"
# All DST zip folders are expected to have:
# - MyDediServer root folder
# - cluster.ini file
if [[ "$(zipinfo -1 "$server_zip")" != "MyDediServer/"*"cluster.ini"* ]]; then
echo "Zip format mismatch; is this zip file a DST server?"
exit 1
fi
########################################################################
echo "Extracting $(basename -- "$server_zip")"
dst_temp_dir="$dst_dir/.temp"
if [ -d "$dst_temp_dir/MyDediServer" ]; then
echo "Please clear $dst_temp_dir before proceeding"
exit 1
fi
unzip -qq "$server_zip" -d "$dst_temp_dir"
# Extract the server name from the cluster ini file: the value of the
# `cluster_name =` key, with ALL whitespace replaced by underscores.
# The original second sed lacked the /g flag and therefore only replaced
# the first whitespace character, contradicting its own comment.
server_name="$(sed -n -e 's/^cluster_name =[[:space:]]*//p' "$dst_temp_dir/MyDediServer/cluster.ini" | sed 's/[[:space:]]/_/g')"
if [ -z "$server_name" ]; then
  echo "Could not find server name"
  exit 1
fi
# Final location of the extracted server; never overwrite an existing one.
server_dir="$dst_dir/$server_name"
if [ -d "$server_dir" ]; then
  echo "$server_dir already exists; aborting"
  exit 1
fi
# Promote the staged extraction to its final name and drop the staging dir.
mv "$dst_temp_dir/MyDediServer" "$server_dir"
rm -r "$dst_temp_dir"
echo "Moved $server_zip to $server_dir"

########################################################################
# Seed per-server config from the template unless it already exists.
server_config_dir="$project_dir/servers/$server_name"
if [ -d "$server_config_dir" ]; then
  echo "Server configs already exist at $server_config_dir"
else
  cp -r "$project_dir/servers/_Template" "$server_config_dir"
  echo "Created server configs at $server_config_dir"
fi

echo "Finished setup! Update the configs, or call \`dst start $server_name\` to start a vanilla server"
| true |
327c678bd43d4641a076a1578efc89834c4648de | Shell | amoljore7/13-UNIX-Shell-Scripting-Programs | /shell_script3.sh | UTF-8 | 649 | 3.71875 | 4 | [] | no_license | #-----------------------------------------------------------------------------
# Print the current working directory, then read a search string from the
# user and report the first regular file in this directory containing it
# (matching lines are printed, followed by the file name).
echo Print current working directory
echo Accept string from user and search that string from all files
#-----------------------------------------------------------------------------
echo "Your current directory"
pwd
echo "Enter string.."
# read -r keeps backslashes in the pattern literal.
read -r str
flag=0
for file in *
do
# Stop scanning once the first match has been found.
if [ "$flag" -eq 0 ]
then
# Only consider regular files (skip directories etc.).
# Quoting "$file" and "$str" fixes breakage on names/patterns with spaces.
if [ -f "$file" ]
then
# Print the matching lines; if any matched, report the file name.
# "--" protects against patterns or file names that start with '-'.
if grep -- "$str" "$file"
then
flag=1
echo "$file"
fi
fi
fi
done
| true |
232d7d9ecc29a9e3d450f472fc0073fe6cbe6c5d | Shell | openbsd/src | /regress/sys/ffs/tests/chown/02.t | UTF-8 | 380 | 2.546875 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
# $FreeBSD: src/tools/regression/fstest/tests/chown/02.t,v 1.1 2007/01/17 01:42:08 pjd Exp $

desc="chown returns ENAMETOOLONG if a component of a pathname exceeded 255 characters"

# The expect helper and the ${name255}/${name256} fixtures (255- and
# 256-character path components) come from the fstest harness sourced by
# the test runner — not defined in this file.
# A 255-character name is legal: create, chown and stat must all succeed.
expect 0 create ${name255} 0644
expect 0 chown ${name255} 65534 65534
expect 65534,65534 stat ${name255} uid,gid
expect 0 unlink ${name255}
# A 256-character component exceeds NAME_MAX, so chown must fail.
expect ENAMETOOLONG chown ${name256} 65533 65533
| true |
d305c7c1330bb04a7fcdf2e57510f9e98d910ef6 | Shell | surajssd/scripts | /shell/add-node-to-k8s/add-node.sh | UTF-8 | 8,061 | 3.421875 | 3 | [] | no_license | #!/bin/bash
# ./add-node.sh 192.168.199.23 /vagrant/certificates/ca.pem /vagrant/certificates/ca-key.pem /vagrant/certificates/ca-config.json

# All four positional arguments are mandatory:
#   $1 - IP address to use for the kubelet
#   $2 - path to ca.pem
#   $3 - path to ca-key.pem
#   $4 - path to ca-config.json
# The original repeated the same if/echo/exit stanza four times; fold the
# checks into one loop that emits the exact same messages.
arg_descriptions=(
    "the ip address of worker node as first arg"
    "path to ca.pem file as second arg"
    "path to ca-key.pem file as third arg"
    "path to ca-config.json file as fourth arg"
)
for n in 1 2 3 4; do
    # ${!n} is the n-th positional parameter (indirect expansion).
    if [ -z "${!n}" ]; then
        echo "Please provide ${arg_descriptions[n-1]}"
        echo "Usage:"
        echo "./script IP_ADDR CA_PEM_PATH CA_KEY_PEM_PATH CA_CONFIG_JSON_PATH"
        exit 1
    fi
done
# Pinned component versions and the validated CLI arguments.
readonly k8s_version="v1.11.2"
readonly cfssl_version="R1.2"
readonly crio_version="v1.11.2"
# This worker's identity: hostname plus the kubelet IP passed as $1.
readonly hostname=$(hostname)
readonly ipaddr=$1
# CA material used by cfssl below.
readonly capem=$2
readonly cakeypem=$3
readonly caconfig=$4

# Trace every command from here on; socat is needed by kubectl port-forward,
# libgpgme11 by the container tooling.
set -x

apt-get update
apt-get install -y socat libgpgme11
# --------------------------------------------------
# download tools
mkdir tools
cd tools
echo "Downloading tools ..."
# Kubernetes node binaries for the pinned release.
curl -sSL \
  -O "https://storage.googleapis.com/kubernetes-release/release/${k8s_version}/bin/linux/amd64/kube-proxy" \
  -O "https://storage.googleapis.com/kubernetes-release/release/${k8s_version}/bin/linux/amd64/kubelet" \
  -O "https://storage.googleapis.com/kubernetes-release/release/${k8s_version}/bin/linux/amd64/kubectl"

# CNI plugins, runc and the CRI-O runtime bundle.
curl -sSL \
  -O "https://github.com/containernetworking/plugins/releases/download/v0.6.0/cni-plugins-amd64-v0.6.0.tgz" \
  -O "https://github.com/opencontainers/runc/releases/download/v1.0.0-rc4/runc.amd64" \
  -O "https://files.schu.io/pub/cri-o/crio-amd64-${crio_version}.tar.gz"

# Unpack CRI-O (provides crio, conmon, pause, crio.conf, seccomp.json, ...).
tar -xf "crio-amd64-${crio_version}.tar.gz"

mv runc.amd64 runc
chmod +x \
  kube-proxy \
  kubelet \
  kubectl \
  runc

# --------------------------------------------------
# install those tools
mkdir -p \
  /etc/containers \
  /etc/cni/net.d \
  /etc/crio \
  /opt/cni/bin \
  /usr/local/libexec/crio \
  /var/lib/kubelet \
  /var/lib/kube-proxy \
  /var/lib/kubernetes \
  /var/run/kubernetes

tar -xvf cni-plugins-amd64-v0.6.0.tgz -C /opt/cni/bin/
cp runc /usr/local/bin/
cp {crio,kube-proxy,kubelet,kubectl} /usr/local/bin/
cp {conmon,pause} /usr/local/libexec/crio/
cp {crio.conf,seccomp.json} /etc/crio/
cp policy.json /etc/containers/

# cfssl/cfssljson are used below to generate the node certificates.
curl -sSL \
  -O "https://pkg.cfssl.org/${cfssl_version}/cfssl_linux-amd64" \
  -O "https://pkg.cfssl.org/${cfssl_version}/cfssljson_linux-amd64"
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64
mv -v cfssl_linux-amd64 /usr/local/bin/cfssl
mv -v cfssljson_linux-amd64 /usr/local/bin/cfssljson
# --------------------------------------------------
# generate config
mkdir -p ~/config
cd ~/config

# generate the 99-loopback.conf common for all the workers, can be copied
cat > 99-loopback.conf <<EOF
{
    "cniVersion": "0.3.1",
    "type": "loopback"
}
EOF

# generate the kube-proxy cert common for all the workers, can be copied
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:node-proxier",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF

# Sign the kube-proxy CSR with the cluster CA supplied on the command line.
cfssl gencert \
  -ca=${capem} \
  -ca-key=${cakeypem} \
  -config=${caconfig} \
  -profile=kubernetes \
  kube-proxy-csr.json | cfssljson -bare kube-proxy

# Build kube-proxy's kubeconfig: cluster, credentials, default context.
kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=${capem} \
  --embed-certs=true \
  --server=https://192.168.199.10:6443 \
  --kubeconfig="kube-proxy.kubeconfig"

kubectl config set-credentials kube-proxy \
  --client-certificate="kube-proxy.pem" \
  --client-key="kube-proxy-key.pem" \
  --embed-certs=true \
  --kubeconfig="kube-proxy.kubeconfig"

kubectl config set-context default \
  --cluster=kubernetes-the-hard-way \
  --user=kube-proxy \
  --kubeconfig="kube-proxy.kubeconfig"

kubectl config use-context default --kubeconfig="kube-proxy.kubeconfig"
# generate the kube-proxy.service common for all the workers, can be copied
# (the doubled backslashes emit single backslashes into the unit file).
cat > kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-proxy \\
  --cluster-cidr=10.200.0.0/16 \\
  --kubeconfig=/var/lib/kube-proxy/kubeconfig \\
  --proxy-mode=iptables \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# generate the service file for the crio daemon, specific to node
# (streams on this node's IP, runs the runc we installed above).
cat > ${hostname}-crio.service <<EOF
[Unit]
Description=CRI-O daemon
Documentation=https://github.com/kubernetes-incubator/cri-o

[Service]
ExecStart=/usr/local/bin/crio --stream-address ${ipaddr} --runtime /usr/local/bin/runc --registry docker.io
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF

# generate the worker certs, specific to node
cat > ${hostname}-csr.json <<EOF
{
  "CN": "system:node:${hostname}",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:nodes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF

# Certificate is valid for both this node's hostname and its IP.
cfssl gencert \
  -ca=${capem} \
  -ca-key=${cakeypem} \
  -config=${caconfig} \
  -hostname="${hostname},${ipaddr}" \
  -profile=kubernetes \
  "${hostname}-csr.json" | cfssljson -bare "${hostname}"

# generate kubeconfig specific to the node
# NOTE(review): the API server address here (192.168.199.40) differs from
# the one in the kube-proxy kubeconfig above (192.168.199.10) — confirm
# which endpoint is intended.
kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=${capem} \
  --embed-certs=true \
  --server=https://192.168.199.40:6443 \
  --kubeconfig="${hostname}.kubeconfig"

kubectl config set-credentials system:node:${hostname} \
  --client-certificate="${hostname}.pem" \
  --client-key="${hostname}-key.pem" \
  --embed-certs=true \
  --kubeconfig="${hostname}.kubeconfig"

kubectl config set-context default \
  --cluster=kubernetes-the-hard-way \
  --user=system:node:${hostname} \
  --kubeconfig="${hostname}.kubeconfig"

kubectl config use-context default --kubeconfig="${hostname}.kubeconfig"
# generate the kubelet service specific to the node
# (uses the per-node certs generated above and the CRI-O socket; ${hostname}
# and ${ipaddr} are expanded now, the \\ sequences become literal \ in the
# unit file).
cat > ${hostname}-kubelet.service <<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=crio.service
Requires=crio.service

[Service]
ExecStart=/usr/local/bin/kubelet \\
  --anonymous-auth=false \\
  --authorization-mode=Webhook \\
  --client-ca-file=/var/lib/kubernetes/ca.pem \\
  --allow-privileged=true \\
  --cluster-dns=10.32.0.10 \\
  --cluster-domain=cluster.local \\
  --container-runtime=remote \\
  --container-runtime-endpoint=unix:///var/run/crio/crio.sock \\
  --image-pull-progress-deadline=2m \\
  --image-service-endpoint=unix:///var/run/crio/crio.sock \\
  --kubeconfig=/var/lib/kubelet/kubeconfig \\
  --network-plugin=cni \\
  --register-node=true \\
  --runtime-request-timeout=10m \\
  --tls-cert-file=/var/lib/kubelet/${hostname}.pem \\
  --tls-private-key-file=/var/lib/kubelet/${hostname}-key.pem \\
  --node-ip=${ipaddr} \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# --------------------------------------------------
# install above generated config
cp 99-loopback.conf /etc/cni/net.d
cp kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig
cp kube-proxy.service /etc/systemd/system/
cp "${hostname}-crio.service" /etc/systemd/system/crio.service
cp ${capem} /var/lib/kubernetes/
cp "${hostname}.pem" "${hostname}-key.pem" /var/lib/kubelet
cp "${hostname}.kubeconfig" /var/lib/kubelet/kubeconfig
cp "${hostname}-kubelet.service" /etc/systemd/system/kubelet.service

# Register and start all three node services.
systemctl daemon-reload
systemctl enable crio kubelet kube-proxy
systemctl start crio kubelet kube-proxy
| true |
7eb7b2d23f44465432e4edeebc97282a5f6d97b7 | Shell | schlueter/misc | /scripts/init-linux.sh | UTF-8 | 1,267 | 4 | 4 | [] | no_license | #!/bin/sh
set -e

############################################
# Initialize specific distributions
############################################
# Distribution name from os-release, e.g. 'Ubuntu' or 'Arch Linux'.
DISTRO="$(sed -n 's/^NAME="\?\([^"]*\)"\?/\1/p' /etc/os-release)"
case "$DISTRO" in
    (Ubuntu|Arch\ Linux) echo "Detected $DISTRO, running distribution initialization script:" >&2;;
    *)
        echo 'Could not detect a known linux distribution, skipping distribution init...' >&2
        exit 5
        ;;
esac

# 'Arch Linux' -> 'Arch-Linux' so the name is usable in a file name.
normalized_distro="$(echo "$DISTRO" | sed 's/ /-/g')"
distro_init_script=init-linux-distro-"$normalized_distro".sh
[ -f "$distro_init_script" ] && "./$distro_init_script"

############################################
# Setup for specific init systems
############################################
# Name of PID 1, e.g. 'systemd'.
init_system="$(ps -p 1 -o comm=)"
init_system_script=setup-for-"$init_system".sh
# BUG FIX: invoke with an explicit ./ prefix like the distro script above;
# the bare "$init_system_script" relied on "." being in $PATH.
[ -f "$init_system_script" ] && "./$init_system_script"

############################################
# Desktop entries and profile symlinks
############################################
mkdir -p "$HOME"/.local/share/applications
# NOTE: ln -s still aborts (via set -e) if the links already exist; that
# matches the original behavior.
ln -s "$(git rev-parse --show-toplevel)"/transmission-oneoff.desktop "$HOME"/.local/share/applications/transmission-oneoff.desktop
ln -s "$(git rev-parse --show-toplevel)"/xprofile "$HOME"/.xprofile
9066bb5f7742fda96104ad9c995d7d67b781a507 | Shell | input-output-hk/mailchimp-subscribe | /scripts/update_coverage.sh | UTF-8 | 521 | 3.53125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# ANSI escape codes used to colour the error banner.
RED='\033[0;31m'
NC='\033[0m'

# Coverage may only be published from CI; anywhere else, print a red error
# banner and fail. (Guard-clause form of the original if/else.)
if [ "$CI" != "true" ]; then
  echo -e "${RED}---------------------------------"
  echo "------------ ERROR ------------"
  echo "---------------------------------"
  echo ""
  echo "Can only update coverage on CI"
  echo ""
  echo -e "---------------------------------${NC}"
  exit 1
fi

echo "On CI, continuing with updating coverage"
# Coveralls groups reports by job id; reuse the commit SHA from CircleCI.
export COVERALLS_SERVICE_JOB_ID=$CIRCLE_SHA1
npm test -- --coverage --coverageReporters=text-lcov | ./node_modules/.bin/coveralls
6d0531a0e47720b76170d2a058679115d916dcc0 | Shell | digideskio/dokku-registry | /commands | UTF-8 | 1,379 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
[[ " help registry:help " == *" $1 "* ]] || exit "$DOKKU_NOT_IMPLEMENTED_EXIT"
set -eo pipefail; [[ $DOKKU_TRACE ]] && set -x
case "$1" in
help | registry:help)
help_content_func () {
declare desc="return registry plugin help content"
cat<<help_content
registry <app>, Display the configured registry status for an application
registry:login <app> <server> <username> <password>, Logs into a docker registry
registry:pull <app> <tag>, Pull an image from the remote registry
registry:push <app> <tag>, Push an image to the remote registry
registry:set-registry <app> <registry>, Set the registry for an app
registry:set-username <app> <username>, Set the username for an app
registry:tag-latest-local <app>, Shows latest local tag version
registry:unset-registry <app>, Unsets the registry for an app
registry:unset-username <app>, Unsets the username for an app
help_content
}
if [[ $1 = "registry:help" ]] ; then
echo -e 'Usage: dokku registry[:COMMAND] <app> [<username>] [<tag>] [<username>] [<password>]'
echo ''
echo 'Run registry-related commands on an application.'
echo ''
echo 'Additional commands:'
help_content_func | sort | column -c2 -t -s,
else
help_content_func
fi
;;
*)
exit "$DOKKU_NOT_IMPLEMENTED_EXIT"
;;
esac
| true |
c72f71ad1cf2690cfc12adebb579079c95e8402d | Shell | TeenQuotes/developers | /update.sh | UTF-8 | 935 | 3.015625 | 3 | [
"MIT"
] | permissive | # Update code
# Deploy script: refresh code + docs and publish them under public/docs.
git pull

# Update dependencies
composer update

# Pull the latest version of the API
git clone https://github.com/TeenQuotes/api-documentation tmp-api
cd tmp-api
# The first file is introduction.md for Codex
mv README.md introduction.md
# Replace URLs (strip the GitHub blob prefix so links become site-relative).
# NOTE(review): this passes BOTH the shell glob *.md (expanded in tmp-api)
# and the {} file from find to sed — every file ends up processed once per
# find hit; looks unintended, verify before changing.
find ./ -type f -exec sed -i 's#https://github.com/TeenQuotes/api-documentation/blob/master##g' *.md {} \;
cd ..

# Pull the latest version of the doc for deep links
git clone https://github.com/TeenQuotes/deep-links tmp-deep-links

# Remove old directories
rm -rf public/docs/api
rm -rf public/docs/deep-links

# Create directories
mkdir -p public/docs/api
mkdir -p public/docs/deep-links

# Move directories (docs are published under a 1.0 version folder)
mv tmp-api public/docs/api/1.0
mv tmp-deep-links public/docs/deep-links/1.0

# Clear cache
php artisan cache:clear

rm -rf tmp-api
rm -rf tmp-deep-links

# Clear temporary files from sed (backup files left in the working dir)
rm -f sed*

# Fix permissions
chown -R www-data:www-data /var/www/codex
| true |
b01c927073d1189c9820c44118bd02d51d46bbac | Shell | ComplianceAsCode/content | /linux_os/guide/system/logging/rsyslog_accepting_remote_messages/rsyslog_nolisten/bash/shared.sh | UTF-8 | 911 | 3.109375 | 3 | [
"BSD-3-Clause"
] | permissive | # platform = multi_platform_all
# reboot = false
# strategy = configure
# complexity = low
# disruption = low

# ERE matching legacy-format rsyslog directives that enable remote log
# reception (TCP/UDP/RELP listeners or the matching module loads).
legacy_regex='^\s*\$(((Input(TCP|RELP)|UDP)ServerRun)|ModLoad\s+(imtcp|imudp|imrelp))'
# ERE matching the same listeners in RainerScript syntax.
rainer_regex='^\s*(module|input)\((load|type)="(imtcp|imudp)".*$'

config_changed=false

# Comment out every line matching $1 in every rsyslog config file that
# contains it, and remember whether anything was modified.
comment_out_matches () {
    local pattern="$1"
    local matched_files=()
    readarray -t matched_files < <(grep -l -E -r "$pattern" /etc/rsyslog.conf /etc/rsyslog.d/)
    local target
    for target in "${matched_files[@]}"; do
        sed -E -i "/$pattern/ s/^/# /" "$target"
        config_changed=true
    done
}

comment_out_matches "$legacy_regex"
comment_out_matches "$rainer_regex"

# Reload rsyslog only if a listener directive was actually disabled.
if $config_changed; then
	systemctl restart rsyslog.service
fi
0ac19e2d709760b865a96b544ef787f639e146c8 | Shell | krecu/dockerizer | /app.sh | UTF-8 | 4,921 | 3.25 | 3 | [] | no_license | #!/usr/bin/env bash
# composer install
# Run "composer install" for every ISZ service module, each in a one-off
# docker-compose "composer" container. Output and command order match the
# original hand-unrolled version exactly.
function composerInstall
{
    echo "ISZ => exec composer install"
    echo "*******************************************************"
    local service
    for service in application auth expertise storage integration filestorage; do
        echo "ISZ => composer install -d /usr/share/nginx/html/${service} -v"
        docker-compose run --rm composer composer install -d "/usr/share/nginx/html/${service}" -v
    done
    echo "*******************************************************"
}
# Import the RabbitMQ exchange/queue definitions (/schema.json baked into
# the container image) via rabbitmqadmin inside the running rabbitmq
# container. Credentials match the compose configuration.
function rabbitSchema
{
    docker exec -it dockerizer_rabbitmq_1 /usr/local/bin/rabbitmqadmin -u root -p r00-t import /schema.json
}
# up app
# Full bring-up: build images, install PHP deps, start the stack, then
# register DNS entries, fix volume permissions, load the RabbitMQ schema
# and scale the consumers. Order matters: composer runs between build and
# up so containers start with their dependencies in place.
function appStart
{
    echo "ISZ => Build/UP all containers"
    echo "*******************************************************"
    echo "ISZ => docker-compose build"
    docker-compose build --force-rm
    composerInstall
    echo "ISZ => docker-compose up"
    docker-compose up -d
    dnsInstall
    fixPerm
    rabbitSchema
    appConsumer
    echo "*******************************************************"
}
# up consumer
# Scale the three queue-consumer services to their working replica counts.
function appConsumer
{
    docker-compose scale consumer_verify=2
    docker-compose scale consumer_storage=10
    docker-compose scale consumer_expertise=5
}
# put dns to hosts
# Rewrite /etc/hosts entries for every running container belonging to the
# isz.dev project: first delete all old isz.dev lines (keeping an _bak
# backup of /etc/hosts), then append one "<ip> <name>.isz.dev" line per
# matching container.
function dnsInstall
{
    echo "ISZ => Update dns"
    echo "*******************************************************"
    CONTAINER_DOMAIN=isz.dev
    HOST_STRING=''
    # Drop stale isz.dev entries; sed -i_bak leaves /etc/hosts_bak behind.
    sed -i_bak -e '/isz\.dev/d' /etc/hosts >> /dev/null
    for CID in `docker ps -q`; do
        # Container IP on the compose default network, hostname and project
        # domain (Domainname marks containers that belong to isz.dev).
        IP=`docker inspect --format '{{ .NetworkSettings.Networks.dockerizer_default.IPAddress }}' $CID`
        NAME=`docker inspect --format '{{ .Config.Hostname }}' $CID`
        PRJ=`docker inspect --format '{{ .Config.Domainname }}' $CID`
        if [ "$PRJ" = "isz.dev" ];
        then
            echo "$IP http://$NAME.$CONTAINER_DOMAIN"
            echo "$IP $NAME.$CONTAINER_DOMAIN" >> /etc/hosts
            HOST_STRING="$HOST_STRING
            $IP $NAME.$CONTAINER_DOMAIN"
        fi
    done
    echo "*******************************************************"
}
# list hosts
# Print "<ip> http://<name>.isz.dev" for every running isz.dev container,
# without modifying /etc/hosts (read-only twin of dnsInstall).
function hostsList
{
    echo "ISZ => List hosts"
    echo "*******************************************************"
    CONTAINER_DOMAIN=isz.dev
    for CID in `docker ps -q`; do
        IP=`docker inspect --format '{{ .NetworkSettings.Networks.dockerizer_default.IPAddress }}' $CID`
        NAME=`docker inspect --format '{{ .Config.Hostname }}' $CID`
        PRJ=`docker inspect --format '{{ .Config.Domainname }}' $CID`
        if [ "$PRJ" = "isz.dev" ];
        then
            echo "$IP http://$NAME.$CONTAINER_DOMAIN"
        fi
    done
    echo "*******************************************************"
}
# stop and remove containers
# Tear down the whole compose stack, removing orphaned containers too.
appStop () {
    local banner="*******************************************************"
    echo "ISZ => Stop/Remove all containers"
    echo "$banner"
    echo "ISZ => docker-compose down --remove-orphans"
    docker-compose down --remove-orphans
    echo "$banner"
}
# fix permissions
# Make the shared code volume world-writable so every container can write
# into it.
fixPerm () {
    local code_volume="./volumes/code"
    chmod -R 777 "$code_volume"
}
# restart app
# Restart all containers in place, then refresh /etc/hosts since container
# IPs may have changed across the restart.
function appRestart
{
    echo "ISZ => Restart all containers"
    echo "*******************************************************"
    echo "ISZ => docker-compose restart"
    docker-compose restart
    echo "*******************************************************"
    dnsInstall
}
# Command dispatcher: maps the first CLI argument onto its handler function.
case "$1" in
"rabbit" ) rabbitSchema;;
"perm" ) fixPerm;;
"start" ) appStart;;
"restart" ) appRestart;;
"stop" ) appStop;;
"dns" ) dnsInstall;;
"list" ) hostsList;;
"help" )
printf "\n"
printf "*******************************************************\n"
printf "** Simple CLI for ISZ project developer **\n"
printf "** Usage: ./app.sh [COMMAND] **\n"
printf "*******************************************************\n\n"
printf "Commands:\n"
# BUG FIX: the help text listed "install" (not a supported command) and
# "list-dns" (the actual command is "list"); it now matches the dispatcher.
echo " rabbit Create the RabbitMQ schema"
echo " perm Fix permissions on ./volumes/code"
echo " start Build and run project with update dnsmasq"
echo " restart Restart all containers and refresh dns"
echo " stop Kill project"
echo " dns Update dns list"
echo " list Display hosts list"
printf "\n"
printf "git submodule foreach git pull origin master"
esac
| true |
499541c96fc313b9b3055f04bbebec9a58ba5757 | Shell | diegorubin/search-in-list | /minio/sync-entries | UTF-8 | 523 | 3.4375 | 3 | [] | no_license | #!/bin/sh
# Download the entries archive from the MinIO bucket and unpack it into /tmp.
# Relies on MINIO_BUCKET being both the mc alias and the bucket name.
update_entries()
{
echo "syncing file"
mc cp $MINIO_BUCKET/$MINIO_BUCKET/entries_list.tar.gz /tmp/entries_list.tar.gz
tar -zxvf /tmp/entries_list.tar.gz -C /tmp
}
# Register the MinIO server under an alias named after the bucket.
mc alias set $MINIO_BUCKET http://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS_KEY $SECRET_KEY
# Initial sync on startup.
update_entries
# Re-sync whenever a new .tar.gz object is put into the bucket; mc watch
# events are streamed through a fifo and trigger one update per event.
while :
do
mkfifo /tmp/minio_events || echo "queue exists!"
mc watch $MINIO_BUCKET --events put --suffix ".tar.gz" > /tmp/minio_events &
while read event; do
echo "$event"
update_entries
done </tmp/minio_events
done
| true |
c092ed97869d879490d951d4abe4dbfcd4c7c897 | Shell | lewislabs/mongo3-cluster | /startup/mongo-rs-setup.sh | UTF-8 | 1,479 | 2.890625 | 3 | [] | no_license | #!/bin/bash
echo "Running mongo-rs-setup.sh"

# Resolve each member's IP from the first line of a single ping probe.
# NOTE(review): these IPs are only echoed for diagnostics; the replica-set
# config below uses the container hostnames, not these addresses.
MONGODB1=$(ping -c 1 mongo1 | head -1 | cut -d "(" -f 2 | cut -d ")" -f 1)
MONGODB2=$(ping -c 1 mongo2 | head -1 | cut -d "(" -f 2 | cut -d ")" -f 1)
MONGODB3=$(ping -c 1 mongo3 | head -1 | cut -d "(" -f 2 | cut -d ")" -f 1)

echo "MONGO1=$MONGODB1"
echo "MONGO2=$MONGODB2"
echo "MONGO3=$MONGODB3"

# Block until the mongod on host $1 serves its HTTP status endpoint and
# reports an uptime (i.e. the process is up). Extracted from three
# previously duplicated wait loops.
wait_for_mongo() {
    echo "Waiting for startup on $1.."
    until curl http://$1:28017/serverStatus\?text\=1 2>&1 | grep uptime | head -1; do
        printf '.'
        sleep 1
    done
}

wait_for_mongo mongo1
wait_for_mongo mongo2
wait_for_mongo mongo3

echo "Replicas started..."

# Initialise (or force-reconfigure) the 3-member replica set "rs";
# mongo1 gets a higher priority so it is preferred as primary.
mongo --host mongo1 <<EOF
var cfg = {
    "_id": "rs",
    "version": 1,
    "members": [
        {
            "_id": 0,
            "host": "mongo1:27017",
            "priority": 2
        },
        {
            "_id": 1,
            "host": "mongo2:27017",
            "priority": 1
        },
        {
            "_id": 2,
            "host": "mongo3:27017",
            "priority": 1
        }
    ]
};
try{
    var config = rs.config();
    rs.reconfig(cfg, { force: true });
}catch(err){
    rs.initiate(cfg);
}
EOF
| true |
d7609d94465c0902b9b3974a850416bdbeeec6a2 | Shell | mreetyunjaya/bash-securizer | /libs/domain | UTF-8 | 1,390 | 3.828125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Print all DNS records for the given domain (verbose "host -a" query).
function getDNSSettingsFromDomain {
local domain="$1"
host -a "$domain"
}
# Extract "host[:port]" from a URL: strip the scheme ("proto://"), then the
# path, then any "user@" credentials prefix.
function getFullDomainFromUrl {
local address="$1"
local hostpart
hostpart=$(echo "$address" | sed -e 's|^[^/]*//||' -e 's|/.*$||')
# Keep only the part after the last "@".
echo "${hostpart##*@}"
}
# Reduce a fully qualified name to its last two dot-separated labels
# (e.g. "a.b.example.com" -> "example.com"); names without a dot pass
# through unchanged.
function getDomainFromFullDomain {
local fqdn="$1"
echo "$fqdn" | awk -F. '{ if (NF < 2) print $0; else print $(NF-1) "." $NF }'
}
# Resolve the domain via dig and print the last A record returned.
function getIpFromDomain {
local domain="$1"
dig A +short "$domain" | tail -1
}
# Print the sorted, de-duplicated nameserver host names of the registrable
# domain of $1, or "Unknown" (with return status 1) when none are found.
function getNameserver {
local domain=$(getDomainFromFullDomain "$1")
# host -t NS output: take the last word per line, strip the trailing dot.
local nameserver=$(host -t NS "$domain" | rev | cut -d" " -f1 | rev | sed 's/\.$//' | sort -u)
if [ "$nameserver" != "" ]; then
echo "$nameserver"
return 0
fi
echo "Unknown"
return 1
}
# Look up the hosting provider of an IP via whois: collect the organisation
# name(s) and description line(s), de-duplicate, and print them; "Unknown"
# when the record yields nothing.
function getProviderFromIP {
local ip="$1"
local whoisResult
whoisResult=$(whois "$ip")
local provider=""
# Organisation name as reported by RIPE ("org-name:") or ARIN ("OrgName:").
local orgName
orgName=$(
echo "$whoisResult" | \
grep --color=never "org-name:\|OrgName:" | \
cut -d":" -f2 | \
sed 's/^[ ]*//g'
)
# Free-form description lines ("descr:").
local descr
descr=$(
echo "$whoisResult" | \
grep --color=never "descr:" | \
cut -d":" -f2 | \
sed 's/^[ ]*//g'
)
# BUG FIX: the original tested the literal strings "orgName"/"descr"
# (missing "$") and discarded the result via ";", so the appends always ran
# unconditionally. Test the variables and append only when non-empty.
[ -n "$orgName" ] && provider=$(echo -en "$provider\n$orgName")
[ -n "$descr" ] && provider=$(echo -en "$provider\n$descr")
[ "$provider" == "" ] && provider="Unknown"
# De-duplicate while preserving order, and drop blank lines.
echo "$provider" | awk '!a[$0]++' | awk NF
}
| true |
047e84062a2bdc9ff3c7681ce813f578e9143160 | Shell | nagisayuu/axc | /geth/init.sh | UTF-8 | 217 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Initialise a private geth data directory from the project genesis block.
# IDIOM FIX: test the command directly instead of checking $? afterwards.
if geth \
    --datadir /home/vagrant/geth/eth-network \
    init /home/vagrant/geth/genesis.json; then
    echo "geth init succeeded"
else
    echo "error occured when initializing geth, confirm log!"
fi
| true |
8998b3c4541833cbcc6a5b670cea51cee7991599 | Shell | asr1191/FOSSLab | /EXP 6/unames.sh | UTF-8 | 479 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# Usage: unames.sh <file> <username>
# Appends <username> to <file> unless it is already present (one name per line).
# BUG FIX: the original exited with status 1 on every path, including the
# success paths; it also carried a dead "t" flag (set before an unconditional
# exit). Success now exits 0, errors exit 1, and guard clauses replace the
# deep nesting.
if [ "$#" -ne 2 ]
then
echo "Wrong number of parameters"
exit 1
fi
if [ ! -f "$1" ]
then
echo "File does not exist"
exit 1
fi
# Scan the file line by line for an exact match of the username.
while IFS= read -r line
do
if [ "$line" == "$2" ]
then
echo "Username exists in the file"
exit 0
fi
done < "$1"
# Not found: record the new username.
echo "Username does not exist in the file"
echo "$2" >> "$1"
echo "Username appended to the end of the file"
exit 0
| true |
bcb7efbb40a172fbde7616851153b8f3c8da3b66 | Shell | liubailing/electronux | /app/services/scripts/startup-new.sh | UTF-8 | 899 | 3.5 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Create or update a freedesktop autostart .desktop entry.
# Usage: startup-new.sh --dir <autostart dir> --name <name> [--comment <text>] --exec <cmd>
# targetDir=/home/nojsja/.config/autostart/
targetDir=""
name=""
comment=""
exec=""
hidden="false"
# Parse paired options: each branch shifts once for the value, and the shift
# at the loop end consumes the flag itself.
while [ -n "$1" ]; do
case "$1" in
--dir | -d )
targetDir="$2"
shift
;;
--name | -n )
name="$2"
shift
;;
--comment | -c )
comment="$2"
shift
;;
--exec | -e )
exec="$2"
shift
;;
esac
shift;
done
# dir, name and exec are mandatory.
if [ -z "$targetDir" -o -z "$name" -o -z "$exec" ]; then
exit 1
fi
# Remove any previous values of these keys, then append the new ones.
touch $targetDir/"$name.desktop"
sed -i "/^Name=/d" $targetDir/"$name.desktop"
sed -i "/^Comment=/d" $targetDir/"$name.desktop"
sed -i "/^Exec=/d" $targetDir/"$name.desktop"
sed -i "/^Hidden=/d" $targetDir/"$name.desktop"
echo "Name=$name" >> $targetDir/"$name.desktop"
echo "Comment=$comment" >> $targetDir/"$name.desktop"
echo "Exec=$exec" >> $targetDir/"$name.desktop"
echo "Hidden=$hidden" >> $targetDir/"$name.desktop"
exit 0
| true |
990c3a29be20ffdb79a8a9e2b2af080d7691b485 | Shell | Gioyik/firebox-packer | /scripts/cleanup.sh | UTF-8 | 949 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Pre-packaging cleanup for a Vagrant/Packer Debian image: remove
# host-specific state, shrink caches, and scrub history/logs before boxing.
SSH_USER=${SSH_USERNAME:-vagrant}
echo "cleaning up dhcp leases"
if [ -d "/var/lib/dhcp" ]; then
rm /var/lib/dhcp/*
fi
echo "cleaning up udev rules"
rm /etc/udev/rules.d/70-persistent-net.rules
# NOTE(review): mkdir on what looks like a file name -- presumably intended
# to recreate the rules path so udev does not regenerate it; confirm intent.
mkdir /etc/udev/rules.d/70-persistent-net.rules
rm -rf /dev/.udev/
rm /lib/udev/rules.d/75-persistent-net-generator.rules
echo "Adding a 2 sec delay to the interface up, to make the dhclient happy"
echo "pre-up sleep 2" >> /etc/network/interfaces
echo "cleaning up tmp"
rm -rf /tmp/*
echo "cleanup apt cache"
apt-get -y autoremove --purge
apt-get -y clean
apt-get -y autoclean
echo "installed packages"
dpkg --get-selections | grep -v deinstall
echo "remove Bash history"
unset HISTFILE
rm -f /root/.bash_history
rm -f /home/${SSH_USER}/.bash_history
echo "clean up log files"
find /var/log -type f | while read f; do echo -ne '' > $f; done;
echo "clearing last login information"
>/var/log/lastlog
>/var/log/wtmp
>/var/log/btmp
| true |
6fd75b01c9d682a07943812b41bd604a3364f697 | Shell | braph/snippets | /sh/getopt.sh | UTF-8 | 279 | 3.0625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Minimal getopts skeleton: -n <name> is stored in NAME, -t <arg> is accepted
# but ignored. The leading ":" in the optstring enables silent error mode,
# so missing arguments arrive as ":" and unknown options as "?" (caught by *).
# NOTE(review): NAME is captured but never used in this snippet.
unset CDPATH
set -u +o histexpand
while getopts ":n:t:" options; do
case "${options}" in
n) NAME=${OPTARG}
;;
t)
;;
:)
echo "Error: -${OPTARG} requires an argument."
exit 1 ;;
*) exit 1 ;;
esac
done
| true |
8a36607d2ac74b8496def81bae145a595faf3105 | Shell | ivmfnal/cms_consistency | /monitor/old/docker/run.sh | UTF-8 | 340 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Start the cmscon-<app> container ($1 = app name). With "shell" as the
# second argument, run interactively with bash; otherwise run detached.
prefix=cmscon
if [ "$2" == "shell" ]; then
docker run --rm --name appsrv-$1 \
    -ti \
    -v /storage/local/data1/ivm/logs:/home/appsrv/logs \
    -p 9093:9093 \
    ${prefix}-$1 /bin/bash
else
docker run --rm --name appsrv-$1 \
    -d \
    -v /storage/local/data1/ivm/logs:/home/appsrv/logs \
    -p 9093:9093 \
    ${prefix}-$1
fi
| true |
a670fda7348247e31bc36545600f887a963fb26e | Shell | XAOS-Interactive/FFMPEG-Installer | /installers/libfdk.sh | UTF-8 | 270 | 3.125 | 3 | [
"MIT"
] | permissive | libfdkInstall()
{
    # Build and install the Fraunhofer FDK AAC encoder into $BUILD_DIR,
    # skipping the clone/build when the source directory already exists.
    if [ ! -d $libfdk_aac ]; then
        echo "Installing AAC audio encoder..."
        git clone --depth 1 https://github.com/mstorsjo/fdk-aac
        cd $libfdk_aac
        autoreconf -fiv
        ./configure --prefix="$BUILD_DIR" --disable-shared
        make
        make install
        echo
    fi
} | true |
45fac3bcf9ce8b4a61202fcd2188bf0a1b3c2cbe | Shell | SimonLammer/dotfiles | /data/xfce4-genmon-plugin/scripts/cpu_util.sh | UTF-8 | 612 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# xfce4-genmon plugin script: render current CPU utilization as a colored bar
# plus a tooltip. Requires $DOTFILES_HOME and its coloredbar/cpu_utilization
# helper scripts.
coloredbar="$DOTFILES_HOME/data/xfce4-genmon-plugin/scripts/coloredbar.sh"
util=`awk -f $DOTFILES_HOME/data/scripts/cpu_utilization.awk`
threads=`grep "processor" /proc/cpuinfo | wc -l`
# Log-scale the percentage (log(util+1)/log(101)) so low loads stay visible.
utillog=`awk "BEGIN{printf(\"%.0f\n\", 100*log($util + 1)/log(101))}"`
# Threshold corresponding to one fully busy core, on the same log scale.
low=`awk "BEGIN{printf(\"%.0f\n\", 100*log(100/$threads + 1)/log(101))}"`
################################################################################
# 25% util ~ 70% utillog
# 87% util ~ 97% utillog
$coloredbar $utillog \
    0 100 \
    $low 70 97 \
    191 191 191 \
    240 240 0 \
    255 15 15
echo "<tool>CPU utilization: $util%</tool>"
| true |
404ef54ec53df4fe2d485018025560b7fb1d8e21 | Shell | sarikayamehmet/sweep-toolkit | /setup-openssl-case.sh | UTF-8 | 1,348 | 3.1875 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | # The assumption is that you have met the requirements set out in the README file.
# This script needs to be run from the sweep-toolkit root directory
# House keeping: record the run date in the case-study log.
echo "OpenSSL Study Case -- Record" > Cases-Study/OpenSSL/housekeeping.rst
echo " " >> Cases-Study/OpenSSL/housekeeping.rst
echo "Date:" >> Cases-Study/OpenSSL/housekeeping.rst
date >> Cases-Study/OpenSSL/housekeeping.rst
echo " " >> Cases-Study/OpenSSL/housekeeping.rst
# Set up a local directory with all OpenSSL old releases to study
python3 Cases-Study/OpenSSL/downloader.py
sleep 5
echo "Environment configurations for the Understand Tool"
# Replace "/path/scitools" with the full path of the Understand installation directory (/path/scitools)
# as described in: "Step 3" of the "instructions.txt" guide.
# i.e. /home/username/Downloads/scitools
python3 plugins/MetricsGeneration/setupEnv.py /path/scitools
source runmefirst.sh
echo
echo "Verify that the PATH settings are properly set ..."
echo $PATH
sleep 5
# manager.py mode 1: generate metrics from the downloaded release sources.
echo "Auto assisted software metrics generations for OpenSSL old releases's source codes"
python3 manager.py Cases-Study/OpenSSL/Releases/ 1
# manager.py mode 0: analyze the generated metrics into the dataset.
echo "Auto analysis of the generated metrics for the OpenSSL dataset auto construction"
python3 manager.py Understand-Data/Metrics 0
# Word of wisdom: This is a PoC. Subsequent updates will be made as the need arises.
604887cd1a2a92b70e8ec81965e4cc77164caff3 | Shell | Taishi-Y/dotfiles | /.zshrc | UTF-8 | 3,650 | 3.234375 | 3 | [] | no_license | ################
### Environment variables ###
################
export LANG=ja_JP.UTF-8
# Added by the Heroku Toolbelt
export PATH="/usr/local/heroku/bin:$PATH"
# Enable the use of colors
autoload -Uz colors
colors
# History settings
HISTFILE=~/.zsh_history
HISTSIZE=1000
SAVEHIST=1000
##################
### Prompt ###
##################
# NOTE(review): `local` at the top level of .zshrc (outside any function)
# triggers a zsh warning; these should probably be plain assignments.
local p_cdir="%B%F{yellow}[%~]%f%b"
DEFAULT=$'\U1F603 '
ERROR=$'\U1F47F '
# %(?,a,b) picks "a" when the last command succeeded, "b" otherwise.
local p_emoji="%(?,${DEFAULT},${ERROR})"
local p_mark="%B%(?,%F{green}▶,%F{red}▶)%f%b"
PROMPT="
$p_emoji $p_cdir
$p_mark "
################
### Completion ###
################
#for zsh-completions
fpath=(/usr/local/share/zsh-completions $fpath)
# Enable the completion system
autoload -Uz compinit
compinit -u
# Let lower-case input also match upper-case candidates
zstyle ':completion:*' matcher-list 'm:{a-z}={A-Z}'
# After ../ do not offer the current directory as a completion
zstyle ':completion:*' ignore-parents parent pwd ..
# Complete command names after sudo
zstyle ':completion:*:sudo:*' command-path /usr/local/sbin /usr/local/bin \
/usr/sbin /usr/bin /sbin /bin /usr/X11R6/bin
# Process-name completion for the ps command
zstyle ':completion:*:processes' command 'ps x -o pid,s,args'
################
### vcs_info ###
################
autoload -Uz vcs_info
autoload -Uz add-zsh-hook
# Format: branch@repo, plus staged (%c) / unstaged (%u) markers.
zstyle ':vcs_info:git:*' formats '%b@%r' '%c' '%u'
zstyle ':vcs_info:git:*' actionformats '%b@%r|%a' '%c' '%u'
# precmd hook: build RPROMPT with the current git branch, colored by status.
function _update_vcs_info_msg() {
STY= LANG=en_US.UTF-8 vcs_info
local prefix branchname suffix
# Skip when inside the .git directory itself
if [[ "$PWD" =~ '/\.git(/.*)?$' ]]; then
RPROMPT=""
return
fi
branchname=`get-branch-name`
# Skip when there is no branch name
if [[ -z $branchname ]]; then
RPROMPT=""
return
fi
prefix=`get-branch-status` # returns only the color escape
suffix='%{'${reset_color}'%}'
RPROMPT="${prefix}%B${branchname}%b${suffix}"
}
# Echo the branch name collected by vcs_info.
function get-branch-name {
# Errors outside a git directory are discarded
echo "$vcs_info_msg_0_"
#echo `git rev-parse --abbrev-ref HEAD 2> /dev/null`
}
# Map `git status --short` output onto a prompt color
# (clean / untracked / modified / staged).
function get-branch-status {
local res color
output=`git status --short 2> /dev/null`
if [ -z "$output" ]; then
res=':' # status Clean
color='%{'${fg[green]}'%}'
elif [[ $output =~ "[\n]?\?\? " ]]; then
res='?:' # Untracked
color='%{'${fg[yellow]}'%}'
elif [[ $output =~ "[\n]? M " ]]; then
res='M:' # Modified
color='%{'${fg[red]}'%}'
else
res='A:' # Added to commit
color='%{'${fg[cyan]}'%}'
fi
# echo ${color}${res}'%{'${reset_color}'%}'
echo ${color} # return only the color
}
add-zsh-hook precmd _update_vcs_info_msg
##################
### Options ###
##################
# Allow Japanese (8-bit) file names to display correctly
setopt print_eight_bit
# Strip superfluous blanks when saving to history
setopt hist_reduce_blanks
# Colored ls output (similar to ls -G)
export LSCOLORS=gxfxcxdxbxegedabagacad
export LS_COLORS='di=36:ln=35'
zstyle ':completion:*' list-colors 'di=36' 'ln=35'
# Automatically run ls after cd
function chpwd() { ls -1 }
##################
### Aliases ###
##################
alias la='ls -a'
alias l='ls -l'
alias p='cd `find ~/Develop/ | peco`'
alias vi='vim'
export PATH=$PATH:/Applications/MAMP/Library/bin
export NVM_DIR="/Users/yamasakitaishi/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
autoload -U add-zsh-hook
# Auto-switch node version when the directory contains an .nvmrc file.
load-nvmrc() {
if [[ -f .nvmrc && -r .nvmrc ]]; then
nvm use
fi
}
add-zsh-hook chpwd load-nvmrc
| true |
458ed9eca1a825528fd6f399e0e3c61b6374b30c | Shell | morningspace/elastic-shell | /lib/bin/common/base.sh | UTF-8 | 3,716 | 3.8125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Remove scratch files (and run the on_exit hook) whenever the shell exits.
trap clean_up exit
# Layout paths; $app_home is expected to be set by the sourcing script.
bin_dir=$app_home/bin
config_dir=$app_home/config
# Per-process scratch files ($$ = PID of this shell).
tmp=/tmp/tmp$$
tmp_res=/tmp/tmp_res$$
tmp_ret=/tmp/tmp_ret$$
tmp_dryrun=/tmp/tmp_dryrun$$
# Results of the select_dir / select_file helpers below.
selected_dir=
selected_file=
# Return 0 when the given command is available on PATH, non-zero otherwise.
# BUG FIX: quote "$1" so names containing spaces/globs are handled safely.
exists() {
  command -v "$1" >/dev/null 2>&1
}
# Warn about missing optional dependencies; the app still starts without them.
preflight_check() {
exists "curl" || {
warn "dependency 'curl' not found, launch in dry run mode"
}
exists "jq" || {
warn "dependency 'jq' not found, some features may not be available"
}
exists "dialog" || {
warn "dependency 'dialog' not found, dialog mode disabled"
}
}
# Lifecycle hooks: no-op defaults meant to be overridden by sourcing scripts.
# The "(:)" body runs in a subshell, so overrides stay side-effect free here.
on_init() (:)
# Entry point applications call after sourcing this file.
init_app() {
preflight_check
on_init
}
on_exit() (:)
# exit-trap handler: drop scratch files, run the on_exit hook, then exit.
clean_up() {
rm -f $tmp $tmp_res $tmp_ret $tmp_dryrun
on_exit
exit
}
# Show configuration entries from config/main.properties whose key prefix
# (the part before the first "_") matches any of the given arguments.
config() {
local conf=()
while IFS='=' read -r key value ; do
if [[ ! -z $key && $@ =~ ${key%%_*} ]] ; then
conf+=("$key")
fi
done < $config_dir/main.properties
formbox "Config" "Available configuration:" "${conf[@]}"
}
# Let the user pick a sub-directory of $1 via a menu; choosing "..." switches
# to free text input with $3 as the default. $2 is the dialog title.
# The choice is stored in the global $selected_dir; returns 255 on cancel.
select_dir() {
local dirs=($1/* "...")
dirs=(${dirs[@]##*/})
local title=$2
local item=$(echo $2 | tr '[:upper:]' '[:lower:]')
menubox "$title" "Select $item from the list:" "selected_dir" "${dirs[@]}"
[[ $? != 0 ]] && return 255
if [[ $selected_dir == "..." ]] ; then
local default_value=$3
inputbox "Input $title" "Input the name of $item" "selected_dir" "$default_value"
fi
}
# Let the user pick a file (basename, extension stripped) from directory $1
# whose name starts with $3. Flags: --allow-none adds "none",
# --allow-input adds "...". The choice is stored in the global $selected_file.
select_file() {
local files=()
[[ -d $1 ]] && files=($(ls -l $1 | awk '{print $9}' | egrep "^$3" | cut -d . -f 1))
[[ $@ =~ --allow-none ]] && files+=("none")
[[ $@ =~ --allow-input ]] && files+=("...")
local title=$2
local item=$(echo $2 | tr '[:upper:]' '[:lower:]')
menubox "$title" "Select $item from the list:" "selected_file" "${files[@]}"
}
# Interactive loop over the Elasticsearch _cat APIs; "..." prompts for a
# custom command. Loops until the menu is cancelled (returns 255).
cat_query() {
local choice
local options=(
"indices"
"shards"
"nodes"
"..."
)
while true ; do
menubox "cat" "Select an Elasticsearch cat API:" "choice" "${options[@]}"
[[ $? != 0 ]] && return 255
case $choice in
"indices") do_cat_query "indices" ;;
"shards") do_cat_query "shards" ;;
"nodes") do_cat_query "nodes" ;;
"...") do_cat_query ;;
esac
done
}
# Run one _cat query ($1; empty prompts for a command, and an empty answer
# lists all commands) and show the response in a text box.
do_cat_query() {
local cmd=$1
[[ -z $cmd ]] && inputbox "cat" "Command(press Enter to list all commands)" "cmd"
[[ ! -z $cmd ]] && cmd="/$cmd?v"
net_get "_cat$cmd" --silent | to_json | textbox "_cat$cmd"
}
# Render a duration in seconds as [D ][HH:][MM:]SS. The day/hour/minute
# segments are emitted only when non-zero, so 3661 -> "01:01:01" but 59 -> "59".
display_time() {
  local total=$1
  local days=$(( total / 86400 ))
  local hours=$(( total / 3600 % 24 ))
  local minutes=$(( total / 60 % 60 ))
  local seconds=$(( total % 60 ))
  if (( days > 0 )); then printf '%d ' "$days"; fi
  if (( hours > 0 )); then printf '%02d:' "$hours"; fi
  if (( minutes > 0 )); then printf '%02d:' "$minutes"; fi
  printf '%02d\n' "$seconds"
}
to_json() {
jq --raw-input --raw-output '. as $line | try fromjson catch $line'
}
value_of() {
jq $1 ${@:2} 2>/dev/null
}
has_key() {
jq -e "has(\"$1\")" 2>/dev/null
}
if ! exists "jq" ; then
to_json() {
cat
}
value_of() {
grep -o "\"${1:1}\":\s*\"\?[^\"^,]*" | grep -o '[^"]*$' | sed -e 's/^://'
}
has_key() {
grep -o "\"$1\":" && echo true || echo false
}
fi
[[ $@ =~ --quiet ]] && is_quiet=1
[ -f $config_dir/main.properties ] && . $config_dir/main.properties
[ -f $bin_dir/common/log.sh ] && . $bin_dir/common/log.sh
[ -f $bin_dir/common/net.sh ] && . $bin_dir/common/net.sh
[ -f $bin_dir/common/task.sh ] && . $bin_dir/common/task.sh
if [[ $@ =~ --ui-dialog ]] ; then
if exists "dialog" ; then
[[ -f $bin_dir/ui/dialog.sh ]] && . $bin_dir/ui/dialog.sh
else
error "can not run in dialog mode due to 'dialog' not found"
exit 1
fi
elif [[ $@ =~ --ui-text ]] ; then
[ -f $bin_dir/ui/text.sh ] && . $bin_dir/ui/text.sh
else
[ -f $bin_dir/ui/cli-base.sh ] && . $bin_dir/ui/cli-base.sh
[ -f $bin_dir/ui/cli-seq.sh ] && . $bin_dir/ui/cli-seq.sh
fi
| true |
a60a626a2a142088bc8e84daccbb0f18921ec1c6 | Shell | akinomyoga/cpulook | /cpuseekd | UTF-8 | 3,989 | 3.71875 | 4 | [] | no_license | #!/usr/bin/env bash
function cpudir.initialize {
# cpudir=${MWGDIR:-$HOME/.mwg}/share/cpulook
# [[ -d $cpudir ]] && return
local _scr=$(readlink -f "$0" || /bin/readlink -f "$0" || echo "$0")
local _dir=${_scr%/*}
[[ $_dir == "$_scr" ]] && _dir=.
[[ ! $_dir ]] && _dir=/
cpudir=$_dir
}
cpudir.initialize
tmpdir=$HOME/.local/share/cpulook/tmp
#------------------------------------------------------------------------------
# definitions
: ${MWGDIR:="$HOME/.mwg"}
SUBTYPE=$cpudir/m/switch
cpulist=$cpudir/cpulist.cfg
list=$cpudir/task.txt
cook=$cpudir/task.eat
stat=$tmpdir/cpustat.txt
seeklog=$cpudir/cpuseekd.log
# configuration
cpu_wait=300
task_wait=600
if [[ $1 == v ]]; then
fV=v
source "$MWGDIR/libexec/echox"
# echom 'dbg: verbose mode'
else
fV=
fi
function fileage {
# how many seconds has past since the file was modified
local file=$1
echo $(($(date '+%s')-$(date '+%s' -r "$file")))
}
function isvalidcpu {
local cpus=$(cat "$cpulist"|awk '/^[[:space:]]*#/{next;}/[^[:space:]]/{print $1}')
local cpu
for cpu in $cpus; do
[[ $cpu == "$1" ]] && return 0
done
echoe "fatal: invalid cpu name '$1'"
return 1
}
#------------------------------------------------------------------------------
# cpu managing
declare -a cpustat_name
declare -a cpustat_idle
declare -a cpustat_info
declare cpustat_size=0
declare cpustat_first=1
function update_cpustat {
if [[ $fV ]]; then
echom "updating current cpustatus ..."
"$cpudir/cpulook" 10
else
"$cpudir/cpulook" 10 &>/dev/null
fi
cpustat_first=
cpustat_size=0
while read line; do
local -a arr=($line)
cpustat_name[${arr[0]}]=${arr[1]}
cpustat_idle[${arr[0]}]=${arr[2]}
cpustat_nice[${arr[0]}]=${arr[3]}
cpustat_size=$(($cpustat_size+1))
done < <(awk 'BEGIN{i=0} $3!="I"{print i++,$1,$3,$4}' "$stat")
}
declare freecpu_name
declare freecpu_nice
function allocate_cpu {
if [[ ! $cpustat_first && $cpustat_size -eq 0 ]]; then
# 残機がないとき暫く待って再度取得
if [[ -e $stat ]]; then
local _wait=$((60-$(fileage "$stat")))
((_wait>0)) && sleep "$_wait"
fi
update_cpustat
elif [[ $cpustat_first || ! -e $stat || $(fileage "$stat") -gt 60 ]]; then
# cpustat 情報が古い時、更新
update_cpustat
fi
while true; do
local name=
local j
local n=$cpustat_size
for ((j=0;j<n;j++)); do
if ((cpustat_idle[j]>0)); then
local name=${cpustat_name[j]}
local idle=${cpustat_idle[j]}
local nice=${cpustat_nice[j]}
((cpustat_idle[j]=idle-1))
isvalidcpu "$name" || name=
break
fi
done
if [[ ! $name ]]; then
datetime=$(date +'[%x %T]')
[[ $fV ]] && echom "$datetime there are no more cpus available. waiting for $cpu_wait seconds ..."
sleep $cpu_wait
update_cpustat
continue
fi
freecpu_name=$name
freecpu_nice=$nice
return
done
}
#------------------------------------------------------------------------------
#-- single instance guard --
(($(ps ux|grep '\bcpuseekd\b'|wc -l) > 1)) || exit 0
function seek_submit {
local cmd=$1
allocate_cpu
local name=$freecpu_name
local nice=$freecpu_nice
[[ $name == 0 ]] && exit 1
datetime=$(date +'[%x %T]')
function log_submit {
echo "$datetime $1" >> "$seeklog"
[[ $fV ]] && echom "$datetime $1"
}
source "$SUBTYPE/submit.src"
return
}
function set_term_title { echo -n $'\ek'"$*"$'\e\\'; }
function next_tasklist {
[[ -s $list ]] && return 0
set_term_title "cpuseekd: ### waiting next task.txt... ###"
sleep $task_wait
[[ -s $list ]]
}
while next_tasklist; do
[[ -f $cook ]] && mv "$cook" "$cook.$(date +%Y%m%d-%H%M%S -r "$cook")"
mv "$list" "$cook"
iN=$(cat "$cook" | wc -l)
for((i=1;i<=iN;i++)); do
set_term_title "cpuseekd: ### throwing task $i/$iN ###"
cmd="$(tail -n +$i $cook | head -1)"
[[ -n $cmd ]] && seek_submit "$cmd"
done
done
| true |
cc1b0415216c5119b5b604850530f99985f7a202 | Shell | leandro-ss/script_misc | /jmx/gc_monitor/load.sh | UTF-8 | 11,226 | 3.71875 | 4 | [] | no_license | #!/usr/bin/ksh
set -x
cd . ./.profile
# pega no fqdn pela linha de comando
FQDN="$1"
# diretorio remoto de onde extrair os dados
ACL_REMOTE_HOME="$2"
# prefixo logs http para o grep
PREFIX="$3"
# sufixo para os arquivos de saida
SUFFIX="$4"
# diretorio local
ACL_LOCAL_HOME="$5"
# sufixo para definição de arquivos zips
SUFFIX_GZIP="gz"
# pega o hostname remoto
REMOTE_HOSTNAME="${echo $FQDN | sed 's/\([A-z]*\).*/\L\1/g'}"
# arquivos de contagem
FLE_CNT_ANT=${ACL_LOCAL_HOME}/$REMOTE_HOSTNAME"_"$SUFFIX"_ANT.cnt"
FLE_CNT_ATL=${ACL_LOCAL_HOME}/$REMOTE_HOSTNAME"_"$SUFFIX"_ATL.cnt"
# arquivos de controle
FLE_AWK=${ACL_LOCAL_HOME}/$REMOTE_HOSTNAME"_"$SUFFIX".awk"
ACL_CTL=${ACL_LOCAL_HOME}/$REMOTE_HOSTNAME"_"$SUFFIX".ctl"
ACL_LOG=${ACL_LOCAL_HOME}/$REMOTE_HOSTNAME"_"$SUFFIX".log"
# Home do usuário
FLE_TMP=$SUFFIX"_gc_tmp.log"
touch "$FLE_CNT_ATL"
touch "$FLE_CNT_ANT"
echo "`date '+%Y-%m-%d %T'` Iniciando Conexão $REMOTE_HOSTNAME";
ssh scap01@$FQDN "awk -v r=0 '{if(FNR==1 && NR!=1) print r; else r = FILENAME\"|\"FNR\"|\"\$0;}END{print FILENAME\"|\"FNR\"|\"\$0;}' $ACL_REMOTE_HOME/$PREFIX*" > "$FLE_CNT_ATL"
# pega lista de logs do host remoto
for arquivo in $(ssh scap01@$FQDN "find $ACL_REMOTE_HOME -maxdepth 1 | grep -i $PREFIX"); do
#Carregando numero da ult linha lida
ult_linha_anterior="$(awk -v ARQUIVO="$arquivo" 'BEGIN {FS="\\|"} { if ( ARQUIVO==$1) print $2}' $FLE_CNT_ANT)"
#Condição para o caso do arquivo ainda não estar presente na listagem anterior
[ -z "$ult_linha_anterior" ] && ult_linha_anterior=0
#Carregando numero da ult linha lida
ult_linha_atual="$(awk -v ARQUIVO="$arquivo" 'BEGIN {FS="|"} { if ( ARQUIVO==$1) print $2}' $FLE_CNT_ATL)"
if [ $ult_linha_anterior -ne 0 ] && [ $ult_linha_atual -ge $ult_linha_anterior ]; then
ult_texto_anterior="$(awk -v ARQUIVO="$arquivo" 'BEGIN {FS="|"} { if ( ARQUIVO==$1) print $3}' $FLE_CNT_ANT)"
if [ $(ssh scap01@$FQDN "tail -$ult_linha_anterior | head -1") -eq $ult_texto_anterior ]; then
#Calcula a diferença na quantidade de linhas
numero_linha="$(expr $ult_linha_atual - $ult_linha_anterior)"
#Gera um arquivo auxiliar para utilização em transferência posterior
ssh scap01@$FQDN "tail -n $numero_linha $arquivo > ~/$FLE_TMP"
#Realiza a transferencia com um nível de compactação pré-definido
scp -C scap01@$FQDN:~/$FLE_TMP $ACL_LOCAL_HOME/$REMOTE_HOSTNAME
else
#Realiza a transferencia com um nível de compactação pré-definido
scp -C scap01@$FQDN:$arquivo $ACL_LOCAL_HOME/$REMOTE_HOSTNAME
fi
elif [ $ult_linha_atual -lt $ult_linha_anterior ]; then
#Realiza a transferencia com um nível de compactação pré-definido
scp -C scap01@$FQDN:$arquivo $ACL_LOCAL_HOME/$REMOTE_HOSTNAME
fi
done
cat "$FLE_CNT_ATL" > "$FLE_CNT_ANT"
echo "`date '+%Y-%m-%d %T'` $REMOTE_HOSTNAME: Parseando logs $ACL_LOCAL_HOME/$REMOTE_HOSTNAME"
echo ' ###### SCRIPT PARA COLETA DE GC SOB ALGORITMO SREIAL - TESTADO EM HOSTSPOT JDK6 ###### ' > "${FLE_AWK_SERIAL}"
echo 'BEGIN{ OFS=";";}' >> "${FLE_AWK_SERIAL}"
echo '{' >> "${FLE_AWK_SERIAL}"
echo '' >> "${FLE_AWK_SERIAL}"
echo ' match( $0,/OC#[0-9]+|YC#[0-9]+/, arr);' >> "${FLE_AWK_SERIAL}"
echo '' >> "${FLE_AWK_SERIAL}"
echo ' gsub("Jan","01"); gsub("Feb","02"); gsub("Mar","03"); gsub("Apr","04");' >> "${FLE_AWK_SERIAL}"
echo ' gsub("May","05"); gsub("Jun","06"); gsub("Jul","07"); gsub("Aug","08");' >> "${FLE_AWK_SERIAL}"
echo ' gsub("Sep","09"); gsub("Oct","10"); gsub("Nov","11"); gsub("Dec","12");' >> "${FLE_AWK_SERIAL}"
echo '' >> "${FLE_AWK_SERIAL}"
echo ' gsub("[[:alpha:]]|\\[|\\]|\\(|\\)|\\-|\\,|>|#"," ");' >> "${FLE_AWK_SERIAL}"
echo '' >> "${FLE_AWK_SERIAL}"
echo ' if( 0 < length(arr[0])){' >> "${FLE_AWK_SERIAL}"
echo '' >> "${FLE_AWK_SERIAL}"
echo ' print HOSTNAME, INSTANCE, arr[0], $2"/"$1"/"$4" "$3, $9, $10, $11, $12;' >> "${FLE_AWK_SERIAL}"
echo ' }' >> "${FLE_AWK_SERIAL}"
echo '}' >> "${FLE_AWK_SERIAL}"
echo ' ###### SCRIPT PARA COLETA DE GC SOB ALGORITMO CMS - TESTADO EM HOSTSPOT JDK6 ###### '> "${FLE_AWK_CMS}"
echo 'BEGIN{ OFS=";"; temp =0;}' >> "${FLE_AWK_CMS}"
echo '{' >> "${FLE_AWK_CMS}"
echo ' match( $0,/Full GC|ParNew|concurrent mode failure|CMS[A-z\-]*:/, arr);' >> "${FLE_AWK_CMS}"
echo ' gsub( "[[:alpha:]]|\\[|\\]|\\(|\\)|\\-|\\,|>|#|:|=|\\/" ," ");' >> "${FLE_AWK_CMS}"
echo ' gsub( ":", "", arr[0]);' >> "${FLE_AWK_CMS}"
echo '' >> "${FLE_AWK_CMS}"
echo ' if(arr[0] == "ParNew"){' >> "${FLE_AWK_CMS}"
echo '' >> "${FLE_AWK_CMS}"
echo ' print HOSTNAME, INSTANCE,arr[0] " - YOUNG GC", $1, $3, $4, $5, $6;' >> "${FLE_AWK_CMS}"
echo '' >> "${FLE_AWK_CMS}"
echo ' } else if(arr[0] == "Full GC"){' >> "${FLE_AWK_CMS}"
echo '' >> "${FLE_AWK_CMS}"
echo ' print HOSTNAME, INSTANCE,arr[0] " - OLD GC", $1, $3, $4, $5, $6;' >> "${FLE_AWK_CMS}"
echo ' print HOSTNAME, INSTANCE,arr[0] " - TOTAL GC", $1, $7, $8, $9;' >> "${FLE_AWK_CMS}"
echo ' print HOSTNAME, INSTANCE,arr[0] " - PERM GC", $1, $10, $11, $12, $13;' >> "${FLE_AWK_CMS}"
echo '' >> "${FLE_AWK_CMS}"
echo ' }else if( 0 < match(arr[0], "CMS-initial-mark")){' >> "${FLE_AWK_CMS}"
echo '' >> "${FLE_AWK_CMS}"
echo ' print HOSTNAME, INSTANCE, arr[0],$1, $4, $5, $6;' >> "${FLE_AWK_CMS}"
echo '' >> "${FLE_AWK_CMS}"
echo ' }else if( 0 < match(arr[0], "CMS-concurrent")){' >> "${FLE_AWK_CMS}"
echo '' >> "${FLE_AWK_CMS}"
echo ' print HOSTNAME, INSTANCE, arr[0],$1 ,"","","", $3;' >> "${FLE_AWK_CMS}"
echo ' ' >> "${FLE_AWK_CMS}"
echo ' } else if( 0 < match(arr[0], "CMS-remark")){' >> "${FLE_AWK_CMS}"
echo '' >> "${FLE_AWK_CMS}"
echo ' print HOSTNAME, INSTANCE, arr[0]" - YOUNG GC",$1, "", $2, $3, $5;' >> "${FLE_AWK_CMS}"
echo ' print HOSTNAME, INSTANCE, arr[0]" - OLD GC", $1, "", $9, $10, $13;' >> "${FLE_AWK_CMS}"
echo ' print HOSTNAME, INSTANCE, arr[0]" - TOTAL GC", $1, "", $11, $12;' >> "${FLE_AWK_CMS}"
echo '' >> "${FLE_AWK_CMS}"
echo ' } else if( 0 < match(arr[0], "concurrent mode failure")){' >> "${FLE_AWK_CMS}"
echo '' >> "${FLE_AWK_CMS}"
echo ' print HOSTNAME, INSTANCE, arr[0]" - OLD GC", temp_data, $1, $2, $3, $4;' >> "${FLE_AWK_CMS}"
echo ' print HOSTNAME, INSTANCE, arr[0]" - TOTAL GC", temp_data, $5, $6, $7, $11;' >> "${FLE_AWK_CMS}"
echo ' print HOSTNAME, INSTANCE, arr[0]" - PERM GC", temp_data, $8, $9, $10, $11;' >> "${FLE_AWK_CMS}"
echo ' }' >> "${FLE_AWK_CMS}"
echo ' temp_data = $1' >> "${FLE_AWK_CMS}"
echo '}' >> "${FLE_AWK_CMS}"
awk -v HOSTNAME="$REMOTE_HOSTNAME" -v INSTANCE="$SUFFIX" -f "${FLE_AWK}" "$REMOTE_HOSTNAME"/"$PREFIX"* > $REMOTE_HOSTNAME"_"$SUFFIX".csv"
echo "`date '+%Y-%m-%d %T'` $REMOTE_HOSTNAME: Iniciando loader $SUFFIX"
echo "LOAD DATA" > "${ACL_CTL}"
echo "INFILE '${ACL_LOCAL_HOME}/"$REMOTE_HOSTNAME"_"$SUFFIX".csv'" >> "${ACL_CTL}"
echo "APPEND INTO TABLE CAPACITY.TOUT_GC" >> "${ACL_CTL}"
echo "FIELDS TERMINATED BY ';'" >> "${ACL_CTL}"
echo "(" >> "${ACL_CTL}"
echo "HOSTNAME," >> "${ACL_CTL}"
echo "INSTANCIA," >> "${ACL_CTL}"
echo "DATA DATE \"dd/mm/yyyy HH24:MI:SS\"," >> "${ACL_CTL}"
echo "GC_ANTES," >> "${ACL_CTL}"
echo "GC_DEPOIS," >> "${ACL_CTL}"
echo "GC_TAMANHO," >> "${ACL_CTL}"
echo "TEMPO)" >> "${ACL_CTL}"
${ORACLE_HOME}/bin/sqlldr userid="impmnegocio/intim#2011" control=${ACL_CTL} log=${ACL_LOG} errors=10000000
echo "`date '+%Y-%m-%d %T'` $REMOTE_HOSTNAME: Removendo arquivos copiados"
find $ACL_LOCAL_HOME/$REMOTE_HOSTNAME -name "$PREFIX*" -exec rm -f {} \;
| true |
74141e1e26c24d39e33993413803ae1066b8045b | Shell | GijsvanDulmen/sample-cluster-setup | /1-setup-cluster.sh | UTF-8 | 3,179 | 2.671875 | 3 | [] | no_license | #!/bin/bash
source 0-config.sh
# config
gcloud config set account ${ACCOUNT}
gcloud config set project ${PROJECT_ID}
# enable needed services
gcloud services enable storage-api.googleapis.com
gcloud services enable cloudresourcemanager.googleapis.com
gcloud services enable compute.googleapis.com
gcloud services enable container.googleapis.com
gcloud services enable iam.googleapis.com
# setup cluster with terraform
cd gke-cluster-01
terraform init
terraform validate
terraform apply \
-var="project_id=${PROJECT_ID}" \
-var="cluster_name=${PROJECT_ID}" \
-var="region=${REGION}" \
-var='network=gke-network' \
-var='subnetwork=gke-subnetwork'
# set correct credentials
gcloud container clusters get-credentials ${PROJECT_ID} --region ${REGION}
# set cluster admin role to current user
kubectl create clusterrolebinding cluster-admin-binding \
--clusterrole cluster-admin \
--user $(gcloud config get-value account)
# install cert-manager
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.1.0/cert-manager.yaml
# wait for it to be installed
kubectl wait --for=condition=available --timeout=600s deployment/cert-manager -n cert-manager
kubectl wait --for=condition=available --timeout=600s deployment/cert-manager-cainjector -n cert-manager
kubectl wait --for=condition=available --timeout=600s deployment/cert-manager-webhook -n cert-manager
# install argocd
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
# wait for it to be installed
kubectl wait --for=condition=available --timeout=600s deployment/argocd-application-controller -n argocd
kubectl wait --for=condition=available --timeout=600s deployment/argocd-dex-server -n argocd
kubectl wait --for=condition=available --timeout=600s deployment/argocd-redis -n argocd
kubectl wait --for=condition=available --timeout=600s deployment/argocd-repo-server -n argocd
kubectl wait --for=condition=available --timeout=600s deployment/argocd-server -n argocd
# install ingress
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.41.2/deploy/static/provider/cloud/deploy.yaml
# Wait till it's ready
kubectl wait --namespace ingress-nginx \
--for=condition=available deployment/ingress-nginx-controller \
--timeout=120s
# install kubernetes ExternalDNS with Google Cloud DNS enabled
CLOUD_DNS_SA=cloud-dns-admin
gcloud --project ${PROJECT_ID} iam service-accounts \
create ${CLOUD_DNS_SA} \
--display-name "Service Account for ExternalDNS."
gcloud projects add-iam-policy-binding ${PROJECT_ID} \
--member serviceAccount:${CLOUD_DNS_SA}@${PROJECT_ID}.iam.gserviceaccount.com \
--role roles/dns.admin
gcloud iam service-accounts keys create ./external-dns-key.json \
--iam-account=${CLOUD_DNS_SA}@${PROJECT_ID}.iam.gserviceaccount.com
# create ns
kubectl create ns externaldns
kubectl create secret -n externaldns generic cloud-dns-key \
--from-file=key.json=./external-dns-key.json
rm ./external-dns-key.json # delete the key again
kubectl apply -n externaldns -f ../cluster-infra/external-dns.yml | true |
dcedabfa12a3ce0ce38b2abafc197fc21ab65ae4 | Shell | goku-KaioKen/scripts | /recon.sh | UTF-8 | 19,156 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -u
set -e
set -o pipefail
declare DIR
declare FILE
declare NOPING=false
RED='\e[91m'
GREEN='\e[32m'
BLUE='\e[34m'
NC='\e[0m'
echo "______ __ _____
___ / / /___ __________ /_____________
__ /_/ /_ / / /_ __ \ __/ _ \_ ___/
_ __ / / /_/ /_ / / / /_ / __/ /
/_/ /_/ \__,_/ /_/ /_/\__/ \___//_/
"
echo ""
# Parse command-line flags into the globals DIR, FILE and NOPING.
#   -d|--directory <dir>  output directory (a single trailing slash is stripped)
#   -f|--file <file>      file containing one target IP per line
#   -n|--noping           skip the up-front ping liveness check
# A flag missing its value prints the usage text and parsing continues; the
# final DIR/FILE check in the main body catches the omission.
function parse_args {
    while [ "${1:-}" != "" ]; do
        case "$1" in
          "-d" | "--directory")
            shift
            # ${1%/} removes one trailing slash (in case the user adds one)
            # without forking echo|sed, and -- unlike the old unquoted
            # $(echo $1 | sed ...) -- does not word-split paths containing
            # whitespace.
            [ -z "${1:+x}" ] && print_usage || DIR="${1%/}"
            ;;
          "-f" | "--file")
            shift
            [ -z "${1:+x}" ] && print_usage || FILE="$1"
            ;;
          "-n" | "--noping")
            NOPING=true
            ;;
          *)
            print_usage
            exit 1
            ;;
        esac
        shift
    done
}
# Orchestrates the full per-host scan ($1 = target host/IP):
#   1. Runs the fast full-range TCP sweep and the top-200 UDP sweep in parallel.
#   2. Launches a detailed nmap (-sV -sC -A -O + vulners NSE) over all open
#      TCP ports, in the background.
#   3. Parses every "port/.../service" entry nmap reported and dispatches the
#      matching *_enum helper for it, backgrounded, then waits for all of them.
# Reads/writes scratch files under $DIR/$1 and removes them via cleanup.
function nmap_scan {
    echo -e "${BLUE}[*] Nmap TCP scan initiated${NC}"
    nmap_speedy_tcp $1 &
    echo -e "${BLUE}[*] Nmap UDP scan initiated${NC}"
    nmap_speedy_udp $1 &
    wait
    echo -e "${GREEN}[+] Nmap TCP scan completed${NC}"
    echo -e "${GREEN}[+] Nmap UDP scan completed${NC}"
    if [ -f "$DIR/$1/out" ]; then
        #get the ports
        # Extract the "port/state/proto/.../service" fields from the grepable
        # output, one per line, dropping closed ports and keeping port numbers.
        ports=($(egrep -v "^#|Status: Up" "$DIR/$1/out" | cut -d' ' -f4- | sed -e 's/Ignored.*//p' | tr ',' '\n' | sed -e 's/^[ \t]*//' | sort -n | uniq | grep -iv "closed" | cut -d'/' -f1))
        #This can proceed in background to speed up the whole thing as we only need the grepable output to continue
        nmap -sV -sC --max-retries 10 --max-scan-delay 50 --min-rate 350 --script=vulners.nse --script-args mincvss=7.0 -vv -Pn -n -A -O -p$(echo ${ports[@]} | tr ' ' ',') $1 -oN "$DIR/$1/nmap_tcp_full" 2>&1>/dev/null &
    fi
    if [ -f "$DIR/$1/out" ] && [ -f "$DIR/$1/out_udp" ]; then
        # Re-parse both TCP and UDP grepable output, this time keeping the
        # whole "port/.../service" token so the service name is available.
        ps=($(egrep -v "^#|Status: Up" "$DIR/$1/out" "$DIR/$1/out_udp" | cut -d' ' -f4- | sed -e 's/Ignored.*//p' | tr ',' '\n' | sed -e 's/^[ \t]*//' | sort -n | uniq | grep -iv "closed"))
        for ps in "${ps[@]}"; do
            # Field 1 of "port/state/proto/owner/service" is the port,
            # field 5 the service name.
            ports=($(echo $ps | awk -F '/' '{print $1}'))
            services=($(echo $ps | awk -F '/' '{print $5}'))
            for serv in "${!services[@]}"; do
                service=${services[$serv]}
                port=${ports[$serv]}
                # Dispatch one enumeration helper per recognised service name;
                # all run in the background except rpc_enum.
                if [[ "$service" == "http" ]]; then
                    http_enum $port $1 &
                    #gobuster_scan $service $1 &
                elif [[ "$service" =~ ^apani1 ]]; then
                    cassandra_enum $port $1 &
                elif [[ "$service" =~ ^ipp ]]; then
                    cups_enum $port $1 &
                elif [[ "$service" =~ ^distccd ]]; then
                    distcc_enum $port $1 &
                elif [[ "$service" =~ ^domain ]]; then
                    dns_enum $port $1 &
                elif [[ "$service" =~ ^imap ]]; then
                    imap_enum $port $1 &
                elif [[ "$service" =~ ^kerberos ]] || [[ "$service" =~ ^kpasswd ]]; then
                    kerberos_enum $port $1 &
                elif [[ "$service" =~ ^ldap ]]; then
                    LDAP_enum $port $1 &
                elif [[ "$service" =~ ^mongod ]]; then
                    mongodb_enum $port $1 &
                elif [[ "$service" =~ ^mssql ]] || [[ "$service" =~ ^ms-sql ]]; then
                    mssql_enum $port $1 &
                elif [[ "$service" =~ ^mysql ]]; then
                    mysql_enum $port $1 &
                elif [[ "$service" =~ ^nfs ]] || [[ "$service" =~ ^rpcbind ]]; then
                    nfs_enum $port $1 &
                elif [[ "$service" =~ ^nntp ]]; then
                    nntp_enum $port $1 &
                elif [[ "$service" =~ ^oracle ]]; then
                    oracle_enum $port $1 &
                elif [[ "$service" =~ ^pop3 ]]; then
                    pop3_enum $port $1 &
                elif [[ "$service" =~ ^rdp ]] || [[ "$service" =~ ^ms-wbt-server ]] || [[ "$service" =~ ^ms-term-serv ]]; then
                    rdp_enum $port $1 &
                elif [[ "$service" =~ ^java\-rmi ]] || [[ "$service" =~ ^rmiregistry ]]; then
                    rmi_enum $port $1 &
                # NOTE(review): ^rpcbind is already matched by the nfs branch
                # above, so it can never reach this branch -- confirm intent.
                elif [[ "$service" =~ ^msrpc ]] || [[ "$service" =~ ^rpcbind ]] || [[ "$service" =~ ^erpc ]]; then
                    rpc_enum $port $1
                elif [[ "$service" =~ ^asterisk ]]; then
                    sip_enum $port $1 &
                elif [[ "$service" =~ ^ssh ]]; then
                    ssh_enum $port $1 &
                elif [[ "$service" =~ ^telnet ]]; then
                    telnet_enum $port $1 &
                elif [[ "$service" =~ ^tftp ]]; then
                    tftp_enum $port $1 &
                elif [[ "$service" =~ ^vnc ]]; then
                    vnc_enum $port $1 &
                elif [[ "$service" == "ssl|http" ]] || [[ "$service" == "https" ]]; then
                    https_enum $port $1 &
                    #gobuster_scan $service $1 &
                elif [[ "$service" =~ ^smtp ]]; then
                    smtp_enum $port $1 &
                elif [[ "$service" =~ ^ftp ]] || [[ "$service" =~ ^ftp-data ]]; then
                    ftp_enum $port $1 &
                elif [[ "$service" =~ ^microsoft-ds ]] || [[ "$service" =~ ^smb ]] || [[ "$service" =~ ^netbios ]]; then
                    # Only enumerate SMB on the canonical 445 port.
                    if [ $port -eq 445 ];then
                        echo $port
                        smb_enum $port $1 &
                    fi
                elif [[ "$service" =~ ^snmp ]]; then
                    snmp_enum $port $1 &
                fi
            done
        done
        wait
        cleanup $1
    fi
}
# Split the full TCP port space (1-65535) into 15 equal chunks of 4369 ports
# and scan them all concurrently via nmap_range, returning once every chunk
# has finished. Note: spawns 15 parallel nmap processes --
# make sure you got a beefy machine to handle this.
function nmap_speedy_tcp {
    local chunk=4369
    local start=1
    local finish
    for ((n = 0; n < 15; n++)); do
        finish=$((start + chunk - 1))
        nmap_range "$start" "$finish" "$1" &
        start=$((finish + 1))
    done
    wait
}
# Scan TCP ports $1-$2 on host $3 and append any "open" lines from the
# grepable per-chunk output ($DIR/$3/$1) to $DIR/$3/out, which nmap_scan
# consumes later.
# Fix: the old `grep -q ...; if [[ "$?" -eq 0 ]]` pattern terminates the
# function under `set -e` before the check runs when no port is open;
# testing the grep directly in the `if` is both safe and idiomatic.
function nmap_range {
    nmap "-p$1-$2" -T4 --open --min-rate=150 --max-scan-delay 30 --max-retries 5 -Pn -n -vv "$3" -oG "$DIR/$3/$1" 2>&1>/dev/null
    # Only chunks that actually found an open port contribute to "out".
    if grep -qi "open/" "$DIR/$3/$1"; then
        grep -i "open/" "$DIR/$3/$1" >> "$DIR/$3/out"
    fi
}
# Quick UDP sweep of the 200 most common ports on host $1, followed by a
# detailed version-detection scan of whatever was found (backgrounded; the
# caller's later `wait` reaps it).
function nmap_speedy_udp {
    nmap -Pn -n -T4 -sU -vv --min-rate=500 --max-retries=5 --top-ports 200 "$1" -oG "$DIR/$1/out_udp" 2>&1>/dev/null
    # Same port-extraction pipeline as nmap_scan: pull the open port numbers
    # out of the grepable output, dropping closed ports.
    ps=($(egrep -v "^#|Status: Up" "$DIR/$1/out_udp" | cut -d' ' -f4- | sed -e 's/Ignored.*//p' | tr ',' '\n' | sed -e 's/^[ \t]*//' | sort -n | uniq | grep -iv "closed" | cut -d'/' -f1))
    #This can proceed in background to speed up the whole thing as we only need grepable output to continue
    nmap -sV -sC -sU -T4 -Pn -n -vv --min-rate=350 --max-retries=8 --script=vulners.nse --script-args mincvss=7.0 -A -O -p$(echo ${ps[@]} | tr ' ' ',') $1 -oN "$DIR/$1/nmap_udp_top200" 2>&1>/dev/null &
}
# Brute-force web paths with gobuster. $1 is the service name nmap reported
# ("https"/"ssl|http" selects HTTPS, anything else plain HTTP); $2 is the
# target host. Output appends to $DIR/$2/gobuster.
# (Currently unused -- the call sites in nmap_scan are commented out.)
function gobuster_scan {
    local scheme
    if [ "$1" == "ssl|http" ] || [ "$1" == "https" ]; then
        scheme="https"
    else
        scheme="http"
    fi
    echo -e "${BLUE}[*] Gobuster scan initiated${NC}"
    gobuster -u ${scheme}://$2/ -w /usr/share/wordlists/dirb/common.txt -t 40 >> "$DIR/$2/gobuster"
    echo -e "${GREEN}[+] Gobuster scan completed${NC}"
}
# Run nmap's HTTP-related NSE scripts against port $1 on host $2.
# Fix: the "Nikto scan completed" message was missing its ${NC} reset,
# leaving the terminal coloured green for all subsequent output.
# NOTE(review): the nikto invocation itself is commented out, so the
# start/completed messages are informational only -- do not rely on
# $DIR/$2/nikto_http existing.
function http_enum {
    echo -e "${BLUE}[*] HTTP enumeration initiated${NC}"
    nmap -sV -Pn -n -T4 -vv -p $1 --script="banner,(http* or ssl*) and not (brute or broadcast or dos or external or http-slowloris* or fuzzer)" --append-output -oN "$DIR/$2/http_enum" $2 >/dev/null
    echo -e "${BLUE}[*] Running Nikto${NC}"
    #nikto -ask=no -host http://$2:$1 > "$DIR/$2/nikto_http"
    echo -e "${GREEN}[+] Nikto scan completed${NC}"
    echo -e "${GREEN}[+] HTTP enumeration completed${NC}"
}
# HTTPS enumeration of port $1 on host $2: common http-* NSE scripts plus an
# sslscan certificate/cipher audit. The nikto run is commented out, so its
# start/completed messages are informational only.
function https_enum {
    echo -e "${BLUE}[*] HTTPS Enumeration initiated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script=http-vhosts,http-userdir-enum,http-apache-negotiation,http-backup-finder,http-config-backup,http-default-accounts,http-methods,http-method-tamper,http-passwd,http-robots.txt --append-output -oN "$DIR/$2/https_enum" -vv $2 >/dev/null
    sslscan --show-certificate $2 2>&1 | tee "$DIR/$2/sslscan"
    echo -e "${BLUE}[*] Running Nikto${NC}"
    #nikto -ask=no -h https://$2:$1 -ssl > "$DIR/$2/nikto_http"
    echo -e "${GREEN}[+] Nikto scan completed${NC}"
    echo -e "${GREEN}[+] HTTPS Enumeration completed${NC}"
}
# Cassandra (apani1) NSE enumeration on port $1 of host $2.
function cassandra_enum {
    echo -e "${BLUE}[*] Cassandra Scan initiated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,(cassandra* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --append-output -oN "$DIR/$2/cassandra_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] Cassandra Scan completed${NC}"
}
# CUPS/IPP print-service NSE enumeration on port $1 of host $2.
function cups_enum {
    echo -e "${BLUE}[*] Cups Scan initiated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,(cups* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --append-output -oN "$DIR/$2/cups_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] Cups scan completed${NC}"
}
# distcc enumeration on port $1 of host $2: checks CVE-2004-2687 (remote
# command execution) by attempting to run `id` through the daemon.
# Fix: the completion message was missing its ${NC} colour reset.
function distcc_enum {
    echo -e "${BLUE}[*] distcc Scan initiated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,distcc-cve2004-2687" --script-args="distcc-cve2004-2687.cmd=id" --append-output -oN "$DIR/$2/distcc_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] distcc scan completed${NC}"
}
# DNS NSE enumeration on port $1 of host $2.
# Fix: the completion message was missing its ${NC} colour reset.
function dns_enum {
    echo -e "${BLUE}[*] DNS Scan initiated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,(dns* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --append-output -oN "$DIR/$2/dns_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] DNS scan completed${NC}"
}
# FTP NSE enumeration on port $1 of host $2.
function ftp_enum {
    echo -e "${BLUE}[*] FTP Scan initiated${NC}"
    nmap -sV -Pn -T4 -n -p $1 --script="banner,(ftp* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --append-output -oN "$DIR/$2/ftp_enum" -vv $2 >/dev/null
    echo -e "${GREEN}[+] FTP Scan completed${NC}"
}
# IMAP NSE enumeration on port $1 of host $2.
function imap_enum {
    echo -e "${BLUE}[*] IMAP Scan initiated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,(imap* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --append-output -oN "$DIR/$2/imap_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] IMAP Scan completed${NC}"
}
# LDAP NSE enumeration on port $1 of host $2.
function LDAP_enum {
    echo -e "${BLUE}[*] LDAP Scan initiated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,(ldap* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --append-output -oN "$DIR/$2/LDAP_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] LDAP Scan completed${NC}"
}
# Kerberos user enumeration (krb5-enum-users) on port $1 of host $2.
function kerberos_enum {
    echo -e "${BLUE}[*] Kerberos Scan inititated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,krb5-enum-users" --append-output -oN "$DIR/$2/kerberus_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] Kerberos Scan completed${NC}"
}
# MongoDB NSE enumeration on port $1 of host $2.
function mongodb_enum {
    echo -e "${BLUE}[*] Mongodb Scan initiated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,(mongodb* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --append-output -oN "$DIR/$2/mongodb_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] Mongodb Scan completed${NC}"
}
# MySQL NSE enumeration on port $1 of host $2.
function mysql_enum {
    echo -e "${BLUE}[*] MySql Scan started${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,(mysql* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --append-output -oN "$DIR/$2/mysql_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] MySql Scan completed${NC}"
}
# NFS/portmapper enumeration on port $1 of host $2: nmap NSE scripts plus a
# showmount export listing.
# Fix: the NSE filter was copy-pasted from mysql_enum and selected "mysql*"
# scripts; it now selects the nfs-*/rpc* scripts that actually apply here.
function nfs_enum {
    echo -e "${BLUE}[*] NFS enumeration started${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,(nfs* or rpc* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --append-output -oN "$DIR/$2/nfs_enum_nmap" -vv $2 >/dev/null
    showmount -e $2 2>&1 | tee "$DIR/$2/nfs_showmount"
    echo -e "${GREEN}[+] NFS enumeration completed${NC}"
}
# NNTP enumeration (nntp-ntlm-info) on port $1 of host $2.
# Fix: the completion message misspelled the protocol as "nttp".
function nntp_enum {
    echo -e "${BLUE}[*] nntp scan started${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,nntp-ntlm-info" --append-output -oN "$DIR/$2/nntp_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] nntp scan completed${NC}"
}
# Oracle TNS enumeration on port $1 of host $2: oracle-* NSE scripts, a SID
# brute-force, tnscmd10g ping/version probes, and an oscanner sweep.
function oracle_enum {
    echo -e "${BLUE}[*] Oracle enumeration initiated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,(oracle* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --append-output -oN "$DIR/$2/oracle_enum_nmap" -vv $2 >/dev/null
    nmap -sV -Pn -n -T4 -p $1 --script="banner,oracle-sid-brute" --append-output -oN "$DIR/$2/oracle_sidbrute_nmap" -vv $2 >/dev/null
    tnscmd10g ping -h $2 -p $1 2>&1 | tee "$DIR/$2/oracle-tnscmd-ping"
    tnscmd10g version -h $2 -p $1 2>&1 | tee "$DIR/$2/oracle-tnscmd-version"
    oscanner -v -s $2 -P $1 2>&1 | tee "$DIR/$2/oracle-scanner"
    echo -e "${GREEN}[+] Oracle enumeration completed${NC}"
}
# POP3 NSE enumeration on port $1 of host $2.
function pop3_enum {
    echo -e "${BLUE}[*] pop3 Scan initiated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,(pop3* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --append-output -oN "$DIR/$2/pop3_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] pop3 Scan completed${NC}"
}
# RDP NSE enumeration on port $1 of host $2.
function rdp_enum {
    echo -e "${BLUE}[*] RDP Scan inititated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,(rdp* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --append-output -oN "$DIR/$2/rdp_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] RDP Scan completed${NC}"
}
# Java RMI enumeration on port $1 of host $2 (classloader RCE check plus a
# registry dump).
function rmi_enum {
    echo -e "${BLUE}[*] RMI Scan inititated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,rmi-vuln-classloader,rmi-dumpregistry" --append-output -oN "$DIR/$2/rmi_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] RMI Scan completed${NC}"
}
# MSRPC/portmapper enumeration on port $1 of host $2.
function rpc_enum {
    echo -e "${BLUE}[*] RPC Scan inititated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,msrpc-enum,rpc-grind,rpcinfo" --append-output -oN "$DIR/$2/rpc_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] RPC Scan completed${NC}"
}
# SIP (Asterisk) enumeration on port $1 of host $2.
function sip_enum {
    echo -e "${BLUE}[*] SIP Scan inititated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,sip-enum-users,sip-methods" --append-output -oN "$DIR/$2/sip_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] SIP Scan completed${NC}"
}
# SMB/NetBIOS enumeration on port $1 of host $2: smb-* NSE scripts (unsafe=1
# enables the potentially disruptive checks), nbtscan, anonymous share
# listing with smbmap/smbclient, and a full enum4linux pass.
function smb_enum {
    echo -e "${BLUE}[*] SMB scan initiated${NC}"
    nmap -sV -Pn -T4 -n -p $1 --script="banner,(nbstat or smb* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --script-args=unsafe=1 --append-output -oN "$DIR/$2/smb_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] SMB scan completed${NC}"
    echo -e "${BLUE}[*] Running nbtscan${NC}"
    nbtscan -rvh $2 >> "$DIR/$2/nbtscan"
    echo -e "${GREEN}[+] nbtscan completed${NC}"
    echo -e "${BLUE}[*] Trying to enumerate SMB shares${NC}"
    smbmap -H $2 >> "$DIR/$2/smbmap"
    smbclient -L\\ -N -I $2 > "$DIR/$2/smbclient"
    echo -e "${GREEN}[+] SMB shares enum completed${NC}"
    echo -e "${BLUE}[*] Running enum4linux${NC}"
    enum4linux -a -M -l -d $2 >> "$DIR/$2/enum4linux"
    echo -e "${GREEN}[+] Completed enum4linux scan${NC}"
}
# SNMP enumeration on UDP port $1 of host $2: snmpwalk with the "public"
# community, snmp-* NSE scripts, and an onesixtyone community-string sweep.
# Assumes the SecLists repo is checked out at /opt/Seclists -- confirm.
function snmp_enum {
    echo -e "${BLUE}[*] Running snmpwalk${NC}"
    snmpwalk -c public -v1 $2 >> "$DIR/$2/snmpwalk"
    echo -e "${GREEN}[+] snmpwalk enum completed${NC}"
    echo -e "${BLUE}[*] SNMP scan initiated${NC}"
    nmap -sV -sU -Pn -T4 -n -p $1 --script="banner,(snmp* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --append-output -oN "$DIR/$2/snmp_enum_nmap" -vv $2 >/dev/null
    onesixtyone -c /opt/Seclists/Discovery/SNMP/common-snmp-community-strings-onesixtyone.txt -dd $2 2>&1 | tee "$DIR/$2/onesixtyone"
    echo -e "${GREEN}[+] SNMP scan completed${NC}"
}
# SMTP enumeration on port $1 of host $2: smtp-* NSE scripts, then VRFY-based
# username enumeration with smtp-user-enum (wordlist from /opt/Seclists).
# Fixes: the nmap invocation passed "-sV" twice, and the "SMTP scan
# completed" message was missing its ${NC} colour reset.
function smtp_enum {
    echo -e "${BLUE}[*] Running SMTP Scan${NC}"
    nmap -sV -Pn -T4 -n -p $1 --script="banner,(smtp* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --append-output -oN "$DIR/$2/smtp_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] SMTP scan completed${NC}"
    echo -e "${BLUE}[*] Enumerating SMTP usernames${NC}"
    smtp-user-enum -M VRFY -U /opt/Seclists/Usernames/top-usernames-shortlist.txt -t $2 -p $1 2>&1 | tee "$DIR/$2/smtp_user_enum"
    echo -e "${GREEN}[+] SMTP username enumeration completed${NC}"
}
# SSH enumeration on port $1 of host $2 (algorithms, host keys, auth methods).
function ssh_enum {
    echo -e "${BLUE}[*] Running SSH Scan${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,ssh2-enum-algos,ssh-hostkey,ssh-auth-methods" --append-output -oN "$DIR/$2/ssh_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] SSH Scan completed${NC}"
}
# Telnet enumeration on port $1 of host $2.
function telnet_enum {
    echo -e "${BLUE}[*] Running telnet Scan${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,telnet-encryption,telnet-ntlm-info" --append-output -oN "$DIR/$2/telnet_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] telnet Scan completed${NC}"
}
# TFTP file enumeration on port $1 of host $2.
function tftp_enum {
    echo -e "${BLUE}[*] Running TFTP Scan${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,tftp-enum" --append-output -oN "$DIR/$2/tftp_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] TFTP Scan completed${NC}"
}
# MSSQL enumeration on port $1 of host $2, trying the well-known sa/sa login.
function mssql_enum {
    echo -e "${BLUE}[*] MSSQL scan initiated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,(ms-sql* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --script-args="mssql.instance-port=$1,mssql.username=sa,mssql.password=sa" --append-output -oN "$DIR/$2/mssql_enum_nmap" $2 >/dev/null
    echo -e "${GREEN}[+] MSSQL scan completed${NC}"
}
# VNC enumeration on port $1 of host $2 (unsafe=1 enables intrusive checks).
function vnc_enum {
    echo -e "${BLUE}[*] VNC scan initiated${NC}"
    nmap -sV -Pn -n -T4 -p $1 --script="banner,(vnc* or realvnc* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" --script-args="unsafe=1" --append-output -oN "$DIR/$2/vnc_enum_nmap" -vv $2 >/dev/null
    echo -e "${GREEN}[+] VNC scan completed${NC}"
}
# Remove the per-host scratch files under $DIR/$1: the grepable nmap outputs
# and the per-chunk files written by nmap_range, whose names start with the
# chunk's first port number (a digit 1-9).
# Fix: quote the path expansions so a $DIR containing whitespace does not
# word-split; the nine digit globs are consolidated into one [1-9]* pattern.
function cleanup {
    rm -f "$DIR/$1/out"
    rm -f "$DIR/$1/out_udp"
    rm -f "$DIR/$1"/[1-9]*
}
# Abort the whole script unless host $1 answers a single ICMP echo probe.
# Fix: the old `ping ... && echo ...; if [ "$?" -eq 1 ]` form never reached
# the failure branch under `set -e` (the failing && list terminated the
# script first) and, when it did run, tested the status of the echo rather
# than the ping. Testing the ping directly in an if/else is correct under -e.
function ping_check {
    if ping -qc1 "$1" &>/dev/null; then
        echo -e "${GREEN}[+] Host is pingable!${NC}"
    else
        echo -e "${RED}[-] Host seems to be down${NC}"
        exit 1
    fi
}
# Print the CLI usage/help text, one red line per option.
function print_usage {
    local line
    for line in \
        "[!] USAGE: $(basename "$0") [-d DIRECTORY] [-f FILE] [-n]" \
        "[!]   -d | --directory     Specifies output directory" \
        "[!]   -f | --file          Specifies input file containing list of IPs" \
        "[!]   -n | --noping        Specifies to not check whether target is alive using ping"; do
        echo -e "${RED}${line}${NC}"
    done
}
# Driver: parse flags, then scan every target listed in $FILE, one per line.
parse_args "$@"
# Both an output directory (-d) and a target list (-f) are mandatory.
if [ ! -z "${DIR:+x}" ] && [ ! -z "${FILE:+x}" ]; then
    [ ! -d "$DIR" ] && mkdir -p "$DIR"
    while read p; do
        # Optional liveness check before committing to a full scan.
        [ "$NOPING" = false ] && ping_check $p
        # NOTE(review): this removes a path named after the target IP relative
        # to the *current* directory, not under "$DIR" -- looks suspicious;
        # confirm it is intended before reusing this script.
        rm -rf $p
        mkdir -p "$DIR/$p"
        nmap_scan $p
    done < $FILE
    # Reap any scans still running in the background.
    wait
    echo "============================DONE============================="
else
    print_usage
fi
| true |
3fec93058cd9432fb38a508258938012b9debacd | Shell | beyoung0110/p_code | /shell/9.10.sh | UTF-8 | 143 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Echo every line read from stdin; after any line shorter than MINLEN
# characters, print an additional blank line.
MINLEN=4
# Fix: `IFS= read -r` preserves leading/trailing whitespace and literal
# backslashes (plain `read` mangles both), and printf reproduces lines such
# as "-n" verbatim where echo would misinterpret them.
while IFS= read -r line
do
  printf '%s\n' "$line"
  if [ "${#line}" -lt "$MINLEN" ]
  then printf '\n'
  fi
done
exit 0
| true |
f74b4359bca2f519f3324d21df0c02d45f1c0f81 | Shell | neel-mookerjee/aws-emr-dag-orchestrator | /scripts/setup.sh | UTF-8 | 4,469 | 3.84375 | 4 | [] | no_license | #!/bin/bash
# Positional parameters (validated further below).
CLUSTER=$1    # EMR cluster id, or the literal "default" to auto-discover it
DATA=$2       # data set selector: "full" or "trim"
MODE=$3       # run mode: "full" or "adhoc"
EXECUTE=$4    # execution mechanism: "step" or "datapipeline"
STACK=$5      # stack name, forwarded to setup-datapipeline.sh
NOTIFY_URL=$SLACK_WEBHOOK_URL   # Slack incoming-webhook URL for notifications
# handle_error <status> <message>
# No-op when <status> is 0; otherwise print <message>, post it to the Slack
# webhook ($NOTIFY_URL) tagged with the cluster name/id, and exit 1.
# Fix: "$1" and "$2" are now quoted -- the unquoted forms made
# `[ $1 != 0 ]` a test syntax error for an empty status and word-split
# the message passed to echo.
handle_error(){
    if [ "$1" != 0 ]; then
        echo "$2"
        echo 'payload= {"text":"*CLUSTER: '$CLUSTER_NAME' # '$CLUSTER'*\n>'$2'"}' > msg.txt
        curl -k -X POST -H 'Content-type: application/x-www-form-urlencoded' --data @msg.txt $NOTIFY_URL
        exit 1
    fi
}
# --- argument validation --------------------------------------------------
if [[ -z $DATA ]] || [[ -z $CLUSTER ]]; then
    handle_error 1 "CLUSTER (cluster id for emr ['dafault' for the daily one]) and DATA ('full' or 'trim') must be supplied. Exiting."
fi
if [ "$DATA" != "full" ] && [ "$DATA" != "trim" ]; then
    handle_error 1 "Invalid data option: $DATA - must be 'full' OR 'trim'"
fi
if [ "$MODE" != "full" ] && [ "$MODE" != "adhoc" ]; then
    handle_error 1 "Invalid mode option: $MODE - must be 'full' OR 'adhoc'"
fi
if [ "$EXECUTE" != "step" ] && [ "$EXECUTE" != "datapipeline" ]; then
    handle_error 1 "Invalid execution option: $EXECUTE - must be 'step' OR 'datapipeline'"
fi
# --- cluster discovery ----------------------------------------------------
CLUSTER_IP=""
CLUSTER_NAME=""
DEFAULT="N"
# "default" means: look up the active cluster named "emr-cluster" via the
# AWS CLI instead of taking an explicit cluster id.
if [ "$CLUSTER" == "default" ]; then
    DEFAULT="Y"
    aws emr list-clusters --active --region us-west-2 > ./output.txt 2>&1
    if [ $? != 0 ]; then
        RESPONSE=`cat output.txt`
        # Missing credentials / API exceptions indicate a broken pod rather
        # than a missing cluster, so bail without the Slack notification.
        if [ "$(echo $RESPONSE | grep 'Unable to locate credentials')" != "" ] || [ "$(echo $RESPONSE | grep 'exception')" != "" ]; then
            echo "Crappy pod you got! Exiting."
            exit 1
        else
            handle_error 1 "Cluster details could not be retrieved. Exiting."
        fi
    fi
    CLUSTER=`cat ./output.txt | jq '.Clusters[] | select(.Name == "emr-cluster") | .Id' | sed s/\"//g`
fi
echo CLUSTER: $CLUSTER
echo DATA: $DATA
echo MODE: $MODE
echo EXECUTE: $EXECUTE
echo STACK: $STACK
if [[ -z $CLUSTER ]]; then
    handle_error 1 "Cluster Id could not be retrieved. Exiting."
fi
# Resolve the master node's private IP and the cluster's display name.
CLUSTER_IP=`aws emr list-instances --region us-west-2 --cluster-id $CLUSTER --instance-group-types MASTER | jq '.Instances[] | .PrivateIpAddress' | sed s/\"//g`
handle_error $? "Error in getting cluster details. Exiting."
if [[ -z $CLUSTER_IP ]]; then
    handle_error 1 "Cluster IP could not be retrieved. Exiting."
fi
CLUSTER_NAME=`aws emr describe-cluster --region us-west-2 --cluster-id $CLUSTER | jq '.Cluster.Name' | sed s/\"//g`
handle_error $? "Error in getting cluster details. Exiting."
if [[ -z $CLUSTER_NAME ]]; then
    handle_error 1 "Cluster Name could not be retrieved. Exiting."
fi
# Disable host-key prompting for github.com so the clones below run
# unattended. NOTE(review): the "\arghanil" fragment looks like a mangled
# escape sequence (perhaps "\n\tUser arghanil") -- confirm against the
# original script.
echo -e "Host github.com\arghanil\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config
echo "Cloning parent Git repo..."
git clone git@github.com:arghanil/hive-modules.git
handle_error $? "Error in cloning the parent Git repo. Exiting."
echo "Cloning submodule Git repo..."
git clone git@github.com:arghanil/other-modules.git
handle_error $? "Error in cloning the submodule Git repo. Exiting."
# For the trimmed data set, point every .sql file at the trimmed S3 root.
if [ $DATA == "trim" ]; then
    echo "Altering S3 path for trimmed data..."
    find . -name '*.sql' -exec sed -i 's/tmp01_dataroot/tmp02_dataroot/g' {} \;
fi
# Ensure the scripts bucket exists, creating it on first use.
echo "Checking if the required S3 bucket exists..."
aws s3 ls s3://scripts --region us-west-2
if [ $? != 0 ]; then
    echo "Trying to create S3 bucket scripts..."
    aws s3 mb s3://scripts --region us-west-2
    handle_error $? "Error while creating required S3 bucket. Exiting."
fi
# Announce the cluster endpoints and the start of the build on Slack.
echo 'payload= {"text":"*CLUSTER: '$CLUSTER_NAME' # '$CLUSTER'*\n>Private IP:'$CLUSTER_IP'\n>Hue URL: http://'$CLUSTER_IP':8888/\n>r53: http://'$CLUSTER_NAME'.nonprod.r53.domain.net:8888/\n>*'$EXECUTE' creation process initiated.*"}' > msg.txt
curl -X POST -H 'Content-type: application/x-www-form-urlencoded' --data @msg.txt $NOTIFY_URL
MSG=""
# Fix: the validation above only admits EXECUTE values "step" or
# "datapipeline", but this branch compared against "steps", so the EMR-steps
# path could never run. Compare against the validated value instead.
if [ "$EXECUTE" == "step" ]; then
    MSG="Steps generation process completed. Check progress under AWS EMR Steps."
    bash setup-emr-steps.sh $CLUSTER $CLUSTER_NAME $MODE
    if [ $? != 0 ]; then
        MSG="Steps generation process finished with *ERROR*. Check k8 logs and progress under AWS Data Pipeline."
    fi
fi
# Data Pipeline path: delegate to setup-datapipeline.sh and record the
# outcome in MSG for the final Slack notification.
if [ $EXECUTE == "datapipeline" ]; then
    MSG="Pipeline creation process completed. Check progress under AWS Data Pipeline."
    bash setup-datapipeline.sh $CLUSTER $CLUSTER_NAME $MODE $STACK
    if [ $? != 0 ]; then
        MSG="Pipeline creation process finished with *ERROR*. Check k8 logs and progress under AWS Data Pipeline."
    fi
fi
# Post the final status to Slack.
echo 'payload= {"text":"*CLUSTER: '$CLUSTER_NAME' # '$CLUSTER'*\n>'$MSG'"}' > msg.txt
curl -k -X POST -H 'Content-type: application/x-www-form-urlencoded' --data @msg.txt $NOTIFY_URL
93826270ca716a019945d107eb20bd141f8c43d4 | Shell | PsymonLi/sw | /nic/apollo/tools/apulu/docker/setup_env_mock.sh | UTF-8 | 885 | 2.703125 | 3 | [] | no_license | #! /bin/bash
if [ -z "$1" ]
then
echo "Pipeline argument not supplied"
exit 1
fi
export ASIC="${ASIC:-capri}"
export PDSPKG_TOPDIR=$NIC_DIR/
export PIPELINE=$1
export BUILD_DIR=${PDSPKG_TOPDIR}/build/x86_64/${PIPELINE}/${ASIC}
export CONFIG_PATH=$PDSPKG_TOPDIR/conf/
export PATH=${PATH}:${BUILD_DIR}/bin
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PDSPKG_TOPDIR/third-party/metaswitch/output/x86_64/:$PDSPKG_TOPDIR/lib:$PDSPKG_TOPDIR/lib64:$NIC_DIR/../usr/lib
export PERSISTENT_LOG_DIR=$PDSPKG_TOPDIR
export NON_PERSISTENT_LOG_DIR=$PDSPKG_TOPDIR
export LOG_DIR=$NON_PERSISTENT_LOG_DIR
export ZMQ_SOC_DIR=$PDSPKG_TOPDIR
export ASIC_MOCK_MODE=1
export ASIC_MOCK_MEMORY_MODE=1
export IPC_MOCK_MODE=1
#GDB='gdb --args'
#VALGRIND='valgrind --leak-check=full --show-leak-kinds=all --gen-suppressions=all --error-limit=no --verbose --log-file=valgrind-out.txt --track-origins=yes'
| true |
c29c846360acabd6ce69c1370fd899ae61845638 | Shell | agladstein/AJ_ABC | /checkque_ice.sh | UTF-8 | 3,731 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env bash
cd /home/u15/agladstein/ABC/macsSwig_AJmodels
GOAL=$1
QUEMAX=$2
OUT=$3
MODEL=$4
SYSTEM=$5 #smp, cluster, htc
IP_ADDRESS=$(curl https://gist.githubusercontent.com/agladstein/2bdc122f50314f2a4c7cbc9544e7a325/raw/8bfef8b8f3f7c43fd99832a323ef7130f98571bb/atmo_instance_ip.txt)
set -f
if [ -e switch${MODEL}.txt ] ; then
echo "\n #################"
date
if [ "$SYSTEM" == "ocelote" ] ; then
qstat=/cm/shared/apps/pbspro/current/bin/qstat
qsub=/cm/shared/apps/pbspro/current/bin/qsub
else
qstat=/usr/local/bin/qstat_local
qsub=/usr/pbs/bin/qsub
fi
RESULTS=${OUT}/results_sims_AJ_M${MODEL}
date
echo "Check for ${GOAL} completed runs in $RESULTS"
#check number of completed simulations
COMP=$(ssh agladstein@${IP_ADDRESS} find /vol_c/results_macsSwig_AJmodels_instant/sim_values_AJ_M${MODEL} -type f | wc -l)
echo "${COMP} runs have completed"
if [ "$COMP" -ge "$GOAL" ]; then
echo "Goal completed"
rm switch${MODEL}.txt
echo "Goal completed. ${COMP} runs have completed in $RESULTS." | sendmail agladstein@email.arizona.edu
exit
else
#check number of jobs in que
if [ "$SYSTEM" == "cluster" ]; then
JOBS=$($qstat | grep "agladstein" | grep -v smp | grep -v htc | cut -d " " -f1)
elif [ "$SYSTEM" == "ocelote" ]; then
JOBS=$($qstat | grep "agladstein" | cut -d " " -f1)
else
JOBS=$($qstat | grep "agladstein" | grep $SYSTEM | cut -d " " -f1)
fi
echo $JOBS
n=0
m=0
p=0
for j in $JOBS; do
q=$($qstat -t $j | grep -w "Q" | wc -l)
n=$(($n + $q))
r=$($qstat -t $j | grep "stan" | grep -w "R" | wc -l)
m=$(($m + $r))
s=$($qstat -t $j | grep "qual" | wc -l)
p=$(($p + $s))
done
echo "You have $n jobs in the que"
if [ "$n" -ge "$QUEMAX" ]; then
echo "That's enough jobs in the que"
exit
else
#create PBS scripts
./main_function_AJmodel_j2.sh ${SYSTEM} ${OUT} ${MODEL}
#check standard hrs left in group
SHRS=$(va | cut -f2 | tail -1 | cut -d ":" -f1)
DAYS=$(( $(($(cal | wc -w) - 9)) - $(($(date | cut -d " " -f3))) ))
SBOUND=$(( $DAYS * 1 + $n + $m))
echo "${SHRS} mfh standard hrs are left"
echo "There are $DAYS days left in the month"
echo "You should leave $SBOUND for the rest of the lab"
if [ "$SHRS" -le "$SBOUND" ]; then
echo "There are no standard hrs left to use"
if [ "$SYSTEM" == "smp" ] || [ "$SYSTEM" == "ocelote" ] ; then
#check qualified hrs left in group
QHRS=$(va | cut -f3 | tail -1 | cut -d ":" -f1)
QBOUND=$p
echo "${QHRS} mfh qualified hrs are left"
if [ "$QHRS" -gt "$QBOUND" ]; then
echo "Submit to qualified"
echo "$qsub model${MODEL}_${SYSTEM}_qualified.pbs"
$qsub model${MODEL}_${SYSTEM}_qualified.pbs
exit
else
echo "There are no qualified hrs left to use"
fi
fi
echo "Submit to windfall"
echo "$qsub model${MODEL}_${SYSTEM}_windfall.pbs"
$qsub model${MODEL}_${SYSTEM}_windfall.pbs
else
echo "Submit to standard"
echo "$qsub model${MODEL}_${SYSTEM}_standard.pbs"
$qsub model${MODEL}_${SYSTEM}_standard.pbs
fi
fi
fi
else
exit
fi
| true |
c298bdf416c7704b3b60925e4dc3e86457025f34 | Shell | nickli1664/bash20180403 | /bash/bashnew/chapter9/test98mysql/studychaxunshuju.sh | UTF-8 | 504 | 3.34375 | 3 | [] | no_license | #!/bin/bash
USER="pig"
PASS="123456"
depts=`mysql -u $USER -p$PASS students <<EOF | tail -n +2
SELECT DISTINCT dept FROM students;
EOF`
for d in $depts;
do
echo Department : $d
result="`mysql -u $USER -p$PASS students <<EOF
SET @i:=0;
SELECT @i:=@i+1 as rank,name,mark FROM students WHERE dept="$d" ORDER BY mark DESC;
EOF`"
echo "$result"
echo
done
#line16 SET @i:=0 是一个SQL构件(SQL construct),用来设置变量 i=0 。
#line17 sql语句还需要进一步学习
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.