blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9bd4b5689011069cdb2742215984231a9dec5f88 | Shell | 010penetrator/dotfiles | /sh/fehf | UTF-8 | 1,394 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# See and sort images with feh
# Move a picture into a rating bucket ("file rating sort").
#
# $1 - rating prefix (feh action number, e.g. "3"); the destination is the
#      first existing directory matching "<n>__*/", or "<n>__/" is created.
# $2 - path of the image file to move (feh substitutes %f here).
frsrt() {
    local dest
    # Pick the first existing directory matching the bucket glob. The original
    # used an unquoted glob in [ -d ], which broke when several buckets matched
    # or when the filename contained spaces.
    dest=$(compgen -G "$1__*/" | head -n 1)
    if [ -z "$dest" ]; then
        mkdir -p "$1__"
        dest="$1__/"
    fi
    # --backup=numbered keeps earlier files with the same name instead of
    # overwriting them; -- protects filenames that start with a dash.
    mv --backup=numbered -- "$2" "$dest"
}
# feh runs --actionN commands in a child shell, so the function must be exported.
export -f frsrt
# Launch feh on the files/dirs given in $1: newest first by mtime, EXIF
# auto-rotation, draw filenames (-d.). Keys 1-8 move the current image into a
# rating folder "N__*/" via frsrt; key 9 moves it to ~/.del/ (soft delete
# with numbered backups via mv -b).
feh -d. --auto-rotate --edit --sort mtime --reverse -B "#333" --font /usr/share/fonts/truetype/liberation2/LiberationSans-Regular.ttf/12 --action1 "frsrt 1 \"%f\"" --action2 "frsrt 2 \"%f\"" --action3 "frsrt 3 \"%f\"" --action4 "frsrt 4 \"%f\"" --action5 "frsrt 5 \"%f\"" --action6 "frsrt 6 \"%f\"" --action7 "frsrt 7 \"%f\"" --action8 "frsrt 8 \"%f\"" --action9 " mv -b \"%f\" $HOME/.del/" "$1"
# Older variants kept for reference (different font path; the last one used
# inline mkdir/mv instead of the frsrt helper):
# feh -d. --auto-rotate --sort mtime --reverse -B black --font /usr/share/fonts/TTF/LiberationSans-Regular.ttf/12 --action1 "frsrt 1 \"%f\"" --action2 "frsrt 2 \"%f\"" --action3 "frsrt 3 \"%f\"" --action4 "frsrt 4 \"%f\"" --action5 "frsrt 5 \"%f\"" --action6 "frsrt 6 \"%f\"" --action7 "frsrt 7 \"%f\"" --action8 "frsrt 8 \"%f\"" --action9 " mv -b \"%f\" $HOME/.del/" "$1"
# feh -d. --auto-rotate --sort mtime --reverse -B black --font /usr/share/fonts/TTF/LiberationSans-Regular.ttf/12 --action1 "mkdir -p 1; mv -b \"%f\" 1/" --action2 "mkdir -p 2; mv -b \"%f\" 2/" --action3 "mkdir -p 3; mv -b \"%f\" 3/" --action4 "mkdir -p 4; mv -b \"%f\" 4/" --action5 "mkdir -p 5; mv -b \"%f\" 5/" --action6 "mkdir -p 6; mv -b \"%f\" 6/" --action7 "mkdir -p 7; mv -b \"%f\" 7/" --action8 "mkdir -p 8; mv -b \"%f\" 8/" --action9 " mv -b \"%f\" $HOME/.del" "$1"
| true |
c7fafdf4139040685531195c191877dd4be69744 | Shell | tomarraj008/multee | /publish-secrets-to-skiff.sh | UTF-8 | 1,950 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e

# Mirror a fixed set of AWS/Elasticsearch credentials from Vault into a
# Kubernetes secret named "env-vars" in the aristo-multee namespace.
SECRET_VARS=(
  AWS_ES_HOSTNAME
  AWS_ES_REGION
  AWS_ES_INDEX
  AWS_ES_DOCUMENT_TYPE
  AWS_ES_FIELD_NAME
  AWS_ACCESS_KEY_ID
  AWS_SECRET_ACCESS_KEY
)

# Print a section banner followed by a blank line.
banner() {
  echo -----------------------------------
  echo "$@"
  echo -----------------------------------
  echo
}

banner Getting secrets from Vault
# Vault secrets were once written with publish-secrets-to-vault.sh
for name in "${SECRET_VARS[@]}"; do
  export "$name=$(vault read -field="$name" /secret/aristo/multee-on-skiff/env-vars)"
done

banner Deleting secret in Kubernetes
kubectl \
  --context skiff-production \
  --namespace aristo-multee \
  --ignore-not-found=true \
  delete secret env-vars

echo
banner Writing new values to Kubernetes
literal_args=()
for name in "${SECRET_VARS[@]}"; do
  literal_args+=(--from-literal="$name=${!name}")
done
kubectl \
  --context skiff-production \
  --namespace aristo-multee \
  create secret generic env-vars \
  "${literal_args[@]}"

echo
echo If we got this far, then everything probably succeeded.
| true |
337ec5530133a073cb05c3b758bb2ac9487facac | Shell | crodjer/configs | /scripts/archived/hourly-reminder.sh | UTF-8 | 899 | 3.765625 | 4 | [] | no_license | #/usr/bin/env bash
# A script to remind me of getting up every half hour. Add this string to your
# crontab:
#
# 0,30 * * * * /path/to/this/script/hourly-reminder.sh
script_dir=$(dirname "$0")
hours=$(date +%H)
minutes=$(date +%M)
title="$hours:$minutes"
if [[ $(uname -a) =~ Darwin ]]; then
MACOS=true
fi
export DISPLAY=$(pgrep -fa xserverrc | cut -d " " -f 6)
if [ $(command -v task) ]; then
notification="Pending: $(task status:pending +next count 2> /dev/null)"
fi
case $minutes in
00)
osx_sound=Blow
reminder=reminder
;;
*)
osx_sound=Tink
reminder=reminder-soft
esac
if [[ $MACOS ]]; then
if [ "$osx_sound" ]; then
osx_sound="-sound $osx_sound"
fi
terminal-notifier -message "$notification" -title "$title" $osx_sound
else
pactl play-sample $reminder
notify-send "$title" "$notification"
fi
| true |
1ee6e487639f7bb95e8198627928ac037b8a9027 | Shell | coderdba/oracle-12c-upgrade-scripts | /do-02-stop-db.sh | UTF-8 | 116 | 2.90625 | 3 | [] | no_license | #!/bin/ksh -x
# Usage guard: a DB_UNIQUE_NAME argument is mandatory.
if [ $# -lt 1 ]
then
  echo
  echo "USAGE: $0 DB_UNIQUE_NAME"
  echo
  exit
fi
# Stop the database via Oracle clusterware (srvctl).
# "$1" is quoted so an accidental empty/space-containing argument cannot
# word-split into extra srvctl arguments.
srvctl stop database -d "$1"
| true |
e8f7ff16e284d37ed98bc91190e11a24e7561f7e | Shell | jaimey/script-mysql-db-user | /mysql_create_db_user.sh | UTF-8 | 2,637 | 4.59375 | 5 | [] | no_license | #!/bin/bash
# Shell script to create MySQL database and user.
# Print the help/usage text to stdout, then terminate with status 1.
printUsage()
{
    cat <<USAGE
$(basename "$0") [OPTION]...
Create MySQL database and user.
Options:
-h, --host MySQL Host
-d, --database MySQL Database
-u, --user MySQL User
-p, --pass MySQL Password (If empty, auto-generated)
Examples:
$(basename "$0") -u=user -d=database
Version $VERSION
USAGE
    exit 1
}
# Parse "--opt=value" style arguments into the DB_* globals.
# Unknown options are silently ignored. DB_NAME is mandatory; DB_USER
# defaults to DB_NAME when not supplied.
processArgs()
{
    local arg
    for arg in "$@"; do
        case "$arg" in
            -h=* | --host=*)     DB_HOST=${arg#*=} ;;
            -d=* | --database=*) DB_NAME=${arg#*=} ;;
            -u=* | --user=*)     DB_USER=${arg#*=} ;;
            -p=* | --pass=*)     DB_PASS=${arg#*=} ;;
            *) ;;
        esac
    done

    if [[ -z $DB_NAME ]]; then
        echo "Database name cannot be empty."
        exit 1
    fi
    [[ $DB_USER ]] || DB_USER=$DB_NAME
}
# Create the database and user and grant privileges, connecting as the MySQL
# root account. Prompts for the root password on stdin.
# Globals read: BIN_MYSQL, DB_HOST, DB_NAME, DB_USER, DB_PASS
# Exits 1 when the mysql invocation fails.
createMysqlDbUser()
{
    local SQL1="CREATE DATABASE IF NOT EXISTS ${DB_NAME};"
    local SQL2="CREATE USER '${DB_USER}'@'localhost' IDENTIFIED BY '${DB_PASS}';"
    local SQL3="GRANT ALL PRIVILEGES ON ${DB_NAME}.* TO '${DB_USER}'@'localhost';"
    local SQL4="FLUSH PRIVILEGES;"

    echo "Please enter root user MySQL password!"
    # -s keeps the typed password off the terminal (the original echoed it back);
    # the extra echo restores the newline that -s suppresses.
    read -rs rootPassword
    echo

    if ! $BIN_MYSQL --silent -h "$DB_HOST" -u root -p"${rootPassword}" -e "${SQL1}${SQL2}${SQL3}${SQL4}" ; then
        # Bug fix: the original exited 0 here, reporting success on failure.
        exit 1
    fi
}
# Print a summary banner with the connection details that were just created.
# Globals read: DB_HOST, DB_NAME, DB_USER, DB_PASS.
printSuccessMessage()
{
    cat <<SUMMARY
MySQL DB / User creation completed!
################################################################

 >> Host : ${DB_HOST}
 >> Database : ${DB_NAME}
 >> User : ${DB_USER}
 >> Pass : ${DB_PASS}

################################################################
SUMMARY
}
# Abort (exit 1) unless the script runs with root privileges (EUID 0).
CheckIsRoot()
{
    [ "$EUID" -eq 0 ] && return 0
    echo "Sorry, you need to run this as root"
    exit 1
}
################################################################################
# Main
################################################################################
VERSION="0.1.0"
# 'command -v' is the shell-builtin, portable replacement for external 'which'.
BIN_MYSQL=$(command -v mysql)
DB_HOST='localhost'
DB_NAME=
DB_USER=
# Random default password; overridden when -p/--pass is given (processArgs).
DB_PASS="$(openssl rand -base64 12)"
# Entry point: validate arguments, create the DB/user, report the result.
main()
{
    if [[ $# -lt 1 ]]; then
        printUsage
    fi

    echo "Processing arguments..."
    processArgs "$@"
    echo "Done!"

    echo "Creating MySQL db and user..."
    createMysqlDbUser
    echo "Done!"

    printSuccessMessage
    exit 0
}
# Root is required before doing anything; then run the normal flow.
CheckIsRoot
main "$@" | true |
eaa2dd031767ba429c4434454c5a866ca25ced1e | Shell | afalak94/bash | /PripremaPrakse_AntonioFalak/zadatak08_zapisDat.sh | UTF-8 | 250 | 3.484375 | 3 | [] | no_license | #! /bin/bash
# Read ten words from stdin and write them, one per line, into the file
# given as the first argument (truncating any previous contents first).
Dat=$1
echo "Unesite 10 rijeci za zapis u datoteku: "
truncate -s 0 "$Dat" # erase the file's contents
for (( i = 0; i < 10; i++ )); do
    # -r keeps backslashes literal; quoting below prevents word splitting
    # and glob expansion of the user's input (the original used echo $word).
    read -r word
    printf '%s\n' "$word" >> "$Dat"
done
| true |
0bad7426ad8121b4f4f3aecb5adb22e53d2deb1c | Shell | Cloudxtreme/mirage-platform | /bindings/build.sh | UTF-8 | 850 | 2.578125 | 3 | [] | no_license | #!/bin/sh -ex
# Compile the Xen/OCaml C stubs and archive them into libxencamlbindings.a.
export PKG_CONFIG_PATH=`opam config var prefix`/lib/pkgconfig
PKG_CONFIG_DEPS="mirage-xen-minios mirage-xen-ocaml"
# Bail out early if the required pkg-config packages are not installed.
pkg-config --print-errors --exists ${PKG_CONFIG_DEPS} || exit 1
CFLAGS=`pkg-config --cflags mirage-xen-ocaml`
MINIOS_CFLAGS=`pkg-config --cflags mirage-xen-minios mirage-xen-ocaml`
# This extra flag only needed for gcc 4.8+
GCC_MVER2=`gcc -dumpversion | cut -f2 -d.`
if [ $GCC_MVER2 -ge 8 ]; then
EXTRA_CFLAGS="-fno-tree-loop-distribute-patterns -fno-stack-protector"
fi
# Honour a caller-provided compiler; default to cc.
CC=${CC:-cc}
# First set of stubs needs the Mini-OS flags; second set only the OCaml flags.
$CC -Wall -Wno-attributes ${MINIOS_CFLAGS} ${EXTRA_CFLAGS} ${CI_CFLAGS} -c barrier_stubs.c eventchn_stubs.c exit_stubs.c gnttab_stubs.c main.c sched_stubs.c start_info_stubs.c xb_stubs.c
$CC -Wall -Wno-attributes ${CFLAGS} ${EXTRA_CFLAGS} ${CI_CFLAGS} -c atomic_stubs.c clock_stubs.c cstruct_stubs.c
ar rcs libxencamlbindings.a *.o
| true |
1dfb0e0b20c1345b20c7e37e3b2d08da29074a02 | Shell | deckb/eos | /.cicd/package.sh | UTF-8 | 2,041 | 3.59375 | 4 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
set -eo pipefail
echo '--- :evergreen_tree: Configuring Environment'
# general.sh is expected to define BUILD_DIR, HELPERS_DIR, CICD_DIR, etc.
. ./.cicd/helpers/general.sh
mkdir -p "$BUILD_DIR"
# macOS packages natively with brew; everything else packages inside Docker.
if [[ $(uname) == 'Darwin' && $FORCE_LINUX != true ]]; then
echo '+++ :package: Packaging EOSIO'
PACKAGE_COMMANDS="bash -c 'cd build/packages && chmod 755 ./*.sh && ./generate_package.sh brew'"
echo "$ $PACKAGE_COMMANDS"
eval $PACKAGE_COMMANDS
ARTIFACT='*.rb;*.tar.gz'
else # Linux
echo '--- :docker: Selecting Container'
ARGS="${ARGS:-"--rm --init -v \"\$(pwd):$MOUNTED_DIR\""}"
# file-hash.sh derives FULL_TAG from the platform dockerfile's hash.
. "$HELPERS_DIR/file-hash.sh" "$CICD_DIR/platforms/$PLATFORM_TYPE/$IMAGE_TAG.dockerfile"
PRE_COMMANDS="cd \"$MOUNTED_DIR/build/packages\" && chmod 755 ./*.sh"
# Pick package format (and any extra setup) from the image being used.
if [[ "$IMAGE_TAG" =~ "ubuntu" ]]; then
ARTIFACT='*.deb'
PACKAGE_TYPE='deb'
PACKAGE_COMMANDS="./generate_package.sh \"$PACKAGE_TYPE\""
elif [[ "$IMAGE_TAG" =~ "centos" ]]; then
ARTIFACT='*.rpm'
PACKAGE_TYPE='rpm'
PACKAGE_COMMANDS="mkdir -p ~/rpmbuild/BUILD && mkdir -p ~/rpmbuild/BUILDROOT && mkdir -p ~/rpmbuild/RPMS && mkdir -p ~/rpmbuild/SOURCES && mkdir -p ~/rpmbuild/SPECS && mkdir -p ~/rpmbuild/SRPMS && yum install -y rpm-build && ./generate_package.sh \"$PACKAGE_TYPE\""
fi
COMMANDS="echo \"+++ :package: Packaging EOSIO\" && $PRE_COMMANDS && $PACKAGE_COMMANDS"
DOCKER_RUN_COMMAND="docker run $ARGS $(buildkite-intrinsics) '$FULL_TAG' bash -c '$COMMANDS'"
echo "$ $DOCKER_RUN_COMMAND"
eval $DOCKER_RUN_COMMAND
fi
cd build/packages
[[ -d x86_64 ]] && cd 'x86_64' # backwards-compatibility with release/1.6.x
# Only upload from real Buildkite runs.
if [[ "$BUILDKITE" == 'true' ]]; then
echo '--- :arrow_up: Uploading Artifacts'
buildkite-agent artifact upload "./$ARTIFACT" --agent-access-token $BUILDKITE_AGENT_ACCESS_TOKEN
fi
# Sanity check: each semicolon-separated artifact glob must match something.
for A in $(echo $ARTIFACT | tr ';' ' '); do
if [[ $(ls "$A" | grep -c '') == 0 ]]; then
echo "+++ :no_entry: ERROR: Expected artifact \"$A\" not found!"
pwd
ls -la
exit 1
fi
done
echo '--- :white_check_mark: Done!'
| true |
c07994b7239e0f15ac5fd0d70279c9ce1645393d | Shell | shenyunhang/CSC | /experiments/scripts/result_merge.sh | UTF-8 | 368 | 3 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -x
set -e

# We still need to run NMS; the results cannot simply be concatenated.
# (translated from the original Chinese comment)

# Stage 1: copy every file from $2 into $1.
# Globbing replaces the old `ls` parsing, which broke on spaces/globs.
for path in "$2"/*
do
    [ -e "$path" ] || continue
    entry=$(basename "$path")
    echo "$entry"
    cp -- "$2/$entry" "$1/$entry"
done

# Stage 2: rescale the 2nd field (score) of every file in $1 by 0.1.
for path in "$1"/*
do
    [ -e "$path" ] || continue
    entry=$(basename "$path")
    echo "$entry"
    awk '{ print $1, 0.1*$2, $3, $4, $5, $6 }' "$1/$entry" > "$1/$entry.txt"
    mv -- "$1/$entry.txt" "$1/$entry"
done

# Stage 3: append every file from $3 onto the same-named file in $1.
for path in "$3"/*
do
    [ -e "$path" ] || continue
    entry=$(basename "$path")
    echo "$entry"
    cat -- "$3/$entry" >> "$1/$entry"
done

exit
| true |
e84377e66e8fa26d4f332532b01dbdcfb0e294da | Shell | fabioboh/nipype | /tools/ci/install_deb_dependencies.sh | UTF-8 | 341 | 2.96875 | 3 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | #!/bin/bash
echo "Installing NeuroDebian dependencies"
set -eu
echo "INSTALL_DEB_DEPENDENCIES = $INSTALL_DEB_DEPENDENCIES"
# Only pull the (large) neuroimaging tool set when the CI job asks for it;
# $INSTALL_DEB_DEPENDENCIES is evaluated as a command (true/false).
if $INSTALL_DEB_DEPENDENCIES; then
# Registers the NeuroDebian apt repository (setup script fetched upstream).
bash <(wget -q -O- http://neuro.debian.net/_files/neurodebian-travis.sh)
sudo apt update
sudo apt install -y -qq fsl afni elastix fsl-atlases xvfb fusefat graphviz
fi
| true |
ee518564c502474c612be7db48e71ad2c3b06536 | Shell | djo155/analysis_pipeline | /check_highres2standard.sh | UTF-8 | 857 | 3.640625 | 4 | [] | no_license |
# Print usage and abort; an output dir plus at least one analysis dir are required.
Usage(){
echo ""
echo "check_highres2standard.sh <output> <analysis directories....>"
echo ""
exit 1
}
if [ $# -le 1 ] ; then
Usage
fi
output=$1
shift 1
# Create the output directory on first use.
if [ ! -d ${output} ] ; then
/bin/mkdir $output
fi
#find all the images
images=""
for i in $@ ; do
# FSL's imtest prints 0 when the warped highres image is missing/invalid.
if [ `${FSLDIR}/bin/imtest ${i}/reg/highres2standard_warped` = 0 ] ; then
echo "Invalid image : ${i}/reg/highres2standard_warped"
fi
f=`${FSLDIR}/bin/imglob -extension ${i}/reg/highres2standard_warped`
echo $i $f
# Resolve to an absolute path, since we cd into $output below.
f=`readlink -f $f`
images="${images} $f"
done
#switch to desired output directory (allows concurrent slicesdir)
cd $output
slicesdir -p ${FSLDIR}/data/standard/MNI152_T1_2mm $images
# Flatten slicesdir's output into the chosen directory.
mv slicesdir/* ./
/bin/rmdir slicesdir
f=`readlink -f index.html`
echo "Ignore FSL's link"
echo "Finished. To view, point your web browser at"
echo "file:${f}"
| true |
1205efdec9c03f633e252b5f85ff9c229b4b938a | Shell | android1and1/saltit | /scripts/tests/test_help_find_dot_token.sh | UTF-8 | 1,265 | 2.859375 | 3 | [] | no_license | #!/bin/bash
# filename: test_help_find_dot_token.sh
# Exercises help_find_dot_token (defined in ${FUNCPATH}/base) against several
# fixture layouts under ${ABSROOT}/temp. echotest/huali are harness helpers.
project_dir=$( cd $( dirname $0 ) && cd ../../ && pwd -P )
. $project_dir/conf/abs.conf
. ${FUNCPATH}/base 2>/dev/null
# Case 1: no '.token' file exists anywhere.
echotest "1st:no '.token' file exists."
echotest "$( help_find_dot_token )"
huali
# Case 2: files merely named 'token'/'hasprefixtoken' must NOT match.
echotest "2nd:touch a 'token' file and a 'hasprefixtoken' file."
mkdir -p ${ABSROOT}/temp/temp11/
touch ${ABSROOT}/temp/temp11/hasprefixtoken
touch ${ABSROOT}/temp/temp11/token
echotest "$( help_find_dot_token )"
huali
rm ${ABSROOT}/temp/temp11/token
# Case 3: a real '.token' file should be found together with its directory.
echotest "3th:has a '.token' file, and its directory."
touch ${ABSROOT}/temp/temp11/.token
echotest "$( help_find_dot_token )"
huali
echotest "All of 2 dirs has .token."
# Case 4: parent dir has .done; its sub dir has no .done but does have a .token.
TEMPDIR=${ABSROOT}/temp/temp12
mkdir -p $TEMPDIR/sundir/
touch $TEMPDIR/.done
touch $TEMPDIR/sundir/{a,b,c,d}.text
touch $TEMPDIR/sundir/.token
# Because temp11/ and temp12/ both contain '.token', the function finds 2
# and should return an error status code; stdout will tell us "No Way".
echotest "$(help_find_dot_token)"
huali
# Case 5: exactly one '.token' remains -> should succeed again.
echotest "rm temp11\'s .token,keep temp12\'s .token,then run."
rm ${ABSROOT}/temp/temp11/.token
echotest "$(help_find_dot_token)"
# HOUSEKEEPING: remove the test fixtures.
rm -rf ${ABSROOT}/temp/temp11/
rm -rf ${ABSROOT}/temp/temp12/
| true |
6fbe5fecffabcf54e402f0bdf55bdf26277fae30 | Shell | saeidsafavi/OpenWindow | /scripts/run.sh | UTF-8 | 911 | 3.3125 | 3 | [
"ISC"
] | permissive | #!/bin/bash
set -e
# Seeds used for the repeated trials of each experiment.
SEEDS=(1000 2000 3000 4000)
# Train + predict one configuration ($1 = .ini path) for one seed ($2).
# The training id embeds the (lowercased) run date and the config basename.
trial() {
fname=$(basename "$1")
id="${date,,}_${fname%.*}/seed=$2"
echo -e "\nTraining for '$id'..."
python openwindow/train.py -f "$1" --seed $2 --training_id "$id"
echo -e "\nPredicting for '$id'..."
python openwindow/predict.py -f "$1" --training_id "$id" --clean=True
}
# Evaluate one configuration across all seeds in a single evaluate.py call;
# the ids are joined with commas via the ${ids// /,} substitution.
evaluate() {
echo "Evaluating '$1'..."
fname=$(basename "$1")
ids=${SEEDS[@]/#/${date,,}_${fname%.*}/seed=}
python openwindow/evaluate.py -f "$1" --training_id ${ids// /,}
echo
}
# Run trial() once per seed for the given configuration.
experiment() {
echo "Running experiment for $(basename $1)..."
for seed in ${SEEDS[@]}; do
trial "$1" $seed
done
echo
}
# e.g. "Jan05" (UTC); shared by trial/evaluate so their ids line up.
date=$(date -u +%b%d)
# evaluate 'default.ini'
# evaluate 'scripts/closedset-50.ini'
# evaluate 'scripts/openset-o1-o2-f.ini'
experiment 'scripts/openset-o1-f-o2.ini'
evaluate 'scripts/openset-o1-f-o2.ini'
# evaluate 'scripts/openset-f-o1-o2.ini'
| true |
9af6802070bae0ba70ef20ac278e2cc9f19da408 | Shell | anandrkskd/jkube-website | /scripts/deploy.sh | UTF-8 | 2,692 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# On any failing command, run the error() handler (defined later in this file).
trap 'error' ERR
ECLIPSE_SERVERS[0]="[git.eclipse.org]:29418 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKPWcmP3foX15NodSZXwPWP/YZUDRSLAGF/1nAVDYuJIPpbhnCrsZ5imxzMyzufEZoQ4IainqYj71MFtTyeSXwc="
ECLIPSE_SERVERS[1]="[git.eclipse.org]:29418 ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBERdg5QiGIbLMjUSLShomjVOSczU4o24GfbDDzzJJcuVP4xmcXv6JEZdfr7ijjpZtqRH9ZTwRlildVbMlWb8/IJakZzr1zhehsw+sD+EF+gmxWPu71ZvNgRfZPumx8I7sQ=="
ECLIPSE_SERVERS[2]="[git.eclipse.org]:29418 ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBACayKawmejZ2qqculp0fBRKrtWgybnDvzCVy0x2E4ayTLlLmWc80ak411bfqH9qmN9O8MmLnS8nMaun7LXzNUG28gGLYLn+IevprUpFK1o256Yute4APJtoHZRNIAgf62BtwuBptudKR7ZLEE/g62R8e9BHpump10duT8RPl9dgOgy7rg=="
ECLIPSE_SERVERS[3]="[git.eclipse.org]:29418 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAn1P3D1rGBOVnj043ArtjaJBILKuQy5LSt9LCJc0a/xLwVvHltiQtXH9fnZ6oKUNr6zacRF0fq2Bid3hdh9fQO94+l4qFOOszfX4662Z3pi3nR4yE/bmCoNswKloUiQHy7BWjM8JTJOStZuQjBI2cTvVWKzUCT8A+iyqrDsNeqKPVXfwoOCUo3+O5Tfvv0h1VrXCmNS31W7srGQRiTUEzeKa3IXuQ85UvozHNMs1vmguCZYpNeoL/3U+dYaR3xba19ijbHrNog3GZ3ku8NiNeGhcCjx/Ar+Fj2bH4X1JIls6lC7NRYZadlifu9BxvqB2tgdcqCEw9OqqzeSKo+715Hw=="
ECLIPSE_SERVERS[4]="[git.eclipse.org]:29418 ssh-dss AAAAB3NzaC1kc3MAAACBAJhl1CSP2rzgPCUPvl+jxdKcD3npSp5MNYdqLL2XoCLw/PHL2JZUN0zVV9/mCT3Im39OvyyPtAQ/KvAlMtJeX+mfHvG/33fub5P/xMJlrJhS+VrVVIZxDBGPbYktO7ySiOs/FWJE1+5pjMpJbqt4a4FhpnsojmKHsY9FEg7mufN7AAAAFQDyJAzuwliAQKXAQzqa2KqmyPFhVQAAAIAVzilOrNogcZuA3y8sUg/wjnQG2rZhyfbMhSpc7NKjkctf3fdIGjQp7HUJlNA29TnMoiThNng3KvuGm4WtOQYi3KxIxAlom+2Rxm1RR5kYyvGK0hDW86ZXnhaCiuGxctS+rNf6QjJ8FVtUEG8v84xiHtOWh5FrlkEB3UcSFFwBAAAAAIBK8vb6wXY9J/KXv7e3X1lyg81EJma/UuFXcizaZrw2bAhiJ/P+AK3TGNcOF7ypTKCoSkRZdEMeYjx9ljCFHkgGuUpO6vyABai9CG9zpyaHAMbcQ3PlBeCws0l2rqRHay0eIACvX2xMhFXxXr8n6zJy0FiVQ2aRAb6/4OFhWR9rMQ=="
# Gerrit remote that hosts the published www.eclipse.org/jkube site.
GERRIT_REPO="ssh://$SSH_USER@git.eclipse.org:29418/www.eclipse.org/jkube"
BASEDIR=$(dirname "$BASH_SOURCE")
TEMP_DIR=$(realpath "$BASEDIR/../temp")
WEB_DIR=$(realpath "$BASEDIR/..")
ECLIPSE_REPO_DIR="$TEMP_DIR/jkube"
# Prepare SSH key, known_hosts entries and git identity for the CI push.
# Requires SSH_KEY, GIT_EMAIL, GIT_NAME in the environment and the
# ECLIPSE_SERVERS host-key array defined above.
function initEnvironment() {
mkdir -p "$TEMP_DIR"
mkdir -p ~/.ssh
echo "$SSH_KEY" > ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
for _eclipse_server in "${ECLIPSE_SERVERS[@]}" ; do
grep -qxF "$_eclipse_server" ~/.ssh/known_hosts || echo "$_eclipse_server" >> ~/.ssh/known_hosts
done
git config --global user.email "$GIT_EMAIL"
git config --global user.name "$GIT_NAME"
}
# Remove all temporary state (also used by the error trap).
function cleanUp() {
rm -rf "$TEMP_DIR"
}
# Clone the Gerrit repo and delete everything except .git, so the freshly
# built site fully replaces the previous content.
function cloneAndPrepareEclipseRepo() {
git clone "$GERRIT_REPO" "$ECLIPSE_REPO_DIR"
find "$ECLIPSE_REPO_DIR" -maxdepth 1 ! -path "$ECLIPSE_REPO_DIR" ! -name '.git' -exec rm -rf {} +
}
# Build the static site into $WEB_DIR/public via npm.
function build() {
npm install --prefix "$WEB_DIR" "$WEB_DIR"
npm run --prefix "$WEB_DIR" build
}
# Copy the built site into the clone, commit, and push to Gerrit.
function deploy() {
cp -avr "$WEB_DIR/public/"* "$ECLIPSE_REPO_DIR"
git --git-dir "$ECLIPSE_REPO_DIR/.git" --work-tree "$ECLIPSE_REPO_DIR" add "$ECLIPSE_REPO_DIR"
git --git-dir "$ECLIPSE_REPO_DIR/.git" --work-tree "$ECLIPSE_REPO_DIR" commit -m "CI: Website updated"
git --git-dir "$ECLIPSE_REPO_DIR/.git" --work-tree "$ECLIPSE_REPO_DIR" push origin master
}
# ERR-trap handler: report, clean up, and fail the job.
function error() {
echo 'Error while deploying JKube website - Cleaning Up temporary files'
cleanUp
exit 1
}
cleanUp
initEnvironment
cloneAndPrepareEclipseRepo
build
deploy
cleanUp
| true |
2a19f8931d4c1264ffc1766f6bfcb8dd660acd6d | Shell | jalmond/LQanalyzer | /bin/CheckNewTagCompiler.sh | UTF-8 | 487 | 3.09375 | 3 | [] | no_license | ### Change tag name if not fresh terminal
# Compare $CATTAG before and after sourcing the setup script ($1); if it
# changed, a new tag was configured and the build tree must be cleaned.
current_tag=$CATTAG
source $1
# scripts/setup/SetBrachAndTag.sh
new_tag=$CATTAG
if [[ $current_tag != $new_tag ]];then
echo "@@@@@@@@@@@@@@!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo "This is a new tag. You need to work from a new terminal"
echo "@@@@@@@@@@@@@@!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo " This message will stop when you have setup CATANalyzer in a fresh terminal."
make distclean
fi | true |
376a91d1f7d592204664e89785acdd6f2df1767f | Shell | leohilbert/protoc-gen-java-leo | /createDebugInput.sh | UTF-8 | 745 | 2.84375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
set -e
# --------------------------------------------------------------------------------
# Will use protoc on the test-proto file and store the CodeGenerationRequest in text-form in ./generated/code_generator_request.pb.bin
# This allows you to debug the plugin by adding "debugInput" as a program argument (see main.cpp)
# --------------------------------------------------------------------------------
# Build the C++ plugin out-of-tree.
mkdir -p build
cd build || exit
cmake ..
make
cd ..
# Build the Go helper that captures protoc's CodeGeneratorRequest.
go build -o ./build/protoc-gen-debug ./tools/protoc-gen-debug/main.go
# Run protoc with the debug plugin over the test protos.
protoc \
--plugin=protoc-gen-debug=./build/protoc-gen-debug \
--debug_out=./generated \
--proto_path=./java/src/main/proto \
--proto_path=./java/src/test/proto \
./java/src/test/proto/*.proto
| true |
5255e0f96de6e66f26d061459a7ef9520d4afbe4 | Shell | zoickx/dotfiles | /aconfmgr/70-apps.sh | UTF-8 | 4,406 | 2.8125 | 3 | [
"MIT"
] | permissive | ################
# aconfmgr manifest of desktop applications, grouped CLI -> semi-GUI -> GUI.
# AddPackage declares an installed package (--foreign = outside the official
# repos, i.e. AUR); IgnorePath/CreateLink manage config-tree exceptions.
## CLI
################
# u want mah files? well come and get 'em!
AddPackage cryfs # Cryptographic filesystem for the cloud
AddPackage gocryptfs # Encrypted overlay filesystem written in Go.
AddPackage veracrypt # Disk encryption with strong security based on TrueCrypt
AddPackage bitwarden-cli # The command line vault
# cleanup/deduplication
AddPackage rmlint # Tool to remove duplicates and other lint, being much faster than fdupes
AddPackage --foreign findimagedupes # Tool to find visually similar or duplicate images
# etc
AddPackage asciinema # Record and share terminal sessions
AddPackage gitui # Blazing fast terminal-ui for git written in Rust
AddPackage just # A handy way to save and run project-specific commands
AddPackage lshw # A small tool to provide detailed information on the hardware configuration of the machine.
AddPackage rclone # Sync files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox and Google Cloud Storage
AddPackage unarchiver # unar and lsar
AddPackage libqalculate # Multi-purpose desktop calculator
AddPackage --foreign pandoc-bin # Pandoc - executable only, without 750MB Haskell depends/makedepends
AddPackage --foreign dotter-rs-bin # A dotfile manager and templater written in Rust
AddPackage --foreign jmtpfs # FUSE and libmtp based filesystem for accessing MTP (Media Transfer Protocol) devices
AddPackage --foreign inxi # Full featured CLI system information tool
# TODO: auto-submit?
AddPackage pkgstats # Submit a list of installed packages to the Arch Linux project
################
## Semi-GUI
################
AddPackage syncthing # Open Source Continuous Replication / Cluster Synchronization Thing
AddPackage yt-dlp # A youtube-dl fork with additional features and fixes
AddPackage --foreign noti # Monitor a process and trigger a notification
# # Mullvad kind of doesn't play well
# The Mullvad app rewrites its own state files, so they are ignored, and its
# systemd unit lives inside /opt, hence the explicit links below.
AddPackage --foreign mullvad-vpn-bin # The Mullvad VPN client app for desktop
IgnorePath '/etc/mullvad-vpn/account-history.json'
IgnorePath '/etc/mullvad-vpn/settings.json'
IgnorePath '/usr/bin/mullvad-exclude'
CreateLink /etc/systemd/system/mullvad-daemon.service /opt/Mullvad\ VPN/resources/mullvad-daemon.service
CreateLink /etc/systemd/system/multi-user.target.wants/mullvad-daemon.service /opt/Mullvad\ VPN/resources/mullvad-daemon.service
CreateLink /usr/lib/systemd/system/mullvad-daemon.service /opt/Mullvad\ VPN/resources/mullvad-daemon.service
################
## Proper GUI
################
# basics-101
AddPackage alacritty # A cross-platform, GPU-accelerated terminal emulator
AddPackage --foreign emacs-gcc-wayland-devel-bin # GNU Emacs. Development native-comp branch and pgtk branch combined, served as a binary.
# browsers
AddPackage firefox # Standalone web browser from mozilla.org
AddPackage torbrowser-launcher # Securely and easily download, verify, install, and launch Tor Browser in Linux
AddPackage --foreign brave-bin # Web browser that blocks ads and trackers by default (binary release)
# messengers
AddPackage signal-desktop # Signal Private Messenger for Linux
AddPackage telegram-desktop # Official Telegram Desktop client
# viewers of all sorts
AddPackage thunar # Modern file manager for Xfce
AddPackage imv # Image viewer for Wayland and X11
AddPackage tumbler # D-Bus service for applications to request thumbnails
AddPackage mpv # a free, open source, and cross-platform media player
AddPackage gthumb # Image browser and viewer for the GNOME Desktop
AddPackage foliate # A simple and modern GTK eBook reader
AddPackage evince # Document viewer (PDF, PostScript, XPS, djvu, dvi, tiff, cbr, cbz, cb7, cbt)
AddPackage mupdf-gl # Lightweight PDF and XPS viewer with OpenGL backend
# graphical editors
AddPackage gimp # GNU Image Manipulation Program
AddPackage inkscape # Professional vector graphics editor
AddPackage --foreign losslesscut-bin # Crossplatform GUI tool for lossless trimming/cutting of video/audio files
# files over networks
AddPackage qbittorrent # An advanced BitTorrent client programmed in C++, based on Qt toolkit and libtorrent-rasterbar
# DOOMs
AddPackage --foreign prboom-plus # An advanced, Vanilla-compatible Doom engine based on PrBoom
# etc
AddPackage --foreign onlyoffice-bin # An office suite that combines text, spreadsheet and presentation editors
AddPackage --foreign activitywatch-bin # Log what you do on your computer. Simple, extensible, no third parties.
| true |
3bff08bb4edfe94c8fd20d6eb60cb12284df3c64 | Shell | rubyisbeautiful/elm-es-client | /entrypoint.sh | UTF-8 | 270 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e

# Docker entrypoint: install the Elm app's dependencies on first run, then
# either hand control to the command passed to the container or start
# elm-reactor as the default server.
cd "/usr/src/app" || exit 1

# elm-stuff is only missing on a fresh volume; fetch dependencies then.
if [ ! -d "/usr/src/app/elm-stuff" ]; then
    npm install
    # cd tests || exit 1
    # elm-package install -y
    # cd .. || exit 1
fi

if [ -n "$1" ]; then
    exec "$@"
else
    # No command given: serve the app with elm-reactor on $ELM_ES_PORT.
    exec elm-reactor -p "$ELM_ES_PORT" -a 0.0.0.0
fi
| true |
b80f51316d48008f24726b591647829d86893a6c | Shell | jiechencyz/Perm-InSAR | /processScripts/geocodeGdalAll | UTF-8 | 239 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env bash
# Run geocodeGdal.py on every file listed (one path per line) in the file $1.
fileList=$1
# Kept only so the trailing "IFS=$old_IFS" restore (outside this hunk)
# remains a harmless no-op.
old_IFS="$IFS"
# Read line by line instead of word-splitting $(cat ...); this also stops
# glob characters inside the listed paths from expanding.
while IFS= read -r file; do
    [ -n "$file" ] || continue
    geocodeGdal.py -l ../geom_master/lat.rdr -L ../geom_master/lon.rdr -f "$file" -x 0.0028125 -y 0.00081081 -b '71.85 74 123.1 129.7'
done < "$fileList"
IFS=$old_IFS | true |
614a39363dca0471e37e92111d1dce05cb2c35f9 | Shell | inbitcoin/bitcore | /tests/integration/tests/txhistory.sh | UTF-8 | 1,982 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# exported variables:
# arguments:
# Integration test for GET /v1/txhistory/: runs the txhistory utility in a
# container (helpers from tests/common.sh) and validates the JSON report.
. ./tests/common.sh
test_header="testing GET /v1/txhistory/ ... "
echo -n "${test_header}"
report_basename="txhistory"
gen_report_dir > /dev/null
entrypoint="${UTILS_DIR}/txhistory.sh"
loop_msg="requesting txhistory of wallet n. %s"
cmd_base="${DOCKER_RUN_D} --name wlt_%s ${IMG_NAME} ${entrypoint} ${report_dir} ${num_calls}"
call_and_track > /dev/null || handle_unexpected_error
wlt_file="${report_dir_host}/wlt_1"
if [ -f "${wlt_file}" ]; then
output=$(cat ${wlt_file})
# The report must be valid JSON.
jq '.' ${wlt_file} > /dev/null 2>&1 || handle_error
# Every history entry must carry all of these fields.
declare -a mandatory_params
mandatory_params=( id txid confirmations blockheight fees time amount
action outputs )
for param in "${mandatory_params[@]}"; do
[ "$(jq "all(has(\"${param}\"))" ${wlt_file})" == "true" ] || handle_error
done
# addressTo rules: present for "sent", null for "moved", absent for "received".
[ "$(jq 'map(select(.action=="sent")) | all(.addressTo != null)' ${wlt_file})" == "true" ] || \
handle_error
[ "$(jq 'map(select(.action=="moved")) | all(.addressTo == null)' ${wlt_file})" == "true" ] || \
handle_error
[ "$(jq 'map(select(.action=="received")) | any(has("addressTo")) | not' ${wlt_file})" == "true" ] || \
handle_error
# Entries tied to a proposal must carry the full proposal metadata.
[ "$(jq -r 'map(select(has("proposalId"))) | all(has("createdOn"))' ${wlt_file})" == "true" ] || \
handle_error
[ "$(jq -r 'map(select(has("proposalId"))) | all(has("creatorName"))' ${wlt_file})" == "true" ] || \
handle_error
[ "$(jq -r 'map(select(has("proposalId"))) | all(has("message"))' ${wlt_file})" == "true" ] || \
handle_error
[ "$(jq -r 'map(select(has("proposalId"))) | all(has("actions"))' ${wlt_file})" == "true" ] || \
handle_error
[ "$(jq -r 'map(select(has("proposalId"))) | all(has("customData"))' ${wlt_file})" == "true" ] || \
handle_error
log_success
fi
# An .err companion file means the utility itself failed.
if [ -f "${wlt_file}.err" ]; then
output=$(cat ${wlt_file}.err)
handle_error
fi
handle_unexpected_error
| true |
f58b5775bae29a9602c884086fc228e171616650 | Shell | lystewie-yzkj/online-services | /ci/unit-tests.sh | UTF-8 | 416 | 3.234375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# https://explainshell.com
set -uo pipefail
[[ -n "${DEBUG-}" ]] && set -x

# Run from the repository root regardless of the caller's cwd.
cd "$(dirname "$0")/../"
PROJECT_DIR="$(pwd)"
TEST_RESULTS_DIR="${PROJECT_DIR}"

pushd services/csharp

TEST_FAILED=0
# Iterate NUL-delimited so test-project paths containing spaces survive;
# the original backtick loop word-split find's output. Run ALL test projects
# even when one fails, and report the aggregate status afterwards.
while IFS= read -r -d '' n; do
    if ! dotnet test "./${n}" --logger:"nunit;LogFilePath=${TEST_RESULTS_DIR}/${n}.xml"; then
        TEST_FAILED=1
    fi
done < <(find . -maxdepth 1 -type d -name '*.Test' -print0)
exit $TEST_FAILED | true |
b50c0b67020304e855cb485c6bbe037aca38b590 | Shell | environmentalomics/build-bio-linux | /recon.test.d/nothing_in_local_sbin | UTF-8 | 281 | 3.125 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #!/bin/sh
# Test that /usr/local/sbin is empty
# $ROOT must point at the build tree; it is expected to be exported by the
# surrounding test harness.
if [ -z "${ROOT:-}" ] ; then
echo '!'" \$ROOT is not set. Can't continue"
exit 1
fi
echo
echo "Testing that /usr/local/sbin has no redirects left by openchroot"
# The test's exit status: success only when the directory listing is empty
# (zero lines from ls; 2>/dev/null tolerates a missing directory).
[ "`ls $ROOT/root/usr/local/sbin 2>/dev/null | wc -l`" = 0 ]
| true |
8f5a9133b02c679dd8be9fa1e6fcac05662e537f | Shell | ChuckFoo/ddb-importer | /icon.sh | UTF-8 | 160 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Minify every JSON file in data/: move the originals aside to data-raw/
# and write compact (jq -c) copies back into a fresh data/ directory.
mv data data-raw
mkdir data
# Guard the cd: without it the loop would run in the wrong directory.
cd data-raw || exit 1
for f in ./*
do
    echo "Processing $f file..."
    # Explicit identity filter: 'jq -c' with no filter argument is a usage
    # error, so the original pipeline produced empty output files.
    jq -c . "$f" > "../data/$f"
done
cd ..
| true |
a968ba6a7cd003a910411d86f0bf0338cd91d8b0 | Shell | dsichkar/pgrd | /pgrd | UTF-8 | 226 | 3.375 | 3 | [] | no_license | #!/bin/sh
# Watchdog for the BIND name server: if "named" is not running, start it and
# log the restart; re-check every 5 seconds, forever.
process="named"
# Bug fix: the closing quote was missing on this assignment, which made the
# shell swallow the following lines into the string and broke the script.
logfile="/var/log/pguard.log"
while true
do
    # pgrep scans the process table itself; piping 'ps ax' into it did nothing.
    if ! pgrep "$process" > /dev/null
    then
        /etc/rc.d/named start
        echo "$(date) [RESTARTED] $process was crashed" >> "$logfile"
    fi
    sleep 5
done
| true |
7826ef22fa1cd7ed9d81ac5333a9ec46f51c5336 | Shell | simon-song/hooker_laws | /my.sh | UTF-8 | 563 | 2.8125 | 3 | [] | no_license |
# Clean up an OCR/plain-text dump of "book5" with a series of perl passes,
# each writing to a.tex and replacing book5.tex in place.
echo "copy file book5.tex ..."
cp backup/book5_orig.tex book5.tex
# Pass 1: drop "BOOK V" header lines and escape & for LaTeX.
echo "Remove lines starting with BOOK V ..."
perl -p -e 's/^BOOK V.*\n//g; s/&/\\&/g;' book5.tex >a.tex
mv a.tex book5.tex
# Pass 2: strip bracketed page/line numbers 11-999, e.g. [312].
echo "Remove line number such as [312] ..."
perl -p -e 's/\[[0-9]{3}\]//g; s/\[[2-9][0-9]\]//g; s/\[1[1-9]\]//g;' book5.tex >a.tex
mv a.tex book5.tex
# Pass 3: remove 1-2 digit footnote markers attached to a word, keeping the
# letter before and the punctuation/space after.
echo "Remove footnote numbers ..."
perl -p -e 's/([a-z])([0-9]{1,2})([,.:;? ])/\1\3/g;' book5.tex >a.tex
mv a.tex book5.tex
# Pass 4: delegate section-title detection to the companion my.pl script.
echo "Find and add section title ..."
perl my.pl <book5.tex >a.tex
mv a.tex book5.tex
| true |
57d8d2b167cfea826975e3c880430425b25ae831 | Shell | kokizzu/gotro | /W2/example/clickhouse_setup_local.sh | UTF-8 | 1,029 | 3 | 3 | [] | no_license | #!/bin/bash
# Install ClickHouse server + client from the official apt repository.
sudo apt-get install -y apt-transport-https ca-certificates dirmngr
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4
echo "deb https://repo.clickhouse.tech/deb/stable/ main/" | sudo tee \
    /etc/apt/sources.list.d/clickhouse.list
sudo apt-get -y update
sudo apt-get install -y clickhouse-server clickhouse-client
sudo clickhouse start
# Install clickhouse-backup only if it is not already on PATH
# ('command -v' replaces the external 'which').
CHB_BIN="$(command -v clickhouse-backup)"
if [[ -z "${CHB_BIN}" ]] ; then
  wget -c https://github.com/AlexAkulov/clickhouse-backup/releases/download/v1.3.2/clickhouse-backup-linux-amd64.tar.gz
  tar -xf clickhouse-backup-linux-amd64.tar.gz
  cd build/linux/amd64
  sudo mv clickhouse-backup /usr/local/bin
  sudo mkdir -p /etc/clickhouse-backup
  # Append information_schema entries under skip_tables. Bug fix: without -i
  # the edited config only went to stdout and the UNMODIFIED file was moved
  # into place below; -i makes sed edit config.yml in place.
  sed -i -e '/^ *skip_tables:/b ins' -e b -e ':ins' -e 'a\'$'\n'' - information_schema.\*\n - INFORMATION_SCHEMA.\*' -e ': done' -e 'n;b done' config.yml
  sudo mv config.yml /etc/clickhouse-backup
  sudo chown ${USER} /etc/clickhouse-backup
  clickhouse-backup -v
  cd ~
  rm -rf build
  rm clickhouse-backup-linux-amd64.tar.gz
fi | true |
19b2ed8fd5102537b5f848cc0e22f444431ef27f | Shell | DDtoma/Profiles | /zsh/install.sh | UTF-8 | 191 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# Clone oh-my-zsh and symlink this repository's .zshrc into $HOME.
URL="https://github.com/robbyrussell/oh-my-zsh"
source_path="$HOME/Profiles/zsh"
# Expansions quoted so paths containing spaces (e.g. in $HOME) survive.
git clone "${URL}" "$HOME/.oh-my-zsh"
ln -s "${source_path}/.zshrc" "$HOME"
# Clean up so nothing leaks if this file is sourced rather than executed.
unset source_path
unset URL
| true |
afad4ad307b037cb241b541c349eec9b71ea7bf9 | Shell | vinravun/dotfiles | /bash_aliases | UTF-8 | 1,190 | 2.90625 | 3 | [] | no_license | #aliases
# Interactive safety: prompt before overwriting or deleting.
alias cp='cp -i'
alias mv='mv -i'
alias rm='rm -i'
# Listing and colourised output defaults.
alias ls='ls -CF --color=auto'
alias ll='ls -la --color=auto'
alias l.='ls -d .* --color=auto'
alias grep='grep --color=auto'
alias egrep='egrep --color=auto'
alias fgrep='fgrep --color=auto'
alias mount='mount | column -t'
# pacman helpers
alias pac='sudo pacman -S' # install
alias pacup='sudo pacman -Syu && pacaur -Su' # update
alias pacr='sudo pacman -Rs' # remove
alias pacs='sudo pacman -Ss' # search
alias paci='sudo pacman -Si' # info
alias paclo='pacman -Qdt' # list orphans
alias pacro='paclo && sudo pacman -Rns $(pacman -Qtdq)' # remove orphans
alias pacc='sudo pacman -Scc' # clean cache
alias paclf='sudo pacman -Ql' # list files
# Misc one-off helpers.
alias startgdm='sudo systemctl start gdm.service'
alias ymusic='youtube-dl --extract-audio --audio-format mp3 -i -o "%(title)s.%(ext)s"'
alias wget='wget -c'
alias cpcamfiles='bash $HOME/code/bash/cpphoto.sh' #copy files from the camera card
alias elog='bash $HOME/code/bash/elog.sh' # entering log
# Kill everything Wine-related. Bug fix: the old alias used backticks inside
# double quotes, so the ps/grep/awk pipeline ran (and $1 was expanded by the
# shell) when .bash_aliases was SOURCED, not when the alias was invoked.
# A function defers execution to call time and keeps the same command name.
killallwine() {
    wineserver -k
    killall -9 wine wineserver
    for i in $(ps ax | grep -E '\.exe' | grep -v grep | awk '{print $1}'); do
        kill -9 "$i"
    done
}
| true |
70033ad3e1a73076de79ded21cb655f32e8c4dc7 | Shell | rschultheis/rspec_capybara_starter | /setup_drivers.sh | UTF-8 | 2,606 | 4.65625 | 5 | [] | no_license | #!/bin/bash
set -e
# SETUP WEBDRIVER UTILITIES
#
# Install chromedriver and geckodriver (and other drivers if needed).
# These command line programs are needed to run webdriver tests on Chrome and Firefox.
#
# The programs are put into a "path_ext" subfolder.
# The test suite will prepend "path_ext" to $PATH, ensuring these drivers are used during test execution.
# This way of installing into a local subfolder is non-intrusive to the rest of the system.
# Subfolder to place utilities into
PathExt="path_ext"
# base paths for the drivers to download
# ChromeDriver: https://chromedriver.chromium.org/downloads
ChromeDriverBasePath="https://chromedriver.storage.googleapis.com/110.0.5481.77/chromedriver_"
# GeckoDriver for Firefox: https://github.com/mozilla/geckodriver/releases
GeckoDriverBasePath="https://github.com/mozilla/geckodriver/releases/download/v0.32.2/geckodriver-v0.32.2-"
# make sure we have everything needed to do the setup
NEEDED_UTILS=( \
"wget" \
"unzip" \
"tar" \
)
for util in ${NEEDED_UTILS[*]}
do
command -v $util >/dev/null 2>&1 || { echo >&2 "I require $util but it's not installed.  Aborting."; exit 1; }
done
# Depending on the OS, figure out which driver package file to download
if [[ "$OSTYPE" == "linux-gnu" ]]; then
#is it 32 or 64 bit linux...
MACHINE_TYPE=`uname -m`
if [ ${MACHINE_TYPE} == 'x86_64' ]; then
# 64 Bit Linux
FilesToDownload=( \
$ChromeDriverBasePath"linux64.zip" \
$GeckoDriverBasePath"linux64.tar.gz" \
)
else
# 32 Bit Linux
FilesToDownload=( \
$ChromeDriverBasePath"linux32.zip" \
$GeckoDriverBasePath"linux32.tar.gz" \
)
fi
elif [[ "$OSTYPE" == "darwin"* ]]; then
# Apple OSX / Macbook
FilesToDownload=( \
$ChromeDriverBasePath"mac64.zip" \
$GeckoDriverBasePath"macos.tar.gz" \
)
elif [[ "$OSTYPE" == "win32" ]]; then
# Microsoft Windows
FilesToDownload=( \
$ChromeDriverBasePath"win32.zip" \
$GeckoDriverBasePath"win32.zip" \
)
else
echo "ERROR: Unknown OS type"
exit 1
fi
# make the path extension subfolder
mkdir -p $PathExt"/tmp"
# Download the driver package files (skipped when already cached in tmp/)
for download_url in ${FilesToDownload[*]}
do
basename=${download_url##*/}
file_path=$PathExt"/tmp/$basename"
if [ ! -e $file_path ]
then
wget -x \
-O $file_path \
$download_url
fi
done
# Extract package files into path_ext/ itself
pushd $PathExt
shopt -s nullglob #need this for cases where no .tar.bz2 files
for p in tmp/*.zip; do unzip -n -q $p; done
for p in tmp/*.tar.bz2; do tar jxf $p; done
for p in tmp/*.tar.gz; do tar xzf $p; done
popd
echo "Setup successful"
| true |
2d2ad4472460b15b8a70727d3f754344636f8de9 | Shell | trigrass2/neato-software | /c_src/build_c.sh | UTF-8 | 609 | 3.625 | 4 | [] | no_license | #!/bin/bash
GYP_GENERATORS=ninja gyp sensors.gyp \
--toplevel-dir=`pwd` --depth=.. --no-circular-check
ninja -C out/Default/ all
# Move SWIGed libraries to their proper directories.
SWIG_DIR="../swig"
# Create directory.
if [ ! -d "$SWIG_DIR" ]; then
mkdir "$SWIG_DIR"
else
rm ${SWIG_DIR}/*
fi
echo "" > ${SWIG_DIR}/__init__.py
# Move files.
find out/Default/lib -name "libswig_*" -type f -exec cp -t $SWIG_DIR {} \+
find . -name "*.py" -type f -exec cp -t $SWIG_DIR {} \+
# Rename library files.
cd $SWIG_DIR
for f in *; do
if [[ $f == libswig_*.so ]]; then
mv $f _${f#libswig_}
fi
done
| true |
7770912042fbf97388c58146a8c0a0d9fa9f7236 | Shell | gtk-gnutella/bitter | /config.conf | UTF-8 | 400 | 2.953125 | 3 | [
"BSD-2-Clause"
] | permissive | #! /bin/sh
clear_var() {
eval $1=1
eval unset $1
}
# Unused stuff
clear_var header_dir
clear_var library_dir
clear_var use_gethostbyname
clear_var use_threads
clear_var use_zlib
clear_var use_poll
clear_var use_socker
clear_var use_ipv6
# Paths
prefix=${PREFIX}
if [ "x${prefix}" = x ]; then
prefix='auto'
fi
bin_dir='auto'
# Optional stuff
use_large_files='auto'
# Use stuff
use_sha1=1
| true |
ad8b3a095672aaf8dfd6efcde6fbbbb1732ede17 | Shell | Pigiel/udemy-kubernetes-devops | /Introduction to Kubernetes/Your First k8s App/commands.sh | UTF-8 | 1,012 | 2.609375 | 3 | [] | no_license | # Commands to run the deployment
kubectl apply -f deployment.yaml
kubectl expose deployment tomcat-deployment --type=NodePort
# Only if you run the minikube instance
minikube service tomcat-deployment --url
curl <URL>
# Basic Kubectl commands
kubectl get pods
kubectl get pods [pod name]
kubectl expose <type name> <identifier/name> [—port=external port] [—target-port=container-port [—type=service-type]
kubectl expose deployment tomcat-deployment --type=NodePort
kubectl port-forward <pod name> [LOCAL_PORT:]REMOTE_PORT]
kubectl attach <pod name> -c <container>
kubectl exec [-it] <pod name> [-c CONTAINER] — COMMAND [args…]
kubectl exec -it <pod name> bash
kubectl label [—overwrite] <type> KEY_1=VAL_1 ….
kubectl label pods <pod name> healthy=fasle
kubectl run <name> —image=image
kubectl run hazelcast --image=hazelcast/hazelcast --port=5701
# The hazelcast docker image has been moved to hazelcast/hazelcast (https://hub.docker.com/r/hazelcast/hazelcast
kubectl describe pod | true |
6cf8399f8c110c20fa6653e95d8b884f4dc02f3b | Shell | JaimeSolisS/ITP | / Big Data/shell/p7.sh | UTF-8 | 453 | 3.65625 | 4 | [] | no_license | #Find out a way where you can use substring like feature in bash scripting and use that.
#Example for this is print all the files with same extension O/P should look like
#.txt - one, two, three
#pdf - four, five. six
echo "what type of files do you want: \c"
read type
#find . -type f -name "*.$type"
#ls *.$type
#ls *.$type | sed 's/\.[a-z]*//g'
list=$(ls *.$type | sed 's/\.[a-z]*//g')
echo "$type - \c"
for i in $list
do echo "$i, \c"
done
echo "\b" | true |
ecdd71279385f4cab7224fe77099cb9c63690e15 | Shell | m-wiesner/GKT | /local/decode_sgmm.sh | UTF-8 | 3,665 | 2.984375 | 3 | [] | no_license | set -e
set -o pipefail
. conf/common_vars.sh || exit 1;
. ./cmd.sh
. ./lang.conf
dataset_id=lorelei
my_nj=64
skip_kws=true
skip_stt=false
extra_kws=false
skip_scoring=false
wip=0.5
. ./utils/parse_options.sh
####################################################################
##
## FMLLR decoding
##
####################################################################
dataset_dir=data/${dataset_id}
echo $dataset_dir
echo $dataset_id
if [ ! -f data/langp_test/.done ]; then
echo "Copying data/langp/tri5_ali"
cp -R data/langp/tri5_ali/ data/langp_test
cp data/lang/G.fst data/langp_test
touch data/langp_test/.done
fi
decode=exp/tri5/decode_${dataset_id}
if [ ! -f ${decode}/.done ]; then
echo ---------------------------------------------------------------------
echo "Spawning decoding with SAT models on" `date`
echo ---------------------------------------------------------------------
utils/mkgraph.sh \
data/langp_test exp/tri5 exp/tri5/graph |tee exp/tri5/mkgraph.log
mkdir -p $decode
#By default, we do not care about the lattices for this step -- we just want the transforms
#Therefore, we will reduce the beam sizes, to reduce the decoding times
steps/decode_fmllr_extra.sh --skip-scoring true --beam 10 --lattice-beam 4\
--nj $my_nj --cmd "$decode_cmd" "${decode_extra_opts[@]}"\
exp/tri5/graph ${dataset_dir} ${decode} |tee ${decode}/decode.log
touch ${decode}/.done
fi
####################################################################
## SGMM2 decoding
## We Include the SGMM_MMI inside this, as we might only have the DNN systems
## trained and not PLP system. The DNN systems build only on the top of tri5 stage
####################################################################
if [ -f exp/sgmm5/.done ]; then
decode=exp/sgmm5/decode_fmllr_${dataset_id}
if [ ! -f $decode/.done ]; then
echo ---------------------------------------------------------------------
echo "Spawning $decode on" `date`
echo ---------------------------------------------------------------------
utils/mkgraph.sh \
data/langp_test exp/sgmm5 exp/sgmm5/graph |tee exp/sgmm5/mkgraph.log
mkdir -p $decode
steps/decode_sgmm2.sh --skip-scoring true --use-fmllr true --nj $my_nj \
--cmd "$decode_cmd" --transform-dir exp/tri5/decode_${dataset_id} "${decode_extra_opts[@]}"\
exp/sgmm5/graph ${dataset_dir} $decode |tee $decode/decode.log
touch $decode/.done
fi
####################################################################
##
## SGMM_MMI rescoring
##
####################################################################
for iter in 1 2 3 4; do
# Decode SGMM+MMI (via rescoring).
decode=exp/sgmm5_mmi_b0.1/decode_fmllr_${dataset_id}_it$iter
if [ ! -f $decode/.done ]; then
mkdir -p $decode
steps/decode_sgmm2_rescore.sh --skip-scoring true \
--cmd "$decode_cmd" --iter $iter --transform-dir exp/tri5/decode_${dataset_id} \
data/langp_test ${dataset_dir} exp/sgmm5/decode_fmllr_${dataset_id} $decode | tee ${decode}/decode.log
touch $decode/.done
fi
done
#We are done -- all lattices has been generated. We have to
#a)Run MBR decoding
#b)Run KW search
for iter in 1 2 3 4; do
# Decode SGMM+MMI (via rescoring).
decode=exp/sgmm5_mmi_b0.1/decode_fmllr_${dataset_id}_it$iter
local/run_kws_stt_task2.sh --cer $cer --max-states $max_states \
--skip-scoring $skip_scoring --extra-kws $extra_kws --wip $wip \
--cmd "$decode_cmd" --skip-kws $skip_kws --skip-stt $skip_stt \
"${lmwt_plp_extra_opts[@]}" \
${dataset_dir} data/langp_test $decode
done
fi
| true |
721ac54b6717582c1561886b1cf9cff855b4155c | Shell | fact-project/mars_pulse_truth | /.svn/pristine/72/721ac54b6717582c1561886b1cf9cff855b4155c.svn-base | UTF-8 | 12,058 | 3.75 | 4 | [] | no_license | #!/bin/sh
#
# ========================================================================
#
# *
# * This file is part of MARS, the MAGIC Analysis and Reconstruction
# * Software. It is distributed to you in the hope that it can be a useful
# * and timesaving tool in analysing Data of imaging Cerenkov telescopes.
# * It is distributed WITHOUT ANY WARRANTY.
# *
# * Permission to use, copy, modify and distribute this software and its
# * documentation for any purpose is hereby granted without fee,
# * provided that the above copyright notice appear in all copies and
# * that both that copyright notice and this permission notice appear
# * in supporting documentation. It is provided "as is" without express
# * or implied warranty.
# *
#
#
# Author(s): Daniela Dorner 08/2008 <mailto:dorner@astro.uni-wuerzburg.de>
#
# Copyright: MAGIC Software Development, 2000-2008
#
#
# ========================================================================
#
# This script produces the plots from all root-files in the web directory
#
# After checking, if the script is already running, the plots are produced:
# With the programm showplot a ps-file is written, from which the png
# files are produced.
#
# The files from which plots are created can be devided into two categories:
# general: plots needed only once per telescope
# individual: plots done for each sequence / dataset
#
# For 'general' first the rootfiles are created with a macro from the
# database, then from this the ps via showplot, from this the pdf and the
# png files.
# For 'individual' running the macro is only needed for the plotdb for
# ganymed. The other steps are done accordingly.
#
# Via command line options the plots can be selected. For more information
# execute the script with the -h option.
#
source `dirname $0`/sourcefile
printprocesslog "INFO starting $0"
program=dowebplots
set -C
scriptlog=$runlogpath/$program-$datetime.log
date >> $scriptlog 2>&1
echo $@ >> $scriptlog 2>&1
# definition of the plots to be created
generals=( plotdb optical condor status )
individuals=( calib signal star ganymed gplotdb )
# definition of the variables needed and varying for the plots
names=( plotdb optical condor status calib signal star ganymed gplotdb )
pathnames=( plotdb optical condor status callisto callisto star ganymed ganymed )
macros=( plotdb plotoptical plotusage plotstat no no no no no )
filenames=( plotdb plotoptical plotusage plotstat calib signal star ganymed plotdb )
columns=( no no no no fWebCalib fWebSignal fWebStar fWebGanymed fWebPlotDBGanymed )
digits=( 0 0 0 0 4 4 4 5 5 )
# function to print the usage of the script
function usage()
{
echo "Usage: $0 [options]"
echo "options:"
echo -n " --general name "
echo " where 'name' can be 'all' or one of the following: ${generals[@]} "
echo -n " --individual name "
echo " where 'name' can be 'all' or one of the following: ${individuals[@]} "
echo -n " -h "
echo " print this usage "
echo ""
echo "Remark: if no commandline option is given the plots are produced for everything."
echo ""
exit
}
# reading in the command line options
general=( )
individual=( )
while [ "$1" ]
do
case $1 in
--general) shift
if [ "$1" = "all" ]
then
general=( ${generals[@]} )
else
general=( ${general[@]} $1 )
fi
;;
--individual) shift
if [ "$1" = "all" ]
then
individual=( ${individuals[@]} )
else
individual=( ${individual[@]} $1 )
fi
;;
-h) usage
exit
;;
*) echo "unknown option $1 " >> $scriptlog 2>&1
usage >> $scriptlog 2>&1
exit
;;
esac
shift
done
if [ ${#general[@]} -eq 0 ] && [ ${#individual[@]} -eq 0 ]
then
all=( ${names[@]} )
else
all=( ${general[@]} ${individual[@]} )
fi
echo "For the following plots will be produced: "${all[@]} >> $scriptlog 2>&1
# using hour for lock => after 24 no new script is
# started in case the previous is still running
hour=`date +%H`
lockfile=$lockpath/${program}-${hour}h.txt
checklock >> $scriptlog 2>&1
# make sure that ./showplot is executed from the right directory
cd $mars
count=0
# loop over all scripts in $names
while [ 1 -gt 0 ]
do
check="ok"
# get needed variables from the arrays
name=${names[$count]}
macro=${macros[$count]}
pathname=${pathnames[$count]}
filename=${filenames[$count]}
column=${columns[$count]}
digit=${digits[$count]}
count=`expr $count + 1`
if [ $count -gt ${#names[@]} ]
then
count=0
continue
fi
# find out if these plots have to be created
# by looping over the complete array $all and comparing the entries to those of the array $names
for (( a=0 ; a <= ${#all[@]} ; a++ ))
do
# exit the loop if the entries agree
# i.e. produce plots for this entry
if [ "${all[$a]}" = "$name" ]
then
break
fi
# continue if the end of array is not yet reached
if [ $a -lt ${#all[@]} ]
then
continue
fi
# exit both loops if no entry in the array is left
# i.e. there is nothing to do anymore
if [ ${#all[@]} -eq 0 ]
then
break 2
fi
# continue with the next entry in the array $names
# in case the end of the array $all is reached without agreement
continue 2
done
# do things specific to 'general' and 'individual'
if [ "$macro" = "no" ]
then
# 'individual'
# remove name from list in case there is nothing left to do
getstatus >> $scriptlog 2>&1
if [ "$numproc" = "0" ]
then
unset all[$a]
all=( ${all[@]} )
continue
fi
# get one sequence or dataset from the database for which plots have to be produced
gettodo "1" >> $scriptlog 2>&1
num=${primaries[0]}
echo "executing $name for number "$num >> $scriptlog 2>&1
# lock the process
lockfile=$lockpath/$program-$name-$num.txt
checklock continue >> $scriptlog 2>&1
setstatus "start" >> $scriptlog 2>&1
# set needed file and path names
pathpart=$pathname/`printf %08d $num | cut -c 0-${digit}`/`printf %08d $num`
inpath=$datapath/$pathpart
outpath=$webpath/$pathpart
tmppath=/tmp/pstoimg$pathpart
rootfile=$inpath/$filename`printf %08d $num`.root
psfile=$inpath/$filename`printf %08d $num`.ps
csvfile=$outpath/$filename.csv
pdffile=$outpath/$filename`printf %08d $num`.pdf
# rsync
makedir $outpath >> $scriptlog 2>&1
rsync -aLv --exclude=20[01][0-9]*_[YI]_*[.]root $inpath/ $outpath >> $scriptlog 2>&1
# for ganymed not only the ganymed*.root is processed but also a plotdb.root for the
# dataset. therefore the macro plotdb.C has to be run in this case
if [ "$name" = "gplotdb" ]
then
# overwrite needed file and path names
rootfile=$inpath/$filename.root
psfile=$inpath/$filename.ps
csvfile=$outpath/$filename.csv
pdffile=$outpath/$filename.pdf
datasetfile=$datasetpath/`printf %08d $num | cut -c 0-${digit}`/dataset`printf %08d $num`.txt
echo "check1=root -q -b $macrospath/plotdb.C+\(\"$datasetfile\"\,\"$inpath/\"\) 2>|/dev/null | tee -a $scriptlog | intgrep" >> $scriptlog 2>&1
check1=`root -q -b $macrospath/plotdb.C+\("\"$datasetfile\""\,"\"$inpath/\""\) 2>|/dev/null | tee -a $scriptlog | intgrep`
case $check1 in
1) echo " check1=$check1 -> everything ok" >> $scriptlog 2>&1
printprocesslog "INFO rootfile $rootfile successfully produced"
;;
*) echo " check1=$check1 -> ERROR -> couldn't create plots -> exit" >> $scriptlog 2>&1
printprocesslog "ERROR producing rootfile $rootfile failed"
com=$Fdowebplots
check=$check1
;;
esac
fi
else
# 'general'
echo "executing $name " >> $scriptlog 2>&1
# remove 'general' from list as they need to be executed only once
unset all[$a]
all=( ${all[@]} )
# lock process
lockfile=$lockpath/$program-$name.txt
checklock continue >> $scriptlog 2>&1
# set needed file and path names
outpath=$webpath/$pathname
tmppath=/tmp/$pathname
rootfile=$outpath/$filename.root
psfile=$outpath/$filename.ps
csvfile=$outpath/$filename.csv
pdffile=$outpath/$filename.pdf
txtfile=$outpath/$filename.txt
echo "check2=root -q -b $macrospath/$macro.C+\(\"$outpath/\"\) 2>| $txtfile | tee -a $scriptlog | intgrep" >> $scriptlog 2>&1
check2=`root -q -b $macrospath/$macro.C+\("\"$outpath/\""\) 2>| $txtfile | tee -a $scriptlog | intgrep`
case $check2 in
1) echo " check2=$check2 -> everything ok" >> $scriptlog 2>&1
printprocesslog "INFO rootfile $rootfile successfully produced"
;;
*) echo " check2=$check2 -> ERROR -> couldn't produce root file $rootfile -> exit" >> $scriptlog 2>&1
printprocesslog "ERROR producing rootfile $rootfile failed"
com=$Fdowebplots
check=$check2
;;
esac
fi
# in case no error occured, the plots are produced from the rootfile
if [ "$check" = "ok" ]
then
printprocesslog "INFO producing plots (psfile and png) for $rootfile"
if ! ls $rootfile >/dev/null 2>&1
then
echo "rootfile $rootfile does not exist" >> $scriptlog 2>&1
printprocesslog "ERROR rootfile $rootfile does not exist"
com=$Fdowebplots
check=3
else
echo "producing psfile and csvfile..." >> $scriptlog 2>&1
if ! ./showplot -b --save-as-ps=$psfile --save-as-csv=$csvfile $rootfile >> $scriptlog 2>&1
then
com=$Fdowebplots
check=4
else
echo "creating temporary directory for pstoimg..." >> $scriptlog 2>&1
makedir $tmppath >> $scriptlog 2>&1
echo "converting plots to png..." >> $scriptlog 2>&1
if ! pstoimg -antialias -aaliastext -interlaced -flip r270 -density 100 -tmp $tmppath -out=$outpath/$name -type png -multipage $psfile >> $scriptlog 2>&1
then
printprocesslog "WARN could not create png files for $psfile "
com=$Fdowebplots
check=5
fi
echo "removing temporary directory..." >> $scriptlog 2>&1
rmdir -pv $tmppath >> $scriptlog 2>&1
echo "converting $psfile to pdf..." >> $scriptlog 2>&1
fi
if ps2pdf $psfile $pdffile >> $scriptlog 2>&1
then
rm -v $psfile >> $scriptlog 2>&1
else
printprocesslog "WARN could not convert $psfile to $pdffile"
com=$Fdowebplots
check=6
fi
fi
fi
setstatus "stop" >> $scriptlog 2>&1
rm -v $lockfile >> $scriptlog 2>&1
sleep 2
done
# rsync subsystemdata, sequence and dataset files
echo "do rsync for subsystem files" >> $scriptlog 2>&1
rsync -av --delete $subsystempath/ $webpath/subsystemdata >> $scriptlog 2>&1
echo "do rsync for sequence files" >> $scriptlog 2>&1
rsync -av --delete $sequpath/ $webpath/sequences >> $scriptlog 2>&1
echo "do rsync for dataset files" >> $scriptlog 2>&1
rsync -av --delete $datasetpath/ $webpath/datasets >> $scriptlog 2>&1
lockfile=$lockpath/${program}-${hour}h.txt
finish >> $scriptlog 2>&1
| true |
f86563846832245d7b3870d3cd319dfa8f4ed264 | Shell | killangell/micros | /user/partition/unit_test/partition/test_user_partition_define.sh | UTF-8 | 8,100 | 2.96875 | 3 | [] | no_license | #!/bin/sh
source sys_debug.sh
source user_partition_define.sh
UNIT_TEST_DIR="$SYS_LOG_DIR/unit_test/partition"
mkdir -p $UNIT_TEST_DIR
#set -xv
#return: true(1)/false(0)
function test_get_partition_mount_point_by_name()
{
name="swap"
expect="swap"
real="null"
get_partition_mount_point_by_name $name real
if [ $expect != $real ];then
return $FALSE
fi
name="efi"
expect="/boot/efi"
real="null"
get_partition_mount_point_by_name $name real
if [ $expect != $real ];then
return $FALSE
fi
name="data"
expect="/data"
real="null"
get_partition_mount_point_by_name $name real
if [ $expect != $real ];then
return $FALSE
fi
return $TRUE
}
#return: true(1)/false(0)
function test_get_dest_drive()
{
expect="sda"
real="null"
get_dest_drive real
if [ $expect != $real ];then
return $FALSE
fi
return $TRUE
}
#return: true(1)/false(0)
function test_set_dest_drive()
{
expect="hda"
real="null"
old_drive="null"
get_dest_drive old_drive
set_dest_drive $expect
get_dest_drive real
#Set to default
set_dest_drive $old_drive
if [ $expect != $real ];then
return $FALSE
fi
return $TRUE
}
#return: true(1)/false(0)
function test_get_pt_name_index()
{
index="-1"
get_pt_name_index efi index
if [ $index -ne 1 ];then
return $FALSE
fi
get_pt_name_index root index
if [ $index -ne 4 ];then
return $FALSE
fi
return $TRUE
}
#return: true(1)/false(0)
function test_is_valid_partition_index()
{
index=0
expect=1
is_valid_partition_index $index
if [ $? -ne $expect ];then
return $FALSE
fi
index=5
expect=1
is_valid_partition_index $index
if [ $? -ne $expect ];then
return $FALSE
fi
index=9
expect=1
is_valid_partition_index $index
if [ $? -ne $expect ];then
return $FALSE
fi
index=15
expect=0
is_valid_partition_index $index
if [ $? -ne $expect ];then
return $FALSE
fi
return $TRUE
}
#return: true(1)/false(0)
function test_get_partition_info_by_index_1()
{
index=1
expect_name="efi"
expect_size="200M"
expect_loca="disk"
expect_fs_type="vfat"
real_name="null"
real_size="null"
real_loca="null"
real_fs_type="null"
get_partition_info_by_index $index real_name real_size real_loca real_fs_type
#echo $real_name,$real_size,$real_loca,$real_fs_type
if [ $expect_name != $real_name ];then
return $FALSE
fi
if [ $expect_size != $real_size ];then
return $FALSE
fi
if [ $expect_loca != $real_loca ];then
return $FALSE
fi
if [ $expect_fs_type != $real_fs_type ];then
return $FALSE
fi
return $TRUE
}
#return: true(1)/false(0)
function test_get_partition_info_by_index_2()
{
index=4
expect_name="root"
expect_size="10G"
expect_loca="lvm"
expect_fs_type="ext4"
real_name="null"
real_size="null"
real_loca="null"
real_fs_type="null"
get_partition_info_by_index $index real_name real_size real_loca real_fs_type
if [ $expect_name != $real_name ];then
return $FALSE
fi
if [ $expect_size != $real_size ];then
return $FALSE
fi
if [ $expect_loca != $real_loca ];then
return $FALSE
fi
if [ $expect_fs_type != $real_fs_type ];then
return $FALSE
fi
return $TRUE
}
#return: true(1)/false(0)
function test_get_partition_info_by_index_3()
{
index=10
expect_name="data"
expect_size="0"
expect_loca="lvm"
expect_fs_type="ext4"
real_name="null"
real_size="null"
real_loca="null"
real_fs_type="null"
get_partition_info_by_index $index real_name real_size real_loca real_fs_type
if [ $expect_name != $real_name ];then
return $FALSE
fi
if [ $expect_size != $real_size ];then
return $FALSE
fi
if [ $expect_loca != $real_loca ];then
return $FALSE
fi
if [ $expect_fs_type != $real_fs_type ];then
return $FALSE
fi
return $TRUE
}
#return: true(1)/false(0)
function test_set_partition_info_by_index_1()
{
index=10
expect_name="data"
expect_size="1G"
expect_loca="disk"
expect_fs_type="ext3"
real_name="null"
real_size="null"
real_loca="null"
real_fs_type="null"
old_name="null"
old_size="null"
old_loca="null"
old_fs_type="null"
get_partition_info_by_index $index old_name old_size old_loca old_fs_type
set_partition_info_by_index $index $expect_name $expect_size $expect_loca $expect_fs_type
get_partition_info_by_index $index real_name real_size real_loca real_fs_type
#echo $real_name,$real_size,$real_loca,$real_fs_type
#Set default value again
set_partition_info_by_index $index $expect_name $old_size $old_loca $old_fs_type
if [ $expect_name != $real_name ];then
return $FALSE
fi
if [ $expect_size != $real_size ];then
return $FALSE
fi
if [ $expect_loca != $real_loca ];then
return $FALSE
fi
if [ $expect_fs_type != $real_fs_type ];then
return $FALSE
fi
return $TRUE
}
#return: true(1)/false(0)
function test_get_partition_info_by_name_1()
{
name="efi"
expect_size="200M"
expect_loca="disk"
expect_fs_type="vfat"
real_size="null"
real_loca="null"
real_fs_type="null"
get_partition_info_by_name $name real_size real_loca real_fs_type
if [ $expect_size != $real_size ];then
return $FALSE
fi
if [ $expect_loca != $real_loca ];then
return $FALSE
fi
if [ $expect_fs_type != $real_fs_type ];then
return $FALSE
fi
return $TRUE
}
#return: true(1)/false(0)
function test_get_partition_info_by_name_2()
{
name="home"
expect_size="20G"
expect_loca="lvm"
expect_fs_type="ext4"
real_size="null"
real_loca="null"
real_fs_type="null"
get_partition_info_by_name $name real_size real_loca real_fs_type
if [ $expect_size != $real_size ];then
return $FALSE
fi
if [ $expect_loca != $real_loca ];then
return $FALSE
fi
if [ $expect_fs_type != $real_fs_type ];then
return $FALSE
fi
return $TRUE
}
#return: true(1)/false(0)
function test_get_partition_info_by_name_3()
{
name="data"
expect_size="0"
expect_loca="lvm"
expect_fs_type="ext4"
real_size="null"
real_loca="null"
real_fs_type="null"
get_partition_info_by_name $name real_size real_loca real_fs_type
#echo $name,$real_size,$real_loca,$real_fs_type
if [ $expect_size != $real_size ];then
return $FALSE
fi
if [ $expect_loca != $real_loca ];then
return $FALSE
fi
if [ $expect_fs_type != $real_fs_type ];then
return $FALSE
fi
return $TRUE
}
#return: true(1)/false(0)
function test_get_partition_info_by_name_4()
{
name="xxx"
expect_size="null"
expect_loca="null"
expect_fs_type="null"
real_size="null"
real_loca="null"
real_fs_type="null"
get_partition_info_by_name $name real_size real_loca real_fs_type
if [ $expect_size != $real_size ];then
return $FALSE
fi
if [ $expect_loca != $real_loca ];then
return $FALSE
fi
if [ $expect_fs_type != $real_fs_type ];then
return $FALSE
fi
return $TRUE
}
#return: true(1)/false(0)
function test_set_partition_info_by_name_1()
{
name="data"
expect_size="3G"
expect_loca="disk"
expect_fs_type="ext2"
real_size="null"
real_loca="null"
real_fs_type="null"
old_size="null"
old_loca="null"
old_fs_type="null"
get_partition_info_by_name $name old_size old_loca old_fs_type
set_partition_info_by_name $name $expect_size $expect_loca $expect_fs_type
get_partition_info_by_name $name real_size real_loca real_fs_type
#Set default value again
set_partition_info_by_name $name $old_size $old_loca $old_fs_type
#echo $name,$real_size,$real_loca,$real_fs_type
if [ $expect_size != $real_size ];then
return $FALSE
fi
if [ $expect_loca != $real_loca ];then
return $FALSE
fi
if [ $expect_fs_type != $real_fs_type ];then
return $FALSE
fi
return $TRUE
}
#Test list
test_partition_define_func_arr=(
test_get_partition_mount_point_by_name
test_get_dest_drive
test_set_dest_drive
test_get_pt_name_index
test_is_valid_partition_index
test_get_partition_info_by_index_1
test_get_partition_info_by_index_2
test_get_partition_info_by_index_3
test_set_partition_info_by_index_1
test_get_partition_info_by_name_1
test_get_partition_info_by_name_2
test_get_partition_info_by_name_3
test_get_partition_info_by_name_4
test_set_partition_info_by_name_1
)
source sys_loop_array_and_exec.sh ${test_partition_define_func_arr[*]}
exit $TRUE
| true |
1a9d9c4b93b86d573710b4ff6c034b071e419d26 | Shell | kren1/testSymExec | /scripts/creduceTest.sh | UTF-8 | 1,889 | 3.046875 | 3 | [] | no_license | #!/bin/bash
#example USAGE ./creduceTest.sh
DIR_NAME=$(dirname "$(realpath $0)")
source $DIR_NAME/settings.sh
NULL=/dev/null
NULL=/dev/tty
NATIVE_O_FILE=$(mktemp)
NATIVE_OUT=$(mktemp)
ERRS=$(mktemp)
DIFF_FILE=$(mktemp)
NAME='test2.c'
#Check that the given file doesn't have any weird behaviour by compiling it with sctrict policy and running it natively
CLANG=~/dependencies/llvm-3.9/bin/clang
$CLANG -fsanitize=address -fsanitize=bounds -fsanitize=undefined -Werror=extra -w \
-Werror=all -Wno-error=unused-value -Wno-error=unused-variable -Wno-error=parentheses-equality \
-Wno-error=implicit-function-declaration -Wno-error=self-assign -Wno-error=unused-function \
-Wno-error=unused-parameter -Wno-error=sign-compare -Wno-error=ignored-qualifiers -Werror=int-conversion\
-o $NATIVE_O_FILE -D DS $INST_LIB_PATH/native/*.c -I$CSMITH_RUNTIME $NAME &&\
timeout 5 $NATIVE_O_FILE > $NATIVE_OUT &&\
#grep checksum $NATIVE_OUT > $DIFF_FILE &&\
#! grep '...checksum after hashing' $NATIVE_OUT > $DIFF_FILE &&\
#$DIR_NAME/generalSingleExperiment.sh $NAME 2> $ERRS >&2 &&\
#$DIR_NAME/crosscheck/generalCrosscheckExperiment.sh $NAME 2> $ERRS >&2 &&\
$COMPILE_AND_RUN_1 $NAME $NAME 2> $ERRS >&2 &&\
#! $DIR_NAME/CaR/fuzzballCompileAndRun.sh $NAME `realpath $NAME` > $ERRS 2>&1 &&\
#grep 'KLEE: ERROR: (location information missing) ASSERTION FAIL: pref_g_158 == g_158' $ERRS &&\
grep "^func_1$" $ERRS &&\
grep "^pref_func_1$" $ERRS &&\
grep 'Prediction failed!' $ERRS | wc -l | grep "2" &&\
grep -B1 'Prediction failed!' $ERRS | grep "Iteration 30" &&\
grep -B1 'Prediction failed!' $ERRS | grep "Iteration 29"
#grep 'KLEE: ERROR: (location information missing) ASSERTION FAIL: pref_g_158 == g_158' $ERRS
#grep "checksum" $ERRS | cut -f 2- -d ' ' | diff $DIFF_FILE - | wc -l | grep 2
EXIT_CODE=$?
rm $NATIVE_O_FILE $NATIVE_OUT $ERRS $DIFF_FILE
exit $EXIT_CODE
| true |
b2b67a606420a6c90f716bbcd3bfe36e0dadc455 | Shell | trocotronic/Colossus | /colossus.in | ISO-8859-2 | 2,886 | 3.828125 | 4 | [] | no_license | #!/bin/sh
PID_FILE="colossus.pid"
PID_BACKUP="colossus.pid.bak"
BINDIR=`pwd`/servicios
if [ "$1" = "start" ] ; then
echo "Iniciando Colossus"
if [ -r $PID_FILE ] ; then
mv -f $PID_FILE $PID_BACKUP
fi
$BINDIR
sleep 10
if [ ! -r $PID_FILE ] ; then
echo "Se ha detectado un error en la ejecucin de Colossus"
echo "====================================================="
echo "Comprueba todos los errores primero "
echo "====================================================="
echo "Si se ha generado un archivo .core, ejecute ./colossus backtrace"
echo "Dirjase a http://bugs.redyc.com/, abra un post y copie la informacin que se le ha mostrado entre las secciones INICIO y FIN";
echo "====================================================="
if [ -r $PID_BACKUP ] ; then
mv -f $PID_BACKUP $PID_FILE
fi
fi
elif [ "$1" = "stop" ] ; then
echo "Deteniendo Colossus"
kill -3 `cat $PID_FILE`
elif [ "$1" = "rehash" ] ; then
echo "Refrescando Colossus"
kill -1 `cat $PID_FILE`
elif [ "$1" = "restart" ] ; then
echo "Reiniciando Colossus"
kill -2 `cat $PID_FILE`
elif [ "$1" = "conectar" ] ; then
echo "Conectando con el servidor"
kill -13 `cat $PID_FILE`
elif [ "$1" = "version" ] ; then
$BINDIR -v
elif [ "$1" = "backtrace" ] ; then
if [ -d "modulos" ]; then
modpath="modulos"
elif [ -d "src/modulos" ]; then
modpath="src/modulos"
else
echo 'Ingrese la ruta de los modulos'
read modpath
if [ ! -d "$modpath" ]; then
echo "No es un directorio"
exit
fi
fi
# Find the corefile
echo "Escoja un core:"
n="0"
for i in `echo *core*`
do
ls -l $i
n=`expr $n + 1`
done
if [ "$n" -gt 1 ]; then
echo "Escriba el nombre del core que quiere examinar:"
read corefile
elif [ "$i" = "*core*" -o "$n" -eq 0 ]; then
echo 'No se encuentra ningun archivo core'
exit 1
else
corefile="$i"
fi
if [ ! -f "$corefile" ]; then
echo "No se encuentra el core '$corefile'"
fi
if [ ! -s "$corefile" ]; then
echo 'El archivo core esta vacio'
echo '(escriba "ulimit -c unlimited")'
exit 1
fi
(echo quit|gdb $BINDIR $corefile 2>&1)|\
grep -i 'no such file'|\
awk -F ':' '{ print $1 }'|sort|uniq|\
awk -F '.' "{ system(\"ln -s ../$modpath/\" \$2 \".so \" \$0) }"
echo ""
echo "=================== INICIO ======================"
echo "BACKTRACE:"
cat >gdb.commands << __EOF__
bt
echo \n
frame
echo \n
x/s backupbuf
echo \n
bt 5 full
quit
__EOF__
gdb -batch -x gdb.commands $BINDIR $corefile
rm -f gdb.commands
echo "GCC: `gcc -v 2>&1|tail -n 1`"
echo "UNAME: `uname -a`"
echo "COLOSSUS: `$0 version`"
echo "CORE: `ls -al $corefile`"
echo "=================== FIN ======================="
echo ""
echo "Copie lo que hay entre las secciones INICIO y FIN"
echo "y envielo a http://bugs.redyc.com/"
echo ""
echo "Gracias!"
else
echo "Uso: ./colossus start|stop|conectar|rehash|restart|version|backtrace"
fi
| true |
e4175ad9edd5290b3cd18cde6017bc89c0ea6f5d | Shell | chef/automate | /components/secrets-service/habitat/config/migration.sh | UTF-8 | 1,162 | 3.546875 | 4 | [
"Apache-2.0",
"CC-BY-2.0",
"SAX-PD",
"MPL-2.0",
"Artistic-2.0",
"MIT-CMU",
"BSD-3-Clause",
"0BSD",
"CC-BY-4.0",
"LGPL-3.0-or-later",
"CC0-1.0",
"CC-BY-3.0",
"BSD-Source-Code",
"Apache-1.1",
"Ruby",
"WTFPL",
"BSD-1-Clause",
"MIT",
"Unlicense",
"BSD-2-Clause"
] | permissive | #!{{pkgPathFor "core/bash"}}/bin/bash
# Migrating A1 data to the A2 secrets service database
#
# Upgrade helpers: each copies the secrets tables (s_tags, s_secrets,
# s_secrets_tags) into $DBNAME with the pg-helper tool.  $A1_DBNAME,
# $COMPLIANCE_DBNAME and $DBNAME are expected to be set by the
# surrounding hook -- confirm against the service's run hook.
function migrate_A1_data() {
  echo "Migrating A1 secrets service tables"
  # Test the command directly instead of the `if [ $? -ne 0 ]` pattern.
  if ! pg-helper -d migrate-tables-v2 "$A1_DBNAME" "$DBNAME" s_tags s_secrets s_secrets_tags; then
    echo "Failed to migrate Automate 1 secrets service tables from $A1_DBNAME to $DBNAME"
    exit 1
  fi
}
# Migrating A2 compliance secrets service tables to the new A2 secrets service database
#
# Returns 0 on success, 1 when the source database did not exist, and
# terminates the process on any other failure.
function migrate_A2_data() {
  pg-helper -d migrate-tables-v2 "$COMPLIANCE_DBNAME" "$DBNAME" s_tags s_secrets s_secrets_tags \
    --fail-if-src-missing
  # Capture the status immediately; three-way dispatch needs the code.
  errcode="$?"
  if [ $errcode -eq 0 ]; then
    echo "Migrated secrets service tables from $COMPLIANCE_DBNAME"
    return 0
  elif [ $errcode -eq 10 ]; then
    # 10 means the source database did not exist
    echo "$COMPLIANCE_DBNAME does not exist. No tables migrated"
    return 1
  else
    echo "Failed to migrate Automate 2 secrets service tables from $COMPLIANCE_DBNAME to $DBNAME"
    exit 1
  fi
}
# Prefer the A2 compliance tables; fall back to the A1 tables only when
# the compliance database was absent.
function trigger_migrations() {
  if ! migrate_A2_data; then
    migrate_A1_data
  fi
}
| true |
52d318b56734c0bbea6a362a746882e287ff7e5e | Shell | riccardomurri/mp.sh | /mp.sh | UTF-8 | 7,811 | 4.15625 | 4 | [] | no_license | #! /bin/sh
#
# A library of sh functions for doing multiprecision arithmetic.
# This file contains definitions that should work on a generic
# POSIX shell.
#
## sanity checks
# have_command NAME
#
# Succeed if NAME resolves to a runnable command (binary, builtin,
# function or alias); all lookup output is discarded.
have_command () {
    if type "$1" >/dev/null 2>&1; then
        return 0
    else
        return 1
    fi
}
# require_command NAME
#
# Abort unless NAME is an available command.
# NOTE(review): `die' is not defined anywhere in this file -- presumably
# the caller is expected to provide it before sourcing; confirm.
require_command () {
    if ! have_command "$1"; then
        die 1 "Could not find required command '$1' in system PATH. Aborting."
    fi
}
# External tools this library relies on for digit manipulation.
require_command cut
require_command expr
require_command rev
require_command sed
## library functions
# _do_many BINOP ARG [ARG ...]
#
# execute associative BINOP on list of arguments
#
# Left-fold: `_do_many mp_add2 a b c' computes mp_add2(mp_add2(a,b), c).
_do_many () {
    if [ $# -le 2 ]; then
        echo "${2}"
    else
        binop="${1}"
        first="${2}"
        second="${3}"
        shift 3
        # binop/first/second are globals (plain POSIX sh, no `local');
        # the recursion is still safe because their values are expanded
        # into the argument list before the recursive call starts.
        _do_many ${binop} "$(${binop} "${first}" "${second}")" "$@"
    fi
}
# normalize
#
# Filter: strip leading zeroes from the digit string on stdin.  An
# all-zero input becomes the empty string, which the comparison
# helpers treat as zero.
normalize () {
    # 's/^00*//' is plain BRE, portable to any POSIX sed.  The original
    # used GNU-only `sed -r' with ERE '0+' despite the script's
    # #!/bin/sh shebang, which breaks on BSD/busybox sed.
    sed 's/^00*//'
}
# _normalize
#
# Filter: remove leading zeroes from an RTL (least-significant digit
# first) number, i.e. strip the trailing zeroes of the string on stdin.
_normalize () {
    # BRE '00*$' replaces the GNU-only ERE `sed -r 's/0+$//'` so the
    # script works with any POSIX sed, matching its #!/bin/sh shebang.
    sed 's/00*$//'
}
# mp_lt A B
#
# exit successfully if A is numerically less than B
#
# Big-endian digit strings; compares by length first, then digit by
# digit recursively.
mp_lt () {
    local a b len_a len_b a1 b1 a2 b2
    # remove leading zeroes
    a="$( echo ${1} | normalize)"
    b="$( echo ${2} | normalize)"
    len_a=$(echo -n "${a}" | wc -c)
    len_b=$(echo -n "${b}" | wc -c)
    # shortcut: if A has less digits than B, then it's less-than
    if [ "$len_a" -lt "${len_b}" ]; then
        return 0
    # if A and B have the same number of digits, use lexical comparison
    elif [ "${len_a}" -eq "${len_b}" ]; then
        if [ "${len_a}" -eq 0 ]; then
            # special case for _mp_div2
            return 0
        else
            # NOTE(review): the digit-wise comparison reads ${1}/${2}
            # (the raw arguments), not the normalized a/b -- callers
            # are expected to pass already-normalized digits; confirm.
            a1=$(echo "${1}" | cut -c1)
            b1=$(echo "${2}" | cut -c1)
            if [ "${a1}" -lt "${b1}" ]; then
                return 0
            elif [ "${a1}" -eq "${b1}" ]; then
                # recurse
                a2=$(echo "${1}" | cut -c2-)
                b2=$(echo "${2}" | cut -c2-)
                mp_lt "${a2}" "${b2}"
            else
                # b1 > a1, hence B > A
                return 1
            fi
        fi
    else
        return 1
    fi
}
# same as `mp_lt`, but using the RTL representation
_mp_lt () {
    mp_lt "$(echo "${1}" | rev)" "$(echo "${2}" | rev)"
}
# mp_add A B [C ...] -- variadic sum, folded pairwise via mp_add2.
mp_add () {
    _do_many mp_add2 "$@"
}
# mp_add2 A B -- add two big-endian digit strings; work happens in the
# RTL (reversed) representation so carries propagate left-to-right.
mp_add2 () {
    _mp_add2 "$(echo "${1}" | rev)" "$(echo "${2}" | rev)" | rev
}
# add ${1} and ${2}, print result to STDOUT
# (RTL digits; ${3} is the pending carry, ${4} the digits produced so
# far, accumulated front-to-back as the recursion walks the numbers.)
_mp_add2 () {
    local pfx carry a1 b1 a2 b2 tot d
    pfx="${4}"
    carry="${3}"
    if [ -z "${1}" ] || [ -z "${2}" ]; then
        if [ -z "${carry}" ]; then
            echo "${pfx}${1}${2}" | _normalize
        else
            # One operand is exhausted but a carry remains: fold it in.
            _mp_add2 "${1}${2}" "${carry}" '' "${pfx}"
        fi
    else
        a1=$(echo "${1}" | cut -c1)
        b1=$(echo "${2}" | cut -c1)
        a2=$(echo "${1}" | cut -c2-)
        b2=$(echo "${2}" | cut -c2-)
        if [ -n "${3}" ]; then
            carry="${3}"
        else
            carry='0'
        fi
        # Reversing the sum splits it into low digit + carry digits.
        tot=$(expr "${a1}" + "${b1}" + "${carry}" | rev)
        d=$(echo "${tot}" | cut -c1)
        carry=$(echo "${tot}" | cut -c2-)
        _mp_add2 "${a2}" "${b2}" "${carry}" "${pfx}${d}"
    fi
}
# mp_sub A B [C ...] -- variadic subtraction, folded via mp_sub2.
mp_sub () {
    _do_many mp_sub2 "$@"
}
# mp_sub2 A B -- A minus B on big-endian digit strings (A >= B;
# negative results are an error).
mp_sub2 () {
    _mp_sub2 "$(echo "${1}" | rev)" "$(echo "${2}" | rev)" | rev
}
# RTL worker: ${3} is the borrow, ${4} the result digits so far.
_mp_sub2 () {
    local pfx carry a b a1 b1 a2 b2 d
    pfx="${4}"
    carry="${3}"
    a="$(echo "${1}" | _normalize)"
    b="$(echo "${2}" | _normalize)"
    if [ "${a}" = "${b}" ] && [ -z "${carry}" ]; then
        # shortcut
        if [ -z "${pfx}" ]; then
            echo '0'
        else
            echo "${pfx}" | _normalize
        fi
        return 0
    elif [ -z "${a}" ]; then
        if [ -z "${b}" ]; then
            # end of recursion
            echo "${pfx}" | _normalize
            return 0
        else
            echo 1>&2 "ERROR: negative numbers not supported."
            return 1
        fi
    elif [ -z "${b}" ]; then
        #
        if [ -z "${carry}" ]; then
            echo "${pfx}${a}"
        else
            # Subtrahend exhausted but a borrow is pending: recurse to
            # subtract the borrow from the remaining digits.
            _mp_sub2 "${a}" "${carry}" '' "${pfx}"
        fi
    else
        a1=$(echo "${1}" | cut -c1)
        b1=$(echo "${2}" | cut -c1)
        a2=$(echo "${1}" | cut -c2-)
        b2=$(echo "${2}" | cut -c2-)
        if [ -z "${carry}" ]; then
            carry=0
        fi
        d=$(expr "${a1}" - "${b1}" - "${carry}")
        case "$d" in
            -*) # reduce mod 10
                d=$(expr "${d}" + 10)
                carry=1
                ;;
            *)
                carry=0
                ;;
        esac
        _mp_sub2 "${a2}" "${b2}" "${carry}" "${pfx}${d}"
    fi
}
# mp_mul A B [C ...] -- variadic product, folded via mp_mul2.
mp_mul () {
    _do_many mp_mul2 "$@"
}
# mp_mul2 A B -- multiply two big-endian digit strings.
mp_mul2 () {
    _mp_mul2 "$(echo "${1}" | rev)" "$(echo "${2}" | rev)" | rev
}
# decompose a = a1 + 10*a2 where 0<=a1<=9 and use distributive law +
# the fact that multiplication by 10 is just a shift in the digits
# (in the RTL representation a leading '0' *prepends* a digit, i.e.
# multiplies by 10 -- hence the "0${t2}" / "00${t3}" shifts below).
_mp_mul2 () {
    local a1 b1 a2 b2 t t2_1 t2_2 t2 t3
    if [ -z "${1}" ] || [ -z "${2}" ]; then
        echo ''
    else
        a1=$(echo "${1}" | cut -c1)
        b1=$(echo "${2}" | cut -c1)
        a2=$(echo "${1}" | cut -c2-)
        b2=$(echo "${2}" | cut -c2-)
        # a*b = a1*b1 + 10*(a2*b1 + a1*b2) + 100*a2*b2
        #       (t)         (t2)               (t3)
        t="$(expr "${a1}" '*' "${b1}" | rev)"
        t2_1="$(_mp_mul2 "${a1}" "${b2}")"
        t2_2="$(_mp_mul2 "${a2}" "${b1}")"
        t2="$(_mp_add2 "${t2_1}" "${t2_2}")"
        if [ -n "${t2}" ]; then
            t=$(_mp_add2 "${t}" "0${t2}")
        fi
        t3="$(_mp_mul2 "${a2}" "${b2}")"
        if [ -n "${t3}" ]; then
            t=$(_mp_add2 "${t}" "00${t3}")
        fi
        echo "${t}"
    fi
}
# mp_div A B [C ...] -- variadic integer division, folded via mp_div2.
mp_div () {
    _do_many mp_div2 "$@"
}
# mp_div2 A B -- integer quotient of two big-endian digit strings.
mp_div2 () {
    _mp_div2 "$(echo "${1}" | rev)" "$(echo "${2}" | rev)" | rev
}
# let q = a // b (where '//' denotes integer division), and write
# q = q1 + 10*q2; now we have:
#
# * q2 = (a // b*10) = (a // 10) // b = (a2 // b): multiplication and
#   division by 10 can be implemented as shifts;
# * q1 = (a - q2*10) // b
#
# RTL worker: "0${b}" shifts b one digit up (multiplies by 10).
_mp_div2 () {
    local a b q1 q2 r
    if [ -z "${1}" ] || [ -z "${2}" ]; then
        echo ''
        return 0
    else
        a="${1}"
        b="${2}"
        if _mp_lt "${a}" "${b}"; then
            echo '0'
        else
            # compute q2 by recursion
            #a2=$(echo "${a}" | cut -c2-)
            q2="$(_mp_div2 "${a}" "0${b}")"
            # iteratively compute q1
            q1=0
            if _mp_lt "0${q2}" "${a}"; then
                # r = a - 10*q2*b, then subtract b until r < b; the
                # number of subtractions is the low quotient digit q1.
                r="$(_mp_sub2 "${a}" $(_mp_mul2 "0${q2}" "${b}"))"
                while _mp_lt "${b}" "${r}"; do
                    q1=$(expr 1 + "${q1}")
                    r="$(_mp_sub2 "${r}" "${b}")"
                done
            fi
            echo "$(_mp_add2 "${q1}" "0${q2}")"
        fi
    fi
}
# _many N STR
#
# Print the concatenation of N copies of STR on a single line
# (N <= 0 yields an empty line).
_many () {
    count="${1}"
    result=''
    while [ "${count}" -gt 0 ]; do
        result="${result}${2}"
        count=$(expr "${count}" - 1)
    done
    echo "${result}"
}
# digits_of_e N
#
# output Euler's number with a precision of N decimal digits
# E=2.7182818284590452353602874713527...
#
digits_of_e () {
    local N e numerator denominator k term
    N="${1}"
    # compute E by summing the series \sum (1/k!)
    # multiply whole series by 10^(N+2) so we can use just int arithmetic;
    # the extra 2 digits are for better accuracy
    e="2$(_many $N 0)00"
    numerator="1$(_many $N 0)00"
    denominator=1
    k=2
    term="${numerator}"
    # Stop once the scaled term drops below 100 (= 1 in the requested
    # precision, given the two guard digits).
    while mp_lt 100 "${term}"; do
        denominator=$(mp_mul "${denominator}" "${k}")
        term=$(mp_div "${numerator}" "${denominator}")
        e=$(mp_add "${e}" "${term}")
        k=$(expr "${k}" + 1)
    done
    # Drop the guard digits and format as "2.<fraction>".
    echo "2.$(echo ${e} | cut -c2-$(expr ${N} + 1))"
}
| true |
326a8b182054ab1cc83dfc1e71b05fc9967a817a | Shell | kvirund/codingame | /easy/the.descent/main.sh | UTF-8 | 612 | 3.0625 | 3 | [
"MIT"
] | permissive | # Author: Anton Gorev aka Veei
# Date: 2015/09/24
# game loop
# CodinGame "The Descent": each turn, read the ship position and the 8
# mountain heights from stdin, then FIRE at the tallest mountain when
# flying over it, otherwise HOLD.
# `last` starts at an impossible altitude so the first pass can FIRE.
last=100500
while true; do
    read spaceX spaceY
    max=-1
    # Track the tallest mountain (max) and its column (maxX).
    for (( i=0; i<8; i++ )); do
        # mountainH: represents the height of one mountain, from 9 to 0. Mountain heights are provided from left to right.
        read mountainH
        if (( max < mountainH )); then
            max=$mountainH
            maxX=$i
        fi
    done
    # Write an action using echo
    # To debug: echo "Debug messages..." >&2
    # Fire only once per altitude (last != spaceY) and only when above
    # the tallest peak.
    if (( last != spaceY && maxX == spaceX )); then
        echo "FIRE"
        last=$spaceY
    else
        echo "HOLD"
    fi
done
| true |
831121ebf6725304b87ee0a8e31771fc71a62811 | Shell | flyingjett/neuron-os | /scripts/run.sh | UTF-8 | 337 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# Include our configuration file (works whether we run from the repo
# root or from inside scripts/).
if [ -f "../config/scripts.conf" ]; then
    . ../config/scripts.conf
elif [ -f "./config/scripts.conf" ]; then
    . ./config/scripts.conf
else
    echo "Error finding config file"
    exit 1;
fi
# Script to compile the code, then build the image, then execute bochs.
# Pass the config path directly and quoted: the original ran it through
# an unquoted backtick `echo` substitution, which added nothing and
# word-split/glob-expanded the value.
bochs -qf "${BOCHS_CONFIG}"
| true |
d976975346ca6570712820d65a728fe81193a95a | Shell | ilventu/aur-mirror | /freetuxtv-svn/PKGBUILD | UTF-8 | 981 | 2.875 | 3 | [] | no_license | # Maintainer: LeCrayonVert
# Arch Linux PKGBUILD for the FreetuxTV SVN snapshot: checks out (or
# updates) the upstream trunk at revision $pkgver, builds out-of-tree,
# then installs into $pkgdir.
pkgname=freetuxtv-svn
pkgver=703
pkgrel=1
pkgdesc="WebTV player to watch ADSL TV on the PC with ISPs Free and SFR."
arch=('i686' 'x86_64')
url="http://code.google.com/p/freetuxtv/"
license=('GPL')
depends=('vlc' 'gtk2' 'sqlite3' 'curl' 'libnotify')
makedepends=('subversion' 'libtool')
_svntrunk=http://freetuxtv.googlecode.com/svn/trunk/
_svnmod=freetuxtv-read-only
build() {
  cd "$srcdir"
  msg "Connecting to SVN server...."
  # Update an existing checkout in place, otherwise do a fresh checkout
  # pinned to $pkgver for reproducibility.
  if [[ -d "$_svnmod/.svn" ]]; then
    (cd "$_svnmod" && svn up -r "$pkgver")
  else
    svn co "$_svntrunk" --config-dir ./ -r "$pkgver" "$_svnmod"
  fi
  msg "SVN checkout done or server timeout"
  msg "Starting build..."
  # Build in a throwaway copy so the pristine checkout can be reused.
  rm -rf "$srcdir/$_svnmod-build"
  cp -r "$srcdir/$_svnmod" "$srcdir/$_svnmod-build"
  cd "$srcdir/$_svnmod-build"
  #
  # BUILD HERE
  #
  ./autogen.sh
  ./configure --prefix=/usr
  make
}
package() {
  cd "$srcdir/$_svnmod-build"
  make DESTDIR="$pkgdir/" install
}
# vim:set ts=2 sw=2 et:
| true |
2caf605147edb744bcb916e16cc06dd29b566125 | Shell | jprovaznik/tripleo-incubator | /scripts/setup-baremetal | UTF-8 | 839 | 3.140625 | 3 | [] | no_license | #!/bin/bash
set -eu
# Register $NODES baremetal nodes (each with CPU/MEM/DISK) with Nova
# and upload the deploy kernel/ramdisk from $TRIPLEO_ROOT to Glance,
# then (re)create the matching "baremetal" flavor.
BASE=$(dirname $0)/../
CPU=$1
MEM=$2
DISK=$3
NODES=$4
arch=i386
# Perhaps make this always recreate the nodes for repeatability?
$BASE/scripts/register-nodes $CPU $MEM $DISK $NODES
deploy_kernel=$TRIPLEO_ROOT/deploy-ramdisk.kernel
deploy_ramdisk=$TRIPLEO_ROOT/deploy-ramdisk.initramfs
# The awk scrapes the image UUID out of glance's table output.
deploy_kernel_id=$(glance image-create --name bm-deploy-kernel --public \
    --disk-format aki < "$deploy_kernel" | awk ' / id / {print $4}')
deploy_ramdisk_id=$(glance image-create --name bm-deploy-ramdisk --public \
    --disk-format ari < "$deploy_ramdisk" | awk ' / id / {print $4}')
# Delete-then-create keeps the flavor in sync; `|| true` tolerates the
# first run where it does not exist yet.
nova flavor-delete baremetal || true
nova flavor-create baremetal auto $2 $3 $1
nova flavor-key baremetal set "cpu_arch"="$arch" \
    "baremetal:deploy_kernel_id"="$deploy_kernel_id" \
    "baremetal:deploy_ramdisk_id"="$deploy_ramdisk_id"
| true |
89976a6fa11e88fc3fe4854360068ffef6de6277 | Shell | gpxlnx/virtualbox-scripts | /vb_help.sh | UTF-8 | 1,419 | 3.84375 | 4 | [] | no_license | #!/bin/bash
#
# Virtualbox helper script which list all available Bash aliases that can be used from command line to manage Virtualbox
#
# NOTE: Don't forget to add the Bash aliases from the bashrc.txt file to ~/.bashrc
#
# NOTE(review): the help text below contains typos that are runtime
# strings ("sever01", "grou", "Virutalbox") -- fix upstream if desired.
bold=$(tput bold)
normal=$(tput sgr0)
echo -e "${bold}Use the Bash aliases below to manage Virtualbox from the command line:\n${normal}"
# Each echo below spans several source lines: the newlines are part of
# the quoted string, so nothing may be inserted inside them.
echo -e "${bold}Start/stop VM:${normal}
vbstart server01 - Start VM server01
vbstop server01 - Stop VM server01";
echo -e "\n${bold}Start/stop all VMs:${normal}
vbstartall - Start all VMs
vbstopall - Stop all VMs"
echo -e "\n${bold}Group action:${normal}
vbstartg puppet - Start all VMs in group puppet
vbstopg puppet - Stop all server in grou puppet"
echo -e "\n${bold}Network:${normal}
vbports - List port forwarding for all Virtualbox VMs
vbmac server01 - List MAC for VM
vbip - List IP for all VMs";
echo -e "\n${bold}Snapshot:${normal}
vbsnaps sever01 - List snapshots for server01
vbsnap server01 - Take snapshot of server01
vbrestoresnap server01 snapshotUUID - Restore snapshot on server01 with snapshotUUID
vbremovesnap server01 snapshotUUID - Remove snapshot from server01 with snapshotUUID"
echo -e "\n${bold}Misc:${normal}
vbhelp - Show this output
vblist - List all Virutalbox VMs active & disabled
vbrunning - List all running VMs";
| true |
6ce6d7a34086e36475bc57c06718ca3bcc8c33f8 | Shell | teru01/scripts | /script.sh | UTF-8 | 1,045 | 2.59375 | 3 | [] | no_license | #!/bin/bash
set -u
shopt -s dotglob
# Provision an ISUCON-style benchmark box: install tooling, fetch the
# author's measurement scripts into $PROJ_DIR, disable apparmor/ufw and
# set up tmux.  Run as a user with sudo; $1 is the project directory.
PROJ_DIR=$1
yes "" | sudo add-apt-repository ppa:git-core/ppa
sudo apt update
sudo apt install -y nginx dstat percona-toolkit sysstat curl git build-essential tree net-tools vim unzip
go get -u github.com/matsuu/kataribe
go get -u github.com/google/pprof
curl -LO https://github.com/teru01/log2discord/releases/download/1.0/log2discord
chmod +x log2discord
sudo mv log2discord /bin
cd $PROJ_DIR
#######################
# Measurement scripts
#######################
curl -LO https://github.com/teru01/scripts/archive/master.zip
unzip master.zip
rm master.zip
mv scripts-master/* .
rm -r scripts-master
chmod +x pprof_npr.sh
chmod +x setup_npr.sh
chmod +x summarize_npr.sh
$(go env GOPATH)/bin/kataribe -generate
# The benchmark environment does not want these interfering.
sudo systemctl stop apparmor
sudo systemctl disable apparmor
sudo systemctl stop ufw
sudo systemctl disable ufw
# Heredoc body is the literal tmux config -- do not edit as shell.
cat <<EOF > ~/.tmux.conf
set -g mouse on
unbind C-b
set -g prefix C-g
bind C-b send-prefix
# scroll buffer
set -g history-limit 65535
EOF
tmux source-file ~/.tmux.conf
| true |
f5793397aff2875b9a71728c4dc0186060c9042d | Shell | mvanveen/bootstrap.sh | /ssh/bootstrap.sh | UTF-8 | 223 | 2.96875 | 3 | [] | no_license | SSH_PUB_KEY_FILE="$HOME/.ssh/id_rsa.pub"
# Generate an RSA keypair for the current user if none exists yet.
# NOTE(review): `info` is not defined in this file -- presumably the
# surrounding bootstrap framework sources it first; confirm.
set_ssh_keys() {
  if [ ! -f $SSH_PUB_KEY_FILE ]; then
    info "ssh keys not detected. setting some up\n"
    # -q quiet, -N "" empty passphrase; key lands in the default path
    # that SSH_PUB_KEY_FILE points at.
    ssh-keygen -q -N "" -t rsa -C "michael@mvanveen.net"
  fi
}
set_ssh_keys;
| true |
c68bdcc87b077a7c2907da468311f2917a62f610 | Shell | vcheesbrough/phishpi.node-red | /commit.sh | UTF-8 | 149 | 2.640625 | 3 | [] | no_license | #!/bin/bash
# Commit the PhishPi working tree with message $1, after deleting the
# admin/ entries selected by any extra find(1) predicates in "$@".
# NOTE(review): `cd` is unchecked -- if it fails, the find/rm below
# still runs against the previous directory's hard-coded path; consider
# `cd ... || exit`.
cd /home/pi/dev/PhishPi
# Destructive: removes matching top-level entries under admin/.
find /home/pi/dev/PhishPi/admin -maxdepth 1 -mindepth 1 "${@:2}" -exec rm -fR {} \;
git add .
git commit -m "$1"
| true |
4b681ce77fcd571674fef692764561f1e9847080 | Shell | alifjamaluddin/Android-Easy-Rooting-Toolkit | /UnRoot/runme-UNROOT.sh | UTF-8 | 2,780 | 3.5 | 4 | [] | no_license | ADB_CMD=/home/fvalverd/Android/android-sdk-linux_x86/platform-tools/adb
# Interactive Android unrooting helper: prints instructions, waits for
# the user, then drives the phone over adb (push busybox + unroot
# script, run them via `su`, reboot).
# NOTE(review): ADB_CMD is assigned above but never used -- every call
# below invokes the bare `adb` from PATH; confirm which is intended.
echo "---------------------------------------------------------------"
echo " Easy rooting toolkit (v1.0)"
echo " UNROOTING SCRIPT"
echo " created by DooMLoRD"
echo " based heavily on FlashTool scripts (by Bin4ry and Androxyde)"
echo " Credits go to all those involved in making this possible!"
echo "---------------------------------------------------------------"
echo " [*] This script will:"
echo " (1) unroot ur device using special script"
echo " (2) remove Busybox and assocaited symlinks"
echo " (3) remove SU files and assocaiated data"
echo " [*] Before u begin:"
echo " (1) make sure u have installed adb drivers for ur device"
echo " (2) enable \"USB DEBUGGING\""
echo " from (Menu\Settings\Applications\Development)"
echo " (3) enable \"UNKNOWN SOURCES\""
echo " from (Menu\Settings\Applications)"
echo " (4) [OPTIONAL] increase screen timeout to 10 minutes"
echo " (5) connect USB cable to PHONE and then connect to PC"
echo " (6) skip \"PC Companion Software\" prompt on device"
echo "---------------------------------------------------------------"
echo ""
echo "CONFIRM ALL THE ABOVE THEN"
echo "---------------------------------------------------------------"
echo "MAKE SURE THAT THE SCREEN IS UNLOCKED"
echo "and if you get Superuser prompts ACCEPT/ALLOW THEM"
echo "ELSE THIS WILL NOT WORK"
echo "---------------------------------------------------------------"
echo ""
read -p "Press [Enter] AFTER TO CONFIRM ALL THE ABOVE..."
echo ""
echo "--- STARTING ----"
echo "--- WAITING FOR DEVICE"
adb wait-for-device
echo "--- TESTING FOR SU PERMISSIONS"
echo "MAKE SURE THAT THE SCREEN IS UNLOCKED"
echo "and if you get Superuser prompts ACCEPT/ALLOW THEM"
echo "ELSE THIS WILL NOT WORK"
adb shell "su -c 'echo --- Superuser check successful'"
echo ""
echo "--- cleaning"
adb shell "cd /data/local/tmp/; rm *"
echo "--- pushing busybox"
adb push ./files/busybox "/data/local/tmp/."
echo " --- correcting permissions"
adb shell "chmod 755 /data/local/tmp/busybox"
echo " --- remounting /system"
echo "MAKE SURE THAT THE SCREEN IS UNLOCKED"
echo "and if you get Superuser prompts ACCEPT/ALLOW THEM"
echo "ELSE THIS WILL NOT WORK"
adb shell "su -c '/data/local/tmp/busybox mount -o remount,rw /system'"
# unroot
echo "--- pushing unroot script"
adb push ./files/unroot "/data/local/tmp/."
echo " --- correcting permissions"
adb shell "chmod 777 /data/local/tmp/unroot"
echo " --- executing unroot"
adb shell "su -c '/data/local/tmp/unroot'"
echo ""
echo "--- cleaning /data/local/tmp/"
adb shell "cd /data/local/tmp/; rm *"
echo "--- rebooting"
adb reboot
echo "ALL DONE!!!" | true |
21241e455d1ffbc346d33a600831b69e0b29780a | Shell | ocdladefense/vagrant-environment | /provision.sh.help | UTF-8 | 867 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# Commands to be executed when installing clickpdx/devenv for use with WordPress.
1. Given an existing DOCUMENT_ROOT directory
a. For example, as specified in an Apache2 <VirtualHost> directive.
2. Given that Composer is installed in the given environment
3. Create a directory that is to be DOCUMENT_ROOT
a. Use the appropriate permissions for setgid and getfacl/setfacl -d -m
b. setfacl -d -m g::rwx /DOCUMENT_ROOT
c. See https://www.linuxquestions.org/questions/linux-desktop-74/applying-default-permissions-for-newly-created-files-within-a-specific-folder-605129/ for more info
d. Set the <gid> git on DOCUMENT_ROOT
e. Change the grp permission on DOCUMENT_ROOT to 775 - so group members can manage the directory
4. Clone the clickpdx/devenv directory to DOCUMENT_ROOT
a. Run the <git init> command
b. Set the remote origin
c.
| true |
f7043b2bf50f55ba12eaf60e9699db4b0638f37f | Shell | SachinG007/fila | /try3.sh | UTF-8 | 146 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Emit every (iteration, seed) pair as "<i> <j>", one per line:
# i runs 0..horizon-1, j runs 0..seeds-1.
horizon=1000 # example
seeds=100
for ((i = 0; i < horizon; i++)); do
    for ((j = 0; j < seeds; j++)); do
        echo "$i $j"
    done
done | true |
e6d74462b5d5ec25d256565fefc192cdc7141e0d | Shell | digitalmediabremen/design-of-printed-circuit-boards | /sessions/start-presentation.sh | UTF-8 | 298 | 3.0625 | 3 | [] | no_license | #! /bin/bash
# Serve a Marp presentation ($1) with theme CSS ($2) on port $3, open
# it in the default browser, and keep it running until Enter is hit.
BASEDIR=$(cd "$(dirname "$0")"; pwd)
PRESENTATION="$1"
CSS_FILE="$2"
PORT="$3"
echo "$1"
echo "$2"
echo "$BASEDIR"
cd "$BASEDIR"
# Run marp in the background and remember its PID so it can be killed
# when the user is done.
PORT=$PORT marp --server --html --watch --theme $CSS_FILE $PRESENTATION & PROCESS_ID=$!
# Give the server a moment to come up before opening the browser.
sleep 2.0
open http://localhost:$PORT/
# Block until the user presses Enter, then tear the server down.
read -r
kill $PROCESS_ID
| true |
d3e61fca9bade904626d616062e104fc977c67a1 | Shell | tomhoover/homeshick | /lib/commands/clone.sh | UTF-8 | 2,354 | 4.28125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# clone URL-or-shorthand
#
# Clone a castle into $repos, expanding "user/repo" github shorthands.
# Reports progress via pending/success and aborts via err on failure.
clone() {
  [[ ! $1 ]] && help_err clone
  local git_repo=$1
  # Expand a github shorthand ("user/repo") into a full clone URL.
  # (The original called is_github_shorthand twice in a row here; the
  # first call's result was discarded, so it has been removed.)
  if is_github_shorthand "$git_repo"; then
    if [[ -e "$git_repo/.git" ]]; then
      local msg="$git_repo also exists as a filesystem path,"
      msg="${msg} use \`homeshick clone ./$git_repo' to circumvent the github shorthand"
      warn 'clone' "$msg"
    fi
    git_repo="https://github.com/$git_repo.git"
  fi
  local repo_path
  # repos is a global variable
  # shellcheck disable=SC2154
  repo_path=$repos"/"$(repo_basename "$git_repo")
  pending 'clone' "$git_repo"
  test -e "$repo_path" && err "$EX_ERR" "$repo_path already exists"
  local git_out
  # git >= 1.6.5 supports --recursive; older versions need a separate
  # submodule step.  (version_compare presumably returns 2 when
  # GIT_VERSION < 1.6.5 -- confirm against its definition.)
  version_compare "$GIT_VERSION" 1.6.5
  if [[ $? != 2 ]]; then
    git_out=$(git clone --recursive "$git_repo" "$repo_path" 2>&1) || \
      err "$EX_SOFTWARE" "Unable to clone $git_repo. Git says:" "$git_out"
    success
  else
    git_out=$(git clone "$git_repo" "$repo_path" 2>&1) || \
      err "$EX_SOFTWARE" "Unable to clone $git_repo. Git says:" "$git_out"
    success
    pending 'submodules' "$git_repo"
    git_out=$(cd "$repo_path" && git submodule update --init 2>&1) || \
      err "$EX_SOFTWARE" "Unable to clone submodules for $git_repo. Git says:" "$git_out"
    success
  fi
  return "$EX_SUCCESS"
}
# symlink_cloned_files CASTLE-URL...
#
# Collect the castles among the arguments that actually carry files in
# their home/ directory and hand them to ask_symlink for linking.
symlink_cloned_files() {
  local cloned_castles=()
  while [[ $# -gt 0 ]]; do
    local git_repo=$1
    if is_github_shorthand "$git_repo"; then
      git_repo="https://github.com/$git_repo.git"
    fi
    local castle
    castle=$(repo_basename "$git_repo")
    shift
    local repo="$repos/$castle"
    # Castles without a home/ directory have nothing to symlink.
    if [[ ! -d $repo/home ]]; then
      continue;
    fi
    local num_files
    # tr strips everything but digits from wc's (possibly padded) count.
    num_files=$(find "$repo/home" -mindepth 1 -maxdepth 1 | wc -l | tr -dc "0123456789")
    if [[ $num_files -gt 0 ]]; then
      cloned_castles+=("$castle")
    fi
  done
  ask_symlink "${cloned_castles[@]}"
  return "$EX_SUCCESS"
}
# Convert username/repo into https://github.com/username/repo.git
# Succeeds only for a bare "user/repo" shorthand (and never for
# anything already ending in .git).
is_github_shorthand() {
  [[ $1 =~ \.git$ ]] && return 1
  [[ $1 =~ ^([0-9A-Za-z-]+/[0-9A-Za-z_\.-]+)$ ]]
}
# Get the repo name from an URL
repo_basename() {
  local repo_url=$1
  # scp-style syntax ('[user@]host.xz:path/to/repo.git') keeps the
  # path after the first colon; drop the '[user@]host.xz:' prefix.
  if [[ $repo_url =~ ^[^/:]+: ]]; then
    repo_url=${repo_url#*:}
  fi
  basename "$repo_url" .git
}
| true |
e27b3b2f8d727a54beaff784cacbfea6178e97b9 | Shell | bluskyjosh/support-ticket-api | /scripts/db-dump.sh | UTF-8 | 561 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Dump $DB_DATABASE (credentials sourced from ./.env) into
# database/backups, optionally tagging the file name with $1.
curdir=$(pwd)
db_dir=$curdir/database/backups
if [ -f $curdir/.env ] ; then
    . $curdir/.env
else
    echo "Couldn't find .env"
    exit 1
fi
# Optional tag appended to the dump file name; `tag` stays unset/empty
# otherwise (no set -u, so the later expansion is harmless).
if [ ! -z $1 ] ; then
    tag="_$1"
fi
# NOTE(review): `date --iso-8601` is a GNU extension; this fails on
# BSD/macOS date despite the /bin/sh shebang -- confirm target hosts.
datestring=$(date --iso-8601=minutes)
sql_file=$db_dir/$DB_DATABASE-$datestring$tag.sql
echo -n "Backing up ... "
mkdir -p $db_dir
# NOTE(review): the password on the command line is visible in `ps`.
mysqldump -u$DB_USERNAME -p$DB_PASSWORD -h$DB_HOST --add-drop-database --database $DB_DATABASE > $sql_file
exit_status=$?
if [ $exit_status -eq 0 ] ; then
    echo Done.
    echo Backuped to: $sql_file
else
    echo Failed!
    exit $exit_status
fi
| true |
82755ab7dc1de86ce0cd46ae28cf6e81088ec3e4 | Shell | SamJGldstn/datarobot-user-models | /jenkins/test3_drop_in_envs.sh | UTF-8 | 1,735 | 3.8125 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -ex
# Build the DRUM wheel inside the builder container, inject it into
# every public drop-in environment's Dockerfile/requirements, install
# it locally, and run the drop-in-environment functional tests.
CDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
DRUM_BUILDER_IMAGE="datarobot/drum-builder"
echo $CDIR
# pull DRUM builder container and build DRUM wheel
docker pull ${DRUM_BUILDER_IMAGE}
# If we are in terminal will be true when running the script manually. Via Jenkins it will be false.
TERMINAL_OPTION=""
if [ -t 1 ] ; then
  TERMINAL_OPTION="-t"
fi
# Run the build as the invoking user so the produced wheel is writable.
docker run -i ${TERMINAL_OPTION} --user $(id -u):$(id -g) -v $CDIR:/tmp/drum $DRUM_BUILDER_IMAGE bash -c "cd /tmp/drum/custom_model_runner && make"
docker rmi $DRUM_BUILDER_IMAGE --force
DRUM_WHEEL=$(find custom_model_runner/dist/datarobot_drum*.whl)
DRUM_WHEEL_FILENAME=$(basename $DRUM_WHEEL)
DRUM_WHEEL_REAL_PATH=$(realpath $DRUM_WHEEL)
# Change every environment Dockerfile to install freshly built DRUM wheel
WITH_R=""
pushd public_dropin_environments
DIRS=$(ls)
for d in $DIRS
do
  pushd $d
  cp $DRUM_WHEEL_REAL_PATH .
  # check if DRUM is installed with R option
  if grep "datarobot-drum\[R\]" dr_requirements.txt
  then
    WITH_R="[R]"
  fi
  # insert 'COPY wheel wheel' after 'COPY dr_requirements.txt dr_requirements.txt'
  sed -i "/COPY \+dr_requirements.txt \+dr_requirements.txt/a COPY ${DRUM_WHEEL_FILENAME} ${DRUM_WHEEL_FILENAME}" Dockerfile
  # replace 'datarobot-drum' requirement with a wheel
  sed -i "s/^datarobot-drum.*/${DRUM_WHEEL_FILENAME}${WITH_R}/" dr_requirements.txt
  popd
done
popd
# installing DRUM into the test env is required for push test
pip install -U $DRUM_WHEEL_REAL_PATH
# requirements_test may install newer packages for testing, e.g. `datarobot`
pip install -r requirements_test.txt
py.test tests/functional/test_drop_in_environments.py \
  --junit-xml="$CDIR/results_drop_in.xml"
| true |
d2f0f412bdd43a39499c310a0b35866450d8c0d9 | Shell | seips-net/rvm | /scripts/functions/requirements/osx | UTF-8 | 2,287 | 3.734375 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# requirements_osx_find_brew / _port / _fink
#
# Each probe looks for its package manager in PATH first (\which skips
# any shell alias), then in a list of well-known install prefixes; a
# binary found outside PATH is symlinked into RVM's bin dir so later
# PATH lookups succeed.  Returns 0 when found, 1 otherwise.
requirements_osx_find_brew()
{
  if
    \which brew >/dev/null 2>&1
  then
    return 0
  else
    typeset __binary
    for __binary in ~/homebrew/bin/brew ~/.homebrew/bin/brew /usr/local/bin/brew
    do
      if
        [[ -f "${__binary}" && -s "${__binary}" && -x "${__binary}" ]]
      then
        # link to rvm_bin for easy adding to PATH
        ln -nfs "${__binary}" "${rvm_bin_path:-$rvm_path/bin}/brew"
        return 0
      fi
    done
    return 1
  fi
}
requirements_osx_find_port()
{
  if # find macports in PATH
    \which port >/dev/null 2>&1
  then
    return 0
  else # find macports out of PATH and link to rvm/bin
    typeset __binary
    for __binary in ~/opt/local/bin/port ~/.opt/local/bin/port /usr/local/bin/port /opt/local/bin/port
    do
      if
        [[ -f "${__binary}" && -s "${__binary}" && -x "${__binary}" ]]
      then
        # link to rvm_bin for easy adding to PATH
        ln -nfs "${__binary}" "${rvm_bin_path:-$rvm_path/bin}/port"
        return 0
      fi
    done
    return 1
  fi
}
requirements_osx_find_fink()
{
  if # find macports in PATH
    \which fink >/dev/null 2>&1
  then
    return 0
  else # find fink out of PATH and link to rvm/bin
    typeset __binary
    for __binary in ~/sw/bin/fink ~/.sw/bin/fink /usr/local/bin/fink /sw/bin/fink
    do
      if
        [[ -f "${__binary}" && -s "${__binary}" && -x "${__binary}" ]]
      then
        # link to rvm_bin for easy adding to PATH
        ln -nfs "${__binary}" "${rvm_bin_path:-$rvm_path/bin}/fink"
        return 0
      fi
    done
    return 1
  fi
}
# requirements_osx_run TYPE ARGS...
#
# Source the per-manager implementation script (osx_brew, osx_port, ...)
# from rvm_scripts_path and delegate to its requirements_TYPE function.
requirements_osx_run()
{
  typeset __type
  __type=$1
  shift
  [[ -s "$rvm_scripts_path/functions/requirements/osx_${__type}" ]] ||
  {
    rvm_error "Requirements support for osx_${__type} is not implemented yet,
report a bug here => https://github.com/wayneeseguin/rvm/issues"
    return 1
  }
  source "$rvm_scripts_path/functions/requirements/osx_${__type}"
  requirements_${__type} "$@" || return $?
}
# requirements_osx ARGS...
#
# Try each supported package manager in preference order and run the
# requirements via the first one found; fall back to brew (which will
# then report/install as appropriate).
requirements_osx()
{
  typeset __type
  for __type in brew port # fink ...
  do
    if
      requirements_osx_find_${__type}
    then
      requirements_osx_run ${__type} "$@" || return $?
      return 0
    fi
  done
  # fallback
  requirements_osx_run brew "$@" || return $?
}
| true |
6d98e17a012aea172d9d7b816aa049966c51239c | Shell | CL-9a/4chan-archiver | /archiveUntilArchived.sh | UTF-8 | 181 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env bash
# Repeatedly run archive.sh for BOARD/THREAD until it reports the
# thread as archived (exit status 2), sleeping $waitTime between runs.
board=$1
thread=$2
# Delay between passes, in seconds (~63 minutes by default).
waitTime=${3:-3800}
# The while condition is a command list: archive.sh runs first, then
# `[ $? -ne 2 ]` tests its exit status -- so the loop continues while
# archive.sh exits with anything other than 2.
while sh ./archive.sh "$board" "$thread" ; [ $? -ne 2 ] ; do
    echo "waiting for $waitTime seconds"
    sleep $waitTime
done | true |
82948b4af4b857dc82872981d5bf664c93adb8a5 | Shell | tuulos/metaflow-datastore-tests | /tests/cas_hit | UTF-8 | 387 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
source util.sh
# Benchmark content-addressed-store hits: run the cas_hit flow twice
# with the same nonce and report both durations (the second run should
# hit the already-stored artifact and be faster).
NONCE=$(uuidgen)
MODE=$(basename $1)
runmf $1 cas_hit run --nonce=$NONCE
# `runmf` (from util.sh) is assumed to set $took to "<seconds> ..." --
# confirm against util.sh; only the first field is kept.
first_cas=$(echo "$took" | cut -d ' ' -f 1)
# the artifact exists in the datastore, the second time should be faster
runmf $1 cas_hit run --nonce=$NONCE
second_cas=$(echo "$took" | cut -d ' ' -f 1)
echo "cas_hit[$MODE][$METAFLOW_DATASTORE]: first $first_cas, second $second_cas" >&2
| true |
b90b98eee535329d8a7e00f6ecc24cf3430ffb8d | Shell | kkqueenie/Mytest | /runfiles/run_jobs_N.sh | UTF-8 | 3,221 | 3.640625 | 4 | [] | no_license | #!/bin/bash
##time unit :minute
# Positional arguments: LAMMPS binary path, N step size (dN), N range
# [nNs..nNe] in units of dN, LAMMPS input file name, data directory
# prefix, number of terminal "lines" (tabs) to spread jobs over, and
# the run-case subdirectory name.
Lammps_location=$1
dN=$2
nNs=$3
nNe=$4
inputfile_name=$5
data_location_dirname=$6
number_line=$7
runcase=$8
current_location=$(pwd)
####function to run job at $data_location_dirname$N/$runcase
# RunMultiJob N_COUNT...
#
# Build one long command string that cd's into each job directory and
# runs LAMMPS sequentially, then launch it in a new gnome-terminal tab.
# Exits the whole script if any N_COUNT is outside [nNs, nNe].
RunMultiJob(){
##run with given n_count and s_time(sleep time)
tab_name="N="
content="printf \"Total jobs: $# \n\";"
job_count=0
for n_count_tmp in $@
do
	N=$(( $n_count_tmp * $dN ))
	# check if n_count in range[nNs:nNe]
	if [ $n_count_tmp -lt $nNs ]; then
		exit;
	elif [ $n_count_tmp -gt $nNe ]; then
		exit;
	fi
	job_count=$(($job_count+1))
	#if runcase folder doesnt exist then skip this
	if [ ! -d "$data_location_dirname$N/$runcase" ];
	then
		printf "\x1b[31m \tNO address: $data_location_dirname$N/$runcase \x1b[0m\n"
		continue
	fi
	tab_name="$tab_name($N)"
	content="$content cd $data_location_dirname$N/$runcase; echo \"Job$job_count: position:\"\$(pwd); printf \"\tsleep $s_time min\n\"; sleep $(( $s_time ))m; now=\$(date +\"%Y.%m.%d-%H:%M:%S\"); printf \"\tStart time : \$now\n\"; $Lammps_location < $inputfile_name > out; now=\$(date +\"%Y.%m.%d-%H:%M:%S\"); printf \"Job$job_count done: \$now\n\n\"; cd $current_location;"
done
### run job in new tab
# Nothing accumulated beyond the header means no runnable jobs.
if [ "$content" == "printf \"Total jobs: 0 \n\";" ];
then
	return 0
fi
# NOTE(review): the quotes around $tab_name below are typographic
# ("curly") quotes, i.e. literal characters, so $tab_name is not
# actually shell-quoted -- confirm this is intended.
gnome-terminal --tab -t “$tab_name” -- bash -c "$content exec bash;"
#exec bash"
}
# ceil EXPR
#
# Evaluate the arithmetic expression EXPR (e.g. "7/2") and print the
# smallest integer >= its value.
#
# Bug fixed: the previous version truncated with bc and then compared
# the result against the *raw string* EXPR via `awk -v num2=$1`; since
# "6/3" is not numeric, awk fell back to a string comparison
# ("2" < "6/3"), so exact divisions came out one too large.  Evaluating
# everything inside awk fixes that and drops the bc/expr dependencies.
function ceil(){
	awk "BEGIN { v = $1; f = int(v); if (v > f) f += 1; print f }"
}
############################################################################################################
### run jobs in parallel
# Distribute n_count values (from nNe down to nNs) round-robin over up
# to 8 LINE arrays, then hand each array to RunMultiJob so every
# terminal "line" (tab) works through its share sequentially.
printf "\nrun jobs at $data_location_dirname\$N/$runcase\n"
n_N=$(($nNe-$nNs+1))
job_per_line=`ceil $n_N/$number_line `
##run from nNe to nNs
#: '
###calculate n,N
n_count=$(( $nNe+1 ))
count_line=-1
for (( counter=$nNe; counter>=$nNs; counter--))
do
	s_time=0
	n_count=$(( $n_count - 1 ))
	count_line=$(($count_line+1))
	line_num=$(( `expr $count_line % $number_line` +1))
	if [ $line_num -eq 1 ]; then
		LINE1=( "${LINE1[@]}" $n_count )
	elif [ $line_num -eq 2 ]; then
		LINE2=( "${LINE2[@]}" $n_count )
	elif [ $line_num -eq 3 ]; then
		LINE3=( "${LINE3[@]}" $n_count )
	elif [ $line_num -eq 4 ]; then
		LINE4=( "${LINE4[@]}" $n_count )
	elif [ $line_num -eq 5 ]; then
		LINE5=( "${LINE5[@]}" $n_count )
	elif [ $line_num -eq 6 ]; then
		LINE6=( "${LINE6[@]}" $n_count )
	elif [ $line_num -eq 7 ]; then
		LINE7=( "${LINE7[@]}" $n_count )
	elif [ $line_num -eq 8 ]; then
		LINE8=( "${LINE8[@]}" $n_count )
	fi
done
#'
echo 'N=$(( $n_count * $dN ))'
echo "dN=$dN"
# One gnome-terminal tab per non-empty line.
echo "line1: n_count = "${LINE1[@]}; RunMultiJob ${LINE1[@]}
echo "line2: n_count = "${LINE2[@]}; RunMultiJob ${LINE2[@]}
echo "line3: n_count = "${LINE3[@]}; RunMultiJob ${LINE3[@]}
echo "line4: n_count = "${LINE4[@]}; RunMultiJob ${LINE4[@]}
echo "line5: n_count = "${LINE5[@]}; RunMultiJob ${LINE5[@]}
echo "line6: n_count = "${LINE6[@]}; RunMultiJob ${LINE6[@]}
echo "line7: n_count = "${LINE7[@]}; RunMultiJob ${LINE7[@]}
echo "line8: n_count = "${LINE8[@]}; RunMultiJob ${LINE8[@]}
| true |
67b7a1cfbf13748d4cacd3ed9b4608959b812d97 | Shell | zvika77/zvika_env | /Vertica/Sql/health_check.sh | UTF-8 | 1,112 | 2.6875 | 3 | [] | no_license | #!/bin/bash
SCRIPT_DIR=/home/oracle/11g/vertica_scripts
# set_env USER PASSWORD HOST
#
# Export the Vertica connection settings (V_USER, V_PASS, V_DB) that
# run_report reads.
set_env()
{
export V_USER="$1" V_PASS="$2" V_DB="$3"
}
# run_report
#
# Render health_check.sql as HTML via vsql for the current connection
# (V_USER/V_PASS/V_DB, set by set_env) and mail it through sendmail.
# NOTE(review): credentials appear on the vsql command line (visible in
# `ps`) and are hardcoded in the calls below -- consider a secrets file.
run_report()
{
echo "From:vertica@liveperson.com">/home/zvikag/Vertica/Log/sendmail.header
echo "Subject: $V_DB Vertica Health Check Report">>/home/zvikag/Vertica/Log/sendmail.header
# echo "To: zvikag@liveperson.com">>/home/zvikag/Vertica/Log/sendmail.header
echo "To: dbaoncall-lp@liveperson.com">>/home/zvikag/Vertica/Log/sendmail.header
echo "Content-type: text/html">>/home/zvikag/Vertica/Log/sendmail.header
echo Running Report...
echo $V_DB
cat /home/zvikag/Vertica/Sql/health_check.sql | /opt/vertica/bin/vsql -H -E -U $V_USER -w $V_PASS -h $V_DB > /home/zvikag/Vertica/Log/health_check_${V_DB}.html
echo Sending Mail ...
cat /home/zvikag/Vertica/Log/sendmail.header /home/zvikag/Vertica/Log/health_check_${V_DB}.html | /usr/sbin/sendmail -t
}
# One report per target environment; each set_env rebinds the
# connection settings before the next run_report.
set_env vertica lpbi4u svpr-dbv05
run_report
set_env vertica lpbi4uk slpr-dbv01
run_report
set_env vertica lpbi4am rapr-dbv01
run_report
set_env vertica lpbi4dr ropr-dbv05
run_report
set_env vertica lp4alpha svor-dbv100
run_report
echo Finished
| true |
0bbe0a10bde51e33811b214d27bcc001d6240191 | Shell | antocuni/pytabletop | /deploy.sh | UTF-8 | 675 | 3.25 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# usage: deploy.sh [--all]
# Push the pytabletop sources to an Android device via adb: with --all,
# stage and push the full file set; otherwise only refresh pytt/.
# Staging happens in TMPDIR; py.cleanup (assumed on PATH -- confirm)
# strips compiled/temporary Python files before pushing.
TMPDIR=/tmp/adbpush/pytabletop-master
FILES=(android.txt
main.py
libs
pytt
)
rm -rf $TMPDIR
mkdir -p $TMPDIR
if [ x$1 = "x--all" ]
then
    # deploy everything -- slowish
    adb shell rm -rf /sdcard/kivy/pytabletop-master
    cp -L -r "${FILES[@]}" $TMPDIR
    cd $TMPDIR
    py.cleanup
    cd -
    adb push $TMPDIR /sdcard/kivy/pytabletop-master
else
    # deploy only pytt -- faster
    cp -L -r pytt $TMPDIR
    cd $TMPDIR
    py.cleanup
    cd -
    adb shell rm -rf /sdcard/kivy/pytabletop-master/pytt
    adb push $TMPDIR/pytt /sdcard/kivy/pytabletop-master
fi
# Clear the device log, then tail python messages from the app.
adb logcat -c
adb logcat -s python
| true |
829e1d3532dc6d41bf535eabf677842c2c15b754 | Shell | simonyoung/dotfiles | /brew.sh | UTF-8 | 2,465 | 3.875 | 4 | [] | no_license | #!/bin/zsh
# fancy_echo FORMAT [ARGS...]
#
# printf wrapper that surrounds the output with blank lines.  FORMAT is
# a printf format string (hence the shellcheck suppression); with extra
# ARGS and no placeholders, printf's format-reuse repeats the message.
fancy_echo() {
  local fmt="$1"; shift
  # shellcheck disable=SC2059
  printf "\n$fmt\n" "$@"
}
fancy_echo "Updating Homebrew formulae ..."
brew update
brew upgrade
# Avoid implicit `brew update` on every later brew invocation.
export HOMEBREW_NO_AUTO_UPDATE=1
BREW_PREFIX=$(brew --prefix)
###################
# Node.js         #
###################
# Manage Node.js versions with n
# First-time install path: install n, fix /usr/local permissions, then
# install the Node LTS and Yarn.  Otherwise just refresh the LTS.
if ! brew info n &>/dev/null; then
  brew install n
  # Deal with Permission Denied errors that would stop n from running - this is fine in a single user environment
  if [ ! -d "/usr/local/n" ]; then
    fancy_echo "Resolving permission issues with n..."
    sudo mkdir -p /usr/local/n
    sudo chown -R $(whoami) /usr/local/n
    sudo chown -R $(whoami) /usr/local/bin /usr/local/lib /usr/local/include /usr/local/share
  fi
  # Now install the LTS version of Node.js
  fancy_echo "Info: 'n' now installed, installing the latest Node.js LTS version ..."
  n lts
  # Add Node.js to path
  export PATH=$PATH:/usr/local/bin/node
  # Now install Yarn
  fancy_echo "Info: 'Installing Yarn ..."
  curl -o- -L https://yarnpkg.com/install.sh | bash
else
  # Check/update the LTS version of Node.js
  fancy_echo "Info: 'n' is already installed, updating to the latest Node.js LTS version ..."
  n lts
fi
# Create a user profile .npmrc if it doesn't exist and prompt user to configure
if [ ! -f "$HOME/.npmrc" ]; then
  fancy_echo "Creating an empty per-user .npmrc config file ..."
  touch "$HOME/.npmrc"
fi
###################
# Casks #
###################
# Install Visual Studio Code & the Code Settings extension to sync VS Code settings from GitHub
if [ ! -d "$BREW_PREFIX/Caskroom/visual-studio-code" ]; then
brew cask install visual-studio-code
code --install-extension Shan.code-settings-sync
else
fancy_echo "Info: Visual Studio Code is already installed"
fi
# Install Postman
if [ ! -d "$BREW_PREFIX/Caskroom/postman" ]; then
brew cask install postman
else
fancy_echo "Info: Postman is already installed"
fi
# Install Alfred
if [ ! -d "$BREW_PREFIX/Caskroom/alfred" ]; then
brew cask install alfred
else
fancy_echo "Info: Alfred is already installed"
fi
# TODO Install Alfred workflows from backup
# Install Kap for screen captures
if [ ! -d "$BREW_PREFIX/Caskroom/kap" ]; then
brew cask install kap
else
fancy_echo "Info: Kap is already installed"
fi
###################
# Cleanup #
###################
brew cleanup
| true |
5b28db19812c863248bfa50082d43eb47a52c7b1 | Shell | b23prodtm/kubespray | /extra_scripts/my_playbook.sh | UTF-8 | 4,008 | 3.9375 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Absolute path of the repository root (parent of this script's directory).
TOPDIR=$(cd "$(dirname "${BASH_SOURCE[@]}")/.." && pwd)
# set_yml_vars [-s] <vars.yml> <var_name> <value>...
# Delegates to library/yaml-tool.py to set variables in a YAML file.
# NOTE(review): on wrong arity the usage line is printed but execution
# continues; also, when -s is absent, "$silent" expands to an *empty
# argument* passed to yaml-tool.py -- confirm the tool tolerates it.
function set_yml_vars() {
	[ "$#" -lt 3 ] && echo -e "Usage: $0 -s <path/to/thevars.yml> <var_name> <value>..."
	silent=""
	[ $1 == "-s" ] && shift && silent="-s"
	[ ! -f $1 ] && logger -st $0 "File $1 not found" && exit 1
	python3 "${TOPDIR}/library/yaml-tool.py" "$silent" "$@"
}
# ask_vault <group> <ansible-vault command...>
# Operate on $GVARS/<group>/vault.yml: offer to create it with a template
# if it does not exist, otherwise run the given ansible-vault subcommand
# (e.g. "view" or "edit") on it.
function ask_vault() {
	[ "$#" -lt 2 ] && echo -e "Usage: $0 <group> <ansible-vault command>" && exit 1
	group=$1
	shift
	# NOTE(review): this prints "Press [ENTER]" but never reads input.
	printf "Vault File %s \nPress [ENTER] to continue." "${GVARS}/${group}/vault.yml"
	if [ ! -f "$GVARS/${group}/vault.yml" ]; then
		if [ "$(cfrm_act "You wish to create the vault with new passwords ?" 'y')" ]; then
			# Show a template the user can mimic, then open an editor via
			# ansible-vault create. (The "| cat" is a no-op passthrough.)
			cat << EOF | cat
---
# example password vault for group ${group}
ansible_password: raspberry
EOF
			ansible-vault create "$GVARS/${group}/vault.yml"
		fi
	else
		ansible-vault "$@" "$GVARS/${group}/vault.yml"
	fi
}
# cfrm_act <prompt> <default y|n>
# Ask a yes/no question until a valid answer is given. The default answer
# ($2) is capitalized in the prompt and used when the user just hits ENTER.
# On "yes" the answer is echoed (callers test for non-empty output);
# on "no" nothing is printed.
function cfrm_act () {
	def_go=$2
	y='y'
	n='n'
	[ "$def_go" == "$y" ] && y='Y'
	[ "$def_go" == "$n" ] && n='N'
	# $go starts unset, so the first iteration always falls through to read.
	while true; do case $go in
		[nN]*) break;;
		[yY]*) echo $go; break;;
		*)
		read -p "Confirm $1 [${y}/${n}] ? " go
		[ -z $go ] && go=$def_go;;
	esac; done
}
# Default inventory location; overridable via the INV environment variable
# or the -i/--inventory option below.
INV=${INV:-"${TOPDIR}/inventory/mycluster/inventory.yaml"}
HOSTS=$(dirname $INV)/hosts.yaml
# Default ansible-playbook options; cleared as soon as the caller supplies
# any of -b/--ask*/--timeout* (see the option loop).
defaults=(--become --become-user=root --ask-vault-pass --timeout=240 --flush-cache -e dns_early=true)
options=()
usage=("" \
"Usage: $0 [-i,--inventory <inventory/path/to/inventory.yaml>] " \
"	[-H, --hosts <inventory/path/to/hosts.yaml>] <yaml>" \
"	[--edit-vault <inventory/path/to/group/vault.yml>]" \
"	[ansible-playbook options]" \
"	<yaml>:" \
"cluster.yml	builds up a new the cluster from the inventory (CONFIG_FILE)." \
"reset.yml	resets all hosts off and the cluster is teared down." \
"scale.yml	scales up or down the cluster as defined by some" \
"	change in the inventory." \
"	options: (${defaults[@]}) if one option is used, the others must be there or it gets erased." \
"")
[ "$#" -lt 1 ] && printf "%s\n" "${usage[@]}" && exit 0
vault_cmd="view"
# Option loop: anything unrecognized is forwarded verbatim to
# ansible-playbook through the options array.
while [ "$#" -gt 0 ]; do case $1 in
	--edit-vault)
	vault_cmd="edit $*";;
	-i*|--inventory)
	INV="$2"
	HOSTS="$(dirname $INV)/hosts.yaml"
	shift;;
	-H*|--hosts)
	HOSTS=$2;;
	-h*|--help)
	printf "%s\n" "${usage[@]}"
	exit 0;;
	-[-]?b*|--ask*|--timeout*)
	options=("${options[@]}" "$1")
	defaults=();;
	*) options=("${options[@]}" "$1");;
esac; shift; done
cat $HOSTS
export CONFIG_FILE="$INV"
GVARS="$(dirname $CONFIG_FILE)/group_vars"
# Optionally regenerate the inventory from hosts.yaml, review the vault,
# and sanity-check connectivity before running the playbook.
if [ "$(cfrm_act "You wish to update the CONFIG_FILE=$CONFIG_FILE with $HOSTS ?" 'n')" ]; then
	CONFIG_FILE="$INV"
	python3 "${TOPDIR}/contrib/inventory_builder/inventory.py" load "$HOSTS"
	ask_vault "bastion" "$vault_cmd" "securized" "$vault_cmd"
	ansible -i "$INV" --ask-vault-pass -m ping all
fi
logger -st $0 "$(hostname) must be connected to the ""same network"" than the master/bastion host"
logger -st $0 "After ansible-playbook . reset.yml, a 'bare metal' reboot the cluster may be required..."
logger -st $0 "Reminder: ""A functional cluster's good DNS configuration"""
logger -st $0 "If a TASK failed on timeouts, try again with longer delays the kubernetes cluster"
logger -st $0 "Known TASKs that take more time : [Starting of Calico/Flannel/.. kube controllers], [Kubernetes Apps | Start Resources]..."
# Run the playbook; on success point the user at the follow-up script.
logger -st $0 "Run ansible-playbook -i $INV ${defaults[*]} ${options[*]}" \
	&& ansible-playbook -i "$INV" "${defaults[@]}" "${options[@]}" \
	&& logger -st $0 "Next call must be $TOPDIR/extra_scripts/my_kubectl.sh -h"
logger -st $0 "It's going to take past half an hour per host to complete the cluster boot process, pods... On failure you can reset cluster "
logger -st $0 "--flush-cache --timeout=240 reset.yml --ask-vault-pass -b"
logger -st $0 "Now that's finished and it's time to control the kube. Call sudo $TOPDIR/extra_scripts/my_kubectl.sh -i $INV --help to get help."
| true |
2eeb3e95163784d90037d5ce38b877a1320fec5a | Shell | Gsyltc/helmrepo | /updateRepository.sh | UTF-8 | 1,265 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# Package every Helm chart under CHARTS_DIR and publish the new .tgz files
# to a chart-releaser (cr) backed repository, then commit and push the
# updated index.
CHARTS_DIR=$HOME/workspace/projects/jeedom/helm/charts
WORKING_DIR=$HOME/workspace/repositories/my-helm-repo
TMP_DIR=$WORKING_DIR/.tmp
DEPLOY_DIR=$WORKING_DIR/.deploy
INDEX_DIR=$WORKING_DIR/.toindex
# Update the local repository clone first.
cd $WORKING_DIR
git pull
mkdir -p $TMP_DIR
mkdir -p $INDEX_DIR
# Package each top-level chart directory into TMP_DIR.
for currentDirectory in `find $CHARTS_DIR -maxdepth 1 -mindepth 1 -type d`
do
	echo "Current directory : $currentDirectory"
	helm package $currentDirectory --destination $TMP_DIR
	cd $TMP_DIR
	for currentFile in `find -type f`
	do
		echo "Current file : $currentFile"
		# Skip (and delete) packages whose version was already deployed.
		# NOTE(review): under bash, echo prints these \033 sequences
		# literally (no -e flag) -- confirm the intended shell.
		if [ -f $DEPLOY_DIR/$currentFile ] ; then
			echo "\033[33;1m [WARNING] $currentFile already exist\033[0;1m"
			rm $currentFile
		else
			echo "Moving file"
			mv $currentFile $WORKING_DIR/.toindex
			echo "\033[35;1m [INFO] $currentFile updated\033[0;1m"
		fi
	done
done
# Upload and index only when there is at least one new package.
if [ "$(ls -A $INDEX_DIR)" ] ; then
	echo "CR Upload"
	cr upload -p $WORKING_DIR/.toindex
	echo "======> Updating index.yaml"
	cr index -i $WORKING_DIR/index.yaml -p $INDEX_DIR
	mv $INDEX_DIR/*.tgz $DEPLOY_DIR
else
	echo "Nothing to update"
fi
git add -A .
git commit -a -m "Updating repository"
git push origin version
helm repo update
| true |
0e2272b626f54843927d38ed1555cc6aa2ef3d9c | Shell | compbio-UofT/alu-detect | /setup | UTF-8 | 5,947 | 3.84375 | 4 | [] | no_license | #!/bin/bash
# Report the failing line and exit code for any command error, and stop
# on the first failure.
trap 'echo $0: line $LINENO: exit code $?' ERR
set -e

# add_dependency <label> <check_command>
# Repeatedly eval <check_command> (with $new_path and a user-supplied
# extra path prepended to PATH) until it succeeds, prompting the user for
# the install location of <label> on each failure. The accepted extra
# path is appended to the accumulated $new_path.
add_dependency() {
    label=$1
    check_command=$2
    extra_path=
    while ! PATH=$new_path:$extra_path:$PATH eval $check_command; do
	echo "$label not found"
	extra_path=$(readlink -f "$(read -p "Enter $label path: " -e extra_path; eval echo "$extra_path")")
    done
    [ ! "$extra_path" ] || new_path=$new_path${new_path:+:}$extra_path
}
# Interactive setup for alu-detect: locates tool dependencies, derives Alu
# consensus sequences from RepeatMasker's Repbase library, and prepares
# per-reference settings (fasta, index, bowtie2 index, Alu annotations).
DIR=$( cd -P "$( dirname "${BASH_SOURCE[0]}" )"; pwd )
REF_NAME=${1:-}

[ -d "$DIR/settings" ] || mkdir settings
[ -d "$DIR/data" ] || mkdir data

GLOBAL_SETTINGS=$DIR/settings/global
if [ -r "$GLOBAL_SETTINGS" ]; then
    echo "Global settings file already exists: $GLOBAL_SETTINGS"
    echo "If you need to regenerate it, remove the existing file"
else
    #
    # first the paths
    #
    new_path=
    add_dependency "python2.6+" "which python >/dev/null 2>&1 && [ \"\$(python --version 2>&1 | cut -d '.' -f 2)\" -ge 6 ]"
    add_dependency "samtools" "which samtools >/dev/null 2>&1"
    add_dependency "bedtools" "which intersectBed >/dev/null 2>&1"
    add_dependency "RepeatMasker" "which RepeatMasker >/dev/null 2>&1"
    add_dependency "bowtie2" "which bowtie2 >/dev/null 2>&1"
    add_dependency "gmapper" "which gmapper-ls >/dev/null 2>&1"
    echo "[[ \"\$PATH\" =~ $DIR/bin ]] || PATH=$DIR/bin:$new_path${new_path:+:}\$PATH" >"$GLOBAL_SETTINGS"
    echo "[[ \"\$AWKPATH\" =~ $DIR/bin ]] || export AWKPATH=$DIR/bin\${AWKPATH:+:}\$AWKPATH" >>"$GLOBAL_SETTINGS"
#    echo "[[ \"\$PYTHONPATH\" =~ $DIR/bin ]] || PYTHONPATH=$DIR/bin:\$PYTHONPATH" >>"$GLOBAL_SETTINGS"

    #
    # next, alu consensus sequences
    #
    if [[ -z "$ALU_ALL_FA" || ! -r "$ALU_ALL_FA" ]]; then
	echo "Need to build list of Alu consensus sequences"
	ALU_ALL_FA=$DIR/data/alus.pos.fa
	ALU_NO_POLYA_FA=$DIR/data/alus.hidden-polya.fa
	REPEATMASKER_DIR=$(dirname "$(readlink -f "$(PATH=$new_path:$PATH which RepeatMasker)")")
	echo -n "Generating list of alu consensus sequences... "
	# Extract AluJ/S/Y consensus sequences and normalize their headers.
	$REPEATMASKER_DIR/util/queryRepeatDatabase.pl -species homo -class SINE |
	$DIR/bin/grep-fasta "^>Alu[JSY][a-z0-9]*#" |
	sed 's/^\(.*\)#SINE\/Alu/\1/' |
	sed 's/^\(.*\)RepbaseID:/\1/' |
	sed 's/^\(.*\)ALU/\1/' |
	awk '/^>/ {if (substr($2,1,3)=="Alu"&&$2!=substr($1,2)) $1=$1"/"substr($2,4); print $1} /^[^>]/' >$ALU_ALL_FA
	if [ -z "$(head -n 1 $ALU_ALL_FA)" ]; then
	    echo "oops"
	    cat <<EOF
Your RepeatMasker installation does not include the Repbase libraries,
which are needed to derive the Alu consensus sequences. The libraries
can be downloaded from http://www.girinst.org/server/RepBase/index.php
EOF
	    rm -f "$GLOBAL_SETTINGS"
	    exit 1
	else
	    echo "done"
	fi
	echo "Building list with hidden poly-A streches"
	$DIR/bin/hide-polya <$ALU_ALL_FA >$ALU_NO_POLYA_FA
    fi
    echo "export ALU_ALL_FA=\"$ALU_ALL_FA\"" >>"$GLOBAL_SETTINGS"
    echo "export ALU_NO_POLYA_FA=\"$ALU_NO_POLYA_FA\"" >>"$GLOBAL_SETTINGS"
fi

while [ -z "$REF_NAME" ]; do
    read -p "Enter reference name: " -e REF_NAME
done

REF_SETTINGS=$DIR/settings/ref.$REF_NAME
if [ -r "$REF_SETTINGS" ]; then
    echo "Reference settings file already exists: $REF_SETTINGS"
    echo "If you need to regenerate it, remove the existing file"
else
    . "$GLOBAL_SETTINGS"

    #
    # fasta file
    #
    while [[ -z "$REF_FA" || ! -r "$REF_FA" ]]; do
	read -p "Enter reference genome FASTA file: " -e REF_FA
	if [[ "$REF_FA" && ! -r "$REF_FA" ]]; then
	    echo "$REF_FA: not found"
	    REF_FA=
	fi
    done
    echo "export REF_FA=\"$REF_FA\"" >"$REF_SETTINGS"
    # link if not already there
    REF_FA_LINK=$DIR/data/$REF_NAME.fa
    [ -f "$REF_FA_LINK" -a ! -L "$REF_FA_LINK" ] || ln -sf "$REF_FA" "$REF_FA_LINK"

    #
    # fasta index
    #
    CHROMINFO=$REF_FA.fai
    CHROMINFO_LINK="$REF_FA_LINK".fai
    if [ -r "$CHROMINFO" ]; then
	[ -f "$CHROMINFO_LINK" -a ! -L "$CHROMINFO_LINK" ] || ln -sf "$CHROMINFO" "$CHROMINFO_LINK"
    else
	CHROMINFO=$CHROMINFO_LINK
	echo -n "Creating fasta index... "
	samtools faidx "$REF_FA_LINK"
	echo "done"
    fi
    echo "export CHROMINFO=\"$CHROMINFO\"" >>"$REF_SETTINGS"

    #
    # bowtie2 indexes
    #
    while [[ -z "$BOWTIE2_INDEX" || ! -r "$BOWTIE2_INDEX.1.bt2" ]]; do
	read -p "Enter bowtie2 index prefix: " -e BOWTIE2_INDEX
	if [[ "$BOWTIE2_INDEX" && ! -r "$BOWTIE2_INDEX.1.bt2" ]]; then
	    echo "$BOWTIE2_INDEX.1.bt2: not found"
	    BOWTIE2_INDEX=
	fi
    done
    # Every chromosome in the index must also exist in the fasta file.
    missing=$(diff <(cut -f 1 $CHROMINFO | sort) <(bowtie2-inspect -n $BOWTIE2_INDEX | sort) | grep '^>' | cut -c 3-)
    [ ! "$missing" ] || { echo "Some chromosomes in the bowtie2 index [$BOWTIE2_INDEX] are missing from the fasta file [$REF_FA]:"; echo "$missing"; exit 1; }
    echo "export BOWTIE2_INDEX=\"$BOWTIE2_INDEX\"" >>"$REF_SETTINGS"
    BOWTIE2_INDEX_LINK=$DIR/data/$REF_NAME
    for suffix in .1.bt2 .2.bt2 .3.bt2 .4.bt2 .rev.1.bt2 .rev.2.bt2; do
	[ -f "$BOWTIE2_INDEX_LINK$suffix" -a ! -L "$BOWTIE2_INDEX_LINK$suffix" ] || ln -sf "$BOWTIE2_INDEX$suffix" "$BOWTIE2_INDEX_LINK$suffix"
    done

    #
    # reference alu annotations
    #
    while [[ -z "$ALUS_BED" || ! -r "$ALUS_BED" ]]; do
	ALUS_BED=$DIR/data/alus.$REF_NAME.bed.gz
	echo "Reference Alu annotations not found"
	while [[ -z "$CHROM_FA_OUT" ]]; do
	    echo "RepeatMasker output files not found"
	    read -p "Enter path to RepeatMasker output file(s) (can use wildcards): " -e CHROM_FA_OUT
	    CHROM_FA_OUT=$(eval echo "$CHROM_FA_OUT" | xargs -n 1 readlink -e)
	done
	echo -n "Generating list of reference alus in bed format... "
	# Convert RepeatMasker .out rows for SINE/Alu into BED (0-based
	# starts, score = 100 - divergence, strand from the 'C' column).
	grep -h SINE/Alu $CHROM_FA_OUT |
	sed 's/ \+/\t/g' |
	sed 's/^\t//' |
	sed 's/\t$//' |
	awk 'BEGIN{OFS="\t"} {pct=int($2+$3+$4);if(pct>100)pct=100; if($9=="C"){strand="-"}else{strand="+"}if(substr($12,1,1)=="("){start=$13;end=$14}else{start=$12;end=$13} print $5, $6-1, $7, $10, 100-pct, strand, start, end}' |
	gzip >$ALUS_BED
	echo "done"
    done
    echo "export ALUS_BED=\"$ALUS_BED\"" >>"$REF_SETTINGS"
fi
| true |
a65bc7820f90a85741f6cb031e6429a1960e1fe6 | Shell | Dwight-D/bashscripts | /scripstrap | UTF-8 | 1,961 | 4.46875 | 4 | [] | no_license | #!/bin/bash
#Script for starting new scripts
#
# Generates an executable bash script stub with optional positional
# argument placeholders and a getopts parsing skeleton, then opens it
# in $EDITOR.
name=""
overwrite=false
args=0
opts=""

usage (){
	echo "${0##*/} - Generates a bash script stub."
	echo "Usage: $(basename $0) name [nrOfargs] [opts]"
	exit 1
}

# Parse options
#=============================
while getopts "oh" opt; do
	case $opt in
		h )
			usage
			;;
		o )
			# -o: allow overwriting an existing file
			overwrite=true
			;;
		? )
			usage
			;;
	esac
done
shift $((OPTIND -1))

# Parse arguments
#===============================
if [ "$#" -lt 1 ]; then
	usage
fi
echo -n "Generating script $name"
if [ ! -z "$2" ]; then
	args=$2
	echo " with $args arguments "
fi
if [ ! -z "$3" ]; then
	opts="$3"
	echo "Opts: $opts"
fi

name=$1
if [ -f $1 ]; then
	if [ "$overwrite" = false ]; then
		echo "Error: File exists!"
		exit
	fi
fi

# Begin output
#=================================
#Brackets redirect all output to target file
{
touch $name
chmod +x $name
echo "#!/bin/bash
#Autogenerated script template
"
#Create argument declarations
echo "# Variable declarations
#========================================
"
echo "me=\${0##*/}"
for (( i=0; i<$args; i++ )); do
	echo "_arg${i}_=\"\""
done

#Generate usage function
# (single quotes keep $me etc. literal in the generated script)
echo '
usage (){
	echo "$me - Description missing"
	echo "Usage: $me"
	exit 1
}'

#Generate opts parsing
echo "
# Parse options
#=======================================
while getopts \"h$opts\" opt; do"
echo '	case $opt in'
for (( i=0; i<${#opts}; i++ )); do
	echo "		${opts:$i:1})
			#dostuff
			;;"
done
echo '	h )
		usage
		;;
	\? )
		usage
		;;
	: )
		echo "Option requires an argument: $OPTARG"
		exit
		;;
	esac
done
shift $((OPTIND -1))
'
echo -n 'if [ "$#" -lt '
echo "$args ]; then
	usage
fi
"
}> $name

$EDITOR $name
| true |
443fa05dd803b25355f4e6850a0c209b5d7a9282 | Shell | gnowledge/gstudio-docker | /scripts/bulk-User-creation-database.sh | UTF-8 | 2,231 | 3.84375 | 4 | [] | no_license | #!/bin/bash
# Bulk-create Django users from a semicolon-separated file of
# "school_id;username;password" rows (one user per row).
dHOME="/home/docker/code";
# Mrunal : 20160131-2130 : Take user input as School id (small letter of initials of state and school no in 3 digit)
if [[ $1 == "" ]]; then
    echo "Please provide the user details file name." ;
    echo "(For example Rajasthan state and school 001 'r001-user-details' must be the default file name and hit Enter key of Keyboard)" ;
    read INPUT_FILE ;
else
    INPUT_FILE=$1;
fi
echo "File name entered is $INPUT_FILE ." ;
filename=$(basename "$INPUT_FILE")
extension="${filename##*.}"
filename1="${filename%.*}"
#echo ":$filename:$extension:$filename1:$INPUT_FILE: $dHOME/user-details/${INPUT_FILE}";
# NOTE(review): "$(unknown)" runs a (nonexistent) command named "unknown"
# and expands to an empty string; this looks like a mangled condition --
# confirm the intended comparison before changing it.
if [[ "${INPUT_FILE}" == "$(unknown)" ]]; then
    if [[ -f "$dHOME/user-details/${INPUT_FILE}" ]]; then
        INPUT_FILE="$dHOME/user-details/${INPUT_FILE}"
    fi
fi
#echo "\nNow filename : ${INPUT_FILE}"
# Validate the input: must be a non-empty path to an existing .csv file.
if [[ "${INPUT_FILE}" == "" ]] ; then
    echo "No input. Hence exiting please restart / re-run the script again." ;
    exit ;
elif [[ ! -f "${INPUT_FILE}" ]]; then
    echo "File ${INPUT_FILE} does not exists. Hence exiting please restart / re-run the script again." ;
    exit ;
elif [[ "${extension}" != csv ]]; then
    echo "Only csv file can be used to create new users. Hence exiting please restart / re-run the script again." ;
    exit ;
else
    echo "File ${INPUT_FILE} exists. Continuing the process." ;
fi
# Mrunal : 20160131-2130 : Take username and password from file and add the user. (username as "username from file"-"school id")
#INPUT_FILE='Users.csv' ;
IFS=';' ;
i=1 ;
while read sch_id Uname UPass ;
do
    echo "Name - $Uname" ;
    echo "Password - $UPass" ;
    cd $dHOME/gstudio/gnowsys-ndf/
    echo "[run] create superuser $Uname" ;
    # Feed a small Python snippet to the Django shell; it creates the user
    # only when no user with that name exists yet.
    echo "from django.contrib.auth.models import User ;
if not User.objects.filter(username='$Uname').count():
    User.objects.create_user('$Uname', '', '$UPass')
" | python manage.py shell
    if [[ $? == "0" ]]; then
        # Fixed: was "... 2&1 >> file", which backgrounded the echo (with a
        # stray "2" argument) and then tried to run a command named "1".
        # Append the success message, stdout and stderr, to the log file.
        echo "User : $Uname and Password : $UPass created successfully in the database" >> Bulk-User-creation-database.logs 2>&1
    fi
    i=$((i+1))
done < $INPUT_FILE
exit ;
| true |
e351433ce89cabcfae4e91ef7ec7c4c2fdf0bb7f | Shell | advten/meta-advantech | /meta-fsl-imx6/recipes-bsp/advantech-autobrightness/advantech-autobrightness/auto_brightness_level_mapping.sh | UTF-8 | 2,948 | 2.96875 | 3 | [] | no_license | #!/bin/sh
# Test harness state for the auto-brightness mapping check.
AMBIENT_LIGHT_SENSOR_PATH="/proc/adv_input_manager/lux"
CAL="200"
LOOP="1"
LUX="1"
MAPPING_LUX="0"
MAPPING_BL="0"
MAPPING_RES="0"
RESULT="0"
SW="0"
counter=1
SYN_BUG="0"
sleep 2
# Save the current backlight/ambient-light driver settings and install a
# known lux->brightness mapping table so the test has a fixed reference.
function init_test() {
	BK_ENABLE=$(cat "/proc/adv_input_manager/light_en")
	BK_CONTORL=$(cat "/proc/adv_input_manager/control_bl")
	BK_TABLE=$(cat "/proc/adv_input_manager/levels")
	BK_RANGE=$(cat "/proc/adv_input_manager/threshold_range")
	BK_CAL=$(cat "/proc/adv_input_manager/lux200")
	### echo BK_TABLE:$BK_TABLE
	sleep 0.5
	# Disable auto control while reprogramming the table.
	echo "0" > "/proc/adv_input_manager/light_en"
	echo "0" > "/proc/adv_input_manager/control_bl"
	echo "200" > "/proc/adv_input_manager/lux200"
	echo "[100,40][200,60][500,80][1200,140][2500,180][6000,220][10000,250]" > "/proc/adv_input_manager/levels"
	NEW_BK_TABLE=($(cat "/proc/adv_input_manager/levels"))
	echo NEW_BK_TABLE:$NEW_BK_TABLE
	sleep 1
	echo "3" > "/proc/adv_input_manager/threshold_range"
	echo "1" > "/proc/adv_input_manager/control_bl"
	echo "1" > "/proc/adv_input_manager/light_en"
}
# Restore the driver settings captured by init_test.
function recover_data() {
	echo "$BK_RANGE" > "/proc/adv_input_manager/threshold_range"
	echo "$BK_TABLE" > "/proc/adv_input_manager/levels"
	echo "$BK_ENABLE" > "/proc/adv_input_manager/light_en"
	echo "$BK_CONTORL" > "/proc/adv_input_manager/control_bl"
	echo "$BK_CAL" > "/proc/adv_input_manager/lux200"
}
# Set RESULT=1 when the measured backlight level ($BL) matches the level
# the mapping table installed by init_test prescribes for the current
# ambient lux value ($LUX). RESULT is left untouched on a mismatch.
mapping2() {
	local expected_bl
	if   (( LUX < 100 ));   then expected_bl=40
	elif (( LUX < 200 ));   then expected_bl=60
	elif (( LUX < 500 ));   then expected_bl=80
	elif (( LUX < 1200 ));  then expected_bl=140
	elif (( LUX < 2500 ));  then expected_bl=180
	elif (( LUX < 6000 ));  then expected_bl=220
	elif (( LUX < 10000 )); then expected_bl=250
	else                         expected_bl=255
	fi
	if (( BL == expected_bl )); then
		export RESULT="1"
	fi
}
# Entry point: only run when the ambient light sensor node exists.
if [ -f "$AMBIENT_LIGHT_SENSOR_PATH" ]; then
	CAL=($(cat "/proc/adv_input_manager/lux200"))
	echo CAL:$CAL
	init_test
fi
sleep 1
# Poll once a second until the mapping check passes or we time out.
while [ "$LOOP" != "0" ]
do
	sleep 1
	if [ -f "$AMBIENT_LIGHT_SENSOR_PATH" ]; then
		LUX=($(cat "$AMBIENT_LIGHT_SENSOR_PATH"))
		BL=($(cat "/sys/class/backlight/backlight/brightness"))
		if [ "$RESULT" = "0" ]
		then
			mapping2
		fi
		if [ "$RESULT" = "1" ]
		then
			# Mapping matched: restore the saved settings and report PASS.
			recover_data
			export LOOP="0"
			echo LUX:$LUX
			echo BL:$BL
			echo "auto brightness mapping table"
			echo "PASS"
		fi
	else
		recover_data
		export LOOP="0"
		echo "Light Sensor node is not exist"
		echo "FAIL"
	fi
	# Give up after ~5 polls without a match.
	if [ "$counter" -gt "5" ]
	then
		recover_data
		echo "time out"
		echo "FAIL"
		export LOOP="0"
	fi
	counter=$(($counter+1))
	### echo "counter:$counter"
done
| true |
323ba7555cd1817f672902102ecc5a85d1851675 | Shell | corretto/amazon-corretto-crypto-provider | /tests/ci/docker_images/linux-arm/push_images.sh | UTF-8 | 671 | 3.265625 | 3 | [
"Apache-2.0",
"BSD-3-Clause",
"OpenSSL"
] | permissive | #!/bin/bash -ex
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Tag the locally built ARM docker images and push them to ECR.
# Usage: push_images.sh [ecr-repo-uri]  (defaults to the ACCP CI repo)
source ./common.sh

if [ -z ${1+x} ]; then
  ECS_REPO="838297025124.dkr.ecr.us-west-2.amazonaws.com/accp-docker-images-linux"
else
  ECS_REPO=$1
fi

echo "Uploading docker images to ${ECS_REPO}."

# Runs the docker-login command emitted by the AWS CLI.
# NOTE(review): "aws ecr get-login" is the AWS CLI v1 form -- confirm the
# CLI version in use before migrating to get-login-password.
$(aws ecr get-login --no-include-email)

# Tag images with date to help find old images, CodeBuild uses the latest tag and gets updated automatically
tag_and_push_img 'ubuntu-20.04:gcc-7x_corretto-arm' "${ECS_REPO}:ubuntu-20.04_gcc-7x_corretto_arm"

tag_and_push_img 'amazonlinux-2:gcc-7x_corretto-arm' "${ECS_REPO}:amazonlinux-2_gcc-7x_corretto_arm"
| true |
59831329e381b71a34c53eb8c5ce9fb601339696 | Shell | fairhopeweb/aim | /docker/build-wheels.sh | UTF-8 | 565 | 3.421875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Build aim wheels for each supported CPython ABI inside a manylinux
# container, then repair them into manylinux wheels with auditwheel.
cd /opt/aim
echo "build python wheels"
for python_version in 'cp36-cp36m' 'cp37-cp37m' 'cp38-cp38' 'cp39-cp39' 'cp310-cp310'
do
  PYTHON_ROOT=/opt/python/${python_version}/
  if [ $python_version != "cp310-cp310" ]
  then
    # downgrade to pip-18
    $PYTHON_ROOT/bin/pip install --upgrade pip==18
  fi
  $PYTHON_ROOT/bin/python setup.py bdist_wheel -d linux_dist
done
# produce multilinux wheels
# NOTE(review): iterating over $(ls ...) breaks on whitespace in names;
# wheel filenames contain none, so it works here.
for whl in $(ls ./linux_dist)
do
  auditwheel repair linux_dist/${whl} --wheel-dir multilinux_dist
done
echo "python wheels build. SUCCESS"
echo "DONE" | true |
6eb5fcddf35be91da28347c8e823076bea685741 | Shell | wezm/dotfiles | /scripts/follower-count | UTF-8 | 562 | 3.546875 | 4 | [] | no_license | #!/bin/sh
set -e

# Print a CSV row "UTC-timestamp,follower-count" for a Twitter username,
# scraped from a Nitter instance. Intended to be appended to a CSV by cron.
# This script runs in ash on Alpine Linux. wget is the busybox wget.

# # min  hour  day  month  weekday command
# 59 10 * * * /home/wmoore/follower-count wezm >> /home/wmoore/follower-count.csv

# scraper (Rust CLI) is installed under ~/.cargo/bin.
export PATH="$HOME/.cargo/bin:$PATH"

USERNAME="$1"
if [ -z "$USERNAME" ]; then
  echo "Usage: follower-count username"
  exit 1
fi

DATE=$(date -u '+%Y-%m-%dT%H:%M:%SZ')
# Fetch the profile page, pull the follower stat node, keep digits only.
COUNT=$(wget -O - -o /dev/null "https://nitter.decentralised.social/$USERNAME" | scraper -t '.profile-statlist .followers .profile-stat-num' | tr -cd '0-9')

echo "$DATE","$COUNT"
| true |
7b7b820c65954a2036cb5b2ab988bf0dd7f83b99 | Shell | thorgeir93/i3wm-config-thorgeir | /microprograms/screen_1.sh | UTF-8 | 225 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Print the main screen output name depending on
# which computer this script is running on.
#
# Fixed: the original compared strings with the *integer* operator "-eq"
# (always an error for non-numeric values) and used "return", which is
# invalid at the top level of a script and cannot yield a string anyway;
# the screen name is now written to stdout instead.
export HOST=$(uname -n)

if [ "$HOST" = "MEGAS" ]; then
    echo "HDMI-1"
fi

if [ "$HOST" = "thorgeir" ]; then
    echo "LVDS1"
fi
| true |
bd6c9fa0191fa2ef6db4de0b1008375887faf23e | Shell | microice333/IPP1 | /test.sh | UTF-8 | 450 | 3.875 | 4 | [] | no_license | #!/bin/bash
# Run every $DIR/test*.in file through PROGRAM, comparing its stdout and
# stderr against the matching .out/.err fixtures, and report a summary.
#
# Usage: test.sh [parameters] program test-dir
#   With three arguments, $1 is an extra parameter string for the program.
PASSED=0
ALL=0
PARAMETERS=
PROGRAM=
DIR=

if [ $# == 3 ]; then
    PARAMETERS=$1
    PROGRAM="./$2"
    DIR=$3
else
    PROGRAM="./$1"
    DIR=$2
fi

for file in "$DIR"/test*.in; do
    # $PARAMETERS is deliberately unquoted so it can expand to several words.
    $PROGRAM $PARAMETERS < "$file" > out 2> err
    if diff out "${file%in}out"; then
        if diff err "${file%in}err"; then
            PASSED=$((PASSED+1))
            echo "[$file] OK"
        else
            # Fixed: a stderr mismatch previously produced no verdict at
            # all (neither OK nor WRONG was printed).
            echo "[$file] WRONG"
        fi
    else
        echo "[$file] WRONG"
    fi
    ALL=$((ALL+1))
done

echo "Passed $PASSED from $ALL tests."
# -f: do not fail if a test run never produced these files.
rm -f out err
| true |
a5ff1b508820e8fa8d43d152eff8235f31c80300 | Shell | ps2homebrew/Open-PS2-Loader | /download_lwNBD.sh | UTF-8 | 384 | 3.203125 | 3 | [
"AFL-3.0"
] | permissive | #!/bin/bash
## Download lwNBD
# Clone (or update) the lwNBD dependency into the modules tree, pinned to
# a specific commit for reproducible builds.
REPO_URL="https://github.com/bignaux/lwNBD.git"
REPO_FOLDER="modules/network/lwNBD"
COMMIT="9777a10f840679ef89b1ec6a588e2d93803d7c37"

if test ! -d "$REPO_FOLDER"; then
  git clone $REPO_URL "$REPO_FOLDER" || { exit 1; }
  # Subshell keeps the caller's cwd; the trailing "cd -" is therefore a no-op.
  (cd $REPO_FOLDER && git checkout "$COMMIT" && cd -) || { exit 1; }
else
  # Folder exists: refresh from origin and re-pin the commit.
  (cd "$REPO_FOLDER" && git fetch origin && git checkout "$COMMIT" && cd - )|| exit 1
fi
| true |
49f020b83483392b312979f22ed6433a7965db08 | Shell | ncronquist/dotfiles | /bin/yrs | UTF-8 | 183 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Run a node project using yarn with a specified NODE_ENV
# - defaults to dev when no argument (or an empty one) is given
env="${1:-dev}"

NODE_ENV="${env}" yarn start
| true |
4bc1ae3cce8a511f30be3c13c257b56d66b22b69 | Shell | custodietmssp/Custodiet-Build | /Dev Environment/scripts/setup/install-vbox.sh | UTF-8 | 4,980 | 3.8125 | 4 | [] | no_license | #!/bin/sh -e
# install the most recent v-box and extension pack
# 130923 - dls - initial version
#
# Helper functions below rely on globals set by the main section:
#   $url, $id, $codename, $arch, $filename_ext.

# =====================================
# variables

# vbox download site
vbox_site=download.virtualbox.org
vbox_download=${vbox_site}/virtualbox

# =====================================
# functions

download_checksums() {
    # always download a new version of the checksum file
    #+ so we can get the up-to-date version of the .deb filename to download
    rm --force SHA256SUMS
    wget ${url}/SHA256SUMS
}

# Download (if needed) and checksum-verify the distro-matching .deb.
download_vbox() {
    download_checksums
    # get the actual filename by grepping it in the SHA256SUMS file
    filename_deb=$( grep ~${id}~${codename}_${arch}.deb SHA256SUMS | \
	sed s:^.*\ .:: )
    echo "--filename_deb: ${filename_deb}"
    if [ -f "${filename_deb}" ] ; then
	echo "--> deb file already downloaded"
    else
	echo "--> downloading vbox deb; this will take a few moments"
	wget --progress=bar ${url}/${filename_deb}
    fi
    # run checksum
    echo "--> run checksum against download"
    grep ${filename_deb} SHA256SUMS | sha256sum -c
}

# Install dependencies, the downloaded .deb, and (re)build kernel modules.
install_vbox() {
    echo "--updating pkg list - this could take a few moments"
    if aptitude -q=2 update ; then
	echo "--pkg list updated"
    else
	echo "---> \"aptitude update\" failed !!!"
	exit 1
    fi
    # first install dependency
    echo "--> installing dependencies for vbox"
    list_deps="
	dkms
	gcc
	xmlstarlet
	libsdl1.2debian
	libgl1-mesa-glx
	libqt4-network
	libqt4-opengl
	libqtcore4
	libqtgui4
	libvpx1
	libxcursor1
	libxinerama1
	libxmu6
	libxt6
	"
    aptitude -y install ${list_deps}
    # install vbox
    echo "--> installing vbox .deb w/ dpkg"
    # NOTE(review): ${vbox_deb_name_wc} is never assigned in this script --
    # confirm whether ${filename_deb} was intended here.
    file_deb=$( ls ${vbox_deb_name_wc} )
    echo "--file_deb: ${file_deb}"
    dpkg -i ${file_deb} || true
    # this may be unnecessary, but sometimes it is not done at installation
    echo "--> compile/recompile vbox kernel modules"
    /etc/init.d/vboxdrv setup
}

# Download (if needed) and checksum-verify the extension pack.
download_extpack() {
    download_checksums
    # ext pack filename
    if [ -f ${filename_ext} ] ; then
	echo "-- extension pack already downloaded"
    else
	echo "--> downloading extension pack"
	wget ${url}/${filename_ext}
    fi
    # run checksum
    echo "--> run checksum against download"
    grep ${filename_ext} SHA256SUMS | sha256sum -c
}

install_extpack() {
    echo "--> installing extension pack"
    # the "--replace" switch must follow "install"
    VBoxManage extpack install --replace ${filename_ext}
}
# =====================================
# sanity checks

echo "-= $0 =-"
if [ $(id -u) = 0 ] ; then
    echo "--running as root: OK"
else
    echo "--> this script must be run as root !!!"
    exit 1
fi

# =====================================
# get version and downloads

# get the file containing version number of the most recent release
version_latest=$( wget -qO- ${vbox_download}/LATEST.TXT )
echo "--version_latest: ${version_latest}"
# dir contianing the files
url=http://${vbox_download}/${version_latest}

# Split "major.minor.patch" into its components.
ver_major=$( echo ${version_latest} | cut -d . -f 1 )
echo "--ver_major: ${ver_major}"
ver_minor=$( echo ${version_latest} | cut -d . -f 2 )
echo "--ver_minor: ${ver_minor}"
ver_patch=$( echo ${version_latest} | cut -d . -f 3 )
echo "--ver_patch: ${ver_patch}"

# Distro id/codename/arch select the matching .deb filename.
id=$( lsb_release -s --id )
echo "--id: ${id}"
codename=$( lsb_release -s --codename )
echo "--codename: ${codename}"
arch=$( dpkg --print-architecture )
echo "--arch: ${arch}"

# =====================================
# install/upgrade vbox

# is vbox already installed
# NOTE(review): "which VBox" checks for a binary named "VBox" --
# confirm this is the intended probe (vs. VBoxManage/virtualbox).
if which VBox ; then
    echo "--> vbox is already installed"
    # get the installed version
    ##version_vbox=$(VBoxManage -v)
    ##echo "version_vbox: ${version_vbox}"
    if VBoxManage -v | grep ${version_latest} ; then
	echo "--> vbox is already the latest version"
    else
	# NOT the latest version
	echo "--> installing a new version of vbox"
	download_vbox
	install_vbox
    fi
else
    download_vbox
    install_vbox
fi

# check that it is correctly installed
# dpkg state "iU" means installed but not configured.
pkg_name=virtualbox-${ver_major}.${ver_minor}
if dpkg -l ${pkg_name} | grep ^iU ; then
    echo "---> there was a problem installing \"${pkg_name}\""
    echo "---> it is not configured correctly"
    echo "---> it may be missing dependencies"
    exit 1
else
    echo "--> ${pkg_name} is installed correctly"
fi

# =====================================
# install most recent version of extension pack

# check if extpack is installed
ext_packs=$( VBoxManage list extpacks | grep ^Version: | sed s/^.*:[[:space:]]*// )
echo "ext_packs: ${ext_packs}"

# this should be the filename of the most recent ext pack
filename_ext=Oracle_VM_VirtualBox_Extension_Pack-${ver_major}.${ver_minor}.${ver_patch}.vbox-extpack
echo "-- filename_ext: ${filename_ext}"

if [ "$( VBoxManage list extpacks )" = "Extension Packs: 0" ] ; then
    echo "--> extension pack is not installed"
    download_extpack
    install_extpack
elif [ "${ext_packs}" = ${version_latest} ] ; then
    echo "--> already have the most recent vbox extension pack"
else
    echo "--> extension pack is not the most recent"
    download_extpack
    install_extpack
fi

# =====================================
# fin

echo "- exiting -= $0 =-"
| true |
562044f58dade5d977f623d79dbdb9d0ad407069 | Shell | sujanshresthanet/drupal-docker-lite | /scripts/create.sh | UTF-8 | 461 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Create a new drupal-docker-lite project in a fresh directory and start it.
# Usage: create.sh <directory>
DIR=$1

if [[ ! "$DIR" ]]; then
  echo "A directory name is required"
  exit 1
fi

if [[ -d "$DIR" ]]; then
  echo "The $DIR directory already exists"
  exit 1
fi

git clone https://github.com/mortenson/drupal-docker-lite.git "$DIR"
# NOTE(review): message_on_error and $DDL are not defined in this script --
# presumably provided by the caller's environment; confirm.
message_on_error "Errors encountered when cloning drupal-docker-lite"

cd $DIR
$DDL start
message_on_error "Errors encountered when starting drupal-docker-lite"

echo "New drupal-docker-lite created in \"$DIR\""
| true |
31eaeac1de0f2181c537716aad4d469dfd4ed65a | Shell | jharuda/network | /.travis/runcoveralls.sh | UTF-8 | 3,206 | 4.4375 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# SPDX-License-Identifier: MIT

# Reports coverage results using coveralls. The aim of this script is to
# provide a unified way to reporting coverage results across all linux system
# roles projects.

# The given command line arguments are passed to coveralls.

# Environment variables:
#
#   LSR_PUBLISH_COVERAGE
#     If the variable is unset or empty (the default), no coverage is published.
#     Other valid values for the variable are:
#       strict - the reporting is performed in strict mode, so situations
#                like missing data to be reported are treated as errors
#       debug  - coveralls is run in debug mode (see coveralls debug --help)
#       normal - coverage results will be reported normally
#   LSR_TESTSDIR
#     a path to directory where tests and tests artifacts are located; if unset
#     or empty, this variable is set to ${TOPDIR}/tests; this path should
#     already exists and be populated with tests artifacts before the script
#     starts performing actions on it

set -e

ME=$(basename $0)
SCRIPTDIR=$(readlink -f $(dirname $0))

# lsr_info/lsr_error and TOPDIR come from these sourced files.
. ${SCRIPTDIR}/utils.sh
. ${SCRIPTDIR}/config.sh

# Publish the results only if it is desired.
if [[ -z "${LSR_PUBLISH_COVERAGE}" ]]; then
  lsr_info "${ME}: Publishing coverage report is not enabled. Skipping."
  exit 0
fi

case "${LSR_PUBLISH_COVERAGE}" in
  strict) : ;;
  debug) : ;;
  normal) : ;;
  *) lsr_error Error: \"${LSR_PUBLISH_COVERAGE}\" is not a valid option ;;
esac

LSR_TESTSDIR=${LSR_TESTSDIR:-${TOPDIR}/tests}

# Ensure we are in $LSR_TESTSDIR. It is supposed that if a user wants to submit
# tests results, $LSR_TESTSDIR always exists.
cd ${LSR_TESTSDIR}

# For simplicity, we suppose that coverage core data file has name .coverage
# and it is situated in $LSR_TESTSDIR. Similarly for .coveragerc.
COVERAGEFILE='.coverage'
COVERAGERCFILE='.coveragerc'

# In case there is no $COVERAGEFILE, there is nothing to report. If we are
# running in strict mode, treat this situation as error.
if [[ ! -s ${COVERAGEFILE} ]]; then
  NO_COVERAGEFILE_MSG="${COVERAGEFILE} is missing or empty"
  if [[ "${LSR_PUBLISH_COVERAGE}" == "strict" ]]; then
    lsr_error "${ME} (strict mode): ${NO_COVERAGEFILE_MSG}!"
  fi
  lsr_info "${ME}: ${NO_COVERAGEFILE_MSG}, nothing to publish."
  exit 0
fi

# Create $COVERAGERCFILE file with a [paths] section. From the official docs:
#
#   The first value must be an actual file path on the machine where the
#   reporting will happen, so that source code can be found. The other values
#   can be file patterns to match against the paths of collected data, or they
#   can be absolute or relative file paths on the current machine.
#
# So in our $COVERAGERCFILE file we make both locations to point to the
# project's top directory.
cat > ${COVERAGERCFILE} <<EOF
[paths]
source =
    ..
    $(readlink -f ..)
EOF

# Rename $COVERAGEFILE to ${COVERAGEFILE}.merge. With this trick, coverage
# combine applies configuration in $COVERAGERCFILE also to $COVERAGEFILE.
mv ${COVERAGEFILE} ${COVERAGEFILE}.merge
python -m coverage combine --append

MAYBE_DEBUG=""
if [[ "${LSR_PUBLISH_COVERAGE}" == "debug" ]]; then
  MAYBE_DEBUG=debug
fi

# Trace the final command for CI logs, then publish.
set -x
coveralls ${MAYBE_DEBUG} "$@"
| true |
4b7b54da2f629ddbd44c1ac0408e6dce8e62587c | Shell | TheKoguryo/cf-cli-resource | /itest/run-synchronous-service-tests | UTF-8 | 2,166 | 3.1875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -eu
set -o pipefail

# Integration test suite for synchronous service handling in the
# cf-cli-resource; test helpers (run, it_can_* etc.) come from these files.
test_dir=$(dirname $0)
source $test_dir/config.sh
source $test_dir/helpers.sh

# Thin wrappers around it_can_create_a_service so each scenario gets a
# distinct, descriptive name in the test output.
it_can_create_a_service_that_does_not_exist() {
  it_can_create_a_service "$@"
}

it_can_create_a_service_that_already_exists() {
  it_can_create_a_service "$@"
}

it_can_create_a_service_that_already_exists_and_update_with_changes() {
  it_can_create_a_service "$@"
}

it_can_create_a_service_that_already_exists_and_update_without_changes() {
  it_can_create_a_service "$@"
}

# Test fixtures; names include spaces deliberately to exercise quoting.
org=$(generate_test_name_with_spaces "Org")
space=$(generate_test_name_with_spaces "Space")
app_name=$(generate_test_name_with_spaces "App")

service=$CCR_SYNC_SERVICE
plan_1=$CCR_SYNC_PLAN_1
plan_2=$CCR_SYNC_PLAN_2
service_instance=$(generate_test_name_with_spaces "Sync Service")
configuration_1=$CCR_SYNC_CONFIGURATION_1
configuration_2=$CCR_SYNC_CONFIGURATION_2
tags="list, of, tags"
wait_for_service="false"
update_service="true"

setup_integration_tests "$org" "$space"

# Arguments are pre-escaped (\") because the run helper re-evaluates them.
run it_can_create_a_service_that_does_not_exist \"$org\" \"$space\" \"$service\" \"$plan_1\" \"$service_instance\" \"$configuration_1\"
run it_can_create_a_service_that_already_exists \"$org\" \"$space\" \"$service\" \"$plan_1\" \"$service_instance\" \"$configuration_1\"

run it_can_create_a_service_that_already_exists_and_update_with_changes \"$org\" \"$space\" \"$service\" \"$plan_2\" \"$service_instance\" \"$configuration_2\" \"$wait_for_service\" \"$update_service\"
run it_can_create_a_service_that_already_exists_and_update_without_changes \"$org\" \"$space\" \"$service\" \"$plan_2\" \"$service_instance\" \"$configuration_2\" \"$wait_for_service\" \"$update_service\"

run it_can_update_a_service \"$org\" \"$space\" \"$service_instance\" \"$plan_1\" \"$configuration_1\" \"$tags\"

run it_can_push_an_app \"$org\" \"$space\" \"$app_name\"

run it_can_bind_a_service \"$org\" \"$space\" \"$app_name\" \"$service_instance\"

run it_can_unbind_a_service \"$org\" \"$space\" \"$app_name\" \"$service_instance\"
run it_can_delete_a_service \"$org\" \"$space\" \"$service_instance\"

run it_can_delete_an_app \"$org\" \"$space\" \"$app_name\"

teardown_integration_tests "$org" "$space"
| true |
6cb8765c0d83558c9411d09d15189afafbe8dbda | Shell | esposj/radio_recorder | /Test_Scripts/radio_streamer_tester.sh | UTF-8 | 845 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# Tune an RTL-SDR dongle to an FM frequency (default 106.5 MHz), transcode
# the audio with sox, and stream it to an Icecast server via ezstream.
if [ $# -eq 0 ]; then
	FREQ=106.5
else
	FREQ=$1
fi

# rtl_fm demodulates -> sox resamples/encodes to mp3 -> ezstream uploads.
# The commented-out sox line is an alternative vorbis encoding; after the
# line-continuation joins it onto the pipeline it becomes a comment
# following "|", and bash then reads ezstream as the next pipe stage.
rtl_fm -f ${FREQ}M -M fm -s 180k -A fast -l 0 -E deemp -p 82 -g 20 | \
	sox -r 180k -t raw -e signed -b 16 -c 1 -V3 -v 2.2 - -r 48k -t mp3 - sinc 0-15k -t 1000 | \
	#sox -r 180k -t raw -e signed -b 16 -c 1 -V1 -v 2.2 - -r 32k -t vorbis - sinc 0-15k -t 1000 | \
	ezstream -c ezstream_stdin_mp3.xml &
# NOTE(review): PID of the background pipeline is captured but never used.
PID=$!

# Input Options
# -r 180k: sample rate of input source
# -t raw: defines the type of input
# -e signed: encoding type of signed for input
# -b 16: number of bits in encoded sample
# -c 1: number of audio channels in encoded sample
# -V1: verbose level 1 (only errors shown) 2 and 3 may prove useful!
# -v 2.2 volume as a multiplier
# - (use stdin)

# Output Options
# -r 32k: sample rate for output
# -t vorbis: output format
# - filename
# sinc 0-15k bandpass filter
| true |
a599920933b82faaa3c3d91b2314602242e72a63 | Shell | ZimbiX/dotfiles | /bin/git-stash-selection | UTF-8 | 327 | 3.484375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#
# Git stash only a selection of files, with a message.
#
# Usage:
#   git-stash-selection [<message>] [<paths>...]

message=$1
shift

# Stage everything, then un-stage the paths we want stashed, so the temp
# commit below captures only the files to KEEP out of the stash.
# "$@" (instead of a re-split "$*") keeps paths with spaces intact; with no
# paths this is a bare `git reset`, exactly as before.
git add --all
git reset -- "$@"

# Park the kept changes in a throwaway commit...
git commit --allow-empty -m "temp - excluded from stash"

# ...stash what is left (the selection).  The message is quoted so spaces
# and glob characters survive; with no message, no argument is passed.
git add --all
git stash save ${message:+"$message"}

# Dissolve the temp commit, restoring the kept changes unstaged.
git reset --soft HEAD^
git reset
| true |
c021600ebf0150fb8b73f948307a091eec3894ed | Shell | michaelmob/it490-car-calendar | /broker/provision-broker.sh | UTF-8 | 2,089 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env bash
# Exit on first failure and trace each command as it runs
set -ex

# Ensure required environment variables exist.  Bug fix: the original used
# a bare `exit`, which returns status 0 and lets callers believe the
# provisioning succeeded even though nothing was configured.
[[ -n "$RABBITMQ_LOG_USER" ]] || { echo '$RABBITMQ_LOG_USER is not set' >&2; exit 1; }
[[ -n "$RABBITMQ_LOG_PASS" ]] || { echo '$RABBITMQ_LOG_PASS is not set' >&2; exit 1; }
[[ -n "$RABBITMQ_WEB_USER" ]] || { echo '$RABBITMQ_WEB_USER is not set' >&2; exit 1; }
[[ -n "$RABBITMQ_WEB_PASS" ]] || { echo '$RABBITMQ_WEB_PASS is not set' >&2; exit 1; }
[[ -n "$RABBITMQ_DMZ_USER" ]] || { echo '$RABBITMQ_DMZ_USER is not set' >&2; exit 1; }
[[ -n "$RABBITMQ_DMZ_PASS" ]] || { echo '$RABBITMQ_DMZ_PASS is not set' >&2; exit 1; }
[[ -n "$RABBITMQ_ADMIN_USER" ]] || { echo '$RABBITMQ_ADMIN_USER is not set' >&2; exit 1; }
[[ -n "$RABBITMQ_ADMIN_PASS" ]] || { echo '$RABBITMQ_ADMIN_PASS is not set' >&2; exit 1; }

# Update package lists and install rabbitmq
apt-get update
apt-get install -y rabbitmq-server

# Enable and start the rabbitmq server
systemctl --now enable rabbitmq-server

# Create admin account (quoted: credentials may contain spaces or globs).
# NOTE(review): despite the original comment, the stock "guest" user is
# never deleted here — confirm whether that is intended.
rabbitmqctl add_user "$RABBITMQ_ADMIN_USER" "$RABBITMQ_ADMIN_PASS"
rabbitmqctl set_user_tags "$RABBITMQ_ADMIN_USER" administrator
rabbitmqctl set_permissions -p / "$RABBITMQ_ADMIN_USER" ".*" ".*" ".*"

# Create rabbitmq users for web and dmz
rabbitmqctl add_user "$RABBITMQ_WEB_USER" "$RABBITMQ_WEB_PASS"
rabbitmqctl add_user "$RABBITMQ_DMZ_USER" "$RABBITMQ_DMZ_PASS"
rabbitmqctl add_user "$RABBITMQ_LOG_USER" "$RABBITMQ_LOG_PASS"
rabbitmqctl set_permissions -p / "$RABBITMQ_WEB_USER" ".*" ".*" ".*"
rabbitmqctl set_permissions -p / "$RABBITMQ_DMZ_USER" ".*" ".*" ".*"
# The log user is restricted to resources named log-*
rabbitmqctl set_permissions -p / "$RABBITMQ_LOG_USER" "^log-.*" "^log-.*" "^log-.*"

# Enable rabbitmq web interface
rabbitmq-plugins enable rabbitmq_management

# Allow password auth (temporarily, until we can copy a key over)
sed -i '/PasswordAuthentication/c\PasswordAuthentication yes' /etc/ssh/sshd_config
service ssh restart

# Set up firewall
sed -i '/\-\-icmp/d' /etc/ufw/before.rules # block pinging from unknown
ufw default deny incoming # deny all incoming connections
ufw allow from 10.0.2.0/24 # allow from host
ufw allow from 10.0.0.0/24 # allow from network
ufw --force enable
ufw reload
| true |
192914433f3e4a114eb2798a0ea60cd582afe520 | Shell | Rufflewind/config | /home/sbin/github-clone | UTF-8 | 1,208 | 4.15625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
set -eu

prog=$(basename "$0")

# Print usage to stderr and exit with a non-zero status.
usage() {
    cat >&2 <<EOF
usage: $prog <original-user>/<project> [args...]
EOF
    exit 1
}

if [ $# -lt 1 ]
then
    usage
fi

case $1 in
    -*) usage;;
esac

# Per-user configuration must define 'forkroot'; 'destdir' is optional.
if [ -f "$HOME/.config/$prog.sh" ]
then
    . "$HOME/.config/$prog.sh"
else
    cat >&2 <<EOF
$prog: config file (~/.config/$prog.sh) is missing
You must define the 'forkroot' variable in the config file.
A simple example would be:
forkroot=github.com:<username>
You can optionally define the 'destdir' variable.
EOF
    exit 1
fi

# Accept a full https URL, a github.com/… form, or a bare user/project.
url=$1
case $url in
    http*://github.com/*/*)
        user=$(printf "%s" "$url" | cut -d / -f 4)
        proj=$(printf "%s" "$url" | cut -d / -f 5)
        ;;
    github.com/*/*)
        user=$(printf "%s" "$url" | cut -d / -f 2)
        proj=$(printf "%s" "$url" | cut -d / -f 3)
        ;;
    */*)
        user=$(printf "%s" "$url" | cut -d / -f 1)
        proj=$(printf "%s" "$url" | cut -d / -f 2)
        ;;
    *)
        printf >&2 "%s: invalid repo name: %s\n" "$prog" "$url"
        # Bug fix: previously this fell through without exiting and the
        # script then died on the unset $user (set -u) with a confusing
        # "unbound variable" message instead of the diagnostic above.
        exit 1
        ;;
esac

if [ "${destdir+x}" ]
then
    printf >&2 "%s\n" "note: cloning to $destdir"
    cd "$destdir"
fi

git-clone-both "https://github.com/$user/$proj" "git@$forkroot/$proj"
| true |
150bef02e969a1e76cbd12aa7dc299410086ebbd | Shell | AndrewDDavis/can-bind-dmri | /bin/sn_submit_dmasks_jobs | UTF-8 | 3,114 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# Run this on Graham to start dmasks jobs
# Submits one SLURM job per dwi_merged.nii.gz found under data/subprojects/,
# sizing memory/time limits by image resolution.  Usage:
#   sn_submit_dmasks_jobs [-n] [visit_id ...]     (-n = dry run)

# Arg defaults and parsing
dry_run=False
vis_list=()
while [[ $# -gt 0 ]]; do
    if [[ "$1" == '-n' ]]; then
        dry_run=True
        shift
    else
        # unquoted $1 word-splits on purpose, so a space-separated list of
        # visit ids can be passed as one argument
        vis_list=($1)
        shift
    fi
done

# CB-DTI project
cd ~/project/davisad/CAN-BIND_DTI
export PWD

# Ensure FSL is available (fslval is used below to read image dimensions)
if ! which fsl > /dev/null 2>&1; then
    echo "FSL not found"
    exit 2
elif [[ "$FSLDIR" != /home/davisad/fsl ]]; then
    echo "FSLDIR incorrect"
    exit 2
fi

# Locate dwi_merged niis to be run
echo "Locating dwi_merged files..."
if [[ ${#vis_list[@]} -gt 0 ]]; then
    files_arr=()
    for v in "${vis_list[@]}"; do
        # NOTE(review): the unquoted $(find ...) word-splits; safe only
        # while these dataset paths contain no whitespace — confirm.
        files_arr+=($(find 'data/subprojects/' -wholename "*/$v/dti_preprocessed/dwi_merged.nii.gz" -print))
    done
else
    files_arr=($(find 'data/subprojects/' -wholename '*/*/dti_preprocessed/dwi_merged.nii.gz' -print | sort))
fi
if [[ ${#files_arr[@]} -eq 0 ]]; then
    echo "No files found"
    exit 2
else
    echo "Found ${#files_arr[@]} files"
    start_time=$(date '+%H:%M')
fi

# Add dmasks job to the Graham scheduler
for f in "${files_arr[@]}"; do
    # e.g. f -> data/subprojects/CAM_B/CAM_0006_01/dti_preprocessed/dwi_merged.nii.gz
    new_path="${f%/dwi_merged.nii.gz}"
    # 4th path component is the visit id (e.g. CAM_0006_01)
    # NOTE(review): `printf $f` uses $f as the printf format string; fine
    # while paths contain no '%', but printf '%s' "$f" would be safer.
    new_vid=$(printf $f | cut -d '/' -f 4)
    # check for run already started
    if [[ -n "$(find "${new_path}" -maxdepth 1 -name 'dwi_dmasks-*.out' -print -quit)" ]]; then
        echo "sbatch output file exists; skipping ${new_path}"
        continue
    elif [[ -d "${new_path}/dwi_dmasks" ]]; then
        echo "dwi_dmasks dir exists; skipping ${new_path}"
        continue
    fi
    # make a copy of the generic job and edit it (':' as sed delimiter
    # because ${new_path} contains slashes)
    cp -f ~/bin/sn_dmasks_indiv_job ~/bin/sn_dmasks_indiv_job_working
    sed --in-place "s:dummy_path:${new_path}:g" ~/bin/sn_dmasks_indiv_job_working
    sed --in-place "s/dummy_vid/${new_vid}/" ~/bin/sn_dmasks_indiv_job_working
    # UCA_A jobs need a *lot* more memory
    # Check for 128 or 256 resolution, allocate more
    res=$(fslval $f dim1)
    if [[ $res -eq 128 ]]; then
        sed --in-place "s/mem_mb/1710/" ~/bin/sn_dmasks_indiv_job_working
        sed --in-place "s/time_limit/01:20:00/" ~/bin/sn_dmasks_indiv_job_working
    elif [[ $res -eq 256 ]]; then
        sed --in-place "s/mem_mb/6800/" ~/bin/sn_dmasks_indiv_job_working
        sed --in-place "s/time_limit/05:20:00/" ~/bin/sn_dmasks_indiv_job_working
    else
        sed --in-place "s/mem_mb/1280/" ~/bin/sn_dmasks_indiv_job_working
        sed --in-place "s/time_limit/00:45:00/" ~/bin/sn_dmasks_indiv_job_working
    fi
    # schedule the job
    if [[ $dry_run == True ]]; then
        echo "Would submit job for ${new_path}"
    else
        echo "Submitting job for ${new_path}"
        sbatch ~/bin/sn_dmasks_indiv_job_working
    fi
    rm ~/bin/sn_dmasks_indiv_job_working
    # don't overwhelm the scheduler
    sleep 1
done
echo "Done"
echo "Monitor jobs with: squeue -u $USER -t all"
echo "and/or: sacct --format='JobID,JobName%15,State,ExitCode,Timelimit,Elapsed,NCPUS,TotalCPU,ReqMem,MaxRSS' -S ${start_time}"
| true |
b9d6e002d3cc8fa168cc28f938053e068154da4d | Shell | shkreza/iotlock | /bin/deploy_gcp_secrets.sh | UTF-8 | 411 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Resolve the directory containing this script, then the repo-level
# secrets directory that sits next to it.  Expansions are quoted so paths
# containing spaces survive, and `&&` stops `pwd` from silently reporting
# the wrong directory when a `cd` fails.
BIN_DIR=$(dirname "$0")
BIN_DIR_ABSOLUTE=$(cd "$BIN_DIR" && pwd)
SECRETS_ROOT=$(cd "$BIN_DIR_ABSOLUTE/../secrets/" && pwd)

# GCP BACKEND
GCP_SECRETS_ROOT="$SECRETS_ROOT/gcp-secrets"
if [ ! -d "$GCP_SECRETS_ROOT" ]; then
    echo "Missing secrets folder at: $GCP_SECRETS_ROOT." >&2
    exit 1
fi
# Render the secret manifest locally (--dry-run) and pipe it to apply, so
# the secret is created or updated idempotently.
kubectl create secret generic gcp-secrets --dry-run --from-file="$GCP_SECRETS_ROOT" -o yaml | kubectl apply -f -
| true |
d80e150c964fb687ab099406aadba7841e64866d | Shell | XJDKC/University-Code-Archive | /Course Experiment/Principle of Compiler/Stanford CS106/PP3/src/test.sh | UTF-8 | 169 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Run the compiler on every .decaf/.frag test input in the directory given
# as $1, capturing combined stdout+stderr next to it as <name>.out.
# Globbing replaces the fragile `ls` parse (which broke on any unusual
# filename), and quoting keeps paths with spaces intact.
for path in "$1"/*
do
    file=${path##*/}
    case "${file##*.}" in
        decaf|frag)
            ./dcc < "$1/$file" &> "$1/${file%.*}.out"
            ;;
    esac
done
folder=$1  # NOTE(review): unused leftover; kept for compatibility
| true |
c1b859096fa08556fd6de595112e4d592b68d25b | Shell | ali5ter/carrybag | /v1/scripts/cpiofrom | UTF-8 | 1,473 | 4.15625 | 4 | [] | no_license | #!/bin/bash
# @file cpiofrom
# ☆ Wrapper for cpio'ing a remote dir structure over ssh
# @author Alister Lewis-Bowen <alister@different.com>
# @requires color <http://code.google.com/p/ansi-color/>
# @requires parseuri <http://vpalos.com/537/uri-parsing-using-bash-built-in-features/>
#
# directory this script exists within
# NOTE(review): `stat -f` is the BSD/macOS flag form; GNU stat differs.
#
scriptDir=$(dirname $(stat -f $0))
#
# includes — parseuri() populates the uri_* globals consumed below
#
. $scriptDir/parseuri.fnc
#
# main — dispatch on the single argument
#
case $1 in
#
# help
#
-h|--help|h|help|?)
echo "
Wrapper for cpio to transfer a remote dir structure to this machine via ssh.
usage:
$(color bd)cpiofrom$(color) user[:password]@hostname/path
$(color bd)cpiofrom$(color) [ -h | --help ]
examples
cpiofrom alister@green.different.com/projects/site/htdocs
pulls the entire htdocs directory down to the current directory
"
;;
#
# process transfer
#
*)
parseuri "$1" || {
echo "$(color red)This argument is incorrect. Try using the format user@host/path$(color)";
exit 1; }
# user, host and path are all mandatory pieces of the URI
[[ -n $uri_user && -n $uri_host && -n $uri_path ]] || {
echo "$(color red)Need at least the user, host and path to be successful. Try using the format user@host/path$(color)";
exit 1; }
# strip the leading '/' so the path is relative on the remote side
uri_path=${uri_path:1}
echo "$(color green)Fetching data from $uri_path on $uri_host as $uri_user...$(color)";
# remote side emits the tree as a cpio archive; local side unpacks it,
# creating directories (-d) and preserving modification times (-m)
(ssh $uri_user@$uri_host "find $uri_path -depth -print | cpio -oa" | cpio -imd)
echo "$(color green)Finished$(color)";
;;
esac
| true |
43f8818c96d4c1bf5d80b9b3a120722f50b69106 | Shell | caziz/dotfiles | /.bash_aliases | UTF-8 | 4,143 | 3.4375 | 3 | [] | no_license | # editing dotfiles
alias ea='open ~/.bash_aliases' # edit aliases
alias ep='open ~/.bash_prompt' # edit prompt
alias rl='source ~/.bash_aliases' # reload aliases
# allow sudo with aliases
# (the trailing space makes bash alias-expand the word after `sudo` too)
alias sudo='sudo '
# viewing files
alias o='open .'
# pick the right color flag for GNU vs BSD/macOS ls
if ls --color > /dev/null 2>&1; then # GNU `ls`
	alias ls="command ls --color"
else # macOS `ls`
	alias ls="command ls -G"
fi
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
# toggle terminal line wrapping
alias wrapon='tput rmam'
alias wrapoff='tput smam'
# navigation
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
alias .....='cd ../../../..'
alias cdd='cd ~/Developer'
alias cddt='cd ~/Desktop'
alias cddoc='cd ~/Documents'
alias cddl='cd ~/Downloads'
alias cdapp='cd /Applications'
# cd into a directory and list it
cl() { cd "$1" && ls; }
# jump to a UCLA CS course directory by number
cs() { cd "$HOME/Developer/UCLA-CS-$1"; }
# file creation
alias mkdir='mkdir -pv'
# make a directory and cd into it
mcd() { if [ "$#" -ne 1 ]; then echo "usage: mcd directory"; else mkdir "$1"; cd "$1"; fi;}
# emacs: esc x tetris
# create an executable file, optionally prepending a shebang, then open it
mk() {
    if [[ "$#" -ne 1 && "$#" -ne 2 ]]; then
        echo "usage: mk file [shebang]"
    else
        touch "$1"
        if [ "$#" -ne 1 ]; then
            # prepend $2 to the (possibly pre-existing) file contents
            echo -en "$2\n$(cat $1)" > "$1"
        fi
        chmod +x "$1"
        open -t "$1"
    fi
}
mksh() { if [ "$#" -ne 1 ]; then echo "usage: mksh file"; else mk "$1" '#!/usr/bin/env bash'; fi; }
mkpy() { if [ "$#" -ne 1 ]; then echo "usage: mkpy file"; else mk "$1" '#!/usr/bin/env python'; fi; }
# hide/show all desktop icons (useful when presenting)
alias hidedt="defaults write com.apple.finder CreateDesktop -bool false && killall Finder"
alias showdt="defaults write com.apple.finder CreateDesktop -bool true && killall Finder"
# show path line by line
alias path='echo -e ${PATH//:/\\n}'
# seasnet
# alias seasnet='ssh aziz@lnxsrv09.seas.ucla.edu'
# alias sn='seasnet'
# alias sshfss='/usr/local/bin/sshfs aziz@lnxsrv09.seas.ucla.edu: ~/seasnet/ -o volname=SEASNET'
# alias fuse_setup='mkdir ~/seasnet/; sshfss; open ~/SEASNET'
# alias fuse='fuse_setup; while [ $? -ne 0 ]; do ufuse; sleep 1; fuse_setup; open ~/SEASNET; done'
# alias ufuse='umount ~/seasnet/; rmdir ~/seasnet'
# mount a remote host via sshfs under ~/Desktop/<name>-mountpoint
# NOTE(review): the while-loop below busy-waits (printing "sleepin") until
# the mountpoint directory disappears — looks like leftover debug logic;
# confirm intent before relying on this function.
fuse() {
    LOC="$HOME/Desktop"
    DIR="/"
    if [ "$#" -ne 2 ]; then
        NAME="deterlab"
        DEST="la136ah@users.deterlab.net"
    else
        NAME="$1"
        DEST="$2"
    fi
    if [ ! -d "$LOC/$NAME-mountpoint" ]; then
        echo "Making directory: $LOC/$NAME-mountpoint"
        mkdir "$LOC/$NAME-mountpoint"
    fi
    eval "sshfs $DEST:$DIR $LOC/$NAME-mountpoint -o volname=$NAME"
    while [ -d "$LOC/$NAME-mountpoint" ]; do
        sleep 1
        echo "sleepin"
        echo "$LOC/$NAME-mountpoint"
    done
    cd "$LOC/$NAME"
    open "$LOC/$NAME"
}
# git commands
alias g='git'
# cd up to git repo, go back with `cd -`
cdg() {
    TEMP_PWD=`pwd`
    while ! [ -d .git ]; do
        cd ..
    done
    OLDPWD=$TEMP_PWD
}
# open the current repo's remote URL in the browser
gh() {
    open `git remote -v |\
        grep fetch |\
        awk '{print $2}' |\
        sed 's/git@/http:\/\//' |\
        sed 's/com:/com\//'` |\
        head -n1
}
# clone from GitHub; bare names default to the caziz account
ghcl() {
    if [[ "$1" == *"/"* ]]; then
        git clone "https://github.com/$1.git"
    else
        git clone "https://github.com/caziz/$1.git"
    fi
}
# macOS commands
alias lock='/System/Library/CoreServices/Menu\ Extras/User.menu/Contents/Resources/CGSession -suspend'
# macOS config commands
alias sct='defaults write com.apple.screencapture type'
alias lwt='sudo defaults write /Library/Preferences/com.apple.loginwindow LoginwindowText'
alias cpu='sysctl -n machdep.cpu.brand_string'
alias hw='system_profiler SPHardwareDataType'
alias ikr='defaults write -g InitialKeyRepeat -int 15' # normal minimum is 15 (225 ms)
alias kr='defaults write -g KeyRepeat -int 1' # normal minimum is 2 (30 ms)
# print current finder directory
function pfd { osascript -e 'tell application "Finder"'\
 -e "if (${1-1} <= (count Finder windows)) then"\
 -e "get POSIX path of (target of window ${1-1} as alias)"\
 -e 'else' -e 'get POSIX path of (desktop as alias)'\
 -e 'end if' -e 'end tell'; }
# cd to current finder directory
function cdf { cd "`pfd $@`"; }
# open man page in Preview
function pman { man -t "$1" | open -f -a Preview; }
# open man page in x-man-page
function xman {
    if [ $# -eq 1 ]; then
        open x-man-page://$1;
    elif [ $# -eq 2 ]; then
        open x-man-page://$1/$2;
    fi
}
| true |
d42602fd15cad4c48a1ca7d52e0da06b6b1ab38f | Shell | epfl-dias/proteus | /.githooks/prepare-commit-msg | UTF-8 | 192 | 3.4375 | 3 | [] | no_license | #!/bin/sh
# Git prepare-commit-msg hook: when the commit message file ($1) starts
# empty, prepend the project's commit template to it.
# Fixes: $1 is quoted (unquoted, an unset/empty $1 made `head` read stdin
# and hang), and printf replaces echo ("\n" inside echo is interpreted by
# dash but printed literally by bash — behavior differed by /bin/sh).
firstLine=$(head -n1 "$1")
if [ -z "$firstLine" ] ;then
    commitTemplate=$(cat "$(git rev-parse --git-dir)/../.config/commitTemplate")
    printf '%s\n %s\n' "$commitTemplate" "$(cat "$1")" > "$1"
fi
| true |
a72360486679609e8711260ebb0674325e5cfbbc | Shell | diennea/blazingcache | /blazingcache-services/src/main/resources/bin/setenv.sh | UTF-8 | 480 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | # Basic Environment and Java variables
#JAVA_HOME=
JAVA_OPTS="-Xmx1g -Xms1g -Djava.net.preferIPv4Stack=true -XX:MaxDirectMemorySize=1g "

# Derive JAVA_HOME from the java binary on PATH when it was not set
# explicitly above; fail hard if neither is available.
# (command -v replaces the deprecated `which`; $(...) replaces backticks;
# dirname arguments are quoted for paths containing spaces.)
if [ -z "$JAVA_HOME" ]; then
    JAVA_PATH=$(command -v java 2>/dev/null)
    if [ "x$JAVA_PATH" != "x" ]; then
        JAVA_BIN=$(dirname "$JAVA_PATH" 2>/dev/null)
        JAVA_HOME=$(dirname "$JAVA_BIN" 2>/dev/null)
    fi
    if [ -z "$JAVA_HOME" ]; then
        echo "JAVA_HOME environment variable is not defined and is needed to run this program" >&2
        exit 1
    fi
fi
| true |
69e6a791ab8a4b663469428536491c3bc6f8ccff | Shell | HPCE/hpce-2015-cw2 | /prepare_submissions.sh | UTF-8 | 674 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Files always bundled; the glob patterns expand later (see the tar line).
FILES=".gitignore */.gitignore audio/coeffs/*.csv";
# Include the git history too, when this actually is a repository.
if [ ! -d .git ]; then
	echo "Warning: there appears to be no git repository here";
else
	FILES="$FILES .git";
fi
# Expected deliverables; each is checked below and only added if present.
# NOTE(review): audio/mp3_file_src.sh and audio/all_firs_staged.sh each
# appear twice in WANTED — possibly typos for other filenames; confirm.
WANTED="makefile audio/makefile audio/passthrough.c audio/print_audio.c audio/signal_generator.c audio/fir_filter.c audio/merge.c";
WANTED="${WANTED} audio/corrupter.sh audio/all_firs_direct.sh audio/all_firs_staged.sh";
WANTED="${WANTED} audio/mp3_file_src.sh audio/mp3_file_src.sh audio/audio_sink.sh audio/all_firs_staged.sh";
for W in $WANTED; do
	if [ ! -f $W ]; then
		echo "Warning: no file called $W";
	else
		FILES="${FILES} ${W}";
	fi
done
# $FILES is deliberately unquoted so the glob patterns above expand here.
tar -czf hpce_cw2_${USER}.tar.gz $FILES;
| true |
507eef28a89d9e6ef2818d4787aa1678c9888357 | Shell | nmg1986/solomon | /etc/script.d/check_tomcat | UTF-8 | 237 | 2.921875 | 3 | [] | no_license | #!/bin/bash
##
## Check whether Tomcat is accessible (its init-script status mentions
## "running").  Exit 0 if running, 1 otherwise — usable as a monitor probe.
################################################################
# grep -q in the condition replaces the old `grep -c` + numeric compare
# round trip; the exit behavior is identical.
if /etc/init.d/tomcat status | grep -q running; then
    exit 0
else
    exit 1
fi
| true |
60ee0921c5f6e131b2eccf80a5e68c9017eb475c | Shell | myazhq/ssmu_install | /py/ssmu_py_install_debian8.sh | UTF-8 | 3,692 | 2.640625 | 3 | [] | no_license | #!/bin/bash
# A script to automatically install and config ss mu py.
# Author: Harry from 2645 Studio
# Date: 2017-4-1

# Update system and get some packages
apt-get update
apt-get upgrade -y
apt-get install make gcc g++ rsyslog supervisor redis-server git wget curl python python-pip m2crypto -y
source ../patch/libsodium.sh

# Source config file and copy iptables config file
source ./ssmu.cfg
if [ "$is_iptables" != 0 ];
then
    cp iptables.banmailports.rules /etc/iptables.banmailports.rules
fi

# Install and ss py mu
cd /root
git clone https://github.com/fsgmhoward/shadowsocks-py-mu.git

# Write ssmu-py config file.  A single here-document replaces the long
# chain of echo-appends; the unquoted EOF delimiter lets the $variables
# read from ssmu.cfg expand exactly as the double-quoted echos did.
cd /root/shadowsocks-py-mu/shadowsocks
cat >> config.py <<EOF
import logging
CONFIG_VERSION = '20160623-2'
API_ENABLED = $api_enable
MYSQL_HOST = '$my_host'
MYSQL_PORT = $my_port
MYSQL_USER = '$my_user'
MYSQL_PASS = '$my_pass'
MYSQL_DB = '$my_db'
MYSQL_USER_TABLE = '$my_table'
MYSQL_TIMEOUT = 30
API_URL = '$api_url'
API_PASS = '$api_key'
NODE_ID = '$api_node_id'
CHECKTIME = 30
SYNCTIME = 120
CUSTOM_METHOD = $custom_method
MANAGE_PASS = 'passwd'
MANAGE_BIND_IP = '127.0.0.1'
MANAGE_PORT = 65000
SS_BIND_IP = '::'
SS_METHOD = '$ss_method'
SS_OTA = False
SS_SKIP_PORTS = $ss_skip_ports
SS_FASTOPEN = False
SS_TIMEOUT = 185
SS_FIREWALL_ENABLED = $firewall_enable
SS_FIREWALL_MODE = '$firewall_mode'
SS_BAN_PORTS = $ban_ports
SS_ALLOW_PORTS = $allow_ports
SS_FIREWALL_TRUSTED = [443]
SS_FORBIDDEN_IP = []
LOG_ENABLE = True
SS_VERBOSE = False
LOG_LEVEL = logging.INFO
LOG_FILE = 'shadowsocks.log'
LOG_FORMAT = '%(asctime)s %(levelname)s %(message)s'
LOG_DATE_FORMAT = '%b %d %H:%M:%S'
EOF

# Write supervisor config file (quoted delimiter: nothing to expand here)
cd /etc/supervisor/conf.d
cat >> ssserver.conf <<'EOF'
[program:ssserver]
command = python /root/shadowsocks-py-mu/shadowsocks/servers.py
directory = /root/shadowsocks-py-mu/shadowsocks/
user = root
autostart = true
autorestart = true
stdout_logfile = /var/log/supervisor/ssserver.log
stderr_logfile = /var/log/supervisor/ssserver_err.log
EOF

# Install serverspeeder if necessary
cd /root
if [ "$is_serverspeeder" != 0 ];
then
    wget -N --no-check-certificate https://raw.githubusercontent.com/91yun/serverspeeder/master/serverspeeder-all.sh && bash serverspeeder-all.sh
fi

# Overwrite iptables if necessary
if [ "$is_iptables" != 0 ];
then
    iptables-restore < /etc/iptables.banmailports.rules
    iptables-save > /etc/iptables.up.rules
    # Re-apply the saved rules on every boot via an if-pre-up hook
    echo "#!/bin/bash" >> /etc/network/if-pre-up.d/iptables
    echo "/sbin/iptables-restore < /etc/iptables.up.rules" >> /etc/network/if-pre-up.d/iptables
    chmod +x /etc/network/if-pre-up.d/iptables
    # Route messages tagged IPTABLES to a dedicated log file
    echo ':msg,contains,"IPTABLES" /var/log/iptables.log' >> /etc/rsyslog.d/my_iptables.conf
    systemctl restart rsyslog
fi

# Reload supervisor
systemctl enable supervisor
systemctl restart supervisor
supervisorctl reload
echo "ssmu-py install complete QwQ"
| true |
f86d3e1e888e938650a40139158b0809823d2554 | Shell | ksuomala/filler | /filler_test.sh | UTF-8 | 3,031 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# --- interactive setup ----------------------------------------------------
# Test harness: pits $p1 against opponents on every map using the filler
# VM.  All of these variables are globals consumed by loop_games below.
p1="ksuomala.filler"
p2=""
vm_path="resources/filler_vm_old"
log_dir="test_logs"
log_file="$log_dir/test_log"
map_dir="resources/maps"
players_dir="resources/players"
games_lost=0
# lowercased kernel name ("linux", "darwin", ...) — selects ANSI coloring
os=$(uname -s | tr A-Z a-z)
# start every run with a fresh log directory
rm -rf $log_dir
mkdir $log_dir
touch $log_file
echo YOUR OS is $os
read -n 1 -p "Do you want to manually select a player? [y / n] If n is selected, all the players in $players_dir will be played against." select_player
echo
if [ $select_player == "y" ]
then
    ls $players_dir
    read -p "Type name for PLAYER2 and press ENTER " p2
    p2=$players_dir/$p2
    echo "$p2 set as PLAYER2"
fi
read -p "How many games do you want to play? __ " number_of_games
echo $number_of_games
read -n 1 -p "Do you want to visualize the games that you lose? [y/n]" visualize
echo
read -n 1 -p "Do you want to quit if you lose a single game? [y/n] " quit_if_lose
echo
loop_games()
{
	# Play $1 (as player1) vs $2 (as player2) on every map in $map_dir,
	# $number_of_games times per map.  Uses the globals set by the prompt
	# section above ($os, $vm_path, $log_dir, $log_file, $p1, $p2,
	# $visualize, $quit_if_lose) and updates $games_lost.
	for map in $map_dir/*
	do
		if [ $os = "linux" ]
		then
			echo -e "Running map \e[33m$map\e[0m $number_of_games times. \e[32m$1\e[0m as player1"
		else
			echo "Running map $map $number_of_games times. $1 as player1"
		fi
		games_won=0
		# Bug fix: brace expansion happens BEFORE variable expansion, so
		# the original "for i in {1..$number_of_games}" produced the
		# literal word "{1..N}" and ran only a single iteration.
		for (( i = 1; i <= number_of_games; i++ ))
		do
			echo $i
			# stop early once a majority of the games has been won
			if [ $(($number_of_games/2)) -lt $games_won ]
			then
				echo "$games_won games won"
				break
			fi
			./$vm_path -f $map -p1 ./$1 -p2 ./$2 > $log_file
			# abort the entire run on any VM-reported error
			grep -A1 "error" $log_file > $log_dir/error
			if [ -s $log_dir/error ]
			then
				cat $log_dir/error
				exit
			fi
			grep "Segfault" $log_file > $log_dir/error
			# Bug fix: this test previously read "$log_file/error" (a path
			# below a regular file, never non-empty), so segfaults were
			# silently ignored.
			if [ -s $log_dir/error ]
			then
				if [ $os = "linux" ]
				then
					echo -e "\e[31mSEGFAULT:\e[0m"
				else
					echo "SEGFAULT:"
				fi
				cat $log_dir/error
				exit
			fi
			# the VM writes the result line containing "won" to filler.trace
			winner=$(grep "won" filler.trace)
			if [[ "$winner" == *"$p1"* ]];
			then
				echo -n 1
				games_won=$(($games_won+1))
			elif [[ "$winner" == *"$p2"* ]];
			then
				echo -n 0
				games_lost=$(($games_lost+1))
				# keep a numbered copy of the log for each lost game
				cp $log_file $log_file$games_lost
				if [ $visualize = "y" ]
				then
					echo
					echo "visualizing..."
					./visualizer < $log_file
				fi
				if [ $quit_if_lose = "y" ]
				then
					echo
					echo You lost, quitting...
					exit
				fi
			fi
		done
		# per-map summary: red when under 50% wins, green otherwise
		if [ $games_won -lt $(($number_of_games/2)) ]
		then
			if [ $os = "linux" ]
			then
				echo -e "\e[31m $games_won/$number_of_games\e[0m"
			else
				echo " $games_won/$number_of_games"
			fi
		elif [ $os = "linux" ]
		then
			echo -e "\e[32m $games_won/$number_of_games\e[0m"
		else
			echo " $games_won/$i"
		fi
	done
}
# --- main -----------------------------------------------------------------
# Either sweep every player in $players_dir, or play only the manually
# selected $p2.  Each pairing is played twice with the sides swapped.
if [ $select_player != "y" ]
then
    for player in $players_dir/*
    do
        p2=$player
        if [ $os = "linux" ]
        then
            echo -e "\e[34m$p1 \e[0mvs \e[35m$p2\e[0m"
        else
            echo "$p1 vs $p2"
        fi
        loop_games $p1 $p2
        echo "$p2 vs $p1"
        loop_games $p2 $p1
    done
else
    if [ $os = "linux" ]
    then
        echo -e "\e[34m$p1 \e[0mvs \e[35m$p2\e[0m"
    else
        echo "$p1 vs $p2"
    fi
    loop_games $p1 $p2
    echo "$p2 vs $p1"
    loop_games $p2 $p1
fi
| true |
cb3e7abbc6ee912e367ffefc028c0e14d3b3f2be | Shell | alexlenk/deepracer-for-dummies | /scripts/evaluation/adv-start.sh | UTF-8 | 2,696 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# --- DeepRacer evaluation driver ------------------------------------------
# NOTE(review): this script is parameterized entirely by its caller's
# environment: $WORLD, $TEST, $X_NUMBER_OF_TRIALS, $reeval, plus the loop
# statistics $j, $skipped, $curr_median_perc and $curr_avg_perc are all
# expected to be exported before it runs — confirm against the invoker.
echo "Uploading rewards file"
cp ../../src/markov/rewards/default.py ../../docker/volumes/minio/bucket/custom_files/reward.py
echo "Uploading actions file"
cp ../../src/markov/actions/model_metadata.json ../../docker/volumes/minio/bucket/custom_files/model_metadata.json
echo "Uploading preset file"
cp ../../src/markov/presets/default.py ../../docker/volumes/minio/bucket/custom_files/preset.py
# fill in defaults for unset/"null" parameters
if [ "$WORLD" == "" -o "$WORLD" == "null" ]; then
    WORLD=reinvent_base
fi
if [ "$TEST" == "" -o "$TEST" == "null" ]; then
    TEST=False
fi
if [ "$X_NUMBER_OF_TRIALS" == "" -o "$X_NUMBER_OF_TRIALS" == "null" ]; then
    X_NUMBER_OF_TRIALS=21
fi
../../reset-checkpoint.sh
echo "Eval: Evaluating for $X_NUMBER_OF_TRIALS rounds"
# toggle the pretrained_s3_* lines in the coach config: uncomment them to
# evaluate a pre-trained model, comment them back out otherwise
if [ "$reeval" == "true" ]; then
    echo "Eval: Using pre-trained model"
    sed -i 's/#"pretrained_s3_bucket"/"pretrained_s3_bucket"/g' ../../deepracer/rl_coach/rl_deepracer_coach_robomaker.py
    sed -i 's/#"pretrained_s3_prefix"/"pretrained_s3_prefix"/g' ../../deepracer/rl_coach/rl_deepracer_coach_robomaker.py
else
    echo "Eval: Using currently trained model"
    sed -i 's/[^#]"pretrained_s3_bucket"/ #"pretrained_s3_bucket"/g' ../../deepracer/rl_coach/rl_deepracer_coach_robomaker.py
    sed -i 's/[^#]"pretrained_s3_prefix"/ #"pretrained_s3_prefix"/g' ../../deepracer/rl_coach/rl_deepracer_coach_robomaker.py
fi
# render the docker env file from the template and append eval settings
sed "s/###WORLD###/$WORLD/g" ../../docker/template.env > ../../docker/.env
sed -i 's/metric.json/eval_metrics.json/g' ../../docker/.env
sed -i "s/NUMBER_OF_TRIALS=5/NUMBER_OF_TRIALS=$X_NUMBER_OF_TRIALS/g" ../../docker/.env
echo "ALTERNATE_DRIVING_DIRECTION=True" >> ../../docker/.env
echo "CHANGE_START_POSITION=True" >> ../../docker/.env
echo "EVALUATION_TEST=$TEST" >> ../../docker/.env
echo "TRAINING_RUN_TOTAL=$j" >> ../../docker/.env
echo "TRAINING_RUN=$((j-skipped))" >> ../../docker/.env
echo "MEDIAN_PERCENTAGE=$curr_median_perc" >> ../../docker/.env
echo "AVERAGE_PERCENTAGE=$curr_avg_perc" >> ../../docker/.env
# start from an empty metrics file so the completion poll below cannot see
# results left over from a previous run
rm -f ../../docker/volumes/minio/bucket/custom_files/eval_metrics.json
touch ../../docker/volumes/minio/bucket/custom_files/eval_metrics.json
cat ../../docker/.env
# launch the evaluation stack in the background, logging to the home dir
. ../evaluation/start.sh > ~/deepracer_eval.log &
RUNNING=true
# poll every 5s until the final trial's completion_percentage appears
while [ $RUNNING == true ]; do
    complete=`jq -r ".metrics[$(($X_NUMBER_OF_TRIALS-1))].completion_percentage" ../../docker/volumes/minio/bucket/custom_files/eval_metrics.json`
    if [ "$complete" == "null" -o "$complete" == "" ] ; then
        RUNNING=true
    else
        RUNNING=false
    fi
    sleep 5
    #cat ../../docker/volumes/minio/bucket/custom_files/eval_metrics.json
done
cat ../../docker/volumes/minio/bucket/custom_files/eval_metrics.json
. ./stop.sh
| true |
972d7316a37ba95a71ff4f87ac09db322f04a14f | Shell | boriphuth/DevSecOps-1 | /infrastructure/provisioning/alpine/without_docker.sh | UTF-8 | 405 | 2.546875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Install Ansible on Alpine, skipping everything when Python 3 is already
# present.  `command -v` replaces the deprecated `which` (and no longer
# prints the interpreter path as a side effect).
if command -v python3 >/dev/null 2>&1; then
    echo 'Python already installed lets move';
else
    apk --update --no-cache add ca-certificates openssh-client openssl python3 rsync ca-certificates bash curl
    # build-time deps grouped in a virtual package so they can be removed
    # in one shot once pip has finished compiling wheels
    apk --update add --virtual .build-deps python3-dev libffi-dev openssl-dev build-base
    pip3 install --upgrade pip cffi
    pip3 install ansible
    apk del .build-deps
    rm -rf /var/cache/apk/*
fi
27e8f8fc501ef0c81bc54be3bc81efc39f6540e6 | Shell | Dior222/blog-microservices | /build-all.sh | UTF-8 | 1,425 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Print the given message to stderr in green, padded with blank lines.
function note() {
  local GREEN NC
  GREEN='\033[0;32m'
  NC='\033[0m' # No Color
  # Bug fix: the message is passed as a %s argument instead of being
  # interpolated into the printf FORMAT string, so messages containing
  # '%' (e.g. "50% done") are now printed verbatim instead of being
  # misinterpreted as conversion specifiers.
  printf "\n${GREEN}%s ${NC}\n" "$*" >&2
}
set -e

# Build the shared util library first (published to the local Maven repo so
# the services can resolve it), then each microservice in turn.  Every step
# returns via `cd -`, so a failure under `set -e` aborts immediately.
cd util; note "Building util..."; ./gradlew clean build publishToMavenLocal; cd -
cd microservices/core/product-service; note "Building prod..."; ./gradlew clean build; cd -
cd microservices/core/recommendation-service; note "Building rec..."; ./gradlew clean build; cd -
cd microservices/core/review-service; note "Building rev..."; ./gradlew clean build; cd -
cd microservices/composite/product-composite-service; note "Building comp..."; ./gradlew clean build; cd -
cd microservices/support/auth-server; note "Building auth..."; ./gradlew clean build; cd -
cd microservices/support/config-server; note "Building conf..."; ./gradlew clean build; cd -
cd microservices/support/discovery-server; note "Building disc..."; ./gradlew clean build; cd -
cd microservices/support/edge-server; note "Building edge..."; ./gradlew clean build; cd -
cd microservices/support/monitor-dashboard; note "Building mon..."; ./gradlew clean build; cd -
cd microservices/support/turbine; note "Building turb..."; ./gradlew clean build; cd -

# List the produced artifacts.  Bug fix: the -name pattern is quoted —
# unquoted, the shell expanded *SNAPSHOT.jar against the current directory
# before find ever saw it (failing or matching the wrong thing whenever a
# matching file happened to exist at the top level).
find . -name '*SNAPSHOT.jar' -exec du -h {} \;
docker-compose build | true |
4f3937360a629adcf5d4db0bc235bb8ab1308c10 | Shell | maksverver/zeeslag-go | /run-server.sh | UTF-8 | 324 | 3.0625 | 3 | [] | no_license | #!/bin/sh
# Launch the game server inside a detached dtach session, appending all of
# its output to $LOG.  Re-attach later with: dtach -a server.sock
SOCK=server.sock
LOG=server.log
BIN=./server
HOST=
PORT=14000
ROOT=/player

# Diagnostics go to stderr; the first message's grammar is also fixed
# (was: "does is not executable").
if [ -e "${SOCK}" ]; then echo "${SOCK} exists!" >&2; exit 1; fi
if [ ! -x "${BIN}" ]; then echo "${BIN} is not executable!" >&2; exit 1; fi

dtach -n "${SOCK}" /bin/sh -c "'${BIN}' -h '${HOST}' -p '${PORT}' -r '${ROOT}' | tee -a '${LOG}'"
| true |
14b47db84e7bc25d6e8a7ae1b622baf541b625f8 | Shell | hallelujah-shih/start-learn | /tools/docker/template/ngx_dev_env/auto_build.sh | UTF-8 | 3,940 | 2.890625 | 3 | [] | no_license | #!/bin/bash
SRC_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"

set +x
# install lua pkg
# luarocks install lua-resty-http
# luarocks install lua-resty-logger-socket
# luarocks install lua-resty-dns

default_max_timeout=60
default_conn_timeout=5

# Implicit parameters for force_download; set these before each call.
download_url=""
dst_name=""

# Download $download_url to $dst_name, retrying every 3s until curl
# succeeds.  A pre-existing $dst_name is treated as already downloaded.
# (until-loop replaces the old `while [[ true ]]` + manual `$?` check;
# expansions are quoted.)
force_download () {
    if [[ -f "$dst_name" ]]; then
        return 0
    fi
    until curl --connect-timeout "${default_conn_timeout}" -m "${default_max_timeout}" -fL "${download_url}" -o "${dst_name}"; do
        sleep 3
    done
}
set -x
# ndk download
download_url=https://github.com/vision5/ngx_devel_kit/archive/refs/tags/v0.3.1.tar.gz
dst_name=ndk.tar.gz
force_download
tar -xzf ndk.tar.gz
NGX_DEV_KIT_ROOT_DIR=ngx_devel_kit-0.3.1
# lua_ngx module
download_url=https://github.com/openresty/lua-nginx-module/archive/refs/tags/v0.10.22.tar.gz
dst_name=lua_ngx.tar.gz
force_download
tar -xzf lua_ngx.tar.gz
LUA_NGX_MODULE_ROOT_DIR=lua-nginx-module-0.10.22
# lua-resty-core (installed straight away; returns to $SRC_DIR afterwards)
download_url=https://codeload.github.com/openresty/lua-resty-core/tar.gz/refs/tags/v0.1.24
dst_name=lua-resty-core.tar.gz
force_download
tar -xzf lua-resty-core.tar.gz
LUA_RESTY_CORE_ROOT_DIR=lua-resty-core-0.1.24
cd ${LUA_RESTY_CORE_ROOT_DIR} && make install
cd ${SRC_DIR}
# lua-resty-lrucache
download_url=https://github.com/openresty/lua-resty-lrucache/archive/refs/tags/v0.13.tar.gz
dst_name=lua-resty-lrucache.tar.gz
force_download
tar -xzf lua-resty-lrucache.tar.gz
LUA_RESTY_LRUCACHE_ROOT_DIR=lua-resty-lrucache-0.13
cd ${LUA_RESTY_LRUCACHE_ROOT_DIR} && make install
cd ${SRC_DIR}
# luajit2 (OpenResty's LuaJIT fork; headers/libs land in /usr/local)
download_url=https://github.com/openresty/luajit2/archive/refs/tags/v2.1-20220915.tar.gz
dst_name=luajit2.tar.gz
force_download
tar xvf luajit2.tar.gz
LUAJIT_DIR_NAME=luajit-2.1
LUAJIT_ROOT_DIR=luajit2-2.1-20220915
cd ${LUAJIT_ROOT_DIR}
make -j8 && make install
cd ${SRC_DIR}
# pre modify cfg
# Appends a snippet to the lua-nginx-module ./config script so that, at
# nginx-configure time, default Lua package paths get baked into the
# generated autoconf header.  "\$ngx_addon_dir" is escaped on purpose: it
# must be expanded later by nginx's configure, not by this script.
cat <<EOF >>${LUA_NGX_MODULE_ROOT_DIR}/config
echo '
#ifndef LUA_DEFAULT_PATH
#define LUA_DEFAULT_PATH "./?.lua;/usr/local/share/lua/5.1/?.lua;/usr/local/share/lua/5.1/?/init.lua;/usr/local/lib/lua/5.1/?.lua;/usr/local/lib/lua/5.1/?/init.lua;/usr/local/lib/lua/?.lua;/usr/local/lib/lua/?/init.lua;/usr/share/lua/5.1/?.lua;/usr/share/lua/5.1/?/init.lua"
#endif
#ifndef LUA_DEFAULT_CPATH
#define LUA_DEFAULT_CPATH "./?.so;/usr/local/lib/lua/5.1/?.so;/usr/local/lib/lua/?.so;/usr/lib/x86_64-linux-gnu/lua/5.1/?.so;/usr/lib/lua/5.1/?.so;/usr/local/lib/lua/5.1/loadall.so"
#endif
' >> "\$ngx_addon_dir/src/ngx_http_lua_autoconf.h"
EOF
# nginx
download_url=https://openresty.org/download/nginx-1.19.3.tar.gz
dst_name=nginx.tar.gz
force_download
tar -xzf nginx.tar.gz
NGX_ROOT_DIR=nginx-1.19.3
NGX_MOD_DIR=third_modules
# stage the two add-on modules inside the nginx tree
mkdir -p ${NGX_ROOT_DIR}/${NGX_MOD_DIR}
cp ${NGX_DEV_KIT_ROOT_DIR} ${NGX_ROOT_DIR}/${NGX_MOD_DIR}/${NGX_DEV_KIT_ROOT_DIR} -r
cp ${LUA_NGX_MODULE_ROOT_DIR} ${NGX_ROOT_DIR}/${NGX_MOD_DIR}/${LUA_NGX_MODULE_ROOT_DIR} -r
cd ${NGX_ROOT_DIR}
# lua-nginx-module's config script reads these to locate LuaJIT
export LUAJIT_INC=/usr/local/include/${LUAJIT_DIR_NAME}
export LUAJIT_LIB=/usr/local/lib
# debug-friendly build: -g -O0 and an rpath to the locally installed libs
./configure --prefix=/var/www/html \
    --with-cc-opt="-g -O0" \
    --with-ld-opt="-Wl,-rpath,/usr/local/lib" \
    --sbin-path=/usr/sbin/nginx \
    --conf-path=/etc/nginx/nginx.conf \
    --http-log-path=/var/log/nginx/access.log \
    --error-log-path=/var/log/nginx/error.log \
    --with-pcre --with-pcre-jit \
    --lock-path=/var/lock/nginx.lock \
    --pid-path=/var/run/nginx.pid \
    --modules-path=/etc/nginx/modules \
    --with-stream \
    --with-stream_ssl_module \
    --with-stream_ssl_preread_module \
    --with-http_addition_module \
    --with-http_v2_module \
    --with-http_ssl_module \
    --with-http_realip_module \
    --with-http_auth_request_module \
    --with-http_gzip_static_module \
    --add-module=${NGX_MOD_DIR}/${NGX_DEV_KIT_ROOT_DIR} \
    --add-module=${NGX_MOD_DIR}/${LUA_NGX_MODULE_ROOT_DIR} \
    --add-module=../ngx_http_hello_module
make -j8 && make install
| true |
5a808fdde134ca5a574f89c18c8fcb48f5b0852b | Shell | mambolevis/dotfiles-2 | /pacUpdates.sh | UTF-8 | 260 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Arch upgradeable packages
# Print the number of packages `pacman -Sup` would download, with correct
# singular/plural wording.  ($() replaces backticks, $(( )) replaces `let`,
# and the numeric comparisons use -eq/-z instead of string ==.)
list=$(pacman -Sup 2> /dev/null)
if [ -z "$list" ]; then
    count=0
else
    count=$(echo "$list" | wc -l)
    # NOTE(review): the first output line is presumably a non-package
    # header, hence the -1 — confirm against the pacman version in use.
    count=$((count - 1))
fi
if [ "$count" -eq 1 ]; then
    echo "$count Update"
else
    echo "$count Updates"
fi
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.