blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
40fe0f91f54718da81f748bdf614a6a761966ca4 | Shell | hag007/PRS | /calc_pca_full.sh | UTF-8 | 1,161 | 2.78125 | 3 | [] | no_license | #!/bin/bash
set -e
source constants_.sh
source parse_args.sh "$@"

# Parse input -- apply all defaults *before* any of them are used.
# BUG FIX: ${imp} was previously used to build target_dataset before its
# default ("original") was assigned, producing a malformed path like
# ".../target//" whenever imp was not passed on the command line.
if [[ -z ${maf} ]]; then maf=0.05; fi
if [[ -z ${geno} ]]; then geno=0.1; fi
if [[ -z ${imp} ]]; then imp="original"; fi
if [[ -z ${memory} ]]; then memory=500000; fi
if [[ -z ${threads} ]]; then threads=80; fi
if [[ -z ${stage} ]]; then stage=2; fi

# ${imp} now always has a value, so the dataset path is well-formed.
target_dataset="${datasets_path}${target}/${imp}/"

# Start pipeline.  ${stage} selects the first step to run; later steps
# always run (stage=1 runs QC+prune+PCA, stage=2 prune+PCA, stage=3 PCA).
if [[ ${stage} -le 1 ]]; then
echo '### QC ###'
# Quality control: keep panel samples, filter by MAF, missingness and HWE.
plink \
  --bfile "${target_dataset}ds" \
  --keep "${datasets_path}${target}/pop.panel" \
  --out "${target_dataset}ds.QC" \
  --memory ${memory} \
  --threads ${threads} \
  --maf ${maf} \
  --geno ${geno} \
  --hwe 1e-6 \
  --make-bed
fi

if [[ ${stage} -le 2 ]]; then
echo '### perform prunning ###'
# LD pruning: 200-SNP window, step 50, r^2 threshold 0.25.
plink \
  --bfile "${target_dataset}ds.QC" \
  --out "${target_dataset}ds" \
  --memory ${memory} \
  --threads ${threads} \
  --indep-pairwise 200 50 0.25
fi

if [[ ${stage} -le 3 ]]; then
echo '### calculate the first 6 PCs ###'
# PCA on the pruned SNP set (writes .eigenvec/.eigenval with 200 PCs).
plink \
  --bfile "${target_dataset}ds.QC" \
  --out "${target_dataset}ds" \
  --memory ${memory} \
  --threads ${threads} \
  --extract "${target_dataset}ds.prune.in" \
  --pca 200
fi
| true |
bd7dad505cbe84ba5863b1dba1614cca2a6369c0 | Shell | richbrowne/f5-cloud-libs | /doc.sh | UTF-8 | 3,529 | 3.703125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# This script generates the README.md file
README_FILE=README.md

# Append the `--help` output of a node script to the README, with each line
# indented so Markdown renders it as a code block.  Blank lines are replaced
# by a single space so the block is not broken up.
#
# FIX: the original set IFS='' globally (leaking into the rest of the
# script) and used `read` without -r, which mangles backslashes in the help
# text.  `IFS= read -r` scoped to the read call preserves both behaviors
# without the leak.
writeHelp () {
    node "$1" --help | while IFS= read -r LINE; do
        if [[ -z $LINE ]]; then
            LINE=" "
        fi
        echo " ""$LINE" >> "$README_FILE"
    done
}
cat > $README_FILE << EOL
[](https://travis-ci.org/F5Networks/f5-cloud-libs)
[](https://coveralls.io/github/F5Networks/f5-cloud-libs)
# Library code and scripts for deploying BIG-IP in a cloud
This project consists of two main parts
- scripts
- Command line scripts for configuring BIG-IP
- These are meant to be called either directly from the command line or from cloud deployment templates
- See usage below
- lib
- Library code for controlling a BIG-IP
- Called from the scripts
## Release notes
### Version 3.6.0
* Add --shell option to scripts/runScript.js
### Version 3.5.0
* Autoscale improvements
* More device group options in autoscale.js
* Use save-on-auto-sync when creating device group
* Fix password syncing in master election
### Version 3.4.0
* Autoscale improvements
* Handle replacing master
* Revoke license if licensed from BIG-IQ
### Version 3.3.0
* License BIG-IP from BIG-IQ 5.2 and 5.3
### Version 3.2.0
* Support for S3 ARN for licensing via BIG-IQ
### Version 3.1.0
* Support for licensing via BIG-IQ
* Support for service discovery
### Version 3.0.1
* Add retry for password-url when licensing via BIG-IQ.
### Version 3.0.0
**This version is not backwards compatible. The format for options on network.js has changed.
See node scripts/network.js --help for details**
* License BIG-IP from BIG-IQ 5.0 and 5.1
* More options for network.js
* Add arbitrary routes
* Support mtu on vlans
* Support port lockdown on self IPs
* Updates to signaling. --wait-for now means 'run if the signal has been sent' rather than 'run when the signal is sent'
* More robust reboot handling.
### Version 2.3.0
* Support for Azure autoscaling
* Support --password-url in network.js
* Restore from stored UCS
### Version 2.2.0
* Restore from saved UCS file if present in storage account
### Version 2.1.0
* Allows for autoscaling and clustering without providing a password in the template
* Adds hash verification for all downloaded files
* Fixes race condition when running multiple f5-cloud-libs scripts at once
### Version 2.0.0
* onboard.js option of --set-password is no longer available, use --update-user instead.
* All scripts that take --password now also support --password-url. Only 'file' URLs are supported for now.
* Added option to suppress console output (--no-console).
* Added support for verifying hash of downloaded f5-cloud-libs tarball.
* Added some parsing of sync messages to get sync to work more often.
## Scripts
### onboard.js
Does initial configuration and provisioning of a BIG-IP.
EOL
writeHelp scripts/onboard.js
cat >> $README_FILE << EOL
### cluster.js
Sets up BIG-IPs in a cluster.
EOL
writeHelp scripts/cluster.js
cat >> $README_FILE << EOL
### autoscale.js
Runs autoscale code to elect master and cluster
EOL
writeHelp scripts/autoscale.js
cat >> $README_FILE << EOL
### network.js
Sets up default gateway, VLANs and self IPs
EOL
writeHelp scripts/network.js
cat >> $README_FILE << EOL
### runScript.js
Runs an arbitrary script.
EOL
writeHelp scripts/runScript.js
| true |
248746e47dced353c7c2b8cedcabd4cc7fa28789 | Shell | Daniel-Pit/burnvideo_node | /server/boot/convert_ntsc_to_image | UTF-8 | 2,222 | 3.234375 | 3 | [] | no_license | #!/bin/bash -e
# Convert an image (local path or https S3 URI) into a 10-second NTSC-DVD
# MPEG clip, letterboxed to 720x576 over a blurred background.
#   $1 - source URI (https://... fetched from the burunvideo S3 bucket) or
#        a local file path
#   $2 - output filename; the literal name "dvd-logo.mpeg" selects a
#        simpler single-pass filter chain
URI=$1
OUTPUT_PATH=$2

BASENAME=$(basename "${URI#https://}")
MOVIE_PATH="/tmp/$BASENAME"
# NOTE(review): if $OUTPUT_PATH ever contains a directory component these
# /tmp scratch names break; presumably callers pass a bare filename -- confirm.
TMP_PATH="/tmp/tmp_$OUTPUT_PATH"
TMP_IMAGEPATH="/tmp/tmpimage_$BASENAME"
TMP_BACKIMAGEPATH="/tmp/backtmpimage_$BASENAME"

echo "URI: $URI"
echo "MOVIE_PATH: $MOVIE_PATH"
echo "BASENAME: $BASENAME"
echo "TMP IMAGE PATH: $TMP_IMAGEPATH"
echo "TMP VIDEO PATH: $TMP_PATH"

if [[ $URI == https://* ]]; then
    # Remote source: download from S3 and remove the local copy on exit.
    function finish() {
        rm -f -- "$MOVIE_PATH"
    }
    trap finish EXIT
    s3cmd get "s3://burunvideo/$BASENAME" "$MOVIE_PATH"
else
    MOVIE_PATH=$URI
fi

OUT_WIDTH=720
OUT_HEIGHT=576
RESIZE_WH="720x576"
echo "RESIZE_WH = $RESIZE_WH"

# Honour the EXIF orientation flag before any geometry work.
mogrify -auto-orient "$MOVIE_PATH"

if [[ $OUTPUT_PATH == "dvd-logo.mpeg" ]]; then
    # Single pass: scale into 720x576 over a blurred, enlarged copy of itself.
    VFILTER="[0:v]scale=w=720:h=576:force_original_aspect_ratio=decrease,split[in0][in1];[in0]scale=ih*16/9+1:-1,boxblur=luma_radius=min(h\,w)/20:luma_power=1:chroma_radius=min(cw\,ch)/20:chroma_power=1[bg];[bg][in1]overlay=(W-w)/2:(H-h)/2,crop=h=iw*9/16"
    ffmpeg -nostats -fflags +genpts -loop 1 -i "$MOVIE_PATH" -lavfi "$VFILTER" -t 10 -aspect 16:9 -target ntsc-dvd -y "$OUTPUT_PATH"
else
    # Two-pass: (1) render a blurred full-frame background clip, then
    # (2) overlay the sharp, fitted image centered on top of it.
    cp "$MOVIE_PATH" "$TMP_IMAGEPATH"
    cp "$MOVIE_PATH" "$TMP_BACKIMAGEPATH"
    mogrify -resize 720x576 "$TMP_IMAGEPATH"
    convert "$MOVIE_PATH" -blur 0x50 "$TMP_BACKIMAGEPATH"
    CROP_WIDTH=$(identify -format "%w" "$TMP_IMAGEPATH")
    CROP_HEIGHT=$(identify -format "%h" "$TMP_IMAGEPATH")
    echo "RESIZED $CROP_WIDTH * $CROP_HEIGHT"
    CROPVALUE="crop=720:576"
    ffmpeg -nostats -fflags +genpts -loop 1 -i "$TMP_BACKIMAGEPATH" -t 10 -vf "scale='if(gt(iw,ih),-1,720)':'if(gt(iw,ih),576,-1)'" -aspect 16:9 -target ntsc-dvd -y "$TMP_PATH"
    # NOTE(review): VFILTER below is assigned but not referenced by the
    # following ffmpeg invocation -- kept for parity with the original.
    VFILTER="[0:v]scale=w=720:h=576:force_original_aspect_ratio=decrease,split[in0][in1];[in0]scale=ih*16/9+1:-1,boxblur=luma_radius=min(h\,w)/20:luma_power=1:chroma_radius=min(cw\,ch)/20:chroma_power=1[bg];[bg][in1]overlay=(W-w)/2:(H-h)/2,crop=min(iw\,ih*(16/9)):ow/(16/9)"
    echo "VFILTER=$VFILTER"
    ffmpeg -nostats -fflags +genpts -i "$TMP_PATH" -i "$TMP_IMAGEPATH" -filter_complex "[0:v][1:v] overlay=(W-$CROP_WIDTH)/2:(H-$CROP_HEIGHT)/2:enable='between(t,0,10)'" -pix_fmt yuv420p -c:a copy -aspect 16:9 -target ntsc-dvd -y "$OUTPUT_PATH"
    rm -f -- "$TMP_PATH"
    rm -f -- "$TMP_IMAGEPATH"
    rm -f -- "$TMP_BACKIMAGEPATH"
fi | true |
cf47eeb406b09e92ce85770e35dc48c639d149a7 | Shell | hmasmoudi/SyphaxOS | /Default/0001-SyphaxOSCore/001_BuildPackagesScripts/0050-grub/PKGBUILD | UTF-8 | 892 | 2.640625 | 3 | [] | no_license | # Maintainer: Hatem Masmoudi <hatem.masmoudi@gmail.com>
# Package metadata (PKGBUILD format; sourced by the build tool).
pkgname=grub
pkgver=2.02
pkgrel=6
pkgdesc="The GRUB package contains the GRand Unified Bootloader."
arch=('x86_64')
url="https://ftp.gnu.org/gnu/grub"
license=('GPL')
groups=('core')
# Upstream release tarball and its expected checksum.
source=("$url/$pkgname-$pkgver.tar.xz")
md5sums=('8a4a2a95aac551fb0fba860ceabfa1d3')
depends=('rootfs')
# Configure and compile GRUB inside the extracted source tree.
build() {
  cd "$srcdir/$pkgname-$pkgver"
  # System-style layout: sbin binaries, /etc config, /boot/grub data.
  # efiemu is disabled (BIOS build) and -Werror is off for newer toolchains.
  ./configure --prefix=/usr \
              --sbindir=/sbin \
              --sysconfdir=/etc \
              --with-bootdir="/boot" \
              --with-grubdir="grub" \
              --enable-device-mapper \
              --enable-grub-mount \
              --disable-efiemu \
              --disable-werror
  make
}
# Install the built files into the packaging root and ship the default
# /etc/default/grub configuration alongside them.
package() {
  cd "$srcdir/$pkgname-$pkgver"
  make DESTDIR="$pkgdir" install
  install -D -m0644 "../../grub.default" "${pkgdir}/etc/default/grub"
}
| true |
8f3355f7b287c081c25a6e567a1d7688501cbad7 | Shell | junghans/charliecloud | /test/common.bash | UTF-8 | 4,789 | 3.5625 | 4 | [
"Apache-2.0"
] | permissive | docker_ok () {
sudo docker images | fgrep -q $1
}
# Abort the whole test run unless the environment variable named by $1 is
# set and non-empty; the error message goes to stderr.
env_require () {
    local name=$1
    if [[ -z ${!name} ]]; then
        printf '$%s is empty or not set\n\n' "$name" >&2
        exit 1
    fi
}
# Sanity-check an unpacked image directory: it must exist, be a directory,
# and occupy at least 3MiB on disk.  The ls lines are debugging aids and
# deliberately never fail the check themselves.
# FIX: quote $1 throughout so paths containing spaces or glob characters
# do not word-split or expand.
image_ok () {
    ls -ld "$1" "$1/WEIRD_AL_YANKOVIC" || true
    test -d "$1"
    ls -ld "$1" || true
    byte_ct=$(du -s -B1 "$1" | cut -f1)
    echo "$byte_ct"
    [[ $byte_ct -ge 3145728 ]] # image is at least 3MiB
}
need_docker () {
    # Skip test if $CH_TEST_SKIP_DOCKER is true. If an argument is provided,
    # use that tag as the missing-prerequisite sentinel file.
    #
    # BUG FIX: the original guarded with [[ $PQ ]], which was always true
    # because $PQ always contained $TARDIR -- so the sentinel logic ran even
    # when no tag argument was given.  Guard on $1 instead, as the comment
    # above describes.
    PQ=
    if [[ -n $1 ]]; then
        PQ=$TARDIR/$1.pq_missing
        rm -f -- "$PQ"
    fi
    if [[ $CH_TEST_SKIP_DOCKER ]]; then
        if [[ -n $PQ ]]; then
            touch -- "$PQ"
        fi
        skip 'Docker not found or user-skipped'
    fi
}
# Skip the calling test if an earlier build stage left a "prerequisites
# missing" sentinel file for the given tag.
prerequisites_ok () {
    local sentinel="$TARDIR/$1.pq_missing"
    if [[ -f $sentinel ]]; then
        skip 'build prerequisites not met'
    fi
}
# Skip the calling test when its declared scope ($1: quick|standard|full)
# exceeds the configured $CH_TEST_SCOPE; any other configured value is a
# hard error.
scope () {
    local wanted=$1
    case $CH_TEST_SCOPE in
        quick)
            [[ $wanted == quick ]] || skip "$wanted scope"
            ;;
        standard)
            [[ $wanted == quick || $wanted == standard ]] || skip "$wanted scope"
            ;;
        full)
            # full scope runs every test
            ;;
        *)
            exit 1
            ;;
    esac
}
# Sanity-check a tarball path: must be an existing, non-empty regular file.
# FIX: quote $1 so paths with spaces or glob characters behave correctly.
tarball_ok () {
    ls -ld "$1" || true
    test -f "$1"
    test -s "$1"
}
# Predictable sorting and collation
export LC_ALL=C
# Disable OpenMPI's process_vm_readv(2)-based single-copy mechanism because
# processes in sibling user namespaces don't have permission to use this
# system call on one another. See issue #126 and the FAQ.
export OMPI_MCA_btl_vader_single_copy_mechanism=none
# Set path to the right Charliecloud. This uses a symlink in this directory
# called "bin" which points to the corresponding bin directory, either simply
# up and over (source code) or set during "make install".
#
# Note that sudo resets $PATH, so if you want to run any Charliecloud stuff
# under sudo, you must use an absolute path.
CH_BIN="$(cd "$(dirname ${BASH_SOURCE[0]})/bin" && pwd)"
CH_BIN="$(readlink -f "$CH_BIN")"
export PATH=$CH_BIN:$PATH
CH_RUN_FILE="$(which ch-run)"
# Record whether ch-run is installed setuid; tests branch on this.
if [[ -u $CH_RUN_FILE ]]; then
    CH_RUN_SETUID=yes
fi
# User-private temporary directory in case multiple users are running the
# tests simultaenously.
btnew=$BATS_TMPDIR/bats.tmp.$USER
mkdir -p $btnew
chmod 700 $btnew
export BATS_TMPDIR=$btnew
[[ $(stat -c '%a' $BATS_TMPDIR) = '700' ]]
# Separate directories for tarballs and images
TARDIR=$CH_TEST_TARDIR
IMGDIR=$CH_TEST_IMGDIR
# Some test variables
EXAMPLE_TAG=$(basename $BATS_TEST_DIRNAME)
EXAMPLE_IMG=$IMGDIR/$EXAMPLE_TAG
CHTEST_TARBALL=$TARDIR/chtest.tar.gz
CHTEST_IMG=$IMGDIR/chtest
# Non-empty $SLURM_JOB_ID means we are inside a Slurm allocation and should
# launch MPI work with srun; otherwise fall back to plain mpirun.
CHTEST_MULTINODE=$SLURM_JOB_ID
if [[ $CHTEST_MULTINODE ]]; then
    MPIRUN_NODE='srun --ntasks-per-node 1'
    MPIRUN_CORE='srun --cpus-per-task 1'
    MPIRUN_2='srun -n 2'
    # $SLURM_NTASKS isn't always set, nor is $SLURM_CPUS_ON_NODE despite the
    # documentation.
    if [[ -z $SLURM_CPUS_ON_NODE ]]; then
        SLURM_CPUS_ON_NODE=$(echo $SLURM_JOB_CPUS_PER_NODE | cut -d'(' -f1)
    fi
    CHTEST_CORES_NODE=$SLURM_CPUS_ON_NODE
    CHTEST_CORES_TOTAL=$(($CHTEST_CORES_NODE * $SLURM_JOB_NUM_NODES))
else
    MPIRUN_NODE='mpirun --map-by ppr:1:node'
    MPIRUN_CORE='mpirun'
    MPIRUN_2='mpirun -np 2'
    CHTEST_CORES_NODE=$(getconf _NPROCESSORS_ONLN)
    CHTEST_CORES_TOTAL=$CHTEST_CORES_NODE
fi
# If the variable CH_TEST_SKIP_DOCKER is true, we skip all the tests that
# depend on Docker. It's true if user-set or command "docker" is not in $PATH.
if ( ! command -v docker >/dev/null 2>&1 ); then
    CH_TEST_SKIP_DOCKER=yes
fi
# Validate CH_TEST_SCOPE and set if empty.
if [[ -z $CH_TEST_SCOPE ]]; then
    CH_TEST_SCOPE=standard
elif [[ $CH_TEST_SCOPE != quick \
     && $CH_TEST_SCOPE != standard \
     && $CH_TEST_SCOPE != full ]]; then
    printf '$CH_TEST_SCOPE value "%s" is invalid\n\n' $CH_TEST_SCOPE >&2
    exit 1
fi
# Do we have sudo?
if ( command -v sudo >/dev/null 2>&1 && sudo -v >/dev/null 2>&1 ); then
    # This isn't super reliable; it returns true if we have *any* sudo
    # privileges, not specifically to run the commands we want to run.
    CHTEST_HAVE_SUDO=yes
fi
# Do we have what we need?
env_require CH_TEST_TARDIR
env_require CH_TEST_IMGDIR
env_require CH_TEST_PERMDIRS
if ( bash -c 'set -e; [[ 1 = 0 ]]; exit 0' ); then
    # Bash bug: [[ ... ]] expression doesn't exit with set -e
    # https://github.com/sstephenson/bats/issues/49
    printf 'Need at least Bash 4.1 for these tests.\n\n' >&2
    exit 1
fi
if [[ ! -x $CH_BIN/ch-run ]]; then
    printf 'Must build with "make" before running tests.\n\n' >&2
    exit 1
fi
if ( mount | fgrep -q $IMGDIR ); then
    printf 'Something is mounted under %s.\n\n' $IMGDIR >&2
    exit 1
fi
d7437a6bc80a9de503bf39949b4339b82a880469 | Shell | Sefriol/ReactorChat | /backend/api/runserver.sh | UTF-8 | 854 | 3.796875 | 4 | [] | no_license | #!/bin/sh
# Restart the docker-compose stack.
#   -rm | --remove : also delete all stopped containers first
#   -d  | --deploy : use the deployment overlay instead of the dev overlay
remove=false
deployment=false

# Parse command-line flags.
while [ $# -gt 0 ]
do
key="$1"
case $key in
    -rm|--remove)
    remove=true
    shift # past argument
    ;;
    -d|--deploy)
    deployment=true
    shift # past argument
    ;;
    *)
    # BUG FIX: unknown options must be consumed too -- the original did not
    # shift here, so any unrecognized argument spun this loop forever.
    shift # past unknown argument
    ;;
esac
done

echo "\n======>>> Stopping all docker containers...\n"
sleep 1
docker-compose stop
#docker stop $(docker ps -a -q)

if [ "$remove" = true ]; then
  echo "\n======>>> Removing all docker containers...\n"
  sleep 1
  docker rm $(docker ps -a -q)
fi

if [ "$deployment" = true ]; then
  echo "\n======>>> Running docker-compose in deployment mode...\n"
  sleep 1
  docker-compose -f docker-compose.yml -f deploy.yml up --build -d
else
  echo "\n======>>> Running docker-compose in development mode...\n"
  sleep 1
  docker-compose -f docker-compose.yml -f dev.yml up --build -d
fi
| true |
640e5cf7d4fa6e3cefe27988b59672edf6f21d88 | Shell | wisobi/leanbean | /update-versions.sh | UTF-8 | 757 | 2.9375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Read the current project version from the API module's POM (the grep
# strips maven's log/download noise from the help plugin's output).
POM_VERSION=$(mvn -f leanbean-api/pom.xml org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version|grep -Ev '(^\[|Download\w+:)')
# Strip any "-SNAPSHOT"-style suffix, then bump the patch component.
VERSION=$(cut -f1 -d"-" <<< "$POM_VERSION")
RELEASE=$(echo $VERSION | ( IFS=".$IFS" ; read a b c && echo $a.$b.$((c + 1)) ))
# NOTE(review): the commands below are only *echoed*, never executed --
# presumably a dry-run; confirm whether eval/execution was intended.
CMD="mvn -f leanbean-api/pom.xml --batch-mode release:update-versions -DdevelopmentVersion=${RELEASE}-SNAPSHOT"
echo "Running $CMD"
CMD="mvn -f leanbean-google/pom.xml --batch-mode release:update-versions -DdevelopmentVersion=${RELEASE}-SNAPSHOT"
echo "Running $CMD"
CMD="sed -i 's/<widget id=\"com.wisobi.leanbean\" version=\"${VERSION}\"/<widget id=\"com.wisobi.leanbean\" version=\"${RELEASE}\"/g' leanbean-mobile/config.xml"
echo "Running $CMD" | true |
e8b43b805d3e281e3d883466af4d368af5d88533 | Shell | makerspaze/todos | /tools/start_services.sh | UTF-8 | 1,495 | 3.46875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Copyright 2015 The Vanadium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Expects credentials in creds dir, generated using "make creds".
# Strict mode; the trap guarantees child daemons are reaped on any exit.
set -euo pipefail
trap kill_child_processes INT TERM EXIT
# Run a command, discarding all of its output and ignoring any failure.
silence() {
  "$@" > /dev/null 2>&1 || true
}
# Copied from chat example app.
# Stop every child process of this script: first politely with SIGTERM,
# then (after a grace second) forcibly with SIGKILL via the invoking user.
kill_child_processes() {
  # Attempt to stop child processes using the TERM signal.
  if [[ -n "$(jobs -p -r)" ]]; then
    silence pkill -P $$
    sleep 1
    # Kill any remaining child processes using the KILL signal.
    if [[ -n "$(jobs -p -r)" ]]; then
      # NOTE: relies on $SUDO_USER, i.e. the script being run under sudo.
      silence sudo -u "${SUDO_USER}" pkill -9 -P $$
    fi
  fi
}
# Start a local mount table and a syncbase daemon on consecutive ports
# (base $PORT, default 4000), then block forever so the trap-based cleanup
# owns their lifetime.
main() {
  local -r TMP=tmp
  local -r PORT=${PORT-4000}
  local -r MOUNTTABLED_ADDR=":$((PORT+1))"
  local -r SYNCBASED_ADDR=":$((PORT+2))"
  mkdir -p $TMP
  # TODO(sadovsky): Run mounttabled and syncbased each with its own blessing
  # extension.
  ./bin/mounttabled \
    --v23.tcp.address=${MOUNTTABLED_ADDR} \
    --v23.credentials=creds &
  ./bin/syncbased \
    --v=5 \
    --alsologtostderr=false \
    --root-dir=${TMP}/syncbase_${PORT} \
    --name=syncbase \
    --v23.namespace.root=/${MOUNTTABLED_ADDR} \
    --v23.tcp.address=${SYNCBASED_ADDR} \
    --v23.credentials=creds \
    --v23.permissions.literal='{"Admin":{"In":["..."]},"Write":{"In":["..."]},"Read":{"In":["..."]},"Resolve":{"In":["..."]},"Debug":{"In":["..."]}}'
  tail -f /dev/null  # wait forever
}

main "$@"
| true |
f5924af605ccc616dc1892cd44322fee915a18b7 | Shell | open-power-ref-design/accelerated-db | /setup_git_repo.sh | UTF-8 | 1,759 | 4.40625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Set up a local git repository based on a specified upstream; and check
# out a specific commit.
#
# Exit 0 on success; exit 1 on failure

if [ -z "$3" -o ! -z "$4" ]
then
    echo "Usage: $(basename $0) <git-repo-url> <local-dir> <commit-ID>"
    exit 1
fi

REMOTE="$1"
LOCAL="$2"
COMMIT="$3"
OPWD=$(pwd)

if [ -e "${LOCAL}" ]
then
    if [ -d "${LOCAL}" ]
    then
        # Directory already exists; try to use it
        if ! cd "${LOCAL}"
        then
            echo "ERROR: Can't cd to existing ${LOCAL}"
            exit 1
        fi
        # BUG FIX: the original tested "git rev-parse --show-top-level",
        # which is not a valid option (correct spelling: --show-toplevel),
        # so every valid repo was misreported as "not a git repo".
        if ! git rev-parse --show-toplevel > /dev/null 2>&1
        then
            echo "ERROR: Existing ${LOCAL} directory is not a git repo"
            cd "$OPWD"
            exit 1
        else
            # Directory is a git repo; refresh meta-data
            if ! git fetch -q
            then
                echo "ERROR: git fetch into existing ${LOCAL} repo failed"
                cd "$OPWD"
                exit 1
            else
                echo "Successful git fetch to existing ${LOCAL}"
            fi
        fi
    else
        echo "ERROR: ${LOCAL} exists but is not a directory"
        cd "$OPWD"
        exit 1
    fi
else
    # Doesn't already exist; clone the source
    if ! git clone ${REMOTE} ${LOCAL} --recursive
    then
        echo "ERROR: git clone into new ${LOCAL} failed"
        exit 1
    fi
    if ! cd ${LOCAL}
    then
        echo "ERROR: Can't cd to new ${LOCAL}"
        exit 1
    fi
fi

# If we reach this point, then we have an up-to-date repo in $LOCAL,
# and we're cd'd into that dir. Check out our target commit.
if ! git checkout ${COMMIT}
then
    echo "ERROR: Can't check out commit ${COMMIT} in ${LOCAL}"
    exit 1
fi

cd "$OPWD"
exit 0
| true |
fc3f35a3585148eca9d2da2181045626f38c5e21 | Shell | DeH4er/suckless | /utils.sh | UTF-8 | 287 | 3.125 | 3 | [] | no_license | #!/bin/sh
# Directory holding the suckless source trees and the apps to operate on.
src="$HOME/source"
apps="dwm st surf dmenu slstatus tabbed"
# Terminal colors for log output (reset restores defaults).
appcolor=`tput setaf 9`
cmdcolor=`tput setaf 13`
reset=`tput sgr0`
# Print a colored progress banner: "[ current/total ----- name ----- ]".
# $1 = current index, $2 = total count, $3 = app name.
logapp () {
    printf '%s[ %s/%s ----- %s ----- ]%s\n' "$appcolor" "$1" "$2" "$3" "$reset"
}
# Print a command in color, then run it.  $1 is deliberately left unquoted
# so a whole command line passed as one string word-splits into command and
# arguments (this breaks for arguments containing spaces -- known tradeoff).
logexec () {
    echo "$cmdcolor[ --- $1 ]$reset"
    $1
}
# Loop counters: current app index and total app count.
# NOTE(review): presumably consumed by an iteration loop outside this
# chunk -- confirm against the rest of the file.
i=1
l=`echo $apps | wc -w`
| true |
43ce47c54962b0462c81713f62812fe455c0deda | Shell | madmpro/dotfiles | /src/os/preferences/macos/privacy.sh | UTF-8 | 688 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Run from this script's own directory so the relative utils path resolves.
cd "$(dirname "${BASH_SOURCE[0]}")" \
    && . "../../utils.sh"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
print_in_purple "\n   Setting privacy\n\n"
# hide a computer name on a network
sudo /usr/libexec/PlistBuddy -c "Add :ProgramArguments: string '-NoMulticastAdvertisements'" /System/Library/LaunchDaemons/com.apple.mDNSResponder.plist
# To turn back on:
# sudo /usr/libexec/PlistBuddy -c "Delete :ProgramArguments:2" /System/Library/LaunchDaemons/com.apple.mDNSResponder.plist
# Reload mDNSResponder so the new launch argument takes effect immediately.
sudo launchctl unload /System/Library/LaunchDaemons/com.apple.mDNSResponder.plist
sudo launchctl load /System/Library/LaunchDaemons/com.apple.mDNSResponder.plist
| true |
0450f13958eb87594c4f5d6e4b63a8f0d53cc50d | Shell | EthanJWright/opensync | /src/fut/shell/lib/sm_lib.sh | UTF-8 | 14,631 | 3.046875 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | #!/bin/sh
# Copyright (c) 2015, Plume Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Plume Design Inc. nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Plume Design Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Include basic environment config
if [ -e "/tmp/fut_set_env.sh" ]; then
    source /tmp/fut_set_env.sh
else
    source "${FUT_TOPDIR}/shell/config/default_shell.sh"
fi
# Pull in the WM2 helper library plus any device-specific overrides.
source "${FUT_TOPDIR}/shell/lib/wm2_lib.sh"
source "${LIB_OVERRIDE_FILE}"
############################################ INFORMATION SECTION - START ###############################################
#
# Base library of common Stats Manager functions
#
############################################ INFORMATION SECTION - STOP ################################################
############################################ SETUP SECTION - START #####################################################
# Marker line emitted on every successful log inspection.
sm_log_test_pass_msg="---------------------------------- OK ----------------------------------"
# Prepare the device for SM test cases: set up the WM environment, start
# LM (falling back to PM when LM is unavailable), QM (report queue
# handling) and SM, then raise the SM log level to TRACE.
# Raises an exception (via raise) on any failure.
sm_setup_test_environment()
{
    fn_name="sm_lib:sm_setup_test_environment"
    wm_setup_test_environment "$@" &&
        log -deb "$fn_name - wm_setup_test_environment - Success" ||
        raise "- wm_setup_test_environment - Failed" -l "$fn_name" -ds
    # Check if LM can be started, if not try starting PM.
    # If it fails raise an exception.
    start_specific_manager lm
    if [ $? -eq 0 ]; then
        log -deb "$fn_name - start_specific_manager lm - Success"
    else
        log -deb "$fn_name - start_specific_manager lm - Failed. Trying to start pm instead"
        start_specific_manager pm
        if [ $? -eq 0 ]; then
            log -deb "$fn_name - start_specific_manager pm - Success"
        else
            raise "Both start_specific_manager lm, start_specific_manager pm - Failed" -l "$fn_name" -ds
        fi
    fi
    # QM start for report queue handling
    start_specific_manager qm &&
        log -deb "$fn_name - start_specific_manager qm - Success" ||
        raise "start_specific_manager qm - Failed" -l "$fn_name" -ds
    start_specific_manager sm &&
        log -deb "$fn_name - start_specific_manager sm - Success" ||
        raise "start_specific_manager sm - Failed" -l "$fn_name" -ds
    empty_ovsdb_table AW_Debug
    set_manager_log SM TRACE
}
# Insert one row into the Wifi_Stats_Config OVSDB table.
# Arguments:
#   $1 radio_type, $2 channel_list, $3 stats_type, $4 survey_type
#      (empty -> empty OVSDB set), $5 reporting_interval,
#   $6 sampling_interval, $7 report_type
insert_ws_config()
{
    fn_name="sm_lib:insert_ws_config"
    sm_radio_type=$1
    sm_channel_list=$2
    sm_stats_type=$3
    sm_survey_type=$4
    sm_reporting_interval=$5
    sm_sampling_interval=$6
    sm_report_type=$7
    log -deb "$fn_name - Inserting Wifi_Stats_Config config"
    if [ -z "$sm_survey_type" ]; then
        sm_survey_type="[\"set\",[]]"
    fi
    insert_ovsdb_entry Wifi_Stats_Config \
        -i radio_type "$sm_radio_type" \
        -i channel_list "$sm_channel_list" \
        -i stats_type "$sm_stats_type" \
        -i survey_type "$sm_survey_type" \
        -i reporting_interval "$sm_reporting_interval" \
        -i sampling_interval "$sm_sampling_interval" \
        -i report_type "$sm_report_type" ||
            raise "Failed insert_ovsdb_entry" -l "$fn_name" -oe
    log -deb "$fn_name - Wifi_Stats_Config config Inserted"
}
############################################ SETUP SECTION - STOP #####################################################
############################################ TEST CASE SECTION - START #################################################
# Verify that the device log contains the expected SM survey message for
# the given radio/channel/survey type.
# Arguments: $1 radio_type, $2 channel, $3 survey_type,
#   $4 log stage to look for: processing_survey | scheduled_scan |
#      fetched_survey | sending_survey_report
check_survey_report_log()
{
    fn_name="sm_lib:check_survey_report_log"
    sm_radio_type=$1
    sm_channel=$2
    sm_survey_type=$3
    sm_log_type=$4
    case $sm_log_type in
        *processing_survey*)
            sm_log_msg="Checking logs for survey $sm_radio_type channel $sm_channel reporting processing survey"
            sm_die_msg="No survey processing done on $sm_radio_type $sm_survey_type-chan on channel $sm_channel"
            match_pattern_for_log_inspecting="Processing $sm_radio_type .* $sm_survey_type $sm_channel"
            ;;
        *scheduled_scan*)
            sm_log_msg="Checking logs for survey $sm_radio_type channel $sm_channel reporting scheduling survey"
            sm_die_msg="No survey scheduling done on $sm_radio_type $sm_survey_type on channel $sm_channel"
            match_pattern_for_log_inspecting="Scheduled $sm_radio_type $sm_survey_type $sm_channel scan"
            ;;
        *fetched_survey*)
            sm_log_msg="Checking logs for survey $sm_radio_type channel $sm_channel reporting fetched survey"
            sm_die_msg="No survey fetching done on $sm_radio_type $sm_survey_type on channel $sm_channel"
            match_pattern_for_log_inspecting="Fetched $sm_radio_type $sm_survey_type $sm_channel survey"
            ;;
        *sending_survey_report*)
            sm_log_msg="Checking logs for survey $sm_radio_type channel $sm_channel reporting sending survey"
            sm_die_msg="No survey sending done on $sm_radio_type $sm_survey_type on channel $sm_channel"
            match_pattern_for_log_inspecting="Sending $sm_radio_type .* $sm_survey_type $sm_channel survey report"
            ;;
        *)
            raise "Incorrect sm_log_type provided" -l "$fn_name" -arg
    esac
    log "$fn_name - $sm_log_msg"
    # Inspect only the recent tail of the log; raise a test-case failure
    # if the expected line never appears within the wait window.
    wait_for_function_response 0 "$LOGREAD | tail -250 | grep -q \"$match_pattern_for_log_inspecting\"" &&
        log -deb "$fn_name - $sm_log_test_pass_msg" ||
        raise "$sm_die_msg" -l "$fn_name" -tc
}
# End-to-end survey check: configure a survey in Wifi_Stats_Config and then
# verify all four log stages (processing, scheduling, fetching, sending).
# Arguments: $1 radio_type, $2 channel, $3 survey_type,
#   $4 reporting_interval, $5 sampling_interval, $6 report_type
inspect_survey_report()
{
    fn_name="sm_lib:inspect_survey_report"
    sm_radio_type=$1
    sm_channel=$2
    sm_survey_type=$3
    sm_reporting_interval=$4
    sm_sampling_interval=$5
    sm_report_type=$6
    sm_stats_type="survey"
    sm_channel_list="[\"set\",[$sm_channel]]"
    # Start from a clean table so only this survey config is active.
    empty_ovsdb_table Wifi_Stats_Config ||
        raise "Failed empty_ovsdb_table Wifi_Stats_Config" -l "$fn_name" -tc
    insert_ws_config \
        "$sm_radio_type" \
        "$sm_channel_list" \
        "$sm_stats_type" \
        "$sm_survey_type" \
        "$sm_reporting_interval" \
        "$sm_sampling_interval" \
        "$sm_report_type"
    check_survey_report_log "$sm_radio_type" "$sm_channel" "$sm_survey_type" processing_survey
    check_survey_report_log "$sm_radio_type" "$sm_channel" "$sm_survey_type" scheduled_scan
    check_survey_report_log "$sm_radio_type" "$sm_channel" "$sm_survey_type" fetched_survey
    check_survey_report_log "$sm_radio_type" "$sm_channel" "$sm_survey_type" sending_survey_report
    # Clean up after the check so later tests start from an empty table.
    empty_ovsdb_table Wifi_Stats_Config ||
        raise "Failed empty_ovsdb_table Wifi_Stats_Config" -l "$fn_name" -tc
    return 0
}
# Verify that the device log contains the expected SM neighbor message.
# Arguments: $1 radio_type, $2 channel, $3 survey_type,
#   $4 log stage: add_neighbor | parsed_neighbor_bssid |
#      parsed_neighbor_ssid | sending_neighbor,
#   $5 neighbor MAC, $6 neighbor SSID
check_neighbor_report_log()
{
    fn_name="sm_lib:check_neighbor_report_log"
    sm_radio_type=$1
    sm_channel=$2
    sm_survey_type=$3
    sm_log_type=$4
    sm_neighbor_mac=$5
    sm_neighbor_ssid=$6
    case $sm_log_type in
        *add_neighbor*)
            sm_log_msg="Checking for $sm_radio_type neighbor adding for $sm_neighbor_mac"
            sm_die_msg="No neighbor $sm_neighbor_mac was added"
            match_pattern_for_log_inspecting="Adding $sm_radio_type .* $sm_survey_type neighbor {bssid='$sm_neighbor_mac' ssid='$sm_neighbor_ssid' .* chan=$sm_channel}"
            ;;
        *parsed_neighbor_bssid*)
            sm_log_msg="Checking for $sm_radio_type neighbor parsing of bssid $sm_neighbor_mac"
            sm_die_msg="No neighbor $sm_neighbor_mac was parsed"
            match_pattern_for_log_inspecting="Parsed $sm_radio_type BSSID $sm_neighbor_mac"
            ;;
        *parsed_neighbor_ssid*)
            sm_log_msg="Checking for $sm_radio_type neighbor parsing of ssid $sm_neighbor_ssid"
            sm_die_msg="No neighbor $sm_neighbor_ssid was parsed"
            match_pattern_for_log_inspecting="Parsed $sm_radio_type SSID $sm_neighbor_ssid"
            ;;
        *sending_neighbor*)
            sm_log_msg="Checking for $sm_radio_type neighbor sending of $sm_neighbor_mac"
            sm_die_msg="No neighbor $sm_neighbor_mac was added"
            match_pattern_for_log_inspecting="Sending $sm_radio_type .* $sm_survey_type neighbors {bssid='$sm_neighbor_mac' ssid='$sm_neighbor_ssid' .* chan=$sm_channel}"
            ;;
        *)
            raise "Incorrect sm_log_type provided" -l "$fn_name" -arg
    esac
    log "$fn_name - $sm_log_msg"
    wait_for_function_response 0 "$LOGREAD | tail -250 | grep -q \"$match_pattern_for_log_inspecting\"" &&
        log -deb "$fn_name - $sm_log_test_pass_msg" ||
        raise "$sm_die_msg" -l "$fn_name" -tc
}
# End-to-end neighbor check: configure a survey plus a neighbor report in
# Wifi_Stats_Config, then verify the four neighbor log stages.
# Arguments: $1 radio_type, $2 channel, $3 survey_type,
#   $4 reporting_interval, $5 sampling_interval, $6 report_type,
#   $7 neighbor SSID, $8 neighbor MAC (normalized to upper case)
inspect_neighbor_report()
{
    fn_name="sm_lib:inspect_neighbor_report"
    sm_radio_type=$1
    sm_channel=$2
    sm_survey_type=$3
    sm_reporting_interval=$4
    sm_sampling_interval=$5
    sm_report_type=$6
    sm_neighbor_ssid=$7
    # Log matching below expects upper-case MAC addresses.
    sm_neighbor_mac=$(echo "$8" | tr a-z A-Z)
    if [ -z "$sm_neighbor_mac" ] || [ -z "$sm_neighbor_ssid" ]; then
        raise "Empty neighbor MAC address, or neighbor ssid" -l "$fn_name" -arg
    fi
    sm_channel_list="[\"set\",[$sm_channel]]"
    empty_ovsdb_table Wifi_Stats_Config ||
        raise "Failed empty_ovsdb_table Wifi_Stats_Config" -l "$fn_name" -tc
    # A survey config is required alongside the neighbor config so SM has
    # scan results to derive neighbors from.
    insert_ws_config \
        "$sm_radio_type" \
        "$sm_channel_list" \
        "survey" \
        "$sm_survey_type" \
        "$sm_reporting_interval" \
        "$sm_sampling_interval" \
        "$sm_report_type"
    insert_ws_config \
        "$sm_radio_type" \
        "$sm_channel_list" \
        "neighbor" \
        "$sm_survey_type" \
        "$sm_reporting_interval" \
        "$sm_sampling_interval" \
        "$sm_report_type"
    check_neighbor_report_log "$sm_radio_type" "$sm_channel" "$sm_survey_type" add_neighbor "$sm_neighbor_mac" "$sm_neighbor_ssid"
    check_neighbor_report_log "$sm_radio_type" "$sm_channel" "$sm_survey_type" parsed_neighbor_bssid "$sm_neighbor_mac" "$sm_neighbor_ssid"
    check_neighbor_report_log "$sm_radio_type" "$sm_channel" "$sm_survey_type" parsed_neighbor_ssid "$sm_neighbor_mac" "$sm_neighbor_ssid"
    check_neighbor_report_log "$sm_radio_type" "$sm_channel" "$sm_survey_type" sending_neighbor "$sm_neighbor_mac" "$sm_neighbor_ssid"
    empty_ovsdb_table Wifi_Stats_Config ||
        raise "Failed empty_ovsdb_table Wifi_Stats_Config" -l "$fn_name" -tc
    return 0
}
# Verify that the device log contains the expected SM leaf/client message.
# Arguments: $1 radio_type, $2 client MAC (normalized to upper case),
#   $3 log stage: connected | client_parsing | client_update | sending
check_leaf_report_log()
{
    fn_name="sm_lib:check_leaf_report_log"
    sm_radio_type=$1
    sm_client_mac_address=$(echo "$2" | tr a-z A-Z)
    sm_log_type=$3
    case $sm_log_type in
        *connected*)
            sm_log_msg="Checking logs for leaf reporting radio $sm_radio_type connection established"
            sm_die_msg="No client $sm_client_mac_address connected for reporting"
            match_pattern_for_log_inspecting="Marked $sm_radio_type .* client $sm_client_mac_address connected"
            ;;
        *client_parsing*)
            sm_log_msg="Checking logs for leaf parsing $sm_client_mac_address"
            sm_die_msg="No client $sm_client_mac_address parsed"
            match_pattern_for_log_inspecting="Parsed $sm_radio_type client MAC $sm_client_mac_address"
            ;;
        *client_update*)
            sm_log_msg="Checking logs for leaf entry update $sm_client_mac_address"
            sm_die_msg="No client $sm_client_mac_address updated"
            match_pattern_for_log_inspecting="Updating $sm_radio_type .* client $sm_client_mac_address entry"
            ;;
        *sending*)
            sm_log_msg="Checking logs for leaf $sm_client_mac_address $sm_radio_type sample sending"
            sm_die_msg="No client $sm_client_mac_address $sm_radio_type sample sending initiated"
            match_pattern_for_log_inspecting="Sending $sm_radio_type .* client $sm_client_mac_address stats"
            ;;
        *)
            raise "Incorrect sm_log_type provided" -l "$fn_name" -arg
    esac
    log -deb "$fn_name - $sm_log_msg"
    wait_for_function_response 0 "$LOGREAD | tail -250 | grep -q \"$match_pattern_for_log_inspecting\"" &&
        log -deb "$fn_name - $sm_log_test_pass_msg" ||
        raise "$sm_die_msg" -l "$fn_name" -tc
}
# End-to-end leaf/client check: configure survey and client stats for the
# radio (channel/survey type left empty so all channels apply) and verify
# the four leaf log stages for the given client MAC.
# Arguments: $1 radio_type, $2 reporting_interval, $3 sampling_interval,
#   $4 report_type, $5 leaf (client) MAC address
inspect_leaf_report()
{
    fn_name="sm_lib:inspect_leaf_report"
    sm_radio_type=$1
    sm_reporting_interval=$2
    sm_sampling_interval=$3
    sm_report_type=$4
    sm_leaf_mac=$5
    if [ -z "$sm_leaf_mac" ]; then
        raise "Empty leaf MAC address" -l "$fn_name" -arg
    fi
    empty_ovsdb_table Wifi_Stats_Config ||
        raise "Failed empty_ovsdb_table Wifi_Stats_Config" -l "$fn_name" -oe
    insert_ws_config \
        "$sm_radio_type" \
        "[\"set\",[]]" \
        "survey" \
        "[\"set\",[]]" \
        "$sm_reporting_interval" \
        "$sm_sampling_interval" \
        "$sm_report_type"
    insert_ws_config \
        "$sm_radio_type" \
        "[\"set\",[]]" \
        "client" \
        "[\"set\",[]]" \
        "$sm_reporting_interval" \
        "$sm_sampling_interval" \
        "$sm_report_type"
    check_leaf_report_log "$sm_radio_type" "$sm_leaf_mac" connected
    check_leaf_report_log "$sm_radio_type" "$sm_leaf_mac" client_parsing
    check_leaf_report_log "$sm_radio_type" "$sm_leaf_mac" client_update
    check_leaf_report_log "$sm_radio_type" "$sm_leaf_mac" sending
    empty_ovsdb_table Wifi_Stats_Config ||
        raise "Failed empty_ovsdb_table Wifi_Stats_Config" -l "$fn_name" -oe
    return 0
}
############################################ TEST CASE SECTION - STOP ##################################################
| true |
51a6b0ee9e48a88b4ae959e299bc50fbf93ff9dc | Shell | mgoffin/rcfiles | /.zshrc | UTF-8 | 979 | 2.609375 | 3 | [] | no_license | source <(antibody init)
# Load zsh plugins declared in the antibody bundle file.
antibody bundle < ~/.zsh_plugins.txt
# oh-my-zsh plugins
plugins=(
  autopep8
  battery
  colored-man-pages
  colorize
  command-not-found
  common-aliases
  git
  git-prompt
  pyenv
  python
  virtualenv
  virtualenvwrapper
  vscode
)
# pyenv / rbenv setup; PATH must include their bin dirs before init runs.
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:/$HOME/.rbenv/bin:$PATH"
export VIRTUALENVWRAPPER_PYTHON=/home/mjg/.pyenv/shims/python
export PYENV_VIRTUALENV_DISABLE_PROMPT=1
export WORKON_HOME=/home/mjg/.pyenv
eval "$(rbenv init -)"
if command -v pyenv 1>/dev/null 2>&1; then
  eval "$(pyenv init -)"
  eval "$(pyenv virtualenv-init -)"
fi
pyenv shell 3.7.0
# Convenience aliases (colorls-based ls replacements).
alias rezsh="source ~/.zshrc"
alias ls='colorls -h --group-directories-first -1'
alias l='colorls --group-directories-first --almost-all'
alias ll='colorls --group-directories-first --almost-all --long'
export VISUAL="vim"
export EDITOR="$VISUAL"
# Keep Python from writing .pyc files.
export PYTHONDONTWRITEBYTECODE=1
source $(dirname $(gem which colorls))/tab_complete.sh
| true |
87b7b75f7caddbaee3ddef55ccd79765552ea3ab | Shell | sadsunshower/c-obfuscation-workshop | /scratch/wcr.sh | UTF-8 | 394 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# wcr - watch, compile, run!
#
# Usage: wcr.sh <file.c>
# Recompiles the watched file with clang and runs the binary every time
# the file is saved (inotify close_write event).

# Print the red "watch, compile, run" banner for the watched file.
banner() {
    clear
    echo -e "\e[91mwatch, compile, run\e[0m watching for changes on $1...\n"
}

banner "$1"
# -q quiet, -m monitor forever; close_write fires when a save completes.
# "$1" is quoted so filenames containing spaces are watched correctly.
inotifywait -q -m -e close_write --format %e "$1" |
while read -r events; do
    banner "$1"
    # Run the program only when it compiled cleanly; previously a stale
    # binary from an earlier successful build was executed silently.
    if clang -Wall -o program "$1"; then
        echo -e "\n== Output =="
        ./program
        echo "== End of Output =="
    fi
done | true |
6c68e7bc27cb5889aee4a85993a558debe408ee3 | Shell | jpikl/dotfiles | /.profile.d/40-cargo.sh | UTF-8 | 457 | 2.53125 | 3 | [] | no_license | # shellcheck shell=bash
# TODO: Use separate config/cache dirs once the following issue is resolved: https://github.com/rust-lang/cargo/pull/9178
# Change the default CARGO_HOME install root to ~/.local
# NOTE(review): $USER_LOCAL_DIR is assumed to be exported by an earlier
# .profile.d snippet - confirm it is set before this file is sourced.
export CARGO_INSTALL_ROOT=${CARGO_INSTALL_ROOT:-$USER_LOCAL_DIR}
# Update path for rust binaries
# CARGO_INSTALL_ROOT applies only to `cargo install`.
# CARGO_HOME/bin might still contain rustup binaries.
export PATH=$PATH:${CARGO_HOME:-~/.cargo}/bin
| true |
6ffc7dbc7a05c7493dcb39a82cf577773404ae67 | Shell | MenkeTechnologies/zpwr | /autoload/common/zal | UTF-8 | 233 | 3.125 | 3 | [
"MIT"
] | permissive | # -*- mode: sh -*-
# vim: set ft=sh:
# Change directory to the ZPWR autoload dir, or log an error if it is absent.
function zal(){
    if [[ ! -d "$ZPWR_AUTOLOAD" ]]; then
        zpwrLogConsoleErr "ZPWR_AUTOLOAD '$ZPWR_AUTOLOAD' is not a directory."
    else
        zpwrCd "$ZPWR_AUTOLOAD"
    fi
}
# Run the function immediately with whatever arguments this file received.
zal "$@"
| true |
9f29f2f6edc44700499a26fd0278af1e4dc9f5bd | Shell | everscalecodes/freeton-rustnode-ansible | /roles/monitoring_agent/files/scripts/ton-election-is-active-local.sh | UTF-8 | 1,088 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -eE
# export ton environments
. ton-env.sh
# Fail fast if the required console binary/config env vars are missing.
ton-check-env.sh TON_CONSOLE
ton-check-env.sh TON_CONSOLE_CONFIG
CURRENT_UNIXTIME=$(date +%s)
# getconfig 15 carries the election timing offsets, getconfig 34 carries
# utime_until (end of the current validation period). The console prints
# "... param: {json}", so awk splits on "param:" to extract the JSON part.
TON_CONFIG_15=$($TON_CONSOLE -C $TON_CONSOLE_CONFIG -c "getconfig 15")
TON_CONFIG_15_JSON=$(echo $TON_CONFIG_15 | awk '{split($0, a, "param:"); print a[2]}')
TON_CONFIG_34=$($TON_CONSOLE -C $TON_CONSOLE_CONFIG -c "getconfig 34")
TON_CONFIG_34_JSON=$(echo $TON_CONFIG_34 | awk '{split($0, a, "param:"); print a[2]}')
TON_CURRENT_VALIDATION_END=$(echo $TON_CONFIG_34_JSON | jq '.p34.utime_until')
TON_ELECTIONS_START_BEFORE=$(echo $TON_CONFIG_15_JSON | jq '.p15.elections_start_before')
TON_ELECTIONS_END_BEFORE=$(echo $TON_CONFIG_15_JSON | jq '.p15.elections_end_before')
# Election window: [validation_end - start_before, validation_end - end_before].
TON_ELECTIONS_START=$(($TON_CURRENT_VALIDATION_END - $TON_ELECTIONS_START_BEFORE))
TON_ELECTIONS_END=$(($TON_CURRENT_VALIDATION_END - $TON_ELECTIONS_END_BEFORE))
# Print "0" when now is outside the election window, "1" when inside.
if (( $CURRENT_UNIXTIME>=$TON_ELECTIONS_END ));
then
echo "0"
exit 0
fi
if (( $CURRENT_UNIXTIME<=$TON_ELECTIONS_START ));
then
echo "0"
exit 0
fi
echo "1"
| true |
97326747af19c9188f8f4cfa2fcfb17f32d18100 | Shell | jacopotediosi/WebServersCommonConfigs | /etc/update-motd.d/70-tls | UTF-8 | 1,257 | 3.953125 | 4 | [] | no_license | #!/usr/bin/env bash
# REQUIREMENTS: openssl
#Remember to change below configs
## CONFIG
# Parallel arrays: certificates[i] is checked and reported under the
# display name certificateNames[i].
certificates=(
    "/etc/letsencrypt/live/example.com/fullchain.pem"
    "/etc/letsencrypt/live/example2.com/fullchain.pem"
)
certificateNames=(
    "Example.com"
    "Example2.com"
)
## END CONFIG
# Print the notAfter ("expiry") date of a PEM certificate ($1) as a Unix
# timestamp, by parsing openssl's "notAfter=<date>" output.
expiry_date () {
    unix_date "$(openssl x509 -enddate -noout -in "$1" | cut -d '=' -f 2)"
}
# Convert a date(1) expression (e.g. "now", "1 week", "@epoch") to
# seconds since the Unix epoch. Requires GNU date (-d).
unix_date () {
    date +%s -d "$@"
}
# Format the remaining time between two Unix timestamps ($1 = expiry,
# $2 = now) as "N days" when more than 48 hours remain, else "N hours".
expires_in () {
    local remaining_secs=$(( $1 - $2 ))
    local remaining_hours=$(( remaining_secs / 3600 ))
    if (( remaining_hours > 48 )); then
        echo "$(( remaining_hours / 24 )) days"
    else
        echo "$remaining_hours hours"
    fi
}
# Render the report: one coloured status line per configured certificate
# (red = expired, yellow = expires within a week, green = ok).
echo ""
echo "TLS Certificates:"
for idx in "${!certificates[@]}"; do
    cert="${certificates[$idx]}"
    name="${certificateNames[$idx]}"
    expiry_ts=$(expiry_date "$cert")
    now_ts=$(unix_date "now")
    week_ts=$(unix_date "1 week")
    remaining=$(expires_in "$expiry_ts" "$now_ts")
    if [ "$expiry_ts" -le "$now_ts" ]; then
        echo -e " ${name}: \e[31m▲ expired\e[0m"
    elif [ "$expiry_ts" -le "$week_ts" ]; then
        echo -e " ${name}: \e[33m● expiring soon ($remaining left)\e[0m"
    else
        echo -e " ${name}: \e[32m● expires in $remaining\e[0m"
    fi
done
| true |
3a813adb10845efbbfa5092bd4987afba256a51a | Shell | vapawar/shcodes | /012_ralational_ops.sh | UTF-8 | 756 | 3.09375 | 3 | [] | no_license | #!/bin/sh
#author:vinod pawar
# Demonstrates the numeric comparison operators of test(1).
# Fixed: the original used "if [ ... ] then" (missing ';' before "then") and
# misspelled "else" as "esle" three times, so the script did not parse at all.
# The echoed messages are preserved verbatim.
a=10
b=20
if [ "$a" -eq "$b" ]; then
    echo "$a -eq $b : a is equal to be"
else
    echo "$a -eq $b : a is not equal to be"
fi
if [ "$a" -ne "$b" ]; then
    echo "$a -ne $b : a is not equal to b"
else
    echo "$a -ne $b : a is equql to b"
fi
if [ "$a" -gt "$b" ]; then
    echo "$a -gt $b : a is greater than b"
else
    echo "$a -gt $b : a in not greater than b"
fi
if [ "$a" -lt "$b" ]; then
    echo "$a -lt $b : a is less than b"
else
    echo "$a -lt $b : a in not less than b"
fi
if [ "$a" -ge "$b" ]; then
    echo "$a -ge $b : a is greater or equal b"
else
    echo "$a -ge $b : a in not greater or equal than b"
fi
if [ "$a" -le "$b" ]; then
    echo "$a -le $b: a is less or equal to b"
else
    echo "$a -le $b: a is not less or equal to b"
fi | true
14bedb3954fd4f676afc76e96a8f9306b99849f2 | Shell | GovHackMT/conference-site | /deploy.sh | UTF-8 | 386 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Build the site with npm and commit/push the generated public/ folder.
echo -e "\033[0;32mDeploying updates to GitHub...\033[0m"
# Build the project.
npm run build
# Go To Public folder
cd public
# Drop the source map so it is not published.
rm bundle.js.map
# Add changes to git.
git add -A
# Commit changes.
msg="[chore] Build and deploy site on `date`"
# A single optional argument overrides the default commit message.
if [ $# -eq 1 ]
then msg="$1"
fi
git commit -m "$msg"
# Push source and build repos.
git push origin master
# Come Back
cd ..
| true |
4e986e27c030e501ae2cb4eb4caf54c4677d1da6 | Shell | ticty/mkbin | /proc.tpl | UTF-8 | 892 | 3.875 | 4 | [] | no_license | #!/bin/bash
# Print the (placeholder) help text of this self-extracting package.
usage()
{
    printf '%s\n' 'Something to show'
}
# Self-extracting installer stub (template).
# @LINENUM@ and @MD5SUM@ are placeholders substituted at build time with the
# stub's line count and the md5 of the data appended after this script.

# Only -h/-help/--help is accepted; any other option is an error.
if [ $# -ne 0 ]
then
    case "$1" in
        "-h"|"-help"|"--help")
            usage
            exit 0
            ;;
        *)
            echo "not accept option \"$1\""
            exit 1
            ;;
    esac
fi

proc_line=@LINENUM@
data_md5=@MD5SUM@
# Scratch area; removed on any exit path by the trap below.
tmp_dir="$(mktemp -d)"
# mktemp replaces the deprecated (and on modern Debian, removed) tempfile(1).
tmp_data="$(mktemp --tmpdir="$tmp_dir" dataXXXXXXXX.gz)"
bak_dir="$(mktemp -d --tmpdir="$tmp_dir")"
# Single quotes: expand $tmp_dir when the trap fires, not when it is set.
trap 'rm -rf "$tmp_dir"' EXIT

# extract data and check its md5 value
tail -n +$((@LINENUM@+1)) "$0" > "$tmp_data"
md5=$(md5sum "$tmp_data" | awk '{printf $1}')
if [ "${md5}" != "${data_md5}" ]
then
    echo "packet is broken, please download a new one!"
    exit 1
fi

# extract data
cd "$tmp_dir" || exit 1
if ! tar -xf "$tmp_data"
then
    echo "extract the data fail"
    # Was "exit 0": a failed extraction must not report success.
    exit 1
fi

#do anything here

# exit here, so the install data would not exec
exit 0
| true |
accf781e0f700526f3a6fbba388deff7c9e42cc8 | Shell | nuriel77/hornet-playbook | /roles/hornet/files/horc | UTF-8 | 48,317 | 3.9375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -o pipefail
# This is just a proof-of-concept. Use with care.
# Only use if you installed your node using hornet-playbook
# horc manipulates systemd services and /opt/hornet-playbook, so root is required.
if [[ $EUID -ne 0 ]]; then
    echo "This script must be run as root"
    echo "Please change to root: 'sudo su -' and re-run"
    exit 1
fi
clear
[ -f "$HOME/.horc" ] && . "$HOME/.horc"
[ -f "/opt/hornet-playbook/inventory-multi" ] && INVENTORY_FILE=inventory-multi || INVENTORY_FILE=inventory
: "${EDITOR:=nano}"
# VERSION_TEMP is kept on its own line: get_latest_horc_version greps the
# published copy of this script for ^VERSION to read the remote version.
VERSION_TEMP=1.5.0
__VERSION__=${VERSION_TEMP}
SEMVER_REGEX="^(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$"
: "${HORNET_BRANCH:=main}"
: "${HORC_BRANCH:=main}"
CUR_DIR="$(pwd)"
PID_FILE="/var/run/horc.pid"
WIDTH=78
# Source if exists
# NOTE(review): ~/.horc was already sourced a few lines above - the second
# source is redundant but harmless.
[ -f ~/.horc ] && . ~/.horc
export NEWT_COLORS='
window=,
'
# Check if another process of horc is already running.
if [ -e "$PID_FILE" ]
then
    PID_FROM_FILE=$(cat "$PID_FILE")
    if ps -fq $PID_FROM_FILE | grep -q "$(basename $0)$"
    then
        echo "ERROR: another instance of $(basename $0) is already running with pid ${PID_FROM_FILE}."
        exit 1
    fi
fi
# Cleanup function
# Removes the pidfile, restores the starting directory, clears the EXIT
# trap (so cleanup does not re-fire) and clears the screen.
function cleanup() {
    rm -f "$PID_FILE"
    cd "$CUR_DIR"
    trap - EXIT
    clear
}
# Write pid to pidfile
echo -n $$ >"$PID_FILE"
# Set exit trap
trap cleanup INT TERM EXIT
# Block until the user presses ENTER, using all arguments as the prompt,
# then clear the screen.
function pause(){
    read -p "$*"
    clear
}
# Get OS and Dist
# Detect the OS distribution and version, exporting OS and VER, and derive
# SYSCONFIG_FILE (path of hornet's environment file) for RedHat- vs
# Debian-family systems. Exits for known-unsupported/too-old releases.
function set_dist() {
    if [ -f /etc/os-release ]; then
        # freedesktop.org and systemd
        . /etc/os-release
        export OS=$NAME
        export VER=$VERSION_ID
    elif type lsb_release >/dev/null 2>&1; then
        # linuxbase.org
        export OS=$(lsb_release -si)
        export VER=$(lsb_release -sr)
    elif [ -f /etc/lsb-release ]; then
        # For some versions of Debian/Ubuntu without lsb_release command
        . /etc/lsb-release
        export OS=$DISTRIB_ID
        export VER=$DISTRIB_RELEASE
    elif [ -f /etc/debian_version ]; then
        # Older Debian/Ubuntu/etc.
        export OS=Debian
        export VER=$(cat /etc/debian_version)
    elif [ -f /etc/SuSe-release ]; then
        # Older SuSE/etc.
        echo "Unsupported OS."
        exit 1
    elif [ -f /etc/redhat-release ]; then
        # Older Red Hat, CentOS, etc.
        echo "Old OS version. Minimum required is 7."
        exit 1
    else
        # Fall back to uname, e.g. "Linux <version>", also works for BSD, etc.
        export OS=$(uname -s)
        export VER=$(uname -r)
    fi

    # Set path to hornet's configuration file
    if [[ "$OS" =~ ^(CentOS|Red) ]]; then
        export SYSCONFIG_FILE=/etc/sysconfig/hornet
    elif [[ "$OS" =~ ^(Ubuntu|Debian|Raspbian) ]]; then
        export SYSCONFIG_FILE=/etc/default/hornet
    fi
}
function validate_version {
local version=$1
if [[ "$version" =~ $SEMVER_REGEX ]]
then
local major=${BASH_REMATCH[1]}
local minor=${BASH_REMATCH[2]}
local patch=${BASH_REMATCH[3]}
local prere=${BASH_REMATCH[4]}
local build=${BASH_REMATCH[5]}
if [[ ! -z "$prere" ]] || [[ ! -z "$build" ]]
then
return 1
fi
else
return 1
fi
}
# Compare two versions: usage compare_versions <v1> <op> <v2>, where <op> is
# a Python comparison operator string such as '<' or '>='. Leading 'v' is
# stripped from both versions. Returns 0 when the comparison holds, 1 when
# it does not or when either version is not a plain release.
# NOTE(review): delegates to python's distutils StrictVersion, which is
# deprecated/removed in recent Python 3 - confirm the target hosts still
# ship a compatible `python`.
function compare_versions() {
    local FIRST_VERSION=$(echo "$1" | tr -d 'v')
    local EVAL_STR=$2
    local SECOND_VERSION=$(echo "$3" | tr -d 'v')
    for VER in "$FIRST_VERSION" "$SECOND_VERSION"
    do
        if ! validate_version "$VER"
        then
            >&2 echo "Warning: the version '$VER' is not a valid release version. Cannot check version."
            return 1
        fi
    done
    local GET_BOOL=$(python -c "from distutils.version import StrictVersion; print(StrictVersion('$FIRST_VERSION') $EVAL_STR StrictVersion('$SECOND_VERSION'))")
    # If python errors out, GET_BOOL is neither True nor False and the
    # function falls through, returning the status of the failed elif (1).
    if [[ "$GET_BOOL" == "True" ]]
    then
        return 0
    elif [[ "$GET_BOOL" == "False" ]]
    then
        return 1
    fi
}
### Playbook ###
# Ensure /opt/hornet-playbook exists (cloning $HORC_BRANCH if not) and pull
# the latest changes. On git-pull failure, shows the git output in a
# whiptail box and returns 1. Leaves the cwd at /opt/hornet-playbook.
function verify_playbook() {
    local OUTPUT
    local HEIGHT
    local RC
    if [ ! -d /opt/hornet-playbook ]; then
        cd /opt && git clone -b "$HORC_BRANCH" https://github.com/nuriel77/hornet-playbook.git
    fi
    cd /opt/hornet-playbook
    OUTPUT=$(git pull 2>&1)
    RC=$?
    # Size the message box to the git output plus some padding.
    HEIGHT=$(expr $(echo "$OUTPUT"|wc -l) + 10)
    if [[ $RC -ne 0 ]]; then
        whiptail --title "Git Errors" \
            --msgbox "Errors when trying to update the playbook repository: $OUTPUT" \
            $HEIGHT $WIDTH
        return 1
    fi
}
# Interactively request a Let's Encrypt certificate (via certbot/nginx),
# then point the playbook's SSL overrides at the issued files and re-run
# the nginx SSL configuration tag. Prompts for email and domain.
function enable_https() {
    local SSL_EMAIL
    local SSL_DOMAIN
    if ! (whiptail --title "Enable HTTPS" \
        --yesno "This option will request a certificate for this node via certbot/letsencrypt using nginx.\nA domain name must be configured pointing to this node's IP address.\n\nDo you want to proceed?" \
        --defaultno \
        14 $WIDTH) then
        return 1
    fi
    SSL_EMAIL=$(whiptail --inputbox "Enter your email address to register your TLS certificate with:" 8 $WIDTH --title "Enter Email" 3>&1 1>&2 2>&3)
    if [[ $? -ne 0 ]]; then
        return
    fi
    SSL_DOMAIN=$(whiptail --inputbox "Enter the domain name to register the TLS certificate with:" 8 $WIDTH --title "Enter FQDN" 3>&1 1>&2 2>&3)
    if [[ $? -ne 0 ]]; then
        return
    fi
    verify_playbook
    if [[ $? -ne 0 ]]; then
        whiptail --title "Error!" \
            --msgbox "ERROR: Cannot enable HTTPS." \
            8 $WIDTH
        return 1
    fi
    if ! (whiptail --title "Verify Details" \
        --yesno "You've entered email '${SSL_EMAIL}' and the domain '${SSL_DOMAIN}'\n\nIs this correct?" \
        --defaultno \
        12 $WIDTH) then
        return
    fi
    # Run the script to request a certificate
    /usr/local/bin/certbot-nginx.sh "${SSL_EMAIL}" "${SSL_DOMAIN}"
    if [[ $? -ne 0 ]]; then
        pause "SSL Certificate request failed. See output above for more details."
        clear
        return 1
    fi
    # Subsequent requests to create new certificates (for example, original
    # directory was wiped) certbot will start appending an index for path name
    # We need to make sure we find a directory.
    FULL_PATH=$(find /etc/letsencrypt/live -type d -name "${SSL_DOMAIN}*" -print -quit)
    # Will configure nginx to use the let's encrypt certificates
    if [ -f "${FULL_PATH}/cert.pem" ]; then
        # Append the override vars only once (keyed on the fullchain path).
        if ! grep -q "^ssl_cert_file: ${FULL_PATH}/fullchain.pem" /opt/hornet-playbook/group_vars/all/z-ssl-override.yml >/dev/null 2>&1; then
            cat <<EOF >> /opt/hornet-playbook/group_vars/all/z-ssl-override.yml
ssl_cert_file: ${FULL_PATH}/cert.pem
ssl_key_file: ${FULL_PATH}/privkey.pem
ssl_bundle_cert: ${FULL_PATH}/fullchain.pem
create_selfsigned_cert: False
letsencrypt: True
EOF
        fi
        # We don't enable nginx with this common certificate in multi node setup!
        cd /opt/hornet-playbook && ansible-playbook -i inventory -v site.yml --tags=configure_nginx_ssl
    fi
    pause "Done. Check above output to see the status of the request."
    clear
}
### Hornet ###
# Print the short (7-char) commit hash at the tip of $HORNET_BRANCH of
# iotaledger/hornet, queried from the GitHub API.
function get_latest_hornet_commit {
    # Experimental/dev
    curl -s -f -m 5 -H 'Cache-Control: no-cache' \
        "https://api.github.com/repos/iotaledger/hornet/commits/$HORNET_BRANCH" \
        | jq -r '.sha' \
        | head -c 7
}
# Print the latest HORNET release tag from GitHub with the leading "v" removed.
function get_latest_hornet_release {
    curl -s -f -m 5 -H 'Cache-Control: no-cache' \
        https://api.github.com/repos/iotaledger/hornet/releases/latest \
        | jq -r '.tag_name' \
        | tr -d 'v'
}
# Return 0 when the installed HORNET version (TAG= in $SYSCONFIG_FILE) is
# older than the latest GitHub release, 1 otherwise.
function check_new_hornet {
    local HORNET_LATEST
    local HORNET_VERSION
    HORNET_LATEST=$(get_latest_hornet_release)
    HORNET_VERSION=$(grep ^TAG "$SYSCONFIG_FILE" | cut -d= -f2)
    if compare_versions $HORNET_VERSION '<' $HORNET_LATEST; then
        return 0
    else
        return 1
    fi
}
# Interactive HORNET upgrade: compares the installed TAG against the latest
# GitHub release, confirms with the user, pulls the playbook and rebuilds
# the Docker image; forces the TAG and restarts hornet if the new image did
# not appear in `docker ps`.
function upgrade_hornet() {
    echo "Checking for updates..."
    local HORNET_LATEST
    local HORNET_VERSION
    HORNET_LATEST=$(get_latest_hornet_release)
    if [[ $? -ne 0 ]]; then
        whiptail --title "Error!" \
            --msgbox "ERROR: Failed to get Hornet's latest version\n${HORNET_LATEST}" \
            8 $WIDTH
        return 1
    fi
    clear
    HORNET_VERSION=$(grep ^TAG "$SYSCONFIG_FILE" | cut -d= -f2)
    if [[ $? -ne 0 ]]; then
        whiptail --title "Error!" \
            --msgbox "ERROR: Failed to get current Hornet's version!" \
            8 $WIDTH
        return 1
    fi
    # Only use compare_versions if real versions (e.g. semver)
    if compare_versions $HORNET_VERSION '>=' $HORNET_LATEST; then
        if ! (whiptail --title "No Updates" \
            --yesno "You already have the latest version: ${HORNET_VERSION}.\nDo you want to proceed anyway?" \
            --defaultno \
            10 $WIDTH) then
            return
        else
            local NO_CONFIRM=1
        fi
    fi
    if [ -z "$NO_CONFIRM" ]; then
        if ! (whiptail --title "Upgrade Hornet" \
            --yesno "Are you sure you want to upgrade Hornet from ${HORNET_VERSION} to ${HORNET_LATEST}?\nWARNING: only do this if you know what your are doing!" \
            --defaultno \
            8 $WIDTH) then
            return
        fi
    fi
    verify_playbook
    if [[ $? -ne 0 ]]; then
        whiptail --title "Error!" \
            --msgbox "ERROR: Cannot upgrade Hornet." \
            8 $WIDTH
        return 1
    fi
    echo "Upgrading Docker image.... (Hornet will automatically restart if image gets updated)"
    cd /opt/hornet-playbook && ansible-playbook -i inventory \
        -v site.yml \
        --tags=build_hornet_image
    if [[ $? -ne 0 ]]; then
        whiptail --title "Error!" \
            --msgbox "ERROR: Failed upgrading Hornet." \
            8 $WIDTH
        # NOTE(review): $TFILE is not set anywhere in this script - this rm
        # looks like a leftover and is effectively a no-op.
        rm -f "$TFILE"
        return 1
    fi
    # If the expected image tag is not present, force the TAG and restart.
    if ! docker ps -a | sed '1d' | awk {'print $2'} | grep "gohornet/hornet:${HORNET_LATEST}"
    then
        echo "Force update tag and restart hornet ..."
        sed -i "s/^TAG=.*$/TAG=$HORNET_LATEST/" "$SYSCONFIG_FILE"
        /bin/systemctl restart hornet
    fi
    pause "Update finished successfully. Nevertheless, it is recommended to check the status of Hornet. Press ENTER to return to menu."
    clear
}
### Get DB ###
# Interactively download and bootstrap a synced snapshot database: builds a
# menu of download URLs from the hornet config (plus "Custom"), confirms,
# then stops hornet, wipes the old DB/snapshots, downloads the snapshot and
# restarts hornet. Cleans up and reports on failure.
function get_db() {
    local CONFIG_FILE
    CONFIG_FILE=$(grep ^base_config "$HOME/.nbctl" | awk {'print $2'})
    # Playbook's default sources
    SOURCES_ARRAY=($(jq -r '.snapshots.downloadURLs[] | .full' < "$CONFIG_FILE") "Custom")
    # Create an indexed array for building the menu
    SOURCES_ARRAY_INDEXED=($(printf "%s\n" "${SOURCES_ARRAY[@]}"| awk -F, '!/^ / && NF { print NR")"; print $1}'))
    # Output the menu
    SOURCE_INDEX=$(whiptail --title "Select DB Source" --menu "This action will download and boostrap a synced database.\nChoose a source where to download the database from:" 16 $WIDTH 4 "${SOURCES_ARRAY_INDEXED[@]}" 3>&1 1>&2 2>&3)
    if [ $? -eq 1 ]; then
        return
    fi
    # Parse to get the correct database source
    local CHOSEN_INDEX=$(( $(echo "$SOURCE_INDEX"| sed 's/)//') - 1 ))
    local DB_SOURCE="${SOURCES_ARRAY[$CHOSEN_INDEX]}"
    # Try to get database size. If source is custom, ask to enter URL.
    local FILE_SIZE
    local LAST_UPDATED="unknown"
    if [ "$DB_SOURCE" == "https://x-vps.com/export.bin" ]; then
        # x-vps publishes size/age metadata on a side endpoint.
        local JSON_OUTPUT=$(curl -k -H "Content-Type: application/json" -m 4 -f -s https://x-vps.com/index.php 2>/dev/null)
        FILE_SIZE=$(echo "$JSON_OUTPUT" | jq -r .file_size)
        LAST_UPDATED=$(echo "$JSON_OUTPUT" | jq -r .last_updated)
    elif [ "$DB_SOURCE" == "Custom" ]; then
        DB_SOURCE=$(whiptail --inputbox "Choose a source where to download the snapshot state DB from:" 8 $WIDTH "" --title "Select Custom Database Source" 3>&1 1>&2 2>&3)
        if [ $? -eq 1 ]; then
            return
        fi
    fi
    if [[ -z "${DB_SOURCE// }" ]] || [[ "x${DB_SOURCE}" == "x" ]]
    then
        whiptail --title "Empty source!" \
            --msgbox "You didn't supply any source!" \
            8 $WIDTH
        return
    fi
    # Try to get the file size before download
    [ -z "$FILE_SIZE" ] && { FILE_SIZE=$(wget --spider "$DB_SOURCE" 2>&1 | grep "^Length: " | sed 's/^.* (\(.*\)) .*$/\1/'); }
    [ -z "$FILE_SIZE" ] && FILE_SIZE="unknown"
    if (whiptail --title "Confirm DB Download" \
        --yesno "Download new database from '$DB_SOURCE'?\n\nUpdated: ${LAST_UPDATED}, size: ${FILE_SIZE}\n\n" \
        16 $WIDTH) then
        # This command will stop hornet, remove older database directories,
        # extract the database (on the fly) set correct user ownership and start HORNET up again.
        echo "Stopping hornet first, removing old database files and commencing download/extract of database files ..."
        systemctl stop hornet \
            && rm -rf /var/lib/hornet/snapshots /var/lib/hornet/mainnetdb/* \
            && mkdir -p /var/lib/hornet/mainnetdb \
            && mkdir -p /var/lib/hornet/snapshots/mainnet \
            && cd /var/lib/hornet/snapshots/mainnet \
            && wget -O full_snapshot.bin "$DB_SOURCE" \
            && chown hornet.hornet /var/lib/hornet -RL \
            && systemctl start hornet
        if [[ $? -ne 0 ]]; then
            whiptail --title "New DB Failed!" \
                --msgbox "Well this is embarrassing. Downloading the new DB failed..." \
                8 $WIDTH
            # Cleanup any leftovers
            rm -rf /var/lib/hornet/mainnetdb* /var/lib/hornet/snapshots/*
            chown hornet.hornet /var/lib/hornet -RL
            clear
            return 1
        else
            pause "Done! Please check HORNET's status and logs to verify it is error free."
            clear
        fi
    fi
}
## Set DB Max Size
# Interactively change hornet's pruning target size: requires the size-based
# pruning feature to be enabled in config.json; prompts for a new value,
# persists it as hornet_db_max_size in the playbook's z-append.yml and
# re-runs the hornet_config_file playbook tag.
function set_db_max_size() {
    local DB_MAX_SIZE_CURRENT
    local DB_MAX_SIZE_NEW
    if [[ $(jq -r .pruning.size.enabled < /var/lib/hornet/config.json) != true ]]
    then
        whiptail --title "DB Max Size Disabled!" \
            --msgbox "Database size limit feature is disabled in hornet.conf!\nMake sure to enable it in order to be able to set Max Size." \
            9 $WIDTH
        return
    fi
    DB_MAX_SIZE_CURRENT=$(jq -r .pruning.size.targetSize < /var/lib/hornet/config.json)
    _DB_MAX_SIZE_NEW=$(whiptail --inputbox "\nCurrent DB max size is set to ${DB_MAX_SIZE_CURRENT}.\nEnter a new size:" 10 $WIDTH "" --title "Select DB Max Size" 3>&1 1>&2 2>&3)
    if [ $? -eq 1 ]; then
        return
    elif [[ "$_DB_MAX_SIZE_NEW" == "" ]]; then
        whiptail --title "No Input" \
            --msgbox "No size selected!" \
            8 $WIDTH
        return
    fi
    # Strip spaces/tabs from the user's input before comparing/persisting.
    DB_MAX_SIZE_NEW=$(tr -d " \t" <<< "$_DB_MAX_SIZE_NEW")
    if [[ "$DB_MAX_SIZE_NEW" == "$DB_MAX_SIZE_CURRENT" ]]
    then
        whiptail --title "No Change" \
            --msgbox "You've selected the same max size as the currently configured max size." \
            8 $WIDTH
        return
    fi
    if ! (whiptail --title "Confirm Update" \
        --yesno "Are you sure you want to set DB Max Size to '$DB_MAX_SIZE_NEW'?" \
        8 $WIDTH) then
        return
    fi
    echo "Updating Max Size ..."
    verify_playbook
    # Replace any previous override, then append the new one.
    test -f /opt/hornet-playbook/group_vars/all/z-append.yml && sed -i '/^hornet_db_max_size:/d' /opt/hornet-playbook/group_vars/all/z-append.yml
    echo "hornet_db_max_size: $DB_MAX_SIZE_NEW" >> /opt/hornet-playbook/group_vars/all/z-append.yml
    run-playbook --tags=hornet_config_file -e overwrite=yes
}
## HORC
# Print the published horc version: fetches this script from GitHub
# ($HORC_BRANCH) and extracts the value of its "VERSION_TEMP=x.y.z" line
# (grep ^VERSION). A random nocache query string defeats CDN caching.
function get_latest_horc_version() {
    local rand
    # Read urandom directly; the original wrapped this in a useless
    # echo $(cat ...) and hid the pipeline's exit status in `local x=$(...)`.
    rand=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 7)
    curl -s -f -m 5 -H 'Cache-Control: no-cache' \
        "https://raw.githubusercontent.com/nuriel77/hornet-playbook/${HORC_BRANCH}/roles/hornet/files/horc?nocache=$rand" \
        | grep ^VERSION | cut -d= -f2
}
# Return 0 when a newer horc version is published on GitHub, 1 otherwise
# (including when the remote version cannot be fetched or compared).
function check_new_horc() {
    local CURR_VERSION="$__VERSION__"
    local HORC_VERSION
    # Declaration split from assignment: `local x=$(cmd)` returns the status
    # of `local`, so the original's $? check could never see a fetch failure.
    HORC_VERSION=$(get_latest_horc_version 2>&1)
    if [[ $? -ne 0 ]] || [[ "$HORC_VERSION" == "" ]]; then
        return 1
    fi
    if compare_versions $CURR_VERSION '>=' $HORC_VERSION; then
        return 1
    else
        return 0
    fi
}
# Interactive horc self-update: confirms with the user, pulls the playbook,
# runs the "scripts" tag to reinstall horc, then re-execs itself so the new
# version takes over.
function update_horc() {
    # NOTE(review): `local x=$(...)` masks the fetch's exit status, so the
    # $? test below effectively relies on the empty-string check.
    local HORC_VERSION=$(get_latest_horc_version 2>&1)
    local CURR_VERSION="$__VERSION__"
    if [[ $? -ne 0 ]] || [[ "$HORC_VERSION" == "" ]]; then
        whiptail --title "HORC Update Failed" \
            --msgbox "Failed to get HORC version from github." \
            8 $WIDTH
        return 1
    fi
    if compare_versions $CURR_VERSION '>=' $HORC_VERSION; then
        if ! (whiptail --title "No Updates" \
            --yesno "You already have the latest version: ${CURR_VERSION}.\nDo you want to proceed anyway?" \
            --defaultno \
            10 $WIDTH) then
            return
        else
            # Don't need another confirmation question
            local NO_CONFIRM=1
            # Delete any 'completed' files for this version
            # This forces to re-run updates specific for
            # this version if any.
            rm -f "custom_updates/${CURR_VERSION}_updates.sh.completed"
        fi
    fi
    if [ -z "$NO_CONFIRM" ]; then
        if ! (whiptail --title "Confirm Update" \
            --yesno "Are you sure you want to update HORC from '$__VERSION__' to '$HORC_VERSION'?" \
            8 $WIDTH) then
            return
        fi
    fi
    # Verifies and pull latest changes
    verify_playbook
    if [[ $? -ne 0 ]]; then
        whiptail --title "Error!" \
            --msgbox "ERROR: playbook task failed." \
            8 $WIDTH
        return 1
    fi
    # Run playbook script upgrades
    cd /opt/hornet-playbook && ansible-playbook -i inventory site.yml -v --tags=scripts
    [[ $? -ne 0 ]] && MSG="Updating HORC failed!!! Check output above for errors." || MSG="Update finished successfully! Additional updates may apply upon restart of horc."
    pause "$MSG Press ENTER to return to menu."
    clear
    # Remove current pid file and restart
    rm -f "$PID_FILE"
    exec "$0"
}
# Used by sort_func to sort an array
# based on versions.
# Comparator: given two paths whose basenames start with "<version>_..."
# (e.g. 1.5.0_updates.sh), returns 0 when $1's version sorts before $2's.
# NOTE(review): relies on python distutils StrictVersion, like
# compare_versions above - deprecated in recent Python 3.
function version_sort() {
    local FIRST_VERSION=$(basename "$1" | cut -d_ -f1)
    local SECOND_VERSION=$(basename "$2" | cut -d_ -f1)
    local EVAL_STR='<'
    local GET_BOOL=$(python -c "from distutils.version import StrictVersion; print(StrictVersion('$FIRST_VERSION') $EVAL_STR StrictVersion('$SECOND_VERSION'))")
    if [[ "$GET_BOOL" == "True" ]]
    then
        return 0
    elif [[ "$GET_BOOL" == "False" ]]
    then
        return 1
    fi
}
# Generic sort version. Will accept a
# function that is supposed to return
# 0 or 1 for sorting
# Iterative quicksort: $1 is a comparator function (returns 0 when its first
# argument sorts before its second), the remaining arguments are the items.
# The sorted result is stored in the global array UPDATES_SORTED; an
# explicit index-range stack replaces recursion.
function sort_func() {
    (($#<=1)) && return 0
    local compare_func=$1
    shift
    local stack=( 0 $(($#-1)) ) beg end i pivot smaller larger
    UPDATES_SORTED=("$@")
    while ((${#stack[@]})); do
        # Pop the next [beg, end] sub-range to partition.
        beg=${stack[0]}
        end=${stack[1]}
        stack=( "${stack[@]:2}" )
        smaller=() larger=()
        pivot=${UPDATES_SORTED[beg]}
        # Partition the range (excluding the pivot) around the pivot.
        for ((i=beg+1;i<=end;++i))
        do
            if "$compare_func" "${UPDATES_SORTED[i]}" "$pivot"
            then
                smaller+=( "${UPDATES_SORTED[i]}" )
            else
                larger+=( "${UPDATES_SORTED[i]}" )
            fi
        done
        # Splice: prefix + smaller + pivot + larger + suffix.
        UPDATES_SORTED=( "${UPDATES_SORTED[@]:0:beg}" "${smaller[@]}" "$pivot" "${larger[@]}" "${UPDATES_SORTED[@]:end+1}" )
        # Push sub-ranges that still contain 2+ elements.
        if ((${#smaller[@]}>=2));
        then
            stack+=( "$beg" "$((beg+${#smaller[@]}-1))" )
        fi
        if ((${#larger[@]}>=2))
        then
            stack+=( "$((end-${#larger[@]}+1))" "$end" )
        fi
    done
}
# Run pending per-version update scripts (custom_updates/<ver>_updates.sh)
# in ascending version order. A script that succeeds is marked with a
# ".completed" sentinel file and skipped on subsequent runs.
function run_custom_updates(){
    # Find pending update files
    readarray -t TO_RUN_UPDATES < <(find /opt/hornet-playbook/custom_updates/ -maxdepth 1 -type f -name '*_updates.sh')
    # Return if nothing to update
    ((${#TO_RUN_UPDATES[@]} == 0)) && { clear; return; }
    echo "Checking if any pending updates ..."
    # Sort updates by version from small to great
    sort_func version_sort "${TO_RUN_UPDATES[@]}"
    for _UPDATE in "${UPDATES_SORTED[@]}"
    do
        if [ ! -e "${_UPDATE}.completed" ]
        then
            echo "Executing update $(basename ${_UPDATE}) ..."
            if ! /bin/bash "$_UPDATE"
            then
                whiptail --title "Error!" \
                    --msgbox "ERROR: custom updates task failed at '$_UPDATE'." \
                    8 $WIDTH
                return 1
            else
                touch "${_UPDATE}.completed"
            fi
        else
            echo "$(basename ${_UPDATE}) already updated. To force rerun this update remove the file '${_UPDATE}.completed' and rerun horc."
        fi
    done
    clear
}
### Configs ###
# Interactively pick the default $EDITOR, verify it exists on PATH, and
# persist the choice to ~/.horc (which is sourced on every horc start-up).
function choose_editor() {
    USER_CHOICE=$(whiptail --inputbox "Choose a default text editor:" 8 $WIDTH ${EDITOR} --title "Choose Editor" 3>&1 1>&2 2>&3)
    if [[ $? -ne 0 ]]; then
        return
    fi
    # `command -v` replaces the non-portable `which`; quoting guards
    # against empty or whitespace-containing input.
    if ! command -v "$USER_CHOICE" >/dev/null 2>&1; then
        whiptail --title "Error!" \
            --msgbox "ERROR: Invalid editor or editor not found on system." \
            8 $WIDTH
        return 1
    fi
    EDITOR=$USER_CHOICE
    # Persist: append on first use, otherwise rewrite the existing line.
    # NOTE(review): an editor string containing '/' or '&' would still break
    # the sed replacement - acceptable for interactive use.
    if ! grep -q "^export EDITOR" "$HOME/.horc"; then
        echo "export EDITOR=${EDITOR}" >> "$HOME/.horc"
    else
        sed -i "s/^export EDITOR=.*/export EDITOR=${EDITOR}/g" "$HOME/.horc"
    fi
}
# Open $1 in $EDITOR; if its md5 changed, offer to restart (or, when $3 is
# non-empty, reload) service $2 so the edited config takes effect.
function edit_config_file() {
    local CONFIG=$1
    local SERVICE=$2
    local RELOAD=$3
    # Compare checksums before/after the edit to detect modifications.
    BEFORE_EDIT=$(md5sum $CONFIG | awk {'print $1'})
    $EDITOR ${CONFIG}
    AFTER_EDIT=$(md5sum $CONFIG | awk {'print $1'})
    if [[ "$AFTER_EDIT" != "$BEFORE_EDIT" ]]; then
        if (whiptail --title "File Modified" \
            --yesno "${CONFIG} file was modified.\n${SERVICE} requires a restart to pick up the new changes.\nDo you want to restart it now?" \
            8 $WIDTH) then
            if [[ -z ${RELOAD} ]]; then
                service_cmd $SERVICE restart
            else
                service_cmd $SERVICE reload
            fi
        fi
    fi
}
# Wrapper around edit_config_file: verifies the file exists, keeps a
# one-time "<file>.original" backup for restores, then delegates.
#   $1 - config file path, $2 - service name, $3 - optional reload flag
function edit_config() {
    local CONFIG_FILE=$1
    local SERVICE=$2
    local RELOAD=$3
    if [ ! -f "$CONFIG_FILE" ]; then
        whiptail --title "Missing File" \
            --msgbox "Error: cannot find '$CONFIG_FILE'" \
            8 $WIDTH
        return 1
    fi
    # Make backup for restores
    if [ ! -f "${CONFIG_FILE}.original" ]; then
        cp -- "${CONFIG_FILE}" "${CONFIG_FILE}.original"
    fi
    if edit_config_file "${CONFIG_FILE}" "${SERVICE}" "${RELOAD}"; then
        return 0
    else
        return 1
    fi
}
### Peers ###
# Interactively add a peer: prompts for a multiaddress (+alias), strips
# spaces, confirms, then calls `nbctl -a -p` and reports the result.
function add_peer() {
    local PEER
    local HEIGHT
    local NBCTL_OUTPUT
    local ADD_PEER
    local RC
    PEER=$(whiptail --inputbox "Enter a peer to add.\nThe format is:\n/dns/hostname/tcp/15600/p2p/peerID,alias/PeerAlias\ne.g: /dns/mynode.io/tcp/15600/p2p/12D3KooWLpSb3fyZ1nIen1F5jAeNRPwLTMgeB534nFUlsWaAfiO7,alias/Mynode" 12 120 --title "Add Peer" 3>&1 1>&2 2>&3)
    RC=$?
    if [[ $RC -ne 0 ]]; then
        return
    fi
    HEIGHT=$(expr $(echo "$PEER"|wc -l) + 8)
    # Strip all spaces from the entered multiaddress.
    PEER_TO_ADD=$(echo "$PEER"|tr -d ' ')
    if (whiptail --title "Add Peer" \
        --yesno "The following peer will be added:\n\n$PEER_TO_ADD" \
        $HEIGHT 120) then
        # Run nbctl command
        NBCTL_OUTPUT=$(nbctl -a -p "$PEER_TO_ADD" 2>&1)
        RC=$?
        if [[ $RC -eq 0 ]]; then
            HEIGHT=8
            whiptail --title "Added Peers" \
                --msgbox "Peer added." \
                $HEIGHT $WIDTH
        else
            # Translate the python URLError into a friendlier hint.
            if echo "$NBCTL_OUTPUT" | grep -q "urllib2.URLError" ; then
                NBCTL_OUTPUT="Failed to communicate with HORNET API port. Check if HORNET is active and if $HOME/.nbctl is configured correctly."
            fi
            HEIGHT=$(expr $(echo "$NBCTL_OUTPUT"|wc -l) + 10)
            whiptail --title "Adding Peers Failed" \
                --msgbox "Failure: $NBCTL_OUTPUT" \
                $HEIGHT $WIDTH
        fi
    fi
}
# Interactively remove a peer: fetches the raw peer JSON via
# list_peers get, builds an alias/address/id menu, confirms the selection
# and removes the chosen peer with `nbctl -r -p <id>`.
function remove_peers() {
    local PEERS_ARRAY_INDEXED
    local PEER_INDEX
    local PEERS_ARRAY
    local PEERS
    local PEER
    local HEIGHT
    echo "Getting peers list..."
    PEERS=$(list_peers get)
    RC=$?
    clear
    if [[ $RC -ne 0 ]]; then
        whiptail --title "Peers Failed" \
            --msgbox "Failed to get list of peers: $PEERS" \
            8 $WIDTH
        return
    fi
    if [ "$PEERS" == "" ]; then
        whiptail --title "No Peers" \
            --msgbox "There are no peers configured" \
            8 $WIDTH
        return
    fi
    # Flatten each peer to "alias,address,id" rows for the menu.
    PEERS=$(echo "$PEERS" | jq -r '.peers[] | "\(.alias),\(.multiAddresses[0]),\(.id)"' | column -c 80 -t)
    HEIGHT=$(expr $(echo "$PEERS"|wc -l) )
    PEERS_ARRAY=($(echo "$PEERS"))
    PEERS_ARRAY_INDEXED=($(echo "$PEERS" | awk '!/^ / && NF { print NR")"; print $1$2$3}'))
    PEER_INDEX=$(whiptail --title "Remove peers" --menu "Choose a peer" 25 120 16 "${PEERS_ARRAY_INDEXED[@]}" 3>&1 1>&2 2>&3)
    if [ $? -eq 1 ]; then
        return
    fi
    # Convert the "N)" menu tag back into a zero-based array index.
    local PEER_INX=$(echo "$PEER_INDEX" | cut -d')' -f1)
    local PEER_INX=$(expr $PEER_INX - 1)
    local TO_REMOVE="${PEERS_ARRAY[$PEER_INX]}"
    local ALIAS=$(echo "$TO_REMOVE" | cut -d',' -f1)
    local ID="$(echo "$TO_REMOVE" | cut -d ',' -f3)"
    if (whiptail --title "Confirm removal" \
        --yesno "Are you sure you want to remove:\n\n${ALIAS},${ID}" \
        --defaultno \
        12 $WIDTH)
    then
        REMOVAL_OUTPUT=$(nbctl -r -p "${ID}" 2>&1)
        if [ $? -eq 0 ];then
            whiptail --title "Removed successfully" \
                --msgbox "$REMOVAL_OUTPUT" \
                12 $WIDTH
        else
            HEIGHT=$(expr $(echo "$REMOVAL_OUTPUT"|wc -l) + 7)
            whiptail --title "Remove failed!" \
                --msgbox "${REMOVAL_OUTPUT}" \
                $HEIGHT $WIDTH
        fi
    fi
}
# Query peers via `nbctl -s`. With no argument, renders the peer list in a
# whiptail box; with any argument ($1 non-empty, e.g. "get"), prints the
# raw query result (JSON or error text) to stdout for callers such as
# remove_peers, returning 0/1 accordingly.
function list_peers() {
    local PEERS_QUERY
    local PEERS
    local HEIGHT
    local RETURN_RES=$1
    PEERS_QUERY=$(nbctl -s 2>&1)
    RC=$?
    if [[ $RC -ne 0 ]]; then
        # Translate common connection errors into a friendlier hint.
        if echo "$PEERS_QUERY" | egrep -q "urllib..URLError|timeout|refused" ; then
            PEERS_QUERY="Failed to communicate with HORNET API port. Check if HORNET is active and if $HOME/.nbctl is configured correctly."
        else
            PEERS_QUERY="Unknown error"
        fi
        if [ -n "$RETURN_RES" ]; then
            echo "$PEERS_QUERY"
            return 1
        fi
        HEIGHT=$(expr $(echo "$PEERS_QUERY"|wc -l) + 7)
        whiptail --title "List peers failed" \
            --msgbox "$PEERS_QUERY" \
            $HEIGHT $WIDTH
        return 1
    else
        #PEERS=$(echo "$PEERS_QUERY" | jq -r '.peers[] | "\(.alias) \(.multiAddresses[0]) conn: \(.connected)\n"' | column -c 80 -t)
        if [ -n "$RETURN_RES" ]; then
            echo "$PEERS_QUERY"
            return 0
        fi
        # Each peer is rendered as two lines (alias/address/state, then id),
        # hence the /2 for the count and *2 for the window height below.
        PEERS=$(echo "$PEERS_QUERY" | jq -r '.peers[] | "\(.alias) \(.multiAddresses[0]) connected: \(.connected),\n'" "'\(.id)"' | column -c 80)
        if [ "$PEERS" == "" ]; then
            LENGTH=0
        else
            LENGTH=$(expr $(echo "$PEERS"|wc -l) / 2)
            FOR_WINDOW_LENGTH=$(($(echo "$PEERS"|wc -l)*2))
        fi
        local HEIGHT=$(expr $FOR_WINDOW_LENGTH + 8)
        whiptail --title "List Peers" \
            --msgbox "Total: ${LENGTH} peer(s)\n\n${PEERS}" \
            $HEIGHT $WIDTH
    fi
}
### Node info ###
# Show the latest milestone indexes. Currently disabled: the function
# pauses with a work-in-progress message and returns immediately -
# everything after the bare `return` below is unreachable legacy code
# (old IRI-style getNodeInfo query) kept for future rework.
function show_lmsi() {
    pause "Sorry, this is still work-in-progress. Press ENTER to return to menu."
    clear
    return
    if [ ! -f "$HOME/.nbctl" ]; then
        whiptail --title "Missing .nbctl" \
            --msgbox "Error: cannot find $HOME/.nbctl" \
            8 $WIDTH
        return
    fi
    HORNET_HOST=$(grep ^host "$HOME/.nbctl" | cut -d: -f2-)
    local HORNET_API_VERSION=$(grep ^api_version $HOME/.nbctl | cut -d: -f2-)
    local LSMI_LOCAL=$(curl -f -m 5 -s $HORNET_HOST -X POST -H "X-IOTA-API-Version: $HORNET_API_VERSION" -H 'Content-Type: application/json' -d '{"command": "getNodeInfo"}'| python -m json.tool|egrep "latestSolidSubtangleMilestoneIndex|latestMilestoneIndex"| sed 's/,//'|sed -e 's/^[ \t]*//'|sed 's/"//g')
    if [ $? -ne 0 ]; then
        whiptail --title "Query failed" \
            --msgbox "Error: failed query ${LSMI_LOCAL}. Check if HORNET is active and if $HOME/.nbctl is configured properly." \
            12 $WIDTH
        return
    fi
    local LSMI_QUERY=$(curl -H 'Cache-Control: no-cache' -s -f -m 5 https://x-vps.com/lmsi|jq -r .latestMilestoneIndex 2>/dev/null)
    if [ $? -eq 0 ]; then
        OUTPUT="Latest milestone index: ${LSMI_QUERY}\n\n"
    else
        OUTPUT=""
    fi
    OUTPUT="${OUTPUT}Local:\n\n${LSMI_LOCAL}"
    whiptail --title "LatestMilestoneIndex" \
        --msgbox "$OUTPUT" \
        12 $WIDTH
}
# Query the node's /api/v<ver>/info endpoint (host and API version come
# from ~/.nbctl), pretty-print the .data fields (with the milestone
# timestamp expanded to a human-readable date) in a whiptail box.
function get_node_info() {
    if [ ! -f $HOME/.nbctl ]; then
        whiptail --title "Missing .nbctl" \
            --msgbox "Error: cannot find $HOME/.nbctl" \
            8 $WIDTH
        return
    fi
    local HORNET_HOST=$(grep ^host $HOME/.nbctl | awk {'print $2'})
    local HORNET_API_VERSION=$(grep ^api_version $HOME/.nbctl | awk {'print $2'})
    NODE_RESPONSE=$(curl -f -m 5 -s ${HORNET_HOST}/api/v${HORNET_API_VERSION}/info -X GET -H "Accept: application/json" -H "Content-Type: application/json")
    # epoch time to human readable
    LMST_EPOCH=$(jq -r '.data.latestMilestoneTimestamp' <<< "$NODE_RESPONSE")
    LMST_TIME=$(echo "$LMST_EPOCH:($(date -d @${LMST_EPOCH}))" | tr ' ' '_')
    # to entries, columns, sort
    NODE_INFO=$(jq -r --arg a "$LMST_TIME" '.data.latestMilestoneTimestamp = $a | .data | to_entries[] | "\(.key): \(.value)"' <<< "$NODE_RESPONSE" | column -t | sort )
    if [ $? -ne 0 ]; then
        whiptail --title "Query failed" \
            --msgbox "Error: failed query ${NODE_INFO}. Check if HORNET is active and if $HOME/.nbctl is configured properly." \
            12 $WIDTH
        return
    fi
    HEIGHT=$(expr $(echo "$NODE_INFO"|wc -l) + 8)
    whiptail --title "Node Info" \
        --msgbox "$NODE_INFO" \
        $HEIGHT $WIDTH
}
### PS MEM ###
function view_ps_mem() {
    # Show per-process total memory usage via the ps_mem utility, paged
    # with less. Returns 1 when ps_mem is not installed.
    # "command -v" is the portable existence check (preferred over "which").
    if ! command -v ps_mem >/dev/null 2>&1; then
        whiptail --title "Missing ps_mem" \
                 --msgbox "Error: cannot find 'ps_mem' utility!" \
                 8 $WIDTH
        return 1
    fi
    whiptail --title "ps_mem utility" \
             --msgbox "This utility shows a per-process total memory usage.\nUse arrows or page up/down to scroll and q to exit." \
             8 $WIDTH
    ps_mem 2>/dev/null | less
}
### Services ###
function service_status() {
    # Show systemd status for the given unit.
    # Pipe to less as some distros don't use a pager by default.
    local SERVICE
    SERVICE=$1
    systemctl status "$SERVICE" | less
}
function service_cmd() {
    # Run "systemctl <command> <service>", optionally enabling/disabling the
    # unit first, and report the result in a whiptail message box.
    #   $1 - service name
    #   $2 - systemctl command (start/stop/restart); may be empty for enable
    #   $3 - optional extra command: "enable" or "disable"
    local SERVICE
    local COMMAND
    local OUTPUT
    local EXTRA_CMD
    local STATE
    local RC
    SERVICE=$1
    COMMAND=$2
    EXTRA_CMD=$3
    RC=0
    echo "Running 'systemctl $COMMAND $SERVICE' ..."
    if [ "$EXTRA_CMD" == "disable" ]; then
        systemctl disable "$SERVICE"
        STATE="and disable"
    elif [ "$EXTRA_CMD" == "enable" ]; then
        systemctl enable "$SERVICE"
        STATE="enabled"
    fi
    if [ "$EXTRA_CMD" != "enable" ]; then
        OUTPUT=$(systemctl $COMMAND "$SERVICE" 2>&1)
        # Capture the status immediately: the original checked $? only
        # after computing HEIGHT, so it always saw expr's status instead
        # of systemctl's and the "Failed" branch could never trigger.
        RC=$?
    else
        OUTPUT=""
    fi
    HEIGHT=$(expr $(echo "$OUTPUT"|wc -l) + 7)
    if [ $RC -ne 0 ]; then
        whiptail --title "Failed" \
                 --msgbox "$COMMAND $SERVICE failed: $OUTPUT" \
                 $HEIGHT 48
    else
        whiptail --title "Success" \
                 --msgbox "$COMMAND $SERVICE $STATE OK" \
                 $HEIGHT 48
    fi
    clear
}
function service_log() {
    # View the systemd journal for the given service.
    # Pipe to less even for short logs so the view doesn't exit
    # immediately on systems without a default pager.
    local SERVICE
    SERVICE=$1
    journalctl -u "$SERVICE" | less
}
function service_menu() {
    # Render the per-service whiptail menu for unit $1 and echo the chosen
    # tag (e.g. "a)"). Returns whiptail's status: 1 when "Back" is pressed.
    local SERVICE
    SERVICE=$1
    # The 3>&1 1>&2 2>&3 dance swaps stdout/stderr so the selection, which
    # whiptail writes to stderr, is captured by the caller's $(...).
    whiptail --title "HORC v${__VERSION__} - $SERVICE Service" \
             --menu "For logs use SHIFT-g to skip to end of log, or q to exit." \
             --cancel-button "Back" \
             25 $WIDTH 16 \
             "a)" "Status" \
             "b)" "Start" \
             "c)" "Stop" \
             "d)" "Restart" \
             "e)" "Disable start on reboot" \
             "f)" "Enable start on reboot" \
             "g)" "View log" \
             3>&1 1>&2 2>&3
}
function service() {
    # Action loop for one systemd unit: keep showing its menu and dispatch
    # the selection until the user presses "Back" (whiptail status 1).
    # Behaves exactly like the original tail-recursive version, without
    # growing the call stack.
    local SERVICE="$1"
    local CHOICE
    local RC
    while true; do
        CHOICE=$(service_menu "$SERVICE")
        RC=$?
        if [[ $RC -eq 1 ]]; then
            return
        fi
        case "$CHOICE" in
            "a)") service_status "$SERVICE" ;;
            "b)") service_cmd "$SERVICE" start ;;
            "c)") service_cmd "$SERVICE" stop ;;
            "d)") service_cmd "$SERVICE" restart ;;
            "e)") service_cmd "$SERVICE" stop disable ;;
            "f)") service_cmd "$SERVICE" "" enable ;;
            "g)") service_log "$SERVICE" ;;
            *) ;;
        esac
    done
}
function services_menu() {
    # Top-level services chooser; echoes the selected tag ("a)"/"b)").
    # Returns whiptail's status: 1 when "Back" is pressed.
    # fd swap captures whiptail's stderr output into the caller's $(...).
    whiptail --title "HORC v${__VERSION__} - Node Services" \
             --menu "Choose an option" \
             --cancel-button "Back" \
             15 $WIDTH 8 \
             "a)" "Hornet" \
             "b)" "Nginx" \
             3>&1 1>&2 2>&3
}
function services() {
    # Loop over the services chooser, entering the per-service menu for
    # the selected unit, until the user presses "Back" (status 1).
    local CHOICE
    local RC
    while true; do
        CHOICE=$(services_menu)
        RC=$?
        if [[ $RC -eq 1 ]]; then
            return
        fi
        case "$CHOICE" in
            "a)") service hornet ;;
            "b)") service nginx ;;
            *) ;;
        esac
    done
}
### Rerun playbook installation ###
function rerun_playbook() {
    # Re-run the entire hornet-playbook Ansible installation, optionally
    # with overwrite=yes which resets manually edited configuration
    # (existing files are backed up by the playbook).
    if (whiptail --title "Rerun Playbook Method" \
                 --yesno "Sometimes you may want to rerun the entire installation if you think something has changed and you want to try to reset the node to its initial state.\nThere are two options:\n\n1. simply rerun the installation\n2. use the override method: it will reset any configurations you may have configured manually.\n\nIf you would like to use the override method choose Yes else choose No for normal rerun.\n\nNote that existing configuration files will be backed up so you can restore e.g. peers and other settings later on.\n" \
                 --defaultno \
                 20 $WIDTH) then
        local OVERWRITE=yes
    else
        local OVERWRITE=no
    fi
    # Second confirmation, showing the chosen mode.
    if ! (whiptail --title "Rerun Playbook Confirm" \
                 --yesno "This option will allow you to rerun the entire installation.\nUsing override method: $OVERWRITE\n\nWould you like to proceed?\n" \
                 --defaultno \
                 12 $WIDTH) then
        return
    fi
    # verify_playbook is defined elsewhere in this script; presumably it
    # checks/refreshes the playbook checkout.
    verify_playbook
    if [[ $? -ne 0 ]]; then
        whiptail --title "Error!" \
                 --msgbox "ERROR: Cannot rerun Hornet installation, unknown error." \
                 8 $WIDTH
        return 1
    fi
    cd /opt/hornet-playbook && ansible-playbook -i inventory site.yml -v -e "overwrite=$OVERWRITE"
    [[ $? -ne 0 ]] && MSG="Rerunning the playbook installation failed!!! Check output above for errors." || MSG="Rerun finished successfully!"
    pause "$MSG Press ENTER to return to menu."
    clear
}
### Clean Dangling Images ###
function remove_dangling_images() {
    # Remove all dangling (untagged) docker images, if any exist.
    # Returns docker's status when a removal was attempted, 0 otherwise.
    local DANGLING_IMAGES
    DANGLING_IMAGES=($(/usr/bin/docker images -f "dangling=true" -q))
    if [[ ${#DANGLING_IMAGES[@]} -gt 0 ]]; then
        # Reuse the IDs already collected instead of querying docker a
        # second time (the original re-ran "docker images", racing
        # against concurrent image changes).
        /usr/bin/docker rmi -f "${DANGLING_IMAGES[@]}"
        return $?
    fi
}
### Cleanup Docker Images ###
function cleanup_docker_images() {
    # Free disk space by pruning unused docker volumes and images after
    # user confirmation. Images of stopped services are re-pulled on the
    # next start, so this is safe.
    if (whiptail --title "Cleanup Docker Images" \
                 --yesno "To free up some diskspace you can delete unused docker images and volumes. For services that are temporarily off this isn't a problem: the image will be pulled again once you start up the service.\n\nWould you like proceed?" \
                 --defaultno \
                 14 $WIDTH) then
        echo "Removing unused volumes, please wait..."
        /usr/bin/docker volume prune -f
        echo "Removing unused images, please wait..."
        /usr/bin/docker image prune -a -f
        echo "Removing dangling images, please wait..."
        remove_dangling_images
        # Only the dangling-image step's status is reported to the user.
        [[ $? -ne 0 ]] && MSG="Failed to cleanup unused images! Check output above for errors. " || MSG="Cleanup finished successfully! "
        pause "${MSG}Press ENTER to return to menu."
        clear
    fi
}
### Update config.json ###
function update_config() {
    # Regenerate HORNET's config.json from the playbook templates
    # (overwrite=yes), discarding manual edits.
    if ! (whiptail --title "Update config.json" \
                 --yesno "This option will allow you to update config.json according to the playbook's variables.\nThis will update the configuration if there was any update to it or if you want to reset any modifications you have done to it manually.\n\nWould you like to proceed?\n" \
                 --defaultno \
                 12 $WIDTH) then
        return
    fi
    verify_playbook
    if [[ $? -ne 0 ]]; then
        whiptail --title "Error!" \
                 --msgbox "ERROR: Cannot update config.json, unknown error trying to git pull." \
                 8 $WIDTH
        return 1
    fi
    # Pull the latest playbook and re-apply only the config-file tag.
    cd /opt/hornet-playbook \
        && git pull \
        && ansible-playbook -v -i inventory \
        site.yml \
        --tags=hornet_config_file \
        -e overwrite=yes
    [[ $? -ne 0 ]] && MSG="Failed to update config.json! Check output above for any errors. " || MSG="Update finished successfully! "
    pause "${MSG}Press ENTER to return to menu."
}
### Notice ###
function how_to_setup() {
    # Show setup instructions for the ~/.nbctl configuration file.
    # Use ${__VERSION__} like every other menu title in this script; the
    # original referenced ${VERSION}, which is not the variable used
    # elsewhere and typically expanded empty.
    whiptail --title "HORC v${__VERSION__} - Instructions" \
             --msgbox "Check that you have the .nbctl file in your home folder (ls -l ~/.nbctl).\nExample of the file's contents:\n\napi_version: 1\nhost: http://127.0.0.1:14265\nfile: /var/lib/hornet/peering.json\n\nThis will allow this script to use the nbctl utility to manage peers and get the node's data." \
             15 $WIDTH
}
### Configure files ###
function configure_files_menu() {
    # Config-file chooser; echoes the selected tag.
    # Returns whiptail's status: 1 when "Back" is pressed.
    whiptail --title "HORC v${__VERSION__} - Configure Files" \
             --menu "Choose an option" \
             --cancel-button "Back" \
             22 48 12 \
             "a)" "Hornet System Config" \
             "b)" "Hornet Main Config" \
             "c)" "Hornet Peers Config" \
             "Z)" "Choose Editor" \
             3>&1 1>&2 2>&3
}
function configure_files() {
    # Edit loop for the node's configuration files; repeats until the
    # user presses "Back" (whiptail status 1).
    local CHOICE
    local CONFIG_FILE
    local RC
    while true; do
        # Re-read the main config location on every pass, exactly like
        # the original recursive version did.
        CONFIG_FILE=$(grep ^base_config "$HOME/.nbctl" | awk '{print $2}')
        CHOICE=$(configure_files_menu)
        RC=$?
        if [[ $RC -eq 1 ]]; then
            return
        fi
        case "$CHOICE" in
            "a)") edit_config "$SYSCONFIG_FILE" "hornet" ;;
            "b)") edit_config "${CONFIG_FILE}" "hornet" ;;
            "c)") edit_config "/var/lib/hornet/peering.json" "hornet" ;;
            "Z)") choose_editor ;;
            *) ;;
        esac
    done
}
### Hornet Plugins ###
function hornet_plugins() {
    # Interactive checklist to enable/disable HORNET plugins.
    # Reads the current state from config.json, shows a whiptail
    # checklist, then writes the result both to config.json and to the
    # playbook's z-plugins.yml so the two stay consistent, and offers a
    # service restart when anything changed.
    local RESULTS=
    local ORIGINAL_NODE_OBJ=
    local NEW_NODE_OBJ=
    local JSON_OUTPUT=
    local CONFIG_FILE=
    local PLUGINS_FILE=
    # By default list all the existing plugins as disabled; exported so
    # the embedded python helpers below can read them from the environment.
    export ENABLE_PLUGINS=""
    export DISABLE_PLUGINS="Spammer,ZMQ,MQTT,Prometheus,Coordinator,Autopeering,Faucet,Participation"
    declare -A PLUGINS_STATE
    CONFIG_FILE=$(grep ^base_config "$HOME/.nbctl" | awk {'print $2'})
    PLUGINS_FILE="/opt/hornet-playbook/group_vars/all/z-plugins.yml"
    # Save current config state for change detection at the end.
    ORIGINAL_NODE_OBJ=$(jq -r '.node | "\(.enablePlugins) \(.disablePlugins)"' < "$CONFIG_FILE")
    IFS=', ' read -r -a DISABLE_PLUGINS_ARRAY <<< "$DISABLE_PLUGINS"
    # Mark each known plugin ON if it currently appears in enablePlugins.
    for PLUGIN in "${DISABLE_PLUGINS_ARRAY[@]}"
    do
        if jq -e -r '.node.enablePlugins | index("'${PLUGIN}'")' < "$CONFIG_FILE" >/dev/null
        then
            PLUGINS_STATE[${PLUGIN}]="ON"
        else
            PLUGINS_STATE[${PLUGIN}]="OFF"
        fi
    done
    RESULTS=$(whiptail --title "HORC v${__VERSION__} - Plugins" \
             --checklist \
             --cancel-button "Exit" \
             "\nPlease choose plugins to enable or disable.\n\
Select/unselect options using space and\nclick Enter to proceed.\n" 18 60 "${#DISABLE_PLUGINS_ARRAY[@]}" \
             Spammer "... Spammer" "${PLUGINS_STATE[Spammer]}" \
             ZMQ "... ZeroMQ" "${PLUGINS_STATE[ZMQ]}" \
             MQTT "... MQTT" "${PLUGINS_STATE[MQTT]}" \
             Prometheus "... Prometheus Metrics" "${PLUGINS_STATE[Prometheus]}" \
             Coordinator "... (*dev) Comnet Coordinator" "${PLUGINS_STATE[Coordinator]}" \
             Autopeering "... Autopeering" "${PLUGINS_STATE[Autopeering]}" \
             Faucet "... Faucet" "${PLUGINS_STATE[Faucet]}" \
             Participation "... Participation" "${PLUGINS_STATE[Participation]}" \
             3>&1 1>&2 2>&3)
    RC=$?
    [[ $RC -ne 0 ]] && return
    # Every checked plugin goes to ENABLE_PLUGINS and is removed from the
    # disable array; whatever remains stays disabled.
    read -a RESULTS_ARRAY <<< "$RESULTS"
    for CHOICE in "${RESULTS_ARRAY[@]}"
    do
        ENABLE_PLUGINS+=",${CHOICE//\"}"
        DISABLE_PLUGINS_ARRAY=("${DISABLE_PLUGINS_ARRAY[@]/${CHOICE//\"}/}")
    done
    DISABLE_PLUGINS=$(printf "%s," "${DISABLE_PLUGINS_ARRAY[@]}")
    # Rewrite the enable/disable arrays inside config.json (empty entries
    # produced by the substitutions above are filtered out in python).
    JSON_OUTPUT=$(cat "$CONFIG_FILE" | python -c '
import json, sys, os
obj = json.load(sys.stdin)
enable_plugins = os.environ["ENABLE_PLUGINS"]
disable_plugins = os.environ["DISABLE_PLUGINS"]
obj["node"]["enablePlugins"] = [p for p in enable_plugins.split(",") if p != ""]
obj["node"]["disablePlugins"] = [p for p in disable_plugins.split(",") if p != ""]
print(json.dumps(obj))
')
    NEW_NODE_OBJ=$(jq -r '.node | "\(.enablePlugins) \(.disablePlugins)"' <<<"$JSON_OUTPUT")
    # Nothing changed: leave files untouched and skip the restart prompt.
    [[ "$ORIGINAL_NODE_OBJ" == "$NEW_NODE_OBJ" ]] && return
    # Make consistent in yaml variables file used by the playbook.
    YAML_OUTPUT=$(python -c '
import yaml, os
obj = {"hornet_disable_plugins":{}, "hornet_enable_plugins":{}}
enable_plugins = os.environ["ENABLE_PLUGINS"]
disable_plugins = os.environ["DISABLE_PLUGINS"]
obj["hornet_enable_plugins"] = [p for p in enable_plugins.split(",") if p != ""]
obj["hornet_disable_plugins"] = [p for p in disable_plugins.split(",") if p != ""]
print(yaml.dump(obj))
')
    echo "$YAML_OUTPUT" >"$PLUGINS_FILE"
    # Produce new config (pretty-printed by jq).
    jq . <<< "$JSON_OUTPUT" > "$CONFIG_FILE"
    if (whiptail --title "Restart Hornet" \
                 --yesno "You've made changes to the plugins. A restart of HORNET is needed to load the new configuration.\n\nRestart now?" \
                 --defaultno \
                 10 $WIDTH) then
        echo "Restarting hornet, please wait ..."
        /bin/systemctl restart hornet
    fi
}
### Toggle Comnet ###
function toggle_comnet(){
    # Switch the node between mainnet and comnet configuration.
    # NOTE: this feature is disabled — the early return below makes
    # everything after it dead code, kept for when the feature is finished.
    pause "Sorry, this is still work-in-progress. Press ENTER to return to menu."
    clear
    return
    local TO="comnet"
    local FROM="mainnet"
    local MSG
    # Presence of the comnet vars file marks the node as comnet-configured.
    if [ -f "/opt/hornet-playbook/group_vars/all/z-comnet-vars.yml" ]
    then
        TO="mainnet"
        FROM="comnet"
    fi
    MSG="This node seems to be configured for $FROM at the moment.\n\nWould you like to switch it to $TO?"
    if ! (whiptail --title "Toggle ComNet" \
                 --yesno "${MSG}" \
                 --defaultno \
                 10 $WIDTH) then
        return
    fi
    if [ "$TO" == "comnet" ]
    then
        cp -- "/opt/hornet-playbook/roles/shared-files/comnet-vars.yml" "/opt/hornet-playbook/group_vars/all/z-comnet-vars.yml"
    else
        rm "/opt/hornet-playbook/group_vars/all/z-comnet-vars.yml"
    fi
    echo "Stopping HORNET ..."
    /bin/systemctl stop hornet
    # Cleanup: the network databases are incompatible between networks.
    rm -f /var/lib/hornet/snapshot/export.bin /var/lib/hornet/mainnetdb/*
    # Reconfigure
    cd /opt/hornet-playbook \
        && ansible-playbook -v site.yml \
        -i inventory \
        -e overwrite=yes \
        --tags="hornet_config_files"
    MSG="Node configured for ${TO}."
    pause "$MSG Press ENTER to return to menu."
}
### Peers ###
function peers_menu(){
    # Peer-management chooser; echoes the selected tag.
    # Returns whiptail's status: 1 when "Back" is pressed.
    whiptail --title "HORC v${__VERSION__} - Peers" \
             --menu "Choose an option" \
             --cancel-button "Back" \
             12 48 4 \
             "a)" "Add Peers" \
             "b)" "Remove peers" \
             "c)" "List peers" \
             3>&1 1>&2 2>&3
}
function peers(){
    # Peer-management loop: dispatch add/remove/list actions until the
    # user presses "Back" (whiptail status 1).
    local CHOICE
    local RC
    while true; do
        CHOICE=$(peers_menu)
        RC=$?
        if [[ $RC -eq 1 ]]; then
            return
        fi
        case "$CHOICE" in
            "a)") add_peer ;;
            "b)") remove_peers ;;
            "c)") list_peers ;;
            *) ;;
        esac
    done
}
### Main Menu ###
function main_menu() {
    # Build and show the main menu; the prompt is annotated when updates
    # for HORC or HORNET are available. Echoes the chosen tag; returns
    # whiptail's status (1 on Exit).
    local MENU="Choose an option"
    if check_new_horc; then
        MENU="${MENU}\n(*HORC update available)"
    fi
    if check_new_hornet; then
        MENU="${MENU}\n(*HORNET update available)"
    fi
    whiptail --title "HORC v${__VERSION__} - Hornet Configuration Menu" \
             --menu "$MENU" \
             --cancel-button "Exit" \
             29 $WIDTH 18 \
             "a)" "Update Hornet Software" \
             "b)" "Hornet Plugins" \
             "c)" "Manage Services" \
             "d)" "Configure Files" \
             "e)" "View Per Processes Memory Usage" \
             "f)" "Rerun Playbook Installation" \
             "g)" "Clean Unused Docker Images" \
             "h)" "Update HORC and node scripts" \
             "i)" "Enable HTTPS / Certificate" \
             "j)" "Get Node Info" \
             "k)" "Show LatestMilestoneIndex" \
             "l)" "Get new snapshot DB" \
             "m)" "Set DB Maximum Size" \
             "n)" "Peers" \
             "o)" "Update config.json" \
             "Z)" "Configure this Script" \
             3>&1 1>&2 2>&3
}
function run_main_menu() {
    # Top-level event loop: draw the main menu, dispatch the selection,
    # and repeat. The script exits only when the user presses Exit
    # (whiptail status 1); any other status — including ESC — redraws
    # the menu, matching the original recursive behaviour.
    local CHOICE
    local RC
    while true; do
        CHOICE=$(main_menu)
        RC=$?
        if [[ $RC -eq 1 ]]; then
            exit
        fi
        case "$CHOICE" in
            "a)") upgrade_hornet ;;
            "b)") hornet_plugins ;;
            "c)") services ;;
            "d)") configure_files ;;
            "e)") view_ps_mem ;;
            "f)") rerun_playbook ;;
            "g)") cleanup_docker_images ;;
            "h)") update_horc ;;
            "i)") enable_https ;;
            "j)") get_node_info ;;
            "k)") show_lmsi ;;
            "l)") get_db ;;
            "m)") set_db_max_size ;;
            "n)") peers ;;
            "o)") update_config ;;
            "Z)") how_to_setup ;;
            *) ;;
        esac
    done
}
# Get OS and version (set_dist is defined earlier in this script;
# presumably it sets distribution globals used by the menus — confirm).
set_dist
# Run custom updates (one-off maintenance routines, defined earlier).
run_custom_updates
# Enter the interactive main menu loop; only exits via the Exit button.
run_main_menu
| true |
2ecc7e14347bd9bfbc2324c216595f92cb57d401 | Shell | yangshun2005/ShDevTools | /git-checkout-branch-all.sh | UTF-8 | 347 | 3.875 | 4 | [] | no_license | #!/bin/bash
set -e
branchName="$1"
if [[ -z "$branchName" ]]; then
echo "Usage: git-checkout-branch-all.sh <<branchName>>"
exit 1
fi
for repo in $( ls -d */ ); do
if [[ $( ls -a $repo | grep ^.git$ ) ]]; then
cd $repo
echo "------------------- $repo -------------------"
set +e
git checkout "$branchName"
set -e
cd ..
fi
done
| true |
681b6c9c012675292b948db68981fa6d59869a92 | Shell | t04glovern/tribes-pi-config | /setup/01-bluetooth-config.sh | UTF-8 | 1,138 | 3.359375 | 3 | [] | no_license | #!/bin/sh
echo ">>>Installing bluetooth development packages"
sudo apt-get install -y libbluetooth-dev
echo ">>>Installing PyBluez"
sudo pip install -y pybluez
echo ">>>Setting the bluetooth daemon to run in compatibility mode"
if grep -q 'ExecStart=/usr/lib/bluetooth/bluetoothd -C' /lib/systemd/system/bluetooth.service; then
echo "---compatibility mode already set"
else
original_line="ExecStart=/usr/lib/bluetooth/bluetoothd"
new_line="ExecStart=/usr/lib/bluetooth/bluetoothd -C"
sudo sed -i "s%$original_line%$new_line%g" /lib/systemd/system/bluetooth.service
fi
echo ">>>Copying bluetooth-server service to systemd"
sudo cp /home/pi/tribes-pi-config/services/bluetooth-server.service /lib/systemd/system/bluetooth-server.service
echo ">>>Load serial port profile"
sudo sdptool add SP
echo ">>>Applying permissions for bluetooth-server service"
sudo chmod 644 /lib/systemd/system/bluetooth-server.service
echo ">>>Reloading the systemd daemon and bluetooth services"
sudo systemctl daemon-reload
sudo systemctl restart bluetooth
sudo systemctl enable bluetooth-server
sudo systemctl restart bluetooth-server | true |
fcf942eb9159863731f9e4543e0b9cc06a85645a | Shell | aoswalt/dotfiles | /functions/git-branch-current | UTF-8 | 296 | 3.6875 | 4 | [] | no_license | # Displays the current Git branch.
if ! command git rev-parse 2> /dev/null; then
print "$0: not a repository: $PWD" >&2
return 1
fi
local ref="$(command git symbolic-ref HEAD 2> /dev/null)"
if [[ -n "$ref" ]]; then
print "${ref#refs/heads/}"
return 0
else
return 1
fi
# vim: ft=zsh
| true |
133a6405fd0e325a34a7b2d393421b56a7588fe5 | Shell | bgutter/brb-utils | /brb-restore-isos.sh | UTF-8 | 1,893 | 4.21875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# If anything fails, quit
#
set -e
#
# Options
#
loopDevice=$(losetup -f)
restoreDir=${@:$#}
isoList=${*%${!#}}
backupDriveName=backupImage
backupDriveContainerName=$backupDriveName'Encrypted'
bar="======================================="
for isoFile in $isoList; do
#
# Mount the ISO to a loop device, mount
# the LUKS logical volume, then mount the
# EXT4 partition.
#
echo
echo "Mounting $isoFile..."
echo $bar
sudo losetup $loopDevice $isoFile
sudo cryptsetup luksOpen $loopDevice $backupDriveContainerName
sudo mount /dev/mapper/$backupDriveContainerName /mnt/backups/$backupDriveName
#
# Echo the contents of the manifest and the map,
# ask for permission to continue
#
echo
echo "Found the following files:"
echo $bar
cat /mnt/backups/$backupDriveName/MANIFEST.TXT
echo
echo "The overall files -> disc map looks like this:"
echo $bar
cat /mnt/backups/$backupDriveName/MAP.TXT
echo
read -p "Continue? (y/n): " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
#
# Rsync the files from the mount back to the restore dir
#
echo
echo "rsync-ing /mnt/backups/$backupDriveName/* (except map and manifest) to $restoreDir..."
echo $bar
sudo rsync -a --stats --exclude "lost+found" --exclude "MAP.TXT" --exclude "MANIFEST.TXT" "/mnt/backups/$backupDriveName/" "$restoreDir"
#
# Verify that all folders in ISO now appear in restore dir
#
# TODO
else
echo
echo "Skipping..."
echo $bar
fi
#
# Clean up
#
echo
echo "Unmounting everything & cleaning up..."
echo $bar
sudo umount /mnt/backups/$backupDriveName
sudo cryptsetup luksClose $backupDriveContainerName
sudo losetup -d $loopDevice
done
echo "Done!"
echo
| true |
e45700f9f6df40bc7a703099e9fa53fa440a3df9 | Shell | lowkc/chembibrename | /rename.sh | UTF-8 | 1,975 | 4.28125 | 4 | [] | no_license | #!/bin/bash
if [ -z $1 ]; then
echo "No command selected"
exit
fi
# Check that necessary files exist.
if [ ! -f $2 ]; then
echo "Input bibliography file does not exist"
exit
fi
if [ ! -f JournalAbbreviationList ]; then
echo "JournalAbbreviationList files does not exist"
exit
fi
if [ ! -f JournalNamesList ]; then
echo "JournalNamesList files does not exist"
exit
fi
if [ $1 = "list" ]; then
if [ -f "abbreviated-"$2 ]; then
cat "abbreviated-"$2 | grep journal | uniq -u
exit
else
echo "Run replace command first"
fi
fi
if [ ! "$1" = "replace" ]; then
exit
fi
#
# Check that JournalAbbreviationList and JournalNamesList corresponds
if [ ! $(cat JournalAbbreviationList | wc -l) = $(cat JournalNamesList | wc -l) ]; then
exit
# else
# echo "JournalNamesList and JournalAbbreviationList checks out."
fi
# Setup variables and inform user
outfile="abbreviated-"$2
IFS=$'\n'
Abbrlines=($(cat JournalAbbreviationList))
Namelines=($(cat JournalNamesList))
echo ""
echo "WARNING!: Continuing will remove $outfile and write new bibliography to this."
read -p "Do you want to continue [y/n]? "
if [ ! "$REPLY" = "y" ]; then
exit
fi
rm $outfile
cp $2 $outfile
# Command to insert abbreviations.
# Operates on the globals set by the caller's loop: $journal (full name),
# $jabbrev (abbreviation) and $outfile (bibliography being rewritten).
# NOTE(review): "continue" inside a function affecting the caller's loop
# is bash-specific behaviour and warns on newer bash — confirm intent.
function replace() {
  # Generate search regex matching "journal = {<name>}" with flexible spacing;
  # /I makes the match case-insensitive (GNU sed extension, hence gsed).
  if grep -Rqi "$journal" $outfile; then
    echo "Replacing \"$journal\" with \"$jabbrev\""
    searchregex="journal[ ]*=[ ]*{$journal}"
    replaceregex="journal = {$jabbrev}"
    sedcommand="s/"$searchregex"/"$replaceregex"/Ig"
    escapedchar="'"
    eval "gsed -i $escapedchar$sedcommand$escapedchar $outfile"
  else
    continue
  fi
}
# Lowercase the whole string, then capitalize the first letter of each
# word. \L and \u are GNU sed extensions, hence gsed (GNU sed on macOS).
TitleCaseConverter() {
  gsed 's/.*/\L&/; s/[a-z]*/\u&/g' <<<"$1"
}
# Walk both lists in lockstep and apply each name -> abbreviation pair
# to the output file via replace() (which reads these globals).
for i in $(seq 1 $(cat JournalNamesList | wc -l) ); do
  # journal_anycase=${Namelines[$i-1]}
  journal=${Namelines[$i-1]}
  # journal="$(TitleCaseConverter "$journal_anycase")"
  jabbrev=${Abbrlines[$i-1]}
  replace
done
| true |
d9ff5d55c2022532ed05adb9863e9276f3c3a189 | Shell | emilieyyu/cmpt318 | /flu_vaccine/flu parts of speech/flu_vaccine_script.sh | UTF-8 | 2,217 | 3.3125 | 3 | [] | no_license | #!/bin/bash
#manually corrected typos - only 2 'l;and' and 'manyof'
#remove all extra tabs in front of each line
echo 'Removing tabs in front of lines...'
sed 's/^ *//g' < flu_vaccine.txt > flu_vaccine_clean.txt
#tokenization
echo 'Tokenizing...'
cat flu_vaccine_clean.txt | tr -cs 'a-zA-Z0-9' '[\n*]' | tr 'A-Z' 'a-z' > flu_vaccine_tokens.txt
#alphabetically organized
echo 'Sorting alphabetically...'
cat flu_vaccine_clean.txt | tr -cs 'a-zA-Z0-9' '[\012*]' | tr A-Z a-z | sort | uniq > flu_vaccine_alph.txt
#sorted based on frequency
echo 'Sorting based on frequency...'
cat flu_vaccine_tokens.txt | sort | uniq -c | sort -rn > flu_vaccine_types.txt
cat flu_vaccine_clean.txt | tr '\r\n' ' ' | tr '[.?!]' '[\n*]' > flu.txt
echo 'creating sentences'
mv flu.txt ./lapos-0.1.2
cat ./lapos-0.1.2/flu.txt | ./lapos-0.1.2/lapos -t -m ./lapos-0.1.2/model_wsj02-21 > flu_vaccine_pos.txt
mv ./lapos-0.1.2/flu.txt ./
cat flu_vaccine_pos.txt | tr -cs '[:alnum:]\/' '[\n*]' > flu_pos_token_list.txt
cat flu_pos_token_list.txt | sort | uniq > flu_pos_types.txt
cat flu_pos_token_list.txt | sort | uniq -c | sort -rn > flu_pos_types_counted.txt
#calculate avgs
echo 'Calculating averages'
cat flu_vaccine.txt | tr '\r\n' ' ' | tr '[.?!]' '[\n*]' | wc | awk '{print "Average sentence length =", $2/$1, "words.", "\nAverage word length = ", $3/$2, "chars.", "\nAverage chars per sentence =", $3/$1, "chars."}' > averages.txt
#2, 3, 4 grams with frequency
echo 'Extracting 2-gram count...'
cat flu_vaccine_clean.txt | tr -cs [a-zA-Z0-9] '[\012*]' | tr 'A-Z' 'a-z' > tmpfile1.txt
tail --lines=+2 tmpfile1.txt > tmpfile2.txt
paste tmpfile1.txt tmpfile2.txt | sort | uniq -c | sort -rn > 2-grams.txt
echo 'Extracting 3-gram count...'
paste tmpfile1.txt tmpfile2.txt > tmpfile3.txt
tail --lines=+3 tmpfile1.txt > tmpfile4.txt
paste tmpfile3.txt tmpfile4.txt | sort | uniq -c | sort -rn > 3-grams.txt
echo 'Extracting 4-gram count...'
tail --lines=+4 tmpfile1.txt > tmpfile5.txt
paste tmpfile3.txt tmpfile4.txt > tmpfile6.txt
paste tmpfile6.txt tmpfile5.txt | sort | uniq -c | sort -rn > 4-grams.txt
mkdir ngram_files
mv tmpfile1.txt tmpfile2.txt tmpfile3.txt tmpfile4.txt tmpfile5.txt tmpfile6.txt ngram_files
echo 'Complete.' | true |
92f20c1a9756d57e3aad45b6b4b61660bb5b2f2b | Shell | cybozu-go/neco | /ignitions/common/files/opt/sbin/setup-iptables-rules | UTF-8 | 555 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh -e
{{ if ne (Metadata "external_ip_address_block") "" }}
# Restrict access from the outside if the external IP address block has been specified.
iptables -t filter -N PROTECT_GLOBAL
iptables -t filter -A PROTECT_GLOBAL ! -d {{ Metadata "external_ip_address_block" }} -j RETURN
iptables -t filter -A PROTECT_GLOBAL -p tcp -m multiport --dports 80,443 -j RETURN
iptables -t filter -A PROTECT_GLOBAL -p udp -m multiport --dports 80,443 -j RETURN
iptables -t filter -A PROTECT_GLOBAL -j DROP
iptables -t filter -I INPUT -j PROTECT_GLOBAL
{{ end }}
| true |
46cba819d79fce0f5a06886489ddacab5dce2678 | Shell | zoispag/.dotfiles | /source/system/.python | UTF-8 | 258 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env bash
# Source Poetry
export PATH="/Users/zoispag/.local/bin:$PATH"
# Source PyEnv
# if command -v pyenv 1>/dev/null 2>&1; then
# export PYENV_ROOT="$HOME/.pyenv"
# export PATH="$PYENV_ROOT/bin:$PATH"
# eval "$(pyenv init --path)"
# fi
| true |
b1a5a20e2e83318e115512c11a9526d7276219cb | Shell | mano8888/july | /while.sh | UTF-8 | 286 | 3.109375 | 3 | [] | no_license | a=1
while [ $a -le 25 ]
do
echo $a
(( a++ ))
done
while read a
do
echo $a
done < while.sh
cat while.sh | while read a
do
echo $a
done
while IFS= read -r line
do
echo $line
done < /etc/host.conf
| true |
85f3e8fe2e77ec2ba96a1af327312eb5b5ffec95 | Shell | huzelin/RTP | /Build.sh | UTF-8 | 102 | 2.65625 | 3 | [] | no_license | #!/bin/sh
if [ ! -d build ]; then
mkdir build
fi
set -x
LWP=`pwd`
pushd build
cmake ../
make -j 2
| true |
a97c88b78e4edb8fca45312d0dd30318c4bb3f18 | Shell | timm/crusty | /bash/screen_quit | UTF-8 | 890 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# Derek.perriero, Jan 30
## capture the screen pid with a little parsing
SCREENPID=`screen -ls | grep -e '[0-9]\.pts' | awk '/[d\]]/{print $1}' | awk '{split($1,a,"."); print a[1]}'`
QUIT="Quit"
while [ 1 ]
do
printf "$QUIT crusty? (yes/no) "
read choice
if [ "$choice" == "yes" ]
then
## just a simulated crusty exit with progress dots
printf "Stopping crusty"
for((i=0;i<8;i+=1)); do
sleep 0.3
printf "."
done
## end cool crusty exit
## detach screen session
screen -d $SCREENPID
## after detach, kill the pid
kill $SCREENPID
## break out of while loop (not necessary, but precautionary)
break
elif [ "$choice" == "no" ]
then
QUIT="quit"
printf "Select another crusty option or, "
else
QUIT="Quit"
echo "Choose yes or no."
fi
done
| true |
4c0b35c598c4f7068ac58eaa7a11e63a16e1f206 | Shell | kirner/enigma2pc | /scripts/e2pc_tslook.sh | UTF-8 | 4,083 | 3.4375 | 3 | [] | no_license | #!/bin/sh
#
# The easy script create files 'TS_Files'.meta for Enigma2PC
# and after can look TS_Files (recordings VDR, downloading from internet)
#
# You directory witch TS-files, default current the directory
# When some directory
# DIR_TS="DIR1 DIR3 DIR3"
DIR_TS=`pwd`
# You LANGUAGE
AUDIO_LANG="(rus)"
#Temp file
TMP_FILE="/tmp/e2ts.txt"
############################################################
# Data for 'TS_Files.meta'
############################################################
#REF_SERVICE="1:0:1:84:7:70:168000:0:0:0:"
REF_SERVICE="1:0:0:0:0:0:0:0:0:0:"
F_NAME=""
F_DECRIPTION=""
F_CREATE=""
UNKNOW=""
F_LENGTH=""
F_SIZE=""
SERVICE_DATA="f:0,c:"
PACKET="188"
SCRAMBLED="0"
# Build "$1.meta" for the TS file "$1": probe the streams with ffmpeg,
# extract the video/audio PIDs and codec types, and write the Enigma2
# meta fields. Uses/updates the globals defined at the top of the script.
create_mfile() {
    echo "For ts-file $ts create $ts.meta file"
    F_NAME=`basename $1`
    echo "Filename is $F_NAME"
    ################################################
    # Capture ffmpeg's stream listing into TMP_FILE
    ###############################################
    ffmpeg -i $1 2>&1 | grep Stream | tee $TMP_FILE
    ################################################
    # Video PID detection: take the hex PID from "[0x...]" and
    # left-pad it to 4 digits, then prefix "00".
    ################################################
    VPID=`grep Video $TMP_FILE | cut -d'x' -f2 | cut -d']' -f1`
    count_vpid=$((4-`echo -n $VPID | sed s/*//g | wc -c`))
    while [ $count_vpid != 0 ]
    do
        VPID="0"$VPID
        count_vpid=$(($count_vpid-1))
    done
    echo "VPID: $VPID"
    VPID="00"$VPID
    #################################################
    # Audio PID detection: prefer the configured language (AC3 before
    # MP2), otherwise fall back to stream #0.1 (AC3 before anything).
    #################################################
    AAC3=""
    if [ $(grep Audio $TMP_FILE | grep $AUDIO_LANG | grep -c ac3 ) -ne 0 ]; then
        APID=`grep Audio $TMP_FILE | grep $AUDIO_LANG | grep ac3 | head -1 | cut -d'x' -f2 | cut -d']' -f1`
        AAC3=$AAC3"1"
    elif [ $(grep Audio $TMP_FILE | grep $AUDIO_LANG | grep -c mp2 ) -ne 0 ]; then
        APID=`grep Audio $TMP_FILE | grep $AUDIO_LANG | grep mp2 | head -1 | cut -d'x' -f2 | cut -d']' -f1`
    elif [ $(grep Audio $TMP_FILE | grep "Stream #0.1" | grep -c ac3 ) -ne 0 ]; then
        APID=`grep Audio $TMP_FILE | grep "Stream #0.1" | cut -d'x' -f2 | cut -d']' -f1`
        AAC3=$AAC3"1"
    else
        APID=`grep Audio $TMP_FILE | grep "Stream #0.1" | cut -d'x' -f2 | cut -d']' -f1`
    fi
    # Left-pad the audio PID to 4 hex digits as well.
    count_apid=$((4-`echo -n $APID | sed s/*//g | wc -c`))
    while [ $count_apid != 0 ]
    do
        APID="0"$APID
        count_apid=$(($count_apid-1))
    done
    echo "APID: $APID"
    # Codec prefix: 01 = MPEG audio, 04 = AC3.
    if [ -z $AAC3 ]; then
        APID=",c:01"$APID
    else
        APID=",c:04"$APID
    fi
    ##################################################
    # Video type: tag h264 streams with an extra codec entry.
    ##################################################
    if [ $(grep Video $TMP_FILE |grep -c h264) -ne 0 ]; then
        VTYPE=",c:050001"
    else
        VTYPE=""
    fi
    ###################################################
    # Assemble the m_service_data string from the detected pieces.
    ###################################################
    SERVICE_DATA=$SERVICE_DATA$VPID$APID$VTYPE
    ####################################################
    # Write the meta file: one field per line in Enigma2's order.
    ####################################################
    echo $REF_SERVICE > $1".meta"
    echo $F_NAME >> $1".meta"
    echo $F_DECRIPTION >> $1".meta"
    echo $F_CREATE >> $1".meta"
    echo $UNKNOW >> $1".meta"
    echo $F_LENGTH >> $1".meta"
    echo $F_SIZE >> $1".meta"
    echo $SERVICE_DATA >> $1".meta"
    echo $PACKET >> $1".meta"
    echo $SCRAMBLED >> $1".meta"
    ####################################################
    # Reset the per-file globals for the next invocation.
    ####################################################
    APID=""
    VPID=""
    VTYPE=""
    AAC3=""
    SERVICE_DATA="f:0,c:"
}
meta() {
    # For each *.ts file under $DIR_TS, create a companion .meta file
    # unless one already exists and contains the expected service-data
    # line. $DIR_TS may hold several space-separated directories, so it
    # is deliberately left unquoted on the find command line.
    # NOTE: filenames containing whitespace are not supported by this
    # for-over-find pattern; kept this way for /bin/sh compatibility.
    for ts in $(find $DIR_TS -iname '*.ts')
    do
        # grep -q replaces the original "[ $(grep -c ...) -ne 0 ]" and the
        # pattern/file arguments are now quoted.
        if [ -f "$ts.meta" ] && grep -q "$SERVICE_DATA" "$ts.meta"; then
            echo "Meta file $ts is exist"
        else
            create_mfile "$ts"
        fi
    done
}
rm_file() {
    # Remove the temporary ffmpeg work file, if one was created.
    if [ ! -f "$TMP_FILE" ]; then
        return 0
    fi
    rm "$TMP_FILE"
}
meta_clear() {
    # Delete every generated .ts.meta file under the configured
    # directories. $DIR_TS may intentionally hold several space-separated
    # directories, so it is left unquoted. "-exec ... +" batches the
    # removals (the original spawned one rm per file with "\;"), and the
    # recursive -R flag was dropped since only regular files match.
    find $DIR_TS -iname "*.ts.meta" -exec rm -f -- {} +
}
# Dispatch on the first argument; anything unrecognized (including no
# argument) behaves like "create_meta"/"start".
case "$1" in
    create_meta)
        meta
        rm_file
    ;;
    start)
        meta
        rm_file
    ;;
    clear)
        meta_clear
    ;;
    stop)
        meta_clear
    ;;
    *)
        meta
        rm_file
    ;;
esac
| true |
8dbd8106023603cdba4bfb19818ac78f71f06507 | Shell | thewidgetsmith/dotfiles | /inc/zsh/completion.zsh | UTF-8 | 1,285 | 2.515625 | 3 | [
"MIT"
] | permissive | # Use modern completion system
# Use modern zsh completion system.
autoload -Uz compinit && compinit
# Describe what is being completed.
zstyle ':completion:*' auto-description 'specify: %d'
# Completer chain: expand, normal completion, then typo correction.
zstyle ':completion:*' completer _expand _complete _correct _approximate
# Header shown above each group of matches.
zstyle ':completion:*' format 'Completing %d'
# Group matches under their tag names.
zstyle ':completion:*' group-name ''
# Use the selection menu when there are at least 2 matches.
zstyle ':completion:*' menu select=2
# Colorize completion listings, using dircolors where available.
if [ -x "$(command -v dircolors)" ]; then
  eval "$(dircolors -b)"
  zstyle ':completion:*:default' list-colors ${(s.:.)LS_COLORS}
else
  export CLICOLOR=1
  zstyle ':completion:*:default' list-colors ''
fi
# Default (uncolored) listing style for other tags.
zstyle ':completion:*' list-colors ''
# Prompt shown when the match list does not fit on screen.
zstyle ':completion:*' list-prompt %SAt %p: Hit TAB for more, or the character to insert%s
# matches case insensitive for lowercase
zstyle ':completion:*' matcher-list '' 'm:{a-z}={A-Z}' 'm:{a-zA-Z}={A-Za-z}' 'r:|[._-]=* r:|=* l:|=*'
# Use menu selection for long match lists too.
zstyle ':completion:*' menu select=long
# Prompt shown while scrolling through a long menu.
zstyle ':completion:*' select-prompt %SScrolling active: current selection at %p%s
# Never fall back to the old compctl system.
zstyle ':completion:*' use-compctl false
# Verbose descriptions for matches.
zstyle ':completion:*' verbose true
# Highlight PIDs when completing for kill.
zstyle ':completion:*:*:kill:*:processes' list-colors '=(#b) #([0-9]#)*=0=01;31'
# Process listing used when completing kill arguments.
zstyle ':completion:*:kill:*' command 'ps -u $USER -o pid,%cpu,tty,cputime,cmd'
# pasting with tabs doesn't perform completion
zstyle ':completion:*' insert-tab pending
| true |
0d9a5f037488da6da11f4e4f53881cd320d1718b | Shell | BoulderAI/tegra-demo-distro | /layers/meta-tegrademo/recipes-demo/data-overlay-setup/data-overlay-setup/data-overlay-setup.sh.in | UTF-8 | 830 | 4.09375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
#
# Script to verify that the upperdir and workdir
# directories for overlayfs mounts exist.
#

# This function is fed mount options from the fstab,
# looks for the upperdir= and workdir= options, and
# creates those directories if they aren't already present.
process_mntopts() {
    read opt1 opt2 opt3 opt4 opt5 opt6 opt7 opt8
    for opt in $opt1 $opt2 $opt3 $opt4 $opt5 $opt6 $opt7 $opt8; do
	for tag in upperdir workdir; do
	    # Strip "tag=" from the front; if nothing was stripped this
	    # option is not the one we are looking for.
	    val="${opt##$tag=}"
	    if [ "$opt" != "$val" -a ! -d "$val" ]; then
		mkdir -p "$val"
	    fi
	done
    done
}

# Read the fstab looking for overlay mounts; split their option string
# on commas (via IFS in a subshell) and feed it to process_mntopts.
while read fs_spec fs_file fs_vfstype fs_mntopts fs_freq fs_passno; do
    [ "$fs_spec" = "overlay" -a "$fs_vfstype" = "overlay" ] || continue
    echo "$fs_mntopts" | (IFS=, process_mntopts)
done < @SYSCONFDIR@/fstab

exit 0
| true |
c509c362a9b8b8adb3920bd54450d8816aceb4ed | Shell | srijanshetty/cli-goodies | /settings/990-aliases.zsh | UTF-8 | 599 | 2.75 | 3 | [
"MIT"
] | permissive | #
# Sensible defaults
#
# Make a leading "$ " harmless when commands are copy-pasted with the prompt.
alias \$=' '
# Search: make ag honor the global ignore file.
alias ag='ag --path-to-ignore ~/.ignore'
# List all global npm packages
alias npm-list="npm list -g --depth 0"
# (-u) unicode support and color support (-2)
alias tmux="tmux -u -2"
# top with full command lines shown.
alias top="top -c"
# Short podman / podman-compose shortcuts.
alias pd="podman"
alias pc="podman-compose"
# Jail some standard programs with firejail for sandboxing.
# qutebrowser
alias b="firejail qutebrowser"
alias vlc="firejail vlc"
#
# Conditional aliases (assume exa and viddy are installed)
#
alias ls="exa"
alias watch="viddy"
#
# Utilities
#
# Quick SHA-256 checksum.
alias sha='shasum -a 256 '
6ecb61c9ef3be044dbea9cb2492a3dca3ee2642f | Shell | samisalkosuo/pureapp | /scriptpackages/AsperaCargoD/setup.sh | UTF-8 | 2,067 | 3.6875 | 4 | [
"Apache-2.0"
] | permissive | #setup Aspera CargoD
#download package
#install cargod
#setup config file
#execute: /opt/aspera/cargod/bin/asperacargod &
function changeString {
if [[ $# -ne 3 ]]; then
echo "$FUNCNAME ERROR: Wrong number of arguments. Requires FILE FROMSTRING TOSTRING."
return 1
fi
SED_FILE=$1
FROMSTRING=$2
TOSTRING=$3
TMPFILE=$SED_FILE.tmp
#escape to and from strings
FROMSTRINGESC=$(echo $FROMSTRING | sed -e 's/\\/\\\\/g' -e 's/\//\\\//g' -e 's/&/\\\&/g')
TOSTRINGESC=$(echo $TOSTRING | sed -e 's/\\/\\\\/g' -e 's/\//\\\//g' -e 's/&/\\\&/g')
sed -e "s/$FROMSTRINGESC/$TOSTRINGESC/g" $SED_FILE > $TMPFILE && mv $TMPFILE $SED_FILE
if [ ! -f $TMPFILE ]; then
return 0
else
echo "$FUNCNAME ERROR: Something went wrong."
return 2
fi
}
# Stop the firewall so the daemon's ports are reachable.
/sbin/service iptables stop
# Default the download URL unless the caller pre-set it in the env.
if [[ "$CARGOD_RPM_URL" == "" ]] ; then
  CARGOD_RPM_URL="http://download.asperasoft.com/download/sw/cargodownloader/1.3/aspera-cargod-1.3.0.80012-linux-32.rpm"
fi
wget $CARGOD_RPM_URL
#prereqs: 32-bit runtime libraries for the 32-bit cargod binary
yum -y install glibc.i686
yum -y install zlib-devel.i686
rpm -i aspera-cargod-1.3.0.80012-linux-32.rpm
cp asperacargo.conf.template asperacargo.conf
# Prompt only for values not already supplied via the environment.
if [[ "$FASPEXUSER" == "" ]] ; then
  echo "Enter Faspex username: "
  read FASPEXUSER
fi
if [[ "$FASPEXPASSWORD" == "" ]] ; then
  echo "Enter Faspex password: "
  read FASPEXPASSWORD
fi
if [[ "$FASPEXURL" == "" ]] ; then
  echo "Enter Faspex URL: "
  read FASPEXURL
fi
if [[ "$DOWNLOADDIR" == "" ]] ; then
  echo "Enter download directory: "
  read DOWNLOADDIR
fi
# Fill the %PLACEHOLDER% tokens in the config template.
# NOTE(review): the values are passed unquoted, so a password or path
# containing spaces would word-split into extra arguments — confirm.
changeString asperacargo.conf %USER% $FASPEXUSER
changeString asperacargo.conf %PASSWORD% $FASPEXPASSWORD
changeString asperacargo.conf %FASPEXURL% $FASPEXURL
changeString asperacargo.conf %DOWNLOADDIR% $DOWNLOADDIR
mv asperacargo.conf /opt/aspera/cargod/etc/asperacargo.conf
#comment if not want to start automatically
/opt/aspera/cargod/bin/asperacargod &
echo "View transfers..."
echo "use cmd: tail -f /opt/aspera/cargod/bin/../var/log/asperacargod.log"
e98d23bf9064fcf11f785749b1f7d42162ca1c0c | Shell | mosckital/dotfiles | /basics/.man_funcs | UTF-8 | 8,161 | 4.0625 | 4 | [] | no_license | #!/bin/bash
#
# This script includes the utility functions to print tips about bash commands
# in terminal, avoiding search for common answers online again and again.
#
# This script requires that the `.functions` script has been sourced in order
# to use the utility color printing functions inside.
# # print a warning if the required .functions script was not sourced beforehand
# if [ "$(type -t define_styles)" != "function" ]; then
# echo "WARNING! '.functions' must be sourced before using this script!"
# fi
#######################################
# Define shortcuts to printing style control commands.
#
# This function will define the shortcuts in global scope, so please use
# the following unset_styles() function to unset when no longer needed.
#######################################
function define_styles() {
    # Each shortcut uses ${var:-default} so a value already set by the
    # caller (or by an earlier call) is preserved instead of re-queried.
    # NOTE(review): tput needs a valid $TERM; in a non-terminal context
    # these evaluate to empty strings — confirm that is acceptable.
    # define the font colors
    black=${black:-$(tput setaf 0)}
    red=${red:-$(tput setaf 1)}
    green=${green:-$(tput setaf 2)}
    yellow=${yellow:-$(tput setaf 3)}
    blue=${blue:-$(tput setaf 4)}
    magenta=${magenta:-$(tput setaf 5)}
    cyan=${cyan:-$(tput setaf 6)}
    white=${white:-$(tput setaf 7)}
    # define the font styles
    bold=${bold:-$(tput bold)}
    underline=${underline:-$(tput smul)}
    blink=${blink:-$(tput blink)}
    rev=${rev:-$(tput rev)}
    invis=${invis:-$(tput invis)}
    # define reset to normal
    reset=${reset:-$(tput sgr 0)}
}
#######################################
# Unset the previous defined printing style control commands from global scope.
#######################################
function unset_styles() {
    # Drop every style shortcut that define_styles() placed in the
    # global scope: font colors, font styles, and the reset sequence.
    local _style
    for _style in black red green yellow blue magenta cyan white \
                  bold underline blink rev invis reset; do
        unset "$_style"
    done
}
#######################################
# Print colored tips about sourcing a script
#######################################
function man_source() {
# define the printing styles
define_styles
# print pretty tips
echo "\
${cyan}Tips for ${bold}source${reset} ${cyan}command${reset}
This tip is a concise conclusion of the following wonderful answer:
${underline}https://askubuntu.com/a/182020${reset}
Basically, there are the following ways to source or execute a script:
1. ${magenta}source${reset} command, which will load and execute the target \
script in the current shell.
2. ${magenta}. script${reset}, which is identic to ${blue}source${reset} \
command. Plese be aware of the space between the dot and the sciprt name.
3. ${magenta}./script${reset}, which requires the script being executable, \
where executable permission can be set by ${blue}chmox +x${reset}, and will \
use the program indicated by the Hashbang line, normally the first line \
looking like ${green}#!/bin/sh${reset}, to run the script.\
"
# unset the defined printing styles
unset_styles
}
#######################################
# Print colored tips about condition in if statment
#######################################
function man_condition() {
# define the printing styles
define_styles
echo "\
${cyan}Tips for ${bold}condition${reset} ${cyan}operators in ${bold}if\
${reset} ${cyan}statement${reset}
This tip is a concise conclusion of the following wonderful answer:
${underline}https://unix.stackexchange.com/a/306115${reset}
Basically, there are the following five ways to evaluate a condition in an \
${bold}if${reset} statement:
1. ${magenta}if ${bold}[ confidtion ]${reset}:
Traditinoal shell ${blue}test${reset} command available on all POSIX shells. \
It sets an exit code, normally ${bold}0${reset} for success/true and \
${bold}1${reset} for error/false, and the ${magenta}if${reset} statement acts \
accordingly. Typically to test if a file exists or if two numbers are equal.
2. ${magenta}if ${bold}[[ confidtion ]]${reset}:
Upgraded variation of ${blue}test${reset} from ${green}ksh${reset} that \
${green}bash${reset} and ${green}zsh${reset} also support. It also sets an \
exit code and the ${magenta}if${reset} statement acts accordingly. Among its \
extended features, it can test if a string matches a regular expression by \
the operator ${blue}=~${reset}, which matches the quoted string on the left \
to the unquoted regular expression on the right, like: \
${bold}[[ \"string\" =~ pattern ]]${reset}.
3. ${magenta}if ${bold}(( confidtion ))${reset}:
Another ${green}ksh${reset} extension that ${green}bash${reset} and \
${green}zsh${reset} also support, to perform arithmetics. It returns an exit \
code of ${bold}0 (true)${reset} if the arithmetic calculation result is \
non-zero and the ${magenta}if${reset} statement acts accordingly.
4. ${magenta}if ${bold}(command)${reset}:
This runs the ${magenta}command${reset} in a subshell. When the command \
completes, it sets an exit code and the ${magenta}if${reset} statement acts \
accordingly. A typical reason for using a subshell like this is to limit the \
side-effects of the ${magenta}command${reset} if ${magenta}command${reset} \
requires variable assignments or other changes to the shell's environment. \
Such changes do not remain after the subshell completes.
5. ${magenta}if ${bold}command${reset}:
The ${bold}command${reset} will be exected and the ${magenta}if${reset} \
statement acts accordingly. Please be aware that side-effects may exist!"
# unset the defined printing styles
unset_styles
}
#######################################
# Print the links to some useful cheatsheets
#######################################
function man_cheatsheets() {
    # define the printing styles
    define_styles
    # Emit the styled list of cheatsheet links; the trailing backslash
    # continues the quoted string without inserting a newline.
    echo "\
The followings are links to some useful ${cyan}cheatsheets${reset}:
1. ${magenta}Bash${reset} Scripting CheatSheet:
${underline}https://devhints.io/bash${reset}"
    # unset the defined printing styles
    unset_styles
}
#######################################
# Print the links to some useful cheatsheets
#######################################
function man_io_redirections() {
# define the printing styles
define_styles
echo "\
${cyan}Tips for ${bold}IO Redirection${reset} ${cyan}in ${bold}bash${reset}
This tip is a concise conclusion of the following wonderful answer:
${underline}https://unix.stackexchange.com/a/70971${reset}
1. ${cyan}Numbers${reset} in IO redirection:
a. a ${magenta}number 1${reset} = standard out / STDOUT
b. a ${magenta}number 2${reset} = standard error / STDERR
c. if no number specified, the ${magenta}number 1${reset} is assumed
2. ${cyan}Functions${reset}:
a. ${magenta}M>&-${reset}, like ${green}2>&-${reset}:
${green}M${reset} is a file descriptor number. This will close output for \
whichever file descriptor is referenced, i.e. ${green}M${reset}.
b. ${magenta}M>/dev/null${reset}, like ${green}2>/dev/null${reset}:
${green}M${reset} is a file descriptor number. This will redirect the file \
descriptor to /dev/null, which discards all messages.
c. ${magenta}M>&N${reset}, like ${green}2>&1${reset}:
${green}M${reset} and ${green}N${reset} are both file descriptor numbers. \
This combines the output of both file descriptors into a single stream of the \
latter one.
3. ${cyan}Abbreviations${reset}:
a. ${magenta}|&${reset} is an abbreviation for ${green}2>&1 |${reset}
b. ${magenta}&>/dev/null${reset} is an abbreviation for \
${green}>/dev/null${reset}
c. ${magenta}>/dev/null${reset} is an abbreviation for \
${green}1>/dev/null${reset}"
# unset the defined printing styles
unset_styles
}
#######################################
# Print explanation and link for parameter extension
#######################################
function man_parameter_expension() {
    # define the printing styles
    define_styles
    # Print a styled pointer to the upstream Bash manual section on
    # shell parameter expansion.
    echo "\
${cyan}Tips for ${bold}Parameter Expansion${reset}
A very good and fully detailed manual can be found by following the link:
${underline}\
https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html\
${reset}"
    # unset the defined printing styles
    unset_styles
}
| true |
2a12037d919f53c10e0b68fe2cd7794f1408438e | Shell | trickMin/ui | /build.sh | UTF-8 | 451 | 3.0625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
set -e
# Print tool versions for the build log.
echo node version:
node --version
echo npm version
npm --version
# Install dependencies
yarn
yarn lerna bootstrap
# NOTE(review): because of `set -e` above, a failing yarn command aborts
# the script immediately, so $? here is always 0 and the FAILED branches
# below are unreachable — confirm whether `set -e` or the manual checks
# should be kept.
if [ $? -eq 0 ]; then
  echo install SUCCESS
  # npm run build
  yarn run build
  if [ $? -eq 0 ]; then
    echo build SUCCESS
    # Copy the built UI into the server package's public directory.
    rm -rf ./packages/server/public
    cp -R ./packages/ui/dist ./packages/server/public
  else
    echo build FAILED
    exit 1
  fi
else
  echo install FAILED
  exit 1
fi
f9afa34ebe812610941839f55785aeb1f1f78271 | Shell | clojuredays/clojuredays.org | /scripts/dev.sh | UTF-8 | 346 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# Start a webserver serving the static resources in the background and capture the PID
python3 -m http.server --directory public/ > /dev/null &
SERVER="$!"
# Recompile CSS on save
lein sass watch &
SASS="$!"
echo "Hit <enter> to quit"
read -r _DONE
kill "$SERVER"
# NOTE(review): SIGSTOP cannot be trapped, and this trap is installed
# after $SERVER was already killed above (the EXIT arm re-kills it, and
# only then is $SASS cleaned up) — confirm the intended ordering.
trap "kill $SERVER $SASS" SIGINT SIGTERM EXIT SIGSTOP
echo Done.
| true |
5c9792d44f3fd2dcdfdaf5347057748010a76f32 | Shell | leomontenegro6/shantae-risky-revenge-traducao-ptbr | /commitar.sh | UTF-8 | 208 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Prompt for the commit message (prompt text is Portuguese on purpose;
# the embedded `echo $'\n> '` puts the "> " cursor on its own line).
read -p "Digite o texto do commit: `echo $'\n> '`" COMMIT
# NOTE(review): --global overwrites the machine-wide git identity for
# every repository on this host — confirm that is intended.
git config --global user.name "leomontenegro6"
git config --global user.email "leomontenegro6@gmail.com"
# Stage everything, commit with the prompted message, and push.
git add --all
git commit -am "$COMMIT"
git push
| true |
c7b2f0b7a65fb2720e3edebf9ec13999844daa6a | Shell | gitprelimtek/protobuf-javalite-firebase-wellknowntypes | /run.sh | UTF-8 | 2,760 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# Working directories: everything is created under the invocation cwd.
CURRENT=`pwd`
CONTEXT_DIR=`dirname "$0"`
TMPLIB=$CURRENT/tmpdir       # downloaded maven artifacts land here
EXTRACTED=$CURRENT/extracted # jar contents are unpacked here
OUTPUT=$CURRENT/output       # merged output jar is written here
#echo "The script you are running has basename `basename "$0"` "
#echo "The script you are running has dirname `dirname "$0"` "
#echo "The present working directory is `pwd`"
# Unpack the protobuf-java jar into the current directory, aborting the
# whole script on failure.
proc_java(){
## protobuf-java
# NB: brace group, not a subshell — `( …; exit 1 )` only exited the
# subshell, so the script used to carry on after a failed extraction.
jar xf "$TMPLIB"/protobuf-java-*jar && echo "protobuf-java extracted OK " || { echo "protobuf-java extraction failed. Exiting"; exit 1; }
}
# Unpack the protobuf-javalite jar into the current directory, aborting
# the whole script on failure.
proc_javalite(){
## protobuf-javalite
# NB: brace group, not a subshell — `( …; exit 1 )` only exited the
# subshell, so the script used to carry on after a failed extraction.
jar xf "$TMPLIB"/protobuf-javalite*jar && echo "protobuf-javalite extracted OK " || { echo "protobuf-javalite extraction failed. Exiting"; exit 1; }
}
# Unpack the proto-google-common-protos jar into the current directory,
# aborting the whole script on failure.
proc_gcommonprotos(){
## proto-google-common-protos
# NB: brace group, not a subshell — `( …; exit 1 )` only exited the
# subshell, so the script used to carry on after a failed extraction.
jar xf "$TMPLIB"/proto-google-common-protos*jar && echo "proto-google-common-protos extracted OK " || { echo "proto-google-common-protos extraction failed. Exiting"; exit 1; }
}
# Unpack protolite-well-known-types: the artifact is an .aar, so rename
# it to .jar first, then extract its inner classes.jar as well.
proc_wellknowntypes(){
## protolite-well-known-types
## rename file with .aar to .jar
mv "$TMPLIB"/protolite-well-known-types*aar "$TMPLIB/protolite-well-known-types.jar"
# NB: brace groups, not subshells — `( …; exit 1 )` only exited the
# subshell, so the script used to carry on after a failed extraction.
jar xf "$TMPLIB/protolite-well-known-types.jar" && echo "protolite-well-known-types extracted OK " || { echo "protolite-well-known-types extraction failed. Exiting"; exit 1; }
## extract classes.jar generated by protolite-well-known-types.jar
jar xf ./classes.jar && echo "classes extracted OK " || { echo "classes extraction failed. Exiting"; exit 1; }
rm ./classes.jar
}
# NOTE(review): these echo messages are single-quoted, so the variable
# names print literally instead of their values — confirm if intended.
echo 'changing dir to $CONTEXT_DIR'
cd $CONTEXT_DIR
echo 'init temp dir: $TMPLIB'
rm -r $TMPLIB
mkdir -p $TMPLIB && echo "$TMPLIB created" || echo "$TMPLIB create failed"
echo 'init output dir: $OUTPUT'
rm -r $OUTPUT
mkdir -p $OUTPUT && echo "$OUTPUT created" || echo "$OUTPUT create failed"
echo 'init output dir: $OUTPUT'
rm -r $EXTRACTED
mkdir -p $EXTRACTED && echo "$EXTRACTED created" || echo "$EXTRACTED create failed"
##download artifacts to ./lib
# NOTE(review): `( echo …; exit 1 )` exits only the subshell, so a mvn
# or cd failure here does NOT stop the script — confirm; a failed cd
# means the extractions below run in the wrong directory.
mvn dependency:copy-dependencies -DoutputDirectory=$TMPLIB && echo mvn dependencies downloaded OK || ( echo Failed; exit 1 )
## extract artifact classes into ./extracted
## note: there is a desired sequence of dependencies in order to override some class implementations
cd $EXTRACTED && echo "$EXTRACTED found" || ( echo "$EXTRACTED not found. Exiting" ; exit 1 )
#proc_javalite
#proc_gcommonprotos
#proc_wellknowntypes
#proc_java
proc_javalite
proc_gcommonprotos
proc_wellknowntypes
#proc_gcommonprotos
#proc_wellknowntypes
#proc_javalite
## create the output jar from extracted
jar cf $OUTPUT/ptek-protobuf-javalite-firebase-wellknowntypes.jar * && echo "ptek-protobuf-javalite-firebase-wellknowntypes.jar created OK " || ( echo "ptek-protobuf-javalite-firebase-wellknowntypes.jar create failed. Exiting"; exit 1 )
ls -l $OUTPUT
df86a6883eccadb1b5616d6b460f5e454e706c9d | Shell | Poeschl/Hassio-Addons | /asterisk/root/etc/cont-init.d/setup-config-files.sh | UTF-8 | 283 | 3.015625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | #!/usr/bin/with-contenv bashio
# shellcheck shell=bash
set -e
CONFIG_FILES=$(bashio::config 'config_files')
for config in $CONFIG_FILES; do
bashio::log.info "Apply '$config'"
cp "$config" '/etc/asterisk/'
done
chown -R asterisk:asterisk /etc/asterisk
chmod 640 -R /etc/asterisk
| true |
2b9696f7cd311a229f17ee61c30b075405c15db3 | Shell | FredHutch/motuz | /bin/dev/init_dev.sh | UTF-8 | 523 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e
THIS_DIR=$(dirname "$0")
cd ${THIS_DIR}
cd ../..
echo "Setting up the database..."
./bin/_utils/database_install.sh
echo "DONE - Setting up the database"
echo "Installing backend dependencies"
./bin/_utils/backend_install.sh
echo "DONE - Installing backend dependencies"
echo "Initializing backend..."
./bin/_utils/backend_init.sh
echo "DONE - Initializing backend"
echo "Installing frontend dependencies..."
./bin/_utils/frontend_install.sh
echo "DONE - Installing frontend dependencies"
| true |
de6a89d742b187e15abf781e1260df97209ed873 | Shell | rsyslog/rsyslog-pkg-ubuntu | /rsyslog/Debian/v8-stable/debian/rsyslog.preinst | UTF-8 | 191 | 2.875 | 3 | [
"Apache-2.0",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LGPL-3.0-or-later"
] | permissive | #!/bin/sh
set -e
# On install over a previously-removed package, re-enable the logrotate
# config that was disabled at removal time.
if [ "$1" = "install" ] && [ -n "$2" ] ; then
	# An explicit `if` is required here: the old
	# `[ -f … ] && mv …` one-liner returned non-zero when the file was
	# absent, and as the last command of this branch that status aborted
	# the maintainer script under `set -e`.
	if [ -f /etc/logrotate.d/rsyslog.disabled ]; then
		mv -f /etc/logrotate.d/rsyslog.disabled /etc/logrotate.d/rsyslog
	fi
fi

#DEBHELPER#
| true |
de97468e13442e3863bc281d087eb71cc8ec0d34 | Shell | burcuozcelik/trajectory | /CG_versions/scripts/parser/find_missing_stage1.sh | UTF-8 | 1,560 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Rebuild the list of stage-1 job scripts to (re)generate.
# NOTE(review): `rm` errors if missing_stage1 does not exist yet on a
# fresh checkout — consider `rm -f`; confirm.
rm missing_stage1
# Enumerate every (solver, dataset, chunksize, training, testing) combo.
for solver in "cg" "cgs" "bicg" "bicgsta" "iccg"
do
	for dataset in 4 8 16 28
	do
		for chunksize in 10 40
		do
			for training in 8000 1600
			do
				for testing in 5000
				do
					fname="job-"$solver-$chunksize-$dataset-$training-$testing
					# Count "Completed!" markers in the corresponding job log.
					output_training=$( grep -re "Completed!" ./sbatchs-datasets/$fname | wc -l )
					if [ $output_training == "1" ]
					then
						dir="data/datasets/memory/anamoly/"$solver"/"$dataset"_"$chunksize"_"$training"_"$testing/
						output_model=$(cat $dir$solver"_"$dataset"_"$chunksize"_"$training"_"$testing".model" | wc -l )
						# NOTE(review): this records the job when the model
						# file is NON-empty, which reads as the opposite of
						# "missing"; the commented-out variant below tests
						# == "0" — confirm which condition is intended.
						if [ $output_model != "0" ]
						then
							echo "stage1-$solver-$chunksize-$dataset-$training-$testing.sh" >> missing_stage1
						fi
					fi
				done
			done
		done
	done
done
#for i in 1
#do
# for solver in "cg" "cgs" "bicg" "bicgsta" "iccg"
# do
# for j in 4 8 16 28
# do
# for k in 10 100 500
# do
# for n in 10 20 40
# do
# #for ctype in "wDetectors" "woDetectors"
# for ctype in "woDetectors"
# do
# #echo data/datasets/memory/$solver/$n"_"$j"_"$k"_"$i/$solver"_"$n"_"$j"_"$k"_"$i"_"$ctype.train1.best
# output=$( cat "data/datasets/memory/"$solver"/"$n"_"$j"_"$k"_"$i/$solver"_"$n"_"$j"_"$k"_"$i"_"$ctype".train1.best" | wc -l )
# #echo $output
# if [ $output == "0" ]
# then
# echo "stage1-$solver-$n-$j-$k-$i-$ctype.sh" >> missing_stage1
# fi
# done
# done
# done
# done
# done
#done
| true |
03a5f8ae2d9c9969685dc10178af249f82f7efb2 | Shell | mikoim/thinkpad-provisioning | /.config/i3/fm.sh | UTF-8 | 240 | 2.671875 | 3 | [
"Unlicense"
] | permissive | #!/usr/bin/env bash
# Launch the first available file manager in preference order, passing
# all script arguments straight through to it.
for candidate in spacefm pcmanfm nautilus; do
    if hash "$candidate" 2>/dev/null; then
        "$candidate" "$@"
        exit
    fi
done
# None of the preferred file managers is installed.
notify-send "fm.sh" "Favorite file managers not found."
| true |
85bb8418af5e9f206aca5b18946c60673d749390 | Shell | alljoyn/core-test | /testbot/windows/sbin/win7Testbot | UTF-8 | 90,849 | 3.796875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright (c) Open Connectivity Foundation (OCF), AllJoyn Open Source
# Project (AJOSP) Contributors and others.
#
# SPDX-License-Identifier: Apache-2.0
#
# All rights reserved. This program and the accompanying materials are
# made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Copyright (c) Open Connectivity Foundation and Contributors to AllSeen
# Alliance. All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all
# copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
# AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
# PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# Purpose:
# Windows7 testbot main script
# Return:
# 0 - Success
# 1 - Block because build directory missing
# 2 - Invalid command
# 3 - Fail at least one test failed
# Function name: usage
# Description: display command parameter
# Parameter: none
# Return: none
function usage() {
    # Print the command-line synopsis; the single-quoted multi-line
    # string keeps its embedded newlines and indentation verbatim.
    echo 'Usage: win7Testbot -h -v -r [windows_sdk_path] -a [android_sdk_path] -b [adb_path]'
    echo '    -h help
    -v verbose
    -r windows_sdks_path
    -a android_sdks_path
    -b adb_path'
}
# Function name: parseCmdArgs
# Description: Parse command line arguments
# Parameter: none
# Return:
# 2 - invalid command
function parseCmdArgs() {
echo "Parsing arguments..."
args=`getopt hvr:a:b: $*`
if [ $? != 0 ]; then
usage
exit 2
fi
set -- $args
#default is concise
verbose=false
# script name without path
scriptName=`basename $0`
# Default path for windows7 parent of sdk-bin and sdk-rel subdirectories
defPath=""
# Android unzipped package directory
androidDir=""
# Android adb binary location
adbLoc=""
for i
do
case "$i" in
-h) shift;usage;exit 0;;
-v) shift;verbose=true; echo "verbose $verbose";;
-r) shift;defPath=$1;echo "Windows sdks path $defPath";shift;;
-a) shift;androidDir=$1;echo "Android sdks path $androidDir";shift;;
-b) shift;adbLoc=$1;echo "Android adb location $adbLoc";shift;;
esac
done
if [ -n "$defPath" -a -d "$defPath" ]; then
winDir="$defPath"
lastChr=${defPath#${defPath%?}}
# Remove trailing /
if [ "$lastChr" == "/" ]; then
winDir=`echo "${defPath%?}"`
fi
else
echo "Invalid windows build path $defPath"
exit 2
fi
cppRelBinDir="${winDir}/sdk-rel/cpp/bin"
cppRelSampleDir="${winDir}/sdk-rel/cpp/bin/samples"
cppDbgBinDir="${winDir}/sdk-dbg/cpp/bin"
cppDbgSampleDir="${winDir}/sdk-dbg/cpp/bin/samples"
if [ -d "$cppRelBinDir" -a -d "$cppRelSampleDir" ]; then
echo "CPP release binaries and samples directory exist"
elif [ -d "$cppRelBinDir" ]; then
echo "Invalid cpp release sample path $cppRelSampleDir"
exit 2
else
echo "Invalid cpp release binary path $cppRelBinDir"
exit 2
fi
if [ -d "$cppDbgBinDir" -a -d "$cppDbgSampleDir" ]; then
echo "CPP debug binaries and samples directory exist"
elif [ -d "$cppDbgBinDir" ]; then
echo "Invalid cpp debug sample path $cppDbgSampleDir"
exit 2
else
echo "Invalid cpp debug binary path $cppDbgBinDir"
exit 2
fi
jarRelDir="${winDir}/sdk-rel/java/jar"
jarDbgDir="${winDir}/sdk-dbg/java/jar"
javaRelLibDir="${winDir}/sdk-rel/java/lib"
javaDbgLibDir="${winDir}/sdk-dbg/java/lib"
if [ -d "$jarRelDir" -a -d "$javaRelLibDir" ]; then
echo "Java release jar and library directory exist"
elif [ -d "$jarRelDir" ]; then
echo "Nonexist java release library path $javaRelLibDir"
exit 2
else
echo "Nonexist java release jar path $jarRelDir"
exit 2
fi
if [ -d "$jarDbgDir" -a -d "$javaDbgLibDir" ]; then
echo "Java debug jar and library directory exist"
elif [ -d "$jarDbgDir" ]; then
echo "Nonexist java debug library path $javaDbgLibDir"
exit 2
else
echo "Nonexist java debug jar path $jarDbgDir"
exit 2
fi
# To run java tests, relative path for jar and lib file are required
# Jenkins and testbot have different current path, default to testbot
jarRelRelativeDir="./buildbot/sdk-rel/java/jar"
jarDbgRelativeDir="./buildbot/sdk-dbg/java/jar"
javaRelRelativeLibDir="./buildbot/sdk-rel/java/lib"
javaDbgRelativeLibDir="./buildbot/sdk-dbg/java/lib"
currentDir=`pwd`
#/cygdrive/c/jenkins/WIN7X64/workspace/win7_testbot_master
JenkinsPathKey="jenkins"
#/cygdrive/c/buildslave/win7x64-testbot/win7testbot_12_master/platform/
TestbotPathKey="buildslave"
masterKey="master"
if [[ $currentDir =~ $JenkinsPathKey ]]; then
if [[ $currentDir =~ $masterKey ]]; then
echo "Jenkins master test with $currentDir "
jarRelRelativeDir="../../../../buildslave/win7x64-testbot/win7testbot_12_master/platform/buildbot/sdk-rel/java/jar"
jarDbgRelativeDir="../../../../buildslave/win7x64-testbot/win7testbot_12_master/platform/buildbot/sdk-dbg/java/jar"
javaRelRelativeLibDir="../../../../buildslave/win7x64-testbot/win7testbot_12_master/platform/buildbot/sdk-rel/java/lib"
javaDbgRelativeLibDir="../../../../buildslave/win7x64-testbot/win7testbot_12_master/platform/buildbot/sdk-dbg/java/lib"
else
echo "Jenkins branch test with $currentDir "
jarRelRelativeDir="../../../../buildslave/win7x64-testbot/win7testbot_12_branch/platform/buildbot/sdk-rel/java/jar"
jarDbgRelativeDir="../../../../buildslave/win7x64-testbot/win7testbot_12_branch/platform/buildbot/sdk-dbg/java/jar"
javaRelRelativeLibDir="../../../../buildslave/win7x64-testbot/win7testbot_12_branch/platform/buildbot/sdk-rel/java/lib"
javaDbgRelativeLibDir="../../../../buildslave/win7x64-testbot/win7testbot_12_branch/platform/buildbot/sdk-dbg/java/lib"
fi
fi
# Android parent path for sdk-rel subdirectory
if [ -n "$androidDir" -a -d "$androidDir" ]; then
androidPath="$androidDir"
lastChr=${androidDir#${androidDir%?}}
# Remove trailing /
if [ "$lastChr" == "/" ]; then
androidPath=`echo "${androidDir%?}"`
fi
androidRelCppBin="${androidPath}/sdk-rel/cpp/bin"
else
echo "Non-exist android build path $androidDir"
fi
androidSdkMissing=0
if [ -d "$androidRelCppBin" ]; then
echo "Android release binary path $androidRelCppBin"
else
echo "Invalid android release binary path $androidRelCppBin"
androidSdkMissing=1
fi
# Android parent path for sdk-rel subdirectory
if [ -n "$adbLoc" -a -d "$adbLoc" ]; then
adbPath="$adbLoc"
lastChr=${adbLoc#${adbLoc%?}}
# Remove trailing /
if [ "$lastChr" == "/" ]; then
adbPath=`echo "${adbLoc%?}"`
fi
ADB="${adbPath}/adb.exe"
else
echo "Non-exist adb path $adbLoc"
fi
adbMissing=0
if [ -x "$ADB" ]; then
echo "Android adb looks fine"
else
echo "Android adb is NOT executable!"
adbMissing=1
fi
# Default path for thin client unit test subdirectory
tcUnitTestPath="${winDir}/../buildbot_ajtcl/scons/core/ajtcl/unit_test"
tcUnitTestMissing=0
if [ -n "$tcUnitTestPath" -a -d "$tcUnitTestPath" ]; then
echo "TC unit test folder exists"
else
echo "Invalid tc unit test path $tcUnitTestPath"
tcUnitTestMissing=1
fi
# Default path for thin client test subdirectory
tcTestPath="${winDir}/../buildbot_ajtcl/scons/core/ajtcl/dist/test"
tcTestMissing=0
if [ -n "$tcTestPath" -a -d "$tcTestPath" ]; then
echo "TC test folder exists"
else
echo "Invalid tc test path $tcTestPath"
tcTestMissing=1
fi
# Default path for standard client test tools(ajtcsctest.exe/ajtrawservice.exe...) subdirectory
scToolsPath="${winDir}/../buildbot/test_tools-rel"
scToolsMissing=0
if [ -n "$scToolsPath" -a -d "$scToolsPath" ]; then
echo "SC test tools folder exists"
else
echo "Invalid sc test tools path $scToolsPath"
scToolsMissing=1
fi
}
# Function name: SetUpGlobalVariables
# Description: setup global variables
# Parameter: none
# Return: none
function SetUpGlobalVariables() {
    # Test result file
    testResultFile="${scriptName}_result.txt"
    # HTML rendering of the same results
    testResultHtml="${scriptName}_result.html"
    #detail command log file
    testDetailLog="${scriptName}_detail.txt"
    # Intermediate error log file
    testProgressLog="${scriptName}_progress.txt"
    # Expected success markers grepped from test program output
    BASIC_CLIENT_OK="Basic client exiting with status 0x0000"
    BBCLIENT_OK="bbclient exiting with status 0 "
    # Directory on the Android device where test binaries are pushed
    AndroidTestDir="/data/local/tmp"
    # Commit id related: type tags and cgit URL prefixes used by
    # createCommitIdUrl(); UNKNOWN_ID is the placeholder value
    SC_COMMIT_TYPE=1
    TC_COMMIT_TYPE=2
    SC_URL_PREFIX="https://git.allseenalliance.org/cgit/core/alljoyn.git/commit/?id="
    TC_URL_PREFIX="https://git.allseenalliance.org/cgit/core/ajtcl.git/commit/?id="
    UNKNOWN_ID="unknown"
}
# Function name: getCommitIds
# Description:
# Get commit ids from manifest.txt of core and tc sdk
# Parameter: none
# Return:
# scCommitId - commit ref id of SC SDK
# tcCommitId - commit ref id of TC SDK
# androidCommitId - commit ref id of Android SDK
function getCommitIds() {
echo "Get commit ref ids from both SC and TC SDK..." >> $testDetailLog
scCommitId="${UNKNOWN_ID}"
tcCommitId="${UNKNOWN_ID}"
androidCommitId="${UNKNOWN_ID}"
local scManifest="${winDir}/sdk-rel/manifest.txt"
local tcManifest="${winDir}/../buildbot_ajtcl/scons/manifest.txt"
local androidManifest="${androidPath}/sdk-rel/manifest.txt"
local scCommit="unknown"
local tclCommit="unknown"
local androidCommit="unknown"
if [ -e "$scManifest" ]; then
scCommit=`grep -i "commit ref:" $scManifest | head -n 1 |awk -F " " '{print $NF}'`
if [ ! -z "$scCommit" ]; then
scCommitId=$scCommit
echo "SC SDK commit id is $scCommitId"
else
echo "SC SDK commit id is unknown since manifest.txt does NOT have commit id information"
fi
fi
if [ -e "$tcManifest" ]; then
tclCommit=`grep -i "commit ref:" $tcManifest | head -n 1 |awk -F " " '{print $NF}'`
if [ ! -z "$tclCommit" ]; then
tcCommitId=$tclCommit
echo "TC SDK commit id is $tcCommitId"
else
echo "TC SDK commit id is unknown since manifest.txt does NOT have commit id information"
fi
fi
if [ -e "$androidManifest" ]; then
androidCommit=`grep -i "commit ref:" $androidManifest | head -n 1 |awk -F " " '{print $NF}'`
if [ ! -z "$androidCommit" ]; then
androidCommitId=$androidCommit
echo "Android SDK commit id is $scCommitId"
else
echo "Android SDK commit id is unknown since manifest.txt does NOT have commit id information"
fi
fi
}
# Function name: createCommitIdUrl
# Description:
#       Create link url for commit id
# Parameter:
#       commitType - SC_COMMIT_TYPE (1) or TC_COMMIT_TYPE (2)
#       commitId - commit ref id
# Return:
#       commitUrl (global) is set to the cgit url for the commit
function createCommitIdUrl() {
    local commitType=$1
    local commitId=$2
    # Log after the locals are assigned: the previous version logged
    # $commitId before setting it, so the message always showed an
    # empty (or stale global) value.
    echo "Create commit id url from $commitId ..." >> "$testDetailLog"
    commitUrl="${UNKNOWN_ID}"
    if [ "$commitType" -eq "${SC_COMMIT_TYPE}" ]; then
        commitUrl="${SC_URL_PREFIX}${commitId}"
    else
        commitUrl="${TC_URL_PREFIX}${commitId}"
    fi
    echo "commit id url is $commitUrl"
}
# Function name: checkAndroidCnt
# Description:
# Check number of android device(s) connected through USB
# Parameter: none
# Return:
# devices is set to have android deviceid;
# androidCnt flag is set to 1 if one android connected;
# set to 0 if no android device connected,
# set to actual number of devices if >1 devices connected
function checkAndroidCnt() {
    $verbose && echo "How many android device connected?" >> $testDetailLog
    # Every adb invocation is wrapped in `timeout` because adb can hang.
    timeout 2 $ADB devices 2>android_devices.log 1>&2
    # adb command is not stable, so always run as background to prevent block
    # Collect serials of devices reporting state "device".
    devices=`timeout 2 $ADB devices | grep 'device$' | cut -f 1`
    echo "Connected android id $devices"
    androidCnt=`echo $devices | wc -w`
    if [ "$androidCnt" -eq 0 ]; then
        echo "No android device connected!"
    elif [ "$androidCnt" -gt 1 ]; then
        echo "$androidCnt android connected, remove extra!"
    else
        echo "One android connected, good to go"
    fi
    $verbose && echo "$androidCnt android connected" >> $testDetailLog
}
# Function name: getAndroidIpAddr
# Description:
# Get android ip address
# Parameter: none
# Return:
# androidIp is set to 192.168.x.xxx if wifi ready; empty otherwise
function getAndroidIpAddr() {
local wifiConfig="${scriptName}_android_wifi.txt"
local ipPort="${scriptName}_android_ipPort.txt"
local upKey="UP"
local zeroIp="0.0.0.0"
local wifiInterface="wlan0|eth0"
echo "Retrieve android ip address..."
androidIp=""
# TODO: if netcfg is not available, use ifconfig
timeout 3 $ADB shell netcfg 2>$wifiConfig 1>&2
# wifi config file should include a line:
# eth0 UP 192.168.1.104/23 255.255.255.0 0x00001043
# Exclude local interface
awk -F" +" -v status="$upKey" -v wifi="$wifiInterface" '$2 ~ status && $1 ~ wifi {print $3}' $wifiConfig > $ipPort
# Remove "/port" from ip for ICS 8960
androidIp=`awk -F"/" '{print $1}' $ipPort`
echo "Android ip address $androidIp"
# If no access point is active and Wifi ON, ip address will be 0.0.0.0
if [ -n "$androidIp" -a "$androidIp" != "$zeroIp" ]; then
$verbose && echo "Android ip ready $androidIp"
else
$verbose && echo "Android has no IP!"
androidIp=""
fi
}
# Function name: getWinWlanIPAddr
# Description:
# Get windows host wlan ip address
# Parameter: none
# Return:
# winIp is set to 192.168.x.xxx if wifi ready; empty otherwise
function getWinWlanIPAddr() {
    local wifiConfig="${scriptName}_windows_wifi.txt"
    local ipv4Hdr="IPv4 Address"
    # NOTE(review): only addresses starting 192.168.* are matched; hosts
    # on other private ranges (10.x, 172.16-31.x) would be missed — confirm.
    local wlanIpPrefix="192.168"
    echo "Retrieve windows ip address..."
    winIp=""
    # use ipconfig to search IPv4 Address...:192.168.x.x
    ipconfig | grep -i "${ipv4Hdr}.*${wlanIpPrefix}" | awk -F":" '{print $2}' 2>$wifiConfig 1>&2 &
    sleep 2
    # Remove leading spaces
    winIp=`cat $wifiConfig | tr -d ' '`
    echo "Windows wlan ip address is $winIp"
}
# Function name: winAndroidSameNet
# Description:
# check if windows and android on same wlan network by pinging in
# both directions
# Globals read: scriptName, ADB
# Parameter:
# 1. androidIp - android wlan ip
# 2. windowsIp - windows wlan ip
# Return:
# sameNetwork is set to 1 if true; 0 otherwise
function winAndroidSameNet() {
local androidIp=$1
local windowsIp=$2
local winPingAndroid="${scriptName}_win_ping_android"
local androidPingWin="${scriptName}_android_ping_win"
local winPingOk=0
local androidPingOk=0
# Windows ping prints "Reply from <ip>"; android ping prints
# "64 bytes from <ip>"
local winPingKeys="Reply from $androidIp"
local androidPingKeys="64 bytes from $windowsIp"
echo "Windows $windowsIp and android $androidIp share same wlan?"
sameNetwork=0
# Both android and windows should have non-empty wlan ip address
if [ -n "$androidIp" -a -n "$windowsIp" ]; then
# Ping android from windows host; timeout kills the endless ping
timeout 15 ping $androidIp |grep "$winPingKeys" 2>$winPingAndroid 1>&2
# Ping windows from android
timeout 15 $ADB shell ping $windowsIp | grep "$androidPingKeys" 2>$androidPingWin 1>&2
winPingOk=`cat $winPingAndroid | wc -l`
androidPingOk=`cat $androidPingWin | wc -l`
echo "Windows get $winPingOk replies from android in 15 seconds"
echo "Android get $androidPingOk replies from windows in 15 seconds"
# At least 2 pings should got replies
# NOTE(review): only the android->windows direction decides the
# result; winPingOk is reported but never checked -- confirm whether
# this is intentional (e.g. windows firewall blocking ICMP replies)
if [ "$androidPingOk" -ge 2 ]; then
echo "Windows and android on same wlan network"
sameNetwork=1
else
echo "Android can not ping windows!"
fi
elif [ -z "$androidIp" ]; then
echo "Android is NOT connected to wifi"
else
echo "Windows is NOT connected to wifi"
fi
}
# Function name: loadTestToAndroid
# Description:
# Load test binaries (bbservice/bbclient) to the android device via adb
# Globals read: androidRelCppBin, AndroidTestDir, ADB
# Parameter: none
# Return:
# loadSuccess is 0 if load succeed; 1 otherwise
function loadTestToAndroid() {
echo "Loading test binaties to android..."
local androidBbservice="${androidRelCppBin}/bbservice"
local androidBbclient="${androidRelCppBin}/bbclient"
loadSuccess=1
# Push binaries only when both local build artifacts exist
if [ -e "$androidBbservice" -a -e "$androidBbclient" ]; then
# Delete old file on android; timeout guards against adb hanging
timeout 15 $ADB shell rm ${AndroidTestDir}/bbservice
timeout 15 $ADB shell rm ${AndroidTestDir}/bbclient
# TODO: check file is deleted
# adb does not know path like /cygdrive/c/...
# so copy bbservice and bbclient to the current directory first
cp $androidBbservice bbservice
cp $androidBbclient bbclient
# 20 seconds should be enough to push one file
timeout 20 $ADB push bbservice $AndroidTestDir
timeout 20 $ADB push bbclient $AndroidTestDir
# Make test binaries executable
timeout 15 $ADB shell chmod 777 ${AndroidTestDir}/bbservice
timeout 15 $ADB shell chmod 777 ${AndroidTestDir}/bbclient
# TODO: check file is pushed and changed correctly
loadSuccess=0
else
echo "$androidBbservice or $androidBbclient not exist!"
fi
}
# Function name: getWinPids
# Description: Get windows process ids with given name
# Parameter:
#   1. processName - process name (matched as a substring of the ps line)
# Return:
#   pids (global) holds the matching process ids, whitespace separated
function getWinPids() {
    local targetName=$1
    $verbose && echo "Checking $targetName..."
    # List every process, keep lines mentioning the target (excluding our
    # own grep), then pick the PID column (2nd field of "ps -ef")
    pids=$(ps -ef | grep "$targetName" | grep -v "grep" | awk -F" +" '{print $2}')
    $verbose && echo "$targetName ids: $pids"
}
# Function name: getAndroidPids
# Description:
# Get android process ids with given name via "adb shell ps"
# Globals read: ADB
# Parameter:
# 1. processName - process name
# Return:
# pids has process ids
function getAndroidPids() {
local processName=$1
local pidFile="pid_android_${processName}.log"
# adb command is not stable, run with timeout to prevent block
timeout 2 $ADB shell ps > $pidFile
# Match processName anywhere in the ps line; 2nd field is the PID.
# NOTE(review): a name that is a substring of another process name will
# also match -- confirm callers pass sufficiently unique names
pids=`awk -F" +" -v pN="$processName" '$0 ~ pN {print $2}' $pidFile`
rm -rf $pidFile
}
# Function name: cleanKeyStore
# Description:
#   Remove all AllJoyn keystore directories so stale credentials from a
#   previous run cannot affect the next security test.
# Globals read: LOCALAPPDATA, USERPROFILE
# Parameter: none
# Return: none
function cleanKeyStore() {
    # Keystores may live under either %LOCALAPPDATA% or %USERPROFILE%;
    # clear every known location.
    local keystoreDir
    for keystoreDir in \
        "$LOCALAPPDATA/.alljoyn_secure_keystore" \
        "$LOCALAPPDATA/.alljoyn_keystore" \
        "$USERPROFILE/.alljoyn_keystore" \
        "$USERPROFILE/.alljoyn_secure_keystore"; do
        # Quoted expansion: Windows profile paths often contain spaces
        # (the previous unquoted "rm -rf $dir" would split such paths).
        rm -rf -- "$keystoreDir"
    done
}
# Function name: killWinProcess
# Description: kill all windows processes matching the given name
# Uses: getWinPids (sets global pids)
# Parameter:
# 1. processName - alljoyn-daemon, bbclient or bbService
# Return:
# none
function killWinProcess() {
local processName=$1
$verbose && echo "Stopping all $processName..."
# getWinPids leaves matching pids in the global 'pids'
getWinPids $processName
# NOTE(review): processCount is intentionally not 'local' elsewhere in
# this file; kept global here for consistency
processCount=`echo $pids | wc -w`
$verbose && echo " $processCount $processName exists"
if [ "$processCount" -ge 1 ]; then
for pid in $pids; do
$verbose && echo "kill $pid"
# SIGKILL; errors (e.g. already-exited pid) are suppressed
kill -9 $pid 2>/dev/null 1>&2
done
fi
}
# Function name: killAndroidProcess
# Description: kill all specified processes on android device and verify
# they are gone
# Uses: getAndroidPids (sets global pids); globals ADB, testDetailLog
# Parameter:
# 1. processName - bbClient, bbService
# Return:
# processKill flag is set to 0 if succeed; 1 otherwise
function killAndroidProcess() {
local processName=$1
$verbose && echo "Stopping all $processName on android..." >> $testDetailLog
# kill running alljoyn-daemon process on device
processKill=0
# adb command not stable, run as background to prevent
getAndroidPids $processName
processCount=`echo $pids | wc -w`
$verbose && echo " $processCount $processName find on android" >> $testDetailLog
if [ "$processCount" -ge 1 ]; then
for pid in $pids; do
$verbose && echo "Old $processName process id is $pid " >> $testDetailLog
# timeout guards against adb hanging on the kill
timeout 2 $ADB shell "kill -9 $pid"
done
# timeout if process not terminated
sleep 2
# confirm bbclient or bbservice is killed by re-querying the pids
getAndroidPids $processName
processCount=`echo $pids | wc -w`
if [ "$processCount" -ge 1 ]; then
$verbose && echo "Stop $processName failed for android!" >> $testDetailLog
processKill=1
else
$verbose && echo "All $processName stopped on android" >> $testDetailLog
fi
else
$verbose && echo "No $processName on android" >> $testDetailLog
fi
}
# Function name: isProcessLive
# Description:
#   check if target process id still live (present in the ps table)
# Parameter:
#   1. procId - process id
# Return:
#   procLive is set to 0 if not live; 1 if live
function isProcessLive() {
    local targetPid=$1
    procLive=0
    echo "Is process id $targetPid live?"
    # Snapshot every PID currently in the process table (2nd ps -ef field)
    local allPids=`ps -ef | awk -F" +" '{print $2}'`
    echo "find $targetPid from $allPids"
    echo ""
    # Scan the snapshot for an exact match; stop at the first hit
    local candidate
    for candidate in $allPids; do
        if [ "$candidate" == "$targetPid" ]; then
            echo "Live"
            procLive=1
            break
        fi
    done
}
# Function name: padLog
# Description:
#   Append padding lines to a log to avoid output buffering losing the
#   tail of the file
# Parameter:
#   logFile - log file name to pad
# Return: none
function padLog() {
    local targetLog=$1
    local padText="**********"
    local lineCount=100
    echo "Add $lineCount lines of padding to $targetLog"
    # Append the marker line lineCount times
    local written=0
    while [ "$written" -lt "$lineCount" ]; do
        echo "$padText" >> $targetLog
        written=$((written + 1))
    done
}
# Function name: logHasKeysExactTimes
# Description:
#   Check if test log includes keys exact times
# Globals: verbose, testDetailLog, testProgressLog (append targets)
# Parameter:
#   logName - log file to scan
#   passKey - grep pattern that must appear in the log
#   count   - exact number of matching lines expected
# Return:
#   testResult will be set to 0 if log match keys exact times;
#   1 if log has keys more than expected;
#   -1 if log has keys less than expected (also used when the log is
#   missing/empty or an argument is null, so those cases count as fail)
function logHasKeysExactTimes() {
    local logName=$1
    local passKey=$2
    local count=$3
    local actualTimes=0
    $verbose && echo "Deciding test result in $logName with $passKey..." >> $testDetailLog
    testResult=0
    if [ -n "$logName" -a -n "$passKey" ]; then
        # Log must exist and be non-empty; expansions quoted so file
        # names containing spaces do not break the test
        if [ -e "$logName" -a -s "$logName" ]; then
            # grep -c counts matching lines (same as grep | wc -l);
            # '--' protects against keys beginning with '-'
            actualTimes=$(grep -c -- "$passKey" "$logName")
            if [ "$actualTimes" -eq "$count" ]; then
                testResult=0
                echo "Log looks OK"
            elif [ "$actualTimes" -gt "$count" ]; then
                testResult=1
                # Test fail
                echo "Log $logName has $passKey $actualTimes times, more than expected!" >> $testProgressLog
            else
                testResult=-1
                # Test fail
                echo "Log $logName has $passKey $actualTimes times, less than expected!" >> $testProgressLog
            fi
        else
            # Previously this path left testResult at 0 (pass); a missing
            # or empty log is now reported as "fewer matches than expected"
            testResult=-1
            echo "Empty or non-exist file $logName !" >> $testDetailLog
            echo "Empty or non-exist file $logName !" >> $testProgressLog
        fi
    else
        # Null arguments are a caller bug; fail rather than pass silently
        testResult=-1
        echo "File $logName or key $passKey is null !" >> $testDetailLog
        echo "File $logName or key $passKey is null !" >> $testProgressLog
    fi
}
# Function name: logHasKeysMinTimes
# Description:
#   Check if test log includes keys at least minimum times
# Globals: verbose, testDetailLog, testProgressLog (append targets)
# Parameter:
#   logName - log file to scan
#   passKey - grep pattern that must appear in the log
#   minimum - minimum number of matching lines required
# Return:
#   testResult will be set to 0 if pass; 6 if fail (missing/empty log and
#   null arguments also fail, since testResult defaults to 6)
function logHasKeysMinTimes() {
    local logName=$1
    local passKey=$2
    local minimum=$3
    local actualTimes=0
    $verbose && echo "Does $logName contain $passKey at least $minimum times?" >> $testDetailLog
    # Fail-safe default: anything short of an explicit pass stays 6
    testResult=6
    if [ -n "$logName" -a -n "$passKey" ]; then
        # Log must exist and be non-empty; expansions quoted so file
        # names containing spaces do not break the test
        if [ -e "$logName" -a -s "$logName" ]; then
            # grep -c counts matching lines (same as grep | wc -l);
            # '--' protects against keys beginning with '-'
            actualTimes=$(grep -c -- "$passKey" "$logName")
            if [ "$actualTimes" -ge "$minimum" ]; then
                testResult=0
                echo "Log looks OK"
            else
                # Test fail
                echo "Log $logName miss $passKey !" >> $testProgressLog
            fi
        else
            echo "Empty or non-exist file $logName !" >> $testDetailLog
            echo "Empty or non-exist file $logName !" >> $testProgressLog
        fi
    else
        echo "File $logName or key $passKey is null !" >> $testDetailLog
        echo "File $logName or key $passKey is null !" >> $testProgressLog
    fi
}
# Function name: basicServiceClient
# Description: basic_service and basic_client test, both on windows
# Globals: testResults, totalTests, testResultFile, testResultHtml,
#   testProgressLog, cppRelSampleDir, cppDbgSampleDir, BASIC_CLIENT_OK
# Parameter:
#   buildVariant - release or debug
# Return: none (records pass/fail in testResults and the report files)
function basicServiceClient() {
    local buildVariant=$1
    # Initialize result to success
    testResults[totalTests]=0
    # Test case numbering starts from 1
    local testCase=$(($totalTests + 1))
    # Leave an empty line between tests in report
    echo >> $testResultFile
    echo "<br>" >> $testResultHtml
    echo "Test case=$testCase(basic_service/basic_client $buildVariant)"
    echo "Test case=$testCase(basic_service/basic_client $buildVariant)">> $testResultFile
    echo "<p>Test case=$testCase(basic_service/basic_client $buildVariant)</p>">> $testResultHtml
    local CLIENT_LOG="basic_client_$buildVariant.txt"
    local SERVICE_LOG="basic_service_$buildVariant.txt"
    # error log names in Jenkins archive
    # NOTE: ${buildVariant} must be braced here; "$buildVariant_error"
    # would expand the undefined variable 'buildVariant_error' and yield
    # "basic_client_.txt" for every variant (name collision between runs).
    local CLIENT_ERROR="basic_client_${buildVariant}_error.txt"
    local SERVICE_ERROR="basic_service_${buildVariant}_error.txt"
    local SERVICE_PATH="$cppRelSampleDir"
    local CLIENT_PATH="$cppRelSampleDir"
    if [ "$buildVariant" == "debug" -o "$buildVariant" == "dbg" ]; then
        echo "Debug sample variant"
        SERVICE_PATH="$cppDbgSampleDir"
        CLIENT_PATH="$cppDbgSampleDir"
    fi
    # Launch basic_service in the background, capturing all output
    echo "Launch basic_service..."
    ${SERVICE_PATH}/basic_service.exe 2>$SERVICE_LOG 1>&2 &
    service_pid=$!
    # Give the service time to register before the client starts
    sleep 2
    # Launch basic_client
    echo "Launch basic_client..."
    ${CLIENT_PATH}/basic_client.exe 2>$CLIENT_LOG 1>&2 &
    local client_pid=$!
    # wait for client to complete discovery, join and method call
    sleep 10
    local FOUND_NAME="FoundAdvertisedName"
    local JOIN_SESSION="JoinSession SUCCESS"
    # Check log to decide test result
    echo "Check client log..."
    logHasKeysMinTimes $CLIENT_LOG "$FOUND_NAME" 1
    local foundName=$testResult
    logHasKeysMinTimes $CLIENT_LOG "$JOIN_SESSION" 1
    local joinSession=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$BASIC_CLIENT_OK" 1
    local clientOk=$testResult
    # basic_client is expected to have exited by now
    isProcessLive $client_pid
    local clientExit=$procLive
    if [ "$foundName" -eq 0 -a "$joinSession" -eq 0 -a "$clientOk" -eq 0 ]; then
        testResults[totalTests]=0
        echo "Test result=Pass" >> $testResultFile
        echo "<p>Test result=Pass</p>" >> $testResultHtml
        if [ "$clientExit" -ne 0 ]; then
            echo "basic_client still running!" >> $testProgressLog
        fi
    elif [ "$foundName" -ne 0 ]; then
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(discovery fail!)" >> $testResultFile
        # Escaped quotes keep the generated HTML attributes well-formed
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$CLIENT_ERROR\">discovery fail!</a>)</p>" >> $testResultHtml
    elif [ "$joinSession" -ne 0 ]; then
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(joinsession fail!)" >> $testResultFile
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$CLIENT_ERROR\">joinsession fail!</a>)</p>" >> $testResultHtml
    elif [ "$clientOk" -ne 0 ]; then
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(basic_client NOT exit 0)" >> $testResultFile
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$CLIENT_ERROR\">basic_client NOT exit 0</a>)</p>" >> $testResultHtml
    fi
    echo "Clean up service $service_pid and client $client_pid ..."
    kill $service_pid
    # Only kill the client if it is still alive
    if [ "$clientExit" -ne 0 ]; then
        kill $client_pid
    fi
    # Wait 3 seconds till cleanup
    sleep 3
    # Increase test count
    totalTests=$(($totalTests + 1))
}
# Function name: runScSecurity
# Description:
#   SC security 2.0 test: bbservice/bbclient on windows authenticating
#   with the given mechanism over the given transport
# Globals: testResults, totalTests, testResultFile, testResultHtml,
#   testProgressLog, cppRelBinDir, cppDbgBinDir, BBCLIENT_OK
# Parameter:
#   buildVariant - release or debug
#   transport - tcp or udp
#   auth - authentication(SRP/LOGON/ECDHE_NULL/ECDHE_PSK/ECDHE_ECDSA)
# Return: none (records pass/fail in testResults and the report files)
function runScSecurity() {
    local buildVariant=$1
    local transport=$2
    local auth=$3
    # Initialize result to success
    testResults[totalTests]=0
    # Test case numbering starts from 1
    local testCase=$(($totalTests + 1))
    # Leave an empty line between tests in report
    echo >> $testResultFile
    echo "<br>" >> $testResultHtml
    echo "Test case=$testCase($buildVariant Messaging with Authentication $auth over $transport)"
    echo "Test case=$testCase($buildVariant Messaging with Authentication $auth over $transport)">> $testResultFile
    echo "<p>Test case=$testCase($buildVariant Messaging with Authentication $auth over $transport)</p>">> $testResultHtml
    local CLIENT_LOG="bbclient_${buildVariant}_${transport}_${auth}.txt"
    local SERVICE_LOG="bbservice_${buildVariant}_${transport}_${auth}.txt"
    # error log names in Jenkins archive
    local CLIENT_ERROR="bbclient_${buildVariant}_${transport}_${auth}_error.txt"
    local SERVICE_ERROR="bbservice_${buildVariant}_${transport}_${auth}_error.txt"
    local SERVICE_PATH="$cppRelBinDir"
    local CLIENT_PATH="$cppRelBinDir"
    # Default transport TCP
    local transportFlag="-t"
    local transportEnum="0x4"
    # Default authentication is SRP
    # NOTE: "succesful" (sic) matches the exact spelling bbclient prints;
    # do not correct it here or the log match will fail.
    local authFlag="-ek SRP"
    local authSuccess="Authentication ALLJOYN_SRP_KEYX succesful"
    if [ "$buildVariant" == "debug" -o "$buildVariant" == "dbg" ]; then
        echo "Debug test variant"
        SERVICE_PATH="$cppDbgBinDir"
        CLIENT_PATH="$cppDbgBinDir"
    fi
    if [ "$transport" == "udp" -o "$transport" == "UDP" ]; then
        echo "Transport $transport is set"
        transportFlag="-u"
        transportEnum="0x100"
    fi
    # Select the -ek flag and the expected success line for the mechanism
    if [ "$auth" == "logon" -o "$auth" == "LOGON" ]; then
        echo "LOGON is set"
        authFlag="-ek LOGON happy"
        authSuccess="Authentication ALLJOYN_SRP_LOGON succesful"
    elif [ "$auth" == "ecdhe_null" -o "$auth" == "ECDHE_NULL" ]; then
        echo "ECDHE_NULL is set"
        authFlag="-ek ECDHE_NULL"
        authSuccess="Authentication ALLJOYN_ECDHE_NULL succesful"
    elif [ "$auth" == "ecdhe_psk" -o "$auth" == "ECDHE_PSK" ]; then
        echo "ECDHE_PSK is set"
        authFlag="-ek ECDHE_PSK"
        authSuccess="Authentication ALLJOYN_ECDHE_PSK succesful"
    elif [ "$auth" == "ecdhe_ecdsa" -o "$auth" == "ECDHE_ECDSA" ]; then
        echo "ECDHE_ECDSA is set"
        authFlag="-ek ECDHE_ECDSA"
        authSuccess="Authentication ALLJOYN_ECDHE_ECDSA succesful"
    else
        echo "SRP is set"
        authFlag="-ek SRP"
        authSuccess="Authentication ALLJOYN_SRP_KEYX succesful"
    fi
    local wkName="win.security"
    # Launch bbservice in the background
    echo "Launch bbservice..."
    ${SERVICE_PATH}/bbservice.exe -n $wkName $transportFlag 2>$SERVICE_LOG 1>&2 &
    service_pid=$!
    # Give the service time to advertise
    sleep 2
    # Launch bbclient
    echo "Launch bbclient..."
    ${CLIENT_PATH}/bbclient.exe -n $wkName -d -c 5 $authFlag 2>$CLIENT_LOG 1>&2 &
    local client_pid=$!
    # wait for client to complete discovery, join, auth and method calls
    sleep 60
    local FOUND_NAME="FoundAdvertisedName(name=$wkName, transport=$transportEnum, prefix=$wkName)"
    local JOIN_SESSION="JoinSession $transportEnum takes"
    # Check log to decide test result
    echo "Check client log..."
    logHasKeysExactTimes $CLIENT_LOG "$FOUND_NAME" 1
    local foundName=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$JOIN_SESSION" 1
    local joinSession=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$authSuccess" 1
    local authStatus=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$BBCLIENT_OK" 1
    local clientOk=$testResult
    # bbclient should have exited by now
    isProcessLive $client_pid
    local clientExit=$procLive
    if [ "$foundName" -eq 0 -a "$joinSession" -eq 0 -a "$authStatus" -eq 0 -a "$clientOk" -eq 0 ]; then
        testResults[totalTests]=0
        echo "Test result=Pass" >> $testResultFile
        echo "<p>Test result=Pass</p>" >> $testResultHtml
        if [ "$clientExit" -ne 0 ]; then
            echo "bbclient still running!" >> $testProgressLog
        fi
    elif [ "$foundName" -ne 0 ]; then
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(discovery fail!)" >> $testResultFile
        # Escaped quotes keep the generated HTML attributes well-formed
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$CLIENT_ERROR\">discovery fail!</a>)</p>" >> $testResultHtml
    elif [ "$joinSession" -ne 0 ]; then
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(joinsession fail!)" >> $testResultFile
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$CLIENT_ERROR\">joinsession fail!</a>)</p>" >> $testResultHtml
    elif [ "$authStatus" -ne 0 ]; then
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(authentication fail!)" >> $testResultFile
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$CLIENT_ERROR\">authentication fail!</a>)</p>" >> $testResultHtml
    elif [ "$clientOk" -ne 0 ]; then
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(bbclient NOT exit 0)" >> $testResultFile
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$CLIENT_ERROR\">exit error</a>)</p>" >> $testResultHtml
    fi
    echo "Clean up service $service_pid and client $client_pid ..."
    kill $service_pid
    # Only kill the client if it is still alive
    if [ "$clientExit" -ne 0 ]; then
        kill $client_pid
    fi
    # Wait 3 seconds till cleanup
    sleep 3
    # Increase test count
    totalTests=$(($totalTests + 1))
}
# Function name: ScSecurityTests
# Description: standard client security tests -- every supported
#   authentication mechanism over both TCP and UDP
# Parameter: none
# Return: none
function ScSecurityTests() {
    echo "Standard client security tests"
    # The keystore is wiped before each run so credentials cached by a
    # previous (possibly failed) test cannot influence the next one.
    local transport auth
    for transport in tcp udp; do
        for auth in SRP LOGON ECDHE_NULL ECDHE_PSK ECDHE_ECDSA; do
            cleanKeyStore
            runScSecurity "release" "$transport" "$auth"
        done
    done
}
# Function name: bbServiceClient
# Description: bbService and bbClient method-call test over TCP, both on
#   windows
# Globals: testResults, totalTests, testResultFile, testResultHtml,
#   testProgressLog, cppRelBinDir, cppDbgBinDir, BBCLIENT_OK
# Parameter:
#   buildVariant - release or debug
# Return: none (records pass/fail in testResults and the report files)
function bbServiceClient() {
    local buildVariant=$1
    # Initialize result to success
    testResults[totalTests]=0
    # Test case numbering starts from 1
    local testCase=$(($totalTests + 1))
    # Leave an empty line between tests in report
    echo >> $testResultFile
    echo "<br>" >> $testResultHtml
    echo "Test case=$testCase(Method calls over TCP $buildVariant)"
    echo "Test case=$testCase(Method calls over TCP $buildVariant)">> $testResultFile
    echo "<p>Test case=$testCase(Method calls over TCP $buildVariant)</p>">> $testResultHtml
    local CLIENT_LOG="bbclient_$buildVariant.txt"
    local SERVICE_LOG="bbservice_$buildVariant.txt"
    # error log names in Jenkins archive
    # NOTE: ${buildVariant} must be braced; "$buildVariant_error" would
    # expand the undefined variable 'buildVariant_error' and produce
    # "bbclient_.txt" for every variant.
    local CLIENT_ERROR="bbclient_${buildVariant}_error.txt"
    local SERVICE_ERROR="bbservice_${buildVariant}_error.txt"
    local SERVICE_PATH="$cppRelBinDir"
    local CLIENT_PATH="$cppRelBinDir"
    if [ "$buildVariant" == "debug" -o "$buildVariant" == "dbg" ]; then
        echo "Debug test variant"
        SERVICE_PATH="$cppDbgBinDir"
        CLIENT_PATH="$cppDbgBinDir"
    fi
    # Launch bbservice in the background
    echo "Launch bbservice..."
    ${SERVICE_PATH}/bbservice.exe -n com.w13 -t 2>$SERVICE_LOG 1>&2 &
    service_pid=$!
    # Give the service time to advertise
    sleep 2
    # Launch bbclient
    echo "Launch bbclient..."
    ${CLIENT_PATH}/bbclient.exe -n com.w13 -d -c 100 2>$CLIENT_LOG 1>&2 &
    local client_pid=$!
    # wait for client to complete discovery, join and method calls
    sleep 40
    local FOUND_NAME="FoundAdvertisedName(name=com.w13, transport=0x4, prefix=com.w13)"
    local JOIN_SESSION="JoinSession 0x4 takes"
    # Check log to decide test result
    echo "Check client log..."
    logHasKeysExactTimes $CLIENT_LOG "$FOUND_NAME" 1
    local foundName=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$JOIN_SESSION" 1
    local joinSession=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$BBCLIENT_OK" 1
    local clientOk=$testResult
    # bbclient should have exited by now
    isProcessLive $client_pid
    local clientExit=$procLive
    if [ "$foundName" -eq 0 -a "$joinSession" -eq 0 -a "$clientOk" -eq 0 ]; then
        testResults[totalTests]=0
        echo "Test result=Pass" >> $testResultFile
        echo "<p>Test result=Pass</p>" >> $testResultHtml
        if [ "$clientExit" -ne 0 ]; then
            echo "bbclient still running!" >> $testProgressLog
        fi
    elif [ "$foundName" -ne 0 ]; then
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(discovery fail!)" >> $testResultFile
        # Escaped quotes keep the generated HTML attributes well-formed
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$CLIENT_ERROR\">discovery fail!</a>)</p>" >> $testResultHtml
    elif [ "$joinSession" -ne 0 ]; then
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(joinsession fail!)" >> $testResultFile
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$CLIENT_ERROR\">joinsession fail!</a>)</p>" >> $testResultHtml
    elif [ "$clientOk" -ne 0 ]; then
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(bbclient NOT exit 0)" >> $testResultFile
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$CLIENT_ERROR\">bbclient NOT exit 0</a>)</p>" >> $testResultHtml
    fi
    echo "Clean up service $service_pid and client $client_pid ..."
    kill $service_pid
    # Only kill the client if it is still alive
    if [ "$clientExit" -ne 0 ]; then
        kill $client_pid
    fi
    # Wait 3 seconds till cleanup
    sleep 3
    # Increase test count
    totalTests=$(($totalTests + 1))
}
# Function name: runWinService1
# Description:
# bbservice on windows and bbclient on android; bbservice starts 1st
# Globals: testResults, totalTests, testResultFile, testResultHtml,
# cppRelBinDir, AndroidTestDir, ADB, BBCLIENT_OK
# Parameter:
# advName - advertised name
# sleepTime - sleep time between bbservice and bbclient
# testDesc - test description
# Return:
# testResults[testCase-1] is 0 if pass; 6 if fail
function runWinService1() {
local advName=$1
local sleepTime=$2
local testDesc=$3
# Initialize result to success
testResults[totalTests]=0
# Test case start from 1
local testCase=$(($totalTests + 1))
# Leave an empty line between tests in report
echo >> $testResultFile
echo "<br>" >> $testResultHtml
echo "Test case=$testCase($testDesc)"
echo "Test case=$testCase($testDesc)">> $testResultFile
echo "<p>Test case=$testCase($testDesc)</p>">> $testResultHtml
local androidClientLog="android_bbclient_${testCase}.log"
local winServiceLog="win_bbservice_${testCase}.log"
# Possible error logs to minimize Jenkins archieve
local androidClientError="android_bbclient_${testCase}_error.txt"
local winServiceError="win_bbservice_${testCase}_error.txt"
local SERVICE_PATH="$cppRelBinDir"
local CLIENT_PATH="$AndroidTestDir"
# Make sure no stale bbservice is running from a previous test
killWinProcess "bbservice"
# Launch bbservice on windows (background, all output captured)
echo "Launch bbservice..."
${SERVICE_PATH}/bbservice.exe -n $advName -t 2>$winServiceLog 1>&2 &
service_pid=$!
# Wait the configured delay before the client side starts
sleep $sleepTime
killAndroidProcess "bbclient"
# Launch bbclient on android; timeout bounds the whole client run
echo "Launch bbclient on android..."
timeout 90 $ADB shell ${CLIENT_PATH}/bbclient -n $advName -d -c 10 2>$androidClientLog 1>&2
local FOUND_NAME="FoundAdvertisedName(name=$advName, transport=0x4, prefix=$advName)"
local JOIN_SESSION="JoinSession 0x4 takes"
#Check log to decide test result
echo "Check client log..."
logHasKeysExactTimes $androidClientLog "$FOUND_NAME" 1
local foundName=$testResult
logHasKeysExactTimes $androidClientLog "$JOIN_SESSION" 1
local joinSession=$testResult
logHasKeysExactTimes $androidClientLog "$BBCLIENT_OK" 1
local clientOk=$testResult
if [ "$foundName" -eq 0 -a "$joinSession" -eq 0 -a "$clientOk" -eq 0 ]; then
testResults[totalTests]=0
echo "Test result=Pass" >> $testResultFile
echo "<p>Test result=Pass</p>" >> $testResultHtml
elif [ "$foundName" -ne 0 ]; then
mv $androidClientLog $androidClientError
testResults[totalTests]=6
echo "Test result=Fail(discovery fail!)" >> $testResultFile
# NOTE(review): the embedded "error" quotes close the shell string, so
# the emitted HTML attributes are unquoted -- works in browsers but
# consider escaping as \"error\" for well-formed markup
echo "<p>Test result=Fail(<a class="error" href="$androidClientError">discovery fail!</a>)</p>" >> $testResultHtml
elif [ "$joinSession" -ne 0 ]; then
mv $androidClientLog $androidClientError
testResults[totalTests]=6
echo "Test result=Fail(joinsession fail!)" >> $testResultFile
echo "<p>Test result=Fail(<a class="error" href="$androidClientError">joinsession fail!</a>)</p>" >> $testResultHtml
else
mv $androidClientLog $androidClientError
testResults[totalTests]=6
echo "Test result=Fail(bbclient NOT exit 0)" >> $testResultFile
echo "<p>Test result=Fail(<a class="error" href="$androidClientError">bbclient NOT exit 0</a>)</p>" >> $testResultHtml
fi
echo "Clean up service $service_pid ..."
kill $service_pid
killAndroidProcess "bbclient"
# Wait 3 seconds till cleanup
sleep 3
# Increse test count
totalTests=$(($totalTests + 1))
}
# Function name: runWinService2
# Description:
#   bbservice on windows and bbclient on android; bbclient starts 1st
# Globals: testResults, totalTests, testResultFile, testResultHtml,
#   cppRelBinDir, AndroidTestDir, ADB, BBCLIENT_OK
# Parameter:
#   advName - advertise name
#   sleepTime - sleep time between bbclient and bbservice
#   testDesc - test description
# Return:
#   testResults[testCase-1] is 0 if pass; 6 if fail
function runWinService2() {
    local advName=$1
    local sleepTime=$2
    local testDesc=$3
    # Initialize result to success
    testResults[totalTests]=0
    # Test case numbering starts from 1
    local testCase=$(($totalTests + 1))
    # Leave an empty line between tests in report
    echo >> $testResultFile
    echo "<br>" >> $testResultHtml
    echo "Test case=$testCase($testDesc)"
    echo "Test case=$testCase($testDesc)">> $testResultFile
    echo "<p>Test case=$testCase($testDesc)</p>">> $testResultHtml
    local androidClientLog="android_bbclient_${testCase}.log"
    local winServiceLog="win_bbservice_${testCase}.log"
    # Possible error logs to minimize Jenkins archive
    local androidClientError="android_bbclient_${testCase}_error.txt"
    local winServiceError="win_bbservice_${testCase}_error.txt"
    local SERVICE_PATH="$cppRelBinDir"
    local CLIENT_PATH="$AndroidTestDir"
    # Make sure no stale processes survive from a previous test
    killAndroidProcess "bbclient"
    killWinProcess "bbservice"
    # Launch bbclient first (background), so it is waiting when the
    # service comes up
    echo "Launch bbclient on android..."
    $ADB shell ${CLIENT_PATH}/bbclient -n $advName -d -c 10 2>$androidClientLog 1>&2 &
    # Wait the configured delay before the service side starts
    sleep $sleepTime
    # Launch bbservice on windows; the timeout keeps it up long enough
    # for discovery and the method calls to complete
    echo "Launch bbservice..."
    timeout 90 ${SERVICE_PATH}/bbservice.exe -n $advName -t 2>$winServiceLog 1>&2
    local FOUND_NAME="FoundAdvertisedName(name=$advName, transport=0x4, prefix=$advName)"
    local JOIN_SESSION="JoinSession 0x4 takes"
    # Check log to decide test result
    echo "Check client log..."
    logHasKeysExactTimes $androidClientLog "$FOUND_NAME" 1
    local foundName=$testResult
    logHasKeysExactTimes $androidClientLog "$JOIN_SESSION" 1
    local joinSession=$testResult
    logHasKeysExactTimes $androidClientLog "$BBCLIENT_OK" 1
    local clientOk=$testResult
    if [ "$foundName" -eq 0 -a "$joinSession" -eq 0 -a "$clientOk" -eq 0 ]; then
        testResults[totalTests]=0
        echo "Test result=Pass" >> $testResultFile
        echo "<p>Test result=Pass</p>" >> $testResultHtml
    elif [ "$foundName" -ne 0 ]; then
        mv $androidClientLog $androidClientError
        testResults[totalTests]=6
        echo "Test result=Fail(discovery fail!)" >> $testResultFile
        # Fixed: the original line referenced the undefined variable
        # "$discovery" and was missing the closing </a> tag
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$androidClientError\">discovery fail!</a>)</p>" >> $testResultHtml
    elif [ "$joinSession" -ne 0 ]; then
        mv $androidClientLog $androidClientError
        testResults[totalTests]=6
        echo "Test result=Fail(joinsession fail!)" >> $testResultFile
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$androidClientError\">joinsession fail!</a>)</p>" >> $testResultHtml
    else
        mv $androidClientLog $androidClientError
        testResults[totalTests]=6
        echo "Test result=Fail(bbclient NOT exit 0)" >> $testResultFile
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$androidClientError\">bbclient NOT exit 0</a>)</p>" >> $testResultHtml
    fi
    killAndroidProcess "bbclient"
    # Wait 3 seconds till cleanup
    sleep 3
    # Increase test count
    totalTests=$(($totalTests + 1))
}
# Function name: runWinClient1
# Description:
#   bbclient on windows and bbservice on android; bbservice starts 1st
# Globals: testResults, totalTests, testResultFile, testResultHtml,
#   cppRelBinDir, AndroidTestDir, ADB, BBCLIENT_OK
# Parameter:
#   advName - advertised name
#   sleepTime - sleep time between bbservice and bbclient
#   testDesc - test description
# Return:
#   testResults[testCase-1] is 0 if pass; 6 if fail
function runWinClient1() {
    local advName=$1
    local sleepTime=$2
    local testDesc=$3
    # Initialize result to success
    testResults[totalTests]=0
    # Test case numbering starts from 1
    local testCase=$(($totalTests + 1))
    # Leave an empty line between tests in report
    echo >> $testResultFile
    echo "<br>" >> $testResultHtml
    echo "Test case=$testCase($testDesc)"
    echo "Test case=$testCase($testDesc)">> $testResultFile
    echo "<p>Test case=$testCase($testDesc)</p>">> $testResultHtml
    local winClientLog="win7_bbclient_${testCase}.log"
    local androidServiceLog="android_bbservice_${testCase}.log"
    # Possible error logs to minimize Jenkins archive
    local winClientError="win7_bbclient_${testCase}_error.txt"
    local androidServiceError="android_bbservice_${testCase}_error.txt"
    local SERVICE_PATH="$AndroidTestDir"
    local CLIENT_PATH="$cppRelBinDir"
    # Make sure no stale processes survive from a previous test
    killAndroidProcess "bbservice"
    killAndroidProcess "bbclient"
    killWinProcess "bbservice"
    killWinProcess "bbclient"
    # Launch bbservice on android (background adb invocation)
    echo "Launch bbservice on android"
    $ADB shell ${SERVICE_PATH}/bbservice -n $advName -t 2>$androidServiceLog 1>&2 &
    service_pid=$!
    # Wait the configured delay before the client side starts
    sleep $sleepTime
    # Launch bbclient on windows; timeout bounds the whole client run
    echo "Launch bbclient on windows..."
    timeout 60 ${CLIENT_PATH}/bbclient -n $advName -d -c 10 2>$winClientLog 1>&2
    local FOUND_NAME="FoundAdvertisedName(name=$advName, transport=0x4, prefix=$advName)"
    local JOIN_SESSION="JoinSession 0x4 takes"
    # Check log to decide test result
    echo "Check client log..."
    # Windows7 client should discover exactly once
    logHasKeysExactTimes $winClientLog "$FOUND_NAME" 1
    local foundName=$testResult
    logHasKeysExactTimes $winClientLog "$JOIN_SESSION" 1
    local joinSession=$testResult
    logHasKeysExactTimes $winClientLog "$BBCLIENT_OK" 1
    local clientOk=$testResult
    if [ "$foundName" -eq 0 -a "$joinSession" -eq 0 -a "$clientOk" -eq 0 ]; then
        testResults[totalTests]=0
        echo "Test result=Pass" >> $testResultFile
        echo "<p>Test result=Pass</p>" >> $testResultHtml
    elif [ "$foundName" -ne 0 ]; then
        mv $winClientLog $winClientError
        testResults[totalTests]=6
        echo "Test result=Fail(discovery fail!)" >> $testResultFile
        # Fixed: link to $winClientError -- the log was just renamed, so
        # the original href="$winClientLog" pointed at a missing file
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$winClientError\">discovery fail!</a>)</p>" >> $testResultHtml
    elif [ "$joinSession" -ne 0 ]; then
        mv $winClientLog $winClientError
        testResults[totalTests]=6
        echo "Test result=Fail(joinsession fail!)" >> $testResultFile
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$winClientError\">joinsession fail!</a>)</p>" >> $testResultHtml
    else
        mv $winClientLog $winClientError
        testResults[totalTests]=6
        echo "Test result=Fail(bbclient NOT exit 0)" >> $testResultFile
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$winClientError\">bbclient NOT exit 0</a>)</p>" >> $testResultHtml
    fi
    echo "Clean up bbservice on android ..."
    killAndroidProcess "bbservice"
    # Wait 3 seconds till cleanup
    sleep 3
    # Increase test count
    totalTests=$(($totalTests + 1))
}
# Function name: runWinClient2
# Description:
#   bbclient on windows and bbservice on android; bbclient starts 1st
# Globals: testResults, totalTests, testResultFile, testResultHtml,
#   cppRelBinDir, AndroidTestDir, ADB, BBCLIENT_OK
# Parameter:
#   advName - advertised name
#   sleepTime - sleep time between bbclient and bbservice
#   testDesc - test description
# Return:
#   testResults[testCase-1] is 0 if pass; 6 if fail
function runWinClient2() {
    local advName=$1
    local sleepTime=$2
    local testDesc=$3
    # Initialize result to success
    testResults[totalTests]=0
    # Test case numbering starts from 1
    local testCase=$(($totalTests + 1))
    # Leave an empty line between tests in report
    echo >> $testResultFile
    echo "<br>" >> $testResultHtml
    echo "Test case=$testCase($testDesc)"
    echo "Test case=$testCase($testDesc)">> $testResultFile
    echo "<p>Test case=$testCase($testDesc)</p>">> $testResultHtml
    local winClientLog="win7_bbclient_${testCase}.log"
    local androidServiceLog="android_bbservice_${testCase}.log"
    # Possible error logs to minimize Jenkins archive
    local winClientError="win7_bbclient_${testCase}_error.txt"
    local androidServiceError="android_bbservice_${testCase}_error.txt"
    local SERVICE_PATH="$AndroidTestDir"
    local CLIENT_PATH="$cppRelBinDir"
    # Make sure no stale processes survive from a previous test
    killWinProcess "bbservice"
    killWinProcess "bbclient"
    killAndroidProcess "bbservice"
    killAndroidProcess "bbclient"
    # Launch bbclient first (background), so it is waiting when the
    # service comes up
    echo "Launch bbclient on windows..."
    ${CLIENT_PATH}/bbclient -n $advName -d -c 10 2>$winClientLog 1>&2 &
    # Wait the configured delay before the service side starts
    sleep $sleepTime
    # Launch bbservice on android, timeout will wait 40 seconds so
    # discovery can complete
    echo "Launch bbservice on android"
    timeout 40 $ADB shell ${SERVICE_PATH}/bbservice -n $advName -t 2>$androidServiceLog 1>&2
    local FOUND_NAME="FoundAdvertisedName(name=$advName, transport=0x4, prefix=$advName)"
    local JOIN_SESSION="JoinSession 0x4 takes"
    # Check log to decide test result
    echo "Check client log..."
    # Windows7 client should discover exactly once
    logHasKeysExactTimes $winClientLog "$FOUND_NAME" 1
    local foundName=$testResult
    logHasKeysExactTimes $winClientLog "$JOIN_SESSION" 1
    local joinSession=$testResult
    logHasKeysExactTimes $winClientLog "$BBCLIENT_OK" 1
    local clientOk=$testResult
    if [ "$foundName" -eq 0 -a "$joinSession" -eq 0 -a "$clientOk" -eq 0 ]; then
        testResults[totalTests]=0
        echo "Test result=Pass" >> $testResultFile
        echo "<p>Test result=Pass</p>" >> $testResultHtml
    elif [ "$foundName" -ne 0 ]; then
        mv $winClientLog $winClientError
        testResults[totalTests]=6
        echo "Test result=Fail(discovery fail!)" >> $testResultFile
        # Fixed: link to $winClientError -- the log was just renamed, so
        # the original href="$winClientLog" pointed at a missing file
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$winClientError\">discovery fail!</a>)</p>" >> $testResultHtml
    elif [ "$joinSession" -ne 0 ]; then
        mv $winClientLog $winClientError
        testResults[totalTests]=6
        echo "Test result=Fail(joinsession fail!)" >> $testResultFile
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$winClientError\">joinsession fail!</a>)</p>" >> $testResultHtml
    else
        mv $winClientLog $winClientError
        testResults[totalTests]=6
        echo "Test result=Fail(bbclient NOT exit 0)" >> $testResultFile
        echo "<p>Test result=Fail(<a class=\"error\" href=\"$winClientError\">bbclient NOT exit 0</a>)</p>" >> $testResultHtml
    fi
    killWinProcess "bbclient"
    # Wait 3 seconds till cleanup
    sleep 3
    # Increase test count
    totalTests=$(($totalTests + 1))
}
# Function name: lostAdvertisedName
# Description:
# bbClient foundAdvertisedName from bbservice 1st,
# bbservice quit and bbclient received lostAdvertisedName signal
# Parameter:
# buildVariant - release or debug
# Return: none
function lostAdvertisedName() {
    # Scenario: bbclient discovers bbservice first; bbservice is then stopped
    # and the client must receive the LostAdvertisedName signal.
    # Records result in testResults[totalTests] (0 = pass, 6 = fail).
    local buildVariant=$1    # "release"/"rel" or "debug"/"dbg"
    # Initialize result to success
    testResults[totalTests]=0
    # Test case numbering starts from 1
    local testCase=$(($totalTests + 1))
    # Leave an empty line between tests in report
    echo >> $testResultFile
    echo "<br>" >> $testResultHtml
    echo "Test case=$testCase(lostAdvertisedName test $buildVariant)"
    echo "Test case=$testCase(lostAdvertisedName test $buildVariant)">> $testResultFile
    echo "<p>Test case=$testCase(lostAdvertisedName test $buildVariant)</p>">> $testResultHtml
    local CLIENT_LOG="bbclient_lost_$buildVariant.txt"
    local SERVICE_LOG="bbservice_lost_$buildVariant.txt"
    # Possible error logs to minimize Jenkins archive.
    # BUGFIX: braces are required — "$buildVariant_error" expanded the
    # undefined variable 'buildVariant_error', yielding "bbclient_lost_.txt".
    local CLIENT_ERROR="bbclient_lost_${buildVariant}_error.txt"
    local SERVICE_ERROR="bbservice_lost_${buildVariant}_error.txt"
    local SERVICE_PATH="$cppRelBinDir"
    local CLIENT_PATH="$cppRelBinDir"
    if [ "$buildVariant" == "debug" -o "$buildVariant" == "dbg" ]; then
        echo "Debug test variant"
        SERVICE_PATH="$cppDbgBinDir"
        CLIENT_PATH="$cppDbgBinDir"
    fi
    # Launch bbservice
    echo "Launch bbservice..."
    ${SERVICE_PATH}/bbservice.exe -n gov.a -t 2>$SERVICE_LOG 1>&2 &
    service_pid=$!
    # Wait
    sleep 1
    # Launch bbclient
    echo "Launch bbclient..."
    ${CLIENT_PATH}/bbclient.exe -n gov -d 2>$CLIENT_LOG 1>&2 &
    local client_pid=$!
    # Wait for client to complete discovery
    sleep 10
    # Quit bbservice by interrupt (ctrl +c) so it withdraws its advertisement
    kill -2 $service_pid
    # Wait for lostAdvertisedName to reach bbclient
    sleep 5
    local FOUND_NAME="FoundAdvertisedName(name=gov.a, transport=0x4, prefix=gov)"
    local LOST_Name="LostAdvertisedName(name=gov.a, transport=0x4, prefix=gov)"
    # Check log to decide test result
    echo "Check client log to make sure foundName and lostName signals..."
    logHasKeysExactTimes $CLIENT_LOG "$FOUND_NAME" 1
    local foundName=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$LOST_Name" 1
    local lostName=$testResult
    if [ "$foundName" -eq 0 -a "$lostName" -eq 0 ]; then
        testResults[totalTests]=0
        echo "Test result=Pass" >> $testResultFile
        echo "<p>Test result=Pass</p>" >> $testResultHtml
    elif [ "$foundName" -eq 0 ]; then
        # Discovery worked, so the lost signal is what went missing
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(lostAdvertisedName signal not received)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">lostAdvertisedName signal not received</a>)</p>" >> $testResultHtml
    else
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(foundAdvertisedName signal not received)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">foundAdvertisedName signal not received</a>)</p>" >> $testResultHtml
    fi
    echo "Clean up client $client_pid ..."
    kill $client_pid
    sleep 3
    # Increase test count
    totalTests=$(($totalTests + 1))
}
# Function name: javaMethodCall
# Description:
# Java binding sample service/client method call
# Parameter:
# buildVariant - release or debug
# Return: none
function javaMethodCall() {
    # Runs the JavaSDKDocMethods sample pair over TCP: the client must
    # discover the service, join a session, and complete ping/concatenate/
    # fibonacci plus two concurrent Pi-calculation threads.
    # Records result in testResults[totalTests] (0 = pass, 6 = fail) and
    # appends to the text/HTML reports.
    local buildVariant=$1
    # Initialize result to success
    testResults[totalTests]=0
    # Test case start from 1
    local testCase=$(($totalTests + 1))
    # Leave an empty line between tests in report
    echo >> $testResultFile
    echo "<br>" >> $testResultHtml
    echo "Test case=$testCase(JavaSDKDocMethod test $buildVariant)"
    echo "Test case=$testCase(JavaSDKDocMethod test $buildVariant)">> $testResultFile
    echo "<p>Test case=$testCase(JavaSDKDocMethod test $buildVariant)</p>">> $testResultHtml
    local CLIENT_LOG="java_method_client_$buildVariant.txt"
    local SERVICE_LOG="java_method_service_$buildVariant.txt"
    # Possible error logs to minimize Jenkins archive
    local CLIENT_ERROR="java_method_client_${buildVariant}_error.txt"
    local SERVICE_ERROR="java_method_service_${buildVariant}_error.txt"
    local SERVICE_PATH="$jarRelRelativeDir"
    local CLIENT_PATH="$SERVICE_PATH"
    local javaLibPath="$javaRelRelativeLibDir"
    # Launch JavaSDKDocMethodsService in relative path
    # Java does not recognize full path like: /cygdrive/c/...
    echo "Launch JavaSDKDocMethodsService..."
    java -Djava.library.path=$javaLibPath -jar ${SERVICE_PATH}/JavaSDKDocMethodsService.jar 2>$SERVICE_LOG 1>&2 &
    service_pid=$!
    # Wait
    sleep 2
    # Launch JavaSDKDocMethodsClient
    echo "Launch JavaSDKDocMethodsClient..."
    java -Djava.library.path=$javaLibPath -jar ${CLIENT_PATH}/JavaSDKDocMethodsClient.jar 2>$CLIENT_LOG 1>&2 &
    local client_pid=$!
    # Wait for client to complete discovery, join and method call
    sleep 90
    # Add some padding at log end to avoid output buffering
    padLog $CLIENT_LOG
    # Expected log keys — must match the sample's output verbatim
    # ("callculate" spelling included)
    local FOUND_NAME="BusListener.foundAdvertisedName"
    local JOIN_SESSION="BusAttachement.joinSession successful"
    local PING_RET="Ping : Hello World"
    local CONCAT="Concatenate : The Eagle has landed!"
    local FIBONACCI="Fibonacci(4) : 3"
    local THREAD11="Thread 1: Starting callculate P1"
    local THREAD21="Thread 2: Starting callculate P1"
    local THREAD12="Thread 1: Pi(1000000000) = 3.1415926525880504"
    local THREAD22="Thread 2: Pi(1000000000) = 3.1415926525880504"
    # Check log to decide test result
    echo "Check client log ..."
    logHasKeysMinTimes $CLIENT_LOG "$FOUND_NAME" 1
    local foundName=$testResult
    logHasKeysMinTimes $CLIENT_LOG "$JOIN_SESSION" 1
    local joinSession=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$PING_RET" 1
    local pingHello=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$CONCAT" 1
    local concat=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$THREAD11" 1
    local thread11=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$THREAD12" 1
    local thread12=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$THREAD21" 1
    local thread21=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$THREAD22" 1
    local thread22=$testResult
    # Client should have exited on its own by now; remember for cleanup
    isProcessLive $client_pid
    local clientExit=$procLive
    # Decision tree: discovery/join first, then ping/concat, then each thread;
    # on any failure the client log is renamed and linked from the HTML report.
    if [ "$foundName" -eq 0 -a "$joinSession" -eq 0 ]; then
        if [ "$pingHello" -eq 0 -a "$concat" -eq 0 ]; then
            if [ "$thread11" -eq 0 -a "$thread12" -eq 0 ]; then
                if [ "$thread21" -eq 0 -a "$thread22" -eq 0 ]; then
                    testResults[totalTests]=0
                    echo "Test result=Pass" >> $testResultFile
                    echo "<p>Test result=Pass</p>" >> $testResultHtml
                    if [ "$clientExit" -ne 0 ]; then
                        echo "JavaSDKDocMethodsClient still running!" >> $testProgressLog
                    fi
                elif [ "$thread21" -eq 0 ]; then
                    mv $CLIENT_LOG $CLIENT_ERROR
                    testResults[totalTests]=6
                    echo "Test result=Fail(Thread2 incomplete)" >> $testResultFile
                    echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">Thread2 incomplete</a>)</p>" >> $testResultHtml
                else
                    mv $CLIENT_LOG $CLIENT_ERROR
                    testResults[totalTests]=6
                    echo "Test result=Fail(Thread2 NOT started)" >> $testResultFile
                    echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">Thread2 NOT started</a>)</p>" >> $testResultHtml
                fi
            elif [ "$thread11" -eq 0 ]; then
                mv $CLIENT_LOG $CLIENT_ERROR
                testResults[totalTests]=6
                echo "Test result=Fail(Thread1 incomplete)" >> $testResultFile
                echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">Thread1 incomplete</a>)</p>" >> $testResultHtml
            else
                mv $CLIENT_LOG $CLIENT_ERROR
                testResults[totalTests]=6
                echo "Test result=Fail(Thread1 NOT started)" >> $testResultFile
                echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">Thread1 NOT started</a>)</p>" >> $testResultHtml
            fi
        elif [ "$pingHello" -eq 0 ]; then
            mv $CLIENT_LOG $CLIENT_ERROR
            testResults[totalTests]=6
            echo "Test result=Fail(Concat missing)" >> $testResultFile
            echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">Concat missing</a>)</p>" >> $testResultHtml
        else
            mv $CLIENT_LOG $CLIENT_ERROR
            testResults[totalTests]=6
            echo "Test result=Fail(ping missing)" >> $testResultFile
            echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">ping missing</a>)</p>" >> $testResultHtml
        fi
    elif [ "$foundName" -ne 0 ]; then
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(foundAdvertisedName)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">foundAdvertisedName</a>)</p>" >> $testResultHtml
    else
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(joinSession)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">joinSession</a>)</p>" >> $testResultHtml
    fi
    # Quit the service by interrupt (ctrl +c)
    kill -2 $service_pid
    if [ "$clientExit" -ne 0 ]; then
        kill $client_pid
    fi
    sleep 3
    # Increase test count
    totalTests=$(($totalTests + 1))
}
# Function name: javaProperty
# Description:
# Java binding sample PropertyService/client method call
# Parameter:
# buildVariant - release or debug
# Return: none
function javaProperty() {
    # Runs the JavaSDKDocProperties sample pair over TCP: the client must
    # discover, join, then read/write the TextSize property — expected log
    # values are "TextSize = 12" once and "TextSize = 3" twice.
    # Records result in testResults[totalTests] (0 = pass, 6 = fail).
    local buildVariant=$1
    # Initialize result to success
    testResults[totalTests]=0
    # Test case start from 1
    local testCase=$(($totalTests + 1))
    # Leave an empty line between tests in report
    echo >> $testResultFile
    echo "<br>" >> $testResultHtml
    echo "Test case=$testCase(JavaSDKDocProperties test $buildVariant)"
    echo "Test case=$testCase(JavaSDKDocProperties test $buildVariant)">> $testResultFile
    echo "<p>Test case=$testCase(JavaSDKDocProperties test $buildVariant)</p>">> $testResultHtml
    local CLIENT_LOG="java_property_client_$buildVariant.txt"
    local SERVICE_LOG="java_property_service_$buildVariant.txt"
    # Possible error log files
    local CLIENT_ERROR="java_property_client_${buildVariant}_error.txt"
    local SERVICE_ERROR="java_property_service_${buildVariant}_error.txt"
    local SERVICE_PATH="$jarRelRelativeDir"
    local CLIENT_PATH="$SERVICE_PATH"
    local javaLibPath="$javaRelRelativeLibDir"
    # Launch JavaSDKDocPropertiesService in relative path
    # Java does not recognize full path like: /cygdrive/c/...
    echo "Launch JavaSDKDocPropertiesService..."
    java -Djava.library.path=$javaLibPath -jar ${SERVICE_PATH}/JavaSDKDocPropertiesService.jar 2>$SERVICE_LOG 1>&2 &
    service_pid=$!
    # Wait
    sleep 1
    # Launch JavaSDKDocPropertiesClient
    echo "Launch JavaSDKDocPropertiesClient..."
    java -Djava.library.path=$javaLibPath -jar ${CLIENT_PATH}/JavaSDKDocPropertiesClient.jar 2>$CLIENT_LOG 1>&2 &
    local client_pid=$!
    # Wait for client to complete discovery, join and method call
    sleep 60
    local FOUND_NAME="BusListener.foundAdvertisedName"
    local JOIN_SESSION="BusAttachement.joinSession successful"
    local TEXTSIZE1="TextSize = 12"
    local TEXTSIZE2="TextSize = 3"
    # Check log to decide test result
    echo "Check client log ..."
    logHasKeysMinTimes $CLIENT_LOG "$FOUND_NAME" 1
    local foundName=$testResult
    logHasKeysMinTimes $CLIENT_LOG "$JOIN_SESSION" 1
    local joinSession=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$TEXTSIZE1" 1
    local textSize1=$testResult
    # TextSize = 3 should appear twice
    logHasKeysExactTimes $CLIENT_LOG "$TEXTSIZE2" 2
    local textSize2=$testResult
    # Client should have exited on its own by now; remember for cleanup
    isProcessLive $client_pid
    local clientExit=$procLive
    if [ "$foundName" -eq 0 -a "$joinSession" -eq 0 ]; then
        if [ "$textSize1" -eq 0 -a "$textSize2" -eq 0 ]; then
            testResults[totalTests]=0
            echo "Test result=Pass" >> $testResultFile
            echo "<p>Test result=Pass</p>" >> $testResultHtml
            if [ "$clientExit" -ne 0 ]; then
                echo "JavaSDKDocPropertiesClient still running!" >> $testProgressLog
            fi
        elif [ "$textSize1" -ne 0 ]; then
            mv $CLIENT_LOG $CLIENT_ERROR
            testResults[totalTests]=6
            echo "Test result=Fail($TEXTSIZE1 miss)" >> $testResultFile
            echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">$TEXTSIZE1 miss</a>)</p>" >> $testResultHtml
        else
            mv $CLIENT_LOG $CLIENT_ERROR
            testResults[totalTests]=6
            echo "Test result=Fail($TEXTSIZE2 miss)" >> $testResultFile
            echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">$TEXTSIZE2 miss</a>)</p>" >> $testResultHtml
        fi
    elif [ "$foundName" -eq 0 ]; then
        # Discovery succeeded, so the join must be what failed
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(joinSession)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">joinSession</a>)</p>" >> $testResultHtml
    else
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(foundAdvertisedName)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">foundAdvertisedName</a>)</p>" >> $testResultHtml
    fi
    # Quit the service by interrupt (ctrl +c)
    kill -2 $service_pid
    if [ "$clientExit" -ne 0 ]; then
        kill $client_pid
    fi
    sleep 3
    # Increase test count
    totalTests=$(($totalTests + 1))
}
# Function name: javaSignal
# Description:
# Java binding sample SignalService/client method call
# Parameter:
# buildVariant - release or debug
# Return: none
function javaSignal() {
    # Runs the JavaSDKDocSignal sample pair over TCP: the client must discover
    # and join, then keep receiving position signals; unlike the other java
    # samples, BOTH processes are expected to still be running at check time.
    # Records result in testResults[totalTests] (0 = pass, 6 = fail).
    local buildVariant=$1
    # Initialize result to success
    testResults[totalTests]=0
    # Test case start from 1
    local testCase=$(($totalTests + 1))
    # Leave an empty line between tests in report
    echo >> $testResultFile
    echo "<br>" >> $testResultHtml
    echo "Test case=$testCase(JavaSDKDocSignal test $buildVariant)"
    echo "Test case=$testCase(JavaSDKDocSignal test $buildVariant)">> $testResultFile
    echo "<p>Test case=$testCase(JavaSDKDocSignal test $buildVariant)</p>">> $testResultHtml
    local CLIENT_LOG="java_signal_client_$buildVariant.txt"
    local SERVICE_LOG="java_signal_service_$buildVariant.txt"
    local CLIENT_ERROR="java_signal_client_${buildVariant}_error.txt"
    local SERVICE_ERROR="java_signal_service_${buildVariant}_error.txt"
    local SERVICE_PATH="$jarRelRelativeDir"
    local CLIENT_PATH="$SERVICE_PATH"
    local javaLibPath="$javaRelRelativeLibDir"
    if [ "$buildVariant" == "debug" -o "$buildVariant" == "dbg" ]; then
        echo "Debug sample variant"
        SERVICE_PATH="$jarDbgRelativeDir"
        # NOTE(review): 'local' here re-declares the function-scoped variables
        # from above; it simply reassigns them — redundant but harmless.
        local CLIENT_PATH="$SERVICE_PATH"
        local javaLibPath="$javaDbgRelativeLibDir"
    fi
    # Launch JavaSDKDocSignalService in relative path
    # Java does not recognize full path like: /cygdrive/c/...
    echo "Launch JavaSDKDocSignalService..."
    java -Djava.library.path=$javaLibPath -jar ${SERVICE_PATH}/JavaSDKDocSignalService.jar 2>$SERVICE_LOG 1>&2 &
    service_pid=$!
    # Wait
    sleep 1
    # Launch JavaSDKDocSignalClient
    echo "Launch JavaSDKDocSignalClient..."
    java -Djava.library.path=$javaLibPath -jar ${CLIENT_PATH}/JavaSDKDocSignalClient.jar 2>$CLIENT_LOG 1>&2 &
    local client_pid=$!
    # Wait for client to complete discovery, join and signal reception
    sleep 60
    local FOUND_NAME="BusListener.foundAdvertisedName"
    local JOIN_SESSION="BusAttachement.joinSession successful"
    local POSITON="Players position is 100, 50, 45"
    # Check log to decide test result
    echo "Check client log ..."
    logHasKeysMinTimes $CLIENT_LOG "$FOUND_NAME" 1
    local foundName=$testResult
    logHasKeysMinTimes $CLIENT_LOG "$JOIN_SESSION" 1
    local joinSession=$testResult
    # Client should be live to receive signals
    isProcessLive $client_pid
    local clientExit=$procLive
    # service should be live to send signals
    isProcessLive $service_pid
    local serviceExit=$procLive
    if [ "$foundName" -eq 0 -a "$joinSession" -eq 0 ]; then
        local receivedSignals=`grep "$POSITON" $CLIENT_LOG|wc -l`
        # Client should receive at least one position signal
        if [ "$receivedSignals" -gt 0 ]; then
            # service and client should both be running
            if [ "$serviceExit" -ne 0 -a "$clientExit" -ne 0 ]; then
                testResults[totalTests]=0
                echo "Test result=Pass" >> $testResultFile
                echo "<p>Test result=Pass</p>" >> $testResultHtml
            elif [ "$serviceExit" -eq 0 ]; then
                mv $CLIENT_LOG $CLIENT_ERROR
                testResults[totalTests]=6
                echo "Test result=Fail(service stopped sending signal)" >> $testResultFile
                echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">service stopped sending signal</a>)</p>" >> $testResultHtml
            else
                mv $CLIENT_LOG $CLIENT_ERROR
                testResults[totalTests]=6
                echo "Test result=Fail(client stopped receiving signal)" >> $testResultFile
                echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">client stopped receiving signal</a>)</p>" >> $testResultHtml
            fi
        else
            mv $CLIENT_LOG $CLIENT_ERROR
            testResults[totalTests]=6
            echo "Test result=Fail(client receive 0 signal)" >> $testResultFile
            echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">client receive 0 signal</a>)</p>" >> $testResultHtml
        fi
    elif [ "$foundName" -ne 0 ]; then
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(foundAdvertisedName)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">foundAdvertisedName</a>)</p>" >> $testResultHtml
    else
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(joinSession)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">joinSession</a>)</p>" >> $testResultHtml
    fi
    # Quit the service by interrupt (ctrl +c)
    kill -2 $service_pid
    echo "Clean up client $client_pid ..."
    if [ "$clientExit" -ne 0 ]; then
        kill $client_pid
    fi
    sleep 3
    # Increase test count
    totalTests=$(($totalTests + 1))
}
# Function name: javaSecurityLogon
# Description:
# Java binding sample SecurityLogonService/client
# Parameter:
# buildVariant - release or debug
# Return: none
function javaSecurityLogon() {
    # Runs the JavaSDKDocSecurityLogon sample pair over TCP: the client must
    # discover, join, authenticate via the logon mechanism, and ping
    # "Hello AllJoyn"; the service log must show the matching reply.
    # Records result in testResults[totalTests] (0 = pass, 6 = fail).
    local buildVariant=$1
    # Initialize result to success
    testResults[totalTests]=0
    # Test case start from 1
    local testCase=$(($totalTests + 1))
    # Leave an empty line between tests in report
    echo >> $testResultFile
    echo "<br>" >> $testResultHtml
    echo "Test case=$testCase(JavaSDKDocSecurityLogon test $buildVariant)"
    echo "Test case=$testCase(JavaSDKDocSecurityLogon test $buildVariant)">> $testResultFile
    echo "<p>Test case=$testCase(JavaSDKDocSecurityLogon test $buildVariant)</p>">> $testResultHtml
    local CLIENT_LOG="java_logon_client_$buildVariant.txt"
    local SERVICE_LOG="java_logon_service_$buildVariant.txt"
    local CLIENT_ERROR="java_logon_client_${buildVariant}_error.txt"
    local SERVICE_ERROR="java_logon_service_${buildVariant}_error.txt"
    local SERVICE_PATH="$jarRelRelativeDir"
    local CLIENT_PATH="$SERVICE_PATH"
    local javaLibPath="$javaRelRelativeLibDir"
    # Launch JavaSDKDocSecurityLogonService in relative path
    # Java does not recognize full path like: /cygdrive/c/...
    echo "Launch JavaSDKDocSecurityLogonService..."
    java -Djava.library.path=$javaLibPath -jar ${SERVICE_PATH}/JavaSDKDocSecurityLogonService.jar 2>$SERVICE_LOG 1>&2 &
    service_pid=$!
    # Wait
    sleep 1
    # Launch JavaSDKDocSecurityLogonClient with 2 returns
    # NOTE(review): the here-document below is empty, so the client actually
    # receives immediate EOF on stdin — confirm this matches the intended
    # "2 returns" described above.
    echo "Launch JavaSDKDocSecurityLogonClient..."
    java -Djava.library.path=$javaLibPath -jar ${CLIENT_PATH}/JavaSDKDocSecurityLogonClient.jar 2>$CLIENT_LOG 1>&2 <<EOF &
EOF
    local client_pid=$!
    # Wait for client to complete discovery, join, authentication and ping
    sleep 60
    local FOUND_NAME="BusListener.foundAdvertisedName"
    local JOIN_SESSION="BusAttachement.joinSession successful"
    local PING_HELLO="Ping = Hello AllJoyn"
    local REPLY_HELLO="Reply : Hello AllJoyn"
    # Check log to decide test result
    echo "Check client log ..."
    logHasKeysMinTimes $CLIENT_LOG "$FOUND_NAME" 1
    local foundName=$testResult
    logHasKeysMinTimes $CLIENT_LOG "$JOIN_SESSION" 1
    local joinSession=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$PING_HELLO" 1
    local clientPing=$testResult
    # The reply is checked on the SERVICE side log
    logHasKeysExactTimes $SERVICE_LOG "$REPLY_HELLO" 1
    local serviceReply=$testResult
    isProcessLive $client_pid
    local clientExit=$procLive
    if [ "$foundName" -eq 0 -a "$joinSession" -eq 0 ]; then
        # Client ping Hello and service reply
        if [ "$clientPing" -eq 0 -a "$serviceReply" -eq 0 ]; then
            testResults[totalTests]=0
            echo "Test result=Pass" >> $testResultFile
            echo "<p>Test result=Pass</p>" >> $testResultHtml
            if [ "$clientExit" -ne 0 ]; then
                echo "JavaSDKDocSecurityLogonClient still running!" >> $testProgressLog
            fi
        elif [ "$clientPing" -ne 0 ]; then
            mv $CLIENT_LOG $CLIENT_ERROR
            testResults[totalTests]=6
            echo "Test result=Fail(client ping miss)" >> $testResultFile
            echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">client ping miss</a>)</p>" >> $testResultHtml
        else
            mv $SERVICE_LOG $SERVICE_ERROR
            testResults[totalTests]=6
            echo "Test result=Fail(service reply miss)" >> $testResultFile
            echo "<p>Test result=Fail(<a class="error" href="\"$SERVICE_ERROR\"">service reply miss</a>)</p>" >> $testResultHtml
        fi
    elif [ "$foundName" -ne 0 ]; then
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(discovery)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">discovery</a>)</p>" >> $testResultHtml
    else
        mv $CLIENT_LOG $CLIENT_ERROR
        testResults[totalTests]=6
        echo "Test result=Fail(joinSession)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">joinSession</a>)</p>" >> $testResultHtml
    fi
    # Quit the service by interrupt (ctrl +c)
    kill -2 $service_pid
    if [ "$clientExit" -ne 0 ]; then
        kill $client_pid
    fi
    sleep 3
    # Increase test count
    totalTests=$(($totalTests + 1))
}
# Function name: rawServiceClient
# Description:
# rawservice and rawclient over TCP
# Parameter:
# buildVariant - release or debug
# Return: none
function rawServiceClient() {
    # Scenario: rawservice/rawclient exchange raw bytes over a TCP session;
    # the client must discover, join, read the alphabet payload and exit 0.
    # Records result in testResults[totalTests] (0 = pass, 6 = fail).
    local buildVariant=$1    # "release"/"rel" or "debug"/"dbg"
    # Initialize result to success
    testResults[totalTests]=0
    # Test case numbering starts from 1
    local testCase=$(($totalTests + 1))
    # Leave an empty line between tests in report
    echo >> $testResultFile
    echo "<br>" >> $testResultHtml
    echo "Test case=$testCase(rawservice/rawclient TCP)"
    echo "Test case=$testCase(rawservice/rawclient TCP)">> $testResultFile
    echo "<p>Test case=$testCase(rawservice/rawclient TCP)</p>">> $testResultHtml
    local CLIENT_LOG="rawclient_$buildVariant.txt"
    local SERVICE_LOG="rawservice_$buildVariant.txt"
    local SERVICE_PATH="$cppRelBinDir"
    local CLIENT_PATH="$cppRelBinDir"
    if [ "$buildVariant" == "debug" -o "$buildVariant" == "dbg" ]; then
        echo "Debug sample variant"
        SERVICE_PATH="$cppDbgBinDir"
        CLIENT_PATH="$cppDbgBinDir"
    fi
    # Kill any stale processes from a previous run
    killWinProcess "rawservice"
    killWinProcess "rawclient"
    # Launch rawservice in the background
    echo "Launch rawservice..."
    ${SERVICE_PATH}/rawservice.exe 2>$SERVICE_LOG 1>&2 &
    service_pid=$!
    # Wait
    sleep 2
    # Launch rawclient in the FOREGROUND; timeout caps it at 40 seconds
    echo "Launch rawclient..."
    timeout 40 ${CLIENT_PATH}/rawclient.exe 2>$CLIENT_LOG 1>&2
    # Expected log keys
    local FOUND_NAME="FoundAdvertisedName(name=org.alljoyn.raw_test, transport=0x4, prefix=org.alljoyn.raw_test)"
    local JOIN_SESSION="Session Joined with session id"
    local READ_CONTENT="Bytes: abcdefghijklmnopqrstuvwxyz"
    local EXIT_OK="rawclient exiting with status 0x0"
    # Check log to decide test result
    # (BUGFIX: the progress message was previously echoed twice)
    echo "Check client log..."
    logHasKeysExactTimes $CLIENT_LOG "$FOUND_NAME" 1
    local foundName=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$JOIN_SESSION" 1
    local joinSession=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$READ_CONTENT" 1
    local readOk=$testResult
    logHasKeysExactTimes $CLIENT_LOG "$EXIT_OK" 1
    local clientOk=$testResult
    if [ "$foundName" -eq 0 -a "$joinSession" -eq 0 -a "$readOk" -eq 0 -a "$clientOk" -eq 0 ]; then
        testResults[totalTests]=0
        echo "Test result=Pass" >> $testResultFile
        echo "<p>Test result=Pass</p>" >> $testResultHtml
    elif [ "$foundName" -ne 0 ]; then
        testResults[totalTests]=6
        echo "Test result=Fail(discovery)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_LOG\"">discovery</a>)</p>" >> $testResultHtml
    elif [ "$joinSession" -ne 0 ]; then
        testResults[totalTests]=6
        echo "Test result=Fail(joinsession)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_LOG\"">joinsession</a>)</p>" >> $testResultHtml
    elif [ "$readOk" -ne 0 ]; then
        testResults[totalTests]=6
        echo "Test result=Fail(read)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_LOG\"">read</a>)</p>" >> $testResultHtml
    else
        testResults[totalTests]=6
        echo "Test result=Fail(exit non-zero)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_LOG\"">exit non-zero</a>)</p>" >> $testResultHtml
    fi
    # BUGFIX: rawclient ran in the foreground and has already exited, so
    # there is no $client_pid here — only the background service needs cleanup.
    echo "Clean up service $service_pid ..."
    kill $service_pid
    # Wait 3 seconds till cleanup
    sleep 3
    # Increase test count
    totalTests=$(($totalTests + 1))
}
# Function name: runTcUnitTest
# Description:
# Thin client ajtcltest
# Parameter: None
# Return: none
function runTcUnitTest() {
    # Thin-client unit test (ajtcltest) run against a local bbservice;
    # skipped entirely (with a note in the detail log) when the ajtcltest
    # binary is not present. Records result in testResults[totalTests].
    local SERVICE_PATH="$cppRelBinDir"
    local UNIT_PATH="$tcUnitTestPath"
    local ajtclTest="${UNIT_PATH}/ajtcltest.exe"
    # ajtcltest.exe must exist to continue test
    if [ -e "$ajtclTest" ]; then
        # Initialize result to success
        testResults[totalTests]=0
        # Test case start from 1
        local testCase=$(($totalTests + 1))
        # Leave an empty line between tests in report
        echo >> $testResultFile
        echo "<br>" >> $testResultHtml
        echo "Test case=$testCase(ajtcltest)"
        echo "Test case=$testCase(ajtcltest)">> $testResultFile
        echo "<p>Test case=$testCase(ajtcltest)</p>">> $testResultHtml
        # Kill any stale processes from a previous run
        killWinProcess "bbservice"
        killWinProcess "ajtcltest"
        local UNIT_LOG="ajtcltest.txt"
        local SERVICE_LOG="bbservice_tc_unit.txt"
        local UNIT_ERROR="ajtcltest_error.txt"
        local SERVICE_ERROR="bbservice_tc_unit_error.txt"
        # Launch bbservice so Security tests can connect to it
        echo "Launch bbservice..."
        ${SERVICE_PATH}/bbservice.exe -n org.alljoyn.svclite 2>$SERVICE_LOG 1>&2 &
        local service_pid=$!
        # Wait
        sleep 1
        # Launch ajtcltest
        echo "Launch ajtcltest..."
        # ajtcltest SecurityTest has race condition: ASACORE-1848
        timeout 60 ${UNIT_PATH}/ajtcltest.exe --gtest_filter=-SecurityTest* 2>$UNIT_LOG 1>&2
        local testStatus=$?
        echo "ajtcltest exit with $testStatus"
        isProcessLive $service_pid
        local serviceLive=$procLive
        # bbservice should NOT crash while the unit test runs
        if [ "$serviceLive" -ne 0 ]; then
            if [ "$testStatus" -eq 0 ]; then
                testResults[totalTests]=0
                echo "Test result=Pass" >> $testResultFile
                echo "<p>Test result=Pass</p>" >> $testResultHtml
            else
                mv $UNIT_LOG $UNIT_ERROR
                testResults[totalTests]=6
                echo "Test result=Fail(exit $testStatus)" >> $testResultFile
                echo "<p>Test result=Fail(<a class="error" href="\"$UNIT_ERROR\"">log</a>)</p>" >> $testResultHtml
            fi
        else
            mv $SERVICE_LOG $SERVICE_ERROR
            testResults[totalTests]=6
            echo "Test result=Fail(bbservice crash)" >> $testResultFile
            echo "<p>Test result=Fail(<a class="error" href="\"$SERVICE_ERROR\"">bbservice crash</a>)</p>" >> $testResultHtml
        fi
        kill $service_pid
        # Wait 3 seconds till cleanup
        sleep 3
        # Increase test count
        totalTests=$(($totalTests + 1))
    else
        echo "$ajtclTest is missing, skip test!" >> $testDetailLog
    fi
}
# Function name: runTcScTest
# Description:
# Thin client Standard client interaction tests - ajtcsctest
# Parameter: None
# Return: none
function runTcScTest() {
    # Thin-client / standard-client interaction unit test (ajtcsctest).
    # Skips with a note in the detail log when the binary is not available;
    # otherwise records the result in testResults[totalTests].
    local testBinary="${scToolsPath}/ajtcsctest.exe"
    # Guard clause: the tools folder and the binary must both exist
    if [ ! -d "$scToolsPath" ] || [ ! -e "$testBinary" ]; then
        echo "$testBinary is missing, skip test!" >> $testDetailLog
        return
    fi
    # Result defaults to pass until proven otherwise
    testResults[totalTests]=0
    # Test cases are numbered from 1
    local caseNum=$(($totalTests + 1))
    # Blank separator line between entries in the reports
    echo >> $testResultFile
    echo "<br>" >> $testResultHtml
    echo "Test case=$caseNum(ajtcsctest)"
    echo "Test case=$caseNum(ajtcsctest)">> $testResultFile
    echo "<p>Test case=$caseNum(ajtcsctest)</p>">> $testResultHtml
    # Make sure no stale instance is still running
    killWinProcess "ajtcsctest"
    local runLog="ajtcsctest.txt"
    local errLog="ajtcsctest_error.txt"
    echo "Launch ajtcsctest..."
    # Hard cap on runtime in case the test suite hangs
    timeout 720 $testBinary 2>$runLog 1>&2
    local exitCode=$?
    echo "ajtcsctest exit with $exitCode"
    if [ "$exitCode" -ne 0 ]; then
        # Non-zero exit: keep the log under an *_error name for archiving
        mv $runLog $errLog
        testResults[totalTests]=6
        echo "Test result=Fail(exit $exitCode)" >> $testResultFile
        echo "<p>Test result=Fail(<a class="error" href="\"$errLog\"">log</a>)</p>" >> $testResultHtml
    else
        testResults[totalTests]=0
        echo "Test result=Pass" >> $testResultFile
        echo "<p>Test result=Pass</p>" >> $testResultHtml
    fi
    # One more test executed
    totalTests=$(($totalTests + 1))
}
# Function name: runTcScAuthTest
# Description:
# Thin client talks to standard client locally with ECDHE_NULL, ECDHE_PSK or ECDHE_ECDSA
# Parameter:
# AUTH - ECDHE_NULL, ECDHE_PSK or ECDHE_ECDSA
# Return: none
function runTcScAuthTest() {
    # Thin-client service (svclite) talks to standard-client bbclient locally
    # through the TrustedTLSampleRN router, authenticating with the given
    # ECDHE mechanism. Skips (with a detail-log note) when either binary is
    # absent; otherwise records the result in testResults[totalTests].
    local AUTH=$1
    local SERVICE_PATH="$tcTestPath"
    local CLIENT_PATH="$cppRelBinDir"
    local DAEMON_PATH="${cppRelBinDir}/samples"
    local trustedTLSampleRN="${DAEMON_PATH}/TrustedTLSampleRN.exe"
    local svclite="${SERVICE_PATH}/svclite.exe"
    # SC TrustedTLSampleRN and svclite must exist to continue test
    if [ -x "$trustedTLSampleRN" -a -x "$svclite" ]; then
        # Initialize result to success
        testResults[totalTests]=0
        # Test case start from 1
        local testCase=$(($totalTests + 1))
        # Leave an empty line between tests in report
        echo >> $testResultFile
        echo "<br>" >> $testResultHtml
        echo "Test case=$testCase(ATL service to ASL client authentication $AUTH)"
        echo "Test case=$testCase(ATL service to ASL client authentication $AUTH)">> $testResultFile
        echo "<p>Test case=$testCase(ATL service to ASL client authentication $AUTH)</p>">> $testResultHtml
        # Kill any stale processes from a previous run
        killWinProcess "TrustedTLSampleRN"
        killWinProcess "svclite"
        killWinProcess "bbclient"
        local CLIENT_LOG="bbclient_${AUTH}.txt"
        local SERVICE_LOG="svclite_${AUTH}.txt"
        local DAEMON_LOG="trustedTLSampleRN_${AUTH}.log"
        local CLIENT_ERROR="bbclient_${AUTH}_error.txt"
        local SERVICE_ERROR="svclite_${AUTH}_error.txt"
        local DAEMON_ERROR="trustedTLSampleRN_${AUTH}_error.txt"
        # Launch the router the thin client will attach to
        echo "Launch TrustedTLSampleRN..."
        ${trustedTLSampleRN} 2>$DAEMON_LOG 1>&2 &
        local daemon_pid=$!
        # Wait
        sleep 1
        # Launch svclite
        echo "Launch svclite..."
        ${svclite} 2>$SERVICE_LOG 1>&2 &
        local svclite_pid=$!
        # Wait
        sleep 1
        # Launch bbclient in the foreground with the requested key exchange
        echo "Launch bbclient..."
        timeout 40 ${CLIENT_PATH}/bbclient.exe -d -n org.alljoyn.svclite -c 100 -ek ${AUTH} 2>$CLIENT_LOG 1>&2
        isProcessLive $daemon_pid
        local daemonLive=$procLive
        isProcessLive $svclite_pid
        local svcliteLive=$procLive
        # TrustedTLSampleRN and svclite should NOT crash
        if [ "$daemonLive" -ne 0 -a "$svcliteLive" -ne 0 ]; then
            # Check log to decide test result
            echo "Check client log..."
            # Keys must match bbclient output verbatim — the "succesful"
            # spelling and the trailing space in EXIT_OK are intentional
            local AUTH_OK="Authentication ALLJOYN_${AUTH} succesful"
            local EXIT_OK="bbclient exiting with status 0 "
            logHasKeysExactTimes $CLIENT_LOG "$AUTH_OK" 1
            local authStatus=$testResult
            logHasKeysExactTimes $CLIENT_LOG "$EXIT_OK" 1
            local exitStatus=$testResult
            if [ "$authStatus" -eq 0 ]; then
                if [ "$exitStatus" -eq 0 ]; then
                    testResults[totalTests]=0
                    echo "Test result=Pass" >> $testResultFile
                    echo "<p>Test result=Pass</p>" >> $testResultHtml
                else
                    mv ${CLIENT_LOG} ${CLIENT_ERROR}
                    testResults[totalTests]=6
                    echo "Test result=Fail(exit non-zero)" >> $testResultFile
                    echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">exit non-zero</a>)</p>" >> $testResultHtml
                fi
            else
                mv ${CLIENT_LOG} ${CLIENT_ERROR}
                testResults[totalTests]=6
                echo "Test result=Fail(Authentication)" >> $testResultFile
                echo "<p>Test result=Fail(<a class="error" href="\"$CLIENT_ERROR\"">Authentication</a>)</p>" >> $testResultHtml
            fi
        elif [ "$daemonLive" -eq 0 ]; then
            mv ${DAEMON_LOG} ${DAEMON_ERROR}
            testResults[totalTests]=6
            echo "Test result=Fail(TrustedTLSampleRN crash)" >> $testResultFile
            echo "<p>Test result=Fail(<a class="error" href="\"$DAEMON_ERROR\"">TrustedTLSampleRN crash</a>)</p>" >> $testResultHtml
        else
            mv ${SERVICE_LOG} ${SERVICE_ERROR}
            testResults[totalTests]=6
            echo "Test result=Fail(svclite crash)" >> $testResultFile
            echo "<p>Test result=Fail(<a class="error" href="\"$SERVICE_ERROR\"">svclite crash</a>)</p>" >> $testResultHtml
        fi
        echo "Clean up daemon $daemon_pid and svclite $svclite_pid ..."
        kill $daemon_pid
        kill $svclite_pid
        # Wait 3 seconds till cleanup
        sleep 3
        # Increase test count
        totalTests=$(($totalTests + 1))
    # svclite.exe is missing
    elif [ -x "$trustedTLSampleRN" ]; then
        echo "$svclite not exist or not executable, skip test!" >> $testDetailLog
    else
        echo "$trustedTLSampleRN not exist or not executable, skip test!" >> $testDetailLog
    fi
}
# Function name: runTcTests
# Description: thin client tests
# Parameter: none
# Return: none
# Function name: runTcTests
# Description: thin client (TC) test suite driver. Runs the TC/SC
#              interaction test, the TC unit test, and the TC<->SC
#              authentication tests for each ECDHE suite, skipping any
#              group whose prerequisite tooling was not found.
#              Assumes the *Missing flags (0 = present) were set during
#              environment setup -- TODO confirm in SetUpGlobalVariables.
# Parameter: none
# Return: none
function runTcTests() {
echo "Thin client related tests"
# run tc/sc interaction unit test when required folder exist
if [ "$scToolsMissing" -eq 0 ]; then
# Clean old keystore so previous bad keystore won't impact current test
cleanKeyStore
runTcScTest
fi
# run tc unit test when required folder exist
if [ "$tcUnitTestMissing" -eq 0 ]; then
# Clean old keystore so previous bad keystore won't impact current test
cleanKeyStore
runTcUnitTest
fi
# run tc test when required folder exist
if [ "$tcTestMissing" -eq 0 ]; then
# One auth test per key-exchange suite; clean the keystore before each
# so leftover credentials cannot influence the next negotiation.
cleanKeyStore
runTcScAuthTest "ECDHE_NULL"
cleanKeyStore
runTcScAuthTest "ECDHE_PSK"
cleanKeyStore
runTcScAuthTest "ECDHE_ECDSA"
fi
}
# Function name: runCppTests
# Description: cpp tests
# Parameter: none
# Return: none
# Function name: runCppTests
# Description: standard (C++) client test suite driver. Each helper runs
#              one service/client pair in the given build flavor
#              ("release" or "debug").
# Parameter: none
# Return: none
function runCppTests() {
# Standard client security tests
ScSecurityTests
# rawservice/rawclient
rawServiceClient "release"
# basic_service/basic_client test
basicServiceClient "release"
# bbservice/bbclient test
bbServiceClient "release"
# Enable debug tests after vs2013 express installed
bbServiceClient "debug"
# FIXME: cygwin buffers output lostAdvertisedName test
# lostAdvertisedName "release"
}
# Function name: runJavaTests
# Description: java binding tests
# Parameter: none
# Return: none
# Function name: runJavaTests
# Description: Java binding test suite driver: method call, property,
#              signal (release + debug) and security-logon samples, all
#              over TCP. The keystore is cleaned before the security
#              logon test so stale credentials cannot interfere.
# Parameter: none
# Return: none
function runJavaTests() {
echo "Java binding tests..."
# JavaSDKDocMethod release/debug over TCP
# FIXME: discovery not working sometimes
javaMethodCall "release"
# JavaSDKDocProperties over TCP
javaProperty "release"
# JavaSDKDocSignal over TCP
javaSignal "release"
javaSignal "debug"
# JavaSDKDocSecurityLogon over TCP
cleanKeyStore
javaSecurityLogon "release"
}
# Function name: runAndroidIntTests
# Description: Windows-Android interop tests
# Parameter: none
# Return: none
# Function name: runAndroidIntTests
# Description: Windows <-> Android interop tests. Requires the Android
#              SDK plus adb, exactly one connected device, and the
#              device on the same WLAN as the Windows host; binaries are
#              pushed to the device before the service/client pairings
#              and NGNS discovery tests run. Each precondition failure
#              prints a reason and aborts the whole group.
# Parameter: none
# Return: none
function runAndroidIntTests() {
echo "Windows-Android interop tests..."
# Check android sdk exist and adb ready
# NOTE(review): test(1) '-a' is obsolescent; '[ A ] && [ B ]' is preferred.
if [ "$androidSdkMissing" -eq 0 -a "$adbMissing" -eq 0 ]; then
checkAndroidCnt
# Only one android device is connected
if [ "$androidCnt" -eq 1 ]; then
# Get windows host wlan ip
getWinWlanIPAddr
# Get android ip address
getAndroidIpAddr
# Make sure android and windows on same network by ping
winAndroidSameNet "$androidIp" "$winIp"
if [ "$sameNetwork" -eq 1 ]; then
# Push test binaries to android
loadTestToAndroid
if [ "$loadSuccess" -eq 0 ]; then
# Start win-bbservice/android-bbclient test
# 2 seconds between bbservice and bbclient
runWinService1 "com.iop" 2 "Win bbservice/Android bbClient"
# Start win-bbclient/android-bbservice test
runWinClient1 "gov.iopa" 2 "Android bbservice/Win7 bbClient"
# Start NGNS inter-op tests
# NOTE(review): "Win7 iscovery" below looks like a typo for "discovery",
# but the labels may be keyed on by downstream log parsers -- confirm
# before changing them.
runWinService1 "org.ngns.unicast.response1" 45 "SCLNGNS-INTEROP-1 Android discovery Unicast response"
runWinService2 "org.ngns.unsolicited.multicast1" 45 "SCLNGNS-INTEROP-2 Android discovery multicast advertise"
runWinClient1 "org.ngns.unicast.response2" 45 "SCLNGNS-INTEROP-3 Win7 iscovery Unicast response"
runWinClient2 "org.ngns.unsolicited.multicast2" 45 "SCLNGNS-INTEROP-4 Win7 discovery multicast advertise"
else
echo "Load binaries to android fail, quit tests!"
fi
else
echo "Windows-Android NOT on same wlan, quit tests!"
fi
else
echo "No android or more than one android connected, quit tests!"
fi
else
echo "Android sdk or adb is missing, quit tests!"
fi
}
# --- Main -------------------------------------------------------------------
# Prepare a clean run, emit the report headers (plain text + HTML), record
# the commit ids from the SDK manifest, then run the enabled suites.
# Clean up old logs
rm -rf *.html *.error *.log *.txt 2>/dev/null 1>&2
#parse input argument
parseCmdArgs $*
SetUpGlobalVariables
echo "Test started..." >> $testDetailLog
#Print test report summary common to all tests
testDate=`eval date +%Y%m%d`
echo "Test date=$testDate" > $testResultFile
# Html format
echo "<html> <style> a.error {color:red} a.info {color:green} </style>" > $testResultHtml
echo "<body>" >> $testResultHtml
echo "<h3>Test date=$testDate </h3>" >> $testResultHtml
# Get core,tc and android commit id ref from sdk manifest.txt
getCommitIds
# NOTE(review): in the echoes below the inner double quotes around
# class/href terminate the outer quoting, so the generated attribute
# quoting is irregular (browsers tolerate it) -- confirm before relying
# on the HTML elsewhere.
if [ "$scCommitId" != "${UNKNOWN_ID}" ]; then
createCommitIdUrl ${SC_COMMIT_TYPE} "$scCommitId"
echo "Windows commit id=$scCommitId" >> $testResultFile
echo "<p>Windows commit id=<a class="info" href="\"$commitUrl\"">$scCommitId</a></p>" >> $testResultHtml
fi
if [ "$tcCommitId" != "${UNKNOWN_ID}" ]; then
createCommitIdUrl ${TC_COMMIT_TYPE} "$tcCommitId"
echo "TC SDK commit id=$tcCommitId" >> $testResultFile
echo "<p>TC SDK commit id=<a class="info" href="\"$commitUrl\"">$tcCommitId</a></p>" >> $testResultHtml
fi
if [ "$androidCommitId" != "${UNKNOWN_ID}" ]; then
# Android SDK should have same prefix as SC core
createCommitIdUrl ${SC_COMMIT_TYPE} "$androidCommitId"
echo "Android SDK commit id=$androidCommitId" >> $testResultFile
echo "<p>Android SDK commit id=<a class="info" href="\"$commitUrl\"">$androidCommitId</a></p>" >> $testResultHtml
fi
echo "......" >> $testResultFile
echo "<p>......</p>" >> $testResultHtml
# Result counters; testResults[] is filled in by the individual tests.
totalTests=0
passedTest=0
failedTest=0
blockedTest=0
# Change all cpp test and sample applications to executable
chmod 777 $cppRelSampleDir/*
chmod 777 $cppRelBinDir/*
chmod 777 $cppDbgSampleDir/*
chmod 777 $cppDbgBinDir/*
chmod 777 $tcTestPath/*
chmod 777 $tcUnitTestPath/*
# Run thin client tests
#runTcTests
runCppTests
# FIXME: ASACORE-1945 name conflict cause random failure
#runJavaTests
# Run Windows-android inter-operation tests
#runAndroidIntTests
echo "......" >> $testResultFile
echo "Total tests=$totalTests" >> $testResultFile
echo "<p>......</p>" >> $testResultHtml
echo "<p>Total tests=$totalTests</p>" >> $testResultHtml
# Tally per-test results collected in testResults[]: 0 = pass, 6 = fail,
# any other value counts as blocked. Uses shell arithmetic instead of
# spawning an `expr` subprocess per increment.
for (( testCase=0; testCase<$totalTests; testCase++))
do
currentResult=${testResults[testCase]}
if [ "$currentResult" -eq 0 ]; then
passedTest=$((passedTest + 1))
elif [ "$currentResult" -eq 6 ]; then
failedTest=$((failedTest + 1))
else
blockedTest=$((blockedTest + 1))
fi
done
echo "Test passed=$passedTest" >> $testResultFile
echo "Test failed=$failedTest" >> $testResultFile
echo "Test blocked=$blockedTest" >> $testResultFile
echo "<p>Test passed=$passedTest</p>" >> $testResultHtml
echo "<p>Test failed=$failedTest</p>" >> $testResultHtml
echo "<p>Test blocked=$blockedTest</p>" >> $testResultHtml
echo "</body>" >> $testResultHtml
echo "</html>" >> $testResultHtml
echo "Test complete, passed $passedTest failed $failedTest, check $testResultFile"
# Exit 0 only when no test failed; blocked tests do not fail the run.
if [ "$failedTest" -eq 0 ]; then
exit 0
else
exit 3
fi
19a2dba0d4f91daa9a388afcf198d1fc95327852 | Shell | jimmy42/grml-etc | /etc/init.d/firewall | UTF-8 | 7,665 | 3.34375 | 3 | [] | no_license | #!/bin/sh
# Filename: /etc/init.d/firewall
# Purpose: simple [example] configuration script for iptables
# Authors: grml-team (grml.org), (c) Michael Prokop <mika@grml.org>
# Bug-Reports: see http://grml.org/bugs/
# License: This file is licensed under the GPL v2.
# Latest change: Don Jul 20 09:53:31 CEST 2006 [mika]
################################################################################
### BEGIN INIT INFO
# Provides: firewall
# Required-Start: $remote_fs $network
# Required-Stop: $remote_fs $network
# Default-Start: S 2 3 4 5
# Default-Stop:
### END INIT INFO
# Force a stable C locale so tool output below is predictable.
LANG=C
LC_ALL=C
IPTABLES="iptables"
# Use grml's pretty status helpers when available; otherwise fall back to
# plain echo.
# NOTE(review): aliases are not expanded in non-interactive bash unless
# 'shopt -s expand_aliases' is set -- verify einfo/eend still resolve
# when /etc/grml/lsb-functions is absent.
if [ -r /etc/grml/lsb-functions ] ; then
source /etc/grml/lsb-functions
else
alias einfo='echo -n'
alias eend='echo '
fi
# IFACE='eth0'
# IFACE=$(ifconfig -a | awk '/^ppp/ {print $1}')
# IPADDR=$(ifconfig "$IFACE" | awk -F: /"inet addr"/'{print $2}' | gawk '{print $1}')
# NETMASK=$(ifconfig "$IFACE" | awk -F: /"Mask"/'{print $4}' | gawk '{print $1}')
# BROADCAST=$(ifconfig "$IFACE" | awk -F: /"inet"/'{print $3}' | gawk '{print $1}')
# LOOPBACK='127.0.0.0/8'
###################################################################################
# Build the ruleset: default-deny inbound, allow loopback/established/SSH,
# rate-limit new SYNs, and funnel everything else through the logging
# chains (INPUTLOG/OUTPUTLOG) where it is logged once per second at most
# and then rejected. Uses the globals $IPTABLES, einfo and eend.
startup(){
einfo "Starting firewall."
# Remove all chains
$IPTABLES -F
$IPTABLES -X
$IPTABLES -Z
# Set up a default policy for the built-in chains. -> DROP
$IPTABLES -P INPUT DROP
$IPTABLES -P OUTPUT DROP
$IPTABLES -P FORWARD DROP
# allow all already established connections
$IPTABLES -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# Allow unlimited traffic on the loopback interface.
$IPTABLES -A INPUT -i lo -j ACCEPT
$IPTABLES -A OUTPUT -o lo -j ACCEPT
# syn-flooding protection: at most 5 new SYNs/s (burst 10) continue
# normal processing; the excess is rejected.
$IPTABLES -N syn-flood
$IPTABLES -A INPUT -p tcp --syn -j syn-flood
$IPTABLES -A syn-flood -m limit --limit 5/s --limit-burst 10 -j RETURN
$IPTABLES -A syn-flood -j REJECT
# Make sure, NEW TCP Connections are SYN packets
$IPTABLES -A INPUT -p tcp ! --syn -m state --state NEW -j DROP
# Refuse broadcast address packets.
# $IPTABLES -A INPUT -d $BROADCAST -j DROP
$IPTABLES -A INPUT -s 0.0.0.0 -d 255.255.255.255 -j DROP
# AUTH server: Reject ident probes with a tcp reset.
# This may be usefull for a broken mailhost that won't accept the
# mails if you just drop its ident probe.
# $IPTABLES -A INPUT -i $IFACE -p tcp --dport 113 -j REJECT --reject-with tcp-reset
# allow *all* output - simplifies life and keeps load low ;-)
$IPTABLES -A OUTPUT -j ACCEPT
# example for NAT/MASQUERADE (eth0: lan; eth1: to ppp0; ppp0: external):
#
# echo 1 > /proc/sys/net/ipv4/ip_forward
# or
# put 'ip_forward=yes' to /etc/network/options
#
# $IPTABLES -A INPUT -i eth1 -s 192.168.0.2 -d 192.168.0.1 -j ACCEPT
# $IPTABLES -A INPUT -i eth1 -s 192.168.0.150 -d 192.168.0.1 -j ACCEPT
# $IPTABLES -t nat -A POSTROUTING -s 192.168.0.0/24 -o ppp0 -j MASQUERADE
# $IPTABLES -A FORWARD -i eth1 -o ppp0 -s 192.168.0.0/24 -d $IP_OF_ETH1 -j ACCEPT
# $IPTABLES -A FORWARD -m state --state ESTABLISHED,RELATED -j ACCEPT
# $IPTABLES -A FORWARD -i ppp0 -o eth1 -d $IP_OF_PPP0 -j ACCEPT
# $IPTABLES -A FORWARD -j LOG --log-prefix "$LOGID ERROR in FORWARD: "
# $IPTABLES -A FORWARD -j DROP
# example for Source Network Address Translation (SNAT):
# the strict way:
# $IPTABLES -t nat -A POSTROUTING -o ppp0 -j SNAT --to $PPPIP
# the liberal way:
# $IPTABLES -t nat -A POSTROUTING -o ppp0 -j MASQUERADE
# example for DNAT:
# $IPTABLES -t nat -A PREROUTING -d 10.0.0.1 -j DNAT --to-destination 192.168.0.1
# $IPTABLES -t nat -A PREROUTING -d 10.0.0.2 -j DNAT --to-destination 192.168.0.2
# allow ssh incoming
$IPTABLES -A INPUT -p tcp --dport 22 -m state --state NEW -j ACCEPT
# create new chains
$IPTABLES -N INPUTLOG
$IPTABLES -N OUTPUTLOG
$IPTABLES -A INPUT -m limit --limit 1/second --limit-burst 5 -j INPUTLOG
# Fixed: this rule was previously appended to the INPUT chain, so excess
# *inbound* packets were logged with the "-OUT" prefixes. OUTPUTLOG
# belongs on the OUTPUT chain. (With the unconditional OUTPUT ACCEPT
# above, this rule only takes effect if that rule is removed.)
$IPTABLES -A OUTPUT -m limit --limit 1/second --limit-burst 5 -j OUTPUTLOG
# Any udp not already allowed is logged and then dropped.
$IPTABLES -A INPUTLOG -p udp -j LOG --log-prefix "IPTABLES UDP-IN: "
$IPTABLES -A INPUTLOG -p udp -j REJECT
$IPTABLES -A OUTPUTLOG -p udp -j LOG --log-prefix "IPTABLES UDP-OUT: "
$IPTABLES -A OUTPUTLOG -p udp -j REJECT
# Any icmp not already allowed is logged and then dropped.
$IPTABLES -A INPUTLOG -p icmp -j LOG --log-prefix "IPTABLES ICMP-IN: "
$IPTABLES -A INPUTLOG -p icmp -j REJECT
$IPTABLES -A OUTPUTLOG -p icmp -j LOG --log-prefix "IPTABLES ICMP-OUT: "
$IPTABLES -A OUTPUTLOG -p icmp -j REJECT
# Any tcp not already allowed is logged and then dropped.
$IPTABLES -A INPUTLOG -p tcp -j LOG --log-prefix "IPTABLES TCP-IN: "
$IPTABLES -A INPUTLOG -p tcp -j REJECT
$IPTABLES -A OUTPUTLOG -p tcp -j LOG --log-prefix "IPTABLES TCP-OUT: "
$IPTABLES -A OUTPUTLOG -p tcp -j REJECT
# Anything else not already allowed is logged and then dropped.
# It will be dropped by the default policy anyway... but let's be paranoid.
$IPTABLES -A INPUTLOG -j LOG --log-prefix "IPTABLES PROTOCOL-X-IN: "
$IPTABLES -A INPUTLOG -j REJECT
$IPTABLES -A OUTPUTLOG -j LOG --log-prefix "IPTABLES PROTOCOL-X-OUT: "
$IPTABLES -A OUTPUTLOG -j REJECT
# end of script
eend $?
}
###################################################################################
# Command dispatcher for the init script: start|stop|restart|panic|status|analyse.
case "$1" in
stop)
# Flush every table, open all default policies, delete custom chains.
einfo "Shutting down Firewall."
$IPTABLES -F
$IPTABLES -t nat -F
$IPTABLES -t mangle -F
$IPTABLES -t filter -F
$IPTABLES -P INPUT ACCEPT
$IPTABLES -P OUTPUT ACCEPT
$IPTABLES -P FORWARD ACCEPT
$IPTABLES -X
eend $?
;;
panic)
# Emergency lockdown: flush everything and DROP all traffic.
einfo "Setting Firewall to modus panic."
$IPTABLES -F
$IPTABLES -t nat -F
$IPTABLES -t mangle -F
$IPTABLES -t filter -F
$IPTABLES -P INPUT DROP
$IPTABLES -P OUTPUT DROP
$IPTABLES -P FORWARD DROP
$IPTABLES -X
eend $?
;;
status)
$IPTABLES -L -n -v
;;
restart)
$0 stop
$0 start
;;
analyse)
# Dump a full diagnostic snapshot: identity, network interfaces, routes,
# the active ruleset, kernel net sysctls and loaded ip* modules.
echo "------------------------------------------------------------------------------------"
echo "Program: $0 $(date)"
echo "PID: $$ grml-team [mika] (c) 2004++"
echo "$(iptables --version)"
echo "Identity: whoami: $(whoami)"
echo " id: $(id)"
echo " groups: $(groups)"
echo "Uptime: $(uptime)"
echo "------------------------------------------------------------------------------------"
echo "$(vmstat)"
echo "------------------------------------------------------------------------------------"
echo "# ifconfig -a"
ifconfig -a
echo "------------------------------------------------------------------------------------"
echo "# route -n"
route -n
echo "------------------------------------------------------------------------------------"
echo "# ip a s"
ip a s
echo "------------------------------------------------------------------------------------"
echo "# $IPTABLES -L -n -v"
$IPTABLES -L -n -v
echo "------------------------------------------------------------------------------------"
echo 'for i in /proc/sys/net/*/*; do echo -n "$i: " ; cat $i; done 2>/dev/null'
for i in /proc/sys/net/*/*; do
echo -n "$i: "
cat $i;
done 2>/dev/null
echo "------------------------------------------------------------------------------------"
echo "# lsmod | grep '^ip'"
lsmod | grep '^ip'
;;
start)
startup
;;
*)
echo "Usage: $0 [start|stop|restart|panic|status|analyse]";
exit 1;
;;
esac
## END OF FILE #################################################################
| true |
c556c07f3dec9e0f3f38b1f81b7c7c2f12d9265b | Shell | denisidoro/dotfiles | /shell/themes/prompt_dns_setup | UTF-8 | 1,263 | 3.5 | 4 | [] | no_license | #!/usr/bin/env bash
# Prompt glyphs and colors for the "dns" zsh prompt theme.
FIRST_CHARACTER_OK="♪"
# The error state reuses the same glyph; only its color differs (red).
FIRST_CHARACTER_KO="$FIRST_CHARACTER_OK"
# zsh %F color names, braces included so they concatenate as %F{cyan} etc.
PROMPT_COLOR_0="{cyan}"
PROMPT_COLOR_1="{green}"
# Git markers: behind/ahead/diverged relative to upstream, dirty/clean.
AHEAD="⇣"
BEHIND="⇡"
DIVERGED="⥄"
DIRTY="✗"
NONE="✓"
# Print the current directory (with $HOME abbreviated to ~) in color,
# followed by a newline; invoked from inside PS1 via $(...).
prompt_dns_pwd() {
prompt_dir="${PWD/$HOME/\~}"
print -n "%F${PROMPT_COLOR_0}${prompt_dir}"$'\n'
}
# Print zim git-info's prompt fragment, if git-info produced one.
prompt_dns_git() {
[[ -n ${git_info} ]] && print -n "${(e)git_info[prompt]}"
}
# precmd hook: refresh zim's git-info before every prompt, when available.
prompt_dns_precmd() {
(( ${+functions[git-info]} )) && git-info
}
# Install the theme: register the precmd hook, configure zim git-info
# styles, and assemble the two-line PS1 (cwd + git info, then a status
# glyph that is green on success and red when the last command failed).
prompt_dns_setup() {
local prompt_dns_status="%(?:%F${PROMPT_COLOR_1}$FIRST_CHARACTER_OK:%F{red}$FIRST_CHARACTER_KO)%F{reset}"
autoload -Uz add-zsh-hook && add-zsh-hook precmd prompt_dns_precmd
prompt_opts=(cr percent sp subst)
zstyle ':zim:git-info:branch' format "%F${PROMPT_COLOR_1}%b"
zstyle ':zim:git-info:commit' format '%c'
# DOT_PREVENT_DIRTY=true (the default) suppresses the clean/dirty marker;
# presumably because computing it is slow in large repos -- TODO confirm.
if ! ${DOT_PREVENT_DIRTY:-true}; then
zstyle ':zim:git-info:clean' format '%F{white}$NONE'
zstyle ':zim:git-info:dirty' format '%F{yellow}$DIRTY'
else
zstyle ':zim:git-info:clean' format ''
zstyle ':zim:git-info:dirty' format ''
fi
zstyle ':zim:git-info:keys' format 'prompt' ' %F{cyan}%b%c %C%D'
PS1=$'\n'"\$(prompt_dns_pwd)\$(prompt_dns_git)%f"$'\n'"${prompt_dns_status} "
RPS1=''
}
prompt_dns_setup "${@}"
| true |
892c41cc1c4b2f530ea3d0f676705f77c3da841f | Shell | mvshmakov/dotfiles | /bin/.local/bin/fake_scan_pdf | UTF-8 | 409 | 3.078125 | 3 | [
"WTFPL"
] | permissive | #!/usr/bin/env sh
#
# Creates a "scanned version of the PDF
# Usage: fake_scan_pdf original.pdf fake.pdf
#
# Source: https://news.ycombinator.com/item?id=35132018
set -eu
IFS=$(printf '\n\t')

# Require both the input and output paths documented in the usage header;
# with set -u a missing "$1"/"$2" would otherwise abort less helpfully.
if [ "$#" -ne 2 ]; then
    echo "Usage: $(basename "$0") original.pdf fake.pdf" >&2
    exit 1
fi

# Random rotation: random sign plus a magnitude drawn from 0.05..0.5 in
# steps of 0.05. Fixed: the original quoted "$(seq 0.05 .5)", collapsing
# the whole seq output into a single shuf candidate -- and `seq 0.05 .5`
# steps by 1, so only 0.05 was ever produced.
ROTATION=$(shuf -n 1 -e '-' '')$(seq 0.05 0.05 0.5 | shuf -n 1)

convert -density 150 "$1" \
    -linear-stretch '1.5%x2%' \
    -rotate "$ROTATION" \
    -attenuate '0.01' \
    +noise Multiplicative \
    -colorspace 'gray' "$2"
| true |
d7539ece79ba977b05625145361473ed7e5cd41a | Shell | eriq-augustine/skeletons | /scripts/volume | UTF-8 | 363 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# Set every PulseAudio sink to the requested volume (e.g. "50%" or "+5%").
function main() {
    if [[ $# -ne 1 ]] ; then
        echo "USAGE: $0 <volume>"
        exit 1
    fi
    trap exit SIGINT
    set -e

    local volume=$1
    local sink_index
    # First tab-separated field of `pactl list short sinks` is the index.
    while read -r sink_index _ ; do
        pactl set-sink-volume "${sink_index}" "${volume}"
    done < <(pactl list short sinks)
}

# Run only when executed directly, not when sourced.
[[ "${BASH_SOURCE[0]}" == "${0}" ]] && main "$@"
| true |
cbd972ca45b3fd89450cd3deb0b832b0be3910e5 | Shell | csouls/dotfiles | /setup/setup_mac.sh | UTF-8 | 442 | 3.109375 | 3 | [] | no_license | #!/bin/bash
set -u
# Directory this script lives in, resolved to an absolute path.
# NOTE(review): $(dirname $0) is unquoted; breaks if the path has spaces.
SCRIPT_DIR=$(cd $(dirname $0); pwd)
# homebrew
# Refuse to continue without Homebrew (Apple Silicon path), then load its
# shell environment for the rest of the run.
if [ ! -e '/opt/homebrew/bin/brew' ]; then
echo "Please install Homebrew from https://brew.sh/"
exit 1
fi
eval "$(/opt/homebrew/bin/brew shellenv)"
sh ${SCRIPT_DIR}/mac/homebrew/install.sh
# install cargo packages
sh ${SCRIPT_DIR}/mac/cargo/install.sh
# post processing
# Ensure the per-user LaunchAgents dir exists, then run every top-level
# *.sh under setup/mac (launchd agents, defaults tweaks, etc.).
mkdir -p ~/Library/LaunchAgents
find ${SCRIPT_DIR}/mac -name "*.sh" -maxdepth 1 | xargs -I S sh S
| true |
a01ef8e929b6875423d0745d54d0531a9f671c67 | Shell | iamezcua-dev/global-marketing-worldbank-report | /bin/cleanup.sh | UTF-8 | 2,135 | 3.96875 | 4 | [] | no_license | #!/bin/bash
echo ' _____________________________________________________ '
echo ' | | '
echo ' _______ | | '
echo ' / _____ | | AutoScheduler | '
echo ' / /(__) || | | '
echo ' ________/ / |OO| || | | '
echo ' | |-------|| | | '
echo '(| | -.|| |_______________________ | '
echo ' | ____ \ ||_________||____________ | ____ ____ | '
echo '/| / __ \ |______|| / __ \ / __ \ | | / __ \ / __ \ |\'
echo '\|| / \ |_______________| / \ |_| / \ |__| |___________| / \ |__| / \|_|/'
echo ' | () | | () | | () | | () | | () | '
echo ' \__/ \__/ \__/ \__/ \__/ '
echo ' '
LOG_DIR="logs/"
safelyWipeableFiles() {
echo "[$(date)] Deleting generated target folder and both, local SBT installation and their zip source package ..."
rm -rf sbt.zip target/ sbt/ *.db
}
showHelp() {
echo "Usage:"
printf "%8s%-25s - %s\n" "" "$0 [--also-logs]" 'Deletes sbt.zip file and target/ and sbt/ folders.'
printf "%8s%-25s - %s\n\n" "" "" "Optionally, delete generated logs folder if the \`--also-logs\` flag is provided"
printf "%8s%-25s - %s\n" "" "$0 --help" "Shows this help message"
}
if [ "$#" -eq 0 ]; then # Parameterless
safelyWipeableFiles
elif [ "$#" -eq 1 ]; then
if [ "$1" = "--help" ]; then # Show help
showHelp
elif [ "$1" = "--also-logs" ]; then # Also logs ...
safelyWipeableFiles
echo "[$(date)] Also deleting generated logs ..."
rm -rf "$LOG_DIR"
else
echo "Unrecognized option \"$1\""
showHelp
exit 1
fi
fi
echo "[$(date)] Done"'!'
| true |
efaa9fa6b742ee47fae1ca737b6966a30a906356 | Shell | imma/chalice | /script/version | UTF-8 | 530 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Report the installed chalice / httpie / http-prompt versions as a JSON
# object on stdout; a missing tool yields an empty string for its key.
version() {
  local script_home
  script_home="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
  source "$script_home/script/profile"
  local v_chalice v_httpie v_http_prompt
  v_chalice="$(chalice --version 2>/dev/null | awk '{print $2}' || true)"
  v_httpie="$(http --version 2>/dev/null || true)"
  v_http_prompt="$(http-prompt --version 2>/dev/null || true)"
  jq -n --arg chalice "$v_chalice" --arg httpie "$v_httpie" --arg http_prompt "$v_http_prompt" '{chalice: $chalice, httpie: $httpie, http_prompt: $http_prompt}'
}
version "$@"
| true |
2e95430fb8bd09e453fd50a5d3034dcbef95211e | Shell | cdepillabout/docs | /code/mpg321_convert_all | UTF-8 | 445 | 3.984375 | 4 | [] | no_license | #!/bin/bash
# mpg321all
# Convert a whole list of mp3's to wav's.
# Batch-convert audio files to .wav next to the originals: .mp3 via
# mpg123, .flac via flac. Other extensions are skipped silently.
if [ ! -n "$1" ]
then
    echo "Usage: `basename $0` file1.mp3 file2.mp3 etc."
    exit 1
fi

for args in "$@"
do
    filename="${args%.*}"
    extension="${args##*.}"
    # Case-insensitive extension match (bash 4 lowercasing).
    lower_extension="${extension,,}"
    case "$lower_extension" in
        "mp3" )
            # Fixed: the output name was "$(unknown).wav" -- a command
            # substitution of a nonexistent program -- so every mp3 was
            # decoded to the literal file ".wav". Use the computed stem,
            # which was assigned above but never used.
            mpg123 -v --stereo -w "${filename}.wav" "$args"
            ;;
        "flac" )
            echo "running flac -d $args"
            flac -d "$args"
            ;;
    esac
done

exit 0
| true |
ec7007ff676f524b74cca1efe021524e3df57466 | Shell | deadloko/gentoo-work-configs | /splash/emerge-world/scripts/svc_started-pre | UTF-8 | 130 | 2.78125 | 3 | [
"Beerware",
"Bitstream-Vera"
] | permissive | #!/bin/bash
# Boot-splash hook, run after a service start attempt.
# $1 = service name, $2 = start result (0 = success).
. /sbin/splash-functions.sh
# On failure, show a message on the splash screen for 2 seconds.
# NOTE(review): $2 is unquoted -- if this hook is ever invoked without a
# second argument the test errors out; confirm callers always pass it.
if [ $2 != 0 ]
then
splash_comm_send "set message failed to start $1"
sleep 2
fi
| true |
064c407c9d08519fbbca0db679ee65568c8bfcad | Shell | pbstrein/.vim | /.bashrc | UTF-8 | 873 | 3.453125 | 3 | [] | no_license | #Combined git and proxy status for your PS1
# Print "*" when the current git checkout has uncommitted changes.
# Fixed: the original compared git's human-readable summary against
# "nothing to commit, working directory clean", which broke when git
# 2.9.1 reworded it to "working tree clean", and it also printed "*"
# outside any repository. `--porcelain` is the stable scripting interface:
# empty output means clean (or not a repo at all).
function parse_git_dirty {
  [[ -n $(git status --porcelain 2> /dev/null) ]] && echo "*"
}
# Not currently used, could replace __git_ps1
# Echo the currently checked-out git branch name; empty outside a repo.
# (Kept available even though PS1 currently uses __git_ps1 instead.)
function parse_git_branch {
  git branch --no-color 2> /dev/null | sed -n 's/^\* \(.*\)/\1/p'
}
# Report whether an HTTP(S) proxy is configured in the environment:
# prints "[proxy]" when http_proxy or https_proxy is non-empty,
# "[no proxy]" otherwise.
function has_proxy {
  if [ -n "${http_proxy:-}" ] || [ -n "${https_proxy:-}" ] ; then
    echo "[proxy]"
  else
    echo "[no proxy]"
  fi
}
# add colors to the prompt (ANSI escapes wrapped in \[ \] so bash does
# not count them toward the prompt width)
c_user='\[\033[01;32m\]'
c_path='\[\033[01;34m\]'
c_reset='\[\033[00m\]'
c_git='\[\033[01;35m\]'
#This also sets your XTERM titles
# PROMPT_COMMAND rebuilds PS1 before every prompt: the \e]2;...\a escape
# sets the terminal title to "[proxy state]user@host:cwd (git branch)",
# and the visible prompt shows the proxy state, colored user@host:path,
# and the git branch with a dirty "*" marker.
export PROMPT_COMMAND='PS1="\[\e]2;$(has_proxy)\u@\h:\w\$(__git_ps1 \" (%s$(parse_git_dirty))\")\a\]$(has_proxy)${c_user}\u${c_reset}@${c_user}\h${c_reset}:${c_path}\w${c_reset}${c_git}\$(__git_ps1 \" (%s$(parse_git_dirty))\")${c_reset}\$ "'
| true |
65a29d7b9e4b674242c776501c88ead46561943a | Shell | pbryon/resources | /test.sh | UTF-8 | 405 | 3.09375 | 3 | [] | no_license | #!/bin/bash
project="src/TestLinks"
script=$(basename "$0")
error="$script: dotnet CLI not installed. See https://github.com/dotnet/cli/blob/master/README.md"

# Verify the dotnet CLI is available. Fixed: the original used a subshell
# -- `|| (echo ...; exit 1)` -- so `exit` only terminated the subshell and
# the script carried on without dotnet; a brace group exits the script.
# The redundant duplicate `which dotnet` check is dropped (`command -v`
# is the portable form).
command -v dotnet >/dev/null || { echo "$error" >&2; exit 1; }
dotnet --help >/dev/null || exit 1

dotnet restore "$project" >/dev/null
echo "Checking links..." >&2
# Forward the script's arguments to the link checker.
dotnet run --project "$project" "$@"
3af2c563a4ea0de49944925d5d5764439ebf3918 | Shell | shieldproject/shield | /ci/scripts/build | UTF-8 | 1,546 | 3.90625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -eu
# Print a phase banner to stdout: a blank line, a double rule, the given
# title, a single rule, and a trailing blank line.
header() {
  printf '\n'
  printf '%s\n' "================================================================================"
  printf '%s\n' "$1"
  printf '%s\n' "--------------------------------------------------------------------------------"
  printf '\n'
}
# Fail fast: write the given message plus a hint to stderr and exit 2,
# signalling a misconfigured Concourse task.
bail() {
  printf '%s Did you misconfigure Concourse?\n' "$*" >&2
  exit 2
}
# --- Preflight: required pipeline inputs -------------------------------------
test -n "${APP_NAME:-}" || bail "APP_NAME must be set to the name of this package."
test -n "${MODULE:-}" || bail "MODULE must be set to the Go Module path of this package."
test -n "${GOPATH:-}" || bail "Expecting GOPATH to be set -- make sure correct image is specified."
# NOTE(review): VERSION_FROM is read here but only assigned further down,
# so it must arrive via the task environment -- confirm in the pipeline.
test -f "${VERSION_FROM}" || bail "Version file (${VERSION_FROM}) not found."
VERSION=$(cat "${VERSION_FROM}")
test -n "${VERSION}" || bail "Version file (${VERSION_FROM}) was empty."

# Resource Directories
export ROOT_PATH="$(pwd)"
# Place the checked-out source at its Go module path inside GOPATH and
# leave a symlink behind so the Concourse input path keeps working.
# Fixed: the mv/ln lines had unbalanced double quotes, which made bash
# treat everything up to the next quote as one string.
mkdir -p "$(dirname "${GOPATH}/src/${MODULE}")"
mv "${ROOT_PATH}/git" "${GOPATH}/src/${MODULE}"
ln -snf "${GOPATH}/src/${MODULE}" "${ROOT_PATH}/git"
export PATH=${PATH}:${GOPATH}/bin
export REPO_ROOT="${GOPATH}/src/${MODULE}"
export BUILD_ROOT="${ROOT_PATH}/build"
export CI_ROOT="${ROOT_PATH}/git-ci"
export VERSION_FROM="version/number"
export RELEASE_ROOT="${REPO_ROOT}/artifacts"

header "Building $APP_NAME v$VERSION..."

cd "$REPO_ROOT"
go version; echo; echo
make clean release VERSION="$VERSION"

# Package the release artifacts for the next pipeline step.
cd "$RELEASE_ROOT"
tar -zcvf "$BUILD_ROOT/$APP_NAME-$VERSION.tar.gz" "$APP_NAME-"*

echo
echo "================================================================================"
echo "SUCCESS!"
exit 0
| true |
6d7e8366287f2de95db7d4d400460ea9e8be53e5 | Shell | frobware/vas-charms-dev | /charms/trusty/telscale-restcomm/lib/mobicents/configuration/config-load-balancer.sh | UTF-8 | 2,286 | 3.296875 | 3 | [] | no_license | #! /bin/bash
##
## Description: Configures SIP Load Balancer
## Author : Henrique Rosa
##
##
## DEPENDENCIES
##
#source $TELSCALE_ANALYTICS/read-network-props.sh
#source $TELSCALE_ANALYTICS/read-user-data.sh
# Installation root for RestComm; used by the config functions below.
RESTCOMM_HOME=/opt/restcomm
##
## FUNCTIONS
##
# Point the SIP load balancer at this unit's private address.
# Reads the globals LB_HOME and PRIVATE_IP -- presumably provided by the
# charm environment; TODO confirm.
# NOTE(review): $FILE is unquoted in sed/mv; fine for these fixed paths,
# but fragile if a path ever contains spaces.
configLoadBalancer() {
FILE="$LB_HOME/lb-configuration.properties"
sed -e "s|^host=.*|host=$PRIVATE_IP|" $FILE > $FILE.bak
mv $FILE.bak $FILE
juju-log 'Updated Load Balancer configuration file'
}
# Enable the load balancer in RestComm's SIP stack properties.
# $1 = balancer IP address, $2 = balancer port.
# Uncomments the BALANCERS key, points it at $1:$2, and appends a line
# disabling reachability checks right after it.
configSipStack() {
FILE="$RESTCOMM_HOME/standalone/configuration/mss-sip-stack.properties"
juju-log "Will change mss-sip-stack.properties using $1:$2"
sed -e 's|^#org.mobicents.ha.javax.sip.BALANCERS=|org.mobicents.ha.javax.sip.BALANCERS=|' $FILE > $FILE.bak
mv $FILE.bak $FILE
sed -e "s|org.mobicents.ha.javax.sip.BALANCERS=.*|org.mobicents.ha.javax.sip.BALANCERS=$1:$2|" $FILE > $FILE.bak
mv $FILE.bak $FILE
juju-log "Activated Load Balancer on SIP stack configuration file with IP Address $1 and port $2"
# GNU sed 'a\' appends the REACHABLE_CHECK line after every BALANCERS match.
sed -e '/org.mobicents.ha.javax.sip.BALANCERS=.*/ a\
\org.mobicents.ha.javax.sip.REACHABLE_CHECK=false' \
$FILE > $FILE.bak
mv $FILE.bak $FILE
juju-log 'Removed reachable checks and specified HTTP Port 8080'
}
# Create the balancer's log directory and rewrite its log4j config so the
# log file path is absolute ($LB_HOME/logs/load-balancer.log).
configLogs() {
# Create directory to keep logs
mkdir -p $LB_HOME/logs
juju-log "Created logging directory $LB_HOME/logs"
# make log location absolute
FILE="$LB_HOME/lb-log4j.xml"
sed -e "s|<param name=\"file\" value=\".*\"/>|<param name=\"file\" value=\"$LB_HOME/logs/load-balancer.log\"/>|" $FILE > $FILE.bak
mv -f $FILE.bak $FILE
}
# Force the MSS path-name in standalone-sip.xml to the balancing-only
# stack implementation.
configStandalone() {
RESTCOMM_HOME=/opt/restcomm
FILE=$RESTCOMM_HOME/standalone/configuration/standalone-sip.xml
#path_name='org.mobicents.ext'
#if [ "$RUN_MODE" == "balancer" ]; then
#path_name="org.mobicents.ha.balancing.only"
#fi
# path_name="org.mobicents.ha.balancing.only"
# sed -e "s|stack-properties=\"configuration/mss-sip-stack.properties\" path-name=\".*\" |stack-properties=\"configuration/mss-sip-stack.properties\" path-name=\"$path_name\" |" $FILE > $FILE.bak
sed -e "s|path-name=\".*\" |path-name=\"org.mobicents.ha.balancing.only\" |" $FILE > $FILE.bak
mv -f $FILE.bak $FILE
# NOTE(review): $path_name is only set in the commented-out code above,
# so this log line prints an empty value.
juju-log "changed the MSS Path Setting to $path_name"
}
##
## MAIN
##
#configLogs
#configLoadBalancer
#configSipStack
#configStandalone
| true |
4f04d8fd57d37b8280ffd9e72ed18284c1663e18 | Shell | anne-glerum/paper-aspect-plasticity-subduction-data | /brick/extract_residual | UTF-8 | 233 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# For each friction angle phi in 0..30 (step 5), extract the 6th column
# of every 'Residual' line from compression_7_<phi>/opla into a .dat file
# inside the same run directory.
for phi in 0 5 10 15 20 25 30
do
    run_dir="compression_7_$phi"
    awk '/Residual/ { print $6 }' "$run_dir/opla" > "$run_dir/residual_e_7_$phi.dat"
done
| true |
f7ce7d3ea3f66b1f0a1bd24b24bb4fb56b055b02 | Shell | MarioBaron/boa | /aegir/tools/BOND.sh.txt | UTF-8 | 55,240 | 2.875 | 3 | [] | no_license | #!/bin/bash
###----------------------------------------###
###
### Barracuda-Octopus-Nginx-Drupal Tuner
###
### Copyright (C) 2010-2015 Omega8.cc
### noc@omega8.cc www.omega8.cc
###
### This program is free software. You can
### redistribute it and/or modify it under
### the terms of the GNU GPL as published by
### the Free Software Foundation, version 2
### or later.
###
### This program is distributed in the hope
### that it will be useful, but WITHOUT ANY
### WARRANTY; without even the implied
### warranty of MERCHANTABILITY or FITNESS
### FOR A PARTICULAR PURPOSE. See the GNU GPL
### for more details.
###
### You should have received a copy of the
### GNU GPL along with this program.
### If not, see http://www.gnu.org/licenses/
###
### Code: https://code.aegir.cc/aegir
###
###----------------------------------------###
###----------------------------------------###
### HOW-TO: run it with bash, not with sh ###
###----------------------------------------###
###
### $ bash BOND.sh.txt
###
### Note: to restore default values it is
### enough to start this script with
### any values defined below and answer
### NO when it prompts for confirmation
### "Are you ready to tune your Aegir".
###
###----------------------------------------###
### EDITME ###
###----------------------------------------###
###
### Enter below the settings you wish to use.
###
###----------------------------------------###
### Hostmaster root directory - /var/aegir
###
### Note: most of values tuned by this script
### are server-vide, while some, like
### mod_evasive settings will affect
### only sites hosted on the Aegir
### Satellite Instance defined below.
###
_TUNE_HOSTMASTER=/data/disk/o1
###----------------------------------------###
### Nginx server mod_evasive - default ON
###
### Note: running verify task on any SITE
### will restore default value ON
### for that site only, while TUNER
### will turn OFF/ON this feature
### for all sites hosted on the
### Hostmaster defined above.
###
_TUNE_NGINX_CONNECT=OFF
###----------------------------------------###
### Nginx server fastcgi timeout - default 180
###
### Note: running verify task on the SERVER
### in the Hostmaster created
### by Barracuda (not Octopus!)
### will restore default value
### for the server and all existing
### Aegir Satellite Instances.
###
_TUNE_NGINX_TIMEOUT=3600
###----------------------------------------###
### Nginx server firewall limit - default 300
###
### Note: don't change the default value
### if you are the only visitor, or
### you will lock yourself easily.
###
### The default value 300 means the
### firewall limit is OFF because
### it scans only the last 300 lines
### of your web server log file.
###
### If you will set this value to 100
### then every visitor IP with more
### than 100 out of the last 300
### requests will be locked.
###
### Only dynamic requests (pages) are
### counted because static files like
### images are generally not logged.
###
_TUNE_NGINX_FIREWALL=300
###----------------------------------------###
### Database server timeout - default 3600
###
_TUNE_SQL_TIMEOUT=3600
###----------------------------------------###
### PHP-FPM server timeout - default 180
###
_TUNE_PHP_FPM_TIMEOUT=3600
###----------------------------------------###
### PHP-CLI server timeout - default 3600
###
_TUNE_PHP_CLI_TIMEOUT=7200
###----------------------------------------###
### DON'T EDIT ANYTHING BELOW THIS LINE ###
###----------------------------------------###
# Pinned Aegir/BOA release identifiers used throughout the tuner.
_AEGIR_VERSION=HEAD
_AEGIR_XTS_VERSION=BOA-2.4.0
_BRANCH_BOA=2.4.x-dev
_BRANCH_PRN=2.4.x-dev
_INSTALLER_VERSION=BOA-2.4.0-dev
# Runtime facts: timestamp (for backups/logs), total RAM in MB, hostname.
_NOW=`date +%y%m%d-%H%M`
_RAM=`free -mto | grep Mem: | awk '{ print $2 }'`
_THISHOST=`uname -n`
_SPINNER=YES
PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin
SHELL=/bin/bash
# Disable the progress spinner inside GNU screen (screen exports STY).
if [ -n "${STY+x}" ]; then
_SPINNER=NO
fi
# Zend module API numbers and exact versions of the bundled PHP builds.
_PHP52_API=20060613
_PHP52_VERSION=5.2.17
_PHP53_API=20090626
_PHP53_VERSION=5.3.29
_PHP54_API=20100525
_PHP54_VERSION=5.4.36
_PHP55_API=20121212
_PHP55_VERSION=5.5.20
_PHP56_API=20131226
_PHP56_VERSION=5.6.4
###---### Functions
#
# Clean pid files on exit.
# Remove the BOA run/wait lock files, re-enable cron, and abort.
# Always terminates the script with status 1 -- this is the error path.
clean_pid_exit () {
  rm -f /var/run/boa_wait.pid /var/run/boa_run.pid
  service cron start &> /dev/null
  exit 1
}
#
# Noticeable messages.
# Print a timestamped, prefixed status line, e.g.
#   Tuner [Mon Jan  1 00:00:00 UTC 2015] ==> message text
msg () {
  printf 'Tuner [%s] ==> %s\n' "$(date)" "$*"
}
# Simple prompt.
# Ask a yes/no question (prompt text = "$*") and read the answer from
# stdin. Returns 0 for yes -- also for an empty answer and whenever
# _AUTOPILOT=YES -- and 1 for no; unrecognised input re-prompts.
prompt_yes_no () {
  # Unattended runs always answer "yes".
  if [ "$_AUTOPILOT" = "YES" ] ; then
    return 0
  fi
  while true ; do
    printf "$* [Y/n] "
    read answer
    # Plain Enter defaults to "yes".
    if [ -z "$answer" ] ; then
      return 0
    fi
    case $answer in
      [Yy]|[Yy][Ee][Ss]) return 0 ;;
      [Nn]|[Nn][Oo])     return 1 ;;
      *) echo "Please answer yes or no" ;;
    esac
  done
}
#
# Silent runner.
# Run one command (passed as a single string in $1) silently, appending
# its output to $_LOG while a progress spinner runs. Returns 0 on
# success; on failure prints troubleshooting help plus the log tail and
# returns 1.
# NOTE(review): $CMD is expanded unquoted on purpose so the string is
# word-split into command + arguments; arguments containing spaces are
# therefore not supported.
st_runner () {
CMD="$1"
# The spinner script spins for as long as the 'busy' flag file exists.
touch busy
if [ "$_SPINNER" = "YES" ] ; then
bash $_SRCDIR/spinner busy &
fi
if $CMD >> $_LOG; then
rm busy
sleep 1
return 0
else
rm busy
sleep 1
# NOTE(review): by this point $? holds the status of 'sleep', not of
# $CMD, so "Error (if any)" is effectively always 0.
echo "$CMD failed. Error (if any): $?"
echo " "
echo "Displaying the last 15 lines of $_LOG to help troubleshoot this problem"
echo "If you see any error with advice to run 'dpkg --configure -a', run this"
echo "command first and choose default answer, then run this installer again"
echo " "
tail -15 $_LOG
return 1
fi
}
#
# Small spinner.
# Fire-and-forget variant of st_runner: run $1 silently with a spinner,
# appending output to $_LOG.
# NOTE(review): on failure the 'busy' flag file is never removed (so the
# spinner keeps running) and the function still returns 0, because the
# failed 'if' has no else branch -- confirm this is intentional.
mrun () {
CMD="$1"
touch busy
if [ "$_SPINNER" = "YES" ] ; then
bash $_SRCDIR/spinner busy &
fi
if $CMD >> $_LOG; then
rm busy
sleep 1
return 0
fi
}
#
# Find correct IP.
# Resolve the hostname in $_LOC_DOM to its first IPv4 address (via
# getent) and store it in the global _LOC_IP; empty when the name does
# not resolve.
find_correct_ip () {
  _LOC_IP=$(getent ahostsv4 "$_LOC_DOM" | awk '{ print $1; exit }')
}
#
# Fix php.ini files to remove suhosin.so
# Blank out any suhosin extension lines in the php.ini named by the
# global _THIS_FILE (lines are emptied, not deleted, matching the
# historical behaviour). No-op when the file is missing or contains no
# suhosin reference.
fix_php_ini_suhosin () {
  if [ -e "$_THIS_FILE" ] ; then
    if grep -q "extension=suhosin.so" "$_THIS_FILE" ; then
      sed -i "s/.*suhosin.*//g" "$_THIS_FILE" &> /dev/null
    fi
  fi
}
#
# Fix php.ini files to add mailparse.so
# Append "extension=mailparse.so" once to the php.ini named by the global
# _THIS_FILE; idempotent, and a no-op when the file does not exist.
fix_php_ini_mailparse () {
  if [ -e "$_THIS_FILE" ] ; then
    if grep -q "extension=mailparse.so" "$_THIS_FILE" ; then
      _DO_NOTHING=YES
    else
      echo "extension=mailparse.so" >> "$_THIS_FILE"
    fi
  fi
}
#
# Fix php.ini files to add jsmin.so
# Append "extension=jsmin.so" once to the php.ini named by the global
# _THIS_FILE; idempotent, and a no-op when the file does not exist.
fix_php_ini_jsmin () {
  if [ -e "$_THIS_FILE" ] ; then
    if grep -q "extension=jsmin.so" "$_THIS_FILE" ; then
      _DO_NOTHING=YES
    else
      echo "extension=jsmin.so" >> "$_THIS_FILE"
    fi
  fi
}
#
# Fix php.ini files to add redis.so
# Enable the redis extension in the php.ini named by the global _THIS_FILE:
# append "extension=redis.so" only when it is not already present.
fix_php_ini_redis () {
  if [ -e "$_THIS_FILE" ] ; then
    _REDIS_INI_TEST=$(grep "extension=redis.so" $_THIS_FILE 2>&1)
    if [[ "$_REDIS_INI_TEST" != *"extension=redis.so"* ]] ; then
      echo "extension=redis.so" >> $_THIS_FILE
    else
      _DO_NOTHING=YES
    fi
  fi
}
#
# Fix php.ini file to add newrelic.ini
# Wire the New Relic PHP agent into the php.ini named by the global
# _THIS_FILE: append the repo template once, ensure the Drupal framework
# flag is set once, then strip placeholder/duplicate license_key text so
# the system-wide agent key applies.
fix_php_ini_newrelic () {
  _NR_TPL="/opt/tmp/$_BOA_REPO_NAME/aegir/conf/newrelic.ini"
  if [ ! -e "$_THIS_FILE" ] ; then
    return 0
  fi
  _NEWRELIC_INI_TEST_A=$(grep "extension=newrelic.so" $_THIS_FILE 2>&1)
  if [[ "$_NEWRELIC_INI_TEST_A" != *"extension=newrelic.so"* ]] ; then
    # Template not applied yet: append the whole newrelic.ini snippet.
    cat $_NR_TPL >> $_THIS_FILE
  else
    _DO_NOTHING=YES
  fi
  _NEWRELIC_INI_TEST_B=$(grep "newrelic.framework.drupal.modules" $_THIS_FILE 2>&1)
  if [[ "$_NEWRELIC_INI_TEST_B" != *"newrelic.framework.drupal.modules"* ]] ; then
    echo "newrelic.framework.drupal.modules = 1" >> $_THIS_FILE
  else
    _DO_NOTHING=YES
  fi
  # Drop the template placeholder and any per-file license_key= prefix.
  sed -i "s/REPLACE_WITH_REAL_KEY//g" $_THIS_FILE &> /dev/null
  sed -i "s/license_key=//g" $_THIS_FILE &> /dev/null
}
#
# Fix all php.ini files to add newrelic.ini
# Apply fix_php_ini_newrelic to every per-version FPM and CLI php.ini under
# /opt (PHP 5.2 through 5.6). Only runs when the system-wide New Relic
# agent config exists; also caches the agent license key in the global
# _NEWRELIC_KEY when it is not already set.
fix_php_ini_newrelic_all () {
  if [ ! -e "/etc/newrelic/newrelic.cfg" ] ; then
    return 0
  fi
  if [ -z "$_NEWRELIC_KEY" ] ; then
    _NEWRELIC_KEY=$(grep license_key /etc/newrelic/newrelic.cfg)
    _NEWRELIC_KEY=$(echo -n $_NEWRELIC_KEY | tr -d "\n")
  fi
  local v
  for v in 52 53 54 55 56 ; do
    _THIS_FILE="/opt/php${v}/etc/php${v}.ini"
    fix_php_ini_newrelic
    _THIS_FILE="/opt/php${v}/lib/php.ini"
    fix_php_ini_newrelic
  done
}
#
# Fix FMP php.ini file to add opcache.so
# Append a Zend OPcache configuration section to the php.ini named by the
# global _THIS_FILE, using the shared-object path from the global
# _OPCACHE_SO. Skipped when the file already mentions opcache.so (the
# optional positional argument is accepted but unused, matching callers
# that pass the PHP version).
fix_php_ini_opcache () {
  if [ ! -e "$_THIS_FILE" ] ; then
    return 0
  fi
  _OPCACHE_INI_TEST=$(grep "opcache.so" $_THIS_FILE 2>&1)
  if [[ "$_OPCACHE_INI_TEST" == *"opcache.so"* ]] ; then
    _DO_NOTHING=YES
    return 0
  fi
  cat >> $_THIS_FILE <<EOF
;
; Zend OPcache
zend_extension="$_OPCACHE_SO"
opcache.enable=1
opcache.memory_consumption=181
opcache.interned_strings_buffer=8
opcache.max_accelerated_files=64000
opcache.revalidate_freq=60
opcache.enable_file_override=1
opcache.inherited_hack=1
opcache.dups_fix=1
opcache.log_verbosity_level=0
opcache.fast_shutdown=1
;
EOF
}
#
# Fix all FMP php.ini files to add Zend OPcache
# Add the Zend OPcache section to every per-version FPM php.ini under /opt.
# The extension path embeds the per-version Zend API number taken from the
# globals _PHP52_API .. _PHP56_API (via indirect expansion).
fix_php_ini_opcache_all () {
  local v api
  for v in 52 53 54 55 56 ; do
    api="_PHP${v}_API"
    _THIS_FILE="/opt/php${v}/etc/php${v}.ini"
    _OPCACHE_SO="/opt/php${v}/lib/php/extensions/no-debug-non-zts-${!api}/opcache.so"
    fix_php_ini_opcache "$v"
  done
}
#
# Fix php.ini file to add geos.so
# Enable the GEOS extension in the php.ini named by the global _THIS_FILE:
# append "extension=geos.so" only when it is not already present.
fix_php_ini_geos () {
  if [ -e "$_THIS_FILE" ] ; then
    _GEOS_INI_TEST=$(grep "extension=geos.so" $_THIS_FILE 2>&1)
    case "$_GEOS_INI_TEST" in
      *"extension=geos.so"*)
        _DO_NOTHING=YES
        ;;
      *)
        echo "extension=geos.so" >> $_THIS_FILE
        ;;
    esac
  fi
}
#
# Fix 5.3 php.ini files to add geos.so
# Enable GEOS in both PHP 5.3 ini files (the only version it is built
# for), when requested via _PHP_GEOS=YES or a "GEO" entry in _XTRAS_LIST.
fix_php_ini_geos_all () {
  if [ "$_PHP_GEOS" != "YES" ] && [[ "$_XTRAS_LIST" != *"GEO"* ]] ; then
    return 0
  fi
  local _ini
  for _ini in /opt/php53/etc/php53.ini /opt/php53/lib/php.ini ; do
    _THIS_FILE=$_ini
    fix_php_ini_geos
  done
}
#
# Fix php.ini file to add mongo.so
# Enable the legacy MongoDB driver in the php.ini named by the global
# _THIS_FILE: append "extension=mongo.so" only when it is missing.
fix_php_ini_mongo () {
  if [ -e "$_THIS_FILE" ] ; then
    _MONGODB_INI_TEST=$(grep "extension=mongo.so" $_THIS_FILE 2>&1)
    case "$_MONGODB_INI_TEST" in
      *"extension=mongo.so"*)
        _DO_NOTHING=YES
        ;;
      *)
        echo "extension=mongo.so" >> $_THIS_FILE
        ;;
    esac
  fi
}
#
# Fix 5.3 php.ini files to add mongo.so
# Enable mongo.so in both PHP 5.3 ini files (the only version it is built
# for), when requested via _PHP_MONGODB=YES or an "MNG" entry in
# _XTRAS_LIST.
fix_php_ini_mongo_all () {
  if [ "$_PHP_MONGODB" != "YES" ] && [[ "$_XTRAS_LIST" != *"MNG"* ]] ; then
    return 0
  fi
  local _ini
  for _ini in /opt/php53/etc/php53.ini /opt/php53/lib/php.ini ; do
    _THIS_FILE=$_ini
    fix_php_ini_mongo
  done
}
#
# Update PHP Config.
#######################################
# Rebuild the complete multi-version PHP (5.2-5.6) FPM/CLI configuration
# tree under /opt from the templates shipped in
# /opt/tmp/$_BOA_REPO_NAME/aegir/conf, (re)create the per-version pool
# users, then patch every php.ini via the fix_php_ini_* helpers above.
# Globals read:    _THISHTIP _THISHOST _BOA_REPO_NAME _VMFAMILY
#                  _CUSTOM_CONFIG_PHP52.._PHP56 _INSTALLER_VERSION _NOW
# Globals written: _THISHTIP _LOC_DOM (via find_correct_ip) and
#                  _THIS_FILE (scratch variable consumed by the helpers)
#######################################
update_php_conf () {
# Resolve this host's IPv4 once; it is appended to the FPM listen ACLs below.
if [ -z "$_THISHTIP" ] ; then
_LOC_DOM="$_THISHOST"
find_correct_ip
_THISHTIP="$_LOC_IP"
fi
# Shared FPM pool defaults, always refreshed from the repo template.
if [ ! -e "/opt/etc/fpm" ] || [ ! -e "/opt/etc/fpm/fpm-pool-common.conf" ] ; then
mkdir -p /opt/etc/fpm
fi
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/fpm-pool-common.conf /opt/etc/fpm/fpm-pool-common.conf
# Per-version pool user + seed php.ini; the wwwNN pool conf is always
# overwritten from the template. (Version order 55,56,54,53 is historical.)
if [ ! -e "/var/www/www55" ] ; then
adduser --system --group --home /var/www/www55 www55 &> /dev/null
usermod -aG www-data www55 &> /dev/null
fi
if [ ! -e "/opt/php55/etc/php55.ini" ] || [ ! -e "/opt/php55/etc/pool.d/www55.conf" ] ; then
mkdir -p /opt/php55/etc/pool.d
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php55.ini /opt/php55/etc/php55.ini
fi
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/fpm55-pool-www.conf /opt/php55/etc/pool.d/www55.conf
if [ ! -e "/var/www/www56" ] ; then
adduser --system --group --home /var/www/www56 www56 &> /dev/null
usermod -aG www-data www56 &> /dev/null
fi
if [ ! -e "/opt/php56/etc/php56.ini" ] || [ ! -e "/opt/php56/etc/pool.d/www56.conf" ] ; then
mkdir -p /opt/php56/etc/pool.d
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php56.ini /opt/php56/etc/php56.ini
fi
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/fpm56-pool-www.conf /opt/php56/etc/pool.d/www56.conf
if [ ! -e "/var/www/www54" ] ; then
adduser --system --group --home /var/www/www54 www54 &> /dev/null
usermod -aG www-data www54 &> /dev/null
fi
if [ ! -e "/opt/php54/etc/php54.ini" ] || [ ! -e "/opt/php54/etc/pool.d/www54.conf" ] ; then
mkdir -p /opt/php54/etc/pool.d
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php54.ini /opt/php54/etc/php54.ini
fi
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/fpm54-pool-www.conf /opt/php54/etc/pool.d/www54.conf
if [ ! -e "/var/www/www53" ] ; then
adduser --system --group --home /var/www/www53 www53 &> /dev/null
usermod -aG www-data www53 &> /dev/null
fi
if [ ! -e "/opt/php53/etc/php53.ini" ] || [ ! -e "/opt/php53/etc/pool.d/www53.conf" ] ; then
mkdir -p /opt/php53/etc/pool.d
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php53.ini /opt/php53/etc/php53.ini
fi
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/fpm53-pool-www.conf /opt/php53/etc/pool.d/www53.conf
# PHP 5.2 is legacy: FPM ini only, no pool.d user layout.
if [ ! -e "/opt/php52/etc/php52.ini" ] ; then
mkdir -p /opt/php52/etc
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php52.ini /opt/php52/etc/php52.ini
fi
# Seed the per-version CLI php.ini files when missing.
if [ ! -e "/opt/php56/lib/php.ini" ] ; then
mkdir -p /opt/php56/lib
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php56-cli.ini /opt/php56/lib/php.ini
fi
if [ ! -e "/opt/php55/lib/php.ini" ] ; then
mkdir -p /opt/php55/lib
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php55-cli.ini /opt/php55/lib/php.ini
fi
if [ ! -e "/opt/php54/lib/php.ini" ] ; then
mkdir -p /opt/php54/lib
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php54-cli.ini /opt/php54/lib/php.ini
fi
if [ ! -e "/opt/php53/lib/php.ini" ] ; then
mkdir -p /opt/php53/lib
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php53-cli.ini /opt/php53/lib/php.ini
fi
if [ ! -e "/opt/php52/lib/php.ini" ] ; then
mkdir -p /opt/php52/lib
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php52-cli.ini /opt/php52/lib/php.ini
fi
# Forced overwrite from templates (with a timestamped backup under
# /var/backups/dragon/t) when custom per-version config is disallowed, or
# always on .host8. hosts / VS guests.
if [ "$_CUSTOM_CONFIG_PHP56" = "NO" ] || [[ "$_THISHOST" =~ ".host8." ]] || [ "$_VMFAMILY" = "VS" ] ; then
cp -af /opt/php56/etc/php56.ini /var/backups/dragon/t/etc-php56.ini-pre-$_INSTALLER_VERSION-$_NOW &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php56.ini /opt/php56/etc/php56.ini &> /dev/null
cp -af /opt/php56/lib/php.ini /var/backups/dragon/t/lib-php56.ini-pre-$_INSTALLER_VERSION-$_NOW &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php56-cli.ini /opt/php56/lib/php.ini &> /dev/null
fi
if [ "$_CUSTOM_CONFIG_PHP55" = "NO" ] || [[ "$_THISHOST" =~ ".host8." ]] || [ "$_VMFAMILY" = "VS" ] ; then
cp -af /opt/php55/etc/php55.ini /var/backups/dragon/t/etc-php55.ini-pre-$_INSTALLER_VERSION-$_NOW &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php55.ini /opt/php55/etc/php55.ini &> /dev/null
cp -af /opt/php55/lib/php.ini /var/backups/dragon/t/lib-php55.ini-pre-$_INSTALLER_VERSION-$_NOW &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php55-cli.ini /opt/php55/lib/php.ini &> /dev/null
fi
if [ "$_CUSTOM_CONFIG_PHP54" = "NO" ] || [[ "$_THISHOST" =~ ".host8." ]] || [ "$_VMFAMILY" = "VS" ] ; then
cp -af /opt/php54/etc/php54.ini /var/backups/dragon/t/etc-php54.ini-pre-$_INSTALLER_VERSION-$_NOW &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php54.ini /opt/php54/etc/php54.ini &> /dev/null
cp -af /opt/php54/lib/php.ini /var/backups/dragon/t/lib-php54.ini-pre-$_INSTALLER_VERSION-$_NOW &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php54-cli.ini /opt/php54/lib/php.ini &> /dev/null
fi
if [ "$_CUSTOM_CONFIG_PHP53" = "NO" ] || [[ "$_THISHOST" =~ ".host8." ]] || [ "$_VMFAMILY" = "VS" ] ; then
cp -af /opt/php53/etc/php53.ini /var/backups/dragon/t/etc-php53.ini-pre-$_INSTALLER_VERSION-$_NOW &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php53.ini /opt/php53/etc/php53.ini &> /dev/null
cp -af /opt/php53/lib/php.ini /var/backups/dragon/t/lib-php53.ini-pre-$_INSTALLER_VERSION-$_NOW &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php53-cli.ini /opt/php53/lib/php.ini &> /dev/null
fi
if [ "$_CUSTOM_CONFIG_PHP52" = "NO" ] || [[ "$_THISHOST" =~ ".host8." ]] || [ "$_VMFAMILY" = "VS" ] ; then
cp -af /opt/php52/etc/php52.ini /var/backups/dragon/t/etc-php52.ini-pre-$_INSTALLER_VERSION-$_NOW &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php52.ini /opt/php52/etc/php52.ini &> /dev/null
cp -af /opt/php52/lib/php.ini /var/backups/dragon/t/lib-php52.ini-pre-$_INSTALLER_VERSION-$_NOW &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php52-cli.ini /opt/php52/lib/php.ini &> /dev/null
fi
###
# Master FPM daemon configs, always refreshed from the templates.
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php56-fpm.conf /opt/php56/etc/php56-fpm.conf &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php55-fpm.conf /opt/php55/etc/php55-fpm.conf &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php54-fpm.conf /opt/php54/etc/php54-fpm.conf &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php53-fpm.conf /opt/php53/etc/php53-fpm.conf &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php52-fpm.conf /opt/php52/etc/php52-fpm.conf &> /dev/null
###
# Allow FPM connections from localhost and this host's public IP.
sed -i "s/listen.allowed_clients =.*/listen.allowed_clients = 127.0.0.1,$_THISHTIP/g" /opt/etc/fpm/fpm-pool-common.conf &> /dev/null
sed -i "s/>127.0.0.1</\>127.0.0.1,$_THISHTIP\</g" /opt/php52/etc/php52-fpm.conf &> /dev/null
sed -i "s/listen.mode =.*/listen.mode = 0660/g" /opt/etc/fpm/fpm-pool-common.conf &> /dev/null
###
# Patch every FPM and CLI ini: enable redis/jsmin/mailparse, strip suhosin.
# _THIS_FILE is the implicit argument consumed by the fix_php_ini_* helpers.
_THIS_FILE=/opt/php56/etc/php56.ini
fix_php_ini_redis
fix_php_ini_jsmin
fix_php_ini_suhosin
fix_php_ini_mailparse
_THIS_FILE=/opt/php56/lib/php.ini
fix_php_ini_redis
fix_php_ini_jsmin
fix_php_ini_suhosin
fix_php_ini_mailparse
_THIS_FILE=/opt/php55/etc/php55.ini
fix_php_ini_redis
fix_php_ini_jsmin
fix_php_ini_suhosin
fix_php_ini_mailparse
_THIS_FILE=/opt/php55/lib/php.ini
fix_php_ini_redis
fix_php_ini_jsmin
fix_php_ini_suhosin
fix_php_ini_mailparse
_THIS_FILE=/opt/php54/etc/php54.ini
fix_php_ini_redis
fix_php_ini_jsmin
fix_php_ini_suhosin
fix_php_ini_mailparse
_THIS_FILE=/opt/php54/lib/php.ini
fix_php_ini_redis
fix_php_ini_jsmin
fix_php_ini_suhosin
fix_php_ini_mailparse
_THIS_FILE=/opt/php53/etc/php53.ini
fix_php_ini_redis
fix_php_ini_jsmin
fix_php_ini_suhosin
fix_php_ini_mailparse
_THIS_FILE=/opt/php53/lib/php.ini
fix_php_ini_redis
fix_php_ini_jsmin
fix_php_ini_suhosin
fix_php_ini_mailparse
_THIS_FILE=/opt/php52/etc/php52.ini
fix_php_ini_redis
fix_php_ini_jsmin
fix_php_ini_suhosin
fix_php_ini_mailparse
_THIS_FILE=/opt/php52/lib/php.ini
fix_php_ini_redis
fix_php_ini_jsmin
fix_php_ini_suhosin
fix_php_ini_mailparse
# Remove distro-level extension configs that would conflict with the
# per-version setup above, then apply the optional extras.
rm -f /etc/php5/conf.d/{opcache.ini,apc.ini,imagick.ini,memcached.ini,redis.ini,suhosin.ini,newrelic.ini} &> /dev/null
fix_php_ini_newrelic_all
fix_php_ini_geos_all
fix_php_ini_mongo_all
fix_php_ini_opcache_all
###
# Nginx handles gzip; keep PHP-level zlib compression off for 5.2/5.3.
sed -i "s/^zlib.output_compression.*/zlib.output_compression = Off/g" /opt/php52/etc/php52.ini &> /dev/null
sed -i "s/^zlib.output_compression.*/zlib.output_compression = Off/g" /opt/php52/lib/php.ini &> /dev/null
sed -i "s/^zlib.output_compression.*/zlib.output_compression = Off/g" /opt/php53/etc/php53.ini &> /dev/null
sed -i "s/^zlib.output_compression.*/zlib.output_compression = Off/g" /opt/php53/lib/php.ini &> /dev/null
###
sed -i "s/.*zlib.output_compression_level/;zlib.output_compression_level/g" /opt/php52/etc/php52.ini &> /dev/null
sed -i "s/.*zlib.output_compression_level/;zlib.output_compression_level/g" /opt/php52/lib/php.ini &> /dev/null
sed -i "s/.*zlib.output_compression_level/;zlib.output_compression_level/g" /opt/php53/etc/php53.ini &> /dev/null
sed -i "s/.*zlib.output_compression_level/;zlib.output_compression_level/g" /opt/php53/lib/php.ini &> /dev/null
###
}
#
# Overwrite every per-version PHP CLI and FPM ini under /opt (5.3-5.6)
# with the stock copies shipped in /opt/tmp/$_BOA_REPO_NAME/aegir/conf,
# discarding any local customizations.
restore_default_php () {
  msg "INFO: Restoring default PHP configuration"
  local v
  for v in 53 54 55 56 ; do
    cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php${v}-cli.ini /opt/php${v}/lib/php.ini &> /dev/null
    cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/php${v}.ini /opt/php${v}/etc/php${v}.ini &> /dev/null
  done
}
#
# Apply operator-chosen PHP timeouts: _TUNE_PHP_FPM_TIMEOUT (floored at
# 60s) to the FPM pools/daemons and FPM php.ini files, and
# _TUNE_PHP_CLI_TIMEOUT to the CLI php.ini files plus the Redis server
# timeout (kept in sync with CLI so long drush runs are not cut off).
tune_php () {
  msg "INFO: Tuning PHP configuration"
  # Enforce a 60 second floor on the FPM timeout.
  if [ "$_TUNE_PHP_FPM_TIMEOUT" -lt "60" ] ; then
    _TUNE_PHP_FPM_TIMEOUT=60
  fi
  # PHP-FPM pools
  sed -i "s/180s/${_TUNE_PHP_FPM_TIMEOUT}s/g" /opt/php*/etc/pool.d/*.conf &> /dev/null
  sed -i "s/180s/${_TUNE_PHP_FPM_TIMEOUT}s/g" /opt/php*/etc/php*-fpm.conf &> /dev/null
  sed -i "s/180/$_TUNE_PHP_FPM_TIMEOUT/g" /opt/etc/fpm/fpm-pool-common.conf &> /dev/null
  # PHP-FPM INI and PHP-CLI INI share the same three keys.
  local _key
  for _key in default_socket_timeout max_execution_time max_input_time ; do
    sed -i "s/^$_key =.*/$_key = $_TUNE_PHP_FPM_TIMEOUT/g" /opt/php*/etc/php*.ini &> /dev/null
    sed -i "s/^$_key =.*/$_key = $_TUNE_PHP_CLI_TIMEOUT/g" /opt/php*/lib/php.ini &> /dev/null
  done
  # Redis config should sync with PHP-CLI
  sed -i "s/^timeout .*/timeout $_TUNE_PHP_CLI_TIMEOUT/g" /etc/redis/redis.conf &> /dev/null
}
#
#######################################
# Resize the InnoDB redo log to $_INNODB_LOG_FILE_SIZE_MB.
# Stops MySQL first, and only when it is confirmed down (no socket and no
# pid file) moves the old ib_logfile0/1 into a timestamped backup dir and
# rewrites innodb_log_file_size in /etc/mysql/my.cnf; MySQL recreates the
# redo logs at the new size on start.
# Globals read: _INNODB_LOG_FILE_SIZE_MB _NOW
#######################################
update_innodb_log_file_size () {
msg "INFO: InnoDB log file will be set to $_INNODB_LOG_FILE_SIZE_MB, please wait..."
mrun "service mysql stop" &> /dev/null
mrun "sleep 5"
# Only proceed while mysqld is fully stopped; touching the redo logs of a
# running server would corrupt them.
if [ ! -e "/var/run/mysqld/mysqld.sock" ] && [ ! -e "/var/run/mysqld/mysqld.pid" ] ; then
mkdir -p /var/backups/old-sql-ib-log-$_NOW
mrun "sleep 5"
# Keep (don't delete) the old redo logs so the change can be rolled back.
mv -f /var/lib/mysql/ib_logfile0 /var/backups/old-sql-ib-log-$_NOW/ &> /dev/null
mv -f /var/lib/mysql/ib_logfile1 /var/backups/old-sql-ib-log-$_NOW/ &> /dev/null
sed -i "s/.*innodb_log_file_size.*/innodb_log_file_size = $_INNODB_LOG_FILE_SIZE_MB/g" /etc/mysql/my.cnf &> /dev/null
mrun "sleep 5"
fi
mrun "service mysql start" &> /dev/null
}
#
#######################################
# Restore the stock MySQL/MariaDB configuration from the repo template and
# re-apply the operator's high-level choices (binary logging, default
# storage engine, InnoDB redo log size).
# Globals read:    _CUSTOM_CONFIG_SQL _THISHOST _VMFAMILY _BOA_REPO_NAME
#                  _INSTALLER_VERSION _NOW _DB_BINARY_LOG _DB_ENGINE
#                  _INNODB_LOG_FILE_SIZE
# Globals written: _INNODB_LOG_FILE_SIZE (digits-only), 
#                  _INNODB_LOG_FILE_SIZE_MB _INNODB_LOG_FILE_SIZE_SAME
#                  _THIS_DB_SERVER_TEST
#######################################
restore_default_sql () {
msg "INFO: Restoring default SQL configuration"
sed -i "s/.*check_for_crashed_tables/#check_for_crashed_tables/g" /etc/mysql/debian-start &> /dev/null
# Overwrite my.cnf from the template (with a timestamped backup) unless the
# operator keeps a custom config; always forced on .host8. hosts / VS guests.
if [ "$_CUSTOM_CONFIG_SQL" = "NO" ] || [[ "$_THISHOST" =~ ".host8." ]] || [ "$_VMFAMILY" = "VS" ] ; then
if [ "$_CUSTOM_CONFIG_SQL" = "YES" ] ; then
_DO_NOTHING=YES
else
cp -af /etc/mysql/my.cnf /var/backups/dragon/t/my.cnf-pre-$_INSTALLER_VERSION-$_NOW &> /dev/null
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/my.cnf.txt /etc/mysql/my.cnf
# Sanitize to digits, then remember whether the redo-log size in the old
# (backed-up) my.cnf already matches the requested size; that decides
# later whether a simple sed suffices or a full MySQL stop/resize is due.
_INNODB_LOG_FILE_SIZE=${_INNODB_LOG_FILE_SIZE//[^0-9]/}
if [ ! -z "$_INNODB_LOG_FILE_SIZE" ] ; then
if [ "$_INNODB_LOG_FILE_SIZE" -ge "10" ] && [ "$_INNODB_LOG_FILE_SIZE" -lt "501" ] ; then
_INNODB_LOG_FILE_SIZE_MB="${_INNODB_LOG_FILE_SIZE}M"
_INNODB_LOG_FILE_SIZE_TEST=$(grep "^innodb_log_file_size" /var/backups/dragon/t/my.cnf-pre-$_INSTALLER_VERSION-$_NOW 2>&1)
if [[ "$_INNODB_LOG_FILE_SIZE_TEST" =~ "= $_INNODB_LOG_FILE_SIZE_MB" ]] ; then
_INNODB_LOG_FILE_SIZE_SAME=YES
else
_INNODB_LOG_FILE_SIZE_SAME=NO
fi
fi
fi
# Disable slow-query logging in the fresh template.
sed -i "s/.*slow_query_log/#slow_query_log/g" /etc/mysql/my.cnf &> /dev/null
sed -i "s/.*long_query_time/#long_query_time/g" /etc/mysql/my.cnf &> /dev/null
sed -i "s/.*slow_query_log_file/#slow_query_log_file/g" /etc/mysql/my.cnf &> /dev/null
# skip-name-resolve stays enabled only when the marker file opts in.
if [ ! -e "/etc/mysql/skip-name-resolve.txt" ] ; then
sed -i "s/.*skip-name-resolve/#skip-name-resolve/g" /etc/mysql/my.cnf &> /dev/null
fi
fi
fi
mv -f /etc/mysql/my.cnf-pre* /var/backups/dragon/t/ &> /dev/null
# Comment out directives removed from modern MySQL/MariaDB builds.
sed -i "s/.*default-table-type/#default-table-type/g" /etc/mysql/my.cnf &> /dev/null
sed -i "s/.*language/#language/g" /etc/mysql/my.cnf &> /dev/null
_THIS_DB_SERVER_TEST=`mysql -V 2>&1`
# lc_messages* only exists on MySQL 5.5+/MariaDB 10.0+.
if [[ "$_THIS_DB_SERVER_TEST" =~ "5.5." ]] || [[ "$_THIS_DB_SERVER_TEST" =~ "10.0" ]] ; then
_DO_NOTHING=YES
else
sed -i "s/.*lc_messages_dir /#lc_messages_dir /g" /etc/mysql/my.cnf &> /dev/null
sed -i "s/.*lc_messages /#lc_messages /g" /etc/mysql/my.cnf &> /dev/null
fi
# innodb_lazy_drop_table is a Percona-only option; disable it on MariaDB.
if [[ "$_THIS_DB_SERVER_TEST" =~ "MariaDB" ]] ; then
sed -i "s/.*innodb_lazy_drop_table /#innodb_lazy_drop_table /g" /etc/mysql/my.cnf &> /dev/null
fi
# Re-apply the operator's choices on the freshly restored config.
if [ "$_CUSTOM_CONFIG_SQL" = "NO" ] ; then
if [ "$_DB_BINARY_LOG" = "NO" ] ; then
# Purge existing binlogs, drop the purge cron job, disable binlogging.
bash /var/xdrago/purge_binlogs.sh &> /dev/null
sed -i "s/.*purge_binlogs.*//g" /var/spool/cron/crontabs/root &> /dev/null
sed -i "/^$/d" /var/spool/cron/crontabs/root &> /dev/null
sed -i "s/^log_bin/#log_bin/g" /etc/mysql/my.cnf &> /dev/null
sed -i "s/^expire_logs_days/#expire_logs_days/g" /etc/mysql/my.cnf &> /dev/null
sed -i "s/^max_binlog_size/#max_binlog_size/g" /etc/mysql/my.cnf &> /dev/null
elif [ "$_DB_BINARY_LOG" = "YES" ] ; then
sed -i "s/.*log_bin/log_bin/g" /etc/mysql/my.cnf &> /dev/null
sed -i "s/.*expire_logs_days/expire_logs_days/g" /etc/mysql/my.cnf &> /dev/null
sed -i "s/.*max_binlog_size/max_binlog_size/g" /etc/mysql/my.cnf &> /dev/null
fi
if [ "$_DB_ENGINE" = "MyISAM" ] || [ "$_DB_ENGINE" = "InnoDB" ] ; then
sed -i "s/^default_storage_engine.*/default_storage_engine = $_DB_ENGINE/g" /etc/mysql/my.cnf &> /dev/null
fi
# Requested redo-log size (10..500 MB): edit in place when the on-disk
# logs already have that size, otherwise do a full stop/resize cycle.
if [ ! -z "$_INNODB_LOG_FILE_SIZE" ] ; then
if [ "$_INNODB_LOG_FILE_SIZE" -ge "10" ] && [ "$_INNODB_LOG_FILE_SIZE" -lt "501" ] ; then
_INNODB_LOG_FILE_SIZE_MB="${_INNODB_LOG_FILE_SIZE}M"
_INNODB_LOG_FILE_SIZE_TEST=$(grep "^innodb_log_file_size" /etc/mysql/my.cnf 2>&1)
if [[ "$_INNODB_LOG_FILE_SIZE_TEST" =~ "= $_INNODB_LOG_FILE_SIZE_MB" ]] ; then
_DO_NOTHING=YES
else
if [ "$_INNODB_LOG_FILE_SIZE_SAME" = "YES" ] ; then
sed -i "s/.*innodb_log_file_size.*/innodb_log_file_size = $_INNODB_LOG_FILE_SIZE_MB/g" /etc/mysql/my.cnf &> /dev/null
else
update_innodb_log_file_size
fi
fi
fi
fi
fi
}
#
# Replace the default 3600 second SQL timeout with _TUNE_SQL_TIMEOUT in
# my.cnf and in the minute watchdog script. Note: a crude global
# substitution of the literal "3600", as in the original.
tune_sql () {
  msg "INFO: Tuning SQL configuration"
  local _cnf
  for _cnf in /etc/mysql/my.cnf /var/xdrago/minute.sh ; do
    sed -i "s/3600/$_TUNE_SQL_TIMEOUT/g" $_cnf &> /dev/null
  done
}
#
# Restore the stock Nginx firewall scanner script, re-enable any
# commented-out limit_conn directives in the hostmaster vhosts, then
# re-verify @server_master so Aegir regenerates its Nginx config.
# Globals read: _BOA_REPO_NAME _TUNE_HOSTMASTER
restore_default_nginx () {
  msg "INFO: Restoring default Nginx configuration"
  # Fix: guard the cd. Previously a failed cd was ignored and the cp that
  # follows dropped scan_nginx into whatever the current directory was.
  if cd /var/xdrago/monitor/check ; then
    cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/tools/system/monitor/check/scan_nginx ./
  fi
  if [ -d "$_TUNE_HOSTMASTER" ] ; then
    # Un-comment limit_conn in every hostmaster vhost file.
    for Files in `find $_TUNE_HOSTMASTER/config/server_master/nginx/vhost.d -type f`
    do
      sed -i "s/#limit_conn /limit_conn /g" $Files &> /dev/null
    done
  fi
  su -s /bin/bash - aegir -c "drush @server_master provision-verify &> /dev/null"
  mrun "sleep 8"
}
#
# Apply operator-chosen Nginx tuning: firewall scanner threshold
# (_TUNE_NGINX_FIREWALL), request timeouts (_TUNE_NGINX_TIMEOUT, replacing
# the stock 60/300/180 values), and optionally comment out all limit_conn
# directives when _TUNE_NGINX_CONNECT=OFF (master config plus every
# hostmaster vhost).
tune_nginx () {
  msg "INFO: Tuning Nginx configuration"
  sed -i "s/ = 300/ = $_TUNE_NGINX_FIREWALL/g" /var/xdrago/monitor/check/scan_nginx &> /dev/null
  sed -i "s/60/$_TUNE_NGINX_TIMEOUT/g" /var/aegir/config/server_master/nginx.conf &> /dev/null
  sed -i "s/300/$_TUNE_NGINX_TIMEOUT/g" /var/aegir/config/server_master/nginx.conf &> /dev/null
  sed -i "s/180/$_TUNE_NGINX_TIMEOUT/g" /var/aegir/config/server_master/nginx.conf &> /dev/null
  if [ "$_TUNE_NGINX_CONNECT" = "OFF" ] ; then
    sed -i "s/limit_conn /#limit_conn /g" /var/aegir/config/server_master/nginx.conf &> /dev/null
    if [ -d "$_TUNE_HOSTMASTER" ] ; then
      # Fix: iterate with find -print0 / read -d '' so vhost filenames
      # containing whitespace survive; the old `for f in $(find ...)`
      # word-split every path.
      while IFS= read -r -d '' Files ; do
        sed -i "s/limit_conn /#limit_conn /g" "$Files" &> /dev/null
      done < <(find $_TUNE_HOSTMASTER/config/server_master/nginx/vhost.d -type f -print0)
    fi
  fi
}
#
# Reload every installed php-fpm service (5.6 down to 5.2), restart
# MySQL and Redis, reload Nginx, and re-apply the CSF firewall rules when
# CSF is installed and configured.
restart_services () {
  msg "INFO: Reloading services"
  local v
  for v in 56 55 54 53 52 ; do
    if [ -e "/etc/init.d/php${v}-fpm" ] ; then
      mrun "service php${v}-fpm reload" &> /dev/null
    fi
  done
  mrun "service mysql restart" &> /dev/null
  mrun "service nginx reload" &> /dev/null
  mrun "service redis-server restart" &> /dev/null
  if [ -e "/usr/sbin/csf" ] && [ -e "/etc/csf/csf.deny" ] ; then
    mrun "csf -q" &> /dev/null
  fi
}
#
#######################################
# Derive and apply PHP-FPM/Nginx worker limits from the precomputed
# _L_PHP_FPM_WORKERS baseline, harden the disable_functions lists, make
# sure OPcache is enabled, and reset the default PHP timeouts.
# Globals read:    _L_PHP_FPM_WORKERS _PHP_FPM_WORKERS _PHP_FPM_DENY
#                  _THISHOST _VMFAMILY _TUNE_PHP_CLI_TIMEOUT
#                  _L_NGINX_WORKERS
# Globals written: _LIM_FPM _PROCESS_MAX_FPM _CHILD_MAX_FPM
#                  _START_LEGACY_FPM _PHP_FPM_WORKERS (digits-only)
#                  _ZOP_INI_TEST
#######################################
tune_web_server_config () {
_LIM_FPM="$_L_PHP_FPM_WORKERS"
# Hosted (.host8.) and VS machines get at least 24 FPM workers.
if [ "$_LIM_FPM" -lt "24" ] ; then
if [[ "$_THISHOST" =~ ".host8." ]] || [ "$_VMFAMILY" = "VS" ] ; then
_LIM_FPM=24
fi
fi
# Daemon-wide process cap = 5x baseline; per-pool children = 2x baseline.
let "_PROCESS_MAX_FPM = (($_LIM_FPM * 5))"
let "_CHILD_MAX_FPM = (($_LIM_FPM * 2))"
# An explicit numeric _PHP_FPM_WORKERS overrides the computed child max.
if [ "$_PHP_FPM_WORKERS" = "AUTO" ] ; then
_DO_NOTHING=YES
else
_PHP_FPM_WORKERS=${_PHP_FPM_WORKERS//[^0-9]/}
if [ ! -z "$_PHP_FPM_WORKERS" ] && [ "$_PHP_FPM_WORKERS" -gt "0" ] ; then
_CHILD_MAX_FPM="$_PHP_FPM_WORKERS"
fi
fi
# Legacy PHP 5.2 FPM gets baseline-21 children, floored at 3.
let "_START_LEGACY_FPM = (($_LIM_FPM - 21))"
if [ "$_START_LEGACY_FPM" -lt "3" ] ; then
_START_LEGACY_FPM=3
fi
sed -i "s/process.max =.*/process.max = $_PROCESS_MAX_FPM/g" /opt/php56/etc/php56-fpm.conf &> /dev/null
sed -i "s/process.max =.*/process.max = $_PROCESS_MAX_FPM/g" /opt/php55/etc/php55-fpm.conf &> /dev/null
sed -i "s/process.max =.*/process.max = $_PROCESS_MAX_FPM/g" /opt/php54/etc/php54-fpm.conf &> /dev/null
sed -i "s/process.max =.*/process.max = $_PROCESS_MAX_FPM/g" /opt/php53/etc/php53-fpm.conf &> /dev/null
sed -i "s/pm.max_children =.*/pm.max_children = $_CHILD_MAX_FPM/g" /opt/php56/etc/pool.d/www56.conf &> /dev/null
sed -i "s/pm.max_children =.*/pm.max_children = $_CHILD_MAX_FPM/g" /opt/php55/etc/pool.d/www55.conf &> /dev/null
sed -i "s/pm.max_children =.*/pm.max_children = $_CHILD_MAX_FPM/g" /opt/php54/etc/pool.d/www54.conf &> /dev/null
sed -i "s/pm.max_children =.*/pm.max_children = $_CHILD_MAX_FPM/g" /opt/php53/etc/pool.d/www53.conf &> /dev/null
sed -i "s/max_children\">.*</max_children\">$_START_LEGACY_FPM</g" /opt/php52/etc/php52-fpm.conf &> /dev/null
# Extra functions to block: replace the "passthru," anchor in each pool's
# disable_functions list with the operator-provided deny list.
if [ ! -z "$_PHP_FPM_DENY" ] ; then
sed -i "s/passthru,/$_PHP_FPM_DENY,/g" /opt/php56/etc/pool.d/www56.conf &> /dev/null
sed -i "s/passthru,/$_PHP_FPM_DENY,/g" /opt/php55/etc/pool.d/www55.conf &> /dev/null
sed -i "s/passthru,/$_PHP_FPM_DENY,/g" /opt/php54/etc/pool.d/www54.conf &> /dev/null
sed -i "s/passthru,/$_PHP_FPM_DENY,/g" /opt/php53/etc/pool.d/www53.conf &> /dev/null
fi
# Un-block getenv/create_function/assert everywhere (remove them from the
# disable_functions lists in inis, pool configs, and the foo pool template).
sed -i "s/,getenv,/,/g" /opt/php*/etc/php*.ini &> /dev/null
sed -i "s/,getenv,/,/g" /opt/php*/etc/pool.d/*.conf &> /dev/null
sed -i "s/,getenv,/,/g" /var/xdrago/conf/fpm-pool-foo.conf &> /dev/null
sed -i "s/,create_function,/,/g" /opt/php*/etc/php*.ini &> /dev/null
sed -i "s/,create_function,/,/g" /opt/php*/etc/pool.d/*.conf &> /dev/null
sed -i "s/,create_function,/,/g" /var/xdrago/conf/fpm-pool-foo.conf &> /dev/null
sed -i "s/assert,//g" /opt/php*/etc/php*.ini &> /dev/null
sed -i "s/,assert,/,/g" /opt/php*/etc/pool.d/*.conf &> /dev/null
sed -i "s/,assert,/,/g" /var/xdrago/conf/fpm-pool-foo.conf &> /dev/null
# Extend open_basedir-style PATH lists with the Tika and per-version PHP dirs.
sed -i "s/:\/srv:\/usr\/bin\"/:\/srv:\/usr\/bin:\/opt\/tika:\/opt\/tika7:\/opt\/tika8:\/opt\/tika9\"/g" /opt/php*/etc/pool.d/*.conf &> /dev/null
sed -i "s/:\/srv:\/usr\/bin\"/:\/srv:\/usr\/bin:\/opt\/tika:\/opt\/tika7:\/opt\/tika8:\/opt\/tika9\"/g" /var/xdrago/conf/fpm-pool-foo.conf &> /dev/null
sed -i "s/:\/opt\/tika9\"/:\/opt\/tika9:\/opt\/php52:\/opt\/php53:\/opt\/php54:\/opt\/php55:\/opt\/php56\"/g" /opt/php*/etc/pool.d/*.conf &> /dev/null
sed -i "s/:\/opt\/tika9\"/:\/opt\/tika9:\/opt\/php52:\/opt\/php53:\/opt\/php54:\/opt\/php55:\/opt\/php56\"/g" /var/xdrago/conf/fpm-pool-foo.conf &> /dev/null
# Make sure opcache.enable=1 is present (inserted after fast_shutdown=1).
_ZOP_INI_TEST=$(grep "^opcache.enable=1" /opt/php*/etc/php*.ini 2>&1)
if [[ "$_ZOP_INI_TEST" =~ "opcache.enable=1" ]] ; then
_DO_NOTHING=YES
else
sed -i "s/opcache.fast_shutdown=1/opcache.fast_shutdown=1\nopcache.enable=1/g" /opt/php*/etc/php*.ini &> /dev/null
fi
# PHP-FPM INI
sed -i "s/^default_socket_timeout =.*/default_socket_timeout = 180/g" /opt/php*/etc/php*.ini &> /dev/null
sed -i "s/^max_execution_time =.*/max_execution_time = 180/g" /opt/php*/etc/php*.ini &> /dev/null
sed -i "s/^max_input_time =.*/max_input_time = 180/g" /opt/php*/etc/php*.ini &> /dev/null
# PHP-CLI INI
sed -i "s/^default_socket_timeout =.*/default_socket_timeout = 3600/g" /opt/php*/lib/php.ini &> /dev/null
sed -i "s/^max_execution_time =.*/max_execution_time = 3600/g" /opt/php*/lib/php.ini &> /dev/null
sed -i "s/^max_input_time =.*/max_input_time = 3600/g" /opt/php*/lib/php.ini &> /dev/null
# Redis config should sync with PHP-CLI
sed -i "s/^timeout .*/timeout $_TUNE_PHP_CLI_TIMEOUT/g" /etc/redis/redis.conf &> /dev/null
# Nginx worker count: operator value or a default of 4.
if [ ! -z "$_L_NGINX_WORKERS" ] ; then
sed -i "s/worker_processes.*/worker_processes $_L_NGINX_WORKERS;/g" /etc/nginx/nginx.conf &> /dev/null
else
sed -i "s/worker_processes.*/worker_processes 4;/g" /etc/nginx/nginx.conf &> /dev/null
fi
}
#
#######################################
# Size the MyISAM key buffer and InnoDB buffer pool in /etc/mysql/my.cnf.
# Runs MySQLTuner (downloaded once per installer run) to measure actual
# data sizes, then clamps each buffer between 256M and the RAM-derived
# budget in $_USE_SQL; falls back to $_USE_SQL when the tuner fails.
# Skipped entirely on Galera cluster nodes.
# Globals read:    _INSTALLER_VERSION _NOW _USE_SQL
# Globals written: _GALERA_COMP _MYSQLTUNER_TEST_RESULT _REC_MYISAM_MEM
#                  _REC_INNODB_MEM _USE_MYISAM_MEM _USE_INNODB_MEM
#                  _INNODB_BPI _THIS_USE_MEM (and scratch _RAW_* vars)
#######################################
tune_sql_memory_limits () {
# Cluster nodes manage SQL memory elsewhere; flag and skip the tuner.
if [ -e "/root/.mstr.clstr.cnf" ] || [ -e "/root/.wbhd.clstr.cnf" ] || [ -e "/root/.dbhd.clstr.cnf" ] ; then
_GALERA_COMP=NO
fi
if [ ! -e "/var/opt/mysqltuner-$_INSTALLER_VERSION-$_NOW.txt" ] && [ -z "$_GALERA_COMP" ] ; then
msg "INFO: Running MySQLTuner check on all databases..."
msg "NOTE! This step may take a LONG time, please wait..."
_MYSQLTUNER_TEST_RESULT=OK
rm -f /var/opt/mysqltuner*
# Fetch the tuner from GitHub, with files.aegir.cc as fallback mirror.
curl -L --max-redirs 10 -k -s --retry 10 --retry-delay 15 -A iCab "https://raw.githubusercontent.com/major/MySQLTuner-perl/master/mysqltuner.pl" -o /var/opt/mysqltuner.pl
if [ ! -e "/var/opt/mysqltuner.pl" ] ; then
curl -L --max-redirs 10 -k -s --retry 10 --retry-delay 15 -A iCab "http://files.aegir.cc/dev/mysqltuner.pl" -o /var/opt/mysqltuner.pl
fi
if [ -e "/var/opt/mysqltuner.pl" ] ; then
perl /var/opt/mysqltuner.pl > /var/opt/mysqltuner-$_INSTALLER_VERSION-$_NOW.txt 2>&1
fi
fi
if [ -e "/var/opt/mysqltuner.pl" ] && [ -e "/var/opt/mysqltuner-$_INSTALLER_VERSION-$_NOW.txt" ] ; then
# Parse the measured MyISAM data size and InnoDB data size out of the report.
_REC_MYISAM_MEM=`cat /var/opt/mysqltuner-$_INSTALLER_VERSION-$_NOW.txt | grep "Data in MyISAM tables" | cut -d: -f2 | awk '{ print $1}'`
_REC_INNODB_MEM=`cat /var/opt/mysqltuner-$_INSTALLER_VERSION-$_NOW.txt | grep "data size:" | cut -d/ -f3 | awk '{ print $1}'`
_MYSQLTUNER_TEST=$(cat /var/opt/mysqltuner-$_INSTALLER_VERSION-$_NOW.txt 2>&1)
cp -a /var/opt/mysqltuner-$_INSTALLER_VERSION-$_NOW.txt /var/xdrago/log/
if [ -z "$_REC_INNODB_MEM" ] || [[ "$_MYSQLTUNER_TEST" =~ "Cannot calculate MyISAM index" ]] || [[ "$_MYSQLTUNER_TEST" =~ "InnoDB is enabled but isn" ]] ; then
_MYSQLTUNER_TEST_RESULT=FAIL
msg "ALRT! The MySQLTuner test failed!"
msg "ALRT! Please review /var/xdrago/log/mysqltuner-$_INSTALLER_VERSION-$_NOW.txt for details"
msg "ALRT! We will use some sane SQL defaults instead, do not worry!"
fi
###--------------------###
# MyISAM key_buffer_size: measured size (GB converted to MB), capped at
# the $_USE_SQL budget and floored at 256M; budget on tuner failure.
if [ ! -z "$_REC_MYISAM_MEM" ] && [ "$_MYSQLTUNER_TEST_RESULT" = "OK" ] ; then
_RAW_MYISAM_MEM=`echo $_REC_MYISAM_MEM | sed "s/[A-Z]//g"`
if [[ "$_REC_MYISAM_MEM" =~ "G" ]] ; then
let "_RAW_MYISAM_MEM = (($_RAW_MYISAM_MEM * 1024))"
fi
if [ "$_RAW_MYISAM_MEM" -gt "$_USE_SQL" ] ; then
_USE_MYISAM_MEM="$_USE_SQL"
else
_USE_MYISAM_MEM="$_RAW_MYISAM_MEM"
fi
if [ "$_USE_MYISAM_MEM" -lt "256" ] || [ -z "$_USE_MYISAM_MEM" ] ; then
_USE_MYISAM_MEM="$_USE_SQL"
fi
_USE_MYISAM_MEM="${_USE_MYISAM_MEM}M"
sed -i "s/^key_buffer_size.*/key_buffer_size = $_USE_MYISAM_MEM/g" /etc/mysql/my.cnf &> /dev/null
else
_USE_MYISAM_MEM="${_USE_SQL}M"
if [ "$_MYSQLTUNER_TEST_RESULT" = "FAIL" ] ; then
msg "ALRT! _USE_MYISAM_MEM is $_USE_MYISAM_MEM because _REC_MYISAM_MEM was empty!"
fi
sed -i "s/^key_buffer_size.*/key_buffer_size = $_USE_MYISAM_MEM/g" /etc/mysql/my.cnf &> /dev/null
fi
###--------------------###
# InnoDB buffer pool: measured size + 10% headroom (bc used for the GB
# fractions and rounding), capped at the budget, floored at 256M; also
# adds one buffer-pool instance per GiB when the pool exceeds 1G.
if [ ! -z "$_REC_INNODB_MEM" ] && [ "$_MYSQLTUNER_TEST_RESULT" = "OK" ] ; then
_RAW_INNODB_MEM=`echo $_REC_INNODB_MEM | sed "s/[A-Z]//g"`
if [[ "$_REC_INNODB_MEM" =~ "G" ]] ; then
_RAW_INNODB_MEM=`echo "$_RAW_INNODB_MEM * 1024" | bc -l`
fi
_RAW_INNODB_MEM=$(echo "($_RAW_INNODB_MEM+0.5)/1" | bc)
if [ "$_RAW_INNODB_MEM" -gt "$_USE_SQL" ] ; then
_USE_INNODB_MEM="$_USE_SQL"
else
_RAW_INNODB_MEM=$(echo "scale=2; ($_RAW_INNODB_MEM * 1.1)" | bc)
_USE_INNODB_MEM=$(echo "($_RAW_INNODB_MEM+0.5)/1" | bc)
fi
_INNODB_BPI="0"
_INNODB_BPI=`echo "scale=0; $_USE_INNODB_MEM/1024" | bc`;
if [ "$_INNODB_BPI" -gt "1" ] ; then
sed -i "s/innodb_file_per_table = 1/innodb_file_per_table = 1\ninnodb_buffer_pool_instances = $_INNODB_BPI/g" /etc/mysql/my.cnf &> /dev/null
fi
if [ "$_USE_INNODB_MEM" -lt "256" ] || [ -z "$_USE_INNODB_MEM" ] ; then
_USE_INNODB_MEM="$_USE_SQL"
fi
_USE_INNODB_MEM="${_USE_INNODB_MEM}M"
sed -i "s/^innodb_buffer_pool_size.*/innodb_buffer_pool_size = $_USE_INNODB_MEM/g" /etc/mysql/my.cnf &> /dev/null
else
_USE_INNODB_MEM="${_USE_SQL}M"
msg "ALRT! _USE_INNODB_MEM is $_USE_INNODB_MEM because _REC_INNODB_MEM was empty!"
sed -i "s/^innodb_buffer_pool_size.*/innodb_buffer_pool_size = $_USE_INNODB_MEM/g" /etc/mysql/my.cnf &> /dev/null
fi
else
# No tuner report at all: replace the template's 181M placeholder with the
# RAM-derived budget.
_THIS_USE_MEM="${_USE_SQL}M"
if [ "$_MYSQLTUNER_TEST_RESULT" = "FAIL" ] && [ -z "$_GALERA_COMP" ] ; then
msg "ALRT! _USE_MYISAM_MEM is $_THIS_USE_MEM because _REC_MYISAM_MEM was empty!"
msg "ALRT! _USE_INNODB_MEM is $_THIS_USE_MEM because _REC_INNODB_MEM was empty!"
fi
sed -i "s/= 181/= $_USE_SQL/g" /etc/mysql/my.cnf &> /dev/null
fi
}
#
# Compute default memory/worker limits from detected virtualization family,
# CPU count and RAM, then write them into the PHP, MySQL, Redis and Jetty/Solr
# config files via in-place sed edits.
# Globals read:  _THISHOST, _RESERVED_RAM, _PHP_FPM_WORKERS, _NGINX_WORKERS,
#                _BOA_REPO_NAME, _CUSTOM_CONFIG_SQL
# Globals set:   _VMFAMILY, _CPU_*, _RAM, _USE_*, _L_*_WORKERS, _MXC_SQL,
#                _QCE_SQL, _RND_SQL, _JBF_SQL, _TMP_SQL, _SRT_SQL, _MAX_MEM_REDIS
# Side effects:  edits files under /opt/php*, /etc/mysql, /etc/redis,
#                /etc/default/jetty*, /var/xdrago/conf; calls
#                tune_sql_memory_limits and tune_web_server_config (defined
#                elsewhere in this script).
tune_memory_limits () {
  msg "INFO: Default Memory Tuning"
  # Detect the hosting environment: cloud-init markers in fstab hint at AWS.
  _AWS_TEST_A=$(grep cloudimg /etc/fstab)
  _AWS_TEST_B=$(grep cloudconfig /etc/fstab)
  _ETH_TEST=`ifconfig 2>&1`
  _VM_TEST=`uname -a 2>&1`
  # venet0 / bean_counters => OpenVZ; .host8. hostnames => Tuned Gold boxes;
  # otherwise assume Xen-style full virtualization.
  if [[ "$_ETH_TEST" =~ "venet0" ]] ; then
    _VMFAMILY="VZ"
  elif [ -e "/proc/bean_counters" ] ; then
    _VMFAMILY="VZ"
  elif [[ "$_THISHOST" =~ ".host8." ]] && [ -e "/boot/grub/menu.lst" ] ; then
    _VMFAMILY="TG"
  elif [[ "$_THISHOST" =~ ".host8." ]] && [ -e "/boot/grub/grub.cfg" ] ; then
    _VMFAMILY="TG"
  else
    _VMFAMILY="XEN"
  fi
  # A "beng" kernel string marks the vendor's VS platform -- TODO confirm.
  if [[ "$_VM_TEST" =~ beng ]] ; then
    _VMFAMILY="VS"
  fi
  if [[ "$_AWS_TEST_A" =~ "cloudimg" ]] || [[ "$_AWS_TEST_B" =~ "cloudconfig" ]] ; then
    _VMFAMILY="AWS"
  fi
  # CPU count: prefer nproc when available, cross-check against /proc/cpuinfo,
  # strip non-digits defensively, and floor the result at 1.
  _CPU_INFO=$(grep -c processor /proc/cpuinfo)
  _CPU_INFO=${_CPU_INFO//[^0-9]/}
  _NPROC_TEST=$(which nproc)
  if [ -z "$_NPROC_TEST" ] ; then
    _CPU_NR="$_CPU_INFO"
  else
    _CPU_NR=`nproc`
  fi
  _CPU_NR=${_CPU_NR//[^0-9]/}
  if [ ! -z "$_CPU_NR" ] && [ ! -z "$_CPU_INFO" ] && [ "$_CPU_NR" -gt "$_CPU_INFO" ] && [ "$_CPU_INFO" -gt "0" ] ; then
    _CPU_NR="$_CPU_INFO"
  fi
  if [ -z "$_CPU_NR" ] || [ "$_CPU_NR" -lt "1" ] ; then
    _CPU_NR=1
  fi
  # Derived worker budgets per platform family, each with a lower bound.
  let "_CPU_MX = (($_CPU_NR * 2))"
  if [ "$_CPU_MX" -lt "4" ] ; then
    _CPU_MX=4
  fi
  let "_CPU_TG = (($_CPU_NR / 2))"
  if [ "$_CPU_TG" -lt "4" ] ; then
    _CPU_TG=4
  fi
  let "_CPU_VS = (($_CPU_NR / 12))"
  if [ "$_CPU_VS" -lt "2" ] ; then
    _CPU_VS=2
  fi
  # Total RAM in MB, minus an optional operator-reserved amount.
  _RAM=`free -mto | grep Mem: | awk '{ print $2 }'`
  if [ "$_RESERVED_RAM" -gt "0" ] ; then
    let "_RAM = (($_RAM - $_RESERVED_RAM))"
  fi
  let "_USE = (($_RAM / 4))"
  # SQL budget: 1/8 of RAM normally, 1/18 on VS guests without /root/.tg.cnf,
  # never below 256 MB.
  if [ "$_VMFAMILY" = "VS" ] || [[ "$_THISHOST" =~ ".host8." ]] ; then
    if [ "$_VMFAMILY" = "VS" ] ; then
      if [ -e "/root/.tg.cnf" ] ; then
        let "_USE_SQL = (($_RAM / 8))"
      else
        let "_USE_SQL = (($_RAM / 18))"
      fi
    else
      let "_USE_SQL = (($_RAM / 8))"
    fi
  else
    let "_USE_SQL = (($_RAM / 8))"
  fi
  if [ "$_USE_SQL" -lt "256" ] ; then
    _USE_SQL=256
  fi
  _TMP_SQL="${_USE_SQL}M"
  let "_SRT_SQL = (($_USE_SQL * 2))"
  _SRT_SQL="${_SRT_SQL}K"
  # Size tiers by _USE (RAM/4): medium (512..1023 MB), large (>=1024 MB,
  # further split by VM family), and small fallback below 512 MB.
  if [ "$_USE" -ge "512" ] && [ "$_USE" -lt "1024" ] ; then
    _USE_PHP=512
    _USE_OPC=512
    _MXC_SQL=20
    _QCE_SQL=128M
    _RND_SQL=8M
    _JBF_SQL=4M
    if [ "$_PHP_FPM_WORKERS" = "AUTO" ] ; then
      _L_PHP_FPM_WORKERS=12
    else
      _L_PHP_FPM_WORKERS=$_PHP_FPM_WORKERS
    fi
    if [ "$_NGINX_WORKERS" = "AUTO" ] ; then
      _L_NGINX_WORKERS=$_CPU_MX
    else
      _L_NGINX_WORKERS=$_NGINX_WORKERS
    fi
  elif [ "$_USE" -ge "1024" ] ; then
    if [ "$_VMFAMILY" = "XEN" ] || [ "$_VMFAMILY" = "AWS" ] ; then
      _USE_PHP=512
      _USE_OPC=1024
      _MXC_SQL=30
      _QCE_SQL=128M
      _RND_SQL=8M
      _JBF_SQL=4M
      if [ "$_PHP_FPM_WORKERS" = "AUTO" ] ; then
        _L_PHP_FPM_WORKERS=24
      else
        _L_PHP_FPM_WORKERS=$_PHP_FPM_WORKERS
      fi
      if [ "$_NGINX_WORKERS" = "AUTO" ] ; then
        _L_NGINX_WORKERS=$_CPU_MX
      else
        _L_NGINX_WORKERS=$_NGINX_WORKERS
      fi
    elif [ "$_VMFAMILY" = "VS" ] || [ "$_VMFAMILY" = "TG" ] ; then
      # Boxes with grub or a .tg.cnf marker get the biggest budgets and a
      # raised PHP children limit (64000 -> 128000) in all php*.ini files.
      if [ -e "/boot/grub/grub.cfg" ] || [ -e "/boot/grub/menu.lst" ] || [ -e "/root/.tg.cnf" ] ; then
        _USE_PHP=1024
        _USE_OPC=1024
        _MXC_SQL=100
        _QCE_SQL=256M
        _RND_SQL=8M
        _JBF_SQL=4M
        if [ "$_PHP_FPM_WORKERS" = "AUTO" ] ; then
          _L_PHP_FPM_WORKERS=24
        else
          _L_PHP_FPM_WORKERS=$_PHP_FPM_WORKERS
        fi
        if [ "$_NGINX_WORKERS" = "AUTO" ] ; then
          _L_NGINX_WORKERS=$_CPU_TG
        else
          _L_NGINX_WORKERS=$_NGINX_WORKERS
        fi
        sed -i "s/64000/128000/g" /opt/php56/etc/php56.ini &> /dev/null
        sed -i "s/64000/128000/g" /opt/php55/etc/php55.ini &> /dev/null
        sed -i "s/64000/128000/g" /opt/php54/etc/php54.ini &> /dev/null
        sed -i "s/64000/128000/g" /opt/php53/etc/php53.ini &> /dev/null
        sed -i "s/64000/128000/g" /opt/php52/etc/php52.ini &> /dev/null
      else
        _USE_PHP=1024
        _USE_OPC=1024
        _MXC_SQL=30
        _QCE_SQL=64M
        _RND_SQL=2M
        _JBF_SQL=2M
        if [ "$_PHP_FPM_WORKERS" = "AUTO" ] ; then
          _L_PHP_FPM_WORKERS=6
        else
          _L_PHP_FPM_WORKERS=$_PHP_FPM_WORKERS
        fi
        if [ "$_NGINX_WORKERS" = "AUTO" ] ; then
          _L_NGINX_WORKERS=$_CPU_VS
        else
          _L_NGINX_WORKERS=$_NGINX_WORKERS
        fi
      fi
    else
      _USE_PHP=256
      _USE_OPC=256
      _MXC_SQL=10
      _QCE_SQL=32M
      _RND_SQL=2M
      _JBF_SQL=2M
      if [ "$_PHP_FPM_WORKERS" = "AUTO" ] ; then
        _L_PHP_FPM_WORKERS=12
      else
        _L_PHP_FPM_WORKERS=$_PHP_FPM_WORKERS
      fi
      if [ "$_NGINX_WORKERS" = "AUTO" ] ; then
        _L_NGINX_WORKERS=$_CPU_MX
      else
        _L_NGINX_WORKERS=$_NGINX_WORKERS
      fi
    fi
  else
    # Small systems: PHP/opcache get the raw _USE value (RAM/4 in MB).
    _USE_PHP="$_USE"
    _USE_OPC="$_USE"
    _MXC_SQL=10
    _QCE_SQL=32M
    _RND_SQL=1M
    _JBF_SQL=1M
    if [ "$_PHP_FPM_WORKERS" = "AUTO" ] ; then
      _L_PHP_FPM_WORKERS=6
    else
      _L_PHP_FPM_WORKERS=$_PHP_FPM_WORKERS
    fi
    if [ "$_NGINX_WORKERS" = "AUTO" ] ; then
      _L_NGINX_WORKERS=$_CPU_MX
    else
      _L_NGINX_WORKERS=$_NGINX_WORKERS
    fi
  fi
  # Jetty heap is fixed from _USE_OPC *before* the OpenVZ override below,
  # so VZ guests keep the larger Java heap but get a 64 MB opcache.
  _USE_JETTY="-Xmx${_USE_OPC}m"
  if [ "$_VMFAMILY" = "VZ" ] ; then
    _USE_OPC=64
  fi
  # FPM pool gets half of the PHP budget; both floored at 196 MB.
  let "_USE_FPM = (($_USE_PHP / 2))"
  if [ "$_USE_FPM" -lt "196" ] ; then
    _USE_FPM=196
  fi
  if [ "$_USE_PHP" -lt "196" ] ; then
    _USE_PHP=196
  fi
  if [ ! -e "/var/xdrago/conf/fpm-pool-foo.conf" ] ; then
    mkdir -p /var/xdrago/conf
  fi
  cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/conf/fpm-pool-foo.conf /var/xdrago/conf/fpm-pool-foo.conf
  # The shipped config templates use the placeholder numbers 256 and 181;
  # the seds below substitute the computed budgets into each PHP version
  # that is actually installed.
  if [ -e "/opt/etc/fpm/fpm-pool-common.conf" ] ; then
    sed -i "s/256/$_USE_FPM/g" /opt/etc/fpm/fpm-pool-common.conf &> /dev/null
  fi
  if [ -e "/opt/php56/etc/php56.ini" ] ; then
    sed -i "s/256/$_USE_FPM/g" /opt/php56/etc/php56.ini &> /dev/null
    sed -i "s/181/$_USE_OPC/g" /opt/php56/etc/php56.ini &> /dev/null
    sed -i "s/256/$_USE_PHP/g" /opt/php56/lib/php.ini &> /dev/null
  fi
  if [ -e "/opt/php55/etc/php55.ini" ] ; then
    sed -i "s/256/$_USE_FPM/g" /opt/php55/etc/php55.ini &> /dev/null
    sed -i "s/181/$_USE_OPC/g" /opt/php55/etc/php55.ini &> /dev/null
    sed -i "s/256/$_USE_PHP/g" /opt/php55/lib/php.ini &> /dev/null
  fi
  if [ -e "/opt/php54/etc/php54.ini" ] ; then
    sed -i "s/256/$_USE_FPM/g" /opt/php54/etc/php54.ini &> /dev/null
    sed -i "s/181/$_USE_OPC/g" /opt/php54/etc/php54.ini &> /dev/null
    sed -i "s/256/$_USE_PHP/g" /opt/php54/lib/php.ini &> /dev/null
  fi
  if [ -e "/opt/php53/etc/php53.ini" ] ; then
    sed -i "s/256/$_USE_FPM/g" /opt/php53/etc/php53.ini &> /dev/null
    sed -i "s/181/$_USE_OPC/g" /opt/php53/etc/php53.ini &> /dev/null
    sed -i "s/256/$_USE_PHP/g" /opt/php53/lib/php.ini &> /dev/null
  fi
  if [ -e "/opt/php52/etc/php52.ini" ] ; then
    sed -i "s/256/$_USE_FPM/g" /opt/php52/etc/php52.ini &> /dev/null
    sed -i "s/181/$_USE_OPC/g" /opt/php52/etc/php52.ini &> /dev/null
    sed -i "s/256/$_USE_PHP/g" /opt/php52/lib/php.ini &> /dev/null
  fi
  # MySQL tuning is skipped entirely when the operator manages my.cnf
  # themselves; 191/292 are template placeholders for connection limits.
  if [ "$_CUSTOM_CONFIG_SQL" = "NO" ] ; then
    tune_sql_memory_limits
    _UXC_SQL=$_MXC_SQL
    let "_UXC_SQL = (($_MXC_SQL / 2))"
    sed -i "s/= 191/= $_UXC_SQL/g" /etc/mysql/my.cnf &> /dev/null
    sed -i "s/= 292/= $_MXC_SQL/g" /etc/mysql/my.cnf &> /dev/null
    sed -i "s/^tmp_table_size.*/tmp_table_size = $_TMP_SQL/g" /etc/mysql/my.cnf &> /dev/null
    sed -i "s/^max_heap_table_size.*/max_heap_table_size = $_TMP_SQL/g" /etc/mysql/my.cnf &> /dev/null
    sed -i "s/^myisam_sort_buffer_size.*/myisam_sort_buffer_size = $_SRT_SQL/g" /etc/mysql/my.cnf &> /dev/null
    sed -i "s/^query_cache_size.*/query_cache_size = $_QCE_SQL/g" /etc/mysql/my.cnf &> /dev/null
    sed -i "s/^read_rnd_buffer_size.*/read_rnd_buffer_size = $_RND_SQL/g" /etc/mysql/my.cnf &> /dev/null
    sed -i "s/^join_buffer_size.*/join_buffer_size = $_JBF_SQL/g" /etc/mysql/my.cnf &> /dev/null
  fi
  # Redis cap mirrors the opcache budget.
  _MAX_MEM_REDIS="${_USE_OPC}MB"
  sed -i "s/^maxmemory .*/maxmemory $_MAX_MEM_REDIS/g" /etc/redis/redis.conf &> /dev/null
  # Point each installed Jetty at its matching Solr home with the new heap.
  if [ -e "/etc/default/jetty9" ] && [ -e "/opt/solr4" ] ; then
    sed -i "s/^JAVA_OPTIONS.*/JAVA_OPTIONS=\"-Xms8m $_USE_JETTY -Djava.awt.headless=true -Dsolr.solr.home=\/opt\/solr4 \$JAVA_OPTIONS\" # Options/g" /etc/default/jetty9 &> /dev/null
  fi
  if [ -e "/etc/default/jetty8" ] && [ -e "/opt/solr3" ] ; then
    sed -i "s/^JAVA_OPTIONS.*/JAVA_OPTIONS=\"-Xms8m $_USE_JETTY -Djava.awt.headless=true -Dsolr.solr.home=\/opt\/solr3 \$JAVA_OPTIONS\" # Options/g" /etc/default/jetty8 &> /dev/null
  fi
  if [ -e "/etc/default/jetty7" ] && [ -e "/opt/solr1" ] ; then
    sed -i "s/^JAVA_OPTIONS.*/JAVA_OPTIONS=\"-Xms8m $_USE_JETTY -Djava.awt.headless=true -Dsolr.solr.home=\/opt\/solr1 \$JAVA_OPTIONS\" # Options/g" /etc/default/jetty7 &> /dev/null
  fi
  tune_web_server_config
}
#
# Decide which Git mirror to clone the BOA repository from.
# Honors _FORCE_GIT_MIRROR (drupal/github => GitHub, gitorious => mirror);
# otherwise probes both servers on the git protocol port (9418).
# Globals set: _GITHUB_WORKS, _GITORIOUS_WORKS, _BOA_REPO_NAME,
#              _BOA_REPO_GIT_URL, _BOA_REPO_GIT_URLX
# Exits via clean_pid_exit when no server is reachable.
#
# Fix: the original used `elif` for the second probe, so Gitorious was only
# tested when the GitHub probe *failed to fail* -- if GitHub was down,
# _GITORIOUS_WORKS kept its untested YES and the "none responded" abort path
# was unreachable. Each server is now probed independently. The abort
# message also said "8 seconds" while `netcat -w 14` waits up to 14.
check_git_repos_status () {
  _GITHUB_WORKS=NO
  _GITORIOUS_WORKS=NO
  # "drupal" is an alias for the GitHub master repository.
  if [ "$_FORCE_GIT_MIRROR" = "drupal" ] ; then
    _FORCE_GIT_MIRROR=github
  fi
  if [ "$_FORCE_GIT_MIRROR" = "github" ] ; then
    msg "INFO: We will use forced GitHub repository without testing connection"
    _GITHUB_WORKS=YES
    sleep 1
  elif [ "$_FORCE_GIT_MIRROR" = "gitorious" ] ; then
    msg "INFO: We will use forced Gitorious mirror without testing connection"
    _GITORIOUS_WORKS=YES
    sleep 1
  else
    msg "INFO: Testing repository mirror servers availability..."
    sleep 1
    # Probe each mirror independently so the fallback decision below is
    # based on real connectivity for both of them.
    if netcat -w 14 -z github.com 9418 ; then
      _GITHUB_WORKS=YES
    else
      msg "WARN: The GitHub master repository server doesn't respond..."
    fi
    if netcat -w 14 -z code.aegir.cc 9418 ; then
      _GITORIOUS_WORKS=YES
    else
      msg "WARN: The Gitorious mirror repository server doesn't respond..."
    fi
  fi
  # Prefer the GitHub master; fall back to the Gitorious mirror; abort when
  # neither is reachable.
  if [ "$_GITHUB_WORKS" = "YES" ] ; then
    _BOA_REPO_NAME="boa"
    _BOA_REPO_GIT_URL="git://github.com/omega8cc"
    msg "INFO: GitHub master repository will be used"
  elif [ "$_GITORIOUS_WORKS" = "YES" ] ; then
    _BOA_REPO_NAME="barracuda-octopus"
    _BOA_REPO_GIT_URL="git://code.aegir.cc/aegir"
    msg "INFO: Gitorious mirror repository will be used"
  else
    cat <<EOF
None of repository servers responded in 14 seconds,
so we can't continue this installation.
Please try again later or check if your firewall has port 9418 open.
Bye.
EOF
    clean_pid_exit
  fi
  # Backslash-escape the slashes for later use inside sed expressions.
  _BOA_REPO_GIT_URLX=${_BOA_REPO_GIT_URL//\//\\\/}
}
###---### init
#
# Drop a pid marker so concurrent runs can be detected; removed on exit.
touch /var/run/boa_run.pid
#
# Default to the GitHub master repository; the mirror probe below may
# override these when a HEAD (development) install is requested.
_BOA_REPO_NAME="boa"
_BOA_REPO_GIT_URL="git://github.com/omega8cc"
# Slash-escaped variant for use inside sed expressions.
_BOA_REPO_GIT_URLX=${_BOA_REPO_GIT_URL//\//\\\/}
# Only HEAD installs clone from git, so only they need the mirror check.
if [ "$_AEGIR_VERSION" = "HEAD" ] ; then
  check_git_repos_status
fi
#
#
# Require root: the tuner rewrites system config files and restarts services.
# Also repair /dev/fd (needed for process substitution) on systems where it
# is missing but /proc/self/fd exists.
#
# Fix: the original error message contained bare backticks inside double
# quotes -- "please `sudo -i` first" -- which the shell parsed as command
# substitution and actually *executed* `sudo -i` instead of printing the
# hint. The backticks are now escaped so the text is printed literally.
if [ "$(whoami)" = "root" ] ; then
  chmod a+w /dev/null
  if [ ! -e "/dev/fd" ] ; then
    if [ -e "/proc/self/fd" ] ; then
      rm -rf /dev/fd
      ln -s /proc/self/fd /dev/fd
    fi
  fi
  msg "INFO: This script is ran as a root user"
else
  msg "ERROR: This script should be ran as a root user - please \`sudo -i\` first"
  msg "Bye"
  clean_pid_exit
fi
#
#
# Refuse to run unless the matching BARRACUDA version has already configured
# this system: its log file must exist and mention $_INSTALLER_VERSION.
if [ ! -f "/var/log/barracuda_log.txt" ] ; then
  msg "ERROR: This script should be used only when the same version of BARRACUDA was used before"
  msg "Your system has to be configured/upgraded by BARRACUDA version $_INSTALLER_VERSION first"
  msg "Bye"
  clean_pid_exit
else
  _VERSIONS_TEST=`cat /var/log/barracuda_log.txt`
  # Quoted RHS of =~ makes this a literal substring match, not a regex.
  if [[ "$_VERSIONS_TEST" =~ "$_INSTALLER_VERSION" ]] ; then
    _VERSIONS_TEST_RESULT=OK
  else
    msg "ERROR: This script should be used only when the same version of BARRACUDA was used before"
    msg "Your system has to be configured/upgraded by BARRACUDA version $_INSTALLER_VERSION first"
    msg "Bye"
    clean_pid_exit
  fi
fi
#
#
# Prepare a scratch workspace under /opt/tmp and fetch the BOA sources:
# git clone for HEAD installs, a stable tarball from files.aegir.cc otherwise.
rm -f /var/run/aegir_upgrade.pid
rm -f /opt/tmp/testecho*
_SRCDIR=/opt/tmp/files
mkdir -p $_SRCDIR
chmod -R 777 /opt/tmp &> /dev/null
cd /opt/tmp
rm -f -r /opt/tmp/$_BOA_REPO_NAME
if [ "$_AEGIR_VERSION" = "HEAD" ] ; then
  git clone --branch $_BRANCH_BOA $_BOA_REPO_GIT_URL/$_BOA_REPO_NAME.git &> /dev/null
else
  # NOTE(review): -k disables TLS verification and the URL is plain http;
  # the tarball is fetched without integrity checking.
  curl -L --max-redirs 10 -k -s --retry 10 --retry-delay 15 -A iCab "http://files.aegir.cc/versions/stable/tar/boa-$_INSTALLER_VERSION.tar.gz" | tar -xzf -
  _BOA_REPO_NAME="boa"
fi
#
# Get spinner
# (a progress-indicator helper shipped with the BOA sources)
cd $_SRCDIR
rm -f spinner*
cp -af /opt/tmp/$_BOA_REPO_NAME/aegir/helpers/spinner ./
chmod +x spinner &> /dev/null
#
# Create tmp stuff
_LOG=/var/backups/bond-$_NOW.log
_SILENT=/opt/tmp/silent.log
#
#
###---### Tune Your Aegir Hosting System
#
# Show the operator the values that will be applied; the _TUNE_* variables
# are presumably set by the sourced barracuda config -- TODO confirm.
echo " "
msg "TUNER START -> checkpoint: "
cat <<EOF
  * Aegir Satellite Instance to tune: $_TUNE_HOSTMASTER
  * Nginx server mod_evasive will be set to $_TUNE_NGINX_CONNECT
  * Nginx server fastcgi timeout will be set to $_TUNE_NGINX_TIMEOUT seconds
  * Nginx firewall limit of allowed requests will be set to $_TUNE_NGINX_FIREWALL/300
  * Database server timeout will be set to $_TUNE_SQL_TIMEOUT seconds
  * PHP-FPM server timeout will be set to $_TUNE_PHP_FPM_TIMEOUT seconds
  * PHP-CLI drush timeout will be set to $_TUNE_PHP_CLI_TIMEOUT seconds
EOF
echo " "
# Main flow: on confirmation, apply the full tuning sequence; on refusal,
# restore defaults instead. Both paths stop cron first (unless the
# /root/.upstart.cnf marker opts out), re-source the barracuda config, run
# the shared memory tuning, and restart services. The helper functions are
# defined earlier in this script.
if prompt_yes_no "Are you ready to tune your Aegir Hosting System with above values?" ; then
  true
  if [ ! -e "/root/.upstart.cnf" ] ; then
    msg "INFO: We will stop cron and then wait 30 seconds..."
    service cron stop &> /dev/null
    mrun "sleep 30"
  fi
  msg "INFO: Tuning in progress, please wait..."
  if [ -e "/root/.barracuda.cnf" ] ; then
    source /root/.barracuda.cnf
  fi
  restore_default_php
  update_php_conf
  tune_php
  restore_default_sql
  tune_sql
  restore_default_nginx
  tune_nginx
  tune_memory_limits
  restart_services
  msg "INFO: Tuning completed"
else
  # Refusal path: same restore steps but without the tune_* overrides.
  if [ ! -e "/root/.upstart.cnf" ] ; then
    msg "INFO: We will stop cron and then wait 30 seconds..."
    service cron stop &> /dev/null
    mrun "sleep 30"
  fi
  if [ -e "/root/.barracuda.cnf" ] ; then
    source /root/.barracuda.cnf
  fi
  restore_default_php
  update_php_conf
  restore_default_sql
  restore_default_nginx
  tune_memory_limits
  restart_services
  msg "INFO: Tuning stopped and default settings restored"
fi
# Cleanup: wipe the scratch areas, drop the pid marker, restart cron.
rm -f -r /var/opt/*
rm -f -r /opt/tmp/*
rm -f /var/run/boa_run.pid
if [ ! -e "/root/.upstart.cnf" ] ; then
  service cron start &> /dev/null
fi
msg "INFO: Cron started again"
msg "BYE!"
###----------------------------------------###
###
### Barracuda-Octopus-Nginx-Drupal Tuner
### Copyright (C) 2010-2015 Omega8.cc
### noc@omega8.cc www.omega8.cc
###
###----------------------------------------###
| true |
0d29167f2b4f07395576fa63b1e8e40fa14cbeae | Shell | ETHZ/MPAF | /scripts/SubScanPerFile.sh | UTF-8 | 2,489 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Submit MPAF analysis jobs to an SGE cluster (qsub), one per sample file.
# Reads $FNAME line by line (path + file name); background samples get a
# single job from the template card, while the signal sample ($SIGNAME) is
# expanded into one job per mass point listed in $MASSFILE.
# Arguments:
#   $1 TEMPCARD - template cfg name under $MPAF/cfg (without .cfg)
#   $2 ANTYPE   - analysis type tag used in file/job names
#   $3 MASSFILE - text file with one mass point per line
#   $4 SIGNAME  - signal sample name
#   $5 FNAME    - sample list file (columns: path file)
# Requires $MPAF to be set in the environment and qsub/python on PATH.
TEMPCARD=$1
ANTYPE=$2
MASSFILE=$3
SIGNAME=$4
FNAME=$5
#TEMPCARD=susy3lScan_template
#ANTYPE=SUSY3L
#MASSFILE=massT6ttWW.txt
#SIGNAME=T6ttWW
if [[ ! -d $MPAF/cfg/tmpFiles ]]; then
    mkdir -p $MPAF/cfg/tmpFiles
fi
#root -b -l getMassBenchmarks.C <<EOF
#.q
#EOF
N=0
while read fileLine; do
    # Skip comment lines in the sample list.
    if [[ "${fileLine:0:1}" == "#" ]]; then
	continue
    fi
    # First column is the storage path, second the sample name.
    path=`echo $fileLine | awk '{print $1}'`
    file=`echo $fileLine | awk '{print $2}'`
    echo $path $file
    if [[ "$SIGNAME" != "$file" ]]; then
	# Background sample: instantiate the template card by replacing the
	# placeholders (MASSBENCH/FILE/PATH/FSIM/SIGNAME) and submit one job.
	cp $MPAF/cfg/${TEMPCARD}.cfg $MPAF/cfg/tmpFiles/${ANTYPE}${file}.cfg
	sed -i 's|MASSBENCH||' $MPAF/cfg/tmpFiles/${ANTYPE}${file}.cfg
	sed -i 's|FILE|'$file'|' $MPAF/cfg/tmpFiles/${ANTYPE}${file}.cfg
	sed -i 's|PATH|'$path'|' $MPAF/cfg/tmpFiles/${ANTYPE}${file}.cfg
	sed -i 's|FSIM|0|' $MPAF/cfg/tmpFiles/${ANTYPE}${file}.cfg
	sed -i 's|SIGNAME|NOSIG|' $MPAF/cfg/tmpFiles/${ANTYPE}${file}.cfg
	qsub -q all.q -N MPAFjob${ANTYPE}${file} -o $MPAF/workdir/logs/${ANTYPE}/${ANTYPE}${file}.out -e $MPAF/workdir/logs/${ANTYPE}/${ANTYPE}${file}.err $MPAF/scripts/submit.sh $MPAF/cfg/tmpFiles/${ANTYPE}${file}.cfg $HOME
    else
	# Signal sample: one job per mass point; the python helper writes the
	# per-mass cfg (and may decline, in which case no file appears).
	while read line; do
	    mass=$line
	    python splitLineScan.py $path $mass $SIGNAME $file $ANTYPE $TEMPCARD
	    if [[ ! -e $MPAF/cfg/tmpFiles/${ANTYPE}${SIGNAME}${mass}.cfg ]]; then
		continue
	    fi
	    qsub -q all.q -N MPAFjob${ANTYPE}${SIGNAME}${mass} -o $MPAF/workdir/logs/${ANTYPE}/${ANTYPE}${SIGNAME}${mass}.out -e $MPAF/workdir/logs/${ANTYPE}/${ANTYPE}${SIGNAME}${mass}.err $MPAF/scripts/submit.sh $MPAF/cfg/tmpFiles/${ANTYPE}${SIGNAME}${mass}.cfg $HOME
	done < ${MASSFILE}
    fi
    # Old inline implementation kept for reference:
    #while read line; do
    #	mass=$line
    #	if [[ -e $MPAF/cfg/tmpFiles/${ANTYPE}_${mass}.cfg ]]; then
    #	    continue
    #	fi
    ##python splitLineScan.py $path $mass $SIGNAME
    #dec=`python splitLineScan.py $path $mass`
    #if [[ $dec == 0 ]]; then
    #	continue
    #fi
    #break
    #cp $MPAF/cfg/${TEMPCARD}.cfg $MPAF/cfg/tmpFiles/${ANTYPE}${mass}.cfg
    #sed -i 's|MASSBENCH|'$mass'|' $MPAF/cfg/tmpFiles/${ANTYPE}${mass}.cfg
    #sed -i 's|FILE|'$file'|' $MPAF/cfg/tmpFiles/${ANTYPE}${mass}.cfg
    #sed -i 's|PATH|'$path'|' $MPAF/cfg/tmpFiles/${ANTYPE}${mass}.cfg
    #qsub -q all.q -N MPAFjob -o $MPAF/workdir/logs/${ANTYPE}/${ANTYPE}${mass}.out -e $MPAF/workdir/logs/${ANTYPE}/${ANTYPE}${mass}.err $MPAF/scripts/submit.sh $MPAF/cfg/tmpFiles/${ANTYPE}${mass}.cfg
    # done < ${MASSFILE}
    #N=`echo $N + 1 | bc`
    #break
done < $FNAME
| true |
485dd9cb6ee9a8662926f9bf73a2b7f32ebd6abc | Shell | rosenbergdm/configfiles | /HOME/.zshrc.d/function_src_files/vim | UTF-8 | 4,284 | 3.703125 | 4 | [] | no_license | #!/bin/zsh -f
#
# Function to invoke sudo vim with a .vimrc file if it is needed to edit a protected file
#
# set -x # uncomment to debug
# Version marker of this wrapper function (zsh-templates-osx packaging).
version="2.0.1"
###############################################################################
function resetter {
	# Wipe every piece of per-invocation state (classification flags, the
	# per-category file arrays and the vimrc option array) so that a
	# repeated call of this wrapper starts from a clean slate.
	unset i
	binplistfile='' otherfile='' myfile=''
	myplistfilearray=()
	otherplistfilearray=()
	myfilearray=()
	otherfilearray=()
	optionarray=()
	LOCALVIMRC=()
}
function sudowarn {
	# One-time heads-up that "sudo vim" is about to run; the user can
	# silence it permanently by creating the ~/.zsh/vimhushsudo marker.
	[[ -f ~/.zsh/vimhushsudo ]] && return
	print "\e[1m                                                                   "
	print "         Using \e[0m sudo vim \e[1m to edit file(s).                    "
	print "\e[0m                                                                   "
	# Pause 1 second to give the user time to read this.
	sleep 1
}
function binconvertwarn {
	# Warn that a binary plist is about to be round-tripped through XML so
	# vim can edit it; silenced by creating the ~/.zsh/vimhushplist marker.
	[[ -f ~/.zsh/vimhushplist ]] && return
	print "\e[1m                                                                     "
	print "  Editing a binary plist file by temporarily converting it to XML.   "
	print "\e[0m                                                                     "
	# Pause 1 second to give the user time to read this.
	sleep 1
}
###############################################################################
###############################################################################
# Start from a clean slate, then pick which vimrc to hand to sudo vim:
# a saved copy wins over the live one; otherwise no -u option is passed.
resetter
# See if there is a special vimrc file to use
if [[ -z $LOCALVIMRC ]];then
	if [[ -f ~/.vimrc_save ]]; then
		LOCALVIMRC=( -u ~/.vimrc_save )
	elif [[ -f ~/.vimrc ]]; then
		LOCALVIMRC=( -u ~/.vimrc )
	else
		LOCALVIMRC=()
	fi
fi
###############################################################################
# If no arguments are given, just start vim. If your $PWD is not your own,
# this starts "sudo vim" and pauses for two seconds to permit the user to
# ponder the significance and ramifications of this development.
# (This file is a zsh autoloaded function body, hence the bare `return`.)
if [[ $# == 0 ]];then
	if [[ -O $PWD ]];then
		command vim "$@"
	else
		print "starting with \e[1m sudo vim \e[0m "
		sleep 2
		sudo vim "$@"
	fi
	return 0
fi
# Classify every argument into one of five buckets so the dispatch section
# below can edit each group with the right tool:
#   otherplistfilearray - binary plists owned by someone else
#   myplistfilearray    - binary plists owned by me
#   otherfilearray      - other files owned by someone else
#   myfilearray         - other files owned by me
#   optionarray         - nonexistent paths / vim options, passed through
# binplistfile/otherfile/myfile are "true" when the matching bucket is used.
LIMIT=$#
for ((i = 1; i <= $LIMIT; i++ )) do
	# Indirect access to positional parameter $i via eval.
	eval file="\$$i"
	# Test existence, ownership and binary-plistness of each file:
	# $file:e is the zsh extension modifier (text after the last dot).
	if [[ -e $file && $file:e == plist ]];then
		# file(1)'s second word is "data"/"binary" for binary plists,
		# something else (e.g. "XML") for text ones -- TODO confirm on
		# all macOS versions in use.
		file_type=$(command file -b $file | awk '{print $2}' )
		if [[ ($file_type == data || $file_type == binary) && ! -O $file ]];then
			# This is a binary plist file I don't own
			binplistfile="true"
			otherfile="true"
			otherplistfilearray+=( $file )
		elif [[ ($file_type == data || $file_type == binary) && -O $file ]];then
			# This is a binary plist file I do own
			binplistfile="true"
			myfile="true"
			myplistfilearray+=( $file )
		elif [[ $file_type != data && ! -O $file ]];then
			# This is an xml plist file I don't own
			# binplistfile="false"
			otherfile="true"
			otherfilearray+=( $file )
		elif [[ $file_type != data && -O $file ]];then
			# This is an xml plist file I do own
			# binplistfile="false"
			myfile="true"
			myfilearray+=( $file )
		else
			: # I think there are no other possibilities
		fi
	elif [[ -e $file && ! -O $file ]]; then
		# The file exists and someone else owns this file
		# binplistfile="false"
		otherfile="true"
		otherfilearray+=( $file )
	elif [[ -e $file && -O $file ]]; then
		# The file exists and I own this file
		# binplistfile="false"
		myfile="true"
		myfilearray+=( $file )
	else
		# File does not exist, or an option is given
		myfile="true"
		optionarray+=( $file )
	fi
done
###############################################################################
# Use sudo vim to edit files that I do not own
# Binary plists are converted to XML first and converted back afterwards;
# note this converts *all* collected plists in one plutil call.
if [[ $otherfile == "true" ]]; then
	sudowarn
	if [[ $binplistfile == "true" ]]; then
		binconvertwarn
		sudo plutil -convert xml1 $otherplistfilearray
		sudo vim $LOCALVIMRC $optionarray $otherplistfilearray
		sudo plutil -convert binary1 $otherplistfilearray
	else
		sudo vim $LOCALVIMRC $optionarray $otherfilearray
	fi
fi
# Use command vim to edit files that I do own
# ("command" bypasses this wrapper function so we don't recurse).
if [[ $myfile == "true" ]]; then
	if [[ $binplistfile == "true" ]]; then
		binconvertwarn
		plutil -convert xml1 $myplistfilearray
		command vim $LOCALVIMRC $optionarray $myplistfilearray
		plutil -convert binary1 $myplistfilearray
	else
		command vim $LOCALVIMRC $optionarray $myfilearray
	fi
fi
# Leave no state behind for the next invocation.
resetter
## Source: ZSH-templates-OSX Version: 2.0.0
## /Library/init/zsh/zshrc.d/local-functions/general
## http://code.google.com/p/zsh-templates-osx/
# vim: ft=zsh
| true |
c9653a9e9e7239638306c5f3302444cbb3d1d0ca | Shell | Wizek/hs-di | /release.sh | UTF-8 | 1,655 | 3.90625 | 4 | [] | no_license | #!/bin/bash
# Abort on any command failure or use of an unset variable.
set -eu
# Release parameters: new version, Hackage credentials.
ver="$1"
user="$2"
# TODO get from keyring
# NOTE(review): the password arrives on the command line and is therefore
# visible in the process list and shell history.
pass="$3"
# Accumulates the exit status of the git cleanliness checks below.
ret=0
}
projectName=`ls *.cabal | sed s/.cabal//`
if [ -e "$projectName.cabal" ]; then
log "Project Name: $projectName"
else
log "*.cabal file not found, refusing release"
exit 1
fi
if [ -z "$ver" ]; then
log "Version not given, refusing release"
exit 1
fi
if [ -z "$user" ]; then
log "Username not given, refusing release"
exit 1
fi
if [ -z "$pass" ]; then
log "Password not given, refusing release"
exit 1
fi
# getExitCode () {
# "$@"
# echo "$?"
# }
git diff --exit-code >/dev/null || ret=$?
if [ "$ret" -ne 0 ]; then
git status --short
log "There are uncommitted changes, refusing release"
exit 2
else
log "No uncommitted changes. Wonderful. Continuing."
fi
git diff --cached --exit-code >/dev/null || ret=$?
if [ "$ret" -ne 0 ]; then
git status --short
log "There are uncommitted staged changes, refusing release"
exit 3
else
log "No uncommitted staged changes either. Wonderful. Continuing."
fi
rx="(version:\s+)([0-9\w.-]+)"
oldver=`cat "$projectName.cabal" | grep "^version:" | sed -re "s/$rx/\2/"`
log "\$oldver: $oldver"
stack test
msg="Bumping version v$oldver -> v$ver"
if [ "$ver" != "$oldver" ]; then
sed -r -i -e "s/$rx/\1$ver/" "$projectName.cabal"
log "$msg"
fi
cabal sdist
cabal upload "dist/$projectName-$ver.tar.gz" --username "$user" --password "$pass"
neil docs --username "$user:$pass"
if [ "$ver" != "$oldver" ]; then
git add -u
git commit -m "$msg"
log "Committed: $msg"
fi
git push
if [ ! `git tag -l "v$ver"` ]; then
git tag "v$ver" HEAD
fi
git push --tags
| true |
63f8d09242f5b376dd5aaa7d2d419c72f5f334e6 | Shell | mangan/xmlrpc_negotiate | /git-archive | UTF-8 | 145 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Create a release tarball <dir>-<version>.tar.gz of REF, where <dir> is the
# (resolved) name of the directory containing this script.
#
# Fix: every command substitution and expansion is now quoted, so the script
# no longer breaks when the repository path contains whitespace or glob
# characters; `--prefix`/`-o` arguments are quoted for the same reason.
NAME=$(basename "$(realpath "$(dirname "$0")")")
VERSION=1.0
REF=HEAD
git archive --prefix "$NAME-$VERSION/" -o "$NAME-$VERSION.tar.gz" "$REF"
| true |
aa50417ab552eb10e4aac613fcd59e7258224655 | Shell | TheBauwssss/LinuxSetupScripts | /Quick commands.sh | UTF-8 | 2,323 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# NOTE(review): this file is a personal setup scratchpad, not a script meant
# to run top-to-bottom -- it purges packages, switches PHP versions, and its
# last line is a bare URL that would fail as a command. Run lines manually.
#Mysql guides
#How To Install MySQL on Ubuntu 16.04: https://www.digitalocean.com/community/tutorials/how-to-install-mysql-on-ubuntu-16-04
#Uninstalling MySQL: https://help.cloud66.com/maestro/how-to-guides/databases/shells/uninstall-mysql.html
#Reset a MySQL root password: https://support.rackspace.com/how-to/mysql-resetting-a-lost-mysql-root-password/
#Make MySQL listen on other interfaces: https://www.garron.me/en/bits/mysql-bind-all-address.html
#Wordpress guides
#How to install Wordpress: https://tecadmin.net/install-wordpress-with-nginx-on-ubuntu/
#Security guides
#How to setup fail2ban: https://www.linode.com/docs/security/using-fail2ban-for-security/#configure-fail2ban-local
service --status-all #view all services
#Uninstall unused crap, LET OP: packages worden niet correct verwijders, deze commandos handmatig uitvoeren!
# (Dutch above: NOTE -- packages are not removed correctly, run these
# commands manually!)
apt purge --auto-remove apache2* -y
apt purge --auto-remove postfix -y
apt purge --auto-remove rsyslog -y
apt update
apt upgrade -y
apt install software-properties-common htop locate nano unzip zip screen tree git curl fail2ban ncdu -y
add-apt-repository ppa:fish-shell/release-2
#
# Fix unicode locale errors when installing certificates by putting LC_ALL=C.UTF-8 before the command in !!BASH!!
#
apt update
apt install fish -y
apt install nginx php7.0-cli php7.0-cgi php7.0-fpm -y
#Change default shell
chsh -s $(which fish)
#Execute the below line manually in fish!
#set -g fish_prompt_pwd_dir_length 0
#PHP v7.2 (nginx):
#Uninstall old nginx + php from above
apt purge --auto-remove nginx php7.0-cli php7.0-cgi php7.0-fpm -y
#Add repo, key and update sources
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 4F4EA0AAE5267A6C
add-apt-repository ppa:ondrej/php
#apt-add-repository ppa:ondrej/nginx-mainline
apt update
apt upgrade -y
#FIX NGINX SIGNING KEY
wget http://nginx.org/packages/keys/nginx_signing.key
cat nginx_signing.key | sudo apt-key add -
apt-get update
apt-get install nginx
#END FIX
#Install new php version
apt install nginx php7.2 php7.2-cli php7.2-common php7.2-json php7.2-opcache php7.2-readline -y
apt install php-pear php7.2-curl php7.2-dev php7.2-gd php7.2-mbstring php7.2-zip php7.2-mysql php7.2-xml php7.2-intl php7.2-sqlite3 -y
#LET OP: deze fix doorvoeren om shutdown te fixen:
# (Dutch above: NOTE -- apply this fix to repair shutdown.)
# WARNING: the next line is a bare URL, not a command.
https://askubuntu.com/a/879430
5d3537509f63e31d4b61c264330b7f6ee52d9a4f | Shell | FrenkenFlores/Ft_containers | /unit_tests/test_vector.sh | UTF-8 | 348 | 2.84375 | 3 | [] | no_license | #!/bin/bash
# Build the container test_vector unit test with clang++ and run it under the
# mode named by $1: "lldb" -> debugger session, "leaks" -> macOS leak checker,
# anything else (or no argument) -> plain run. Every mode removes the binary
# afterwards; the debugger/leaks modes also drop the .dSYM bundle.
compiler=clang++
sources=test_vector.cpp
binary=test_vector
$compiler $sources -g -o $binary
case "$1" in
  lldb)
    lldb test_vector
    rm -rf test_vector.dSYM
    rm test_vector
    exit
    ;;
  leaks)
    leaks -atExit -- ./test_vector
    rm -rf test_vector.dSYM
    rm test_vector
    exit
    ;;
esac
./test_vector
rm test_vector
46745cfd3147d5a9e78b7cd246a9f340b3a5122a | Shell | Sennue/gogochat | /curl.sh | UTF-8 | 6,430 | 2.578125 | 3 | [
"MIT",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/sh
# Manual smoke test for the gogochat REST API: walks every endpoint with curl
# and pretty-prints the JSON responses. Bracketed notes in the banners mark
# deliberately bad requests.
export API_HOST='http://localhost:8080'
export CURL_OPTIONS='-sS -A'
export CURL_USER_AGENT='user-agent'
# Presumably defines DEVICE_ID, NAME, EMAIL, PASSWORD and TOKEN used below --
# kept out of version control; verify against secure_curl.sh.
. secure_curl.sh
# --- unauthenticated GET endpoints -----------------------------------------
export API_VERB="GET"
export API_ENDPOINT=""
printf "\n$API_VERB /$API_ENDPOINT\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
export API_VERB="GET"
export API_ENDPOINT="version"
printf "\n$API_VERB /$API_ENDPOINT\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
export API_VERB="GET"
export API_ENDPOINT="ping"
printf "\n$API_VERB /$API_ENDPOINT\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
export API_VERB="GET"
export API_ENDPOINT="time"
printf "\n$API_VERB /$API_ENDPOINT\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
# --- account creation: malformed, repeat, and random ------------------------
export API_VERB="POST"
export API_ENDPOINT="account"
export API_HEADER="Content-Type: application/json"
export API_DATA="bad-data"
printf "\n$API_VERB /$API_ENDPOINT [bad data]\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" -H "$API_HEADER" -d "$API_DATA" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
export API_VERB="POST"
export API_ENDPOINT="account"
export API_HEADER="Content-Type: application/json"
export API_DATA="{\"device_id\":\"$DEVICE_ID\", \"name\":\"$NAME\", \"email\":\"$EMAIL\", \"password\":\"$PASSWORD\"}"
printf "\n$API_VERB /$API_ENDPOINT [may exist]\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" -H "$API_HEADER" -d "$API_DATA" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
export API_VERB="POST"
export API_ENDPOINT="account"
export API_HEADER="Content-Type: application/json"
# 32 random hex chars make a unique throwaway account per run.
export API_DATA_RANDOM_ID=`cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-f0-9' | head -c 32`
export API_DATA="{\"device_id\":\"FakeID:$API_DATA_RANDOM_ID\", \"name\":\"$API_DATA_RANDOM_ID\", \"email\":\"${API_DATA_RANDOM_ID}@email.com\", \"password\":\"$API_DATA_RANDOM_ID\"}"
printf "\n$API_VERB /$API_ENDPOINT [random values]\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" -H "$API_HEADER" -d "$API_DATA" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
# --- authentication: wrong password, then the real one ----------------------
export API_VERB="POST"
export API_ENDPOINT="auth"
export API_HEADER="Content-Type: application/json"
export API_DATA="{\"device_id\":\"$DEVICE_ID\", \"password\":\"bad-password\"}"
printf "\n$API_VERB /$API_ENDPOINT [wrong password]\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" -H "$API_HEADER" -d "$API_DATA" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
export API_VERB="POST"
export API_ENDPOINT="auth"
export API_HEADER="Content-Type: application/json"
export API_DATA="{\"device_id\":\"$DEVICE_ID\", \"password\":\"$PASSWORD\"}"
printf "\n$API_VERB /$API_ENDPOINT\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" -H "$API_HEADER" -d "$API_DATA" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
# --- room endpoints (token-authenticated) -----------------------------------
export API_VERB="GET"
export API_ENDPOINT="room"
export API_AUTH_HEADER="Authorization: bad-token"
printf "\n$API_VERB /$API_ENDPOINT [bad token]\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" -H "$API_AUTH_HEADER" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
export API_VERB="GET"
export API_ENDPOINT="room"
export API_AUTH_HEADER="Authorization: $TOKEN"
printf "\n$API_VERB /$API_ENDPOINT\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" -H "$API_AUTH_HEADER" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
# Error cases dump raw headers (-D -) instead of piping to json.tool, since
# the server response may not be JSON.
export API_VERB="GET"
export API_ITEM="x"
export API_ENDPOINT="room/$API_ITEM"
export API_AUTH_HEADER="Authorization: $TOKEN"
printf "\n$API_VERB /$API_ENDPOINT [bad syntax]\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" -D - -H "$API_AUTH_HEADER" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" #| python -m json.tool #| less
export API_VERB="GET"
export API_ITEM="-1"
export API_ENDPOINT="room/$API_ITEM"
export API_AUTH_HEADER="Authorization: $TOKEN"
printf "\n$API_VERB /$API_ENDPOINT [nonexistent item]\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" -D - -H "$API_AUTH_HEADER" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" #| python -m json.tool #| less
export API_VERB="GET"
export API_ITEM="1"
export API_ENDPOINT="room/$API_ITEM"
export API_AUTH_HEADER="Authorization: $TOKEN"
printf "\n$API_VERB /$API_ENDPOINT\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" -H "$API_AUTH_HEADER" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
export API_VERB="POST"
export API_ENDPOINT="room"
export API_AUTH="Authorization: $TOKEN"
export API_AUTH_HEADER="Content-Type: application/json"
export API_DATA_NAME="Test Room"
export API_DATA_DESCRIPTION="Test room description."
export API_DATA="{ \"name\": \"$API_DATA_NAME\", \"description\": \"$API_DATA_DESCRIPTION\" }"
printf "\n$API_VERB /$API_ENDPOINT [may exist]\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" -H "$API_AUTH" -H "$API_AUTH_HEADER" -d "$API_DATA" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
export API_VERB="POST"
export API_ENDPOINT="room"
export API_AUTH_HEADER="Authorization: $TOKEN"
export API_HEADER="Content-Type: application/json"
export API_DATA_NAME=`cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-f0-9' | head -c 32`
export API_DATA_DESCRIPTION=`cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-f0-9' | head -c 96`
export API_DATA="{ \"name\": \"$API_DATA_NAME\", \"description\": \"$API_DATA_DESCRIPTION\" }"
printf "\n$API_VERB /$API_ENDPOINT [random values]\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" -H "$API_AUTH_HEADER" -H "$API_HEADER" -d "$API_DATA" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
# --- message endpoints ------------------------------------------------------
export API_VERB="GET"
export API_ITEM="1"
export API_ENDPOINT="message/$API_ITEM"
export API_AUTH_HEADER="Authorization: $TOKEN"
printf "\n$API_VERB /$API_ENDPOINT\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" -H "$API_AUTH_HEADER" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" | python -m json.tool #| less
export API_VERB="POST"
export API_ENDPOINT="message"
export API_AUTH="Authorization: $TOKEN"
export API_AUTH_HEADER="Content-Type: application/json"
export API_DATA_ROOM_ID="1"
export API_DATA_BODY="Test Message: Hello, World!"
export API_DATA="{ \"room_id\": \"$API_DATA_ROOM_ID\", \"body\": \"$API_DATA_BODY\" }"
printf "\n$API_VERB /$API_ENDPOINT\n"
curl $CURL_OPTIONS "$CURL_USER_AGENT" -H "$API_AUTH" -H "$API_AUTH_HEADER" -d "$API_DATA" "$API_HOST/$API_ENDPOINT" -X "$API_VERB" #| python -m json.tool #| less
| true |
a2b1131131d7d0fd2a3001403c9625033b2a4e3e | Shell | Tiryoh/mycobot-setup-tools | /patch/pymycobot.sh | UTF-8 | 614 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Fail fast on errors and unset variables.
set -eu
# Repository root: one level above the directory containing this script.
SRC_DIR=$(cd $(dirname ${BASH_SOURCE:-$0})/../; pwd)
# git_patch_apply PATCH -- apply PATCH, treating "already applied" as success.
#
# Fix: the original one-liner `git apply $1 || git apply $1 -R --check ... &&
# { echo ... ; }` parses as `(a || b) && c` because && and || have equal
# precedence in the shell, so the "already applied" messages were printed
# even when the patch applied cleanly. The explicit if/elif below restores
# the intended logic, and $1 is quoted so paths with spaces work.
git_patch_apply() {
    if git apply "$1"; then
        return 0
    elif git apply "$1" -R --check 2>/dev/null; then
        # Patch reverse-applies cleanly, i.e. it is already in the tree.
        echo "git patch already applied"
        echo "the 'patch does not apply' error shown above can be ignored"
        return 0
    else
        # Genuine failure: propagate a non-zero status (the caller runs
        # under `set -e`, matching the original behavior of aborting).
        return 1
    fi
}
# Patch the vendored elephantrobotics myCobot checkout: only when it sits at
# the one known-broken commit, download upstream PR #4 as a patch and apply
# it (idempotently, via git_patch_apply).
fix_pymycobot() {
    pushd ${SRC_DIR}/elephantrobotics-mycobot > /dev/null
    # Pin to the exact commit the patch was written against.
    if [ "$(git rev-parse HEAD)" = "c2d4acd1e1c63dd5b1a62724f57e11808fa16359" ]; then
        curl -Ss https://patch-diff.githubusercontent.com/raw/elephantrobotics/myCobot/pull/4.patch > ${SRC_DIR}/patch/4.patch
        git_patch_apply ${SRC_DIR}/patch/4.patch
    fi
    popd > /dev/null
}
| true |
a2fa62e2bc78c345d8fa995b28bedb923080ab5d | Shell | lgorkemt/UnixScripting | /Wa08_Siebel_File_Exportbash.sh | UTF-8 | 2,843 | 3.0625 | 3 | [] | no_license | #!/bin/bash
# Copy Siebel batch output files to the remote fax server, sorted into
# per-year directories. Each loop parses the year out of the file name
# (second "_"-separated token of the name without extension) and scp's the
# file to the matching remote path. The three loops (KR, SD, MH) are
# intentionally near-identical; commented-out `cp` lines show the previous
# local-copy behavior.
#
# Fix: the "Processing ..." echo in all three loops referenced the undefined
# variable $fileName (capital N) instead of $filename, so the message always
# printed an empty name. Everything else is unchanged.
#sh WA08_enhencement_ibmlturan 1
HOSTRemote=127.0.0.1
USERRemote=siebel
# Our Folders
MH_BATCH_PROCESS_OUT=/mnt/icrm/deneme/siebel/MH/*
KR_BATCH_PROCESS_OUT=/mnt/icrm/deneme/siebel/KR/*
SD_BATCH_PROCESS_OUT=/mnt/icrm/deneme/siebel/SD/*
#GT_BATCH_PROCESS_OUT=
# Remote folders
MH_FAX_PATH=/app_fax/MH
KR_FAX_PATH=/app_fax/KURUMSAL
SD_FAX_PATH=/app_fax/SHOPDESTEK
#GT_FAX_PATH=
#sftp $USERRemote@$HOSTRemote <<EOF
for f in $KR_BATCH_PROCESS_OUT
do
  echo "Parsing $f file new..."
  filename=$(basename "$f")
  echo "filename...$filename"
  extension="${filename##*.}"
  filenamewoext="${filename%.*}"
  echo "extension...$extension"
  echo "filenamewoext...$filenamewoext"
  # Split the extension-less name on "_" and keep the 2nd token (the year).
  parts=$(echo $filenamewoext | tr "_" "\n")
  START=1
  END=2
  i=$START
  yearPart=
  for part in $parts
  do
    echo "part..[$part]"
    echo "i..[$i]"
    if [ $i -eq 2 ]
    then yearPart=[$part]
    fi
    echo "YearPart... $yearPart"
    ((i=i+1))
  done
  # yearPart holds "[YYYY...]"; offset 1 skips the bracket, keep 4 digits.
  yearPart2=${yearPart:1:4}
  echo "YearPart2... $yearPart2"
  #echo "Processing $f file..."
  echo "Processing $filename file..."
  scp $f $USERRemote@$HOSTRemote:$KR_FAX_PATH/$yearPart2/
  #cp $f /app_fax/KURUMSAL/$yearPart2/$filename
done
for f in $SD_BATCH_PROCESS_OUT
do
  echo "Parsing $f file..."
  filename=$(basename "$f")
  echo "filename...$filename"
  extension="${filename##*.}"
  filenamewoext="${filename%.*}"
  echo "extension...$extension"
  echo "filenamewoext...$filenamewoext"
  parts=$(echo $filenamewoext | tr "_" "\n")
  START=1
  END=2
  i=$START
  yearPart=
  for part in $parts
  do
    echo "part..[$part]"
    echo "i..[$i]"
    if [ $i -eq 2 ]
    then yearPart=[$part]
    fi
    echo "YearPart... $yearPart"
    ((i=i+1))
  done
  yearPart2=${yearPart:1:4}
  echo "YearPart2... $yearPart2"
  echo "Processing $filename file..."
  scp $f $USERRemote@$HOSTRemote:$SD_FAX_PATH/$yearPart2/
  #cp $f /app_fax/SHOPDESTEK/$yearPart2/$filename
done
for f in $MH_BATCH_PROCESS_OUT
do
  echo "Parsing $f file..."
  filename=$(basename "$f")
  echo "filename...$filename"
  extension="${filename##*.}"
  filenamewoext="${filename%.*}"
  echo "extension...$extension"
  echo "filenamewoext...$filenamewoext"
  parts=$(echo $filenamewoext | tr "_" "\n")
  START=1
  END=2
  i=$START
  yearPart=
  for part in $parts
  do
    echo "part..[$part]"
    echo "i..[$i]"
    if [ $i -eq 2 ]
    then yearPart=[$part]
    fi
    echo "YearPart... $yearPart"
    ((i=i+1))
  done
  yearPart2=${yearPart:1:4}
  echo "YearPart2... $yearPart2"
  #echo "Processing $f file..."
  echo "Processing $filename file..."
  scp $f $USERRemote@$HOSTRemote:$MH_FAX_PATH/$yearPart2/
  #cp $f /mnt/icrm/deneme/siebelout/$yearPart2/$filename
done
#EOF
4b3d8dcc8ece4afd91261a508bef22654f1caa03 | Shell | zhengyuli/ntrace_c | /src/ntraced.sh | UTF-8 | 1,698 | 3.78125 | 4 | [] | no_license | #!/bin/bash
#---------------------------------------------------------------------------------
# Name: ntraced.sh
# Purpose:
#
# Time-stamp: <2015-04-30 18:11:13 Thursday by zhengyuli>
#
# Author: zhengyu li
# Created: 2015-04-30
#
# Copyright (c) 2015 zhengyu li <lizhengyu419@gmail.com>
#---------------------------------------------------------------------------------
# Service wrapper configuration: daemon name and the PID file it creates.
source /etc/profile
export LC_ALL=C
SERVICE_NAME=ntrace
PID_FILE=/var/run/ntrace/ntrace.pid
# Start the daemon unless the PID file indicates it is already running.
# After launching, poll up to 10 seconds for the PID file to appear;
# print [OK] and exit 0 on success, [Failed] otherwise.
start () {
    if [ -f $PID_FILE ]
    then
        echo "${SERVICE_NAME} is running"
    else
        echo -n "Start ${SERVICE_NAME}: "
        ${SERVICE_NAME} -D
        loop=10
        while [ $loop -gt 0 ]
        do
            sleep 1
            if [ -f $PID_FILE ]
            then
                echo "[OK]"
                exit 0
            else
                loop=$(($loop - 1))
            fi
        done
        echo "[Failed]"
    fi
}
# Stop the daemon: send SIGINT (-2) to the recorded PID and block until the
# process disappears from /proc. Removes a stale PID file if the daemon
# did not clean it up itself.
# NOTE(review): this loops forever if the process ignores SIGINT.
stop () {
    if [ ! -f $PID_FILE ]
    then
        echo "${SERVICE_NAME} is not running"
    else
        echo -n "Stop ${SERVICE_NAME}: "
        PID=$(cat $PID_FILE)
        kill -2 $PID
        while [ -x /proc/$PID ]
        do
            sleep 1
        done
        if [ -f $PID_FILE ]
        then
            rm $PID_FILE
        fi
        echo "[Done]"
    fi
}
# Restart the service: a stop followed by a start.
restart () { stop; start; }
# Report whether the service is running, judged solely by the presence of
# the PID file. Output strings match the original exactly (including the
# "[Runing]" spelling).
status () {
    echo -n "${SERVICE_NAME} status: "
    if [[ -f "$PID_FILE" ]]; then
        echo "[Runing]"
    else
        echo "[Stopped]"
    fi
}
# Dispatch on the single sub-command argument.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        restart
        ;;
    status)
        status
        ;;
    *)
        # Unknown or missing sub-command: print usage (exit status stays 0).
        echo "Usage: $0 {start|stop|restart|status}"
        ;;
esac
| true |
d3b86de29cbfc7320186e3aef07496e77ca4e23f | Shell | LichenInc/s2i-ruby-container | /2.5/s2i/bin/save-artifacts | UTF-8 | 264 | 3.15625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Besides the tar command, all other output to standard out must
# be surpressed. Otherwise, the tar stream will be corrupted.
# Stream the cached bundler gems and node modules from $HOME to stdout as a
# tar archive (s2i incremental-build artifacts). Nothing else may write to
# stdout or the tar stream would be corrupted.
cd "${HOME}" >/dev/null
if [[ -d bundle && -d node_modules ]]; then
    tar cf - bundle node_modules
fi
| true |
095a9f63b40d2cbb5fea1e4350b625dafc21d23e | Shell | janinerugayan/NorBERT | /create_tfrec_phase2.slurm | UTF-8 | 1,580 | 3.09375 | 3 | [
"CC0-1.0"
] | permissive | #!/bin/bash
#SBATCH --job-name=BERT_TFR
#SBATCH --mail-type=FAIL
#SBATCH --account=nn9447k
#SBATCH --time=15:00:00 # Max walltime is 14 days.
#SBATCH --mem-per-cpu=8G
# Definining resource we want to allocate.
#SBATCH --nodes=1
#SBATCH --ntasks=8
# This is used to make checkpoints and logs to readable and writable by other members in the project.
umask 0007
# Load the NVIDIA BERT environment module (provides $EBROOTNLPLMINNVIDIA_BERT).
module use -a /cluster/projects/nn9851k/software/easybuild/install/modules/all/
module purge # Recommended for reproducibility
module load NLPL-nvidia_BERT/20.06.8-gomkl-2019b-TensorFlow-1.15.2-Python-3.7.4
export MAX_PR=77 # max predictions per sequence
export MAX_SEQ_LEN=512 # max sequence length (128 for the 1st phase, 512 for the 2nd phase)
# Some handy variables, you'll need to change these.
export BERT_ROOT=$EBROOTNLPLMINNVIDIA_BERT
export LOCAL_ROOT=`pwd`
export OUTPUT_DIR=$LOCAL_ROOT/data/norbert${MAX_SEQ_LEN}/
mkdir -p $OUTPUT_DIR
# Echo the positional arguments for the job log.
echo ${1} # input corpus
echo ${2} # wordpiece vocabulary file
echo ${3} # name(s) of the output TFR file(s), for example, "norbert.tfr"
# TODO: implement creating a list of TFR file names from the list of input file names?
python3 ${BERT_ROOT}/utils/create_pretraining_data.py --input_file=${1} --vocab_file=${2} --dupe_factor=6 --max_seq_length=${MAX_SEQ_LEN} --max_predictions_per_seq=${MAX_PR} --output_file=${OUTPUT_DIR}${3}
# This is for the Uncased variant:
# python3 create_pretraining_data.py --input_file=${1} --vocab_file=${2} --dupe_factor=10 --max_seq_length=128 --max_predictions_per_seq=20 --do_lower_case --output_file=${OUTPUT_DIR}${3}
| true |
463d0d26470ceef23398e56264107a7cebf935b7 | Shell | amcadmus/chemical.potential | /methane.water/template.tf/gen.tf.sh | UTF-8 | 4,657 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# --- Stage 1: read the configuration, stage conf.gro and derive box sizes.
# Everything below depends on variables defined in env.sh / parameters.sh
# (ch4_ratio, conf_dir, ...); statement order matters.
source env.sh
source parameters.sh
mylog=`pwd`/gen.tf.log
makelog=`pwd`/make.log
rm -f $mylog
# prepare conf.gro
echo "# prepare conf.gro"
rm -f conf.gro
dir_name=vol.`printf %.3f $ch4_ratio`
cp $conf_dir/$dir_name/out.gro ./conf.gro
# Count methane and water molecules (each SOL water is 3 atom lines).
nch4=`grep CH4 conf.gro | wc -l`
nwat=`grep SOL conf.gro | wc -l`
nwat=`echo "$nwat / 3" | bc`
nmol=`echo "$nwat + $nch4" | bc `
# Box dimensions are the three fields on the last line of the .gro file.
boxx=`tail conf.gro -n 1 | awk '{print $1}'`
boxy=`tail conf.gro -n 1 | awk '{print $2}'`
boxz=`tail conf.gro -n 1 | awk '{print $3}'`
half_boxx=`echo "$boxx/2.0" | bc -l`
half_boxy=`echo "$boxy/2.0" | bc -l`
half_boxz=`echo "$boxz/2.0" | bc -l`
# prepare dens.SOL.xvg
echo "# prepare dens.SOL.xvg and dens.Meth.xvg"
rm -f dens.SOL.xvg
rm -f dens.Meth.xvg
for i in `seq 0 0.05 $boxx`;
do
    echo "$i 0 0" >> dens.SOL.xvg
    echo "$i 0 0" >> dens.Meth.xvg
done
# echo "0 0 0" > dens.SOL.xvg
# tmp=`echo "$boxx/4.0" | bc -l`
# echo "$tmp 0 0" >> dens.SOL.xvg
# tmp=`echo "$boxx/4.0 * 2.0" | bc -l`
# echo "$tmp 0 0" >> dens.SOL.xvg
# tmp=`echo "$boxx/4.0 * 3.0" | bc -l`
# echo "$tmp 0 0" >> dens.SOL.xvg
# echo "$boxx 0 0" >> dens.SOL.xvg
# copy dir
echo "# copy dir"
rm -fr tf
cp -a tools/tf.template ./tf
# --- Stage 2: instantiate grompp.mdp, the index file and settings.xml from
# the templates, substituting the run parameters computed above.
# prepare grompp.mdp
echo "# prepare grompp.mdp"
rm -fr grompp.mdp
cp tf/grompp.mdp .
sed -e "/^adress_ex_width/s/=.*/= $ex_region_r/g" grompp.mdp |\
    sed -e "/^adress_hy_width/s/=.*/= $hy_region_r/g" |\
    sed -e "/^adress /s/=.*/= yes/g" |\
    sed -e "/^nsteps/s/=.*/= $gmx_nsteps/g" |\
    sed -e "/^nstenergy/s/=.*/= $gmx_nstenergy/g" |\
    sed -e "/^nstxtcout/s/=.*/= $gmx_nstxtcout/g" |\
    sed -e "/^adress_reference_coords/s/=.*/= $half_boxx $half_boxy $half_boxz/g" > grompp.mdp.tmp
mv -f grompp.mdp.tmp grompp.mdp
# prepare index file
make -C tools/gen.conf/ -j8 &> /dev/null
./tools/gen.conf/stupid.add.com -f conf.gro -o out.gro &>> $mylog
mv -f out.gro conf.gro
echo "# prepare index file"
# Feed group-selection commands to GROMACS make_ndx non-interactively.
echo "a CMW" > command.tmp
echo "a CMC" >> command.tmp
echo "a OW HW1 HW2" >> command.tmp
echo "a CH4" >> command.tmp
echo "name 8 EXW" >> command.tmp
echo "q" >> command.tmp
cat command.tmp | make_ndx -f conf.gro &>> $mylog
rm -fr command.tmp
# prepare settings.xml
echo "# prepare settings.xml"
rm -fr settings.xml
cp tf/settings.xml .
tf_min=`echo "$ex_region_r - $tf_extension" | bc -l`
tf_max=`echo "$ex_region_r + $hy_region_r + $tf_extension" | bc -l`
tf_spline_start=`echo "$ex_region_r - $tf_spline_extension" | bc -l`
tf_spline_end=` echo "$ex_region_r + $hy_region_r + $tf_spline_extension" | bc -l`
half_boxx_1=`echo "$half_boxx + 1." | bc -l`
# Line numbers of the first/last <prefactor> tag: the first belongs to the
# SOL section and the last to the Meth section of the template.
prefactor_l1=`grep -n prefactor settings.xml | head -n 1 | cut -f 1 -d ":"`
prefactor_l2=`grep -n prefactor settings.xml | tail -n 1 | cut -f 1 -d ":"`
sed -e "s/<min>.*<\/min>/<min>$tf_min<\/min>/g" settings.xml |\
    sed -e "s/<max>.*<\/max>/<max>$tf_max<\/max>/g" |\
    sed -e "s/<step>.*<\/step>/<step>$tf_step<\/step>/g" |\
    sed -e "s/<spline_start>.*<\/spline_start>/<spline_start>$tf_spline_start<\/spline_start>/g" |\
    sed -e "s/<spline_end>.*<\/spline_end>/<spline_end>$tf_spline_end<\/spline_end>/g" |\
    sed -e "s/<spline_step>.*<\/spline_step>/<spline_step>$tf_spline_step<\/spline_step>/g" |\
    sed -e "s/<table_end>.*<\/table_end>/<table_end>$half_boxx_1<\/table_end>/g" |\
    sed -e "${prefactor_l1}s/<prefactor>.*<\/prefactor>/<prefactor>$SOL_tf_prefactor<\/prefactor>/g" |\
    sed -e "${prefactor_l2}s/<prefactor>.*<\/prefactor>/<prefactor>$Meth_tf_prefactor<\/prefactor>/g" |\
    sed -e "s/<equi_time>.*<\/equi_time>/<equi_time>$equi_time_discard<\/equi_time>/g" |\
    sed -e "s/<iterations_max>.*<\/iterations_max>/<iterations_max>$tf_iterations_max<\/iterations_max>/g" > settings.xml.tmp
mv -f settings.xml.tmp settings.xml
# --- Stage 3: write the topology, stage CG tables and initial guesses into
# ./tf, then run the thermodynamic-force iteration with VOTCA's csg_inverse.
# prepare topol.top
echo "# prepare topol.top"
rm -fr topol.top
cp tf/topol.top .
sed "s/SOL.*/SOL $nwat/g" topol.top |
    sed "s/^Meth.*/Meth $nch4/g" > tmp.top
mv -f tmp.top topol.top
# prepare table of cg
echo "# prepare table of cg"
rm -f tf/table_CMC_CMC.xvg
rm -f tf/table_CMW_CMW.xvg
rm -f tf/table_CMW_CMC.xvg
cp -L $cg_pot_dir/table_CMC_CMC.xvg ./tf/
cp -L $cg_pot_dir/table_CMW_CMW.xvg ./tf/
cp -L $cg_pot_dir/table_CMW_CMC.xvg ./tf/
# prepare initial guess
echo "# prepare initial guess"
if test -f $init_guess_SOL_tf; then
    cp $init_guess_SOL_tf ./tf/SOL.pot.in
fi
if test -f $init_guess_Meth_tf; then
    cp $init_guess_Meth_tf ./tf/Meth.pot.in
fi
# copy all file to tf
echo "# copy files to tf"
rm -fr tf/conf.gro tf/dens.SOL.xvg tf/dens.Meth.xvg tf/grompp.mdp tf/index.ndx tf/settings.xml tf/topol.top
mv -f conf.gro dens.SOL.xvg dens.Meth.xvg grompp.mdp index.ndx settings.xml topol.top tf/
# calculate tf
echo "# calculate tf"
cd tf
sync
csg_inverse --options settings.xml
cd ..
| true |
f98ddad40874908dccd9d6802a050f89624186df | Shell | li-weibiao/shell-- | /***统计每个远程IP访问了本机apache几次? | UTF-8 | 3,480 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# Count requests per client IP (field 1 of each access-log line) and print
# "<count> <ip>" for every distinct address.
awk '{ hits[$1]++ } END { for (addr in hits) print hits[addr], addr }' /var/log/httpd/access_log
#########################################################
eg:BEGIN是在文本处理之前执行的语句,文本没有开始处理,谈不上第一行
END是在文本处理完成之后执行的语句,文本处理完成,当前行就是最后一行
[root@ecs-001 ~]# awk '{print ip}' /usr/local/apache2/logs/access_log
[root@ecs-001 ~]# awk '{print 1}' /usr/local/apache2/logs/access_log
1
[root@ecs-001 ~]# awk '{print $1}' /usr/local/apache2/logs/access_log
117.173.225.97
[root@ecs-001 ~]# awk '{print 2}' /usr/local/apache2/logs/access_log
2
[root@ecs-001 ~]# awk '{ip[$1]++}' /usr/local/apache2/logs/access_log #指令不完整没效果,个人理解相当于是创建了ip[]这个列表,将查到了的ip地址全部放在了这个列表里
[root@ecs-001 ~]# awk 'END{for(i in ip){print ip[i],i}}' /usr/local/apache2/logs/access_log
[root@ecs-001 ~]# awk '{ip[$1]++}END{for(i in ip){print ip[i],i}}' /usr/local/apache2/logs/access_log #然后print i,将ip地址给输出来了
1 117.173.225.97
[root@ecs-001 ~]# awk '{ip[$1]++}END{for(i in ip){print i,ip[i]}}' /usr/local/apache2/logs/access_log
117.173.225.97 1
[root@ecs-001 ~]# sh shizhen.sh
^C^C
[root@ecs-001 ~]# awk '{ip[$1]}END{for(i in ip){print i,ip[i]}}' /usr/local/apache2/logs/access_log
117.173.225.97
[root@ecs-001 ~]# cp /usr/local/apache2/logs/access_log access_log
[root@ecs-001 ~]# ls
access_log httpd-2.2.34 httpd-2.2.34.tar.gz mysql-5.5.20 mysql-5.5.20.tar.gz mysql-proxy-0.8.5-linux-el6-x86-64bit.tar.gz shizhen.sh
[root@ecs-001 ~]# vi access_log #输入了三个地址,117.173.225.97,117.173.225.97,125.78.69.70
[root@ecs-001 ~]# awk '{ip[$1]}END{for(i in ip){print i,ip[i]}}' access_log #但最后只显示了2个,说明会自动去掉重复的
117.173.225.97
125.78.69.70
[root@ecs-001 ~]# awk '{ip[$1]++}END{for(i in ip){print i,ip[i]}}' access_log #加上++ ,就有了print ip[i] 统计出现了次数的效果,说明可以累加了
117.173.225.97 2
125.78.69.70 1
[root@ecs-001 ~]# sh shizhen.sh
^C
[root@ecs-001 ~]# awk '{ip[$1]++}END{for(i in ip){print ip[i]}}' access_log #验证只有print ip[i]的效果,只出现统计数字
2
1
[root@ecs-001 ~]# awk '{ip[$1]}END{for(i in ip){print ip[i]}}' access_log #验证没有++,则不会出现print ip[i]的效果,说明print ip[i]的效果只对应前面的++
[root@ecs-001 ~]# awk '{ip[$1]}END{for(i in ip){print i}}' access_log #验证只有print i的效果,只出现ip,说明可以把for i in ip里的ip当成一个存放IP地址的列表或仓库,对应前面的ip[$1]
117.173.225.97
125.78.69.70
[root@ecs-001 ~]# awk '{ip[$1]++}END{for(i in ip){print i}}' access_log #验证有++的情况下,只显示print i的效果,证明自己的理解不算有误
117.173.225.97
125.78.69.70
#####################################################################
Q:awk '{a[$1]++}END{for (j in a) print a[j]"|"j}'
这个{a[$1]++}END{for (j in a)....}这个具体什么意思有没有大神能解释下
A:a[$1] 是一个关联数组,类似于C++中的map,其中$1的值是键,a[$1]对应的内容就是值,这个值如果是数字,则可以做加减运算。所以 a[$1]++ 就是指数组a中下标为$1对应的值增加1;
for ( j in a) 是循环遍历数组a中键值对的用法,j就是依次获取数组a中的下标
| true |
8293c952a70990285da663eca5b63335e478c81b | Shell | QLingx/Linux | /bash/hello.sh | UTF-8 | 345 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Demo script: positional parameters, quoting, command substitution,
# arithmetic expansion and 'test'. Output is unchanged from the original.
echo "bash name: $0"   # name the script was invoked as
echo "param one: $1"   # first positional argument (empty if none given)
echo "count: $#"       # number of positional arguments
echo "status: "        # NOTE(review): likely meant to append $? - left as-is
file="nihaonihao"
echo 'nihao '$file''   # single quotes suppress expansion; $file is outside them
echo "haoma, $file"
echo ${#file}          # length of $file (10)
testing=$(date)        # $(...) is the modern form of legacy backticks
echo "the date and time are: $testing"
var1=10
var2=20
var3=$(( var2 / var1 ))   # arithmetic expansion instead of external 'expr'
echo "The resultis $var3"
test 5 -gt 2 && echo "Yes"
test 5 -eq 5 && echo "Yes" | true |
3b142894275be3e0704ee53bae5fd8415bc7276e | Shell | yukondude/Scripnix0 | /is-remote-cnx | UTF-8 | 679 | 3.546875 | 4 | [
"Unlicense"
] | permissive | #!/bin/bash
#
# Detects whether this terminal is a remote SSH connection. Exit code will be
# zero if it is.
#
# This file is a part of Scripnix <https://github.com/yukondude/Scripnix/>.
# Written in 2010 by Dave Rogers <yukondude.com>
# This is free and unencumbered software released into the public domain.
# Refer to the LICENCE file for the not-so-fine print.
# Resolve this script's directory and load the shared Scripnix helpers
# (check_arg_count, command-for-pid, top-level-parent-pid come from bin.bash).
scriproot="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${scriproot}/bin.bash"
check_arg_count ${0} ${#} 0 0 '' ${1}
# This is a remote connection if the top-level parent process is the SSH daemon.
parent=$(command-for-pid $(top-level-parent-pid))
match=$(echo "${parent}" | egrep sshd)
# Exit status of the script: 0 iff the parent command line mentions sshd.
test -n "${match}"
| true |
9da6074a57c53b68b49c2cdbdead0ba1bcdde15b | Shell | JimmyDurandWesolowski/config | /.zshconfig/bindkey.zsh | UTF-8 | 2,550 | 2.90625 | 3 | [] | no_license |
bindkey -e
# Keypad/cursor key bindings for various terminal escape sequences.
bindkey "^[OH" beginning-of-line # Home
bindkey "^[OF" end-of-line # End
bindkey "[3~" delete-char # Del
bindkey "[2~" overwrite-mode # Insert
bindkey "[5~" beginning-of-history # PgUp
bindkey "[6~" end-of-history # PgDn
bindkey "^[[1;5C" forward-word
bindkey "^[[1;5D" backward-word
bindkey "^[[1;3C" emacs-forward-word
bindkey "^[[1;3D" emacs-backward-word
bindkey '\e[1~' beginning-of-line # Linux console
bindkey '\e[H' beginning-of-line # xterm
bindkey '\eOH' beginning-of-line # gnome-terminal
bindkey '\e[2~' overwrite-mode # Linux console, xterm, gnome-terminal
bindkey '\e[3~' delete-char # Linux console, xterm, gnome-terminal
bindkey '\e[4~' end-of-line # Linux console
bindkey '\e[F' end-of-line # xterm
bindkey '\eOF' end-of-line # gnome-terminal
# Use showkey (sudo) to get the key format from the keyboard
# The list of Zsh function is the zshzle manual
bindkey '^[[3;5~' delete-word
bindkey '^[[3;3~' delete-word # CTRL + DEL
# bindkey '^w' kill-region
# bindkey '^[w' copy-region-as-kill
bindkey '^H' backward-delete-word # CTRL + BACKSPACE
# Consider path as multiple words separated by '/', '-', '\' or '.'
WORDCHARS="${WORDCHARS:s#/#}"
WORDCHARS="${WORDCHARS:s#-#}"
WORDCHARS="${WORDCHARS:s#\\#}"
WORDCHARS="${WORDCHARS:s#.#}"
WORDCHARS="${WORDCHARS:s#_#}"
# by default: export WORDCHARS='*?_-.[]~=/&;!#$%^(){}<>'
# we take out the slash, period, angle brackets, dash here.
# export WORDCHARS='*?_[]~=&;!#$%^(){}'
# Everything splits the word
export WORDCHARS=''
## From http://chneukirchen.org/blog/archive/2011/02/10-more-zsh-tricks-you-may-not-know.html
# Complete in history with M-/, M-,
zstyle ':completion:history-words:*' list no
zstyle ':completion:history-words:*' menu yes
zstyle ':completion:history-words:*' remove-all-dups yes
bindkey "\e/" _history-complete-older
bindkey "\e," _history-complete-newer
# # Move to where the arguments belong.
# after-first-word() {
#   zle beginning-of-line
#   zle forward-word
# }
# zle -N after-first-word
# bindkey "^X1" after-first-word
# x-copy-region-as-kill () {
#   zle copy-region-as-kill
#   print -rn $CUTBUFFER | xsel -ip
# }
# zle -N x-copy-region-as-kill
# x-kill-region () {
#   zle kill-region
#   print -rn $CUTBUFFER | xsel -ip
# }
# zle -N x-kill-region
# x-yank () {
#   CUTBUFFER=$(xsel -o -p </dev/null)
#   zle yank
# }
# zle -N x-yank
# bindkey -e '\ew' x-copy-region-as-kill
# bindkey -e '^w' x-kill-region
# bindkey -e '^y' x-yank
| true |
f2b37f3b857c87d6554229fd44ad110ea0a9c8c5 | Shell | tartley/dotfiles | /bin/primary-monitor | UTF-8 | 1,134 | 4.0625 | 4 | [] | no_license | #!/usr/bin/env bash
# Print usage/help text to stderr.
function usage {
    # ${prog} was never defined anywhere in the script, so the first line
    # used to print "USAGE:  MONITOR"; derive the program name from $0.
    local prog=${0##*/}
    echo "USAGE: ${prog} MONITOR" >&2
    echo "Where: MONITOR is an integer specifying which monitor to make primary." >&2
    echo "Integers are assigned starting with the leftmost monitor, working rightwards." >&2
    echo "Issues xrandr command to make the given monitor the current primary." >&2
    echo "Useful to make sure Steam launches games on the correct monitor." >&2
}
# List the names of the connected monitors into an array,
# sorted by their horizontal position relative to each other.
readarray -t monitors < <(xrandr --listmonitors | tail -n+2 | sort -t+ -k3n | cut -d' ' -f6)
if [[ $# -ne 1 ]] ; then
echo "ERROR: more than one arg given" >&2
usage
exit 1
fi
if [[ "$1" = "-h" || "$1" == "--help" ]] ; then
usage
exit 0
fi
re_int='^[0-9]+$'
if ! [[ "$1" =~ $re_int ]] ; then
echo "ERROR: not an integer" >&2
usage
exit 1
fi
if ! [[ ( "$1" < "${#monitors[@]}" ) ]] ; then
echo "ERROR: integer not less than number of monitors (${#monitors[@]})" >&2
usage
exit 1
fi
monitor="${monitors[$1]}"
xrandr --output "$monitor" --primary
| true |
75e726d9d02382c78697cd0769277cab8beae6da | Shell | shift093/tensorrt-demo | /docker_run_bash.sh | UTF-8 | 523 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Launch an interactive shell in the TensorFlow or TensorRT demo container,
# mounting the working copy and the ImageNet data set.
if [ "$1" = "TF" ]; then
    nvidia-docker run \
        --rm -it \
        -p 6001:6006 \
        -v "$(pwd)":/demo/ \
        -v /imagenet/:/imagenet/ \
        --name tensorflow-bash \
        trt-demo-tf:latest \
        /bin/bash
elif [ "$1" = "TRT" ]; then
    # The original TRT branch ended in a dangling backslash that swallowed
    # the 'else' keyword and never gave the container a command to run.
    nvidia-docker run \
        --rm -it \
        -p 6002:6006 \
        -v "$(pwd)":/demo/ \
        -v /imagenet/:/imagenet/ \
        --name tensorrt-bash \
        trt-demo-trt:latest \
        /bin/bash
else
    echo "Usage: ./docker_run_bash.sh <TF|TRT>"
fi
| true |
0b9f340bb82c1ac7eb6fff698b3de52250334e97 | Shell | kissthink/ports | /system/lsof/lsof.build | UTF-8 | 1,726 | 3.421875 | 3 | [] | no_license | #!/bin/bash
#
# Maintainer: Christoph J. Thompson <cjsthompson@gmail.com>
# Slackware-style build script; build.sh supplies the helper functions used
# below (pkg helpers, src.unpack, install.*, doc, changelog, license, padd).
source /usr/src/ports/Build/build.sh
NAME=lsof
VERSION=4.87
BUILD=1
# Description
cat > ${PKG}/install/slack-desc <<EOF
# HOW TO EDIT THIS FILE:
# The "handy ruler" below makes it easier to edit a package description. Line
# up the first '|' above the ':' following the base package name, and the '|'
# on the right side marks the last column you can put a character in. You must
# make exactly 11 lines for the formatting to be correct. It's also
# customary to leave one space after the ':'.
$(padd)|-----handy-ruler------------------------------------------------------|
${NAME}: lsof (list open files)
${NAME}:
${NAME}: Lsof is a Unix-specific tool. Its name stands for "LiSt Open Files",
${NAME}: and it does just that. It lists information about files that are open
${NAME}: by the processes running on the system.
${NAME}:
${NAME}: Victor A. Abell of Purdue University is the developer of lsof.
${NAME}:
${NAME}: Homepage: http://people.freebsd.org/~abe
${NAME}:
${NAME}:
EOF
# Sources
SRCNAME[0]=${NAME}
SRCVERS[0]=${VERSION}
SRCPACK[0]=ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/${SRCNAME[0]}_${SRCVERS[0]}.tar.bz2
SRCROOT[0]=${SRCNAME[0]}_${SRCVERS[0]}
# Build step invoked by the ports framework: unpack, enable HASSECURITY,
# configure for Linux, compile and install binary + man page into $PKG.
build0()
{
	src.unpack ${SRCNAME[0]}_${SRCVERS[0]}_src.tar
	(
	cd ${SRCNAME[0]}_${SRCVERS[0]}_src
	sed -i 's|/\* #define\tHASSECURITY\t1 \*/|#define\tHASSECURITY\t1|' dialects/linux/machine.h
	./Configure -n linux
	make ${JOBS} CDEF="${FLAGS}"
	install.dir ${PKG}${SYS_DIR[sbin]}
	install.bin lsof ${PKG}${SYS_DIR[sbin]}
	install.dir ${PKG}${SYS_DIR[man]}/man8
	install.man lsof.8 ${PKG}${SYS_DIR[man]}/man8
	)
	doc ${SRCNAME[0]}_${SRCVERS[0]}_src/00CREDITS
	changelog ${SRCNAME[0]}_${SRCVERS[0]}_src/00DIST
	license COPYING
}
| true |
c697539f23e66d8c601bc45286d9491d722fd8d1 | Shell | okurz/scripts | /test/fail-once-every-third-call | UTF-8 | 243 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Keep an invocation counter in a temp file; the script's exit status is
# (( count % 3 )), i.e. it "fails" (non-zero arithmetic result is success,
# a multiple of three yields status 1) on every third call.
TMPDIR="${TMPDIR:-"/tmp"}"
tmp="${tmp:-"$TMPDIR/tmp.$(basename "$0")"}"
count=0
if [[ -e "$tmp" ]]; then
    count="$(cat "$tmp")"
    [[ -n "$count" ]] || count=0
fi
count=$(( count + 1 ))
echo "$count" > "$tmp"
(( count % 3 ))
| true |
3882b8bc4d565b96a0c348dac6e1993c14a627d2 | Shell | fentie/phpstorm-template-project | /clone-project-settings.sh | UTF-8 | 496 | 2.796875 | 3 | [] | no_license | #!/bin/bash
# Clone the PhpStorm template project's settings into a new project.
# Usage: clone-project-settings.sh <newProjectName>
WORKSPACE='/Volumes/workspace' # this is where your code and template project live
cp -R "$WORKSPACE/templateProject" "$WORKSPACE/templateProject2" # clone directory
mv "$WORKSPACE/templateProject2" "$WORKSPACE/$1/.idea" # rename to .idea and move inside new project dir
mv "$WORKSPACE/$1/.idea/templateProject.iml" "$WORKSPACE/$1/.idea/$1.iml" # rename IML file
# Replace the template name inside every settings file. 'find -exec'
# (instead of an unquoted $(find ...)) keeps file names with spaces intact.
find "$WORKSPACE/$1/.idea/" -type f -exec perl -pi -e "s/templateProject/$1/g" {} +
| true |
a77cb7a04554196f6d86256e57d063e1d749b8ca | Shell | cadia-lvl/lirfa | /setup/save_confirmed_words.sh | UTF-8 | 850 | 3.875 | 4 | [
"Apache-2.0"
] | permissive | #! /bin/bash
# Description: Gets today's date then retrieves all the words that have been
#confirmed since the last time this script was run, then saves it to the
#confirmedWords dir with a filename of CURRENT_DATE.txt
# Usage as a cron job run daily after working hrs from the lirfa directory
# assumes this is run from the project root
# path.sh must define $confirmedWordsDir.
. ./path.sh
dateConfirmed=$(date +%F)
fileConfirmed="$confirmedWordsDir""$dateConfirmed"".txt"
if [[ $(date +%u) -eq 1 ]]; then
    #check if monday, if so do special stuff
    # On Mondays, go back to Saturday so the weekend is covered too.
    startDate=$(date --date="2 days ago" +%F)
else
    startDate=$dateConfirmed
fi
curl -sk 'https://YOUR-DOMAIN-HERE/lirfa/api/confirmWords/?pronunciation=1&startDate='"$startDate" -o $fileConfirmed
# Drop the output file again if the API returned an empty body.
if [[ -f "${fileConfirmed}" && ! -s "${fileConfirmed}" ]]
then
    rm "$fileConfirmed"
    echo "removed empty file: " "$fileConfirmed"
fi
| true |
c58fac11fa29575ec80241653c6dd4741ba70bc0 | Shell | sem-con/sc-yaml | /semcon | UTF-8 | 6,644 | 4.28125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# resources
# - https://sap1ens.com/blog/2017/07/01/bash-scripting-best-practices/
# Abort on command failure and on failures anywhere in a pipeline.
set -o errexit
set -o pipefail
# helper functions ================
# Print the tool name/version followed by a blank line.
version_info()
{
    printf '%s\n\n' "semcon, version 0.1"
}
# Print the command-line usage summary to stdout.
# printf '%b\n' interprets the embedded \t escapes exactly like 'echo -e'.
usage_info()
{
    printf 'Usage: %s\n' "$0"
    printf '%b\n' "\t--help print this help message"
    printf '%b\n' "\t--version version information"
    printf '%b\n' "\t-v|--verbose print additional information"
    printf '\n'
    printf '%b\n' "\tstart <file.yaml> start a Semantic Container with configuration in"
    printf '%b\n' "\t file.yaml"
    printf '%b\n' "\tset <container> <config.yaml>"
    printf '%b\n' "\t define a container as specified in <config.yaml>"
    printf '%b\n' "\t<container> perm create <account> <scope>"
    printf '%b\n' "\t create account with given scope"
    printf '%b\n' "\t<container> perm show <account>"
    printf '%b\n' "\t show account information"
    printf '%b\n' "\t<container> perm setenv <account> <name>"
    printf '%b\n' "\t set environment variable"
    printf '%b\n' "\t<container> write [<account|token>]"
    printf '%b\n' "\t read data from <stdin> and write as account"
    printf '%b\n' "\t<container> read [<account|token>]"
    printf '%b\n' "\t read data from <container> and write to <stdout>"
    printf '\n'
}
# Read a scalar value from a simple one-level YAML file.
# $1 - YAML file path
# $2 - key pattern (first matching line wins)
# $3 - name of the variable to store the result in
# The value is everything after the first ':' (a second ':' is preserved, so
# "image: repo:tag" yields "repo:tag") with all whitespace stripped.
extract_yaml()
{
    local value
    value=$(grep "$2" "$1" | head -n1 | cut -d ':' -f 2,3 | tr -d "[:space:]")
    # printf -v assigns to the named variable without the quoting/injection
    # pitfalls of the original eval "$3=...".
    printf -v "$3" '%s' "$value"
}
# Report an invalid invocation, print usage, and terminate: 'return 1' when
# the script is being sourced, 'exit 1' when executed directly.
# Fix: "${args}" only expanded the FIRST element of the global args array;
# "${args[*]}" reproduces the whole original command line.
invalid_syntax()
{
    echo "invalid option or syntax: ${args[*]}"
    usage_info
    if [ "${BASH_SOURCE[0]}" != "${0}" ]; then
        return 1
    else
        exit 1
    fi
}
# read from & write into container =================
# Look up the published host port of a container.
# $1 - container name to query
# $2 - name of the variable to store the port in
# Fix: the original queried a hard-coded container "hse" and ignored $1,
# even though write_data/read_data pass the container name explicitly.
# printf -v also avoids eval'ing docker output.
get_port()
{
    printf -v "$2" '%s' "$(docker port "$1" | cut -d ":" -f 2)"
}
# Pipe stdin into container $1's data API: resolves the container's published
# host port, then POSTs stdin as JSON to http://localhost:<port>/api/data.
write_data()
{
    if $VERBOSE ; then
        echo "writing data into container"
    fi
    return_value='';
    get_port $1 return_value
    SC_PORT=$return_value
    # The pipeline is built as a string and eval'd so it can be echoed in
    # verbose mode before being executed.
    command="cat - | curl -X POST -d @- -H 'Content-Type: application/json' http://localhost:${SC_PORT}/api/data"
    if $VERBOSE ; then
        echo "${command}"
    fi
    eval $command
}
# Fetch container $1's data to stdout: resolves the container's published
# host port and GETs http://localhost:<port>/api/data.
read_data()
{
    if $VERBOSE ; then
        echo "reading data from container"
    fi
    return_value='';
    get_port $1 return_value
    SC_PORT=$return_value
    # Built as a string so verbose mode can show the exact command run.
    command="curl http://localhost:${SC_PORT}/api/data"
    if $VERBOSE ; then
        echo "${command}"
    fi
    eval $command
}
# start container =================
# Start a Semantic Container described by the YAML file in $SC_CONFIG_FILE:
# reads image/port/name from the config, removes any container with that
# name, then runs the image detached with the config directory mounted at
# /config and identifying environment variables set.
start_container()
{
    if $VERBOSE ; then
        echo "starting container"
    fi
    return_value='';
    extract_yaml "./${SC_CONFIG_FILE}" "image" return_value
    IMAGE=$return_value
    return_value='';
    extract_yaml "./${SC_CONFIG_FILE}" "port" return_value
    PORT=$return_value
    return_value='';
    extract_yaml "./${SC_CONFIG_FILE}" "name" return_value
    CONTAINER_NAME=$return_value
    CONFIG_PATH=$(dirname "$PWD/${SC_CONFIG_FILE}")
    CONFIG_FILE=$(basename "./${SC_CONFIG_FILE}")
    # Assemble the docker command as a string so verbose mode can echo it.
    # The escaped $(docker image ls ...) is deliberately evaluated only at
    # eval time, after the image may have been pulled.
    command="docker rm -f ${CONTAINER_NAME};"
    command="${command} docker run -d"
    command="${command} --name ${CONTAINER_NAME}"
    command="${command} -p ${PORT}:3000"
    command="${command} -v ${CONFIG_PATH}:/config"
    command="${command} -e CONFIG_FILE=${CONFIG_FILE}"
    command="${command} -e IMAGE_SHA256=\"\$(docker image ls --no-trunc -q ${IMAGE} | cut -c8-)\""
    command="${command} -e IMAGE_NAME=${IMAGE}"
    command="${command} ${IMAGE}"
    if $VERBOSE ; then
        echo "${command}"
    fi
    eval $command
}
# option handling =================
# option handling: walk the positional parameters; global flags first, then
# either the 'start' sub-command or a container name followed by an action.
args=("$@")
VERBOSE=false
if [[ $# -eq 0 ]]
then
    usage_info
    exit 0
fi
while [ $# -gt 0 ]; do
    case "$1" in
        --version)
            version_info
            exit 0
            ;;
        --help)
            version_info
            usage_info
            exit 0
            ;;
        --verbose|-v)
            VERBOSE=true
            ;;
        start)
            # 'start <file.yaml>': exactly one argument must follow.
            SC_MODE=start
            shift
            if [[ $# -eq 1 ]]
            then
                SC_CONFIG_FILE=$1
                start_container
                exit 0
            else
                invalid_syntax
            fi
            ;;
        *)
            # Anything else is taken as a container name; it must match a
            # currently running docker container.
            SC_CONTAINER_NAME=$1
            if [ "$(docker ps -q -f name=${SC_CONTAINER_NAME})" ];
            then
                shift
                case "$1" in
                    perm)
                        SC_MODE=perm
                        shift
                        case "$1" in
                            show)
                                shift
                                if [[ $# -eq 1 ]]
                                then
                                    SC_ACCOUNT=$1
                                    # NOTE(review): show_perm is not defined
                                    # anywhere in this script - verify.
                                    show_perm
                                    exit 0
                                else
                                    invalid_syntax
                                fi
                                ;;
                            create)
                                shift
                                if [[ $# -eq 2 ]]
                                then
                                    SC_ACCOUNT=$1
                                    SC_ACCOUNT_SCOPE=$2
                                    # NOTE(review): create_perm is not defined
                                    # anywhere in this script - verify.
                                    create_perm
                                    exit 0
                                else
                                    invalid_syntax
                                fi
                                ;;
                            setenv)
                                shift
                                if [[ $# -eq 2 ]]
                                then
                                    SC_ACCOUNT=$1
                                    SC_ENV=$2
                                    # NOTE(review): set_permenv is not defined
                                    # anywhere in this script - verify.
                                    set_permenv
                                    exit 0
                                else
                                    invalid_syntax
                                fi
                                ;;
                            *)
                                invalid_syntax
                                ;;
                        esac
                        ;;
                    write)
                        SC_MODE=write
                        write_data $SC_CONTAINER_NAME
                        ;;
                    read)
                        SC_MODE=read
                        read_data $SC_CONTAINER_NAME
                        ;;
                    *)
                        invalid_syntax
                        ;;
                esac
            else
                invalid_syntax
            fi
            ;;
    esac
    shift
done
| true |
e00a4c546bc399bdbec9fdf57c5349833e9f6255 | Shell | Regnskydd/bitbucket_api | /bitbucket_api.sh | UTF-8 | 1,586 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# Bitbucket pull-request status helpers (work in progress).
# Define constants. Shell assignments must not have spaces around '=':
# the original "VAR = value" form executed VAR as a command instead.
JENKINS_USER="PLACEHOLDER"
API_TOKEN="PLACEHOLDER"
BITBUCKET_URL="PLACEHOLDER"
# TODO: Add some method to get bitbucket URL from jenkins/jenkinsfile
# TODO: Add some method to get user from jenkins/jenkinsfile

# Helper function to POST payloads via curl command.
# $1 - JSON payload to send (the original referenced an undefined $ARG1).
function poster () {
	curl -v -i -k -X PUT \
	-H "Authorization:Bearer $API_TOKEN" \
	-H "Content-Type:application/json" \
	-d "$1" \
	"$BITBUCKET_URL"
}

#/rest/api/1.0/projects/{projectKey}/repos/{repositorySlug}/pull-requests/{pullRequestId}/participants/{userSlug} CHECK THIS TOMMORW

# Mark a pull-request as approved.
# The JSON body (previously pasted raw into the function, a syntax error) is
# now built in a quoted heredoc and handed to poster. The user fields are
# still hard-coded sample data; see the TODOs above.
function pullrequest_approved () {
	local payload
	payload=$(cat <<'EOF'
{
  "user": {
    "name": "jcitizen",
    "emailAddress": "jane@example.com",
    "id": 101,
    "displayName": "Jane Citizen",
    "active": true,
    "slug": "jcitizen",
    "type": "NORMAL"
  },
  "role": "REVIEWER",
  "approved": true,
  "status": "APPROVED"
}
EOF
)
	poster "$payload"
}

# Mark a pull-request as unapproved
function pullrequest_unapproved () {
	: # TODO: implement (an empty function body is a bash syntax error)
}

# Mark a pull-request as needs work
function pullrequest_needs_work () {
	: # TODO: implement
}

#TODO: Investigate what functions are needed
function build_successful () {
	: # TODO: implement
}
function build_failed () {
} | true |
6fb66f43686c77f7a7364bdeb04216dcc593b6e3 | Shell | nathanwbrei/coding_log_book | /2020-05-20-remove-large-files-from-git/migrate_eicroot_2.sh | UTF-8 | 1,800 | 3.359375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# One-shot migration recipe: strip a large file from EicRoot's entire git
# history in a mirrored clone, then push the cleaned mirror to GitHub and
# (after manual verification) back to the origin. Destructive - read through
# before running.
# Create a clean mirrored clone of EicRoot locally
git clone --mirror https://gitlab.com/eic/EicRoot EicRootScratch
#git clone --mirror https://git.racf.bnl.gov/gitea/EIC/EicRoot EicRootScratch
# Make all changes to the mirrored local scratch repository first
# NOTE(review): consider 'cd EicRootScratch || exit' - if the clone failed,
# every following git command would run against the wrong directory.
cd EicRootScratch
# Examine current repo commits, tags, branches
git --no-pager log --graph --decorate --pretty=oneline --abbrev-commit --all
# Check that problematic file is present
git --no-pager log --all --full-history -- "input/SolenoidMap3.dat"
git diff-tree --no-commit-id --name-only -r b2ffd6 | grep input
# Check that repo size before filtering
du -h -d 0
# Previous size = 257 MB
# Remove SolenoidMap3.dat from the entire history, including updating all branches and tags
git filter-branch --force --index-filter "git rm --cached --ignore-unmatch input/SolenoidMap3.dat" --prune-empty --tag-name-filter cat -- --all
# The files still exist in history because there is one or more hidden refs under .git/refs/original
# So we delete the problematic refs
rm -Rf refs/original
# And trigger immediate garbage collection
git gc --aggressive --prune=now
# Verify that SolenoidMap3 file is gone everywhere and your branches and tags still make sense
git --no-pager log --all --full-history -- "input/SolenoidMap3.dat"
git --no-pager log --graph --decorate --pretty=oneline --abbrev-commit --all
# Verify repo size is smaller now
du -h -d 0
# Push the modified mirror to your new empty github repository
git push --mirror https://github.com/eic/EicRoot.git
# ONLY DO THIS LAST STEP AFTER YOU VERIFY THAT ALL BRANCHES AND TAGS MAKE SENSE ON THE NEW GITHUB REPO
# Force push the mirrored repository back to the origin repository at BNL, so that the old and new repositories are identical
git push --mirror --force
| true |
3a600ebc0f7d406931884b5a63712738e4bea335 | Shell | cloudfoundry/bosh-linux-stemcell-builder | /stemcell_builder/stages/system_grub/apply.sh | UTF-8 | 951 | 3.78125 | 4 | [
"LGPL-2.1-only",
"Artistic-2.0",
"LicenseRef-scancode-other-permissive",
"MPL-1.1",
"GPL-1.0-or-later",
"GPL-3.0-or-later",
"LicenseRef-scancode-unicode-mappings",
"Artistic-1.0-Perl",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"GPL-2.0-or-later",
"GPL-3.0-only",
"Artistic-1.0",
"Apache-2.0",
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"Ruby",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain-disclaimer",
"BSD-2-Clause"
] | permissive | #!/usr/bin/env bash
set -e

# Stemcell-builder stage: install GRUB into the chroot. prelude_apply.bash
# supplies $chroot, $DISTRIB_CODENAME, pkg_exists and pkg_mgr.
base_dir=$(readlink -nf $(dirname $0)/../..)
source $base_dir/lib/prelude_apply.bash

# Xenial prefers classic grub with grub2 as fallback; later releases invert
# the preference.
if [ ${DISTRIB_CODENAME} == 'xenial' ]; then
  preferred=grub
  fallback=grub2
else
  preferred=grub2
  fallback=grub
fi

if pkg_exists $preferred; then
  pkg_mgr install $preferred
elif pkg_exists $fallback; then
  pkg_mgr install $fallback
else
  echo "Can't find grub or grub2 package to install"
  exit 2
fi

# Copy the architecture-specific GRUB files into /boot/grub inside the chroot.
if [ -d $chroot/usr/lib/grub/x86* ] # classic GRUB on Ubuntu
then
  rsync -a $chroot/usr/lib/grub/x86*/ $chroot/boot/grub/
elif [ -d $chroot/usr/lib/grub/i386* ] # grub-pc on bionic
then
  rsync -a $chroot/usr/lib/grub/i386*/ $chroot/boot/grub/
else
  echo "Can't find GRUB or GRUB 2 files, exiting"
  exit 2
fi

# When a kernel is installed, update-grub is run per /etc/kernel-img.conf.
# It complains when /boot/grub/menu.lst doesn't exist, so create it.
mkdir -p $chroot/boot/grub
touch $chroot/boot/grub/menu.lst
| true |
518b8125118e913e141c2483da35331c35842ce5 | Shell | alanwthatcher/alan-plans | /demo-stuff/habitat-operator/hab-k8s-operator-deploy-helm.sh | UTF-8 | 460 | 2.609375 | 3 | [] | no_license | #!/bin/sh
# Deploy the Habitat operator via Helm: create the tiller service account,
# grant it cluster-admin, initialise Helm, then install the chart.
set -x

NAME="habitat"
VERSION="0.8.1"

kubectl create serviceaccount --namespace kube-system tiller
kubectl create clusterrolebinding tiller-cluster-admin \
    --clusterrole=cluster-admin \
    --serviceaccount=kube-system:tiller
# --wait blocks until tiller is ready before the repo/install steps run.
helm init --service-account tiller --wait
helm repo add habitat https://habitat-sh.github.io/habitat-operator/helm/charts/stable/
helm repo update
helm install --name ${NAME} habitat/habitat-operator --version ${VERSION} | true |
e533dfe01b48660d20bbf3612ce393fc7a6534c5 | Shell | mjzapata/WholeGenomeAnalytics | /scripts/2BAMSorting_runScripts.sh | UTF-8 | 361 | 2.671875 | 3 | [] | no_license | #!/bin/bash
#PBS -q viper
#PBS -N mjzBT_runALL
#PBS -l walltime=0:15:00
#PBS -l nodes=20

# Load pipeline configuration — expected to define $scripts2Dir, the
# directory holding the per-sample sorting scripts (TODO confirm in
# wgaconfig.conf).
. wgaconfig.conf

# Collect the basename of every file under $scripts2Dir into an array.
# mapfile avoids the manual index/while-read loop and keeps backslashes
# intact; the unquoted glob after ${scripts2Dir} mirrors the original
# lookup (top-level dot entries are skipped by the glob).
mapfile -t myScripts < <(find ${scripts2Dir}* -type f -exec basename {} \;)

arrayLength=${#myScripts[@]}
echo "$arrayLength" " files"

# Submit one scheduler job per collected script.
for script in "${myScripts[@]}"; do
	qsub "${scripts2Dir}${script}"
done
| true |
d64f9f2d777cd373df5888e02d5b5cc6d3c98937 | Shell | manpages/tar-spoon | /assets/bin/imksocks.sh | UTF-8 | 774 | 3.359375 | 3 | [] | no_license | function no_host {
echo "No host given, aborting. You may later run
sudo /path/to/tarspoon/assets/bin/imksocks.sh
to configure SOCKS proxy." && exit
}
# Defaults; each can be overridden interactively below.
r_ssh_port=22
k_path=~/.ssh/id_rsa
l_socks_port=$1          # local SOCKS port, taken from the first argument
r_user=$SUDO_USER        # script is run via sudo; default to the invoking user

echo "Creating socks proxy on 127.0.0.1:${l_socks_port}..."
echo

# Remote host is mandatory — bail out via no_host when nothing is entered.
# read -r prevents backslash sequences in the input from being mangled.
echo "Remote IP or hostname (better use IP here):"
read -r r_host
[[ -n "$r_host" ]] || no_host

# Remaining prompts keep their default unless the user types a replacement.
echo "Remote SSH port [$r_ssh_port]:"
read -r r_ssh_port1
[[ -n "$r_ssh_port1" ]] && r_ssh_port=$r_ssh_port1

echo "Path to SSH key [$k_path]:"
read -r k_path1
[[ -n "$k_path1" ]] && k_path=$k_path1

echo "Remote user [$r_user]:"
read -r r_user1
[[ -n "$r_user1" ]] && r_user=$r_user1

# Quote every argument so values containing spaces reach mksocks intact.
mksocks "$r_user" "$r_host" "$k_path" "$r_ssh_port" "$l_socks_port"
echo "SOCKS proxy configured. That was a triumph."
8872db46e004e34e144d5c0b76ad4e10e1d76fbc | Shell | azimut/dotfiles | /homedir/bashrc/gentoo.sh | UTF-8 | 1,461 | 3.828125 | 4 | [] | no_license | #!/bin/bash
# Register a Gentoo overlay in /etc/portage/repos.conf and run an initial sync.
# Example usage: add_overlay musl https://anongit.gentoo.org/git/proj/musl.git
#
# Arguments:
#
#   1: overlay id used as the repos.conf entry name
#   2: sync URI of the overlay
#   3: sync type (optional, default: git)
#   4: priority (optional, default: 50)
add_overlay() {
	local overlay_id="$1"
	local overlay_url="$2"
	local sync_type="${3:-git}"
	local priority="${4:-50}"
	local overlays_root='/var/lib/repos'

	# Make sure the checkout root exists before the first sync.
	if [ ! -d "${overlays_root}" ]; then
		sudo mkdir -p "${overlays_root}"
	fi

	# Write the repos.conf stanza for this overlay.
	sudo tee /etc/portage/repos.conf/"${overlay_id}".conf >/dev/null <<END
[${overlay_id}]
priority = ${priority}
location = ${overlays_root}/${overlay_id}
sync-type = ${sync_type}
sync-uri = ${overlay_url}
END

	# Fetch the overlay for the first time.
	sudo emaint sync -r "${overlay_id}"
}
# Thin wrapper for app-portage/flaggie, a tool for managing portage keywords and use flags
#
# Examples:
#
#   global use flags: update_use -readline +ncurses
#   per package: update_use app-shells/bash +readline -ncurses
#   same syntax for keywords: update_use app-shells/bash +~amd64
#   target package versions as usual, remember to use quotes for < or >: update_use '>=app-text/docbook-sgml-utils-0.6.14-r1' +jadetex
#   reset use/keyword to default: update_use app-shells/bash %readline %ncurses %~amd64
#   reset all use flags: update_use app-shells/bash %
function update_use() {
	# "$@" forwards each argument verbatim; the previous unquoted ${@}
	# re-split and glob-expanded arguments (ShellCheck SC2068).
	sudo flaggie --strict --destructive-cleanup "$@"
}
| true |
b8360d7d9c07a18aac7a5bd4a6055a496dce17c0 | Shell | slcss/Shenzhen_project | /netlayer所有代码/netlayer/netconfig.sh | GB18030 | 1,200 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Identifier of this node; used in every address/MAC below.
inode_id=2

# Debug switch: run `export DEBUG=true` beforehand to enable debug output.
export DEBUG=true

# Run the given command only when debugging is enabled.
DEBUG()
{
	if [ "$DEBUG" = "true" ]; then
		"$@"
	fi
}
DEBUG echo "debug opened!!"

set -x
echo "the value of DEBUG: $DEBUG"
set +x

echo "config ip of eth0/usb1"
ifconfig usb1 "192.168.$inode_id.240/24" up

echo "config mac&ip of tap0.."
#ifconfig tap0 down
# tap0 gets a MAC and IP derived from this node's id.
ifconfig tap0 hw ether "00:11:22:33:44:$inode_id"
ifconfig tap0 "192.168.0.$inode_id" netmask 255.255.255.0 up

# Routes added with `route` are lost after a machine or interface restart.
echo "config arp.."
for (( counter = 1; counter < 33; counter++ )); do
	if [ "$counter" != "$inode_id" ]; then
		echo "config arp of addr : $counter"
		# Static ARP entry mapping each peer's overlay IP to its MAC.
		arp -s "192.168.0.$counter" "00:11:22:33:44:$counter"
	fi
done

# Add per-node routes: every packet for subnet 192.168.N.0/24 is sent
# via gateway 192.168.0.N.
echo "config route.."
for (( counter = 1; counter < 33; counter++ )); do
	if [ "$counter" != "$inode_id" ]; then
		echo "config route of net : $counter"
		route add -net "192.168.$counter.0/24" gw "192.168.0.$counter"
	fi
done

echo "run route forward"
# Enable kernel IPv4 forwarding so this node can route between subnets.
echo 1 > /proc/sys/net/ipv4/ip_forward
| true |
fee4ad2048b7019b1caf796b43665cd16d02c2c2 | Shell | song10/bin | /ubhome2my.sh | UTF-8 | 546 | 3.1875 | 3 | [] | no_license | #!/bin/sh
# Abort when the data partition is not mounted.
[ -d /mnt/data ] || exit 1

MYDIR=/mnt/data/song10
# BUG FIX: the original tested `[ -d "$MYDIR" ]` and therefore only
# created/chowned the directory when it ALREADY existed — the check
# must be negated.
if [ ! -d "$MYDIR" ]; then
	sudo mkdir -p "$MYDIR"
	sudo chown "$USER.$USER" "$MYDIR"
fi

cd "$HOME" || exit 1
# Convenience symlink ~/my -> the data directory.
[ -L my ] || ln -s "$MYDIR" my

# Replace each standard home folder with a symlink into the data dir.
for x in Desktop Documents Downloads Music Pictures Public Templates Videos 'VirtualBox VMs' workspace; do
	[ -d "$MYDIR/$x" ] || mkdir "$MYDIR/$x"
	# Remove the real (empty) directory before linking; `-a` in test is
	# deprecated, so chain two tests instead.
	[ -d "$x" ] && [ ! -L "$x" ] && rmdir "$x"
	[ -e "$x" ] || ln -s "$MYDIR/$x"
done
rm -f examples.desktop

## patch
# 'VirtualBox VMs' should point at the lowercase virtualbox directory.
PDIR='VirtualBox VMs'
if [ -L "$PDIR" ]; then
	rm -f "$PDIR"
	ln -s "$MYDIR/virtualbox" "$PDIR"
fi
| true |
33d6a46f1b31b43906385313a84257423772df9b | Shell | WangKaiwh/Learn_Record | /Bash_shell/if/cmp/string_cmp/not-equal.sh | UTF-8 | 129 | 3.046875 | 3 | [] | no_license | #!/bin/bash
testuser=unknown

# Quote both operands so the test does not break when $USER is empty or
# unset (unquoted, `[ != unknown ]` is a syntax error in `[`).
if [ "$USER" != "$testuser" ]; then
	echo "who are you: $testuser"
else
	echo "welcome $testuser"
fi
| true |
6dba83d2d33847095a8fa71d81f28788d8202e44 | Shell | Devidian/docker-rising-world | /example/init.sh | UTF-8 | 355 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Destination directory for the dedicated-server files.
SERVER_DIR=/appdata/rising-world-java/dedicated-server

# Ensure the target directory exists before syncing into it.
mkdir -p "$SERVER_DIR"
# Mirror everything under ./data into the game directory.
rsync -rltDvzr ./data/* "$SERVER_DIR"
# Hand ownership of the copied tree to uid/gid 1000.
chown -R 1000:1000 "$SERVER_DIR"
# start service
docker compose up -d | true |
27c5f94a0f150db404e95f43af30ac1144e3f8cd | Shell | jramapuram/scripts | /monitor_auto_toggle/hotplug_monitor.sh | UTF-8 | 2,776 | 3.265625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
######################################
## /usr/local/bin/hotplug_monitor.sh
##
## Hotplug helper: picks an xrandr layout when an external monitor
## (DisplayPort/Thunderbolt, HDMI or DisplayLink DVI) is (dis)connected,
## then refreshes conky + wallpaper and restarts i3.
######################################
X_USER=jramapuram
export DISPLAY=:0
export XAUTHORITY=/home/$X_USER/.Xauthority
export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1000/bus

INTERNAL_OUTPUT="eDP1"
EXTERNAL_POSTFIX="DP-1" # thunderbolt
EXTERNAL_OUTPUT="DP1" # thunderbolt

# Refresh the desktop after an xrandr change: poke conky, restore the
# wallpaper and restart i3. (Previously copy-pasted after every branch.)
refresh_desktop() {
    killall -SIGUSR1 conky
    sleep 1 && nitrogen --restore
    i3-msg 'restart'
}

# determine what index the card is assigned
# Note: this is only used for non-tb devices
if [ -d "/sys/class/drm/card1" ]; then
    CARD_INDEX="/sys/class/drm/card1"
else
    CARD_INDEX="/sys/class/drm/card0"
fi

# check if we have our displaylink connector under /sys/class/drm
DISPLAYLINK=$(ls /sys/class/drm/ | grep DVI-I-1)
if ! [ -z "$DISPLAYLINK" ]; then
    if [ "$(cat "/sys/class/drm/${DISPLAYLINK}/status")" == "connected" ]; then
        echo "enable" # typo fix: was "enabl"
        ISDISPLAYLINK=true
    else
        echo "disable"
        ISDISPLAYLINK=false
    fi
else
    echo "disable"
    ISDISPLAYLINK=false
fi

# echo "$(cat ${CARD_INDEX}-$EXTERNAL_POSTFIX/status)"
if [ "$ISDISPLAYLINK" = true ]; then
    echo "here!" # debug trace kept from the original
    # handle the case when we have displaylink connected
    EXTERNAL_OUTPUT="DVI-I-1-1"
    EXTERNAL_POSTFIX="DVI-I-1"

    # If at home (3440x1440 ultrawide) disable laptop screen else use 'all' mode
    if [ "$(head -n 1 "/sys/class/drm/${DISPLAYLINK}/modes")" == "3440x1440" ] ; then
        xrandr --output "$INTERNAL_OUTPUT" --off --output "$EXTERNAL_OUTPUT" --auto
    else
        xrandr --output "$INTERNAL_OUTPUT" --auto --output "$EXTERNAL_OUTPUT" --auto --left-of "$INTERNAL_OUTPUT"
    fi
    refresh_desktop
elif [ "$(cat "${CARD_INDEX}-${EXTERNAL_POSTFIX}/status")" == "connected" ] ; then
    # External DP/Thunderbolt monitor connected.
    # If at home disable laptop screen else use 'all' mode
    if [ "$(head -n 1 "${CARD_INDEX}-${EXTERNAL_POSTFIX}/modes")" == "3440x1440" ] ; then
        xrandr --output "$INTERNAL_OUTPUT" --off --output "$EXTERNAL_OUTPUT" --auto
    else
        xrandr --output "$INTERNAL_OUTPUT" --auto --output "$EXTERNAL_OUTPUT" --auto --left-of "$INTERNAL_OUTPUT"
    fi
    refresh_desktop
elif [ "$(cat "${CARD_INDEX}-${EXTERNAL_POSTFIX}/status")" == "disconnected" ] ; then
    # DP monitor unplugged: fall back to the internal panel only.
    xrandr --output "$INTERNAL_OUTPUT" --auto --output "$EXTERNAL_OUTPUT" --off
    refresh_desktop
elif [ "$(cat "${CARD_INDEX}-HDMI-A-1/status")" == "connected" ] ; then
    xrandr --output "$INTERNAL_OUTPUT" --auto --output HDMI1 --auto --left-of "$INTERNAL_OUTPUT"
    refresh_desktop
elif [ "$(cat "${CARD_INDEX}-HDMI-A-1/status")" == "disconnected" ] ; then
    xrandr --output "$INTERNAL_OUTPUT" --auto --output HDMI1 --off
    refresh_desktop
else
    exit
fi
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.