blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
af3dd1dc7ab25481059df4c330de7fc9fa052b51 | Shell | nesi/ARCS-systems | /dataFabricScripts/BulkDataTransfer/fdtGet7T.sh | UTF-8 | 2,042 | 3.984375 | 4 | [] | no_license | #!/bin/sh
# fdtGet7T.sh Copies files from a designated directory on a remote server using
# FDT over a designated port. Needs Java 1.6. Beta version only!
# 'fdt.jar' and 'java' need to be in PATH and executable.
# Graham.Jenkins@arcs.org.au Jan. 2011; Rev: 20110125
# Default port and ssh-key; adjust as appropriate
PORT=80; KEY=~/.ssh/id_dsa; export PORT KEY
# Options
#   -p port      use this TCP port for FDT
#   -k keyfile   ssh key used by FDT
#   -r           reverse the sort order of the transfer list
# NOTE(review): getopts declares 'b:' but no 'b' case is handled below —
# confirm whether -b was meant to be supported.
while getopts p:k:b:r Option; do
  case $Option in
    p) PORT=$OPTARG;;
    k) KEY=$OPTARG;;
    r) Order="-r";;
  esac
done
shift `expr $OPTIND - 1`
# Usage, alias
# Exactly three positional arguments are required:
#   local-directory remote-userid remote-directory
[ $# -ne 3 ] &&
  ( echo " Usage: `basename $0` directory remote-userid remote-directory"
    echo " e.g.: `basename $0` /data2/arcs/vt16a/Hobart" \
"root@gridftp-test.ivec.org /data/tmp/Graham/vt16a"
    echo "Options: -p m .. use port 'm' (default $PORT)"
    echo " -r .. reverse order"
    echo " -k keyfile .. use 'keyfile' (default $KEY)" ) >&2 && exit 2
# ssh invocation used for the remote listing; host-key checks are disabled
# (batch transfers), so host authenticity is NOT verified.
Ssu='ssh -o"UserKnownHostsFile /dev/null" -o"StrictHostKeyChecking no"'
# Failure/cleanup function; parameters are exit-code and message
fail() {
  Code=$1; shift
  echo "$@"; exit $Code
}
mkdir -p $1 2>/dev/null
test -w $1 || fail 1 "Local directory problem!"
# Java Invocation
Failures=0
# Build the transfer list: list local files, emit a blank line, then list
# remote files. The awk script stores local sizes until the blank line,
# then prints every remote file whose size differs from (or is absent in)
# the local listing; the "X" prefix forces a string comparison.
for File in `
  ( cd $1 && find -L . -maxdepth 1 -type f | xargs ls -lLA 2>/dev/null
    echo
    eval $Ssu $2 \
      "cd $3\&\& find -L . -maxdepth 1 -type f\| xargs ls -lLA 2>/dev/null"
  ) | awk '{ if (NF==0) {Remote="Y"; next}
             if (Remote=="Y") {if ("X"locsiz[$NF]!="X"$5) {print $NF}}
             else {locsiz[$NF]=$5}
           }' | sort $Order`; do
  echo -n "`date '+%a %T'` .. $File .. "
  # Pull one file with FDT; on failure, count it and back off briefly.
  if java -Xms256m -Xmx256m -jar `which fdt.jar` -sshKey $KEY -p $PORT \
      -noupdates -ss 32M -iof 4 -notmp $2:$3/$File $1/ </dev/null >/dev/null 2>&1;
  then
    echo OK
  else
    echo Failed; Failures=`expr 1 + $Failures`; sleep 5
  fi
done
fail 0 "Completed pass; $Failures errors"
| true |
e8282aee9dbfe71b773f358be7353f60d55bc53b | Shell | cjolowicz/scripts | /mercurial/bump-version.sh | UTF-8 | 6,343 | 4.125 | 4 | [] | no_license | #!/bin/bash
prog=$(basename $0)
### config #############################################################
# Product name; used to build tag names ($product-<version>) and branches.
product=helloworld
# Version file path, relative to the repository root (holds "M.m.p.rc").
versionpath=VERSION
# Branch on which feature (major/minor) releases are tagged.
releasebranch=$product-release
# Branch on which maintenance (patch) releases are tagged.
maintenancebranch=$product-maintenance
### functions ##########################################################
# Print the full help text to stdout and exit successfully.
usage () {
    echo "\
Usage: $prog --major [options]
$prog --minor [options]
$prog --patch [options]
$prog --rc [options]
$prog --release [options]
Options:
-M, --major Bump the major version.
-m, --minor Bump the minor version.
-p, --patch Bump the patch version.
-R, --rc Bump the rc number.
--release Add a release tag (no version bump).
--force Use a non-standard branch.
-n, --dry-run Just print what would be done.
-h, --help Display this message.
This script bumps the version number, and adds a version tag to the
repository.
For a major feature release, use \`$prog --major'.
For a normal feature release, use \`$prog --minor'.
For a maintenance release, use \`$prog --patch'.
If the current version failed QA, use \`$prog --rc' to prepare
another release candidate.
If the current version passed QA, use \`$prog --release' to add
a release tag. (This option does not bump the version number.)
"
    exit
}
# Report a fatal error on stderr and terminate with status 1.
error () {
    printf '%s: error: %s\n' "$prog" "$*" >&2
    exit 1
}
# Print a non-fatal warning on stderr; execution continues.
warn () {
    printf '%s: warning: %s\n' "$prog" "$*" >&2
}
# Report a fatal but overridable error: print the message plus a hint
# about --force on stderr, then terminate with status 1.
softerror () {
    {
        printf '%s: error: %s\n' "$prog" "$*"
        printf "Use \`--force' to override.\n"
    } >&2
    exit 1
}
# Complain about invalid command-line usage on stderr, point the user at
# --help, and terminate with status 1.
bad_usage () {
    {
        printf '%s: %s\n' "$prog" "$*"
        printf "Try \`%s --help' for more information.\n" "$prog"
    } >&2
    exit 1
}
### command line #######################################################
# Parsed state:
#   issue   - version component to bump: major/minor/patch/rc (empty = none)
#   release - true: add a release tag instead of bumping
#   force   - true: allow tagging on a non-standard branch
#   dry_run - true: print actions without performing them
issue=
release=false
force=false
dry_run=false
while [ $# -gt 0 ] ; do
    option="$1"
    shift
    case $option in
        -M | --major) issue=major ;;
        -m | --minor) issue=minor ;;
        -p | --patch) issue=patch ;;
        -R | --rc) issue=rc ;;
        --release) release=true ;;
        --force) force=true ;;
        -n | --dry-run) dry_run=true ;;
        -h | --help) usage ;;
        --) break ;;
        -*) bad_usage "unrecognized option \`$option'" ;;
        *) set -- "$option" "$@" ; break ;;
    esac
done
# --release and any bump option are mutually exclusive.
if $release && [ -n "$issue" ] ; then
    bad_usage "incompatible options \`--release' and \`--$issue'"
fi
# One of the modes must be selected.
if [ -z "$issue" ] && ! $release ; then
    usage
fi
# No positional arguments are accepted.
if [ $# -gt 0 ] ; then
    bad_usage "unknown argument \`$1'"
fi
### main ###############################################################
# Sanity checks: we must be inside a clean Mercurial working copy that is
# sitting exactly at the single head of its branch.
root="$(hg root)" ||
    error "not in a mercurial repository"
branch="$(hg branch)" ||
    error "cannot determine current branch"
[ $(hg status -q | wc -l) -eq 0 ] ||
    error "working directory has uncommitted changes"
heads=($(hg heads --template '{node|short}\n' "$branch")) ||
    error "cannot determine branch heads"
[ ${#heads[@]} -eq 1 ] ||
    error "branch must have a single head"
parents=($(hg parents --template '{node|short}\n')) ||
    error "cannot determine parents of the working directory"
[ ${#parents[@]} -eq 1 ] ||
    error "working directory must have a single parent"
[ ${heads[0]} == ${parents[0]} ] ||
    error "working directory is not at the branch head"
# The version file must be tracked and contain a well-formed
# "major.minor.patch.rc" version.
versionfile="$root"/"$versionpath"
[ $(hg status -u "$versionfile" | wc -l) -eq 0 ] ||
    error "version file must be under revision control"
version="$(cat "$versionfile")" ||
    error "cannot read version file"
echo "$version" | grep -Eq '^ *[0-9]+(\.[0-9]+){3} *$' ||
    error "version file contains no well-formed version"
# Tag names derived from the current version; the current version must
# already carry a version tag.
versiontag=$product-$version
releasetag=$versiontag-release
hg tags | grep -q "^${versiontag//./\\.}" ||
    error "no version tag for version $version"
versionbranch="$(hg log --template '{branch}' -r $versiontag)" ||
    error "cannot determine branch of $versiontag"
if $release ; then
    # Add a release tag to the repository.
    ! hg tags | grep -q "^${releasetag//./\\.}" ||
        error "$versiontag already has a release tag"
    [ "$branch" = "$versionbranch" ] ||
        error "$versiontag must be tagged on the $versionbranch branch \
(not $branch)"
    if ! $dry_run ; then
        hg tag -r $versiontag $releasetag ||
            error "cannot add release tag for $versiontag"
    fi
    echo "$versiontag => $releasetag"
else
    # Bump the version, and add a version tag to the repository.
    # Split "major.minor.patch.rc" into its four components.
    read major minor patch rc <<< ${version//./ }
    case $issue in
        major)
            # A major bump requires that some release of the current
            # major series exists.
            pattern="^$product-$major(\\.[0-9]+){3}-release"
            hg tags | grep -Eq "$pattern" ||
                error "$versiontag has no major release tag"
            ((++major)) ; minor=0 patch=0 rc=0
            wantbranch=$releasebranch
            ;;
        minor)
            # A minor bump requires a release in the current minor series.
            pattern="^$product-$major\\.$minor(\\.[0-9]+){2}-release"
            hg tags | grep -Eq "$pattern" ||
                error "$versiontag has no minor release tag"
            ((++minor)) ; patch=0 rc=0
            wantbranch=$releasebranch
            ;;
        patch)
            # Patch releases may only follow a released version.
            hg tags | grep -q "^${releasetag//./\\.}" ||
                error "$versiontag has no release tag"
            ((++patch)) ; rc=0
            wantbranch=$maintenancebranch
            ;;
        rc)
            # A new release candidate only makes sense while the current
            # version is still unreleased.
            ! hg tags | grep -q "^${releasetag//./\\.}" ||
                error "$versiontag has a release tag"
            ((++rc))
            wantbranch="$versionbranch"
            ;;
    esac
    newversion=$major.$minor.$patch.$rc
    newversiontag=$product-$newversion
    # Refuse to tag on the wrong branch unless --force was given.
    if [ "$branch" != "$wantbranch" ] ; then
        if $force ; then
            warn "force tagging of $newversiontag on the $branch branch \
(instead of $wantbranch)"
        else
            softerror "$newversiontag must be tagged on the $wantbranch \
branch (not $branch)"
        fi
    fi
    # Write the new version, commit it, and add the version tag.
    if ! $dry_run ; then
        echo $newversion > "$versionfile" ||
            error "cannot write version $newversion to version file"
        hg commit -m"Bump version to $newversion." ||
            error "cannot commit change of version file to version $newversion"
        hg tag $newversiontag ||
            error "cannot add version tag $newversiontag"
    fi
    echo "$versiontag => $newversiontag"
fi
| true |
b731c5e2ce7f879e23211e67fbc45f3eaa9968aa | Shell | midoks/mdserver-web | /plugins/docker/install.sh | UTF-8 | 1,838 | 3.671875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
# Derive the panel paths from the plugin directory:
#   curPath    = .../mdserver-web/plugins/docker
#   rootPath   = .../mdserver-web
#   serverPath = parent of the panel installation
curPath=`pwd`
rootPath=$(dirname "$curPath")
rootPath=$(dirname "$rootPath")
serverPath=$(dirname "$rootPath")
# cd /www/server/mdserver-web/plugins/docker && /bin/bash install.sh uninstall 1.0
# cd /www/server/mdserver-web/plugins/docker && /bin/bash install.sh install 1.0
# Progress messages (in Chinese) are written to this file for the panel UI.
install_tmp=${rootPath}/tmp/mw_install.pl
VERSION=$2
# Activate the panel's Python virtualenv when present.
if [ -f ${rootPath}/bin/activate ];then
    source ${rootPath}/bin/activate
fi
# Install docker via the official convenience script, record the plugin
# version, then start the plugin and register its init service.
Install_Docker()
{
    # which docker
    # if [ "$?" == "0" ];then
    #     echo 'docker is already installed' > $install_tmp
    #     exit 0
    # fi
    echo '正在安装脚本文件...' > $install_tmp
    mkdir -p $serverPath/source
    # Only run get.docker.com when the plugin directory does not exist yet.
    if [ ! -d $serverPath/docker ];then
        curl -fsSL https://get.docker.com | bash
        mkdir -p $serverPath/docker
    fi
    # Python bindings used by the plugin's index.py.
    pip install docker
    pip install pytz
    if [ -d $serverPath/docker ];then
        echo "${VERSION}" > $serverPath/docker/version.pl
        echo '安装完成' > $install_tmp
        cd ${rootPath} && python3 ${rootPath}/plugins/docker/index.py start
        cd ${rootPath} && python3 ${rootPath}/plugins/docker/index.py initd_install
    fi
}
# Stop and disable the docker service, then remove the packages with
# whichever package manager is available (apt preferred, else yum).
Uninstall_Docker()
{
    CMD=yum
    which apt
    if [ "$?" == "0" ];then
        CMD=apt
    fi
    if [ -f /usr/lib/systemd/system/docker.service ];then
        systemctl stop docker
        systemctl disable docker
        rm -rf /usr/lib/systemd/system/docker.service
        systemctl daemon-reload
    fi
    $CMD remove -y docker docker-ce-cli containerd.io
    # docker-client \
    # docker-client-latest \
    # docker-common \
    # docker-latest \
    # docker-latest-logrotate \
    # docker-logrotate \
    # docker-selinux \
    # docker-engine-selinux \
    # docker-engine \
    # docker-ce
    rm -rf $serverPath/docker
    echo "Uninstall_Docker" > $install_tmp
}
# Dispatch: 'install' installs, anything else uninstalls.
action=$1
if [ "${1}" == 'install' ];then
    Install_Docker
else
    Uninstall_Docker
fi
| true |
eac8dae7b9883ed3922f7ebd413a57ce6fa983c9 | Shell | JohnOmernik/maprdocker | /8_run_mapr_docker.sh | UTF-8 | 2,475 | 3.46875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
. ./cluster.conf
# Generate the per-node launcher. Everything between the EOL3 markers is
# written verbatim to /home/zetaadm/runmapr.sh: escaped expansions (\$, \`)
# are evaluated later on each node, while unescaped ones ($SUBNETS,
# $DOCKER_REG_URL, ...) are baked in now from cluster.conf.
cat > /home/zetaadm/runmapr.sh << EOL3
#!/bin/bash
RECONF=\$1
. ./cluster.conf
if [ ! -d "/opt/mapr" ]; then
sudo mkdir -p /opt/mapr
fi
if [ ! -d "/opt/mapr/conf" ]; then
sudo mkdir -p /opt/mapr/conf
sudo chown mapr:mapr /opt/mapr/conf
sudo chown 755 /opt/mapr/conf
fi
if [ ! -d "/opt/mapr/logs" ]; then
sudo mkdir -p /opt/mapr/logs
sudo chown mapr:mapr /opt/mapr/logs
sudo chmod 777 /opt/mapr/logs
fi
if [ ! -d "/opt/mapr/roles" ]; then
sudo mkdir -p /opt/mapr/roles
sudo chown root:root /opt/mapr/roles
fi
CHK=\$(ls /opt/mapr/conf/|wc -l)
if [ "\$CHK" == "0" ];then
CID=\$(sudo docker run -d \${DOCKER_REG_URL}/maprdocker sleep 10)
sudo docker cp \${CID}:/opt/mapr/conf /opt/mapr/
sudo docker cp \${CID}:/opt/mapr/roles /opt/mapr/
fi
NSUB="export MAPR_SUBNETS=$SUBNETS"
sudo sed -i -r "s@#export MAPR_SUBNETS=.*@\${NSUB}@g" /opt/mapr/conf/env.sh
if [ "\$RECONF" == "1" ]; then
MAPR_CMD="/opt/mapr/server/dockerreconf.sh"
else
if [ ! -f "/opt/mapr/conf/disktab" ]; then
MAPR_CMD="/opt/mapr/server/dockerconf.sh"
else
MAPR_CMD="/opt/mapr/server/dockerrun.sh"
fi
fi
MYHOST=\$(hostname)
MAPR_ENVS="-e=\"CLDBS=\$CLDBS\" -e=\"MUSER=\$MUSER\" -e=\"ZKS=\$ZKS\" -e=\"DISKS=\$DISKS\" -e=\"CLUSTERNAME=\$CLUSTERNAME\" -e=\"MAPR_CONF_OPTS=\$MAPR_CONF_OPTS\""
CONTROL_CHK=\$(echo -n \${CLDBS}|grep \${MYHOST})
sudo rm -rf /opt/mapr/roles/webserver
sudo rm -rf /opt/mapr/roles/cldb
if [ "\$CONTROL_CHK" != "" ]; then
sudo touch /opt/mapr/roles/webserver
sudo touch /opt/mapr/roles/cldb
fi
sudo docker run -d --net=host \${MAPR_ENVS} --privileged -v=/opt/mapr/conf:/opt/mapr/conf:rw -v=/opt/mapr/logs:/opt/mapr/logs:rw -v=/opt/mapr/roles:/opt/mapr/roles:rw \${DOCKER_REG_URL}/maprdocker \$MAPR_CMD
EOL3
HOSTFILE="./nodes.list"
HOSTS=`cat $HOSTFILE`
# Distribute the cluster config and the generated launcher to every node.
# NOTE(review): the launcher was written to /home/zetaadm/runmapr.sh above,
# but scp copies './runmapr.sh' relative to the current directory — this
# only matches when the script is run from /home/zetaadm; confirm.
for HOST in $HOSTS; do
    scp -o StrictHostKeyChecking=no cluster.conf $HOST:/home/zetaadm/cluster.conf
    scp -o StrictHostKeyChecking=no runmapr.sh $HOST:/home/zetaadm/runmapr.sh
done
./runcmd.sh "chmod +x /home/zetaadm/runmapr.sh"
# start CLDB containers
for HOST in $HOSTS; do
    MCHK=$(echo $CLDBS|grep $HOST)
    if [ "$MCHK" != "" ]; then
        ssh $HOST "/home/zetaadm/runmapr.sh"
    fi
done
echo "Waiting 30 seconds for CLDBs to Start"
sleep 30
# Start the remaining (non-CLDB) nodes once the CLDBs are up.
for HOST in $HOSTS; do
    MCHK=$(echo $CLDBS|grep $HOST)
    if [ "$MCHK" == "" ]; then
        ssh $HOST "/home/zetaadm/runmapr.sh"
    fi
done
| true |
bd66060b07d773a14758882510cd76441c066635 | Shell | Inkdpixels/dotfiles | /tests/cli.spec | UTF-8 | 1,106 | 3.5 | 4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/usr/bin/env bash
DOT_DIR="$HOME/.dotfiles"
OUTPUT_DIR="$DOT_DIR/tests/output"
# Test lifecycle helpers (beforeEach/afterEach) shared by all tests.
source $DOT_DIR/lib/utils/afterEach
source $DOT_DIR/lib/utils/beforeEach
#
# Tests for invalid arguments
#
# The 'dot' CLI must fail (status 1) for an unknown sub-command.
testReturnCodeForInvalidCommand() {
    beforeEach
    dot invalid_argument > /dev/null
    # $? still reflects dot's exit status; the redirection is not a command.
    assertEquals "The dot CLI should return a failure code if ran with an invalid argument." 1 $?
    afterEach
}
# The 'dot' CLI must fail (status 1) when invoked with no arguments.
testReturnCodeWithoutArguments() {
    beforeEach
    dot > /dev/null
    returnCode=$?
    assertEquals "The dot CLI should return a failure code if ran without arguments." 1 $returnCode
    afterEach
}
#
# Tests for the 'help' command.
#
# 'dot help' output must match the golden file help.txt exactly.
testOutputForHelpCommand() {
    beforeEach
    dot help > "$OUTPUT_DIR/help.result.txt"
    diff "$OUTPUT_DIR/help.result.txt" "$OUTPUT_DIR/help.txt"
    assertEquals "The dot CLI should output a instructional guide if ran with the 'help' argument." 0 $?
    afterEach
}
# 'dot help' must succeed (status 0).
testReturnCodeForHelpCommand() {
    beforeEach
    dot help > /dev/null
    returnCode=$?
    assertEquals "The dot CLI should return a success code if ran with the 'help' argument." 0 $returnCode
    afterEach
}
#
# Run all tests.
#
# shunit2 discovers and executes the test* functions defined above.
. shunit2
| true |
509236c27e6261c43a3791f98edb48c591524a07 | Shell | DavideSecco/Sistemi-Operativi | /EserciziLaboratorio/File/4.Esercitazione-09.04.2021/problema2/FCR2.sh | UTF-8 | 703 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# FCR2.sh <directory> <line-count> <output-file>
# Recursively scans <directory>; in every (sub)directory, the first
# readable regular file with exactly <line-count> lines is printed and
# its name appended to <output-file>.

# Resolve the output file and our own path to absolute names ONCE, so
# they keep working after we cd into subdirectories (the original used
# relative names, which broke once the cwd changed).
case "$3" in
    /*) out=$3 ;;
    *)  out=$PWD/$3 ;;
esac
case "$0" in
    /*) self=$0 ;;
    *)  self=$PWD/$0 ;;
esac

# Move into the directory to scan; give up quietly if inaccessible.
cd "$1" || exit 1
dir=$PWD

# Look for the first readable regular file with the requested line count.
trovato=false
for i in *
do
    if test -f "$i" -a -r "$i"
    then
        if test "$(wc -l < "$i")" -eq "$2"
        then
            trovato=true
            echo
            echo "file: $i"
            echo "$i" >> "$out"
            break
        fi
    fi
done

# Report the directory that contained a match.
if test "$trovato" = true
then
    echo Directory:
    echo "$1"
fi

# Recurse into each searchable subdirectory. Using "$self" and the
# absolute "$dir/$i" fixes the original's PATH-dependent re-invocation
# and the broken '$1/$i'-after-cd path for relative arguments.
for i in *
do
    if test -d "$i" -a -x "$i"
    then
        "$self" "$dir/$i" "$2" "$out"
    fi
done
| true |
0b18536394fb7b5348eda45e5a96b851bee3aa71 | Shell | Ben12344/CSP2101 | /assignment/Final Version/Software_Solution.sh | UTF-8 | 581 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Pull in the shared list-printing helpers.
source printlist.sh

# Download source of the gallery page for parsing.
curl -s "https://www.ecu.edu.au/service-centres/MACSC/gallery/gallery.php?folder=152" > websitesource.txt

# Extract the thumbnail links into a text file, one per line.
# (grep reads the file directly; no need to pipe it through cat.)
grep "<img src=" websitesource.txt \
    | sed -e 's/<img src=//; s/alt="DSC0....">//; s/"//; s/"//' > thunmbalilinks.txt

echo "Hi Welcome!"

# Menu. printf is used because bash's echo does not interpret \t without
# -e, so the original menu printed literal "\t" sequences.
printf 'Please select the following options\n'
printf '\t 1. Download specific a thumbnail\n'
printf '\t 2. Download all thumbnails\n'
printf '\t 3. Download thumbnails in a range\n'
printf '\t 4. Download random thumbnails\n'
printf '\t e. exit\n'
| true |
601ac0d05c7080d80ec749cb8688f476870c7e11 | Shell | sin90lzc/my_sh | /backup/backup_home.sh | UTF-8 | 1,426 | 3.984375 | 4 | [] | no_license | #!/bin/bash
####################################################################################################
#
# Description:
#   Back up /home using dump with incremental levels and bzip2 compression.
#
# Synopsis:
#
# Options:
#
# History:
#   2013-03-12  first version
#   2013-03-20  added bzip2 compression; fixed the issue where the first
#               (full) backup also produced a level-1 backup
#
# Author: tim  Email: lzcgame@126.com
#
####################################################################################################
# Backup root path
BACKUP_PATH=/mnt/backup
# Destination directory for the /home dumps
HOME_PATH=$BACKUP_PATH/home
# Create the backup directory if it does not exist; warn if the path
# exists but is not a directory.
[ ! -e $HOME_PATH ] && mkdir -p $HOME_PATH
[ -d $HOME_PATH ] || echo "home backup path($HOME_PATH) is not a directory!"
# Suffix shared by all backup files; the leading digit is the dump level
HOME_BACKUP_SUFFIX=home.dump
# Names of the backups already present
HOME_BACKUP_FILES=$(ls -1 $HOME_PATH)
# No previous backup: take a full (level 0) dump and stop
if [ -z "$HOME_BACKUP_FILES" ];then
    dump -0u -j2 -f ${HOME_PATH}/0${HOME_BACKUP_SUFFIX} /home
    exit 0
fi
# Determine the highest dump level used so far.
# NOTE(review): the bare 'echo $TEMP_NUM' below looks like leftover debug
# output, and a non-numeric file name in $HOME_PATH would break the -gt
# comparison — confirm both.
MAX_BACKUP_NUM=0
for var in $HOME_BACKUP_FILES
do
    TEMP_NUM=${var%${HOME_BACKUP_SUFFIX}}
    echo $TEMP_NUM
    if [ $TEMP_NUM -gt $MAX_BACKUP_NUM ];then
        MAX_BACKUP_NUM=$TEMP_NUM
    fi
done
# Level for this run (previous maximum + 1). dump only supports levels
# 0-9 — TODO confirm behaviour once the level would exceed 9.
CUR_BACKUP_NUM=$(($MAX_BACKUP_NUM+1))
# File name for this backup
HOME_BACKUP_FILENAME=${CUR_BACKUP_NUM}${HOME_BACKUP_SUFFIX}
# Incremental dump at the computed level
dump -${CUR_BACKUP_NUM}u -j2 -f ${HOME_PATH}/${HOME_BACKUP_FILENAME} /home
| true |
57361824ec83529c2235e273ba959c8cf21c3004 | Shell | capheast-apps/preach | /cmd.sh | UTF-8 | 1,120 | 4.21875 | 4 | [] | no_license | #!/bin/bash
read -d '' help <<- EOF
================================
| Available options |
================================
| 1) docker [params] |
| 2) yarn [params] |
| 6) up |
| 7) stop |
================================
EOF
function check_params {
if [[ -z "${@: -$# + 1}" ]]; then
echo "No arguments supplied"
exit 1
fi
}
function run_docker {
shift
check_params $@
docker exec -it api.preach.dev $@
exit 0
}
function yarn {
shift
check_params $@
docker exec -it api.preach.dev yarn $@
exit 0
}
function up {
shift
check_params $@ 1
docker-compose -f docker-compose-dev.yml up -d --remove-orphans --force-recreate
exit 0
}
function stop {
shift
check_params $@ 1
docker-compose -f docker-compose-dev.yml stop
exit 0
}
case $1 in
"docker" | 1 )
run_docker $@
;;
"yarn" | 3 )
yarn $@
exit 0
;;
"up" | 6 )
up $@
exit 0
;;
"stop" | 7 )
stop $@
exit 0
;;
*)
echo "$help"
exit 1
;;
esac
exit 0 | true |
8374a908ca64209195d5a64489459b3672405f05 | Shell | hhnhfan/shell | /1/10scripts/09.diskMnoitor.sh | UTF-8 | 459 | 3.234375 | 3 | [] | no_license | #!/usr/bin/env bash
#
# author: bavdu
# date: 2019/07/28
# usage: monitor disk usage of the first filesystem and mail a warning
#        when it reaches 90%

DATE=$(date +'%Y-%m-%d %H:%M:%S')
IPADDR=$(ifconfig | grep inet | awk 'BEGIN{ FS=" " }NR==1{ print $2 }')
MAIL="bavduer@163.com"

# Use% column of the first filesystem listed by df, e.g. "42%".
useRate=$(df -Th | awk 'BEGIN{ FS=" " }NR==2{ print $6 }')
# Strip the trailing '%' so the value is always a plain integer.
# The old "${useRate: 0: 2}" broke for single-digit ("9%" -> "9%") and
# misread three-digit ("100%" -> "10") percentages.
useRate=${useRate%\%}

if [[ ${useRate} -ge 90 ]];then
    echo "
Date: ${DATE}
Host: ${HOSTNAME}: ${IPADDR}
Problem:
Disk using rate: up ${useRate}%
" | mail -s "Disk Monitor Warning" ${MAIL}
fi
9a7564c157ab02aee601d5fb0ebd2c68574ac962 | Shell | solventrix/Honeur-Setup | /local-installation/offline-helper-scripts/start-feder8-offline.sh | UTF-8 | 750 | 3.609375 | 4 | [] | no_license | #!/usr/bin/env bash
TAG=2.0.22
REGISTRY=harbor.honeur.org
# Detect whether docker is managed by systemd; the installer behaves
# differently for certificate support in that case.
if systemctl show --property ActiveState docker &> /dev/null; then
    DOCKER_CERT_SUPPORT=true
else
    DOCKER_CERT_SUPPORT=false
fi
# Detect macOS hosts via $OSTYPE.
if [[ $OSTYPE == 'darwin'* ]]; then
    IS_MAC=true
else
    IS_MAC=false
fi
# Offline install: load the pre-bundled images, then run the installer
# container with the host docker socket mounted.
if [ -f "images.tar" ]; then
    echo Loading docker images. This could take a while...
    docker load < images.tar
    docker run --rm -it --name feder8-installer -e CURRENT_DIRECTORY=$(pwd) -e IS_WINDOWS=false -e IS_MAC=$IS_MAC -e DOCKER_CERT_SUPPORT=$DOCKER_CERT_SUPPORT -v /var/run/docker.sock:/var/run/docker.sock ${REGISTRY}/library/install-script:${TAG} feder8 init full --offline
else
    echo Could not find 'images.tar' in the current directory. Unable to continue.
    exit 1
fi
| true |
0c7b09b9877ef6ab28411bcef4d267564c6d0d3d | Shell | ZeldaCross/PanoEnabler | /layout/DEBIAN/postinst | UTF-8 | 377 | 2.625 | 3 | [] | no_license | #!/bin/sh
# Post-install: pick behaviour by device model string.
# NOTE(review): the shebang is /bin/sh but [[ ]] is a bash extension —
# confirm the target system's sh supports it.
chk=$(uname -m)
# Newer devices (iPod5*, iPhone4/5/6*): remove the old hook libraries.
# Everything else: install the panorama UI images instead.
if [[ "$chk" == iPod5* ]] || [[ "$chk" == iPhone4* ]] || [[ "$chk" == iPhone5* ]] || [[ "$chk" == iPhone6* ]];then
    rm -f /Library/MobileSubstrate/DynamicLibraries/PanoHook* /Library/MobileSubstrate/DynamicLibraries/BackBoardEnv* /usr/lib/PanoHook7.dylib
else
    cp /Library/PanoModUI/*.png /System/Library/PrivateFrameworks/PhotoLibrary.framework
fi
| true |
53d939bd0f23c0540c8efc664358db4efc1cf49f | Shell | mdub/localdocker | /provision/docker.sh | UTF-8 | 662 | 2.796875 | 3 | [] | no_license | #! /bin/sh
set -e
# Create the docker group if missing and let the vagrant user use it.
getent group docker || groupadd docker
usermod -a -G docker vagrant
# Add Docker's apt repository and install lxc-docker plus the extra
# kernel modules for the running kernel.
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
sh -c 'echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list'
apt-get -y -q update
apt-get -y -q install lxc-docker linux-image-extra-`uname --kernel-release`
# Expose the daemon on TCP 4243 in addition to the default unix socket.
cat <<EOF > /etc/default/docker
DOCKER_OPTS="-H tcp://0.0.0.0:4243 -H unix:///var/run/docker.sock"
EOF
service docker restart
# Rotate container JSON logs daily, keeping two compressed generations.
cat <<EOF > /etc/logrotate.d/docker
/var/lib/docker/containers/*/*-json.log {
rotate 2
daily
compress
missingok
notifempty
copytruncate
}
EOF
| true |
ba3be374a29c92c72264857a907f3d7b9e5a7686 | Shell | toliaqat/scripts | /tasks.sh | UTF-8 | 390 | 3.0625 | 3 | [] | no_license | #!/bin/bash -x
#
# Create TODO task as a pull request!
# Usage: ./tasks.sh Get the milk!
#
# The whole argument list becomes the task text.
msg=$*
cd ~/airlab/repos/tasks/
# Branch name: the message with spaces replaced by dashes.
branch="$(echo $msg | tr ' ' '-')"
git checkout master
git checkout -b $branch
# Record the task and commit it on the new branch.
echo $msg >> README.md
git add -u .
git commit -m "$msg"
# Capture the pull-request URL that 'git push' prints (second field of
# the line containing 'tasks/pull') and open it in Chrome.
url="$(git push 2>&1 | grep 'tasks\/pull' | awk '{print $2}')"
/Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome $url
| true |
7b20a5d2cad373e864726fbeb833131a9559c25c | Shell | david-rahrer/launchpad | /nginx/nginx.sh | UTF-8 | 5,446 | 3.5 | 4 | [] | no_license | #!/bin/bash
# Define NGINX version
# $1: nginx version to package; $2: maintainer e-mail passed to dh_make.
NGINX_VERSION=$1
EMAIL_ADDRESS=$2
function ppa_error()
{
echo "[ `date` ] $(tput setaf 1)$@$(tput sgr0)"
exit $2
}
# Echo function
function ppa_lib_echo()
{
echo $(tput setaf 4)$@$(tput sgr0)
}
# Update/Install Packages
# Update/Install Packages
ppa_lib_echo "Execute: apt-get update, please wait"
sudo apt-get update || ppa_error "Unable to update packages, exit status = " $?
ppa_lib_echo "Installing required packages, please wait"
sudo apt-get -y install git dh-make devscripts debhelper dput gnupg-agent || ppa_error "Unable to install packages, exit status = " $?
# Configure PPA
mkdir -p ~/PPA/nginx && cd ~/PPA/nginx \
    || ppa_error "Unable to create ~/PPA, exit status = " $?
# Download NGINX
ppa_lib_echo "Download nginx, please wait"
wget -c http://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz \
    || ppa_error "Unable to download nginx-${NGINX_VERSION}.tar.gz, exit status = " $?
tar -zxvf nginx-${NGINX_VERSION}.tar.gz \
    || ppa_error "Unable to extract nginx, exit status = " $?
cd nginx-${NGINX_VERSION} \
    || ppa_error "Unable to change directory, exit status = " $?
# Lets start building: debianize the tree with dh_make and drop templates.
ppa_lib_echo "Execute: dh_make --single --native --copyright gpl --email $EMAIL_ADDRESS , please wait"
dh_make --single --native --copyright gpl --email $EMAIL_ADDRESS \
    || ppa_error "Unable to run dh_make command, exit status = " $?
rm debian/*.ex debian/*.EX \
    || ppa_error "Unable to remove unwanted files, exit status = " $?
# Lets copy files: overlay the Launchpad debian/ packaging on the tree.
ppa_lib_echo "Copy Launchpad Debian files, please wait"
rm -rf /tmp/launchpad && git clone https://github.com/MiteshShah/launchpad.git /tmp/launchpad \
    || ppa_error "Unable to clone launchpad repo, exit status = " $?
cp -av /tmp/launchpad/nginx/debian/* ~/PPA/nginx/nginx-${NGINX_VERSION}/debian/ && \
cp -v debian/changelog debian/NEWS.Debian \
    || ppa_error "Unable to copy launchpad debian files, exit status = " $?
# NGINX modules: fetch each third-party module into ~/PPA/nginx/modules.
ppa_lib_echo "Downloading NGINX modules, please wait"
mkdir ~/PPA/nginx/modules && cd ~/PPA/nginx/modules \
    || ppa_error "Unable to create ~/PPA/nginx/modules, exit status = " $?
ppa_lib_echo "1/13 headers-more-nginx-module"
git clone https://github.com/agentzh/headers-more-nginx-module.git \
    || ppa_error "Unable to clone headers-more-nginx-module repo, exit status = " $?
ppa_lib_echo "2/13 naxsi "
git clone https://github.com/nbs-system/naxsi \
    || ppa_error "Unable to clone naxsi repo, exit status = " $?
cp -av ~/PPA/nginx/modules/naxsi/naxsi_config/naxsi_core.rules ~/PPA/nginx/nginx-${NGINX_VERSION}/debian/conf/ \
    || ppa_error "Unable to copy naxsi files, exit status = " $?
ppa_lib_echo "3/13 nginx-auth-pam"
wget http://web.iti.upv.es/~sto/nginx/ngx_http_auth_pam_module-1.3.tar.gz \
    || ppa_error "Unable to download ngx_http_auth_pam_module-1.3.tar.gz, exit status = " $?
tar -zxvf ngx_http_auth_pam_module-1.3.tar.gz \
    || ppa_error "Unable to extract ngx_http_auth_pam_module-1.3, exit status = " $?
mv ngx_http_auth_pam_module-1.3 nginx-auth-pam \
    || ppa_error "Unable to rename ngx_http_auth_pam_module-1.3, exit status = " $?
rm ngx_http_auth_pam_module-1.3.tar.gz \
    || ppa_error "Unable to remove ngx_http_auth_pam_module-1.3.tar.gz, exit status = " $?
ppa_lib_echo "4/13 nginx-cache-purge"
git clone https://github.com/FRiCKLE/ngx_cache_purge.git nginx-cache-purge \
    || ppa_error "Unable to clone nginx-cache-purge repo, exit status = " $?
ppa_lib_echo "5/13 nginx-dav-ext-module"
git clone https://github.com/arut/nginx-dav-ext-module.git \
    || ppa_error "Unable to clone nginx-dav-ext-module repo, exit status = " $?
ppa_lib_echo "6/13 nginx-development-kit"
git clone https://github.com/simpl/ngx_devel_kit.git nginx-development-kit \
    || ppa_error "Unable to clone nginx-development-kit repo, exit status = " $?
ppa_lib_echo "7/13 nginx-echo"
git clone https://github.com/agentzh/echo-nginx-module.git nginx-echo \
    || ppa_error "Unable to clone nginx-echo repo, exit status = " $?
ppa_lib_echo "8/13 nginx-http-push"
git clone https://github.com/slact/nginx_http_push_module.git nginx-http-push \
    || ppa_error "Unable to clone nginx-http-push repo, exit status = " $?
ppa_lib_echo "9/13 nginx-lua"
git clone https://github.com/chaoslawful/lua-nginx-module.git nginx-lua \
    || ppa_error "Unable to clone nginx-lua repo, exit status = " $?
ppa_lib_echo "10/13 nginx-upload-progress-module"
git clone https://github.com/masterzen/nginx-upload-progress-module.git nginx-upload-progress \
    || ppa_error "Unable to clone nginx-upload-progress repo, exit status = " $?
ppa_lib_echo "11/13 nginx-upstream-fair"
git clone https://github.com/gnosek/nginx-upstream-fair.git \
    || ppa_error "Unable to clone nginx-upstream-fair repo, exit status = " $?
ppa_lib_echo "12/13 nginx-http-subs"
git clone git://github.com/yaoweibin/ngx_http_substitutions_filter_module.git nginx-http-subs \
    || ppa_error "Unable to clone nginx-http-subs repo, exit status = " $?
ppa_lib_echo "13/13 ngx_pagespeed"
# NOTE(review): unlike the steps above, this section has no ppa_error
# checks, and psol.tar.gz is downloaded but never extracted — confirm.
NPS_VERSION=1.8.31.4
wget https://github.com/pagespeed/ngx_pagespeed/archive/release-${NPS_VERSION}-beta.tar.gz
tar -zxvf release-${NPS_VERSION}-beta.tar.gz
mv ngx_pagespeed-release-${NPS_VERSION}-beta ngx_pagespeed
rm release-${NPS_VERSION}-beta.tar.gz
cd ngx_pagespeed
wget -O psol.tar.gz https://dl.google.com/dl/page-speed/psol/${NPS_VERSION}.tar.gz
# Copy all downloaded modules into the debian packaging tree.
cp -av ~/PPA/nginx/modules ~/PPA/nginx/nginx-${NGINX_VERSION}/debian/ \
    || ppa_error "Unable to copy launchpad modules files, exit status = " $?
| true |
8327dfa7d4a853aac9127c318d249381b3f8b685 | Shell | openxc/openxc-android | /scripts/push-javadoc.sh | UTF-8 | 888 | 3.015625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
if [ "$TRAVIS_REPO_SLUG" == "openxc/openxc-android" ] && [ "$TRAVIS_JDK_VERSION" == "openjdk8" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_BRANCH" == "master" ]; then
cp -R library/build/docs/javadoc $HOME/javadoc-latest
cd $HOME
git config --global user.email "travis@travis-ci.org"
git config --global user.name "Travis-CI"
git clone --quiet --branch=master https://${GH_TOKEN}@github.com/openxc/openxc-android master > /dev/null
cd master
LATEST_TAG=$(git describe --abbrev=0 --tags)
cd ../
git clone --quiet --branch=gh-pages https://${GH_TOKEN}@github.com/openxc/openxc-android gh-pages > /dev/null
cd gh-pages
git rm -rf ./
echo "android.openxcplatform.com" > CNAME
cp -Rf $HOME/javadoc-latest/. ./
git add -f .
git commit -m "JavaDoc $LATEST_TAG - Travis Build $TRAVIS_BUILD_NUMBER"
git push -fq origin gh-pages > /dev/null
fi | true |
3d4624405d620232334e981ab1191e37ab86aa0a | Shell | karlosgliberal/docker_kukuxumuxu | /mysql/scripts/docker-entrypoint.sh | UTF-8 | 1,355 | 3.8125 | 4 | [] | no_license | #!/bin/bash
set -e
echo '* Working around permission errors locally by making sure that "mysql" uses the same uid and gid as the host volume'
TARGET_UID=$(stat -c "%u" /var/lib/mysql)
echo '-- Setting mysql user to use uid '$TARGET_UID
usermod -o -u $TARGET_UID mysql || true
TARGET_GID=$(stat -c "%g" /var/lib/mysql)
echo '-- Setting mysql group to use gid '$TARGET_GID
groupmod -o -g $TARGET_GID mysql || true
echo
echo '* Starting MySQL'
chown -R mysql:root /var/run/mysqld/
chown -R mysql:mysql /var/lib/mysql
mysql_install_db --user mysql > /dev/null
MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD:-""}
MYSQL_DATABASE=${MYSQL_DATABASE:-""}
MYSQL_USER=${MYSQL_USER:-""}
MYSQL_PASSWORD=${MYSQL_PASSWORD:-""}
tfile=`mktemp`
if [[ ! -f "$tfile" ]]; then
return 1
fi
cat << EOF > $tfile
USE mysql;
FLUSH PRIVILEGES;
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;
UPDATE user SET password=PASSWORD("$MYSQL_ROOT_PASSWORD") WHERE user='root';
EOF
if [[ $MYSQL_DATABASE != "" ]]; then
echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` CHARACTER SET utf8 COLLATE utf8_general_ci;" >> $tfile
if [[ $MYSQL_USER != "" ]]; then
echo "GRANT ALL ON \`$MYSQL_DATABASE\`.* to '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD';" >> $tfile
fi
fi
/usr/sbin/mysqld --bootstrap --verbose=0 < $tfile
rm -f $tfile
exec /usr/sbin/mysqld
| true |
4ac828196262ab71d65d75b7c0b157bcd24720e7 | Shell | akoshne/plat | /tools/compute_vectors.sh | UTF-8 | 14,846 | 2.703125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -ex
export PYTHONPATH="${PYTHONPATH:-../plat:.}"
PLATCMD="${PLATCMD:-python ../plat/plat/bin/platcmd.py}"
MODEL_FILE="${MODEL_FILE:---model models/celeba_dlib_64_160z_d4_11/celeba_dlib_64_160z_d4_11.zip}"
MODEL_INTERFACE="${MODEL_INTERFACE:---model-interface ali.interface.AliModel}"
DATASET_VALUE="${DATASET_VALUE:-celeba_dlib_64}"
JSON_SUBDIR="${JSON_SUBDIR:-models/celeba_dlib_64_160z_d4_11}"
BATCH_SIZE="${BATCH_SIZE:-100}"
IMAGE_SIZE="${IMAGE_SIZE:-64}"
TRAIN_VECTOR_FILE="${TRAIN_VECTOR_FILE:-train_vectors.json}"
TEST_VECTOR_FILE="${TEST_VECTOR_FILE:-test_vectors.json}"
# called with offsetfile, offsetindex, outname
function sample_vector {
$PLATCMD sample \
--rows 1 --cols 7 --tight --gradient --offset 0 --shoulder \
--anchor-image /develop/data/composite/$IMAGE_SIZE/pm.png \
--image-size "$IMAGE_SIZE" \
--numanchors 1 \
$MODEL \
--anchor-offset-x $2 --anchor-offset-x-minscale -1.0 --anchor-offset-x-maxscale 2.0 \
--anchor-offset-y $2 --anchor-offset-y-minscale 0.0 --anchor-offset-y-maxscale 0.0 \
--anchor-offset $1 \
--outfile $JSON_SUBDIR"/atvec_"$3"_male.png"
$PLATCMD sample \
--rows 1 --cols 7 --tight --gradient --offset 0 --shoulder \
--anchor-image /develop/data/composite/$IMAGE_SIZE/pf.png \
--image-size "$IMAGE_SIZE" \
--numanchors 1 \
$MODEL \
--anchor-offset-x $2 --anchor-offset-x-minscale -1.0 --anchor-offset-x-maxscale 2.0 \
--anchor-offset-y $2 --anchor-offset-y-minscale 0.0 --anchor-offset-y-maxscale 0.0 \
--anchor-offset $1 \
--outfile $JSON_SUBDIR"/atvec_"$3"_female.png"
}
function atvec_thresh {
$PLATCMD atvec \
--thresh \
--dataset $DATASET_VALUE \
--split train \
--encoded-vectors "$JSON_SUBDIR/$TRAIN_VECTOR_FILE" \
--attribute-vectors $1 \
--outfile $2
}
# atvec_roc ATTR_VECTORS_FILE ATTR_INDEX OUT_SUFFIX ATTR_SET
#   ROC-evaluates one attribute vector against the encoded test split and
#   writes the report to $JSON_SUBDIR/atvec_<OUT_SUFFIX>.
function atvec_roc {
    # All expansions quoted; the output path is built with plain interpolation
    # instead of the original mixed quoted/unquoted concatenation.
    $PLATCMD atvec \
        --roc \
        --dataset "$DATASET_VALUE" \
        --split test \
        --encoded-vectors "$JSON_SUBDIR/$TEST_VECTOR_FILE" \
        --attribute-vectors "$1" \
        --attribute-set "$4" \
        --attribute-indices "$2" \
        --outfile "$JSON_SUBDIR/atvec_$3"
}
# Encode each dataset split (train/valid/test) into latent vectors.
# Each step is skipped when its output JSON already exists, so the script is
# safe to re-run after a partial failure.
# NOTE(review): the first [ -f ] test leaves $JSON_SUBDIR/$TRAIN_VECTOR_FILE
# unquoted (unlike the two below); this only works while the paths contain no
# whitespace.
# do train vectors
if [ ! -f $JSON_SUBDIR/$TRAIN_VECTOR_FILE ]; then
$PLATCMD sample \
$MODEL \
--dataset=$DATASET_VALUE \
--split train \
--batch-size $BATCH_SIZE \
--encoder \
--outfile "$JSON_SUBDIR/$TRAIN_VECTOR_FILE"
fi
if [ ! -f "$JSON_SUBDIR/valid_vectors.json" ]; then
# do valid vectors
$PLATCMD sample \
$MODEL \
--dataset=$DATASET_VALUE \
--split valid \
--batch-size $BATCH_SIZE \
--encoder \
--outfile "$JSON_SUBDIR/valid_vectors.json"
fi
if [ ! -f "$JSON_SUBDIR/$TEST_VECTOR_FILE" ]; then
# do test vectors
$PLATCMD sample \
$MODEL \
--dataset=$DATASET_VALUE \
--split test \
--batch-size $BATCH_SIZE \
--encoder \
--outfile "$JSON_SUBDIR/$TEST_VECTOR_FILE"
fi
declare -A celeba_attribs
celeba_attribs=(
["00"]="5_o_clock_shadow"
["01"]="arched_eyebrows"
["02"]="attractive"
["03"]="bags_under_eyes"
["04"]="bald"
["05"]="bangs"
["06"]="big_lips"
["07"]="big_nose"
["08"]="black_hair"
["09"]="blond_hair"
["10"]="blurry"
["11"]="brown_hair"
["12"]="bushy_eyebrows"
["13"]="chubby"
["14"]="double_chin"
["15"]="eyeglasses"
["16"]="goatee"
["17"]="gray_hair"
["18"]="heavy_makeup"
["19"]="high_cheekbones"
["20"]="male"
["21"]="mouth_slightly_open"
["22"]="mustache"
["23"]="narrow_eyes"
["24"]="no_beard"
["25"]="oval_face"
["26"]="pale_skin"
["27"]="pointy_nose"
["28"]="receding_hairline"
["29"]="rosy_cheeks"
["30"]="sideburns"
["31"]="smiling"
["32"]="straight_hair"
["33"]="wavy_hair"
["34"]="wearing_earrings"
["35"]="wearing_hat"
["36"]="wearing_lipstick"
["37"]="wearing_necklace"
["38"]="wearing_necktie"
["39"]="young"
)
# atvec all labels and a balanced male/smile/open mouth
if [ ! -f "$JSON_SUBDIR/atvecs_all.json" ]; then
$PLATCMD atvec --dataset=$DATASET_VALUE \
--dataset "$DATASET_VALUE" \
--split train \
--num-attribs 40 \
--svm \
--encoded-vectors "$JSON_SUBDIR/$TRAIN_VECTOR_FILE" \
--outfile "$JSON_SUBDIR/atvecs_all.json"
fi
if [ ! -f "$JSON_SUBDIR/atvecs_all_mean.json" ]; then
$PLATCMD atvec --dataset=$DATASET_VALUE \
--dataset "$DATASET_VALUE" \
--split train \
--num-attribs 40 \
--encoded-vectors "$JSON_SUBDIR/$TRAIN_VECTOR_FILE" \
--outfile "$JSON_SUBDIR/atvecs_all_mean.json"
fi
# if [ ! -f "$JSON_SUBDIR/atvecs_all_thresholds.json" ]; then
if [ ! -f "$JSON_SUBDIR/atvecs_roc_done.txt" ]; then
# atvec_thresh "$JSON_SUBDIR/atvecs_all.json" "$JSON_SUBDIR/atvecs_all_thresholds.json"
for index in "${!celeba_attribs[@]}"; do
atvec_roc "$JSON_SUBDIR/atvecs_all.json" $index "celeba_"$index"_"${celeba_attribs[$index]} "all"
atvec_roc "$JSON_SUBDIR/atvecs_all.json" $index "celeba_"$index"_"${celeba_attribs[$index]} "true"
atvec_roc "$JSON_SUBDIR/atvecs_all.json" $index "celeba_"$index"_"${celeba_attribs[$index]} "false"
done
for index in "${!celeba_attribs[@]}"; do
sample_vector "$JSON_SUBDIR/atvecs_all.json" $index "celeba_"$index"_"${celeba_attribs[$index]}
done
touch "$JSON_SUBDIR/atvecs_roc_done.txt"
fi
# if [ ! -f "$JSON_SUBDIR/atvecs_all_mean_thresholds.json" ]; then
if [ ! -f "$JSON_SUBDIR/atvecs_roc_mean_done.txt" ]; then
# atvec_thresh "$JSON_SUBDIR/atvecs_all_mean.json" "$JSON_SUBDIR/atvecs_all_mean_thresholds.json"
for index in "${!celeba_attribs[@]}"; do
atvec_roc "$JSON_SUBDIR/atvecs_all_mean.json" $index "celeba_"$index"_mean_"${celeba_attribs[$index]} "all"
atvec_roc "$JSON_SUBDIR/atvecs_all_mean.json" $index "celeba_"$index"_mean_"${celeba_attribs[$index]} "true"
atvec_roc "$JSON_SUBDIR/atvecs_all_mean.json" $index "celeba_"$index"_mean_"${celeba_attribs[$index]} "false"
done
# for index in "${!celeba_attribs[@]}"; do
# sample_vector "$JSON_SUBDIR/atvecs_all_mean.json" $index "celeba_"$index"_mean_"${celeba_attribs[$index]}
# done
touch "$JSON_SUBDIR/atvecs_roc_mean_done.txt"
fi
if [ ! -f "$JSON_SUBDIR/atvecs_balanced_20_21_31.json" ]; then
$PLATCMD atvec --dataset=$DATASET_VALUE \
--dataset "$DATASET_VALUE" \
--split train \
--num-attribs 40 \
--svm \
--encoded-vectors $JSON_SUBDIR/$TRAIN_VECTOR_FILE \
--balanced 20,21,31 \
--outfile "$JSON_SUBDIR/atvecs_balanced_20_21_31.json"
$PLATCMD atvec --dataset=$DATASET_VALUE \
--dataset "$DATASET_VALUE" \
--split train \
--num-attribs 40 \
--encoded-vectors $JSON_SUBDIR/$TRAIN_VECTOR_FILE \
--balanced 20,21,31 \
--outfile "$JSON_SUBDIR/atvecs_balanced_mean_20_21_31.json"
sample_vector "$JSON_SUBDIR/atvecs_balanced_20_21_31.json" "0" "balanced_male"
sample_vector "$JSON_SUBDIR/atvecs_balanced_20_21_31.json" "1" "balanced_open"
sample_vector "$JSON_SUBDIR/atvecs_balanced_20_21_31.json" "2" "balanced_smile"
sample_vector "$JSON_SUBDIR/atvecs_balanced_mean_20_21_31.json" "0" "balanced_male"
sample_vector "$JSON_SUBDIR/atvecs_balanced_mean_20_21_31.json" "1" "balanced_open"
sample_vector "$JSON_SUBDIR/atvecs_balanced_mean_20_21_31.json" "2" "balanced_smile"
fi
if [ ! -f "$JSON_SUBDIR/unblurred_train_vectors_10k.json" ]; then
# do train blur/unblur vectors
$PLATCMD sample \
$MODEL \
--anchor-glob '/develop/data/celeba/dlib2/aligned/'$IMAGE_SIZE'/splits/train/0[0-2]????.png' \
--batch-size $BATCH_SIZE \
--encoder \
--outfile "$JSON_SUBDIR/unblurred_train_vectors_10k.json"
fi
if [ ! -f "$JSON_SUBDIR/blurred1_train_vectors_10k.json" ]; then
$PLATCMD sample \
$MODEL \
--anchor-glob '/develop/data/celeba/dlib2/aligned_blur1/'$IMAGE_SIZE'/0[0-2]????.png' \
--batch-size $BATCH_SIZE \
--encoder \
--outfile "$JSON_SUBDIR/blurred1_train_vectors_10k.json"
fi
if [ ! -f "$JSON_SUBDIR/blurred2_train_vectors_10k.json" ]; then
$PLATCMD sample \
$MODEL \
--anchor-glob '/develop/data/celeba/dlib2/aligned_blur2/'$IMAGE_SIZE'/0[0-2]????.png' \
--batch-size $BATCH_SIZE \
--encoder \
--outfile "$JSON_SUBDIR/blurred2_train_vectors_10k.json"
fi
if [ ! -f "$JSON_SUBDIR/atvec_blur1.json" ]; then
$PLATCMD atvec \
--svm-diff "$JSON_SUBDIR/unblurred_train_vectors_10k.json,$JSON_SUBDIR/blurred1_train_vectors_10k.json" \
--outfile "$JSON_SUBDIR/atvec_blur1.json"
sample_vector "$JSON_SUBDIR/atvec_blur1.json" "0" "blur1"
$PLATCMD atvec \
--avg-diff "$JSON_SUBDIR/unblurred_train_vectors_10k.json,$JSON_SUBDIR/blurred1_train_vectors_10k.json" \
--outfile "$JSON_SUBDIR/atvec_blur1_mean.json"
sample_vector "$JSON_SUBDIR/atvec_blur1_mean.json" "0" "blur1_mean"
fi
if [ ! -f "$JSON_SUBDIR/atvec_blur2.json" ]; then
$PLATCMD atvec \
--svm-diff "$JSON_SUBDIR/unblurred_train_vectors_10k.json,$JSON_SUBDIR/blurred2_train_vectors_10k.json" \
--outfile "$JSON_SUBDIR/atvec_blur2.json"
sample_vector "$JSON_SUBDIR/atvec_blur2.json" "0" "blur2"
$PLATCMD atvec \
--avg-diff "$JSON_SUBDIR/unblurred_train_vectors_10k.json,$JSON_SUBDIR/blurred2_train_vectors_10k.json" \
--outfile "$JSON_SUBDIR/atvec_blur2_mean.json"
sample_vector "$JSON_SUBDIR/atvec_blur2_mean.json" "0" "blur2_mean"
fi
if [ ! -f "$JSON_SUBDIR/rafd_neutral_vectors.json" ]; then
# rafd emotions
$PLATCMD sample \
$MODEL \
--anchor-glob '/develop/data/rafd/aligned/'$IMAGE_SIZE'/*_neutral_*.png' \
--batch-size $BATCH_SIZE \
--encoder \
--outfile "$JSON_SUBDIR/rafd_neutral_vectors.json"
fi
for EMOTION in "angry" "contemptuous" "disgusted" "fearful" "happy" "sad" "surprised"; do
if [ ! -f "$JSON_SUBDIR/rafd_"$EMOTION"_vectors.json" ]; then
$PLATCMD sample \
$MODEL \
--anchor-glob '/develop/data/rafd/aligned/'$IMAGE_SIZE'/*_'$EMOTION'_*.png' \
--batch-size $BATCH_SIZE \
--encoder \
--outfile "$JSON_SUBDIR/rafd_"$EMOTION"_vectors.json"
fi
if [ ! -f "$JSON_SUBDIR/atvec_rafd_"$EMOTION".json" ]; then
$PLATCMD atvec \
--svm-diff "$JSON_SUBDIR/rafd_neutral_vectors.json","$JSON_SUBDIR/rafd_"$EMOTION"_vectors.json" \
--outfile "$JSON_SUBDIR/atvec_rafd_"$EMOTION".json"
sample_vector "$JSON_SUBDIR/atvec_rafd_"$EMOTION".json" "0" "rafd_"$EMOTION
$PLATCMD atvec \
--avg-diff "$JSON_SUBDIR/rafd_neutral_vectors.json","$JSON_SUBDIR/rafd_"$EMOTION"_vectors.json" \
--outfile "$JSON_SUBDIR/atvec_rafd_mean_"$EMOTION".json"
sample_vector "$JSON_SUBDIR/atvec_rafd_mean_"$EMOTION".json" "0" "rafd_mean_"$EMOTION
fi
done
if [ ! -f "$JSON_SUBDIR/rafd_eye_straight_vectors.json" ]; then
# rafd emotions
$PLATCMD sample \
$MODEL \
--anchor-glob '/develop/data/rafd/aligned/'$IMAGE_SIZE'/*_frontal.png' \
--batch-size $BATCH_SIZE \
--encoder \
--outfile "$JSON_SUBDIR/rafd_eye_straight_vectors.json"
fi
if [ ! -f "$JSON_SUBDIR/rafd_eye_right_vectors.json" ]; then
# rafd emotions
$PLATCMD sample \
$MODEL \
--anchor-glob '/develop/data/rafd/aligned/'$IMAGE_SIZE'/*_right.png' \
--batch-size $BATCH_SIZE \
--encoder \
--outfile "$JSON_SUBDIR/rafd_eye_right_vectors.json"
fi
if [ ! -f "$JSON_SUBDIR/rafd_eye_left_vectors.json" ]; then
# rafd emotions
$PLATCMD sample \
$MODEL \
--anchor-glob '/develop/data/rafd/aligned/'$IMAGE_SIZE'/*_left.png' \
--batch-size $BATCH_SIZE \
--encoder \
--outfile "$JSON_SUBDIR/rafd_eye_left_vectors.json"
fi
if [ ! -f "$JSON_SUBDIR/atvec_rafd_eye_straight_to_right.json" ]; then
$PLATCMD atvec \
--svm-diff "$JSON_SUBDIR/rafd_eye_straight_vectors.json","$JSON_SUBDIR/rafd_eye_right_vectors.json" \
--outfile "$JSON_SUBDIR/atvec_rafd_eye_straight_to_right.json"
sample_vector "$JSON_SUBDIR/atvec_rafd_eye_straight_to_right.json" "0" "rafd_eye_straight_to_right"
fi
if [ ! -f "$JSON_SUBDIR/atvec_rafd_eye_straight_to_left.json" ]; then
$PLATCMD atvec \
--svm-diff "$JSON_SUBDIR/rafd_eye_straight_vectors.json","$JSON_SUBDIR/rafd_eye_left_vectors.json" \
--outfile "$JSON_SUBDIR/atvec_rafd_eye_straight_to_left.json"
sample_vector "$JSON_SUBDIR/atvec_rafd_eye_straight_to_left.json" "0" "rafd_eye_straight_to_left"
fi
if [ ! -f "$JSON_SUBDIR/atvec_rafd_eye_left_to_right.json" ]; then
$PLATCMD atvec \
--svm-diff "$JSON_SUBDIR/rafd_eye_left_vectors.json","$JSON_SUBDIR/rafd_eye_right_vectors.json" \
--outfile "$JSON_SUBDIR/atvec_rafd_eye_left_to_right.json"
sample_vector "$JSON_SUBDIR/atvec_rafd_eye_left_to_right.json" "0" "rafd_eye_left_to_right"
fi
if [ ! -f "$JSON_SUBDIR/rafd_straight_vectors.json" ]; then
# rafd emotions
$PLATCMD sample \
$MODEL \
--anchor-glob '/develop/data/rafd/aligned/'$IMAGE_SIZE'/Rafd090*.png' \
--batch-size $BATCH_SIZE \
--encoder \
--outfile "$JSON_SUBDIR/rafd_straight_vectors.json"
fi
if [ ! -f "$JSON_SUBDIR/rafd_right_vectors.json" ]; then
# rafd emotions
$PLATCMD sample \
$MODEL \
--anchor-glob '/develop/data/rafd/aligned/'$IMAGE_SIZE'/Rafd045*.png' \
--batch-size $BATCH_SIZE \
--encoder \
--outfile "$JSON_SUBDIR/rafd_right_vectors.json"
fi
if [ ! -f "$JSON_SUBDIR/rafd_left_vectors.json" ]; then
# rafd emotions
$PLATCMD sample \
$MODEL \
--anchor-glob '/develop/data/rafd/aligned/'$IMAGE_SIZE'/Rafd135*.png' \
--batch-size $BATCH_SIZE \
--encoder \
--outfile "$JSON_SUBDIR/rafd_left_vectors.json"
fi
if [ ! -f "$JSON_SUBDIR/atvec_rafd_straight_to_right.json" ]; then
$PLATCMD atvec \
--svm-diff "$JSON_SUBDIR/rafd_straight_vectors.json","$JSON_SUBDIR/rafd_right_vectors.json" \
--outfile "$JSON_SUBDIR/atvec_rafd_straight_to_right.json"
sample_vector "$JSON_SUBDIR/atvec_rafd_straight_to_right.json" "0" "rafd_straight_to_right"
fi
if [ ! -f "$JSON_SUBDIR/atvec_rafd_straight_to_left.json" ]; then
$PLATCMD atvec \
--svm-diff "$JSON_SUBDIR/rafd_straight_vectors.json","$JSON_SUBDIR/rafd_left_vectors.json" \
--outfile "$JSON_SUBDIR/atvec_rafd_straight_to_left.json"
sample_vector "$JSON_SUBDIR/atvec_rafd_straight_to_left.json" "0" "rafd_straight_to_left"
fi
if [ ! -f "$JSON_SUBDIR/atvec_rafd_left_to_right.json" ]; then
$PLATCMD atvec \
--svm-diff "$JSON_SUBDIR/rafd_left_vectors.json","$JSON_SUBDIR/rafd_right_vectors.json" \
--outfile "$JSON_SUBDIR/atvec_rafd_left_to_right.json"
sample_vector "$JSON_SUBDIR/atvec_rafd_left_to_right.json" "0" "rafd_left_to_right"
fi
| true |
e21ab5ae82de79cb80f54d225ea7fcfb18f58b79 | Shell | cache2k/cache2k | /build.sh | UTF-8 | 596 | 3.375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
trap 'kill $(jobs -rp) || true 2>/dev/null' EXIT
set -ex
# Dump Java thread stacks for every JVM that jps can see.
# The PID is the first field of each jps line; jstack failures are ignored
# so the remaining JVMs still get dumped.
printThreadDumps() {
  jps -lv | while read jvm_line; do
    jvm_pid=$(awk '{ print $1; }' <<<"$jvm_line")
    echo
    echo "Thread dump process: $jvm_line"
    jstack -l $jvm_pid || true
  done
}
java -version
# Build everything first without running tests.
mvn clean install -DskipTests=true -Dmaven.javadoc.skip=true -B -V
# Watchdog: after 5 minutes, print three thread dumps 10s apart and report
# TIMEOUT. The `exit 1` only ends this background subshell — it does NOT
# abort the build; the script's status comes from `wait` below, and the EXIT
# trap kills this watchdog when the script finishes.
( sleep $(( 60 * 5 ));
printThreadDumps;
sleep 10;
printThreadDumps;
sleep 10;
printThreadDumps;
echo "TIMEOUT"
exit 1;
) &
# Run the tests in the background so we can wait on them explicitly.
mvn test -B &
testPid=$!
# Under `set -e`, a failing `wait` (i.e. failing tests) aborts the script
# with that status.
wait $testPid
# exit with the exit status of the maven job
# killed via trap: kill $threadDumpPid || true
| true |
8f532689facb00c0159cef4f9f81dd2932ed8e1b | Shell | magodo/docker_practice | /postgresql/scripts/repmgr/main.sh | UTF-8 | 6,407 | 3.578125 | 4 | [] | no_license | #!/bin/bash
#########################################################################
# Author: Zhaoting Weng
# Created Time: Thu 09 Aug 2018 08:26:13 PM CST
# Description:
#########################################################################
# Resolve the directory and name of this script so the shared libraries can
# be sourced with paths relative to it.
MYDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"&& pwd)"
MYNAME="$(basename "${BASH_SOURCE[0]}")"
# Shared helpers — presumably provide die(), VIP and SCRIPT_ROOT used below;
# verify against common.sh/config.sh.
# shellcheck disable=SC1090
. "$MYDIR"/../common.sh
# shellcheck disable=SC1090
. "$MYDIR"/../config.sh
#########################################################################
# action: start
#########################################################################
# Print the help text for the "start" action to stdout.
usage_start() {
  printf '%s\n' \
    'Usage: start [option] [primary_container] [standby_container]' \
    'Options:' \
    '-h, --help' \
    '-i, --init setup primary and standby before start' \
    '-s, --sync use sync replication instead of async'
}
# do_start [-i|--init] [-s|--sync] PRIMARY_CONTAINER STANDBY_CONTAINER
#   Starts (or, with --init, performs first-time setup of) the repmgr proxy
#   inside both containers. --sync selects synchronous replication.
do_start() {
  local sync_opt="--async"
  local init=0                # was referenced uninitialized when -i not given
  while :; do
    case ${1:-} in            # ${1:-}: don't trip on an exhausted arg list
      -h|--help)
        usage_start
        exit 0
        ;;
      -i|--init)
        init=1
        ;;
      -s|--sync)
        sync_opt="--sync"
        ;;
      --)
        shift
        break
        ;;
      *)
        break
        ;;
    esac
    shift
  done
  local primary=$1
  local standby=$2
  [[ -z $primary ]] && die "missing param: primary"
  [[ -z $standby ]] && die "missing param: standby"
  if [[ $init = 1 ]]; then
    # First-time setup: each side must know the other's hostname.
    local primary_host standby_host
    primary_host=$(docker exec "$primary" hostname)
    standby_host=$(docker exec "$standby" hostname)
    docker exec "$primary" "$SCRIPT_ROOT"/repmgr/proxy.sh setup -r primary -p "$standby_host" "$sync_opt"
    docker exec "$standby" "$SCRIPT_ROOT"/repmgr/proxy.sh setup -r standby -p "$primary_host" "$sync_opt"
  else
    docker exec "$primary" "$SCRIPT_ROOT"/repmgr/proxy.sh start
    docker exec "$standby" "$SCRIPT_ROOT"/repmgr/proxy.sh start
  fi
}
#########################################################################
# action: failover
#########################################################################
# Print the help text for the "failover" action to stdout.
usage_failover() {
  printf '%s\n' 'Usage: failover [option] [primary_container] [standby_container]'
  printf '%s\n' 'Description: configure network so that VIP is bound to standby, then promote standby as primary.'
  printf '%s\n' 'Options:'
  printf '%s\n' '-h, --help'
  printf '%s\n' '-p, --project docker-compose project'
}
# do_failover -p PROJECT PRIMARY_CONTAINER STANDBY_CONTAINER
#   Moves the virtual IP from the failed primary to the standby: detaches the
#   primary from the compose project's external network, attaches the standby
#   with the VIP, then promotes the standby to primary.
do_failover() {
  local project=""            # explicit default instead of declared-but-unset
  while :; do
    case ${1:-} in            # ${1:-}: don't trip on an exhausted arg list
      -h|--help)
        usage_failover
        exit 0
        ;;
      -p|--project)
        project=$2
        shift
        ;;
      --project=?*)
        project=${1#*=}
        ;;
      --)
        shift
        break
        ;;
      *)
        break
        ;;
    esac
    shift
  done
  local primary=$1
  local standby=$2
  [[ -z $project ]] && die "missing param: project"
  [[ -z $primary ]] && die "missing param: primary"
  [[ -z $standby ]] && die "missing param: standby"
  # Network name quoted so an odd project name cannot word-split the command.
  docker network disconnect "${project}_external_net" "$primary"
  docker network connect --ip "$VIP" "${project}_external_net" "$standby"
  docker exec "$standby" "$SCRIPT_ROOT"/repmgr/proxy.sh promote
}
#########################################################################
# action: failback
#########################################################################
# Print the help text for the "failback" action.
usage_failback() {
cat << EOF
Usage: failback [option] [failbackup_container]
Options:
-h, --help
EOF
}
# do_failback CONTAINER
#   Re-attaches the previous primary as a standby by running the proxy's
#   "rewind" action inside it.
do_failback() {
local project
# NOTE(review): 'local project' above is never used here — looks like a
# leftover copied from do_failover.
while :; do
case $1 in
-h|--help)
usage_failback
exit 0
;;
--)
shift
break
;;
*)
break
;;
esac
shift
done
local failback_container=$1
[[ -z $failback_container ]] && die "missing param: failback_container"
docker exec "$failback_container" "$SCRIPT_ROOT"/repmgr/proxy.sh rewind
}
#########################################################################
# action: sync_switch
#########################################################################
# Print the help text for the "sync_switch" action.
usage_sync_switch() {
cat << EOF
Usage: sync_switch [option] [primary_container] [sync|async]
Description: switch replication mode between sync and async on primary.
Options:
-h, --help
EOF
}
# do_sync_switch CONTAINER MODE
#   Switches replication mode on the primary.
#   NOTE(review): MODE is not validated against sync|async here; any value is
#   forwarded to proxy.sh as-is.
do_sync_switch() {
while :; do
case $1 in
-h|--help)
usage_sync_switch
exit 0
;;
--)
shift
break
;;
*)
break
;;
esac
shift
done
local primary=$1
local mode=$2
[[ -z $primary ]] && die "missing param: primary_container"
[[ -z $mode ]] && die "missing param: repl_mode"
docker exec "$primary" "$SCRIPT_ROOT"/repmgr/proxy.sh sync_switch $mode
}
#########################################################################
# main
#########################################################################
# Print the top-level help text.
usage() {
cat << EOF
Usage: ./${MYNAME} [option] [action]
Options:
-h, --help
Actions:
start start primary and standby
failover remove primary from current network and promote current standby as new primary
failback revoke previous primary as standby following new primary
sync_switch switch replication mode between sync and async
EOF
}
# Entry point: parse global options, then dispatch the first positional
# argument as an action, forwarding the remaining args to its handler.
main() {
while :; do
case $1 in
-h|--help)
usage
exit 0
;;
--)
shift
break
;;
*)
break
;;
esac
shift
done
local action="$1"
shift
case $action in
"start")
do_start "$@"
;;
"failover")
do_failover "$@"
;;
"failback")
do_failback "$@"
;;
"sync_switch")
do_sync_switch "$@"
;;
*)
# NOTE(review): "Unknwon" is a typo in a runtime message; left untouched
# here because this edit changes comments only.
die "Unknwon action: $action!"
;;
esac
exit 0
}
main "$@"
| true |
055e3827a84ab5e2e315616135d2a3852f9ce968 | Shell | securitybreach/Cerberus | /etc/powerpanel/pwrstatd-lowbatt.sh | UTF-8 | 799 | 3.28125 | 3 | [] | no_license | #!/bin/sh
echo "Warning: The UPS's battery power is not enough, system will be shutdown soon!" | wall
export RECEIPT_NAME
export RECEIPT_ADDRESS
export SENDER_ADDRESS
#
# If you want to receive event notification by e-mail, you must change 'ENABLE_EMAIL' item to 'yes'.
# Note: After change 'ENABLE_EMAIL' item, you must asign 'RECEIPT_NAME', 'RECEIPT_ADDRESS', and
# 'SENDER_ADDRESS' three items as below for the correct information.
#
# Enable to send e-mail
ENABLE_EMAIL=no
# Change your name at this itme.
RECEIPT_NAME="user name"
# Change mail receiver address at this itme.
RECEIPT_ADDRESS=user_name@company.com
# Change mail sender address at this itme.
SENDER_ADDRESS=user_name@company.com
# Execute the 'pwrstatd-email.sh' shell script
if [ $ENABLE_EMAIL = yes ]; then
/etc/powerpanel/pwrstatd-email.sh
fi
| true |
d9b9338956539c117bb332e2a938028285ab75f1 | Shell | Flotristant/ssoo-2012-g6 | /TP/InstalarU.sh | UTF-8 | 26,015 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Comando "instalar"
#*******************************************Variables generales********************************************
msgError="Proceso de instalacion cancelado"
instalar=0
parametro=0
respuesta=""
dirValidado=""
#Variables de configuracion
PERLV=""
BINDIR="bin"
MAEDIR="mae"
ARRIDIR="arribos"
RECHDIR="rechazados"
REPODIR="reportes"
LOGDIR="log"
DATASIZE=100
LOGEXT=".log"
LOGSIZE=400
#****Directorios de la instalacion****
GRUPO=$PWD
DIRPPAL="$GRUPO"
CONFDIR="$DIRPPAL/confdir" #directorio de configuraciones
dirInst="$DIRPPAL/inst" #directorio donde se encuentran archivos de la instalacion
#****Archivos y comandos de la instalacion****
declare -a COMANDOS
declare -a ARCHIVOS
declare -a ARCH_OBL
COMANDOS=("LoguearU.sh" "MoverU.sh" "DetectarU.sh" "GrabarParqueU.sh" "IniciarU.sh" "MirarU.sh" "StopD.sh" "ListarU.pl" "StartD.sh")
ARCHIVOS=("prod.mae" "sucu.mae" "cli.mae")
ARCH_OBL=(${COMANDOS[*]} ${ARCHIVOS[*]})
arch_log_i="InstalarU.log"
archConf="InstalarU.conf"
#****Comandos****
log="./inst/LoguearU.sh InstalarU" #permite llamar con "log mensaje"
chmod +x ./inst/LoguearU.sh
#***********************************************FIN - Variables Generales*************************************
#***********************************************Funciones Utilizadas******************************************
# pregunta PROMPT
#   Asks a yes/no question on stdin until the user answers SI or NO in any
#   capitalization. Sets the global $respuesta to 1 for "si", 0 for "no".
function pregunta
{
    local preg="$1"
    local respValida=""
    echo ""
    # Keep asking until the answer matches SI or NO (case-insensitive).
    while [ -z "$respValida" ]
    do
        read -p "$preg (SI / NO): " resp
        respValida=$(echo "$resp" | grep -i '^[S][I]$\|^[N][O]$')
    done
    # Normalize any capitalization of "si" to lowercase.
    resp=$(echo "$respValida" | sed 's/^[Ss][Ii]$/si/')
    # Translate the answer into a numeric flag.
    if [ "$resp" = "si" ]
    then
        respuesta=1
    else
        respuesta=0
    fi
}
# preguntarDirectorio
#   Prompts (relative to $DIRPPAL) for a directory name until one made only
#   of the allowed characters (alphanumerics and % @ _ = : / .) is entered,
#   then stores it in the global $dirValidado.
function preguntarDirectorio {
    local resp=""
    while [ -z "$resp" ]
    do
        read -p "Ingrese nueva ubicacion: $DIRPPAL/" dirPasado
        resp=$(echo "$dirPasado" | grep "^[A-Za-z0-9%@_=:/\.]\{1,\}$")
        if [ -z "$resp" ]
        then
            echo ""
            echo "El nombre de directorio $dirPasado es invalido"
            echo "Los unicos caracteres validos son alfanumericos y % @ _ = : / ."
        fi
    done
    dirValidado=$resp
}
# validarParametro NAME
#   Accepts only the exact names of the installable commands and stores the
#   validated name in the global $parametro. On an invalid name it reports
#   the error (console + install log via $log) and aborts with exit code 3.
function validarParametro
{
    # BUGFIX: 'GrabarParqueU.sh' was missing its '^' anchor (any string merely
    # *ending* in it passed) and the dots were unescaped regex wildcards.
    parametro=$(echo "$1" | grep '^DetectarU\.sh$\|^IniciarU\.sh$\|^StopD\.sh$\|^StartD\.sh$\|^GrabarParqueU\.sh$\|^ListarU\.pl$')
    if [ -z "$parametro" ]; then
        echo "Debe escribir correctamente el nombre de los comandos, asegurese de leer el archivo README.txt"
        $log E "Debe escribir correctamente el nombre de los comandos, asegurese de leer el archivo README.txt"
        echo "Los parametros se escriben como: *IniciarU.sh *DetectarU.sh *GrabarParqueU.sh *StartD.sh *StopD.sh"
        $log E "Los parametros se escriben como: *IniciarU.sh *DetectarU.sh *GrabarParqueU.sh *StartD.sh *StopD.sh"
        exit 3
    fi
}
# leerVariablesDeConfiguracion CONFIG_FILE
#   Loads the installation settings (KEY=VALUE lines) from CONFIG_FILE into
#   the corresponding globals and recomputes the derived directories.
function leerVariablesDeConfiguracion
{
    # Each key is anchored at the start of its line so a key name appearing
    # inside some other value cannot be picked up, and 'cut -f 2-' keeps
    # everything after the first '=' so values containing '=' survive.
    GRUPO=$(grep "^GRUPO=" "$1" | cut -d"=" -f 2-)
    ARRIDIR=$(grep "^ARRIDIR=" "$1" | cut -d"=" -f 2-)
    RECHDIR=$(grep "^RECHDIR=" "$1" | cut -d"=" -f 2-)
    BINDIR=$(grep "^BINDIR=" "$1" | cut -d"=" -f 2-)
    MAEDIR=$(grep "^MAEDIR=" "$1" | cut -d"=" -f 2-)
    REPODIR=$(grep "^REPODIR=" "$1" | cut -d"=" -f 2-)
    LOGDIR=$(grep "^LOGDIR=" "$1" | cut -d"=" -f 2-)
    LOGEXT=$(grep "^LOGEXT=" "$1" | cut -d"=" -f 2-)
    LOGSIZE=$(grep "^LOGSIZE=" "$1" | cut -d"=" -f 2-)
    DATASIZE=$(grep "^DATASIZE=" "$1" | cut -d"=" -f 2-)
    # Derived locations always hang off the working directory.
    DIRPPAL="$GRUPO"
    CONFDIR="$DIRPPAL/confdir"
    dirInst="$DIRPPAL/inst"
}
# Verifies that Perl 5 or newer is installed; logs and exits with code 4
# otherwise.
function validarPerl {
# ***Check the Perl installation***
echo "Verificando versión de Perl instalada..."
# Extract the version string; the character class only admits major
# versions 5-9, which is what "5 or superior" means here.
PERLV=$(perl -v | grep 'v[5-9]\.[0-9]\{1,\}\.[0-9]*' -o) # capture the Perl version
# If Perl is missing or older than 5, abort the installation.
if [ -z "$PERLV" ]
then
msgErrorPerl="Para instalar el TP es necesario contar con Perl 5 o superior instalado. Efectue la instalacion e intentelo nuevamente."
echo -e $msgErrorPerl
$log E "$msgErrorPerl"
$log E "Proceso de instalacion cancelado"
exit 4
else
echo "Perl Version:$PERLV"
$log I "Perl Version:$PERLV"
fi
}
# Clears the screen and prints the final installation configuration, writing
# every line both to the console and to the install log ($log).
function MostrarDatosInstalacion
{
# ***Clear the screen to show the final configuration***
clear
$log I "Directorio de Trabajo: $GRUPO"
echo "Directorio de Trabajo: $GRUPO"
$log I "Libreria del sistema: $CONFDIR"
echo "Libreria del sistema: $CONFDIR"
$log I "Directorio de instalacion de los ejecutables: $BINDIR"
echo "Directorio de instalacion de los ejecutables: $BINDIR"
$log I "Directorio de instalacion de los archivos maestros: $MAEDIR"
echo "Directorio de instalacion de los archivos maestros: $MAEDIR"
$log I "Directorio de arribo de archivos externos: $ARRIDIR"
echo "Directorio de arribo de archivos externos: $ARRIDIR"
$log I "Espacio minimo libre para el arribo de archivos externos: $DATASIZE Mb"
echo "Espacio minimo libre para el arribo de archivos externos: $DATASIZE Mb"
$log I "Directorio de grabacion de los archivos externos rechazados: $RECHDIR"
echo "Directorio de grabacion de los archivos externos rechazados: $RECHDIR"
$log I "Directorio de grabacion de los logs de auditoria: $LOGDIR"
echo "Directorio de grabacion de los logs de auditoria: $LOGDIR"
$log I "Extension para los archivos de log: $LOGEXT"
echo "Extension para los archivos de log: $LOGEXT"
$log I "Tamaño maximo para los archivos de log: $LOGSIZE Kb"
echo "Tamaño maximo para los archivos de log: $LOGSIZE Kb"
$log I "Directorio de grabacion de los reportes de salida: $REPODIR"
echo "Directorio de grabacion de los reportes de salida: $REPODIR"
}
# Asks the user to confirm the start of the installation and records the
# answer in the global flag $instalar (1 = proceed, 0 = abort).
function confirmarInicioInstalacion
{
    $log I "Iniciando Instalacion. Esta Ud. seguro?"
    pregunta "Iniciando Instalacion. Esta Ud. seguro?"
    # pregunta leaves 1/0 in the global $respuesta.
    if [ "$respuesta" -ne 1 ]
    then
        instalar=0
    else
        instalar=1
    fi
}
function cargarParametrosInstalacion
{
#grabo mensajes en el archivo de Log
archivos=""
$log I "Directorio de Trabajo para la instalacion: $GRUPO"
echo "Directorio de Trabajo para la instalacion: $GRUPO"
archivos=$(ls -l -Q "$GRUPO"| grep '"$' | sed -e"s/\"\{1,\}/;/g" | cut -f2 -d';')
archivos=$(echo $archivos)
$log I "Lista de archivos y subdirectorios: $archivos"
echo "Lista de archivos y subdirectorios: $archivos"
$log I "Libreria del sistema: $CONFDIR"
echo "Libreria del sistema: $CONFDIR"
archivos=$(ls -l -Q "$CONFDIR"| grep '"$' | sed -e"s/\"\{1,\}/;/g" | cut -f2 -d';')
archivos=$(echo $archivos)
$log I "Lista de archivos y subdirectorios: $archivos"
echo "Lista de archivos y subdirectorios: $archivos"
$log I "Estado de la instalacion: PENDIENTE"
echo "Estado de la instalacion: PENDIENTE"
$log I "Para completar la instalacion ud. debera:"
echo "Para completar la instalacion ud. debera:"
$log I "Definir el directorio de instalacion de los ejecutables"
echo "Definir el directorio de instalacion de los ejecutables"
$log I "Definir el directorio de instalacion de los archivos maestros"
echo "Definir el directorio de instalacion de los archivos maestros"
$log I "Definir el directorio de arribo de archivos externos"
echo "Definir el directorio de arribo de archivos externos"
$log I "Definir el espacio minimo libre para el arribo de archivos externos"
echo "Definir el espacio minimo libre para el arribo de archivos externos"
$log I "Definir el directorio de grabacion de los archivos externos rechazados"
echo "Definir el directorio de grabacion de los archivos externos rechazados"
$log I "Definir el directorio de grabacion de los logs de auditoria"
echo "Definir el directorio de grabacion de los logs de auditoria"
$log I "Definir la extension y tamaño maximo para los archivos de log"
echo "Definir la extension y tamaño maximo para los archivos de log"
$log I "Definir el directorio de grabacion de los reportes de salida"
echo "Definir el directorio de grabacion de los reportes de salida"
#***COMIENZO DE CONFIGURACION DE INSTALACION***
instalado=0
while [ $instalado -eq 0 ]
do
#***Definir directorio de instalacion de ejecutables***
echo ""
$log I "Defina el directorio de instalacion de los ejecutables ($DIRPPAL/$BINDIR):"
echo "Defina el directorio de instalacion de los ejecutables ($DIRPPAL/$BINDIR):"
echo "Nombre del directorio actual de instalacion: ($DIRPPAL/$BINDIR)"
pregunta "Desea modificarlo?"
if [ $respuesta -eq 1 ]
then # permitir modificacion
preguntarDirectorio
BINDIR=$dirValidado
fi
$log I "Directorio de ejecutables: $DIRPPAL/$BINDIR"
echo "Directorio de ejecutables:($DIRPPAL/$BINDIR)"
#***Definir directorio de instalacion de los archivos maestros***
$log I "Defina el directorio de instalacion de los archivos maestros ($DIRPPAL/$MAEDIR):"
echo "Defina el directorio de instalacion de los archivos maestros ($DIRPPAL/$MAEDIR):"
echo "Nombre del directorio actual de instalacion: ($DIRPPAL/$MAEDIR)"
pregunta "Desea modificarlo?"
if [ $respuesta -eq 1 ]
then # permitir modificacion
preguntarDirectorio
MAEDIR=$dirValidado
fi
$log I "Directorio de archivos maestros: $DIRPPAL/$MAEDIR"
echo "Directorio de archivos maestros:($DIRPPAL/$MAEDIR)"
#***Definir directorio de arribo de archivos externos***
$log I "Defina el directorio de arribo de archivos externos ($DIRPPAL/$ARRIDIR):"
echo "Defina el directorio de arribo de archivos externos ($DIRPPAL/$ARRIDIR):"
echo "Nombre del directorio actual de instalacion: $DIRPPAL/$ARRIDIR"
pregunta "Desea modificarlo?"
if [ $respuesta -eq 1 ]
then # permitir modificacion
preguntarDirectorio
ARRIDIR=$dirValidado
fi
$log I "Directorio de arribo de archivos externos: $DIRPPAL/$ARRIDIR"
echo "Directorio de arribo de archivos externos: $DIRPPAL/$ARRIDIR"
#***Calculo espacio libre en ARRIDIR***
MAXSIZE=0
#obtengo el espacio libre en el directorio(df -B). corto la unica linea que me devuelve,
#reemplazo los espacios en blanco por '' (sed) y hago un cut del cuarto campo (cut)
MAXSIZE=$(df -B1048576 "$DIRPPAL" | tail -n1 | sed -e"s/\s\{1,\}/;/g" | cut -f4 -d';')
#***Definir espacio minimo libre para arribo de archivos externos***
fin=0
$log I "Defina el espacio minimo libre para el arribo de archivos externos en Mbytes ($DATASIZE):"
while [ $fin -eq 0 ]
do
echo "Defina el espacio minimo libre para el arribo de archivos externos en Mbytes ($DATASIZE):"
echo "Espacio minimo actual: $DATASIZE"
pregunta "Desea modificarlo?"
if [ $respuesta -eq 1 ]
then # permitir modificacion
read -p "Ingrese nuevo valor: " resp
#valido que sea un numero
respValida=$(echo $resp | grep "^[0-9]*$")
if [ $respValida ]
then
if [ $DATASIZE -gt $MAXSIZE ]
then
echo "Insuficiente espacio en disco."
$log E "Insuficiente espacio en disco."
echo "Espacio disponible: $MAXSIZE Mb."
$log E "Espacio disponible: $MAXSIZE Mb."
echo "Espacio requerido: $DATASIZE Mb."
$log E "Espacio requerido: $DATASIZE Mb."
echo "Cancele la instalacion e intentelo mas tarde o vuelva a intentarlo con otro valor."
$log E "Cancele la instalacion e intentelo mas tarde o vuelva a intentarlo con otro valor."
else
DATASIZE=$respValida
fin=1
fi
else
echo "Por favor ingrese un numero entero"
fi
else
fin=1
fi
done
$log I "Espacio minimo libre para el arribo de archivos externos en Mbytes: ($DATASIZE)"
echo "Espacio minimo libre para el arribo de archivos externos en Mbytes: ($DATASIZE)"
#***Definir directorio de grabacion de los archivos rechazados***
$log I "Defina el directorio de grabacion de los archivos externos rechazados ($DIRPPAL/$RECHDIR):"
echo "Defina el directorio de grabacion de los archivos externos rechazados ($DIRPPAL/$RECHDIR):"
echo "Nombre del directorio actual de grabacion de los archivos externos rechazados: $DIRPPAL/$RECHDIR"
pregunta "Desea modificarlo?"
if [ $respuesta -eq 1 ]
then # permitir modificacion
preguntarDirectorio
RECHDIR=$dirValidado
fi
$log I "Directorio de grabacion de los archivos externos rechazados: $DIRPPAL/$RECHDIR"
echo "Directorio de grabacion de los archivos externos rechazados: $DIRPPAL/$RECHDIR"
#***Definir directorio de grabacion de los logs de auditoria***
$log I "Defina el directorio de grabacion de los logs de auditoria ($DIRPPAL/$LOGDIR):"
echo "Defina el directorio de grabacion de los logs de auditoria ($DIRPPAL/$LOGDIR):"
echo "Nombre del directorio actual de grabacion de los logs de auditoria: $DIRPPAL/$LOGDIR"
pregunta "Desea modificarlo?"
if [ $respuesta -eq 1 ]
then # permitir modificacion
preguntarDirectorio
LOGDIR=$dirValidado
fi
$log I "Directorio de grabacion de los logs de auditoria: $DIRPPAL/$LOGDIR"
echo "Directorio de grabacion de los logs de auditoria: $DIRPPAL/$LOGDIR"
#***Definir la extension para archivos de log***
fin=0
$log I "Defina la extension para los archivos de log ($LOGEXT):"
while [ $fin -eq 0 ]
do
echo "Defina la extension para los archivos de log ($LOGEXT):"
echo "Nombre actual de extension para los archivos de log: $LOGEXT"
pregunta "Desea modificarlo?"
if [ $respuesta -eq 1 ]
then # permitir modificacion
read -p "Ingrese nueva extension: " resp
#valido que sea un numero
respValida=$(echo $resp | grep "^\.\([a-zA-Z0-9]\)\{1,\}$")
if [ $respValida ]
then
LOGEXT=$respValida
fin=1
else
echo "Por favor ingrese un . seguido del nombre de extension que desea"
fi
else
fin=1
fi
done
$log I "Extension para los archivos de log: $LOGEXT"
echo "Extension para los archivos de log: $LOGEXT"
#***Definir tamaño maximo para los archivos de log***
fin=0
$log I "Defina el tamaño maximo para los archivos $LOGEXT en Kbytes ($LOGSIZE):"
while [ $fin -eq 0 ]
do
echo "Defina el tamaño maximo para los archivos $LOGEXT en Kbytes ($LOGSIZE):"
echo "El tamaño maximo actual para los archivos de log es: $LOGSIZE Kbytes"
pregunta "Desea modificarlo?"
if [ $respuesta -eq 1 ]
then # permitir modificacion
read -p "Ingrese nuevo valor: " resp
#valido que sea un numero
respValida=$(echo $resp | grep "^[0-9]*$")
if [ $respValida ]
then
LOGSIZE=$respValida
fin=1
else
echo "Por favor ingrese un numero entero"
fi
else
fin=1
fi
done
$log I "Tamaño maximo para los archivos de log en Kbytes: ($LOGSIZE)"
echo "Tamaño maximo para los archivos de log en Kbytes: ($LOGSIZE)"
#***Definir directorio de grabacion de los reportes de salida***
$log I "Defina el directorio de grabacion de los reportes de salida ($DIRPPAL/$REPODIR):"
echo "Defina el directorio de grabacion de los reportes de salida ($DIRPPAL/$REPODIR):"
echo "Nombre del directorio actual de grabacion de los reportes de salida: $DIRPPAL/$REPODIR"
pregunta "Desea modificarlo?"
if [ $respuesta -eq 1 ]
then # permitir modificacion
preguntarDirectorio
REPODIR=$dirValidado
fi
$log I "Directorio de grabacion de los reportes de salida: $DIRPPAL/$REPODIR"
echo "Directorio de grabacion de los reportes de salida: $DIRPPAL/$REPODIR"
MostrarDatosInstalacion
$log I "Estado de la instalacion: LISTA"
echo "Estado de la instalacion: LISTA"
$log I "Los datos ingresados son correctos?"
pregunta "Los datos ingresados son correctos?"
if [ $respuesta -eq 1 ]
then # permitir instalacion
instalado=1
else
clear
fi
done
#**************************FIN DE CONFIGURACION DE INSTALACION****************************
}
# Reports the current installation state and decides whether to go on.
# $1 - optional component name (component-install mode).  When given and the
#      component already exists under $DIRPPAL/$BINDIR, the function prints
#      the still-missing pieces and terminates the whole script.
# Globals read: DIRPPAL BINDIR MAEDIR CONFDIR ARRIDIR RECHDIR LOGDIR REPODIR
# Uses the external $log command and the pregunta/respuesta prompt helpers.
# May terminate the script with "exit 0".
function verificarEstadoInstalacion
{
# Component mode: the requested command is already installed -> report and stop.
if ! [ -z "$1" ]; then
if [ -f $DIRPPAL/$BINDIR/"$1" ]; then
$log A "El comando ya se encuentra instalado"
echo "El comando ya se encuentra instalado"
echo "Componentes faltantes: "
$log I "Componentes faltantes: "
if ! [ -f $DIRPPAL/$BINDIR/"IniciarU.sh" ]; then echo "*IniciarU.sh"; $log I "*IniciarU.sh ";fi
if ! [ -f $DIRPPAL/$BINDIR/"DetectarU.sh" ]; then echo "*DetectarU.sh"; $log I "*DetectarU.sh ";fi
if ! [ -f $DIRPPAL/$BINDIR/"ListarU.pl" ]; then echo "*ListarU.pl"; $log I "*ListarU.pl ";fi
if ! [ -f $DIRPPAL/$BINDIR/"GrabarParqueU.sh" ]; then echo "*GrabarParqueU.sh"; $log I "*GrabarParqueU.sh ";fi
if ! [ -f $DIRPPAL/$BINDIR/"StartD.sh" ]; then echo "*StartD.sh"; $log I "*StartD.sh ";fi
if ! [ -f $DIRPPAL/$BINDIR/"StopD.sh" ]; then echo "*StopD.sh"; $log I "*StopD.sh ";fi
$log A "Fin de la instalacion"
echo "Fin de la instalacion"
exit 0;
fi
fi
if [ ! -d $DIRPPAL/$BINDIR ]; then
echo "Hubo un error en la instalacion, el directorio $BINDIR no existe"
else
# Count how many of the six installable components are present.
# NOTE: $[ ... ] is deprecated arithmetic syntax; $(( ... )) is the modern form.
cont=0
if [ -f $DIRPPAL/$BINDIR/"IniciarU.sh" ]; then cont=$[ $cont + 1 ]; fi
if [ -f $DIRPPAL/$BINDIR/"DetectarU.sh" ]; then cont=$[ $cont + 1 ]; fi
if [ -f $DIRPPAL/$BINDIR/"ListarU.pl" ]; then cont=$[ $cont + 1 ]; fi
if [ -f $DIRPPAL/$BINDIR/"GrabarParqueU.sh" ]; then cont=$[ $cont + 1 ]; fi
if [ -f $DIRPPAL/$BINDIR/"StopD.sh" ]; then cont=$[ $cont + 1 ]; fi
if [ -f $DIRPPAL/$BINDIR/"StartD.sh" ]; then cont=$[ $cont + 1 ]; fi
# Fewer than six -> INCOMPLETE: list what exists, what is missing, and ask
# whether the user wants to finish the installation.
if [ $cont -lt 6 ]; then
$log I "Componentes Existentes:"
echo "Componentes Existentes:"
echo ""
$log I "Directorio de instalacion de los ejecutables: $BINDIR"
echo "Directorio de instalacion de los ejecutables: $BINDIR"
# File listing built from "ls -l -Q" output; the sed/cut pair extracts the
# quoted names.  NOTE(review): this breaks on file names containing quotes.
archivos=$(ls -l -Q "$DIRPPAL/$BINDIR"| grep '"$' | sed -e"s/\"\{1,\}/;/g" | cut -f2 -d';')
archivos=$(echo $archivos)
echo "Lista de archivos: $archivos"
$log I "Lista de archivos: $archivos"
$log I "Directorio de instalacion de los archivos maestros: $MAEDIR"
echo "Directorio de instalacion de los archivos maestros: $MAEDIR"
archivos=$(ls -l -Q "$DIRPPAL/$MAEDIR"| grep '"$' | sed -e"s/\"\{1,\}/;/g" | cut -f2 -d';')
archivos=$(echo $archivos)
echo "Lista de archivos: $archivos"
$log I "Lista de archivos: $archivos"
echo "Componentes faltantes: "
$log I "Componentes faltantes: "
if ! [ -f $DIRPPAL/$BINDIR/"IniciarU.sh" ]; then echo "*IniciarU.sh"; $log I "*IniciarU.sh ";fi
if ! [ -f $DIRPPAL/$BINDIR/"DetectarU.sh" ]; then echo "*DetectarU.sh"; $log I "*DetectarU.sh ";fi
if ! [ -f $DIRPPAL/$BINDIR/"ListarU.pl" ]; then echo "*ListarU.pl"; $log I "*ListarU.pl ";fi
if ! [ -f $DIRPPAL/$BINDIR/"GrabarParqueU.sh" ]; then echo "*GrabarParqueU.sh"; $log I "*GrabarParqueU.sh ";fi
if ! [ -f $DIRPPAL/$BINDIR/"StartD.sh" ]; then echo "*StartD.sh"; $log I "*StartD.sh ";fi
if ! [ -f $DIRPPAL/$BINDIR/"StopD.sh" ]; then echo "*StopD.sh"; $log I "*StopD.sh ";fi
echo "Estado de la instalacion: INCOMPLETA"
$log I "Estado de la instalacion: INCOMPLETA"
# Ask to complete the install (full mode) or install the component ($1 set).
if [ -z "$1" ]; then
$log I "Desea completar la instalacion?"
pregunta "Desea completar la instalacion?"
else
$log I "Desea instalar el componente?"
pregunta "Desea instalar el componente?"
fi
if [ $respuesta -eq 0 ]
then
clear
echo "Proceso de instalacion cancelado"
$log I "Proceso de instalacion cancelado"
exit 0
fi
else
# All six components present -> COMPLETE: show the full summary and stop.
clear;
$log I "Libreria del sistema: $CONFDIR"
echo "Libreria del sistema: $CONFDIR"
archivos=$(ls -l -Q "$CONFDIR"| grep '"$' | sed -e"s/\"\{1,\}/;/g" | cut -f2 -d';')
archivos=$(echo $archivos)
$log I "Lista de archivos : $archivos"
echo "Lista de archivos : $archivos"
$log I "Directorio de instalacion de los ejecutables: $BINDIR"
echo "Directorio de instalacion de los ejecutables: $BINDIR"
archivos=$(ls -l -Q "$DIRPPAL/$BINDIR"| grep '"$' | sed -e"s/\"\{1,\}/;/g" | cut -f2 -d';')
archivos=$(echo $archivos)
$log I "Lista de archivos : $archivos"
echo "Lista de archivos : $archivos"
$log I "Directorio de instalacion de los archivos maestros: $MAEDIR"
echo "Directorio de instalacion de los archivos maestros: $MAEDIR"
archivos=$(ls -l -Q "$DIRPPAL/$MAEDIR"| grep '"$' | sed -e"s/\"\{1,\}/;/g" | cut -f2 -d';')
archivos=$(echo $archivos)
$log I "Lista de archivos : $archivos"
echo "Lista de archivos : $archivos"
$log I "Directorio de arribo de archivos externos: $ARRIDIR"
echo "Directorio de arribo de archivos externos: $ARRIDIR"
$log I "Directorio de grabacion de los archivos externos rechazados: $RECHDIR"
echo "Directorio de grabacion de los archivos externos rechazados: $RECHDIR"
$log I "Directorio de grabacion de los logs de auditoria: $LOGDIR"
echo "Directorio de grabacion de los logs de auditoria: $LOGDIR"
$log I "Directorio de grabacion de los reportes de salida: $REPODIR"
echo "Directorio de grabacion de los reportes de salida: $REPODIR"
$log I "Estado de la instalacion: COMPLETA"
echo "Estado de la instalacion: COMPLETA"
$log I "Proceso de instalacion cancelado"
echo "Proceso de instalacion cancelado"
exit 0;
fi
fi
}
# Creates the directory path "$1", including every missing intermediate
# component.  Replaces the original component-by-component loop, which
# mutated and leaked the globals IFS (via OIFS), path, arr and i, with the
# standard-library equivalent.
# $1 - slash-separated path, relative or absolute.
function crearDirectorio
{
mkdir -p -- "$1"
}
# Builds the directory tree required by the installation, relative to the
# current working directory.  Reads the globals $BINDIR, $MAEDIR, $ARRIDIR,
# $RECHDIR, $LOGDIR and $REPODIR plus a fixed set of working directories;
# each target is echoed, logged via $log and created with crearDirectorio.
function crearEstructuras
{
clear
echo "Creando Estructuras de directorio. . . ."
$log I "Creando Estructuras de directorio. . . ."
echo ""
# All targets in one array literal.  The expansions stay unquoted on
# purpose so the historical word-splitting behaviour is preserved.
declare -a rutas=( $BINDIR $MAEDIR $ARRIDIR $RECHDIR $LOGDIR $REPODIR inst_recibidas inst_ordenadas inst_rechazadas inst_procesadas parque_instalado )
local ruta
for ruta in ${rutas[@]}
do
echo "$ruta"
$log I "$ruta"
crearDirectorio $ruta
done
}
# Copies the installation files from $dirInst into the target tree, never
# overwriting a file that is already installed.
# $1 - optional: install only this one component instead of the full set.
# Globals read: dirInst DIRPPAL MAEDIR BINDIR; uses the $log command.
# Bugfix: the original tested LoguearU.sh/MoverU.sh/MirarU.sh against
# "$BINDIR/..." (a path relative to the current directory) instead of
# "$DIRPPAL/$BINDIR/...", so those three files were re-copied and
# overwritten on every run.  The checks now use the real install path,
# and the repeated if/cp pairs are collapsed into loops.
function moverArchivos
{
local archivo
echo "Instalando Archivos Maestros."
$log I "Instalando Archivos Maestros."
for archivo in prod.mae sucu.mae cli.mae; do
if ! [ -f "$DIRPPAL/$MAEDIR/$archivo" ]; then cp "$dirInst/$archivo" "$DIRPPAL/$MAEDIR"; fi
done
echo "Instalando Programas y Funciones."
$log I "Instalando Programas y Funciones."
# Helper scripts installed in every mode.
for archivo in LoguearU.sh MoverU.sh MirarU.sh; do
if ! [ -f "$DIRPPAL/$BINDIR/$archivo" ]; then cp "$dirInst/$archivo" "$DIRPPAL/$BINDIR"; fi
done
if [ -z "$1" ]; then
# Full installation: every installable component.
for archivo in IniciarU.sh DetectarU.sh StopD.sh StartD.sh ListarU.pl GrabarParqueU.sh; do
if ! [ -f "$DIRPPAL/$BINDIR/$archivo" ]; then cp "$dirInst/$archivo" "$DIRPPAL/$BINDIR"; fi
done
else
# Component installation: only the requested file.
if ! [ -f "$DIRPPAL/$BINDIR/$1" ]; then
cp "$dirInst/$1" "$DIRPPAL/$BINDIR"
fi
fi
}
# Writes the initial system configuration file on first install only; an
# existing file is left untouched.  Each record has the form
# NAME=value=user=date=time.
# Globals read: CONFDIR archConf GRUPO ARRIDIR RECHDIR BINDIR MAEDIR
# REPODIR LOGDIR LOGEXT LOGSIZE DATASIZE.
# NOTE(review): DATASIZE is written but never assigned in this part of the
# installer -- confirm it is initialised elsewhere.
function actualizarArchivoConf
{
USER=$(whoami)
DATE=$(date +%F)
TIME=$(date +%R)
echo "Actualizando la configuracion del sistema."
$log I "Actualizando la configuracion del sistema."
if [ ! -f "$CONFDIR/$archConf" ]; then
# The redirection below creates the file, so the original's extra "touch"
# was redundant and has been dropped.  The target is quoted so a path with
# spaces cannot split the redirection.
echo "GRUPO=$GRUPO=$USER=$DATE=$TIME
ARRIDIR=$ARRIDIR=$USER=$DATE=$TIME
RECHDIR=$RECHDIR=$USER=$DATE=$TIME
BINDIR=$BINDIR=$USER=$DATE=$TIME
MAEDIR=$MAEDIR=$USER=$DATE=$TIME
REPODIR=$REPODIR=$USER=$DATE=$TIME
LOGDIR=$LOGDIR=$USER=$DATE=$TIME
LOGEXT=$LOGEXT=$USER=$DATE=$TIME
LOGSIZE=$LOGSIZE=$USER=$DATE=$TIME
DATASIZE=$DATASIZE=$USER=$DATE=$TIME
" > "$CONFDIR/$archConf"
fi
}
# Performs a full installation: directory tree, component files and the
# initial configuration record, in that order.
function completarInstalacion
{
crearEstructuras
moverArchivos
actualizarArchivoConf
}
# Installs a single named component ($1) on top of an existing tree,
# re-creating any missing directories and refreshing the configuration.
function instalarComando
{
# NOTE(review): "comand" is assigned but never read in this chunk of the
# file -- confirm nothing else depends on it before removing it.
comand="$1"
crearEstructuras
moverArchivos "$1"
actualizarArchivoConf
}
#*****************************************************FIN Funciones utilizadas***************************************
#*****************************************INICIO PROGRAMA************************************************************
# Entry point of the installer: shows the banner, validates the environment,
# then either resumes/verifies an existing installation or starts a new one.
echo "********************************************************
* Bienvenido al Asistente de instalacion del practico *
********************************************************"
echo "*********************************************************
* TP SO7508 1er cuatrimestre 2012. *
* Tema U Copyright (c) Grupo 06 *
*********************************************************"
#verifico que el directorio $dirInst exista
# The distribution directory ($dirInst) must exist or nothing can be copied.
if [ ! -e "$dirInst" ]
then
echo ""
echo "El directorio $dirInst no existe"
echo 'No se puede iniciar la instalación.'
echo 'Por favor lea el archivo README.txt y vuelva a realizar la instalación'
echo ""
exit 1
fi
#Creo el directorio /confdir
if [ ! -e "$CONFDIR" ]
then
# NOTE(review): $CONFDIR is unquoted here; a value with spaces would split.
mkdir $CONFDIR
fi
#Verifico que existan todos los archivos necesarios para la instalacion
# ARCH_OBL holds the mandatory distribution files (defined earlier in the file).
cd $dirInst
for ((i=0;i<${#ARCH_OBL[*]};i++)); do
if [ ! -e ${ARCH_OBL[$i]} ]
then
echo ""
echo "No se encontro el archivo ${ARCH_OBL[$i]} necesario para realizar la instalacion"
echo ""
echo $msgError
echo "Verifique que ${ARCH_OBL[$i]} exista"
echo ""
exit 2
fi
done
cd ..
#Creo el archivo /InstalarU.log
if [ ! -e "$CONFDIR/$arch_log_i" ]
then
touch "$CONFDIR/$arch_log_i"
fi
#Verificar nombre del parametro si existe
# Optional first argument = name of a single component to install.
if ! [ -z "$1" ]; then
validarParametro "$1"
fi
parametro="$1"
#inicio ejecucion
$log I "Inicio de Ejecucion"
# Existing configuration -> verify install state; otherwise gather parameters.
if [ -e "$CONFDIR/$archConf" ]
then
leerVariablesDeConfiguracion "$CONFDIR/$archConf"
verificarEstadoInstalacion "$parametro"
validarPerl
MostrarDatosInstalacion
confirmarInicioInstalacion
else
validarPerl
cargarParametrosInstalacion
confirmarInicioInstalacion
fi
# $instalar is set by confirmarInicioInstalacion.
if [ $instalar -eq 1 ]
then
if [ -z "$parametro" ]; then
completarInstalacion
else
instalarComando "$parametro"
fi
fi
echo "Instalacion concluida."
$log I "Instalacion concluida."
| true |
13d175e166900842611b739904717a50a9dc2ba6 | Shell | elvisFabian/projeto-42-sonarqube | /Projeto42.SonarQube/entrypoint-tests.sh | UTF-8 | 3,766 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# CI entrypoint: builds a .NET solution, runs each test project with TRX/xunit
# loggers and coverlet coverage, then aggregates coverage with reportgenerator
# and (optionally) wraps everything in a SonarQube scanner session.
# Expects SOLUTION_NAME, RESULT_PATH, COVERAGE_PATH, COVERAGE_REPORT_PATH,
# CONFIGURATION and the SONARQUBE_* variables in the environment.
# Necessário instalar esses pacotes nos projetos de teste
#https://gunnarpeipman.com/aspnet/code-coverage/
# coverlet.msbuild
# Microsoft.CodeCoverage
# XunitXml.TestLogger
CoverletOutputFormat="cobertura,opencover"
REPORT_GENERATOR_REPORTS=""
REPORT_GENERATOR_REPORT_TYPES="HTMLInline"
echo ""
echo "-------------------------------------------------------"
echo "Iniciando entrypoint - entrypoint-tests.sh"
echo ""
echo "-------------------------------------------------------"
echo "dotnet properties"
echo "SOLUTION_NAME: $SOLUTION_NAME"
echo "RESULT_PATH: $RESULT_PATH"
echo "COVERAGE_PATH: $COVERAGE_PATH"
echo "COVERLET_OUTPUT_FORMAT: $CoverletOutputFormat"
echo "COVERAGE_REPORT_PATH: $COVERAGE_REPORT_PATH"
echo "-------------------------------------------------------"
# Open the SonarQube scanner session; "|| true" keeps the build alive when
# the Sonar server is unreachable.
if [[ ${RUN_SONARQUBE} = "true" ]]; then
echo ""
echo "-------------------------------------------------------"
echo "Sonar properties"
echo "SONARQUBE_PROJECT: $SONARQUBE_PROJECT"
echo "SONARQUBE_PROJECT_VERSION: $SONARQUBE_PROJECT_VERSION"
echo "SONARQUBE_URL: $SONARQUBE_URL"
echo "-------------------------------------------------------"
dotnet sonarscanner begin \
    /k:"$SONARQUBE_PROJECT" \
    /v:"$SONARQUBE_PROJECT_VERSION" \
    /d:sonar.verbose=false \
    /d:sonar.login=$SONARQUBE_TOKEN \
    /d:sonar.host.url=${SONARQUBE_URL} \
    /d:sonar.cs.vstest.reportsPaths="${RESULT_PATH}*.trx" \
    /d:sonar.cs.opencover.reportsPaths="${COVERAGE_PATH}**/coverage.opencover.xml" || true;
fi
echo ""
echo "--------------Iniciando dotnet build $SOLUTION_NAME"
dotnet build $SOLUTION_NAME -v q --no-restore
echo ""
echo "--------------Iniciando dotnet test"
#https://github.com/tonerdo/coverlet/issues/37 => Coverage report is not generated if there are any failing tests
#Para gerar covertura de código mesmo com teste falhando, usar coverlet, mas ai precisa rodar dotnet test por projeto
#https://github.com/tonerdo/coverlet
#https://www.nuget.org/packages/coverlet.console/
#dotnet test $SOLUTION_NAME --no-build --no-restore -v m --logger \"trx;LogFileName=TestResults.trx\" --results-directory $RESULT_PATH /p:CollectCoverage=true /p:CoverletOutput=$COVERAGE_PATH /p:CoverletOutputFormat=\"$CoverletOutputFormat\""
# One dotnet test + coverlet run per project under test/.
# NOTE(review): iterating "$(ls test)" breaks on names with spaces, and the
# "exit 0 &" below only backgrounds a no-op subshell exit -- it appears to be
# a hack so the loop keeps going after a failing test run; confirm intent.
for testFolder in $(ls test); do \
    echo ""
    echo " - $testFolder"
    echo ""
    CURRENT_COVERLET_OUTPUT_PATH="${COVERAGE_PATH}${testFolder}"
    REPORT_GENERATOR_REPORTS="${CURRENT_COVERLET_OUTPUT_PATH}/coverage.cobertura.xml;$REPORT_GENERATOR_REPORTS"
    dotnet test test/$testFolder --no-build --no-restore -v q -c ${CONFIGURATION} \
    --results-directory "${RESULT_PATH}/" \
    --logger "trx;LogFileName=${testFolder}.trx" \
    --logger "xunit;LogFilePath=${RESULT_PATH}${testFolder}.xml"; \
    exit 0 & \
    coverlet test/${testFolder}/bin/${CONFIGURATION}/*/${testFolder}.dll \
    --target "dotnet" \
    --targetargs "test test/${testFolder} --no-build -c ${CONFIGURATION}" \
    --format opencover \
    --format cobertura \
    --output "${CURRENT_COVERLET_OUTPUT_PATH}/"; \
done;
echo ""
echo "-------------------------------------------------------"
echo "reportgenerator properties"
echo "REPORT_GENERATOR_REPORTS: $REPORT_GENERATOR_REPORTS"
echo "COVERAGE_REPORT_PATH: $COVERAGE_REPORT_PATH"
echo "REPORT_GENERATOR_REPORT_TYPES: $REPORT_GENERATOR_REPORT_TYPES"
echo "-------------------------------------------------------"
# Merge all per-project cobertura files into one HTML coverage report.
reportgenerator "-reports:${REPORT_GENERATOR_REPORTS}" "-targetdir:$COVERAGE_REPORT_PATH" -reporttypes:"${REPORT_GENERATOR_REPORT_TYPES}" -verbosity:Error || true;
# Close the SonarQube session (uploads the collected reports).
if [[ ${RUN_SONARQUBE} = "true" ]]; then
dotnet sonarscanner end /d:sonar.login=$SONARQUBE_TOKEN || true;
fi
ac13ac5ab70aac436de74acf4f7d0b2c4853af7d | Shell | aystshen/Android-SecBuild | /Product/build/mkupdate_zip.sh | UTF-8 | 1,693 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Abort on the first failing command; critical steps also use "|| myexit".
set -e
# Directory layout: everything lives under ../outputs relative to the
# directory this script is started from.
CURDIR=$PWD
TOPBAND_OUT_DIR=$CURDIR/../outputs
INTEGRATION_DIR=$TOPBAND_OUT_DIR/integration
TARGET_FILES_DIR=$INTEGRATION_DIR/target_files
TOOLS_DIR=$INTEGRATION_DIR/tools
UPDATE_TMP=$INTEGRATION_DIR/update_tmp
# Output package name: first argument, defaulting to update.zip.
FILENAME=$1
# NOTE(review): [ ! $FILENAME ] relies on word splitting and misbehaves for
# values with spaces; [ -z "$FILENAME" ] would be the robust form.
if [ ! $FILENAME ]; then
FILENAME=update.zip
fi
# Make the AOSP release tools reachable by bare name.
export PATH=$TOOLS_DIR/bin:$PATH
export PATH=$TOOLS_DIR/releasetools:$PATH
# Print an error identifying the failing line and abort the script.
# $1 - the caller's $LINENO; it doubles as the exit status.
# Fixes vs. the original: the message now goes to stderr instead of stdout,
# and "exit" is no longer chained behind "&&" (so a failed echo can never
# skip the abort).  Defaults to status 1 when called without an argument.
# NOTE(review): line numbers above 255 wrap around as exit statuses.
myexit() {
echo "error: mkupdate_zip.sh error, exit at $1 !!!" >&2
exit "${1:-1}"
}
# Copies the freshly built OTA boot/recovery images into the target-files
# tree that mkUpdate packages.  Aborts via myexit on any failed copy.
prepare() {
cd $TOPBAND_OUT_DIR
cp -vf boot_ota.img ${TARGET_FILES_DIR}/IMAGES/boot.img || myexit $LINENO
cp -vf recovery_ota.img ${TARGET_FILES_DIR}/IMAGES/recovery.img || myexit $LINENO
# Return to the previous directory ("cd -" also prints it to stdout).
cd -
}
# Zips the target-files tree, regenerates the filesystem permission tables
# inside META/, and feeds the archive to ota_from_target_files to produce
# the signed OTA package at $TOPBAND_OUT_DIR/$FILENAME.
mkUpdate() {
cd $TARGET_FILES_DIR
echo "package $TARGET_FILES_DIR ---> target_files.zip..."
rm -f target_files.zip && zip -qry target_files.zip .
# Derive the SYSTEM/BOOT/RECOVERY permission lists from the zip index;
# fs_config annotates every path with its uid/gid/mode.
zipinfo -1 ./target_files.zip | awk 'BEGIN { FS="SYSTEM/" } /^SYSTEM\// {print "system/" $2}' | fs_config > ./META/filesystem_config.txt
zipinfo -1 ./target_files.zip | awk 'BEGIN { FS="BOOT/RAMDISK/" } /^BOOT\/RAMDISK\// {print $2}' | fs_config > ./META/boot_filesystem_config.txt
zipinfo -1 ./target_files.zip | awk 'BEGIN { FS="RECOVERY/RAMDISK/" } /^RECOVERY\/RAMDISK\// {print $2}' | fs_config > ./META/recovery_filesystem_config.txt
zip -q ./target_files.zip ./META/*filesystem_config.txt || myexit $LINENO
echo "make update.zip..."
# -n skips the timestamp/device checks; the package is signed with testkey.
ota_from_target_files -n -v -p $TOOLS_DIR -k $TOOLS_DIR/bin/testkey target_files.zip $TOPBAND_OUT_DIR/$FILENAME || myexit $LINENO
rm $TARGET_FILES_DIR/target_files.zip
cd -
echo "make update.zip ok!!!"
echo ""
}
# Entry point: stage the OTA images, then build the signed update package.
echo ""
echo "--------- make update.zip start ----------"
prepare
mkUpdate
echo "--------- make update.zip end ------------"
| true |
9eba9ae250d361952a64a2f0faf739746572d454 | Shell | petronny/aur3-mirror | /cnijfilter-mp540/PKGBUILD | UTF-8 | 1,014 | 2.90625 | 3 | [] | no_license | pkgname=cnijfilter-mp540
# Arch Linux PKGBUILD metadata: repackages Canon's i386 RPM printer driver
# for the PIXMA MP540 ("pkgname=cnijfilter-mp540" appears on the line above
# this chunk).
_pkgname=cnijfilter
pkgver=3.00
pkgrel=7
pkgdesc="Canon Printer Driver (For Multifunction MP540)"
url="http://software.canon-europe.com/products/0010641.asp"
arch=('i686' 'x86_64')
license=('custom')
# Pick the dependency list for the build architecture; on x86_64 the i386
# driver binaries need the lib32 compatibility stack.
if [ "${CARCH}" = 'x86_64' ]; then
depends=('lib32-sqlite3' 'lib32-gtk2' 'lib32-libcups' 'lib32-popt' 'cnijfilter-common' 'lib32-libpng12' 'lib32-heimdal' 'ghostscript')
elif [ "${CARCH}" = 'i686' ]; then
depends=('sqlite3' 'gtk2' 'libcups' 'popt' 'cnijfilter-common' 'libpng12' 'heimdal' 'ghostscript')
fi
makedepends=('rpmextract')
source=(http://files.canon-europe.com/files/soft31329/software/MP540_RPM_drivers.tar)
md5sums=('a4b9d98fd1269668254a0f70181bdba9')
# NOTE(review): populating $pkgdir inside build() is legacy PKGBUILD style;
# modern PKGBUILDs move this into a separate package() function.
build() {
# $srcdir and $pkgdir are provided by makepkg.
cd ${srcdir}
install -d ${pkgdir}
tar -xf MP540_RPM_printer.tar
rpmextract.sh ${_pkgname}-mp540series-${pkgver}-1.i386.rpm
mv "usr" ${pkgdir}
cd "$pkgdir"
msg "Moving from /usr/local to /usr"
# Relocate the RPM's /usr/local layout to the standard /usr prefix.
mv usr/local/share/* usr/share/
rmdir usr/local/share/
mv usr/local/* usr/
rmdir usr/local/
}
| true |
fde6dc477f69f07f7ca5033dd6217f22da2beb6a | Shell | samsquire/mazzle-starter | /terraform/components/prometheus/templates/bootstrap.sh | UTF-8 | 13,797 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# --- Prometheus ------------------------------------------------------------
# Download a pinned Prometheus release, install the binary, and write its
# scrape configuration plus a systemd unit.
cd /tmp
curl -L -s https://github.com/prometheus/prometheus/releases/download/v2.0.0-beta.2/prometheus-2.0.0-beta.2.linux-amd64.tar.gz \
| tar -xzvf -
mv prometheus-2.0.0-beta.2.linux-amd64/prometheus /bin/prometheus
sudo mkdir /etc/prometheus
# Unquoted heredoc: ${vvv_env} below is substituted by the templating step
# that renders this script; everything else is written literally as YAML.
cat << EOF | sudo tee /etc/prometheus/prometheus.yml > /dev/null
global:
  scrape_interval: 15s # By default, scrape targets every 15 seconds.
  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'codelab-monitor'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  - job_name: 'prometheus'
    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s
    static_configs:
      - targets: ['localhost:9090']
  - job_name: 'node'
    scrape_interval: 5s
    static_configs:
      - targets: ['localhost:9100']
  - job_name: 'dns'
    dns_sd_configs:
    - names:
      - nodes.${vvv_env}.devops-pipeline.com
EOF
# Minimal systemd unit; note the time-series DB lives under /tmp and is
# therefore lost on reboot.
cat << EOF | sudo tee /etc/systemd/system/prometheus.service > /dev/null
[Unit]
Description=prometheus
Requires=network-online.target
[Service]
Type=simple
ExecStart=/bin/prometheus --storage.tsdb.path="/tmp/prometheus/" --config.file=/etc/prometheus/prometheus.yml
[Install]
WantedBy=multi-user.target
EOF
sudo service prometheus start
# --- Grafana ---------------------------------------------------------------
# Install a pinned Grafana release from the official deb package.
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.2_amd64.deb
sudo apt-get install -y adduser libfontconfig
sudo dpkg -i grafana_4.5.2_amd64.deb
sudo mkdir -p /etc/grafana
cat << 'EOF' | sudo tee /etc/grafana/grafana.ini >/dev/null
##################### Grafana Configuration Example #####################
#
# Everything has defaults so you only need to uncomment things you want to
# change
# possible values : production, development
; app_mode = production
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
; instance_name = $HOSTNAME
#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
#
;data = /var/lib/grafana
#
# Directory where grafana can store logs
#
;logs = /var/log/grafana
#
# Directory where grafana will automatically scan and look for plugins
#
;plugins = /var/lib/grafana/plugins
#
#################################### Server ####################################
[server]
# Protocol (http, https, socket)
;protocol = http
# The ip address to bind to, empty will bind to all interfaces
;http_addr =
# The http port to use
;http_port = 3000
# The public facing domain name used to access grafana from a browser
;domain = localhost
# Redirect to correct domain if host header does not match domain
# Prevents DNS rebinding attacks
;enforce_domain = false
# The full public facing url you use in browser, used for redirects and emails
# If you use reverse proxy and sub path specify full url (with sub path)
;root_url = http://localhost:3000
# Log web requests
;router_logging = false
# the path relative working path
;static_root_path = public
# enable gzip
;enable_gzip = false
# https certs & key file
;cert_file =
;cert_key =
# Unix socket path
;socket =
#################################### Database ####################################
[database]
# You can configure the database connection by specifying type, host, name, user and password
# as separate properties or as one string using the url property.
# Either "mysql", "postgres" or "sqlite3", it's your choice
;type = sqlite3
;host = 127.0.0.1:3306
;name = grafana
;user = root
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
;password =
# Use either URL or the previous fields to configure the database
# Example: mysql://user:secret@host:port/database
;url =
# For "postgres" only, either "disable", "require" or "verify-full"
;ssl_mode = disable
# For "sqlite3" only, path relative to data_path setting
;path = grafana.db
# Max conn setting default is 0 (mean not set)
;max_idle_conn =
;max_open_conn =
#################################### Session ####################################
[session]
# Either "memory", "file", "redis", "mysql", "postgres", default is "file"
;provider = file
# Provider config options
# memory: not have any config yet
# file: session dir path, is relative to grafana data_path
# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
# mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`
# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
;provider_config = sessions
# Session cookie name
;cookie_name = grafana_sess
# If you use session in https only, default is false
;cookie_secure = false
# Session life time, default is 86400
;session_life_time = 86400
#################################### Data proxy ###########################
[dataproxy]
# This enables data proxy logging, default is false
;logging = false
#################################### Analytics ####################################
[analytics]
# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
# No ip addresses are being tracked, only simple counters to track
# running instances, dashboard and error counts. It is very helpful to us.
# Change this option to false to disable reporting.
;reporting_enabled = true
# Set to false to disable all checks to https://grafana.net
# for new vesions (grafana itself and plugins), check is used
# in some UI views to notify that grafana or plugin update exists
# This option does not cause any auto updates, nor send any information
# only a GET request to http://grafana.com to get latest versions
;check_for_updates = true
# Google Analytics universal tracking code, only enabled if you specify an id here
;google_analytics_ua_id =
#################################### Security ####################################
[security]
# default admin user, created on startup
;admin_user = admin
# default admin password, can be changed before first start of grafana, or in profile settings
;admin_password = admin
# used for signing
;secret_key = SW2YcwTIb9zpOOhoPsMm
# Auto-login remember days
;login_remember_days = 7
;cookie_username = grafana_user
;cookie_remember_name = grafana_remember
# disable gravatar profile images
;disable_gravatar = false
# data source proxy whitelist (ip_or_domain:port separated by spaces)
;data_source_proxy_whitelist =
[snapshots]
# snapshot sharing options
;external_enabled = true
;external_snapshot_url = https://snapshots-origin.raintank.io
;external_snapshot_name = Publish to snapshot.raintank.io
# remove expired snapshot
;snapshot_remove_expired = true
# remove snapshots after 90 days
;snapshot_TTL_days = 90
#################################### Users ####################################
[users]
# disable user signup / registration
;allow_sign_up = true
# Allow non admin users to create organizations
;allow_org_create = true
# Set to true to automatically assign new users to the default organization (id 1)
;auto_assign_org = true
# Default role new users will be automatically assigned (if disabled above is set to true)
;auto_assign_org_role = Viewer
# Background text for the user field on the login page
;login_hint = email or username
# Default UI theme ("dark" or "light")
;default_theme = dark
# External user management, these options affect the organization users view
;external_manage_link_url =
;external_manage_link_name =
;external_manage_info =
[auth]
# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
;disable_login_form = false
# Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false
;disable_signout_menu = false
#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access
;enabled = false
# specify organization name that should be used for unauthenticated users
;org_name = Main Org.
# specify role for unauthenticated users
;org_role = Viewer
#################################### Github Auth ##########################
[auth.github]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;auth_url = https://github.com/login/oauth/authorize
;token_url = https://github.com/login/oauth/access_token
;api_url = https://api.github.com/user
;team_ids =
;allowed_organizations =
#################################### Google Auth ##########################
[auth.google]
;enabled = false
;allow_sign_up = true
;client_id = some_client_id
;client_secret = some_client_secret
;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
;auth_url = https://accounts.google.com/o/oauth2/auth
;token_url = https://accounts.google.com/o/oauth2/token
;api_url = https://www.googleapis.com/oauth2/v1/userinfo
;allowed_domains =
#################################### Generic OAuth ##########################
[auth.generic_oauth]
;enabled = false
;name = OAuth
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;auth_url = https://foo.bar/login/oauth/authorize
;token_url = https://foo.bar/login/oauth/access_token
;api_url = https://foo.bar/user
;team_ids =
;allowed_organizations =
#################################### Grafana.com Auth ####################
[auth.grafana_com]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email
;allowed_organizations =
#################################### Auth Proxy ##########################
[auth.proxy]
;enabled = false
;header_name = X-WEBAUTH-USER
;header_property = username
;auto_sign_up = true
;ldap_sync_ttl = 60
;whitelist = 192.168.1.1, 192.168.2.1
#################################### Basic Auth ##########################
[auth.basic]
;enabled = true
#################################### Auth LDAP ##########################
[auth.ldap]
;enabled = false
;config_file = /etc/grafana/ldap.toml
;allow_sign_up = true
#################################### SMTP / Emailing ##########################
[smtp]
;enabled = false
;host = localhost:25
;user =
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
;password =
;cert_file =
;key_file =
;skip_verify = false
;from_address = admin@grafana.localhost
;from_name = Grafana
[emails]
;welcome_email_on_sign_up = false
#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and file
# Use space to separate multiple modes, e.g. "console file"
;mode = console file
# Either "debug", "info", "warn", "error", "critical", default is "info"
;level = info
# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
;filters =
# For "console" mode only
[log.console]
;level =
# log line format, valid options are text, console and json
;format = console
# For "file" mode only
[log.file]
;level =
# log line format, valid options are text, console and json
;format = text
# This enables automated log rotate(switch of following options), default is true
;log_rotate = true
# Max line number of single file, default is 1000000
;max_lines = 1000000
# Max size shift of single file, default is 28 means 1 << 28, 256MB
;max_size_shift = 28
# Segment log daily, default is true
;daily_rotate = true
# Expired days of log file(delete after max days), default is 7
;max_days = 7
[log.syslog]
;level =
# log line format, valid options are text, console and json
;format = text
# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
;network =
;address =
# Syslog facility. user, daemon and local0 through local7 are valid.
;facility =
# Syslog tag. By default, the process' argv[0] is used.
;tag =
#################################### AMQP Event Publisher ##########################
[event_publisher]
;enabled = false
;rabbitmq_url = amqp://localhost/
;exchange = grafana_events
;#################################### Dashboard JSON files ##########################
[dashboards.json]
;enabled = false
;path = /var/lib/grafana/dashboards
#################################### Alerting ############################
[alerting]
# Disable alerting engine & UI features
;enabled = true
# Makes it possible to turn off alert rule execution but alerting UI is visible
;execute_alerts = true
#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API Url /api/metrics
[metrics]
# Disable / Enable internal metrics
;enabled = true
# Publish interval
;interval_seconds = 10
# Send internal metrics to Graphite
[metrics.graphite]
# Enable by setting the address setting (ex localhost:2003)
;address =
;prefix = prod.grafana.%(instance_name)s.
#################################### Grafana.com integration ##########################
# Url used to to import dashboards directly from Grafana.com
[grafana_com]
;url = https://grafana.com
#################################### External image storage ##########################
[external_image_storage]
# Used for uploading images to public servers so they can be included in slack/email messages.
# you can choose between (s3, webdav)
;provider =
[external_image_storage.s3]
;bucket_url =
;access_key =
;secret_key =
[external_image_storage.webdav]
;url =
;public_url =
;username =
;password =
EOF
# Start Grafana with the configuration written above.
sudo service grafana-server start
| true |
2121b3a84d1366e8b0d65263280e10f905898583 | Shell | conmed/emailTermux | /emailTermux.sh | UTF-8 | 3,688 | 3.140625 | 3 | [] | no_license | clear
echo
echo
echo
echo Antes de comenzar debemos configurar un editor.
echo ¿Qué editor prefieres?
echo
echo " 1) editor VIM"
echo " 2) editor EMACS"
echo
echo -n "Escribe una opción: "
read editor
case $editor in
1)
apt install vim
echo "Editor VIM instalado"
editor="vim"
echo
;;
2)
pkg install emacs
echo "Editor EMACS instalado"
editor="emacs"
echo
;;
*)
echo
echo $editor", no es una opción.\
TIENES QUE ELEGIR UNA OPCIÓN"
echo
echo "Reinicia la instalación"
echo
sleep .8
echo
echo
exit
;;
esac
# --- Base packages and a fresh ~/.muttrc -----------------------------------
# Installs mutt and util-linux (for setterm) and truncates ~/.muttrc so the
# configuration below starts from a clean file.
sleep .2
# Bugfix: the original read "vd $HOME", which is not a command; the matching
# "cd $HOME" a few lines below shows it was meant to change into HOME before
# (re)creating .muttrc.
cd "$HOME"
echo
apt install mutt
sleep .3
echo -n Instalación de mutt. Correcto [
setterm -foreground green -bold on
echo -n ✓
setterm -foreground red -bold on
echo -n ]
setterm -foreground white -bold off
setterm -foreground red -bold on
sleep .5
# Start .muttrc over from scratch (the file begins with one blank line).
echo >.muttrc
apt install util-linux
cd "$HOME"
# --- Banner plus interactive IMAP/SMTP account configuration ---------------
# Prompts for the Gmail address, display name and password, then appends the
# corresponding "set ..." lines to ~/.muttrc and creates mutt's cache and
# certificate files.
clear
echo
echo `setterm -foreground green -bold on`.
echo
echo
echo " _ "
echo " ( )"
echo " ___ _ ___ ___ ___ __ _| |"
echo " /'___) /'_ \ /' _ \/' _ ' _ '\ /'__'\ /'_' |"
echo "( (___ ( (_) )| ( ) || ( ) ( ) |( ___/( (_| |"
echo "'\____)'\___/'(_) (_)(_) (_) (_)'\____)'\__,_)"
setterm -foreground blue
echo " Hackeando desde Android - Ivam3"
echo
echo
echo
echo `setterm -foreground white -bold off`
echo
echo
setterm -foreground red -bold on
echo -n CONFIGURACIÓN DE CORREO IMAP Y SMTP
echo
echo
setterm -foreground green -bold on
echo -n "Dirección de correo gmail: "
setterm -foreground white -bold off
# NOTE(review): every "read" below lacks -r, so backslashes in the input are
# interpreted rather than stored literally.
read Umail
# The '"' fragments splice literal double quotes around the expanded value in
# each generated muttrc line.
echo set from = '"'$Umail'"'>>.muttrc
setterm -foreground green -bold on
echo -n "Nombre real de usuario: "
setterm -foreground white -bold off
read Rname
echo set realname = '"'$Rname'"'>>.muttrc
echo set imap_user = '"'$Umail'"'>>.muttrc
setterm -foreground green -bold on
echo -n "Contraseña de correo: "
setterm -foreground white -bold off
# NOTE(review): the password is echoed while typed and stored in plaintext in
# ~/.muttrc; "read -s" and an app password would be safer.
read Pimap
echo set imap_pass = '"'$Pimap'"'>>.muttrc
echo set folder = '"'imaps://imap.gmail.com:993'"'>>.muttrc
echo set spoolfile = '"'+INBOX'"'>>.muttrc
echo set postponed ='"'+[Gmail]/Drafts'"'>>.muttrc
echo set header_cache =~/.mutt/cache/headers>>.muttrc
echo set message_cachedir =~/.mutt/cache/bodies>>.muttrc
echo set certificate_file =~/.mutt/certificates>>.muttrc
# NOTE(review): the backslash in "smtp.gmail.\com" is written literally into
# .muttrc (double quotes keep a backslash before ordinary characters) --
# looks like a typo; confirm mutt tolerates it in smtp_url.
echo set smtp_url = "'"smtps://$Umail:$Pimap@smtp.gmail.\com:465/"'">>.muttrc
echo set move = no>>.muttrc
echo set imap_keepalive = '900'>>.muttrc
# $editor was chosen by the menu at the top of the script.
echo set editor = $editor >>.muttrc
echo
setterm -foreground red -bold on
sleep .5
echo -n Archivo de configuraciones creado [
setterm -foreground green -bold on
echo -n ✓
setterm -foreground red -bold on
echo -n ]
setterm -foreground white -bold off
#creando directorios
mkdir -pv ~/.mutt/cache/headers
mkdir -pv ~/.mutt/cache/bodies
touch ~/.mutt/certificates
echo
setterm -foreground red -bold on
sleep .5
echo -n Directorios de configuración creados [
setterm -foreground green -bold on
echo -n ✓
setterm -foreground red -bold on
echo -n ]
setterm -foreground white -bold off
echo
setterm -foreground red -bold on
sleep .5
echo -n Directorio de certificados creado [
setterm -foreground green -bold on
echo -n ✓
setterm -foreground red -bold on
echo -n ]
setterm -foreground white -bold off
echo
echo
setterm -foreground yellow -bold on
echo Ya puedes enviar correos por termux
echo
setterm -foreground white -bold off
echo Por ejemplo:
echo
echo mutt -s '"'Asunto'"' -x correo@destinatario.com '"'Contenido del correo'"'
echo
echo
echo Para poder ver la bandeja de entrada, puedes hacerlo con el comando '"'mutt'"', sin comillas.
echo
echo
| true |
140689cc5ca083bf6c13cc16a3adfa61a70da6a3 | Shell | haixingdev/react-wechat-backend | /deploy.sh | UTF-8 | 788 | 2.515625 | 3 | [] | no_license | #!/bin/sh
# Author : Richard
# Copyright (c) Tutorialspoint.com
# Script follows here:
# Reference: https://www.tutorialspoint.com/unix/unix-file-management.htm
#
# Deploy the wechat backend: copy the application files to the server over
# scp, then reinstall dependencies and restart the pm2 process remotely.

echo upload files to the server
echo copy scripts

host=www.gismall.com
root=/root/apps/wechat
# Fix: the variable was previously misspelled "distination".
destination=root@$host:$root

# Copy the application directories to the remote application root.
for dir in bin controllers middlewares models routes services utils views; do
    scp -r "./$dir" "$destination"
done

# Copy the top-level files.
for file in app.js configs.js package-lock.json package.json README.md yarn.lock; do
    scp "$file" "$destination"
done

# Reinstall dependencies and restart the app on the server
# ($root expands locally before the heredoc is sent).
ssh root@$host << remotessh
cd $root
npm install
pm2 restart wechat
exit
remotessh
| true |
e800423121e180786240c3757a88ed78061b5eeb | Shell | Skuzzzy/myRC | /conf/desktop/dotfolders/.scripts/dmenu with files.sh | UTF-8 | 714 | 3.484375 | 3 | [] | no_license | # All credit to Connor for this script
# https://github.com/REALDrummer
ls_flags=""
if [ "$#" -gt 0 -a "$1" = "--show-hidden-files" ]; then
ls_flags="-a"
fi
with_sudo=""
if [ "$#" -gt 0 -a "$1" = "--gksudo" -o "$#" -gt 1 -a "$2" = "--gksudo" ]; then
with_sudo="gksudo"
fi
# Present the entries of directory "$1" in dmenu and act on the choice:
# open regular files with xdg-open (optionally via gksudo), recurse into
# directories, and re-prompt on anything else.
dmenu_from_folder() {
	if [ "$ls_flags" = "-a" ]; then
		# With -a the first listed entry is always "."; drop it but
		# keep ".." so the user can still navigate upward.
		result=`ls -a "$1" --color=never | tail -n +2 | dmenu`
	else
		# Fix: the original always piped through `tail -n +2`, which
		# silently hid the first visible entry when -a was not used.
		result=`ls "$1" --color=never | dmenu`
	fi
	file="$1/$result"
	if [ "$result" != "" ]; then
		if [ "$result" = "/" ]; then
			# go up to the root directory
			dmenu_from_folder /
		elif [ -f "$file" ]; then
			$with_sudo xdg-open "$file"
		elif [ -d "$file" ]; then
			dmenu_from_folder "$file"
		else
			# unrecognised selection (e.g. broken symlink) — try again
			dmenu_from_folder "$1"
		fi
	fi
}

dmenu_from_folder ~
| true |
1010500f5b6d99aa2094eb25935099415e16c6b3 | Shell | mdinsmore/dev-scripts | /src/pip-upgrade-all | UTF-8 | 220 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Upgrade every locally installed pip package to its latest version, then
# report how the environment now differs from the pinned requirements.

# Guard: only run from a project root that carries requirements/base.txt.
if [ ! -f requirements/base.txt ]; then
    echo "No base requirements file in the current directory"
    exit
fi

# pip freeze --local : installed packages, one "name==version" per line
# grep -v '^\-e'     : skip editable (-e) installs
# cut -d = -f 1      : keep just the package name
# xargs -n1 ...      : upgrade each package individually
pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 pip install -U

echo
pip-compare
f3d30f7951c046c2f3af49a6b723bfc08738bc52 | Shell | diavastos/SWITCHES | /Translator/NSGA-scripts/objectives.sh | UTF-8 | 803 | 2.984375 | 3 | [
"MIT"
] | permissive | #####################################################
# #
# Title: Objectives to optimize with the GA #
# #
# Author: Andreas I. Diavastos #
# Email: diavastos@cs.ucy.ac.cy #
# #
# Last Update: 01-07-2017 #
# #
#####################################################
#!/bin/bash

# COMMAND LINE ARGUMENTS
#   $1 - guest/target passed through to micsmc
#   $2 - objective to sample: "power" or "thermal"
GUEST=$1
OBJECTIVE=$2

# RUN Power COMMAND: poll card power readings forever.
# (Quoting "$GUEST" is a fix: an argument containing spaces was previously
# split into multiple micsmc arguments.)
if [ "$OBJECTIVE" = "power" ]; then
	while true
	do
		micsmc -f "$GUEST"
	done
fi

# RUN Thermal COMMAND: poll card temperature readings forever.
if [ "$OBJECTIVE" = "thermal" ]; then
	while true
	do
		micsmc -t "$GUEST"
	done
fi
| true |
316e2b8d8822b47b9129df5b17ac717b8c3fd9b4 | Shell | adam-gaia/Notes | /sed.txt | UTF-8 | 311 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Sed notes
# Replace everything before and including match
echo "asdf-hello world" | sed 's/.*-//' # Replace everything up to and including the '-'
# Replace multiple spaces with single space
sed 's/ */ /g' fileName
# Remove trailing whitespace from each line
sed 's/[[:blank:]]*$//' fileName
| true |
45ec20953a093b372d03598d24c13483ca7e85b3 | Shell | ssokolow/lgogd_uri | /install.sh | UTF-8 | 2,782 | 3.6875 | 4 | [
"MIT"
] | permissive | #!/bin/sh
deps="python-gtk2 python-dbus python-vte python-notify python-pip"
# Colour escapes
# Source: http://stackoverflow.com/a/20983251/435253
textreset=$(tput sgr0) # reset the foreground colour
red=$(tput setaf 1)
green=$(tput setaf 2)
yellow=$(tput setaf 3)
die() {
echo "-------------------------------------------------------------------------------"
echo "${red}ERROR:" "$@" "${textreset}"
echo "-------------------------------------------------------------------------------"
exit 1
}
is_installed() { type "$1" 1>/dev/null 2>&1; return $?; }
# Make sure we've got the right working directory
# TODO: Also resolve symlinks just to play it safe
cd "$(dirname "$0")"
# Check if we were double-clicked and re-run self in a terminal
if [ ! -t 0 ]; then
exec xterm -hold -e "$0" "$@"
fi
is_installed lgogdownloader || die "lgogd_uri requires lgogdownloader but it isn't in your \$PATH. Please correct the problem and re-run install.sh."
is_installed sudo || die "This script requires sudo to elevate privileges."
if is_installed apt-get; then
echo "Ensuring dependencies are installed (requires root)..."
# shellcheck disable=SC2086
sudo apt-get install $deps || die "Failed to install dependencies. Exiting."
else
echo "${yellow}You don't have apt-get. If this script fails or lgogd_uri doesn't run, please install these dependencies and try again:"
echo " $deps${textreset}"
fi
is_installed pip || die "Missing pip, which is required for installing with support for uninstall. Exiting."
is_installed update-desktop-database || die "Missing update-desktop-database, which is required for adding a URI handler. Please install desktop-file-utils."
is_installed xdg-mime || die "Missing xdg-mime, which is required for setting lgogd_uri as default handler for gogdownloader:// URIs. Please install xdg-utils."
echo "Installing lgogd_uri system-wide with support for uninstallation (requires root)..."
sudo pip install --force-reinstall . || die "Failed to install lgogd_uri system-wide. Exiting."
# Or `sudo ./setup.py install` if you don't need to uninstall
echo "Updating mimetype handler database (requires root)..."
sudo update-desktop-database || die "Failed to update mimetype handler database. Exiting."
echo "Setting lgogd_uri as default handler for gogdownloader:// for your user..."
xdg-mime default lgogd_uri.desktop x-scheme-handler/gogdownloader || die "Failed to set lgogd_uri as default gogdownloader:// handler"
echo
if is_installed apt-get; then
echo "${green}Ok, gogdownloader:// URLs should now Just Work(tm) after you restart your browser. :)${textreset}"
else
echo "${yellow}lgogd_uri is installed but you may have to manually install the dependencies mentioned above before it will work."
fi
| true |
1500e760eb236a565269d360de345475b681f518 | Shell | irwanhub2016/bash | /color.sh | UTF-8 | 192 | 2.59375 | 3 | [] | no_license | #!/bin/bash
red=$'\e[1;31m'
grn=$'\e[1;32m'
yel=$'\e[1;33m'
blu=$'\e[1;34m'
mag=$'\e[1;35m'
cyn=$'\e[1;36m'
end=$'\e[0m'
printf "%s\n" "Text in ${cyn}red${end}, white and ${yel}blue${end}."
| true |
2c64b6a41125c734cadde917c9442ecce505746a | Shell | visiotufes/visiot | /cpp/ispace/cpp/install | UTF-8 | 3,916 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
__USER=$USER
if [ -f /etc/lsb-release ]; then
. /etc/lsb-release
OS=$DISTRIB_ID
VER=$DISTRIB_RELEASE
elif [ -f /etc/debian_version ]; then
OS=Debian # XXX or Ubuntu??
VER=$(cat /etc/debian_version)
elif [ -f /etc/redhat-release ]; then
# TODO: add code for Red Hat and CentOS here
...
else
OS=$(uname -s)
VER=$(uname -r)
fi
if [[ $OS != "Ubuntu" ]]; then
printf 'Unsupported OS: '$OS
exit 1
fi
# Get super user privileges
if [[ $EUID != 0 ]]; then
sudo "$0" "$@"
exit $?
fi
apt-get update
apt-get upgrade -y
printf '%-50s' ' [x] Installing dependencies'
declare -a packages=("build-essential" "cmake" "libssl-dev" "autoconf" "libtool" "libpgm-dev" "libpgm-5.*", "libboost1.58-all-dev")
for package in "${packages[@]}"
do
apt-get install -y $package
done
printf 'done!\n'
# ---- Boost ---------------------------------------------------------------
printf '%-50s' ' [x] Searching for boost...'
# Extract the minor version (e.g. "58" from libboost_x.so.1.58) from the
# dynamic linker cache.
BOOST_VERSION=`ldconfig -p | grep -Eo 'libboost_[a-z]+.so.1.[0-9]+' | head -n 1 | cut -d . -f 4`

# boost installation
# NOTE(review): if no libboost is installed, BOOST_VERSION is empty and the
# arithmetic test below raises an error and evaluates false, so boost is
# then reported as "found!" — confirm this is intended.
if (("$BOOST_VERSION" < 58)); then
    printf 'not found! Installing...\n'
    wget https://sourceforge.net/projects/boost/files/boost/1.60.0/boost_1_60_0.tar.gz/download
    tar -xf download
    rm download
    cd boost_1_60_0/
    ./bootstrap.sh
    sudo ./b2 install
    cd ..
    sudo chmod -R 755 boost_1_60_0/
    sudo chown -R $__USER boost_1_60_0/
    sudo ldconfig
else
    printf 'found!\n'
fi

# ---- librabbitmq (rabbitmq-c) --------------------------------------------
# librabbitmq installation
printf '%-50s' ' [x] Searching for librabbitmq...'
pkg-config --exists librabbitmq # exit code ($?) = 0 if successful
if [[ $? != 0 ]]; then
    printf 'not found! Installing...\n'
    git clone https://github.com/alanxz/rabbitmq-c
    cd rabbitmq-c
    mkdir build
    cd build
    cmake ..
    make -j4
    sudo make install
    if [[ $LD_LIBRARY_PATH != *"/usr/local/lib/x86_64-linux-gnu"* ]]; then
        echo 'export LD_LIBRARY_PATH=/usr/local/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH' >> ~/.bashrc
    fi
    cd ../..
    sudo chmod -R 755 rabbitmq-c/
    sudo chown -R $__USER rabbitmq-c/
    sudo ldconfig
else
    printf 'found!\n'
fi

# ---- SimpleAmqpClient ----------------------------------------------------
# libSimpleAmqpClient installation
printf '%-50s' ' [x] Searching for libSimpleAmqpClient...'
pkg-config --exists libSimpleAmqpClient # exit code ($?) = 0 if successful
if [[ $? != 0 ]]; then
    printf 'not found! Installing...\n'
    git clone https://github.com/alanxz/SimpleAmqpClient
    cd SimpleAmqpClient
    mkdir build
    cd build
    cmake ..
    make -j4
    sudo make install
    cd ../..
    sudo chmod -R 755 SimpleAmqpClient/
    sudo chown -R $__USER SimpleAmqpClient/
    sudo ldconfig
else
    printf 'found!\n'
fi

# ---- msgpack-c (built with C++11 support) --------------------------------
# msgpack installation
printf '%-50s' ' [x] Searching for msgpack...'
pkg-config --exists msgpack # exit code ($?) = 0 if successful
if [[ $? != 0 ]]; then
    printf 'not found! Installing...\n'
    git clone https://github.com/msgpack/msgpack-c.git
    cd msgpack-c
    mkdir build
    cd build
    cmake -DMSGPACK_CXX11=ON ..
    make -j4
    sudo make install
    cd ../..
    sudo chmod -R 755 msgpack-c/
    sudo chown -R $__USER msgpack-c/
    sudo ldconfig
else
    printf 'found!\n'
fi

# ---- libzmq (with PGM multicast support) ---------------------------------
# libzmq installation
printf '%-50s' ' [x] Searching for libzmq...'
pkg-config --exists libzmq # exit code ($?) = 0 if successful
if [[ $? != 0 ]]; then
    printf 'not found! Installing...\n'
    git clone https://github.com/zeromq/libzmq
    cd libzmq
    ./autogen.sh
    ./configure --with-pgm
    make -j4
    sudo make install
    cd ..
    sudo chmod -R 755 libzmq/
    sudo chown -R $__USER libzmq/
    sudo ldconfig
else
    printf 'found!\n'
fi

# ---- OpenCV --------------------------------------------------------------
# opencv installation
printf '%-50s' ' [x] Searching for opencv...'
pkg-config --exists opencv # exit code ($?) = 0 if successful
if [[ $? != 0 ]]; then
    printf 'not found! Installing...\n'
    git clone https://github.com/Itseez/opencv
    cd opencv
    mkdir build
    cd build
    cmake ..
    make -j4
    sudo make install
    cd ../..
    sudo chmod -R 755 opencv/
    sudo chown -R $__USER opencv/
    sudo ldconfig
else
    printf 'found!\n'
fi

printf '[x] Done...\n'
| true |
5586e8e2587eb0216e161603dfddb04c580609dd | Shell | pulsepointinc/docker-clouderamanager | /files/start.sh | UTF-8 | 987 | 3.078125 | 3 | [] | no_license | #!/bin/sh
## Read db config from env
DB_TYPE=${DB_TYPE:-mysql}
DB_HOST=${DB_HOST:-localhost}
DB_NAME=${DB_NAME:-cmandb}
DB_USER=${DB_USER:-cmanuser}
DB_PASSWORD=${DB_PASSWORD:-cmanpass}
## Write out db config file
cat > /etc/cloudera-scm-server/db.properties << EOF
com.cloudera.cmf.db.type=${DB_TYPE}
com.cloudera.cmf.db.host=${DB_HOST}
com.cloudera.cmf.db.name=${DB_NAME}
com.cloudera.cmf.db.user=${DB_USER}
com.cloudera.cmf.db.password=${DB_PASSWORD}
EOF
## Default env
export CMF_JDBC_DRIVER_JAR=${CMF_JDBC_DRIVER_JAR:-/usr/share/java/mysql-connector-java.jar:/usr/share/java/oracle-connector-java.jar}
export CMF_JAVA_OPTS=${CMF_JAVA_OPTS:--Xmx2G -XX:MaxPermSize=256m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp -Dcmf.root.logger=INFO,LOGFILE,CONSOLE}
## Log config and env for troubleshooting
cat 1>&2 << EOF
/etc/cloudera-scm-server/db.properties:
$(cat /etc/cloudera-scm-server/db.properties)
env:
$(env)
/usr/sbin/cmf-server:
EOF
exec /usr/sbin/cmf-server $*
| true |
19fe55df25ac34cc0358a7f3ddd091198d2e0c34 | Shell | scott-han/test | /trunk/amqp_0_9_1/Test_synapse_cached_time_1.sh | UTF-8 | 1,164 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | #! /bin/sh -x
. `dirname ${0}`/Test_common.sh
# note, currently ought to be an msys compatible path (as is being called potentially from automate msys chain of commands)
ODIR=${TESTING_DIRECTORY}${BASENAME}
if [ -z "${EXPLICIT_PREREQUISITES}" ]; then
EXPLICIT_PREREQUISITES=$ODIR/amqp_0_9_1/server.exe\ ${MYDIR}/clients/python/${BASENAME}.py OUTPUT_DIR=$ODIR/ make ${ODIR}/amqp_0_9_1/`basename ${0} .sh`.okrun || Bail
exit
fi
if [ -d "$ODIR" ]; then
echo Warining: $ODIR already exists
fi
cd $ODIR || Bail
sleep 1
Print_logs_in_background
Start_synapse ./amqp_0_9_1/server.exe --reserve_memory_size 3G
for I in {1..70}
do
(trap 'Kill_Hack' SIGTERM ; set -o pipefail ; nice -n 19 /c/Python27/python.exe ${MYDIR}/clients/python/${BASENAME}.py 2>&1 | tee ${BASENAME}_${I}_log.txt & wait $!) &
PUBLISHER_PIDS="${PUBLISHER_PIDS} ${!}"
done
for pid in ${PUBLISHER_PIDS} ; do
wait ${pid}
if [ "$?" -ne "0" ]
then
Bail
fi
done
sleep 5
if grep -qi "out of order" synapse_log.txt
then
Bail
fi
if grep -qi "assert" synapse_log.txt
then
Bail
fi
# nuke synapse server
Reap
sleep 1
cd $ODIR || Bail
Clean --Retain_exes
set +x
echo TESTED OK: ${0}
| true |
542f40881a012fde750c53e1e76b00e81a77020d | Shell | mouraja/estudos | /bash/documentos/f_valida_cpf.bash | UTF-8 | 2,966 | 4 | 4 | [] | no_license | #!/usr/bin/env bash
DIRNAME=$(dirname ${0});
if [[ -z ${____F_CALCULOS____} ]];
then
source ${DIRNAME}/../utils/f_calculos.bash 2>/dev/null;
if [[ ${?} -ne 0 ]];
then
echo "ERRO: Biblioteca 'f_calculos.bash' não localizado";
exit 1001;
fi
fi
declare ____F_VALIDA_CPF____="loaded";
## Documentos f_valida_cpf
## Sintaxe: f_valida_cpf <valor>
## Retorno: Integer "0 - sucesso; diferente 0 - falha"
##
## Validates a Brazilian CPF number: character set, length, repeated-digit
## blacklist and the two modulo-11 check digits.
function f_valida_cpf() {
    local __cpf="${1}";

    # Number of digits in a CPF.
    local -ri CPF_TAMANHO=11;

    # Multiplication factors for the modulo-11 check-digit algorithm.
    local -ri FATOR_INICIAL=2;
    local -ri FATOR_FINAL=11;

    # Validation error codes returned by main().
    local -ri ERRO_PADRAO=2;
    local -ri ERRO_TAMANHO=3;
    local -ri ERRO_REPETICAO=4;
    local -ri ERRO_DIGITOS=5;

    # Suppress error messages on screen.
    local -r ERRO_MSG_SAIDA=${NULO};

    function f_print_usage() {
        ${CAT}<<EOF
        Sintaxe:
            valida_cpf(<cpf>);
        Onde:
            cpf     :   Deve ser informado um valor no padrão CPF com
                        11 caracteres númericos, podendo conter
                        sinais separadores comuns aos CPF's.
        Exemplos:
            valida_cpf("758.232.954-93");
            valida_cpf("75823295493");
EOF
    }

    # Accept only digits and the common CPF separators '.' and '-'.
    function f_valida_padrao() {
        # NOTE(review): 'esta_vazia' comes from f_calculos.bash; the call
        # keeps the original order — confirm it succeeds for non-empty input.
        esta_vazia ${__cpf} || return ${STATE_FAIL};
        local __padrao=$( ${ECHO} ${__cpf} | ${EGREP} "[^0-9.-]" );
        # Fix: the original returned FAIL when NO invalid character was
        # found; fail only when an invalid character IS present.
        [[ -n ${__padrao} ]] && return ${STATE_FAIL};
        return ${STATE_SUCCESS};
    }

    # The stripped CPF must have exactly CPF_TAMANHO digits.
    function f_valida_tamanho() {
        # Fix: the original compared the CPF string itself against 11;
        # compare its *length* instead.
        [[ ${#__cpf} != ${CPF_TAMANHO} ]] && return ${STATE_FAIL};
        return ${STATE_SUCCESS};
    }

    # Detect a CPF made of a single repeated digit (e.g. 11111111111),
    # which passes the check-digit math but is not a legal CPF.
    function f_tem_repeticao() {
        # Fix: the original pattern had a stray ')' inside the interval and
        # used BRE-style \{ \} with egrep; ERE interval syntax is {n}.
        local __padrao="[${__cpf:0:1}]{${CPF_TAMANHO}}";
        local __resultado=$( ${ECHO} ${__cpf} | ${EGREP} "${__padrao}" );
        [[ -n ${__resultado} ]] && return ${STATE_SUCCESS};
        return ${STATE_FAIL};
    }

    # Verify both check digits (positions 10 and 11) with modulo11 from
    # f_calculos.bash.
    function f_valida_digitos() {
        for __i in {9,10};
        do
            local __base=${__cpf:0:__i};
            local __digito_verificador=${__cpf:__i:1};
            local __digito_calculado=$( modulo11 ${__base} ${FATOR_INICIAL} ${FATOR_FINAL} );
            [[ ${__digito_verificador} -ne ${__digito_calculado} ]] && return ${STATE_FAIL};
        done
        return ${STATE_SUCCESS};
    }

    # Run the validations in order; each failure maps to a distinct code.
    function main() {
        f_valida_padrao;
        [[ ${?} -ne ${STATE_SUCCESS} ]] && return ${ERRO_PADRAO};
        __cpf="$(remove_caracter ${__cpf})";
        f_valida_tamanho;
        [[ ${?} -ne ${STATE_SUCCESS} ]] && return ${ERRO_TAMANHO};
        f_tem_repeticao;
        [[ ${?} -ne ${STATE_FAIL} ]] && return ${ERRO_REPETICAO};
        f_valida_digitos;
        [[ ${?} -ne ${STATE_SUCCESS} ]] && return ${ERRO_DIGITOS};
        return ${STATE_SUCCESS};
    }

    [[ "${__cpf}" == "-h" ]] && f_print_usage && return ${STATE_FAIL};
    main;
}

# Quote "$@" so arguments containing spaces survive word-splitting.
f_valida_cpf "$@";
| true |
13a901975904125cac4ad5f038b0c3950d199198 | Shell | mudit-codeinit/blog | /backend/jenkins-laravel.sh | UTF-8 | 1,976 | 3.6875 | 4 | [] | no_license | #!/bin/sh
# Check command input: this script needs the destination project name ($1)
# and a git ref of the form "origin/<branch>" ($2).
# NOTE(review): the usage example below shows only one argument, but the
# check requires two — the example presumably omits the branch ref.
if [ "$#" -ne 2 ];
then
	echo "JENKINS LARAVEL PUSH"
	echo "--------------------"
	echo ""
	echo "Usage : ./jenkins-laravel.sh blog"
	echo ""
	exit 1
fi

# Declare variables
currentdate=`date "+%Y-%m-%d"`
scriptpath="/usr/local/bin/jenkins"
destination_project="$1"
# Take the part after the "/" (e.g. "master" from "origin/master").
destination_branch=`echo "$2" | awk -F "/" '{printf "%s", $2}'`

# Get configuration variables
# NOTE(review): config sourcing is commented out, so variables such as
# alert_email used below are expected to come from the environment.
#echo "Getting config ${scriptpath}/config/laravel/${destination_project}.conf "
#source ${scriptpath}/config/laravel/${destination_project}.conf

echo "Pushing to $destination_branch .. "
# Declare functions

# E-mail a push failure notice.
#   $1 - recipient address, $2 - failure description
alert_notification() {
	printf '%s\n' "Push script failure : $2" | mail -s "Push script Failure" $1
}

# Abort the deployment when a step failed.
#   $1 - exit code of the previous step, $2 - description for the alert
sanity_check() {
	[ "$1" -eq 0 ] && return
	echo "$2"
	alert_notification $alert_email "$2"
	exit 1
}
################
# STAGING PUSH #
################
destination_user="ubuntu"
destination_host="18.189.153.131"
destination_dir="/var/www/blog"
echo "destination_dir to $destination_dir .. "

# Push command over ssh
# NOTE(review): the ssh wrapper is commented out, so everything below runs
# *locally* on the Jenkins host against $destination_dir — confirm intended.
# ssh -l $destination_user $destination_host \
cd $destination_dir;
# Force the working tree to the requested branch, discarding local changes.
rm -rf composer.lock;
git reset --hard;
git fetch --all;
git checkout -f $destination_branch;
git reset --hard;
git fetch --all;
git pull origin $destination_branch;
# Reinstall PHP dependencies and refresh every Laravel cache.
/usr/local/bin/composer install --no-interaction --prefer-dist --optimize-autoloader;
php artisan clear-compiled;
php artisan migrate --force;
php artisan cache:clear;
php artisan route:clear;
#php artisan route:cache;
php artisan view:clear;
php artisan config:clear;
php artisan config:cache;
# npm i;
# npm run dev;
php artisan config:clear;
# Run the test suite and emit a JUnit report for Jenkins.
/usr/bin/php ./vendor/bin/phpunit --log-junit ${destination_dir}/tests/results/${destination_project}_test1.xml
| true |
b5477e9e15699c47869b996a01adbfd3bf56d0b1 | Shell | mihi314/dotfiles | /zsh/custom/themes/frisk.zsh-theme | UTF-8 | 817 | 2.6875 | 3 | [] | no_license | if [[ $SSH_CONNECTION ]]; then SSH="%{$fg[red]%}"; else SSH=""; fi
if [[ $SHLVL -gt 1 ]]; then NESTING="%{$fg[magenta]%}[nested::$SHLVL]%{$reset_color%} "; else NESTING=""; fi
GIT_CB="git::"
ZSH_THEME_SCM_PROMPT_PREFIX="%{$fg[blue]%}["
ZSH_THEME_GIT_PROMPT_PREFIX=$ZSH_THEME_SCM_PROMPT_PREFIX$GIT_CB
ZSH_THEME_GIT_PROMPT_SUFFIX="]%{$reset_color%} "
ZSH_THEME_GIT_PROMPT_DIRTY=" %{$fg[red]%}*%{$fg[blue]%}"
ZSH_THEME_GIT_PROMPT_CLEAN=""
ZSH_THEME_AWS_PREFIX="%{$fg_bold[black]%}[aws::"
ZSH_THEME_AWS_SUFFIX="]%{$reset_color%} "
PROMPT=$'%{$fg[green]%}%/%{$reset_color%} $(git_prompt_info)$(aws_prompt_info)$NESTING%{$fg_bold[black]%}[%n@%{$SSH%}%m%{$fg_bold[black]%}]%{$reset_color%} %{$fg_bold[black]%}[%T]%{$reset_color%}
%{$fg_bold[black]%}>%{$reset_color%} '
PROMPT2="%{$fg_blod[black]%}%_> %{$reset_color%}"
| true |
2708cc9d2a6c025bcab5738d04b128ec3929ac6f | Shell | Dervish13/reggae | /scripts/shell-provision.sh | UTF-8 | 723 | 3.515625 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
CBSD_WORKDIR=`sysrc -n cbsd_workdir`
SERVICE="${1}"
TYPE="${2}"
cleanup() {
umount "${CBSD_WORKDIR}/jails/${SERVICE}/root/shell"
}
if [ -z "${SERVICE}" ]; then
echo "Usage: ${0} <jail> <type>" 2>&1
exit 1
fi
if [ "${TYPE}" = "jail" ]; then
trap "cleanup" HUP INT ABRT BUS TERM EXIT
mkdir ${CBSD_WORKDIR}/jails/${SERVICE}/root/shell >/dev/null 2>&1 || true
mount_nullfs "${PWD}/shell" "${CBSD_WORKDIR}/jails/${SERVICE}/root/shell"
cbsd jexec "jname=${SERVICE}" cmd="/root/shell/provision.sh"
elif [ "${TYPE}" = "bhyve" ]; then
reggae scp provision ${SERVICE} shell
env VERBOSE="yes" reggae ssh provision ${SERVICE} sudo shell/provision.sh
else
echo "Type ${TYPE} unknown!" >&2
exit 1
fi
| true |
cb39535fe97a29a7512bfbd5e9a20319c166864a | Shell | albertwo1978/vdc | /scripts/aks/create-cluster-rbac-role-bindings.sh | UTF-8 | 5,464 | 3.640625 | 4 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | #!/usr/bin/env bash
#
# - Creates binding for cluster-admin role
# - Creates role and binding for view-all custom cluster role
# which extends the default view role to allow view access
# to cluster resources (e.g. nodes, secret names [not content])
#
PGM=$(basename $0)
DIR=$(dirname $0)
CLUSTER_NAME=$1
CLUSTER_RG=$2
RBAC_CLUSTER_ADMIN_AD_GROUP=$3
RBAC_CLUSTER_VIEW_AD_GROUP=$4
RBAC_EXTEND_VIEW_CLUSTER_ROLE=$5
RBAC_ENABLE_READ_ONLY_DASHBOARD=$6
if [[ -z $RBAC_CLUSTER_ADMIN_AD_GROUP ]] && [[ -z $RBAC_CLUSTER_VIEW_ALL_AD_GROUP ]];then
echo "$PGM: Neither RBAC_CLUSTER_ADMIN_AD_GROUP or RBAC_CLUSTER_VIEW_ALL_AD_GROUP are set. Nothing to do."
exit 0
fi
#CLUSTER_NAME=$ENV_NAME-k8s
echo "$PGM: Getting admin credentials for cluster:$CLUSTER_NAME"
TMP_KUBECONFIG=$(mktemp)
if [[ $rc -ne 0 ]];then
echo "$PGM: Error creating temp file:$TMP_KUBECONFIG"
exit 1
fi
function cleanup {
echo "$PGM: Removing tmp file $TMP_KUBECONFIG ...";
rm -f $TMP_KUBECONFIG;
echo "$PGM: Done removing tmp file"
}
trap cleanup EXIT
echo "$PGM: Using temp file:$TMP_KUBECONFIG for kubeconfig"
# get admin credentials
# Write the cluster-admin kubeconfig into the temp file; every kubectl call
# below authenticates through it.
echo "$PGM: cluster rg: $CLUSTER_RG name: $CLUSTER_NAME"
AKS_ADMIN_CREDS=$(az aks get-credentials --admin -n $CLUSTER_NAME -g $CLUSTER_RG --file $TMP_KUBECONFIG)
rc=$?
if [[ $rc -ne 0 ]];then
  echo "$PGM: Error getting admin credentials:$AKS_ADMIN_CREDS"
  exit 1
fi
# bind AD Group to admin cluster role
# this should be for "break glass" access to the cluster
if [[ ! -z $RBAC_CLUSTER_ADMIN_AD_GROUP ]];then
  echo "$PGM: Binding cluster role cluster-admin to AD Group:$RBAC_CLUSTER_ADMIN_AD_GROUP"
  # Build the ClusterRoleBinding manifest in-memory; the group name is
  # interpolated from the argument.
  API_OBJECT=$(cat <<EOF
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: aad-cluster-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: "$RBAC_CLUSTER_ADMIN_AD_GROUP"
EOF
)
  #ADMIN_BINDING_RESULT=$(kubectl create clusterrolebinding aad-cluster-admin \
  #  --kubeconfig=$TMP_KUBECONFIG \
  #  --clusterrole=cluster-admin \
  #  --group=$RBAC_CLUSTER_ADMIN_AD_GROUP 2>&1
  #)
  # kubectl apply (instead of create) keeps the script idempotent on re-runs.
  ADMIN_BINDING_RESULT=$(kubectl apply --kubeconfig $TMP_KUBECONFIG -f <(echo "$API_OBJECT") 2>&1)
  rc=$?
  if [[ $rc -ne 0 ]];then
    echo "$PGM: Error creating cluster-admin binding: $ADMIN_BINDING_RESULT"
    exit 1
  fi
fi # end create cluster-admin binding
# bind AAD group to cluster view (read only role)
if [[ ! -z $RBAC_CLUSTER_VIEW_AD_GROUP ]];then
  # Optionally extend the built-in view role with the companion manifest
  # (view-all-cluster-role.yaml) shipped next to this script.
  if [[ $RBAC_EXTEND_VIEW_CLUSTER_ROLE == "Y" ]];then
     echo "$PGM: Extending view cluster role ..."
     EXTEND_VIEW_RESULT=$(kubectl apply --kubeconfig $TMP_KUBECONFIG -f $DIR/view-all-cluster-role.yaml 2>&1)
     rc=$?
     if [[ $rc -ne 0 ]];then
       echo "$PGM: Error extending view clusterrole: $EXTEND_VIEW_RESULT"
       exit 1
     fi
  else
    echo "$PGM: NOT extending view cluster role"
  fi
  echo "$PGM: Binding cluster role view to AD Group:$RBAC_CLUSTER_VIEW_AD_GROUP"
  API_OBJECT=$(cat <<EOF
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: aad-view
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: view
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: "$RBAC_CLUSTER_VIEW_AD_GROUP"
EOF
)
  #VIEW_BINDING_RESULT=$(kubectl apply clusterrolebinding aad-view \
  #  --kubeconfig=$TMP_KUBECONFIG \
  #  --clusterrole=view \
  #  --group=$RBAC_CLUSTER_VIEW_AD_GROUP 2>&1
  #)
  VIEW_BINDING_RESULT=$(kubectl apply --kubeconfig $TMP_KUBECONFIG -f <(echo "$API_OBJECT") 2>&1)
  rc=$?
  if [[ $rc -ne 0 ]];then
    echo "$PGM: Error creating view binding:$VIEW_BINDING_RESULT"
    exit 1
  else
    echo "$PGM: Cluster view binding created"
  fi
fi # end create view binding
#
# If you want to allow internal access to the
# kubernetes dashboard with RBAC enabled:
#
# 1. Grant service account read only rights to resources
# 2. Grant access to create proxy to dashboard
#
if [[ ! -z $RBAC_ENABLE_READ_ONLY_DASHBOARD ]];then
  echo "$PGM: Creating dashboard view clusterrole binding"
  DASHBOARD_NS=kube-system
  DASHBOARD_SA=kubernetes-dashboard
  # Two manifests in one document: a binding that grants "view" to the
  # dashboard's service account, and an aggregated ClusterRole that lets
  # "view" users proxy to the dashboard service.
  API_OBJECT=$(cat <<EOF
#
# Grant view access to the kubernetes dashboard
#
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dashboard-view-all
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: view
subjects:
- kind: ServiceAccount
  name: ${DASHBOARD_SA}
  namespace: ${DASHBOARD_NS}
---
#
# This is needed to let users with "view"
# ClusterRole run the proxy.
#
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dashboard-proxy
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["kubernetes-dashboard"]
  verbs: ["get", "list", "watch"]
EOF
)
#
#DASHBOARD_BINDING_RESULT=$(kubectl apply clusterrolebinding dashboard-view-all \
#  --kubeconfig=$TMP_KUBECONFIG \
#  --clusterrole=view \
#  --serviceaccount=${DASHBOARD_NS}:${DASHBOARD_SA} 2>&1
#)
DASHBOARD_BINDING_RESULT=$(kubectl apply --kubeconfig $TMP_KUBECONFIG -f <(echo "$API_OBJECT") 2>&1)
rc=$?
if [[ $rc -ne 0 ]];then
  echo "$PGM: Error creating dashboard view binding:$DASHBOARD_BINDING_RESULT"
  exit 1
else
  echo "$PGM: Dashboard view binding created"
fi
fi # end grant dashboard view
ed4f515ee87f5ab643a0b1cd909a535a14970134 | Shell | dzmltzack/web | /cmd.sh | UTF-8 | 446 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# NOTE(review): this script downloads a remote JSON config, copies the local
# binary "oxm" under a random name and executes it for ~50 minutes before
# cleaning up — behaviour consistent with a crypto-miner wrapper. Treat as
# untrusted; the notes below describe only what is visible in the code.
echo "Generating Some SHiaT lel"
# Random 8-char lowercase alphanumeric suffix for the per-run names.
NEW_UUID=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 8 | head -n 1)
BB=z$NEW_UUID
NN=b$NEW_UUID
echo "we got" $BB "and" $NN
# Fetch the remote config and substitute $PASS (expected in environment)
# for the PSPS placeholder.
wget http://dzmltzack.github.io/web/arqt2.json -O oconfig.json
mv oconfig.json config.json
sed -i "s/PSPS/$PASS/g" config.json
# Run a randomly named copy of ./oxm with the worker name appended.
cp oxm $BB
echo $NN >> $BB
chmod +x $BB
sleep 5
./$BB
echo "work"
sleep 3000
# NOTE(review): `pgrep z` matches ANY process whose name contains "z" —
# this kill is far broader than the started binary.
kill -9 $(pgrep $BB)
kill -9 $(pgrep z)
rm -f $BB
rm -f z*
echo "bye"
ac718f295b83ca184e96f48015a827f9059aee13 | Shell | bfss-storage/bfss_server | /tools/deploy.sh | UTF-8 | 8,627 | 3.0625 | 3 | [] | no_license | #!/usr/bin/env bash
###
# Deployment constants: install root on every host, plus the MongoDB and
# memcached backend addresses baked into the generated configs below.
BFSSDIR="/opt/bfss"
MONGO=10.0.1.185
MEMCC=10.0.1.186
# Stop the BFSS_REGMD daemon on host ${1} and ensure its install directory
# exists. (\$ escapes delay expansion so PID is resolved remotely.)
function stop_regm()
{
    echo ===stop_regm ${1}===
    ssh -T root@${1} << EOF
PID=\`ps -ef |grep BFSS_REGMD |grep -v grep | awk '{print \$2}'\`
if [ -n "\$PID" ]; then kill \$PID; fi
mkdir -p ${BFSSDIR}/regm
EOF
}
# Deploy and start BFSS_REGMD on host ${1}: copy the binary, regenerate its
# config and log4cxx properties (interpolating ${1}/${MONGO} locally before
# the heredoc is sent), then launch it detached.
function restart_regm()
{
    echo ===restart_regm ${1}===
    scp ./BFSS_REGMD root@${1}:${BFSSDIR}/regm
    ssh -T root@${1} << EOF
cd ${BFSSDIR}/regm; pwd
rm -f bfss_regm.log*
cat << ECFG > bfss_regm.cfg
regmd:{
    mongodb_server_uri = "mongodb://${MONGO}";
    service_bind_addr = "${1}:9090";
    service_simplethread = 10;
};
ECFG
cat << ELOG > log4cxx.bfss_regm.properties
log4j.rootLogger=WARN, R
log4j.appender.R=org.apache.log4j.RollingFileAppender
log4j.appender.R.File=bfss_regm.log
log4j.appender.R.MaxFileSize=200MB
log4j.appender.R.MaxBackupIndex=10
log4j.appender.R.layout=org.apache.log4j.PatternLayout
log4j.appender.R.layout.ConversionPattern=%d [%t] %5p %F (line:%L) - %m%n
ELOG
./BFSS_REGMD > /dev/null 2>&1 &
EOF
}
# Stop the BFSS_SND storage-node daemon on host ${1} and ensure its install
# directory exists.
function stop_sn()
{
    echo ===stop_sn ${1}===
    ssh -T root@${1} << EOF
PID=\`ps -ef |grep BFSS_SND |grep -v grep | awk '{print \$2}'\`
if [ -n "\$PID" ] ; then kill \$PID; fi
mkdir -p ${BFSSDIR}/sn
EOF
}
# Deploy and start a storage node on host ${1}, registering with the regmd
# at ${2}. Resets the NVMe block device via BFSS_SN_Test first, derives a
# volume id from the host's last IP octet, writes configs and launches the
# daemon detached.
function restart_sn()
{
    echo ===restart_sn ${1} ${2} ===
    scp ./BFSS_SND root@${1}:${BFSSDIR}/sn
    scp ./BFSS_SN_Test root@${1}:${BFSSDIR}/sn
    VID=`echo ${1} | awk -F '.' '{print \$4}'`
    ssh -T root@${1} << EOF
cd ${BFSSDIR}/sn; pwd
rm -f bfss_sn.log*
export GTEST_BFSS_RESET_SN=/dev/nvme1n1
./BFSS_SN_Test --gtest_filter=BFSS_SN_R.Reset_SN_blkdevice  >> bfss_sn.log
unset GTEST_BFSS_RESET_SN
cat << ECFG > bfss_sn.cfg
snd:{
    regmd_server_uri = "regmd://${2}:9090";
    mongodb_server_uri = "mongodb://${MONGO}";
    service_blk_max = "1000G";
    service_blk_dev = "/dev/nvme1n1";
    service_desc = "SN-${1}";
    service_volume_id = 10${VID};
    service_bind_addr = "${1}:9091";
    service_simplethread = 10;
    service_remote_uri = "snd://${1}:9091";
    max_cache_size = "6G";
};
ECFG
cat << ELOG > log4cxx.bfss_sn.properties
log4j.rootLogger=WARN, R
log4j.appender.R=org.apache.log4j.RollingFileAppender
log4j.appender.R.File=bfss_sn.log
log4j.appender.R.MaxFileSize=200MB
log4j.appender.R.MaxBackupIndex=10
log4j.appender.R.layout=org.apache.log4j.PatternLayout
log4j.appender.R.layout.ConversionPattern=%d [%t] %5p %F (line:%L) - %m%n
ELOG
./BFSS_SND > /dev/null 2>&1 &
EOF
}
# Stop the BFSS_APID API daemon on host ${1} and ensure its install
# directory exists.
function stop_api()
{
    echo ===stop_api ${1}===
    ssh -T root@${1} << EOF
PID=\`ps -ef |grep BFSS_APID |grep -v grep | awk '{print \$2}'\`
if [ -n "\$PID" ] ; then kill \$PID; fi
mkdir -p ${BFSSDIR}/api
EOF
}
# Deploy and start BFSS_APID on host ${1}, pointing it at the regmd on ${2}
# plus the shared MongoDB/memcached backends, then launch it detached.
function restart_api()
{
    echo ===restart_api ${1} ${2}===
    scp ./BFSS_APID root@${1}:${BFSSDIR}/api
    ssh -T root@${1} << EOF
cd ${BFSSDIR}/api; pwd
rm -f bfss_api.log*
cat << ECFG > bfss_apid.cfg
apid:{
    mongodb_server_uri = "mongodb://${MONGO}";
    regmd_server_uri = "regmd://${2}:9090";
    memcache_server_uri = "memc://${MEMCC}:11211";
    service_bind_addr = "${1}:9092";
    service_simplethread = 10;
};
ECFG
cat << ELOG > log4cxx.bfss_api.properties
log4j.rootLogger=WARN, R
log4j.appender.R=org.apache.log4j.RollingFileAppender
log4j.appender.R.File=bfss_api.log
log4j.appender.R.MaxFileSize=200MB
log4j.appender.R.MaxBackupIndex=10
log4j.appender.R.layout=org.apache.log4j.PatternLayout
log4j.appender.R.layout.ConversionPattern=%d [%t] %5p %F (line:%L) - %m%n
ELOG
./BFSS_APID > /dev/null 2>&1 &
EOF
}
# Drop the three BFSS databases (BKeyDb, ObjectDb, VolumeDb) on the MongoDB
# instance running on host ${1}.
function reset_mongo()
{
    echo ===reset_mongo ${1}===
    local db
    for db in BKeyDb ObjectDb VolumeDb; do
        mongo ${1}:27017/${db} --eval "printjson(db.dropDatabase())"
    done
}
# Flush every cached entry on the memcached instance at ${1}:11211 by
# sending the flush_all/quit commands over a raw nc connection.
function reset_memcc()
{
    echo ===reset_memcc ${1}===
    { printf 'flush_all\r\n'; printf 'quit\r\n'; } | nc ${1} 11211
}
# Rebuild BFSS on build host ${1}: wipe the old tree, upload bfss.tar.gz,
# compile under devtoolset-7, then pull the resulting binaries back locally.
function rebuild_bfss()
{
    echo ===rebuild_bfss ${1}===
    ssh -T root@${1} << EOF
mkdir -p ${BFSSDIR}
rm -f ./bfss.tar.gz
rm -rf ${BFSSDIR}/bfssproject
EOF
    # Fix: the upload previously hardcoded root@10.0.1.185 and ignored the
    # host argument the rest of the function uses.
    scp ./bfss.tar.gz root@${1}:${BFSSDIR}
    ssh -T root@${1} << EOF
cd ${BFSSDIR}
tar zxf ./bfss.tar.gz
scl enable devtoolset-7 bash
mkdir -p ./bfssproject/build
cd ./bfssproject/build
cmake -DCMAKE_BUILD_TYPE=Release ..
make clean
make -j8
EOF
    scp root@${1}:${BFSSDIR}/bfssproject/out/bin/BFSS_* ./
    scp root@${1}:${BFSSDIR}/bfssproject/TestCase/bin/BFSS_* ./
}
# Open 12 gnome-terminal tabs, each ssh-ing to test host ${1} and looping
# the Write_BigFile stress test forever. GTEST_BFSS_C (chip size exponent,
# default 15) expands locally before the heredoc is sent.
function restart_test_bigfiles() {
    echo ===restart_test_bigfiles ${1}===
    for i in $(seq 1 12);
    do
        gnome-terminal --tab -t "B$i" -- bash -c "ssh -T root@${1} << ETST
cd /home/bfss/tests/
export GTEST_BFSS_API_IP=10.0.1.119
export GTEST_BFSS_API_PORT=30000
export GTEST_BFSS_C=${GTEST_BFSS_C:=15}      # [20->768K~1M] [15->24~32K] [18->192~256K]
./BFSS_API_Test --gtest_filter=BFSS_API_Test_Exceptions.Write_BigFile --gtest_repeat=-1 --gtest_break_on_failure |tee big.$i.log
ETST"
        sleep 1
    done
}
# Open 12 gnome-terminal tabs, each ssh-ing to test host ${1} and looping
# the base API test suite forever against the API at 10.0.1.119:30000.
function restart_test_base() {
    echo ===restart_test_base ${1}===
    for i in $(seq 1 12);
    do
        gnome-terminal --tab -t "A$i" -- bash -c "ssh -T root@${1} << ETST
cd /home/bfss/tests/
export GTEST_BFSS_API_IP=10.0.1.119
export GTEST_BFSS_API_PORT=30000
export GTEST_BFSS_C=${GTEST_BFSS_C:=15}      #chip size [20->768K~1M] [15->24~32K] [18->192~256K]
./BFSS_API_Test --gtest_filter=BFSS_API_TestBase* --gtest_repeat=-1 --gtest_break_on_failure |tee base.$i.log
ETST"
        sleep 1
    done
}
# Open 10 gnome-terminal tabs, each ssh-ing to test host ${1} and running
# the Write_File test 10 times. (Name keeps the original "wirte" typo —
# callers depend on it.) GTEST_BFSS_FILE_SS/TG expand locally.
function restart_test_wirte() {
    echo ===restart_test_wirte ${1}===
    for i in $(seq 1 10);
    do
        gnome-terminal --tab -t "C$i" -- bash -c "ssh -T root@${1} << ETST
cd /home/bfss/tests/
export GTEST_BFSS_API_IP=10.0.1.119
export GTEST_BFSS_API_PORT=30000
export GTEST_BFSS_FILE_SS=${GTEST_BFSS_FILE_SS:=1024000}
export GTEST_BFSS_FILE_TG=${GTEST_BFSS_FILE_TG:=3}
./BFSS_API_Test --gtest_filter=BFSS_API_Test_Exceptions.Write_File --gtest_repeat=10 --gtest_break_on_failure |tee write.$i.log
ETST"
        sleep 1
    done
}
function restart_local_test() {
# Restart the stress clients on the *local* machine: kill any running
# BFSS_API_Test processes, then spawn 13 base-suite tabs (B*) and 8
# create/write tabs (C*) that loop forever against the locally built
# binaries in ~/projx/src/bfssproject/TestCase/bin.
# The '\$2' keeps the '$' literal through backtick expansion so awk
# receives '{print $2}'. 'grep -v 181' excludes lines containing "181"
# — presumably ssh sessions to the .181 test host; TODO confirm.
PID=`ps -ef |grep BFSS_API_Test |grep -v 181 |grep -v grep | awk '{print \$2}'`
if [ -n "$PID" ] ; then kill $PID; fi
# Phase 1: 13 full base-suite clients. ':=' assigns the defaults in this
# shell if unset, and the exported values are inherited by each tab.
for i in $(seq 1 13); do
export GTEST_BFSS_Z=${GTEST_BFSS_Z:=0} #1 2 4... 2^Z size
export GTEST_BFSS_X=${GTEST_BFSS_X:=20} #[20->768K~1M] [15->24~32K] [18->192~256K]
export GTEST_BFSS_S=${GTEST_BFSS_S:=200} #200M
export GTEST_BFSS_C=${GTEST_BFSS_C:=20} #chip size [20->1M] [15->32K] [18->256K]
gnome-terminal --tab -t B$i -- bash -c "cd ~/projx/src/bfssproject/TestCase/bin; ./BFSS_API_Test --gtest_filter=BFSS_API_TestBase* --gtest_repeat=-1 --gtest_break_on_failure |tee testB.$i.log"
sleep 2
done
# Phase 2: 8 large-object clients (unconditional exports override any
# values set in phase 1 for the remaining tabs).
for i in $(seq 1 8); do
export GTEST_BFSS_Z=0 #No 1 2 4... 2^Z size
export GTEST_BFSS_X=30 #768M~1G
export GTEST_BFSS_S=10000 #10G
gnome-terminal --tab -t C$i -- bash -c "cd ~/projx/src/bfssproject/TestCase/bin; ./BFSS_API_Test --gtest_filter=BFSS_API_TestBase.CreateObject:BFSS_API_TestBase.Write --gtest_repeat=-1 --gtest_break_on_failure |tee testC.$i.log"
sleep 2
done
}
function stop_test() {
    # Kill every running BFSS_API_Test process on remote host $1.
    # The quoted here-doc delimiter ('REMOTE') suppresses local expansion,
    # so the script below is evaluated entirely on the remote side.
    echo ===stop_test ${1}===
    ssh -T root@${1} <<'REMOTE'
PID=$(ps -ef | grep BFSS_API_Test | grep -v grep | awk '{print $2}')
if [ -n "$PID" ] ; then kill $PID; fi
REMOTE
}
function restart_test() {
    # Redeploy the test binaries to host $1 and relaunch all client suites.
    local host=${1}
    echo ===restart_test ${host}===
    # Make sure no stale test clients are still running remotely.
    stop_test ${host}
    # Ship the freshly built test executables to the client host.
    scp ./BFSS_*_Test root@${host}:/home/bfss/tests
    # Fan out the three client workloads in the original order.
    local phase
    for phase in restart_test_base restart_test_bigfiles restart_test_wirte; do
        ${phase} ${host}
    done
}
### ========================================================================
# Main driver: stop every BFSS service, package and rebuild the sources on
# the build host, redeploy all services across the cluster, then launch the
# stress-test clients on the client host.
WORKDIR=$(dirname "$(readlink -f "$0")")
# Stop all running services: registry manager, storage nodes, API servers.
stop_regm 10.0.1.185
stop_sn 10.0.1.182
stop_sn 10.0.1.183
stop_sn 10.0.1.184
stop_api 10.0.1.185
stop_api 10.0.1.186
# Package the source tree (pull the latest code manually beforehand).
cd ..
#git pull --force# ##### please pull code manually.
cd ..
tar czf bfss.tar.gz --exclude=bfssproject/.git --exclude=bfssproject/cmake-build* --exclude=bfssproject/out --exclude=bfssproject/tools --exclude=bfssproject/bfss_web bfssproject
# Unpack + compile on the build host and fetch the executables back.
rebuild_bfss 10.0.1.185
# Reset the state stores, then redeploy and restart every service
# (storage nodes and APIs register against the regm on .185).
reset_mongo 10.0.1.185
reset_memcc 10.0.1.186
restart_regm 10.0.1.185
restart_sn 10.0.1.182 10.0.1.185
restart_sn 10.0.1.183 10.0.1.185
restart_sn 10.0.1.184 10.0.1.185
restart_api 10.0.1.185 10.0.1.185
restart_api 10.0.1.186 10.0.1.185
# Kick off the test clients.
restart_test 10.0.1.181
# BUGFIX: this was 'cd ${WORKDIR} | true' — the pipe ran cd in a subshell,
# so the working directory was never actually restored. Use '||' so a
# failed cd is tolerated but a successful one takes effect.
cd "${WORKDIR}" || true
b00c6dad2b4d3110b694bab8c5dc8f70bddd8475 | Shell | sourcemage/cauldron | /cauldron/config/display.conf | UTF-8 | 309 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# whether to use color for displaying messages
: "${CAULDRON_COLOR:=yes}"
# whether cauldron should suppress output messages
: "${CAULDRON_VERBOSE:=yes}"
# handle input/output redirections
: "${CAULDRON_NULL:=/dev/null}"
: "${CAULDRON_STDOUT:=/dev/stdout}"
: "${CAULDRON_STDERR:=/dev/stderr}"
| true |
2d0557de46f7fd212aa6fbafaba924e64e29eef9 | Shell | Dioxylin/ratpoison_config | /bin/rpgimp.sh | UTF-8 | 590 | 3.234375 | 3 | [] | no_license | #!/bin/sh
FRAMEDUMP="$HOME/.rpframe_gimp"
if [ -f "$FRAMEDUMP" ]
then
ratpoison -c "frestore `cat "$FRAMEDUMP"`"
fi
if ! ps -C gimp >/dev/null
then
exec gimp "$@"
else
WGIMP=$(ratpoison -c "windows %n %t" \
| sed -ne 's/^\([0-9]\+\) The GIMP/\1/p')
WLAYERS=$(ratpoison -c "windows %n %t" \
| sed -ne 's/^\([0-9]\+\) Layers/\1/p')
ratpoison -c focus
ratpoison -c "select $WGIMP"
ratpoison -c focus
ratpoison -c "select $WLAYERS"
ratpoison -c focus
ratpoison -c other
ratpoison -c "echo The Gimp is already running."
fi
| true |
e603768ac2c9ad16bc4ebf0a6ee40d9507c62f32 | Shell | facebook/react-native | /packages/react-native-codegen/src/cli/parser/parser.sh | UTF-8 | 483 | 2.5625 | 3 | [
"MIT",
"CC-BY-4.0",
"CC-BY-NC-SA-4.0",
"CC-BY-SA-4.0"
] | permissive | #!/bin/bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
set -e
set -u
THIS_DIR=$(cd -P "$(dirname "$(realpath "${BASH_SOURCE[0]}" || echo "${BASH_SOURCE[0]}")")" && pwd)
# shellcheck source=xplat/js/env-utils/setup_env_vars.sh
source "$THIS_DIR/../../../../../../env-utils/setup_env_vars.sh"
exec "$FLOW_NODE_BINARY" "$THIS_DIR/parser.js" "$@"
| true |
92e77663e91f68b6fd325d92096200441b64d6cf | Shell | majes-github/scripts | /create_ftpasswd.sh | UTF-8 | 947 | 3.734375 | 4 | [] | no_license | #!/bin/sh
PASSWD_FILE=/daten/docker/secrets/proftpd.passwd
HOME_DEFAULT=/daten/upload
if [ $# -lt 2 ]; then
echo "Usage: $0 <user_name> <password> [home_dir]"
exit 1
fi
user="$1"
passwd="$2"
home="$3"
if [ $(id -u) -ne 0 ]; then
echo 'This script requires root privileges. Try again with "sudo".'
exit 2
fi
# create ftp users and directories
if [ ! -e $PASSWD_FILE ]; then
touch $PASSWD_FILE
chown root: $PASSWD_FILE
fi
hash=$(busybox mkpasswd -m md5 $passwd)
if grep "^$1:" $PASSWD_FILE >/dev/null; then
# user already exists
if [ -z "$home" ]; then
# do not change homedir
home=$(awk -F: '{ print $6 }' $PASSWD_FILE)
fi
sed -i "/^$1:/s|^\($1:\).*\(:.*:.*::\).*\(:.*\)|\1$hash\2$home\3|" $PASSWD_FILE
else
if [ -z "$home" ]; then
# use default for homedir
home=$HOME_DEFAULT
fi
id=$((6001 + $(wc -l < $PASSWD_FILE)))
echo "$user:$hash:$id:$id::$home:/bin/false" >> $PASSWD_FILE
fi
| true |
ab1f0e8bdb328c4fedd182722bb553aa908e7d9b | Shell | saketkanth/commcare-hq | /docker/entrypoint.sh | UTF-8 | 356 | 3.125 | 3 | [] | no_license | #!/bin/bash
set -e
if [ "$1" = 'help' ]; then
echo "Pass any commands to this container to have them run by the container eg:"
echo " - python manage.py runserver 0.0.0.0:8000 # this is the default"
echo " - python manage.py shell"
echo " - python manage.py migrate"
echo " - bash"
exit 0
fi
/mnt/docker/wait.sh
exec "$@"
| true |
f1086b0363215e57aa6d0c7fa1e6b9d3f0c05b1d | Shell | k0keoyo/nezha | /utils/build_helpers/build_dependencies.sh | UTF-8 | 3,571 | 3.5625 | 4 | [] | no_license | #!/bin/bash
CWD=`pwd`
source ${CWD}/utils/build_helpers/include.sh
mkdir -p ${BUILDS}
# ignore everything in these directories
echo "*" > ${BUILDS}/.gitignore
echo "[+] Installing examples dependencies (this might take a while)"
sudo apt-get -y install build-essential cmake llvm-3.8 clang-3.8 golang \
libssl-dev autogen autopoint libtool autoconf automake >/dev/null 2>&1
if ! [ -f /usr/bin/clang-3.8 ]; then
echo -e "\t\t -\033[0;31m Did not find clang-3.8";
echo -e "\t\t -\033[0;31m This should not have happened :(";
echo -e "\t\t -\033[0;31m Please attempt a manual installation or required packages through apt-get then rerun setup.sh";
echo -en "\e[0m";
exit 1;
fi
echo "[+] Downloading files"
if ! [ -d ${SRC_LIBS}/${OPENSSL} ]; then
echo -e "\t\t - Downloading OpenSSL in ${SRC_LIBS}/openssl"
wget -P ${SRC_LIBS} ${OPENSSL_ST} 2>/dev/null
fi
if ! [ -d ${SRC_LIBS}/${LIBRESSL} ]; then
echo -e "\t\t - Downloading LibreSSL in ${SRC_LIBS}/libressl"
wget -P ${SRC_LIBS} ${LIBRESSL_ST} 2>/dev/null
fi
if ! [ -d ${SRC_LIBS}/${BORINGSSL} ]; then
echo -e "\t\t - Downloading boringSSL in ${SRC_LIBS}/${BORINGSSL}"
git clone ${BORINGSSL_ST} ${SRC_LIBS}/${BORINGSSL} 2>/dev/null
fi
if ! [ -d ${SRC_LIBS}/${CLAMAV} ]; then
echo -e "\t\t - Downloading Clam-AV in ${SRC_LIBS}/clamav"
wget -P ${SRC_LIBS} ${CLAMAV_ST} 2>/dev/null
fi
if ! [ -d ${SRC_LIBS}/${XZUTILS} ]; then
echo -e "\t\t - Downloading XZ-Utils in ${SRC_LIBS}/xzutils"
git clone ${XZUTILS_ST} ${SRC_LIBS}/xzutils 2>/dev/null
fi
if ! [ -d ${SRC_LIBS}/${SSDEEP} ]; then
echo -e "\t\t - Downloading ssdeep in ${SRC_LIBS}/ssdeep"
git clone ${SSDEEP_ST} ${SRC_LIBS}/${SSDEEP} 2>/dev/null
fi
if ! [ -d ${SRC_LIBS}/${LIBFUZZER} ]; then
echo -e "\t\t - Downloading libFuzzer in ${SRC_LIBS}/libFuzzer"
git clone ${LF_ST} ${SRC_LIBS}/libFuzzer 2>/dev/null
fi
echo "[+] Extracting & installing dependencies"
pushd ${SRC_LIBS} >/dev/null
if [ -f openssl-1.0.2h.tar.gz ]; then
echo -e "\t\t - Extracting OpenSSL"
tar xzf openssl-1.0.2h.tar.gz
mv openssl-1.0.2h ${OPENSSL}
fi
if [ -f libressl-2.4.0.tar.gz ]; then
echo -e "\t\t - Extracting LibreSSL"
tar xzf libressl-2.4.0.tar.gz
mv libressl-2.4.0 ${LIBRESSL}
fi
if [ -f clamav-0.99.2.tar.gz ]; then
echo -e "\t\t - Extracting clamav"
tar xzf clamav-0.99.2.tar.gz
mv clamav-0.99.2 ${CLAMAV}
fi
if (! [ -f libFuzzer/libFuzzer.a ]); then
echo -e "\t\t - Installing libFuzzer"
pushd libFuzzer >/dev/null
clang++-3.8 -c -g -O2 -std=c++11 *.cpp -I. >/dev/null 2>&1
ar ruv libFuzzer.a Fuzzer*.o >/dev/null 2>&1
if [ -f libFuzzer.a ]; then
echo -e "\t\t\t -\033[0;32m OK\n";
echo -en "\e[0m";
else
echo -e "\t\t\t -\033[0;31m FAILED\n";
echo -en "\e[0m";
exit 1;
fi
popd >/dev/null
fi
echo -e "\t\t - Building ssdeep"
if [ -d ${SSDEEP} ]; then
pushd ${SSDEEP} >/dev/null
git checkout 9ca00aa37f1ca4c2dcb12978ef61fa8d12186ca7 >/dev/null 2>&1
pushd ssdeep-lib >/dev/null
autoreconf >/dev/null 2>&1
automake --add-missing >/dev/null 2>&1
autoreconf >/dev/null 2>&1
./configure --prefix=`pwd`/../../../../builds/libs/ssdeep-lib \
CC="clang-3.8" CXX="clang++-3.8" >/dev/null 2>&1
make && make install >/dev/null 2>&1
popd >/dev/null
popd >/dev/null
fi
# cleanup
rm -f *gz *tar
popd >/dev/null
| true |
e9961de36e0956b7ab74397720ef5b00c87b5a5e | Shell | alexluigit/xvimux | /xvimux | UTF-8 | 1,190 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
dir=$1
vim_edge_tmux=${2:-false}
tmux_navigation() {
_pane_at_edge() { tmux display -p "#{pane_at_$tmux_check}"; }
_tmux_navigation() { tmux selectp -$dir_tmux; }
local before=$(_pane_at_edge)
[ "$before" -eq 1 ] && exit 1 || { _tmux_navigation; exit 0; }
[[ "$before" == "$(_pane_at_edge)" ]] && exit 1 || exit 0
}
vim_navigation() {
xvkbd -xsendevent -text "\C$keysym"
_buf_at_edge() { cat ~/.cache/xvimux; }
sleep .1; [[ "$(_buf_at_edge)" == 1 ]] && exit 1 || exit 0
}
window_is_vim() { test $(xdotool getwindowfocus getwindowname) == 'nvim'; }
window_is_tmux() {
wid=$(xdotool getwindowfocus)
w_pid=$(xprop -id $wid _NET_WM_PID | awk '{print $3}')
grand_child_pid=$(pgrep -P $(pgrep -P $w_pid))
curr_cmd=$(ps -o cmd= $grand_child_pid)
[[ "$curr_cmd" == "tmux"* ]]
}
case "$dir" in
west) keysym="h"; dir_tmux="L"; tmux_check="left";;
south) keysym="j"; dir_tmux="D"; tmux_check="bottom";;
north) keysym="k"; dir_tmux="U"; tmux_check="top";;
east) keysym="l"; dir_tmux="R"; tmux_check="right";;
esac
"$vim_edge_tmux" && tmux_navigation
if $(window_is_vim); then vim_navigation
elif $(window_is_tmux); then tmux_navigation
else exit 1;
fi
| true |
b01d57fe921d54f10ec02360c7d8d83ac27b96cc | Shell | mikaelvg/PiggyMetrics | /zipkin/entrypoint.sh | UTF-8 | 204 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/bin/sh -e
echo "The application will start in ${APP_SLEEP}s..." && sleep ${APP_SLEEP}
exec java ${JAVA_OPTS} \
-Djava.security.egd=file:/dev/./urandom \
-jar "${HOME}/app.jar" ${RUN_ARGS} "$@" | true |
91cdc0ae4565e6c379939f0c79dc9ea2345e615b | Shell | wangandrewt/owncloud-openshift-quickstart | /.openshift/cron/minutely/owncloud.sh | UTF-8 | 421 | 3.265625 | 3 | [] | no_license | #!/bin/bash
#
# Execute background job every 15 minutes
if [[ -f $OPENSHIFT_REPO_DIR/php/cron.php ]] ; then
if [[ $(( $(date +%M) % 15 )) -eq 0 ]] ; then
printf "{\"app\":\"Cron\",\"message\":\"%s\",\"level\":2,\"time\":%s}\n" "Running cron job" $(date +%s) >> $OPENSHIFT_DATA_DIR/owncloud.log
pushd $OPENSHIFT_REPO_DIR/php &> /dev/null
php -f cron.php
popd &> /dev/null
fi
fi
| true |
e014ab7ec024e772bdcd96549a41807c07ab028a | Shell | mgrad/tinytools | /join-by-columns.sh | UTF-8 | 860 | 3.546875 | 4 | [] | no_license | #!/bin/bash
function run {
BASE=$1
INCR=$2
RESULTS=$3
awk '
NR==FNR{f1[NR]=$0} # read first file to array
NR!=FNR{
n=split(f1[FNR], f1_line," "); # split data from 1st file
skip=n/NF
for(i=1; i<n+1; i++) {
printf "%s ", f1_line[i]
if (i%skip==0)
printf "%s ", $(i/skip)
}
printf "\n";
}
END{
if (length(array)!=FNR) {print "Warning: wrong number of rows"}
}' $BASE $INCR > $RESULTS
}
# ============== main ============== #
if [ $# -lt 2 ]; then
echo "usage: $0 base_file file_to_add [.. files_to_add]"
exit 1
fi;
BASE=$1
FIRST=$BASE
shift 1
while [ $# -ne 0 ]; do
INCR=$1
shift 1
RESULTS="$BASE.tmp"
run $BASE $INCR $RESULTS
BASE=$RESULTS
done
cat $RESULTS
rm -rf $FIRST.tmp*
| true |
c6cc1fc8852d063ddd316a5dd0d5a029aa177feb | Shell | sol1n/tmk-workers | /vagrant/bootstrap.sh | UTF-8 | 3,211 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Vagrant VM provision script
provisioningStartTime=`date +%s`
echo ""
echo "Starting Vagrant provision script"
## Pre-requesites
# Since we have 512mb RAM it's recommended to enable swap before we run any command.
/vagrant/vagrant/scripts/swap.sh
# All environments should be running in one timezone, use Europe/Moscow as default.
sudo bash -c 'echo "Europe/Moscow" > /etc/timezone'
sudo dpkg-reconfigure -f noninteractive tzdata
# /vagrant directory is a synced folder (see config.vm.synced_folder in Vagrantfile)
# Let's store all important info in this driectory.
cd /vagrant
# First of all, upgrade system packages.
sudo apt-get update
sudo apt-get dist-upgrade -y
# Add required repositories
sudo LC_ALL=C.UTF-8 add-apt-repository ppa:ondrej/php -y
sudo apt-add-repository ppa:rwky/redis -y
sudo apt-get update
## Install core packages.
sudo apt-get install software-properties-common python-software-properties -y
sudo apt-get install unzip -y
sudo apt-get install git-core -y
sudo apt-get install nginx -y
sudo apt-get install memcached -y
# PHP 7.1 is our primary PHP version.
sudo apt-get install php7.1-fpm -y
sudo apt-get install php7.1-dev -y
# PHP's extensions.
sudo apt-get install php7.1-curl -y
sudo apt-get install php7.1-memcached -y
sudo apt-get install php7.1-mbstring -y
sudo apt-get install php7.1-xml -y
sudo apt-get install php7.1-zip -y
# Additional extensions.
sudo pecl channel-update pecl.php.net
# PHPUnit for testing our code.
wget https://phar.phpunit.de/phpunit.phar
chmod +x phpunit.phar
sudo mv phpunit.phar /usr/local/bin/phpunit
# Composer is our dependency manager.
curl -sS https://getcomposer.org/installer | sudo php -- --install-dir=/usr/local/bin --filename=composer
# php-cs-fixer is used to fix PHP code styles in our sources before commit.
wget http://get.sensiolabs.org/php-cs-fixer.phar -O php-cs-fixer
sudo chmod a+x php-cs-fixer
sudo mv php-cs-fixer /usr/local/bin/php-cs-fixer
# As of June 9, 2016 there's a bug on sensiolabs.org:
# instead of installing stable release of php-cs-fixer
# it installs 2.0-DEV version.
# Use php-cs-fixer selfupdate to install stable release.
# https://github.com/FriendsOfPHP/PHP-CS-Fixer/issues/1925#issuecomment-224208657
php-cs-fixer selfupdate
## Configs
# Add nginx vhosts.
sudo rm /etc/nginx/sites-enabled/default
sudo rm /etc/nginx/sites-available/default
sudo ln -s /vagrant/vagrant/configs/nginx-vhosts.conf /etc/nginx/sites-enabled/vhosts.conf
# Add some php.ini tweak.
sudo ln -s /vagrant/vagrant/configs/php.ini /etc/php/7.1/fpm/conf.d/00-php.ini
sudo ln -s /vagrant/vagrant/configs/php.ini /etc/php/7.1/cli/conf.d/00-php.ini
# Export some paths to $PATH env variable.
echo 'export PATH="$PATH:/usr/local/bin"' >> ~/.bashrc
echo 'export PATH="$PATH:$HOME/.composer/vendor/bin"' >> ~/.bashrc
source ~/.bashrc
## Finish
# Cleanup unused packages.
sudo apt-get autoremove -y
# Restart services.
sudo service nginx restart
sudo service php7.1-fpm restart
# Ok, we're ready.
provisioningEndTime=`date +%s`
provisioningRunTime=$((provisioningEndTime-provisioningStartTime))
provisioningMinutes=$((provisioningRunTime/60))
echo ""
echo "Provisioned in $provisioningMinutes minutes" | true |
4a82cd4e32d741efddb75ca34d301ac13cbd715d | Shell | D1ARK-VA4U3/fb-video | /fb.py | UTF-8 | 2,381 | 2.65625 | 3 | [] | no_license | #!/bin/bash
clear
echo ""
echo -e "\e[5;32m
CCCCCCCCCCCCCKKKKKKKKK KKKKKKK SSSSSSSSSSSSSSS
CCC::::::::::::CK:::::::K K:::::K SS:::::::::::::::S
CC:::::::::::::::CK:::::::K K:::::KS:::::SSSSSS::::::S
C:::::CCCCCCCC::::CK:::::::K K::::::KS:::::S SSSSSSS
C:::::C CCCCCCKK::::::K K:::::KKKS:::::S
C:::::C K:::::K K:::::K S:::::S
C:::::C K::::::K:::::K S::::SSSS
C:::::C K:::::::::::K SS::::::SSSSS
C:::::C K:::::::::::K SSS::::::::SS
C:::::C K::::::K:::::K SSSSSS::::S
C:::::C K:::::K K:::::K S:::::S
C:::::C CCCCCCKK::::::K K:::::KKK S:::::S
C:::::CCCCCCCC::::CK:::::::K K::::::KSSSSSSS S:::::S
CC:::::::::::::::CK:::::::K K:::::KS::::::SSSSSS:::::S
CCC::::::::::::CK:::::::K K:::::KS:::::::::::::::SS
CCCCCCCCCCCCCKKKKKKKKK KKKKKKK SSSSSSSSSSSSSSS
"
echo -e "\e[1;31m _______ Code By D1ARK-VA4U3 _______
"
echo "1. Termux "
echo ""
echo "2. Linux"
echo ""
read -p "Enter Choose :: " s
if [[ $s == 1 ]];then
termux-setup-storage
mkdir /sdcard/fb-video
pkg install python -y
pip install --upgrade pip
pip install fb-down
clear
echo -e "\e[1;32m Code By D1ARK-VA4U3 "
echo ""
read -p "Enter the Video Url Link :: " url
echo ""
read -p "Enter the you want to give :: " s
echo ""
echo -e "\e[1;32m××××××××××× start ×××××××××××"
fbdown $url --output /sdcard/fb-video/$s.mp4
sleep 2
clear
echo -e "\e[1;32m____________ Done ___________"
elif [[ $s == 2 ]];then
sudo pip install fb-down
mkdir fb-video
clear
echo -e "\e[1;32m Code By D1ARK-VA4U3 "
echo ""
read -p "Enter the Video Url Link :: " url
echo ""
read -p "Enter the you want to give :: " s
echo ""
echo -e "\e[1;32m××××××××××× start ×××××××××××"
fbdown $url --output fb-video/$s.mp4 | grep url
sleep 2
clear
echo -e "\e[1;32m____________ Done ____________"
fi
| true |
e17620138050d05fa2eaa10c51da4a9f44adfa3f | Shell | cms-edbr/ExoDiBosonCombination | /cards/Wprime_EXO-15-002/prepareCombinedCardsAndWS.sh | UTF-8 | 11,824 | 3.3125 | 3 | [] | no_license | #! /bin/bash
# define input
if [ $# -lt 1 ]
then
echo "Need one input: mass point"
exit 1
fi
MASS=$1
echo "Merging M=${MASS}"
ZHDIR="cards_ZH"
WHDIR="cards_WH"
VHDIR="cards_VH"
ZZDIR="ZZ_cards/${MASS}"
WWDIR="WW_cards"
JJDIR="JJ_cards"
ZZ13DIR="ZZ_cards_13TeV"
WW13DIR="WW_cards_13TeV"
JJ13DIR="JJ_cards_13TeV"
#prepare output
OUTDIR="comb_${MASS}"
mkdir $OUTDIR/
### ZH only
LABEL="xzh"
COMBZHCARD="comb_${LABEL}.${MASS}.txt"
COMBRS1ZHCARD="datacard_${MASS}_interpolate_adapt.txt"
if [ $MASS -le 2500 ]
then
echo "Moving to "${ZHDIR}/
sed -e 's|lumi|lumi_8TeV|g' -e 's|PUReweighting|CMS_pu|g' -e 's|VTag|CMS_eff_tau21_sf|g' -e 's|EleScale|CMS_scale_e|g' -e 's|EleResol|CMS_res_e|g' -e 's|MuoScale|CMS_scale_m|g' -e 's|MuoResol|CMS_res_m|g' -e 's|EleID|CMS_eff_e|g' -e 's|MuoID|CMS_eff_m|g' -e 's|JES|CMS_scale_j|g' -e 's|JER|CMS_res_j|g' -e 's|BTagSyst|CMS_btagger|g' < ${ZHDIR}/${COMBRS1ZHCARD} &> $OUTDIR/${COMBZHCARD}
fi
### VH only
LABEL="xvh"
VHCARDORIG="CMS_jj_HVqq_${MASS}_8TeV_CMS_jj_HVCombined_adapt.txt"
COMBVHCARD="comb_${LABEL}.${MASS}.txt"
if [ $MASS -ge 1000 ] && [ $MASS -le 2600 ]
then
###sed -e '/CMS_sig_p/ s|0|0.0|g' -e '/CMS_sig_p/ s|1|1.0|g' < CMS_vh_Bulk_1200_8TeV_CMS_vh_VH.txt
sed -e 's|datacards/../workspaces/||g' -e 's|datacards/../HbbVqqHwwworkspaces/||g' -e 's|CMS_Btagging|CMS_doubleBtagging|g' < ${VHDIR}/${VHCARDORIG} &> $OUTDIR/${COMBVHCARD}
### sed -e 's|datacards/../workspaces/||g' -e '/CMS_sig_p/ s|0|0.0|' -e '/CMS_sig_p1/ s|1|1.0|2' -e '/CMS_sig_p2/ s|1|1.0|' < ${VHDIR}/datacards/${VHCARDORIG} &> $OUTDIR/${COMBVHCARD}
# cp ${VHDIR}/datacards/${VHCARDORIG} $OUTDIR/${COMBVHCARD}
cp ${VHDIR}/CMS_jj_*${MASS}*.root ${OUTDIR}/
cp ${VHDIR}/CMS_jj_bkg_8TeV.root ${OUTDIR}/
fi
### WH only
LABEL="xwh"
WHBASE="whlvj_MWp_${MASS}_bb"
WHELEBASE="cards_el/${WHBASE}_el"
WHMUBASE="cards_mu/${WHBASE}_mu"
EXOWHCARDS="${LABEL}_mv1JLP=${WHMUBASE}_ALLP_unbin.txt ${LABEL}_ev1JLP=${WHELEBASE}_ALLP_unbin.txt"
COMBWHCARD="comb_${LABEL}.${MASS}.txt"
if [ $MASS -le 2500 ]
then
cd ${WHDIR}/
combineCards.py $EXOWHCARDS &> tmp_XWH_card.txt
sed -e 's|cards_mu/||g' -e 's|cards_el/||g' -e 's|CMS_xwh_prunedmass|CMS_jet_mass|g' -e 's|CMS_xwh_btagger|CMS_btagger|g' -e 's|CMS_xwh_btag_eff|CMS_doubleBtagging|g' < tmp_XWH_card.txt > ${COMBWHCARD}
cd -
cp ${WHDIR}/${COMBWHCARD} ${OUTDIR}/${COMBWHCARD}
cp ${WHDIR}/cards_el/${WHBASE}_*workspace.root ${OUTDIR}/
cp ${WHDIR}/cards_mu/${WHBASE}_*workspace.root ${OUTDIR}/
fi
### ZZ only
LABEL="xzz"
COMBZZ8CARD="comb_${LABEL}.${MASS}.txt"
COMBZZHVTCARD="comb_${LABEL}_hvt.${MASS}.txt"
if [ $MASS -le 2500 ]
then
echo "Moving to "${ZZDIR}/
cp ${ZZDIR}/${COMBZZHVTCARD} ${OUTDIR}/${COMBZZ8CARD}
cp ${ZZDIR}/${LABEL}_*input*.root ${OUTDIR}/
fi
### ZZ13 only
LABEL="xzz13"
#EXOZZ13LPCARDS="CMS_ZZ_ELP=CMS_ZZ_${MASS}_ELP_13TeV.txt CMS_ZZ_MLP=CMS_ZZ_${MASS}_MLP_13TeV.txt"
EXOZZ13HPCARDS="CMS_ZZ_EHP=CMS_ZZ_${MASS}_EHP_13TeV.txt CMS_ZZ_MHP=CMS_ZZ_${MASS}_MHP_13TeV.txt"
#EXOZZ13CARDS="$EXOZZ13HPCARDS $EXOZZ13LPCARDS"
COMBZZ13CARD="comb_${LABEL}.${MASS}.txt"
COMBFIXZZ13CARD="comb_${LABEL}_bulkfix.${MASS}.txt"
if [ $MASS -ge 800 ]
then
echo "Moving to "${ZZ13DIR}/
cd ${ZZ13DIR}/
pwd
combineCards.py $EXOZZ13HPCARDS &> ${COMBZZ13CARD}
cd -
python adapt_xsec_ZZ_13TeV.py ${MASS}
cp ${ZZ13DIR}/${COMBFIXZZ13CARD} ${OUTDIR}/${COMBZZ13CARD}
sed -e 's|workSpaces/||g' < ${ZZ13DIR}/${COMBFIXZZ13CARD} &> ${OUTDIR}/${COMBZZ13CARD}
cp ${ZZ13DIR}/CMS_ZZ*.root ${OUTDIR}/
fi
### JJ only
LABEL="xjj8"
JJCARDORIG="CMS_jj_HVT_${MASS}_8TeV_CMS_jj_VV.txt" ##Andreas gives us cards with WW and ZZ already merged
COMBJJ8CARD="comb_${LABEL}.${MASS}.txt"
if [ $MASS -ge 1000 ]
then
###sed -e '/CMS_sig_p/ s|0|0.0|g' -e '/CMS_sig_p/ s|1|1.0|g' < CMS_jj_Bulk_1200_8TeV_CMS_jj_VV.txt
sed -e 's|datacards/../workspaces/||g' -e 's|CMS_jj_bkg_8TeV|CMS_jj_bkg_WZ_8TeV|g' < ${JJDIR}/datacards/${JJCARDORIG} &> $OUTDIR/${COMBJJ8CARD}
### sed -e 's|datacards/../workspaces/||g' -e '/CMS_sig_p/ s|0|0.0|' -e '/CMS_sig_p1/ s|1|1.0|2' -e '/CMS_sig_p2/ s|1|1.0|' < ${JJDIR}/datacards/${JJCARDORIG} &> $OUTDIR/${COMBJJ8CARD}
# cp ${JJDIR}/datacards/${JJCARDORIG} $OUTDIR/${COMBJJ8CARD}
cp ${JJDIR}/workspaces/CMS_jj_WZ*${MASS}*.root ${OUTDIR}/
cp ${JJDIR}/workspaces/CMS_jj_bkg_8TeV.root ${OUTDIR}/CMS_jj_bkg_WZ_8TeV.root
fi
### JJ 13TeV only
LABEL="xjj13"
JJ13CARDORIG="CMS_jj_WZfix_${MASS}_13TeV_CMS_jj_VVnew.txt"
COMBJJ13CARD="comb_${LABEL}.${MASS}.txt"
if [ $MASS -ge 1200 ]
then
python adapt_xsec_JJ_13TeV.py ${MASS}
###sed -e '/CMS_sig_p/ s|0|0.0|g' -e '/CMS_sig_p/ s|1|1.0|g' < CMS_jj_Bulk_1200_8TeV_CMS_jj_VV.txt
sed -e 's|datacards_withPDFuncertainties/../workspaces/||g' -e 's|datacards/../workspaces/||g' -e 's|../workspaces/||g' < ${JJ13DIR}/${JJ13CARDORIG} &> $OUTDIR/${COMBJJ13CARD}
### sed -e 's|datacards/../workspaces/||g' -e '/CMS_sig_p/ s|0|0.0|' -e '/CMS_sig_p1/ s|1|1.0|2' -e '/CMS_sig_p2/ s|1|1.0|' < ${JJDIR}/datacards/${JJCARDORIG} &> $OUTDIR/${COMBJJ8CARD}
# cp ${JJDIR}/datacards/${JJCARDORIG} $OUTDIR/${COMBJJ8CARD}
cp ${JJ13DIR}/CMS_jj_WZ*${MASS}*.root ${OUTDIR}/
cp ${JJ13DIR}/CMS_jj_bkg_13TeV.root ${OUTDIR}/
fi
### JJ 13TeV only
LABEL="xjj13old"
JJ13oldCARDORIG="CMS_jj_WZfix_${MASS}_13TeV_CMS_jj_VV.txt"
COMBJJ13oldCARD="comb_${LABEL}.${MASS}.txt"
if [ $MASS -ge 1200 ]
then
###sed -e '/CMS_sig_p/ s|0|0.0|g' -e '/CMS_sig_p/ s|1|1.0|g' < CMS_jj_Bulk_1200_8TeV_CMS_jj_VV.txt
sed -e 's|datacards_withPDFuncertainties/../workspaces/||g' -e 's|datacards/../workspaces/||g' -e 's|../workspaces/||g' < ${JJ13DIR}/${JJ13oldCARDORIG} &> $OUTDIR/${COMBJJ13oldCARD}
### sed -e 's|datacards/../workspaces/||g' -e '/CMS_sig_p/ s|0|0.0|' -e '/CMS_sig_p1/ s|1|1.0|2' -e '/CMS_sig_p2/ s|1|1.0|' < ${JJDIR}/datacards/${JJCARDORIG} &> $OUTDIR/${COMBJJ8CARD}
# cp ${JJDIR}/datacards/${JJCARDORIG} $OUTDIR/${COMBJJ8CARD}
cp ${JJ13DIR}/CMS_jj_WZ*${MASS}*.root ${OUTDIR}/
cp ${JJ13DIR}/CMS_jj_bkg_13TeV.root ${OUTDIR}/
fi
### JJ 13TeV only
LABEL="xjj13hp"
JJ13hpCARDORIG="CMS_jj_WZfix_${MASS}_13TeV_CMS_jj_VVHPnew.txt"
COMBJJ13hpCARD="comb_${LABEL}.${MASS}.txt"
if [ $MASS -ge 1200 ]
then
###sed -e '/CMS_sig_p/ s|0|0.0|g' -e '/CMS_sig_p/ s|1|1.0|g' < CMS_jj_Bulk_1200_8TeV_CMS_jj_VV.txt
sed -e 's|datacards_withPDFuncertainties/../workspaces/||g' -e 's|datacards/../workspaces/||g' -e 's|../workspaces/||g' < ${JJ13DIR}/${JJ13hpCARDORIG} &> $OUTDIR/${COMBJJ13hpCARD}
### sed -e 's|datacards/../workspaces/||g' -e '/CMS_sig_p/ s|0|0.0|' -e '/CMS_sig_p1/ s|1|1.0|2' -e '/CMS_sig_p2/ s|1|1.0|' < ${JJDIR}/datacards/${JJCARDORIG} &> $OUTDIR/${COMBJJ8CARD}
# cp ${JJDIR}/datacards/${JJCARDORIG} $OUTDIR/${COMBJJ8CARD}
cp ${JJ13DIR}/CMS_jj_WZ*${MASS}*.root ${OUTDIR}/
cp ${JJ13DIR}/CMS_jj_bkg_13TeV.root ${OUTDIR}/
fi
### WW only
LABEL="xww"
WWBASE="wwlvj_BulkG_WW_inclusive_c0p2_M${MASS}"
COMBWW8CARD="comb_${LABEL}.${MASS}.txt"
COMBWWHVTCARD="comb_${LABEL}_hvt.${MASS}.txt"
if [ $MASS -ge 800 ] && [ $MASS -le 2500 ]
then
cp ${WWDIR}/${COMBWWHVTCARD} ${OUTDIR}/${COMBWW8CARD}
cp ${WWDIR}/${WWBASE}_*workspace.root ${OUTDIR}/
fi
### WW 13 TeV only
LABEL="xww13"
EXOWW13CARDS="wwlvj_Wprimefix_WZ_lvjj_M${MASS}_combo_ALLP_unbin.txt"
COMBWW13CARD="comb_${LABEL}.${MASS}.txt"
if [ $MASS -ge 800 ]
then
python adapt_xsec_WW_13TeV.py ${MASS}
sed -e 's|cards_mu_HPW/||g' -e 's|cards_mu_HPZ/||g' -e 's|cards_mu_LPW/||g' -e 's|cards_mu_LPZ/||g' -e 's|cards_el_HPW/||g' -e 's|cards_el_HPZ/||g' -e 's|cards_el_LPW/||g' -e 's|cards_el_LPZ/||g' < ${WW13DIR}/${EXOWW13CARDS} &> $OUTDIR/${COMBWW13CARD}
cp ${WW13DIR}/cards_*/*.root ${OUTDIR}/
fi
###put things together
cd $OUTDIR/
COMBALLCARD="comb_ALL.${MASS}.txt"
COMBSEMILEPCARD="comb_SEMILEPT.${MASS}.txt"
COMBSEMILEP813CARD="comb_SEMILEPT813.${MASS}.txt"
COMBJJ813CARD="comb_JJ813.${MASS}.txt"
COMBWW813CARD="comb_WW813.${MASS}.txt"
COMBZZ813CARD="comb_ZZ813.${MASS}.txt"
COMBALL13CARD="comb_ALL13.${MASS}.txt"
COMBALL813CARD="comb_ALL813.${MASS}.txt"
COMBJAM13CARD="comb_JAM13.${MASS}.txt"
COMBJAM813CARD="comb_JAM813.${MASS}.txt"
if [ $MASS -lt 800 ]
then
combineCards.py $COMBZZ8CARD &> $COMBZZ813CARD
combineCards.py $COMBZZ8CARD &> $COMBSEMILEPCARD
combineCards.py $COMBZZ8CARD &> $COMBALLCARD
combineCards.py $COMBZZ8CARD &> $COMBALL813CARD
combineCards.py $COMBZZ8CARD &> $COMBJAM813CARD
elif [ $MASS -lt 1000 ]
then
combineCards.py $COMBZZ8CARD $COMBZZ13CARD &> $COMBZZ813CARD
combineCards.py $COMBWW8CARD $COMBWW13CARD &> $COMBWW813CARD
combineCards.py $COMBWW8CARD $COMBZZ8CARD &> $COMBSEMILEPCARD
combineCards.py $COMBWW8CARD $COMBWW13CARD $COMBZZ8CARD &> $COMBSEMILEP813CARD
combineCards.py $COMBWW8CARD $COMBZZ8CARD &> $COMBALLCARD
combineCards.py $COMBZZ13CARD $COMBWW13CARD &> $COMBALL13CARD
combineCards.py $COMBWW8CARD $COMBWW13CARD $COMBZZ13CARD $COMBZZ8CARD &> $COMBALL813CARD
combineCards.py $COMBWW13CARD &> $COMBJAM13CARD
combineCards.py $COMBWW8CARD $COMBWW13CARD $COMBZZ8CARD &> $COMBJAM813CARD
elif [ $MASS -lt 1200 ]
then
combineCards.py $COMBJJ8CARD &> $COMBJJ813CARD
combineCards.py $COMBZZ8CARD $COMBZZ13CARD &> $COMBZZ813CARD
combineCards.py $COMBWW8CARD $COMBWW13CARD &> $COMBWW813CARD
combineCards.py $COMBWW8CARD $COMBZZ8CARD &> $COMBSEMILEPCARD
combineCards.py $COMBWW8CARD $COMBWW13CARD $COMBZZ8CARD &> $COMBSEMILEP813CARD
combineCards.py $COMBJJ8CARD $COMBWW8CARD $COMBZZ8CARD &> $COMBALLCARD
combineCards.py $COMBZZ13CARD $COMBWW13CARD &> $COMBALL13CARD
combineCards.py $COMBJJ8CARD $COMBWW8CARD $COMBWW13CARD $COMBZZ13CARD $COMBZZ8CARD &> $COMBALL813CARD
combineCards.py $COMBWW13CARD &> $COMBJAM13CARD
combineCards.py $COMBJJ8CARD $COMBWW8CARD $COMBWW13CARD $COMBZZ8CARD &> $COMBJAM813CARD
elif [ $MASS -le 2500 ]
then
combineCards.py $COMBJJ8CARD $COMBJJ13CARD &> $COMBJJ813CARD
combineCards.py $COMBZZ8CARD $COMBZZ13CARD &> $COMBZZ813CARD
combineCards.py $COMBWW8CARD $COMBWW13CARD &> $COMBWW813CARD
combineCards.py $COMBWW8CARD $COMBZZ8CARD &> $COMBSEMILEPCARD
combineCards.py $COMBWW8CARD $COMBWW13CARD $COMBZZ8CARD &> $COMBSEMILEP813CARD
combineCards.py $COMBJJ8CARD $COMBWW8CARD $COMBZZ8CARD &> $COMBALLCARD
combineCards.py $COMBJJ13CARD $COMBWW13CARD $COMBZZ13CARD &> $COMBALL13CARD
combineCards.py $COMBJJ8CARD $COMBJJ13CARD $COMBWW8CARD $COMBWW13CARD $COMBZZ8CARD $COMBZZ13CARD &> $COMBALL813CARD
combineCards.py $COMBJJ13CARD $COMBWW13CARD &> $COMBJAM13CARD
combineCards.py $COMBJJ8CARD $COMBJJ13CARD $COMBWW8CARD $COMBWW13CARD $COMBZZ8CARD &> $COMBJAM813CARD
elif [ $MASS -le 2900 ]
then
combineCards.py $COMBJJ8CARD $COMBJJ13CARD &> $COMBJJ813CARD
combineCards.py $COMBZZ13CARD &> $COMBZZ813CARD
combineCards.py $COMBWW13CARD &> $COMBWW813CARD
combineCards.py $COMBWW13CARD &> $COMBSEMILEP813CARD
combineCards.py $COMBJJ8CARD &> $COMBALLCARD
combineCards.py $COMBJJ13CARD $COMBWW13CARD $COMBZZ13CARD &> $COMBALL13CARD
combineCards.py $COMBJJ8CARD $COMBJJ13CARD $COMBWW13CARD $COMBZZ13CARD &> $COMBALL813CARD
combineCards.py $COMBJJ13CARD $COMBWW13CARD &> $COMBJAM13CARD
combineCards.py $COMBJJ8CARD $COMBJJ13CARD $COMBWW13CARD &> $COMBJAM813CARD
else
combineCards.py $COMBJJ13CARD &> $COMBJJ813CARD
combineCards.py $COMBZZ13CARD &> $COMBZZ813CARD
combineCards.py $COMBWW13CARD &> $COMBWW813CARD
combineCards.py $COMBWW13CARD &> $COMBSEMILEP813CARD
combineCards.py $COMBJJ13CARD $COMBWW13CARD $COMBZZ13CARD &> $COMBALL13CARD
combineCards.py $COMBJJ13CARD $COMBWW13CARD $COMBZZ13CARD &> $COMBALL813CARD
combineCards.py $COMBJJ13CARD $COMBWW13CARD &> $COMBJAM13CARD
combineCards.py $COMBJJ13CARD $COMBWW13CARD &> $COMBJAM813CARD
fi
| true |
4465ff1bb04d94e58c3ea5af0ef2ef5088b8a4d7 | Shell | Beracah-Group/colossus | /bin/deploy.sh | UTF-8 | 948 | 3.203125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
su - colossus -c "git -C ~/colossus pull origin master"
migrations="$(/home/colossus/venv/bin/python /home/colossus/colossus/manage.py showmigrations --plan | grep -v '\[X\]')"
if [[ $migrations = *[!\ ]* ]]; then
githash="$(git -C /home/colossus/colossus rev-parse --short HEAD^1)"
now=`date +%Y%m%d%H%M%S`
dumpfile="colossus_db_backup_${githash}_${now}.sql"
su - postgres -c "pg_dump colossus > ${dumpfile}"
echo "Created database backup (${dumpfile}) due to changes on schema."
else
echo "Skipped backup. No changes on the database schema."
fi
su - colossus -c "~/venv/bin/pip install -r ~/colossus/requirements.txt"
su - colossus -c "~/venv/bin/python ~/colossus/manage.py migrate"
su - colossus -c "~/venv/bin/python ~/colossus/manage.py collectstatic --noinput"
sudo supervisorctl restart colossus_gunicorn
sudo supervisorctl restart colossus_celery_worker
sudo supervisorctl restart colossus_celery_beat
exit 0
| true |
c51055c2b16770d512be52d855132d9e44335a9e | Shell | lucaswannen/source_code_classification_with_CNN | /dataset_v2/bash/8918159.txt | UTF-8 | 349 | 3.609375 | 4 | [] | no_license | #!/bin/bash
HOSTS="IP ADRESS"
COUNT=4
for myHost in $HOSTS
do
count=$(ping -c $COUNT $myHost | grep 'received' | awk -F',' '{ print $2 }' | a$
if [ $count -eq 0 ]; then
# 100% failed
echo "Server failed at $(date)" | mail -s "Server Down" myadress@gmail.com
echo "Host : $myHost is down (ping failed) at $(date)"
fi
done
| true |
e8a5e9d2fee38e7644f2b8829174e0157db68a50 | Shell | Sam91/vmc_general | /scripts/local_submit.sh | UTF-8 | 152 | 2.65625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
timestamp=`date +%y%m%d%H%M%S`
LOG=$HOME/vmc_general/log/${MACHINE}_${timestamp}
nice -n 19 $HOME/vmc_general/bin/$1 &> ${LOG} &
echo $!
| true |
5e5f74c9cf0b6e498571434b777cfd686100e06e | Shell | jeffersonmartin/code-examples | /laravel/mvc-basics/step-by-step/02_create_a_database_migration.sh | UTF-8 | 607 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Run the following commands to create a database migration file, which is a
# abstraction layer recipe for Laravel to perform a CREATE TABLE query.
# Navigate to the migrations folder
cd ~/Sites/presentation/mvc-basics/database/migrations/
# Delete the existing default template migrations
rm *
# Navigate to the top directory of your Laravel application
cd ~/Sites/presentation/mvc-basics
# Use the Laravel CLI toolset called "artisan" to create a migration file.
# The "make" command is just a helper. You could make any file by hand.
php artisan make:migration create_event_tickets_table | true |
1861b994224df7058b43d958bd99117798cbe0cd | Shell | axldns/selenode | /build.sh | UTF-8 | 185 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | [ -n "$1" ] && VERSION=$1 || VERSION="14.17.6"
IMAGE_NAME="axldns/selenode"
docker build --build-arg NODE_VERSION=$VERSION -t "$IMAGE_NAME:$VERSION" .
docker push $IMAGE_NAME --all-tags | true |
692f22f3a5be27c4bacf9417d838845821baa10e | Shell | RajThakkar/OS_121041 | /Lab3/1_1all.sh | UTF-8 | 175 | 2.90625 | 3 | [] | no_license | clear
echo "enter a number: "
read a
echo "enter a number: "
read b
echo `expr $a + $b`
echo `expr $a - $b`
echo `expr $a \* $b`
echo `expr $a \/ $b`
echo `expr $a % $b`
| true |
3aece29acb589a07c59b570bd644447760ac676e | Shell | netresearch/git-client-hooks | /hooks/pre-commit-check-coding-style | UTF-8 | 3,604 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# PHP CodeSniffer pre-commit hook for git
#
# @author Soenke Ruempler <soenke@ruempler.eu>
# @author Sebastian Kaspari <s.kaspari@googlemail.com>
# @author Christian Weiske <christian.weiske@netresearch.de>
#
# see the README
TMP_STAGING=".tmp_staging"
hookfile="`readlink -f "$0"`"
HOOKHOME="`dirname "$hookfile"`"
# stolen from template file
if git rev-parse --quiet --verify HEAD > /dev/null; then
against=HEAD
else
# Initial commit: diff against an empty tree object
against=4b825dc642cb6eb9a060e54bf8d69288fbee4904
fi
# this is the magic:
# retrieve all files in staging area that are added, modified or renamed
# but no deletions etc
FILES=$(git diff-index --name-only --cached --diff-filter=ACMR $against -- )
if [ "$FILES" == "" ]; then
exit 0
fi
# match files against whitelist
FILES_TO_CHECK=""
for FILE in $FILES; do
EXT=${FILE##*.}
CHECK=1
case $EXT in
"php" | "phtml" )
PHP_FILES="$PHP_FILES $TMP_STAGING/$FILE"
LINE_FILES="$LINE_FILES $TMP_STAGING/$FILE"
;;
htm | html | xml )
XML_FILES="$XML_FILES $TMP_STAGING/$FILE"
LINE_FILES="$LINE_FILES $TMP_STAGING/$FILE"
;;
js )
JS_FILES="$JS_FILES $TMP_STAGING/$FILE"
LINE_FILES="$LINE_FILES $TMP_STAGING/$FILE"
;;
json )
JSON_FILES="$JSON_FILES $TMP_STAGING/$FILE"
;;
css | rst | tpl | txt | ts | tss | tsc )
LINE_FILES="$LINE_FILES $TMP_STAGING/$FILE"
;;
* )
CHECK=0
;;
esac
if [ "$CHECK" -eq "1" ]; then
FILES_TO_CHECK="$FILES_TO_CHECK $FILE"
fi
done
if [ "$FILES_TO_CHECK" == "" ]; then
exit 0
fi
# create temporary copy of staging area
if [ -e $TMP_STAGING ]; then
rm -rf $TMP_STAGING
fi
mkdir $TMP_STAGING
# Copy contents of staged version of files to temporary staging area
# because we only want the staged version that will be commited and not
# the version in the working directory
STAGED_FILES=""
HAS_ERROR=0
for FILE in $FILES_TO_CHECK; do
ID=$(git diff-index --cached $against $FILE | cut -d " " -f4)
# create staged version of file in temporary staging area with the same
# path as the original file so that the phpcs ignore filters can be
# applied
mkdir -p "$TMP_STAGING/$(dirname $FILE)"
STAGED_FILE="$TMP_STAGING/$FILE"
git cat-file blob $ID > "$STAGED_FILE"
STAGED_FILES="$STAGED_FILES $STAGED_FILE"
done
for FILE in $PHP_FILES; do
"$HOOKHOME/pre-commit-check-php" "$FILE"; RET=$?
if [ $RET -ne 0 ]; then
HAS_ERROR=1
continue
fi
done
for FILE in $XML_FILES; do
"$HOOKHOME/pre-commit-check-xml" "$FILE"; RET=$?
if [ $RET -ne 0 ]; then
HAS_ERROR=1
continue
fi
done
for FILE in $JSON_FILES; do
"$HOOKHOME/pre-commit-check-json" "$FILE"; RET=$?
if [ $RET -ne 0 ]; then
HAS_ERROR=1
continue
fi
done
for FILE in $JS_FILES; do
"$HOOKHOME/pre-commit-check-js" "$FILE"; RET=$?
if [ $RET -ne 0 ]; then
HAS_ERROR=1
continue
fi
done
for FILE in $LINE_FILES; do
"$HOOKHOME/pre-commit-check-linestyle" "$FILE"; RET=$?
if [ $RET -ne 0 ]; then
HAS_ERROR=1
continue
fi
done
if [ $HAS_ERROR -eq 0 ]; then
"$HOOKHOME/pre-commit-check-phpcs" $PHP_FILES; RET=$?
if [ $RET -ne 0 ]; then
HAS_ERROR=1
fi
"$HOOKHOME/pre-commit-check-jshint" $JS_FILES; RET=$?
if [ $RET -ne 0 ]; then
HAS_ERROR=1
fi
fi
# delete temporary copy of staging area
rm -rf $TMP_STAGING
if [ $HAS_ERROR -ne 0 ]; then
exit 1
fi
| true |
6e360cbafe3c74fd67d7f2791492144367673db7 | Shell | Hooyh110/docker_user | /bash_work/bash_work/docker/zabbix/zabbix_install_docker.sh | UTF-8 | 1,363 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env bash
# Trace every command so the operator can follow the install run.
set -x
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
# Directory this script lives in (printed for the operator).
cur_dir=$(cd $(dirname "$0") && pwd)
echo "脚本执行路径:"${cur_dir}
DATE=`date +%Y%m%d_%H%M%S`
echo "当前时间:"${DATE}
#if [ $# -eq "0" ]; then
# echo "没有参数"
#fi
# MySQL backend for Zabbix; credentials are fixed defaults baked into the
# container environment below.
docker run --name mysql-server -t -e MYSQL_DATABASE="zabbix" -e MYSQL_USER="zabbix" -e MYSQL_PASSWORD="zabbix_pwd" -e MYSQL_ROOT_PASSWORD="root_pwd" -d mysql:5.7
# Portainer management UI on host port 9000.
# NOTE(review): container name "prtainer" looks like a typo for "portainer";
# renaming would change behaviour for anything that references it -- confirm.
docker run -d -p 9000:9000 --restart=always -v /var/run/docker.sock:/var/run/docker.sock --name prtainer portainer/portainer
# Java gateway, linked into the Zabbix server container below.
docker run --name zabbix-java-gateway -t -d zabbix/zabbix-java-gateway:latest
# Zabbix server, wired to the MySQL and Java-gateway containers via --link.
docker run --name zabbix-server-mysql -t -e DB_SERVER_HOST="mysql-server" -e MYSQL_DATABASE="zabbix" -e MYSQL_USER="zabbix" -e MYSQL_PASSWORD="zabbix_pwd" -e MYSQL_ROOT_PASSWORD="root_pwd" -e ZBX_JAVAGATEWAY="zabbix-java-gateway" --link mysql-server:mysql --link zabbix-java-gateway:zabbix-java-gateway -p 10051:10051 -d zabbix/zabbix-server-mysql:latest
# Web frontend (nginx) exposed on host port 8088.
docker run --name zabbix-web-nginx-mysql -t -e DB_SERVER_HOST="mysql-server" -e MYSQL_DATABASE="zabbix" -e MYSQL_USER="zabbix" -e MYSQL_PASSWORD="zabbix_pwd" -e MYSQL_ROOT_PASSWORD="root_pwd" --link mysql-server:mysql --link zabbix-server-mysql:zabbix-server -p 8088:80 -d zabbix/zabbix-web-nginx-mysql:latest
| true |
5e4f1c356db00b72059efd01399a03675d3286c6 | Shell | thekillers123/auto | /autodeb.sh | UTF-8 | 9,331 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Ignore SIGINT so a stray Ctrl-C does not abort the whole installer.
trap ' ' 2
sudo apt-get update -y
# Public IPv4 address of this VPS as seen from the internet.
ipvps=`wget http://ipecho.net/plain -O - -q ; echo`
sudo apt-get install nano -y
sudo apt-get install curl -y
# NOTE(review): "chmod" is part of coreutils, not a standalone package; this
# install is a no-op/error but harmless and kept for fidelity.
sudo apt-get install chmod -y
sudo apt-get install dpkg -y
# Detect the virtualization type: OpenVZ guests expose a venet0 interface.
# (Replaces the original temp-file + grep dance with a direct pipe; the
# original's "elif -z" branch was just an else.)
if ifconfig -a | grep -q "venet0"
then
cekvirt='OpenVZ'
else
cekvirt='KVM'
fi
# ==================================================================================
# ================== Places Function you need ======================================
# 1===
# Refresh the APT package index.
funct_update()
{
sudo apt-get update -y
}
# 2===
# Upgrade all installed packages non-interactively.
funct_upgrade()
{
sudo apt-get upgrade -y
}
# 3===
# Add the Webmin APT repositories + signing key and install Webmin.
# NOTE(review): `sudo echo ... >> file` performs the redirection as the
# invoking user, not root -- this only works because the script is run as
# root (checked at startup).
funct_webmin()
{
sudo echo 'deb http://download.webmin.com/download/repository sarge contrib' >> /etc/apt/sources.list
sudo echo 'deb http://webmin.mirror.somersettechsolutions.co.uk/repository sarge contrib' >> /etc/apt/sources.list
wget http://www.webmin.com/jcameron-key.asc
sudo apt-key add jcameron-key.asc
sudo apt-get update -y
sudo apt-get install webmin -y
# Allow /bin/false as a login shell (for SSH-tunnel-only accounts).
sudo echo '/bin/false' >> /etc/shells
funct_update
}
# 4===
# Make sshd listen on 22 plus the extra ports 109 and 53 by editing
# sshd_config in place (uncomment Port 22, then append two Port lines).
funct_openssh()
{
sudo sed -i 's/#Port 22/Port 22/g' /etc/ssh/sshd_config
sudo sed -i 's/# Port 22/Port 22/g' /etc/ssh/sshd_config
# ===
sudo sed -i '/^Port 22/ s:$:\nPort 109:' /etc/ssh/sshd_config
sudo sed -i '/^Port 22/ s:$:\nPort 53:' /etc/ssh/sshd_config
}
# 5===
# Move Apache from port 80 to 88 (port 80 is taken over by Squid below).
funct_apache()
{
sudo sed -i 's/NameVirtualHost *:80/NameVirtualHost *:88/g' /etc/apache2/ports.conf
sudo sed -i 's/Listen 80/Listen 88/g' /etc/apache2/ports.conf
sudo service apache2 restart
}
# 6===
# Install Dropbear and run it on ports 443 and 143 (default port 22 is
# commented out so it does not clash with OpenSSH).
funct_dropbear()
{
sudo apt-get install dropbear -y
# ===
sudo sed -i 's/NO_START=1/NO_START=0/g' /etc/default/dropbear
sudo sed -i 's/DROPBEAR_PORT=22/# DROPBEAR_PORT=22/g' /etc/default/dropbear
sudo sed -i 's/DROPBEAR_EXTRA_ARGS=/DROPBEAR_EXTRA_ARGS="-p 443 -p 143"/g' /etc/default/dropbear
}
# 7===
# Install Squid3 and append a minimal proxy config: allow traffic to this
# VPS only, listen on 80/8080/8000, hide the forwarded-for header.
funct_squid()
{
sudo apt-get install squid3 -y
sudo echo 'acl server1 dst '$ipvps'-'$ipvps'/255.255.255.255' >> /etc/squid3/squid.conf
sudo echo 'http_access allow server1' >> /etc/squid3/squid.conf
sudo echo 'http_port 80' >> /etc/squid3/squid.conf
sudo echo 'http_port 8080' >> /etc/squid3/squid.conf
sudo echo 'http_port 8000' >> /etc/squid3/squid.conf
# ===
sudo echo 'forwarded_for off' >> /etc/squid3/squid.conf
sudo echo 'visible_hostname server1' >> /etc/squid3/squid.conf
}
# 8===
# Install OpenVPN (TCP/1194) with PAM password authentication and set up
# NAT for the 10.8.0.0/24 tunnel network.
funct_openvpn()
{
sudo apt-get install openvpn -y
# ===
# Use the packaged easy-rsa if present, otherwise fetch a copy.
if [ ! -d "/usr/share/doc/openvpn/examples/easy-rsa" ]
then
cd /etc/openvpn/
wget http://master.dl.sourceforge.net/project/vpsmanagement/easy-rsa.tar.gz
# BUG FIX: the original ran `tar cvzf easy-rsa.tar.gz`, which *creates* an
# archive instead of unpacking the one just downloaded. Extract it instead.
tar zxvf easy-rsa.tar.gz
else
sudo cp -a /usr/share/doc/openvpn/examples/easy-rsa /etc/openvpn/
fi
cd /etc/openvpn/easy-rsa/2.0
# Build CA, server certificate and DH parameters (interactive prompts).
sudo chmod +x *; source ./vars; ./vars; ./clean-all
./build-ca; ./build-key-server server; ./build-dh
# ===
sudo cp -r /etc/openvpn/easy-rsa/2.0/keys/ /etc/openvpn/keys/
sudo cp /etc/openvpn/keys/ca.crt /etc/openvpn/
cd /etc/openvpn/
# ===
# NOTE(review): this line writes to server.conf while everything below goes
# to myserver.conf -- looks inconsistent; confirm which file openvpn loads.
sudo echo -e 'dev tun*' > server.conf
# ===
sudo echo -e 'port 1194' >> myserver.conf
sudo echo -e 'proto tcp' >> myserver.conf
sudo echo -e 'dev tun' >> myserver.conf
sudo echo -e 'ca /etc/openvpn/keys/ca.crt' >> myserver.conf
sudo echo -e 'dh /etc/openvpn/keys/dh1024.pem' >> myserver.conf
sudo echo -e 'cert /etc/openvpn/keys/server.crt' >> myserver.conf
sudo echo -e 'key /etc/openvpn/keys/server.key' >> myserver.conf
# The PAM plugin path differs between OpenVZ and KVM images.
if [ $cekvirt = "OpenVZ" ]
then
sudo echo -e 'plugin /usr/lib/openvpn/openvpn-auth-pam.so /etc/pam.d/login' >> myserver.conf
else
sudo echo -e 'plugin /usr/lib/openvpn/openvpn-plugin-auth-pam.so /etc/pam.d/login' >> myserver.conf
fi
sudo echo -e 'client-cert-not-required' >> myserver.conf
sudo echo -e 'username-as-common-name' >> myserver.conf
sudo echo -e 'server 10.8.0.0 255.255.255.0' >> myserver.conf
sudo echo -e 'ifconfig-pool-persist ipp.txt' >> myserver.conf
sudo echo -e 'push "redirect-gateway def1"' >> myserver.conf
sudo echo -e 'push "dhcp-option DNS 8.8.8.8"' >> myserver.conf
sudo echo -e 'push "dhcp-option DNS 8.8.4.4"' >> myserver.conf
sudo echo -e 'keepalive 5 30' >> myserver.conf
sudo echo -e 'comp-lzo' >> myserver.conf
sudo echo -e 'persist-key' >> myserver.conf
sudo echo -e 'persist-tun' >> myserver.conf
sudo echo -e 'status server-tcp.log' >> myserver.conf
sudo echo -e 'verb 3' >> myserver.conf
# ===
# Enable IPv4 forwarding now and persist it across reboots.
sudo echo 1 > /proc/sys/net/ipv4/ip_forward
# ===
sudo sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g' /etc/sysctl.conf
sudo sysctl -p
# ===
# NAT the tunnel subnet out of the correct interface for the virt type.
if [ $cekvirt = "OpenVZ" ]
then
sudo iptables -t nat -A POSTROUTING -s 10.8.0.0/24 -o venet0 -j SNAT --to $ipvps
sudo iptables -t nat -A POSTROUTING -j SNAT --to-source $ipvps
else
sudo iptables -t nat -A POSTROUTING -s 10.8.0.0/24 -o eth0 -j SNAT --to $ipvps
fi
sudo service openvpn restart
}
# 9===
# Placeholder: BadVPN support has not been implemented yet.
funct_badvpn()
{
echo -e "Script un-config yet \n"
}
# 10===
# Install pptpd, seed three demo users, enable forwarding and NAT the
# 10.10.0.0/24 PPTP pool. Relies on the script running as root (the
# redirections after `sudo echo` happen in the invoking shell).
funct_pptpvpn()
{
sudo apt-get install pptpd -y
sudo echo 'localip '$ipvps >> /etc/pptpd.conf
sudo echo 'remoteip 10.10.0.1-200' >> /etc/pptpd.conf
sudo echo 'ms-dns 8.8.8.8' >> /etc/ppp/pptpd-options
sudo echo 'ms-dns 8.8.4.4' >> /etc/ppp/pptpd-options
# === add user pptp
sudo echo '#username[tabkey]pptpd[tabkey]password[tabkey]ipremoteclient' >> /etc/ppp/chap-secrets
sudo echo 'user1 pptpd pass1 10.10.0.1' >> /etc/ppp/chap-secrets
sudo echo 'user2 pptpd pass2 10.10.0.2' >> /etc/ppp/chap-secrets
sudo echo 'user3 pptpd pass3 10.10.0.3' >> /etc/ppp/chap-secrets
# ===
# Single quotes are intentional: $1 must reach /etc/ppp/ip-up unexpanded.
sudo echo 'ifconfig $1 mtu 1400' >> /etc/ppp/ip-up
# ===
sudo sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g' /etc/sysctl.conf
sudo sysctl -p
# ===
if [ $cekvirt = "OpenVZ" ]
then
# OVZ virtualization
sudo iptables -t nat -A POSTROUTING -j SNAT --to-source $ipvps
# sudo iptables --table nat -A POSTROUTING -o venet0 -j MASQUERADE
sudo iptables -t nat -A POSTROUTING -s 10.10.0.0/24 -o venet0 -j SNAT --to $ipvps
# iptables -t nat -I POSTROUTING -s 10.10.0.0/24 -o venet0 -j MASQUERADE
else
# KVM virtualization
sudo iptables --table nat -A POSTROUTING -o eth0 -j MASQUERADE
fi
# ===
# Show the demo accounts that were just created.
echo -e '\n'
echo -e 'default user have been create as :'
sudo cat < /etc/ppp/chap-secrets | grep "10.10"
echo -e 'edit "/etc/ppp/chap-secrets" to adduser more \n'
echo -e 'Continue \c'
}
# 11===
# Download and build SoftEther VPN server into /usr/local/vpnserver.
funct_softvpn()
{
sudo apt-get update -y
sudo apt-get install build-essential -y
# BUG FIX: tar cannot read from a URL -- download the archive first, then
# unpack it.
sudo wget http://citylan.dl.sourceforge.net/project/vpsmanagement/softether-64bit.tar.gz
sudo tar zxf softether-64bit.tar.gz
# BUG FIX: `sudo cd` is a no-op (cd is a shell builtin executed in a child
# process); change directory in the current shell instead.
cd vpnserver
sudo make
cd ..
sudo mv vpnserver /usr/local
cd /usr/local/vpnserver/
sudo chmod 600 *
sudo chmod 700 vpncmd
sudo chmod 700 vpnserver
}
# 12===
# Placeholder: fail2ban support has not been implemented yet.
funct_fail2ban()
{
echo -e "Script un-config yet \n"
}
# 13===
# Fetch the helper script that manages SSH tunnel user accounts.
funct_userlogin()
{
sudo curl -s http://jaist.dl.sourceforge.net/project/vpsmanagement/user-login.sh > user-login.sh
sudo chmod +x user-login.sh
}
# 14===
# Install Python and download the speedtest CLI for bandwidth checks.
funct_speedtest()
{
sudo apt-get install python -y
wget https://github.com/sivel/speedtest-cli/raw/master/speedtest_cli.py
sudo chmod a+rx speedtest_cli.py
}
# 15===
# Run the full installation sequence in order.
# NOTE(review): funct_softvpn (menu item 11) is intentionally or accidentally
# not part of this list -- confirm.
funct_setall()
{
funct_update
funct_upgrade
funct_webmin
funct_openssh
funct_apache
funct_dropbear
funct_squid
funct_openvpn
funct_badvpn
funct_pptpvpn
funct_fail2ban
funct_userlogin
funct_speedtest
funct_react
}
# 16===
# Restart every managed service, run the user-login helper, print a
# speedtest result and the listening sockets.
funct_react()
{
sudo service apache2 restart
sudo service squid3 restart
sudo service ssh restart
sudo service dropbear restart
sudo service webmin restart
sudo pptpd restart
sudo ./user-login.sh
sudo python speedtest_cli.py --share
sudo netstat -ntlp
}
# AUTO SCRIPT DEBIAN ==
# Interactive main menu: loops forever, dispatching to the functions above
# until the user chooses 17 (Exit).
clear
while true
do
clear
echo "===================================================="
echo "              DEBIAN-UBUNTU VPS MANAGEMENT          "
echo "===================================================="
echo "Description;"
echo "Virtualization : $cekvirt"
# Probing these devices shows whether TUN and PPP are available (their
# error output doubles as a status display).
sudo cat /dev/net/tun
sudo cat /dev/ppp
echo "===================================================="
echo " 1. Update                 ||  8. OPEN VPN          "
echo " 2. Upgrade                ||  9. BAD VPN           "
echo " 3. Webmin                 || 10. PPTP VPN          "
echo " 4. Managed Port OpenSSH   || 11. VPN Server        "
echo " 5. Managed Port Apache2   || 12. Fail2Ban          "
echo " 6. Dropbear               || 13. Script User-Login "
echo " 7. Squid3                 || 14. add Speedtest-CLI "
echo "===================================================="
echo "15. Setup All in One"
echo "16. Restart All Managed"
echo "17. Exit"
echo -e "\n"
echo -e "Type number choice : \c"
read choice
case "$choice" in
1) funct_update ;;
2) funct_upgrade ;;
3) funct_webmin ;;
4) funct_openssh ;;
5) funct_apache ;;
6) funct_dropbear ;;
7) funct_squid ;;
8) funct_openvpn ;;
9) funct_badvpn ;;
10) funct_pptpvpn ;;
11) funct_softvpn ;;
12) funct_fail2ban ;;
13) funct_userlogin ;;
14) funct_speedtest ;;
15) funct_setall ;;
16) funct_react ;;
17) exit ;;
esac
# Pause so the user can read the command output before the screen clears.
echo -e 'Enter return to Continue \c'
read input
done
| true |
61f692b9b3e9932ad1b1a3b6a01d048f1df9f3f5 | Shell | zx1986/Gitorious-Setup | /ubuntu/install.sh | UTF-8 | 4,067 | 3.265625 | 3 | [] | no_license | #!/bin/bash
function yn
{
local answer
read -p 'yes or no ? (y/N) ' answer
if [ "$answer" != "y" ]||[ "$answer" != "Y" ]; then
echo 'bye!'
exit 1;
fi
}
cat <<END
[ Gitorious 安裝記錄文件 ]
本文件將安裝 Ruby, RubyGems, Git, Apache2, MySQL 等諸多套件,請視需求修改或參考本文件。
本文件需以 root 身份執行,請問當前身份是否爲 root 帳號?
END
yn
apt-get update && apt-get upgrade
echo '將 '`pwd`' 內所有 shell 檔設定爲可執行'
chmod a+x ./*.sh
# install Ruby
apt-get install ruby-full
# install RubyGems
wget http://production.cf.rubygems.org/rubygems/rubygems-1.3.7.tgz
tar zxvf rubygems-1.3.7.tgz
cd rubygems-1.3.7
chmod a+x setup.rb
ruby setup.rb
# install Packages
aptitude install apache2
aptitude install mysql-server mysql-client
aptitude install build-essential zlib1g-dev tcl-dev libexpat-dev libcurl4-openssl-dev postfix apg geoip-bin libgeoip1 libgeoip-dev sqlite3 libsqlite3-dev imagemagick libpcre3 libpcre3-dev zlib1g zlib1g-dev libyaml-dev libmysqlclient15-dev apache2-dev libonig-dev ruby-dev libopenssl-ruby phpmyadmin libdbd-mysql-ruby libmysql-ruby libmagick++-dev zip unzip memcached git-core git-svn git-doc git-cvs irb
# install Gems
gem install -b --no-ri --no-rdoc rmagick chronic geoip daemons hoe echoe ruby-yadis ruby-openid mime-types diff-lcs json ruby-hmac rake stompserver
gem install passenger
gem install rails
gem install -v=1.0.1 rack
gem install -b --no-ri --no-rdoc -v 1.3.1.1 rdiscount
gem install -b --no-ri --no-rdoc -v 1.1 stomp
# if /usr/bin does NOT have 'rake' and 'stompserver' in there
ln -s /var/lib/gems/1.8/bin/rake /usr/bin
ln -s /var/lib/gems/1.8/bin/stompserver /usr/bin
# setup Sphinx
wget http://www.sphinxsearch.com/downloads/sphinx-0.9.9.tar.gz
tar zxvf sphinx-0.9.9.tar.gz
cd sphinx-0.9.9
./configure --prefix=/usr && make all install
# fetch Gitorious
git clone http://git.gitorious.org/gitorious/mainline.git /var/www/gitorious
ln -s /var/www/gitorious/script/gitorious /usr/bin
# configure services
cp ./git-daemon /etc/init.d/git-daemon
cp ./git-ultrasphinx /etc/init.d/git-ultrasphinx
cp ./gitorious /etc/logrotate.d/gitorious
cp ./stomp /etc/init.d/stomp
cp ./git-poller /etc/init.d/git-poller
chmod 755 /etc/init.d/git-ultrasphinx
chmod 755 /etc/init.d/git-daemon
chmod 755 /etc/init.d/stomp
chmod 755 /etc/init.d/git-poller
update-rc.d stomp defaults
update-rc.d git-daemon defaults
update-rc.d git-ultrasphinx defaults
update-rc.d git-poller defaults
# install Passenger
#/var/lib/gems/1.8/bin/passenger-install-apache2-module
/usr/bin/passenger-install-apache2-module
cp ./passenger.load /etc/apache2/mods-available/passenger.load
a2enmod passenger
a2enmod rewrite
a2enmod ssl
a2ensite default-ssl
cp ./apache-gitorious /etc/apache2/sites-available/gitorious
cp ./apache-gitorious-ssl /etc/apache2/sites-available/gitorious-ssl
# setting Apache2
a2dissite default
a2dissite default-ssl
a2ensite gitorious
a2ensite gitorious-ssl
/etc/init.d/apache2 restart
###########################################################
#gem install mongrel
#gem install thin
# add user 'git'
adduser --system --home /var/www/gitorious/ --no-create-home --group --shell /bin/bash git
chown -R git:git /var/www/gitorious
# git
echo '以下內容請切換成 git 使用者執行。'
echo '先新增一個有管理者權限的 MySQL 帳號 git'
echo '將 MySQL 內 git 帳號的密碼填寫至 database.yml'
#su - git
cat <<GIT
mkdir .ssh
touch .ssh/authorized_keys
chmod 700 .ssh
chmod 600 .ssh/authorized_keys
mkdir tmp/pids
mkdir repositories
mkdir tarballs
cp ./database.yml config/database.yml
cp ./gitorious.yml config/gitorious.yml
cp ./broker.yml config/broker.yml
export RAILS_ENV=production
rake db:create
rake db:migrate
rake ultrasphinx:bootstrap
#crontab -e
#* * * * * cd /var/www/gitorious && /usr/bin/rake ultrasphinx:index RAILS_ENV=production
# 使用附帶的 ruby script 建立一個 Gitorious 網站管理者
env RAILS_ENV=production ruby1.8 script/create_admin
GIT
| true |
7c1d603bb31a5673ac87311b5e93319f087744cc | Shell | UnivaCorporation/atomic-gce-builder | /build.sh | UTF-8 | 2,388 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#############################################################################
# The MIT License (MIT)
# Copyright (c) 2016 Univa, Corp
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
###########################################################################
if [ $# -ne 1 ];then
echo "usage $0: <gc-project-id>" >&2
exit 1
fi
mkdir -p /build
cd /build
ARCHIVE_NAME=`ls Fedora-Cloud-Atomic*.xz 2>/dev/null`
if [ "$ARCHIVE_NAME" != "" ]; then
BASE_NAME=`echo $ARCHIVE_NAME | sed 's/\.xz$//'`
else
BASE_NAME="Fedora-Cloud-Atomic-23-20160127.2.x86_64.raw"
fi
PROJECT_NAME=$1
STAMP=`date +"%s"`
BUCKET_NAME=fedora-atomic-gce-$STAMP
TEMP_DIR=/tmp
set -e
set -x
if [ -z "$ARCHIVE_NAME" ]; then
wget -O - https://download.fedoraproject.org/pub/alt/atomic/stable/Cloud-Images/x86_64/Images/$BASE_NAME.xz | \
xzcat --decompress > $TEMP_DIR/disk.raw
else
xzcat --decompress $ARCHIVE_NAME > $TEMP_DIR/disk.raw
fi
cd $TEMP_DIR
tar -Szcf $BASE_NAME.tar.gz disk.raw
rm disk.raw
if gcloud compute images list; then
echo "Using existing credentials"
else
# Need to login ourselves
gcloud auth login
fi
gcloud config set project $PROJECT_NAME
gsutil mb gs://$BUCKET_NAME
gsutil cp $BASE_NAME.tar.gz gs://$BUCKET_NAME
gcloud compute images create fedora-cloud-atomic --source-uri gs://$BUCKET_NAME/$BASE_NAME.tar.gz
gsutil rm -r gs://$BUCKET_NAME
| true |
2b84e5c900114e27eabd57eb7f4358f6c3abfdf1 | Shell | MaudGautier/detect-recombinants-in-F1 | /src/core/01_fastq_processing.bash | UTF-8 | 2,797 | 3.8125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#### PARAMETERS ####
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# Options are consumed in (flag, value) pairs.
# NOTE(review): the `$# -gt 1` guard silently drops a trailing flag that has
# no value -- assumed intentional since every option takes an argument.
while [[ $# -gt 1 ]]
do
key="$1"
case $key in
    -c|--config|--source)
    config_file="$2"
    shift
    ;;
    -i|--fastq_with_adapter_prefix)
    fastq_with_adapter_prefix="$2"
    shift
    ;;
    -o|--fastq_no_adapter_prefix)
    fastq_no_adapter_prefix="$2"
    shift
    ;;
    -d|--fastqc_dir)
    fastqc_dir="$2"
    shift
    ;;
    -a|--adapter)
    adapter="$2"
    shift
    ;;
    -s|--sub)
    sub_file="$2"
    shift
    ;;
    *)
    # unknown option
    ;;
esac
shift # past argument or value
done
# Echo the parsed configuration for the pipeline logs.
echo CONFIG FILE     = "${config_file}"
echo FASTQ WITH ADAPT= "${fastq_with_adapter_prefix}"
echo FASTQ NO ADAPT  = "${fastq_no_adapter_prefix}"
echo FASTQC DIRECTORY= "${fastqc_dir}"
echo ADAPTER         = "${adapter}"
echo SUBMISSION FILE = "${sub_file}"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#### SOURCE ####
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
source $config_file
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#### REMOVE ADAPTER ####
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# Trim the adapter from both reads of the pair; drop reads shorter than 50bp.
cutadapt -a ${adapter} \
    -A ${adapter} \
    --minimum-length 50 \
    -o ${fastq_no_adapter_prefix}'-R1.no_adapter.fastq.gz' \
    -p ${fastq_no_adapter_prefix}'-R2.no_adapter.fastq.gz' \
    ${fastq_with_adapter_prefix}'-R1.fastq.gz' \
    ${fastq_with_adapter_prefix}'-R2.fastq.gz'
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#### CHECK FASTQC QUALITY ####
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# Create directory if not existant
if [ ! -d $fastqc_dir ] ; then mkdir -p $fastqc_dir ; fi
# For reads 1 (first paired-end file)
fastqc -o $fastqc_dir \
    ${fastq_no_adapter_prefix}'-R1.no_adapter.fastq.gz'
# For reads 2 (second paired-end file)
fastqc -o $fastqc_dir \
    ${fastq_no_adapter_prefix}'-R2.no_adapter.fastq.gz'
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#### DELETE SUBMISSION FILE ####
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# Remove the cluster submission file once the final FastQC report exists,
# signalling to the scheduler wrapper that the job finished.
if [ ! -z $sub_file ] ; then
    name="${fastq_no_adapter_prefix##*/}"
    if [ -s ${fastqc_dir}/${name}-R2.no_adapter_fastqc.html ] ; then
        rm -f $sub_file
    fi
fi
| true |
9eb3648a205da789fda4fc701c005b4baf30e4e0 | Shell | bpd1069/bootrinos | /install_os_alpine/bootrino.sh | UTF-8 | 3,112 | 3.5 | 4 | [] | no_license | #!/usr/bin/env sh
# NOTE(review): `read` consumes only the FIRST line of this here-doc, so
# $BOOTRINOJSON ends up holding just "{". The JSON appears to serve as
# embedded metadata for external tooling rather than runtime data -- confirm
# before relying on the variable.
read BOOTRINOJSON <<"BOOTRINOJSONMARKER"
{
  "name": "Install Alpine Linux",
  "version": "0.0.1",
  "versionDate": "2018-01-01T09:00:00Z",
  "description": "Install Alpine Linux from Tiny Core Linux. WARNING THIS IS AN EXAMPLE ONLY - THERE IS NO PASSWORD ON root USER!",
  "options": "",
  "logoURL": "",
  "readmeURL": "https://raw.githubusercontent.com/bootrino/bootrinos/master/install_os_yocto/README.md",
  "launchTargetsURL": "https://raw.githubusercontent.com/bootrino/launchtargets/master/defaultLaunchTargetsLatest.json",
  "websiteURL": "https://github.com/bootrino/",
  "author": {
    "url": "https://www.github.com/bootrino",
    "email": "bootrino@gmail.com"
  },
  "tags": [
    "linux",
    "alpine",
    "runfromram"
  ]
}
BOOTRINOJSONMARKER
# Common environment for every step below: PATH, target OS label and the
# mount points of the two partitions being populated.
setup()
{
export PATH=$PATH:/usr/local/bin:/usr/bin:/usr/local/sbin:/bin
OS=tinycore
# Disable xtrace and errexit so individual step failures do not abort the
# whole installer.
set +xe
BOOT_PARTITION=/mnt/boot_partition/
ROOT_PARTITION=/mnt/root_partition/
}
# Fetch the Alpine "virt" ISO into the root partition staging area.
download_alpine()
{
ALPINE_ISO_NAME=alpine-virt-3.7.0-x86_64.iso
ALPINE_ISO_URL=http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/
cd ${ROOT_PARTITION}
sudo wget ${ALPINE_ISO_URL}${ALPINE_ISO_NAME}
}
# Pre-seed the boot partition with the APK packages (and their dependency
# chain) that must be available at first boot.
download_alpine_packages()
{
# if you want packages to be available on boot, put them in the cache dir on the boot_partition
# https://wiki.alpinelinux.org/wiki/Local_APK_cache
# note that these files cannot be stored on a ram disk
# The cache is enabled by creating a symlink named /etc/apk/cache that points to the cache directory
# setup-apkcache
URL_BASE=http://dl-cdn.alpinelinux.org/alpine/v3.7/main/x86_64/
sudo mkdir -p ${BOOT_PARTITION}cache
cd ${BOOT_PARTITION}boot/apks/x86_64
sudo wget ${URL_BASE}dhclient-4.3.5-r0.apk
# dhclient depends libgcc
sudo wget ${URL_BASE}libgcc-6.4.0-r5.apk
# dhclient's scripts need bash
sudo wget ${URL_BASE}bash-4.4.12-r2.apk
# bash depends:
sudo wget ${URL_BASE}pkgconf-1.3.10-r0.apk
# bash depends:
sudo wget ${URL_BASE}ncurses-terminfo-base-6.0_p20170930-r0.apk
# bash depends:
sudo wget ${URL_BASE}ncurses-terminfo-6.0_p20170930-r0.apk
# bash depends:
sudo wget ${URL_BASE}ncurses5-libs-5.9-r1.apk
# bash depends:
sudo wget ${URL_BASE}readline-7.0.003-r0.apk
sudo chmod ug+rx *
}
# Fetch the apkovl overlay with the cloud/ssh/nginx configuration.
download_apk_ovl()
{
URL_BASE=https://raw.githubusercontent.com/bootrino/bootrinos/master/install_os_alpine/
cd ${BOOT_PARTITION}
# goes in the root of the boot volume, where Alpine picks it up
sudo wget ${URL_BASE}cloud_ssh_nginx.apkovl.tar.gz
sudo chmod ug+rx *
}
# Loop-mount the downloaded ISO and copy its contents onto the boot
# partition.
copy_alpine_from_iso_to_boot()
{
sudo mkdir -p ${ROOT_PARTITION}alpinefiles
sudo mount -o loop ${ROOT_PARTITION}alpine-virt-3.7.0-x86_64.iso ${ROOT_PARTITION}alpinefiles
sudo cp -r ${ROOT_PARTITION}alpinefiles/* ${BOOT_PARTITION}.
}
# Main sequence: stage Alpine onto the boot partition, then tell the
# operator (on every likely console) that a reboot is needed.
setup
download_alpine
copy_alpine_from_iso_to_boot
download_alpine_packages
download_apk_ovl
echo "REBOOT is required at this point to launch"
echo "REBOOT is required at this point to launch" > /dev/console
echo "REBOOT is required at this point to launch" > /dev/tty0
| true |
b2a4f53a4dca7b4f5fdc24b4cbad85156f3b58dd | Shell | hpartapur/Clutch_Pedal | /clutch_pedal.sh | UTF-8 | 2,645 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Working directory for HandBrakeCLI and the videos it processes.
mkdir -p ~/Desktop/Clutch_Pedal
cd ~/Desktop/Clutch_Pedal
# 0 = first run (install HandBrakeCLI), 1 = already installed.
echo If this is your first time working with clutch_pedal, please enter 0. Otherwise, enter 1
read installcheck
# macOS install path: download the HandBrakeCLI dmg, mount it and copy the
# binary into the Clutch_Pedal working directory.
macinstall () {
open https://handbrake.fr/rotation.php?file=HandBrakeCLI-1.3.3.dmg
cd ~/Downloads
sudo hdiutil attach HandBrakeCLI-1.3.3.dmg
cp -r "/Volumes/HandBrakeCLI-1.3.3/HandBrakeCLI" ~/Desktop/Clutch_Pedal
#INSTALL IN APPLICATIONS
echo
echo
echo If no error messages, SUCCESS! HandBrakeCLI is installed!
echo Please run this program again by typing ./clutch_pedal into the terminal again, and entering 1 instead of 0.
}
# Ask the user for their operating system (1 = Windows, 2 = Mac).
osnumberer (){
echo Enter 1 for Windows, 2 for Mac.
read osnumber
}
# Print the encode settings that were just applied.
success_message(){
echo HandBraked Video has been exported to Desktop.
echo Encoding- x264
echo CRF-Quality: 25.0
echo Frame Rate: 27.0
echo 1080p
}
# Dispatch: 0 = install flow, 1 = encode flow.
if [ "$installcheck" -eq 0 ]
then
osnumberer
if [ "$osnumber" -eq 2 ]
then
macinstall
elif [ "$osnumber" -eq 1 ]
then
# NOTE(review): `start` and `Expand-Archive` are Windows/PowerShell
# commands and will not run under bash -- this branch needs a rewrite or
# should be documented as "run from Git Bash + PowerShell". Left as-is.
start https://handbrake.fr/rotation.php?file=HandBrakeCLI-1.3.3-win-x86_64.zip
cd ~/Downloads
Expand-Archive -LiteralPath '~\Downloads\HandBrakeCLI-1.3.3-win-x86_64.zip' -DestinationPath '~\Desktop\Clutch_Pedal'
cd ~/Desktop/Clutch_Pedal
echo Make sure your Exported File from Premiere Pro is in the Clutch_Pedal folder on the Desktop. If not, copy it to the Clutch_Pedal folder now.
echo Enter Name of your Exported File from Premiere Pro \(Example: myvideo.mp4, or mymajlisvideo.mp4\)
read inputpath
echo
echo
echo Enter number of raat majlis \(Example: 14\)
read raatnumber
outputter="$raatnumber"mi_raat_web.mp4
./HandBrake -i '~\Desktop\Clutch_Pedal\"$inputpath"' -o '~/Desktop/Clutch_Pedal/"$outputter"' -e x264 -q 27.0 -r 25 -w 1920 -l 1080
echo If no error messages, SUCCESS! HandBrakeCLI is installed!
echo Please run this program again by typing ./clutch_pedal into the terminal again, and entering 1 instead of 0.
fi
elif [ "$installcheck" -eq 1 ]
then
echo
echo
#OPENFINDERWKNDOW
echo Make sure your Exported File from Premiere Pro is in the Clutch_Pedal folder on the Desktop. If not, copy it to the Clutch_Pedal folder now.
echo Enter Name of your Exported File from Premiere Pro \(Example: myvideo.mp4, or mymajlisvideo.mp4\)
read inputpath
echo
echo
echo Enter number of raat majlis \(Example: 14\)
read raatnumber
outputter="$raatnumber"mi_raat_web.mp4
# Quote the user-supplied file name so paths with spaces work.
./HandBrakeCLI -i "$inputpath" -o ~/Desktop/Clutch_Pedal/"$outputter" -e x264 -q 27.0 -r 25 -w 1920 -l 1080
# BUG FIX: capture the exit status once. The original tested $? twice, so
# the second test saw the status of the *first test* (always 1 on success)
# and success_message could never run.
rc=$?
if [ "$rc" -gt 0 ]
then
echo Something went wrong HandBraking your video
else
success_message
fi
else
echo Something went wrong
fi
| true |
3f4cb9b4f9a1465cd65fcf64cd6005c7800acd69 | Shell | ameerajmal111/cassandra.toolkit | /docker-containers/start-grafana.sh | UTF-8 | 1,108 | 3.15625 | 3 | [
"Apache-2.0"
] | permissive | GRAFANA_PORT=3000
CONTAINER_NAME=grafana
# Grafana environment toggles passed into the container below.
GF_AUTH_BASIC_ENABLED=true
GF_AUTH_ANONYMOUS_ENABLED=false
GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
GF_SECURITY_ADMIN_PASSWORD=admin
GF_PANELS_DISABLE_SANITIZE_HTML=true

# Remove any previous container of the same name so the run below cannot
# collide with it. (Tests the command directly instead of inspecting $?,
# and quotes all expansions.)
if docker container inspect "$CONTAINER_NAME" > /dev/null 2>&1; then
    printf "Docker instances ($CONTAINER_NAME) exist. Trying to stop and delete it...\n"
    docker stop "$CONTAINER_NAME"
    docker rm "$CONTAINER_NAME"
fi

printf "Starting a new ($CONTAINER_NAME) container...\n"
# Provision dashboards/datasources from the ansible artifacts tree and
# expose Grafana on $GRAFANA_PORT (defined above).
docker run -d \
    --name "$CONTAINER_NAME" \
    -v "$PWD/../ansible/artifacts/grafana/dashboards:/etc/grafana/provisioning/dashboards:z" \
    -v "$PWD/../ansible/artifacts/grafana/datasources:/etc/grafana/provisioning/datasources:z" \
    -e "GF_AUTH_BASIC_ENABLED=$GF_AUTH_BASIC_ENABLED" \
    -e "GF_AUTH_ANONYMOUS_ENABLED=$GF_AUTH_ANONYMOUS_ENABLED" \
    -e "GF_SECURITY_ADMIN_PASSWORD=$GF_SECURITY_ADMIN_PASSWORD" \
    -e "GF_AUTH_ANONYMOUS_ORG_ROLE=$GF_AUTH_ANONYMOUS_ORG_ROLE" \
    -e "GF_PANELS_DISABLE_SANITIZE_HTML=$GF_PANELS_DISABLE_SANITIZE_HTML" \
    --publish "$GRAFANA_PORT":3000 \
    grafana/grafana:6.5.1
591c889925e3461b24f0b77cd50df5ed28da7600 | Shell | HarrisonTotty/dotfiles | /src/scripts/scrot-select.sh | UTF-8 | 340 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Script to take a screenshot (selection) and save it under
# ~/pics/screenshots, with desktop notifications before and after.
# (mkdir -p is idempotent, so the original [ ! -d ] guard was redundant.)
mkdir -p "$HOME/pics/screenshots"
notify-send -u low "WM" "Select an area to capture screenshot..." -t 2000 &
scrot --select --silent "$HOME/pics/screenshots/screenshot.%y-%m-%d.%H-%M.png"
notify-send -u low "WM" "Saved screenshot." &
| true |
08c7bda6215f6321d06696e5267b994c50257bf0 | Shell | ComputeStacks/docker | /magento/2.0/litespeed/06-postfix.sh | UTF-8 | 1,883 | 3.21875 | 3 | [] | no_license | #!/bin/bash
#
# Configure Postfix as an authenticated SMTP relay ("smarthost") client.
# (possible alternative: https://github.com/mageplaza/magento-2-smtp)
#
# Environmental variables:
# * SMTP_USERNAME
# * SMTP_PASSWORD
# * SMTP_SERVER
# * SMTP_PORT
#
set -euo pipefail
# Emit the complete main.cf on stdout. Backslash-escaped \$vars are Postfix
# macros resolved at Postfix runtime; unescaped ${...} are expanded here.
postfix_config() {
cat<<EOF
smtpd_banner = \$myhostname ESMTP \$mail_name (Debian/GNU)
biff = no
append_dot_mydomain = no
readme_directory = no
compatibility_level = 2
smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem
smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key
smtpd_use_tls=yes
smtpd_tls_session_cache_database = btree:\${data_directory}/smtpd_scache
smtp_tls_session_cache_database = btree:\${data_directory}/smtp_scache
smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination
myhostname = ${HOSTNAME}
alias_maps = hash:/etc/aliases
alias_database = hash:/etc/aliases
myorigin = /etc/mailname
mydestination = \$myhostname, docker.local, ${HOSTNAME}, localhost.localdomain, localhost
mynetworks = 127.0.0.0/8
mailbox_size_limit = 0
recipient_delimiter = +
inet_interfaces = all
inet_protocols = ipv4
smtp_always_send_ehlo = yes
smtp_helo_name = ${HOSTNAME}
smtp_sasl_auth_enable = yes
smtp_sasl_password_maps = static:${SMTP_USERNAME}:${SMTP_PASSWORD}
smtp_sasl_security_options = noanonymous
smtp_sasl_tls_security_options = noanonymous
smtp_tls_security_level = encrypt
smtp_tls_loglevel = 1
header_size_limit = 4096000
relayhost = [${SMTP_SERVER}]:${SMTP_PORT}
EOF
}
# Write main.cf and copy the system files Postfix needs into its chroot
# (/var/spool/postfix), then start Postfix in the foreground-less mode.
config_postfix() {
postfix_config > /etc/postfix/main.cf
FILES="etc/localtime etc/services etc/resolv.conf etc/hosts etc/nsswitch.conf"
echo $HOSTNAME > /etc/mailname
for file in $FILES; do
# Replace any stale copy inside the chroot with the current host file.
if [ -f /var/spool/postfix/${file} ]; then
rm /var/spool/postfix/${file}
fi
cp /${file} /var/spool/postfix/${file}
chmod a+rX /var/spool/postfix/${file}
done
}
config_postfix
/usr/sbin/postfix -c /etc/postfix start
51dad861723d5570f021b1d1075880269e74fbdb | Shell | stmansour/accord | /devtools/tools/lnxinstall.sh | UTF-8 | 183 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Install the accord directory on linux
# Fetches the prebuilt tarball (via the project's getfile.sh helper) and
# unpacks it under /usr/local.
rm -f accord-linux.tar.gz
getfile.sh ext-tools/utils/accord-linux.tar.gz
pushd /usr/local
# Tarball lands in $HOME; extract it relative to /usr/local.
tar xvzf ~/accord-linux.tar.gz
popd
| true |
8ec4c1169f00db438c0fb6a3791f4ea4141f853a | Shell | lordkev/ssc-imputation | /pgc-analysis/readme.sh | UTF-8 | 737 | 3 | 3 | [] | no_license | #!/bin/bash
# Pipeline driver: each stage below runs once per autosome (chr1-22).
# Extract sample name, phenotype, sex, and covars to a single file
./extract_sample_info.sh
# Get files needed to make VCFs from plnk
for chrom in $(seq 1 22)
do
    ./get_ref_snps.sh ${chrom}
done
# Run imputation on all chroms/cohorts
for chrom in $(seq 1 22)
do
    ./run_imputation_cl_wrapper.sh $chrom
done
# Merge all files for each chrom
for chrom in $(seq 1 22)
do
    ./merge_cohort_results.sh ${chrom}
done
# Merge all files for each chrom - SNPs
for chrom in $(seq 1 22)
do
    echo $chrom
    ./merge_cohort_results_snps.sh ${chrom}
done
# Perform regression analysis
for chrom in $(seq 1 22)
do
    ./run_regression_wrapper.sh ${chrom}
#    ./pgc_regression.sh ${chrom} 2> logs/regr_log_${chrom}.err
done
| true |
e3d135fccf84728acfc1493470425860e9477d06 | Shell | Quizp2p/mobilevis | /deploy/prod/ubuntu_bootstrap.sh | UTF-8 | 2,048 | 3.171875 | 3 | [] | no_license | #! /bin/sh
# This script can be copied and pasted onto an Ubuntu 12.04 machine and run, it's interactive, but only at the beginning
# When finished the admiralty application should be setup and responding to requests on port 80 and 8000
# Eventually this should be refactored to leverage CM tool for now relative ease of use is priority
echo ">>> Running prod specific setup"
# annoying ssh-add ubuntu bug, have to "turn on" the auth agent
eval "$(ssh-agent)"
# add our key to the ssh agent (setup in vagrant file.)
chmod 600 ~/.ssh/id_rsa
chmod 644 ~/.ssh/id_rsa.pub
ssh-add ~/.ssh/id_rsa
# add github to hosts
echo "Host gh
HostName github.com
User git
IdentityFile ~/.ssh/id_rsa" > ~/.ssh/config
# Add github to known hosts so we aren't prompted when cloning
ssh-keyscan -H github.com | sudo tee /etc/ssh/ssh_known_hosts
# install forever, to keep the server up
sudo npm install forever -g
# checkout source code into our working directory
cd /
git clone git@github.com:bocoup/mobilevis.git /vagrant
cd /vagrant
# Backup and Restore Scripts
sudo ln -s /vagrant/deploy/prod/backup/mobilevis-backup /usr/local/bin/mobilevis-backup
chmod 777 /vagrant/deploy/prod/backup/mobilevis-backup
sudo ln -s /vagrant/deploy/prod/backup/mobilevis-restore /usr/local/bin/mobilevis-restore
chmod 777 /vagrant/deploy/prod/backup/mobilevis-restore
# Source updating script
sudo ln -s /vagrant/deploy/prod/source/update-source /usr/local/bin/update-source
chmod 777 /vagrant/deploy/prod/source/update-source
# NGINX Conf - change this file to eventually point at production url!
sudo ln -s /vagrant/deploy/prod/nginx/mobilevis.conf /etc/nginx/conf.d/mobilevis.conf
# Set up log rotation
sudo ln -s /vagrant/deploy/prod/logrotate/mobilevis /etc/logrotate.d/mobilevis
sudo chown root:root /vagrant/deploy/prod/logrotate/mobilevis
sudo chmod 644 /vagrant/deploy/prod/logrotate/mobilevis
# TO RUN WITH FOREVER:
# sudo forever start --debug -l /vagrant/logs/forever-mobilevis.log \
# -c /usr/bin/node -o /vagrant/logs/upstart-mobilevis.log -a /vagrant/app.js | true |
9019c360e19755c16051cc45ebfb7072cf65ed44 | Shell | kvazimoda24/openmediavault | /deb/openmediavault-netatalk/usr/share/openmediavault/mkconf/netatalk | UTF-8 | 1,876 | 2.921875 | 3 | [] | no_license | #!/bin/sh
#
# This file is part of OpenMediaVault.
#
# @license http://www.gnu.org/licenses/gpl.html GPL Version 3
# @author Volker Theile <volker.theile@openmediavault.org>
# @copyright Copyright (c) 2009-2018 Volker Theile
#
# OpenMediaVault is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# OpenMediaVault is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenMediaVault. If not, see <http://www.gnu.org/licenses/>.
# Documentation/Howto:
# http://netatalk.sourceforge.net/wiki/index.php/Install_Netatalk_3.1.9_on_Debian_8_Jessie
# https://kofler.info/netatalk-3-konfiguration-fuer-time-machine-backups/
# http://www.rootathome.de/installation-von-netatalk-unter-debian/
# https://samuelhewitt.com/blog/2015-09-12-debian-linux-server-mac-os-time-machine-backups-how-to
# http://www.tuksub.de/2015/04/homeserver-31-time-machine-funktionalitaet-unter-debian-8-wiederherstellen/
# https://daniel-lange.com/archives/102-Apple-Timemachine-backups-on-Debian-8-Jessie.html
set -e
. /etc/default/openmediavault
. /usr/share/openmediavault/scripts/helper-functions
# Defaults may be overridden via the environment (${VAR:-default}).
OMV_MKCONF_SCRIPTS_DIR=${OMV_MKCONF_SCRIPTS_DIR:-"/usr/share/openmediavault/mkconf"}
OMV_NETATALK_EXTENSIONS_DIR=${OMV_NETATALK_EXTENSIONS_DIR:-"${OMV_MKCONF_SCRIPTS_DIR}/netatalk.d"}
OMV_NETATALK_AFP_CONFIG=${OMV_NETATALK_AFP_CONFIG:-"/etc/netatalk/afp.conf"}
# Add additional extensions to the configuration file
# (expansions quoted to be safe against unusual override values)
cat /dev/null > "${OMV_NETATALK_AFP_CONFIG}"
run-parts --exit-on-error "${OMV_NETATALK_EXTENSIONS_DIR}"
| true |
9502c08a2e745623e692d355f39b3ca147bafbb6 | Shell | duckie/unix-tools | /config/zshrc | UTF-8 | 2,087 | 2.859375 | 3 | [] | no_license | # Current env
UNIX_TOOLS_CONFIG_PATH="$( cd "$(dirname "${(%):-%x}")" ; pwd -P )"
# Load antigen
source "$HOME/bin/antigen.zsh"
antigen apply
# History management
HISTFILE=~/.histfile
HISTSIZE=10000
SAVEHIST=10000
setopt EXTENDED_HISTORY
setopt HIST_EXPIRE_DUPS_FIRST
setopt HIST_IGNORE_DUPS
setopt HIST_IGNORE_ALL_DUPS
setopt HIST_IGNORE_SPACE
setopt HIST_FIND_NO_DUPS
setopt HIST_SAVE_NO_DUPS
setopt notify
# Vi mode fixes
unsetopt beep
#zle-keymap-select () {
#if [ $KEYMAP = vicmd ]; then
#printf "\033[2 q"
#else
#printf "\033[6 q"
#fi
#}
#zle -N zle-keymap-select
#zle-line-init () {
#zle -K viins
#printf "\033[6 q"
#}
#zle -N zle-line-init
bindkey -v
bindkey "^?" backward-delete-char
# Completion
zstyle :compinstall filename '/home/canard/.zshrc'
autoload -Uz compinit
compinit
# Custom prompt, need to have cppprompt compiled
autoload -U colors && colors
setopt PROMPT_SUBST
# Right-prompt path segment: use the external `abbreviate_full_path` helper
# (shortens the cwd to ~30 chars) when it is on PATH, otherwise fall back to
# the zsh prompt escape '/%1d' (slash plus last path component).
# NOTE: a stray copy of the virtualenv line used to live at the end of this
# function; it duplicated the "(venv) " marker that virtualenv_prompt already
# contributes to RPS1, so it has been removed.
function path_prompt(){
  if type abbreviate_full_path >/dev/null; then
    abbreviate_full_path $(pwd) 30
  else
    echo '/%1d'
  fi
}
# Right-prompt virtualenv segment: cache the active virtualenv's basename in
# the global $venv and print it as "(name) " when one is active.
function virtualenv_prompt(){
  venv=''
  if [[ -n "${VIRTUAL_ENV:-}" ]]; then
    venv="${VIRTUAL_ENV##*/}"
  fi
  [[ -n "$venv" ]] && echo -n "($venv) "
}
#RPS1='%{%F{1}%}/%1d %{%F{5}%}$(virtualenv_prompt)%{%F{default}%} '
# Right prompt: abbreviated path in color 1, virtualenv marker in color 5.
# Requires PROMPT_SUBST (set above) so $(…) is re-evaluated for every prompt.
RPS1='%{%F{1}%}$(path_prompt) %{%F{5}%}$(virtualenv_prompt)%{%F{default}%} '
# Suppress the virtualenv's own prompt mangling; we render it ourselves.
export VIRTUAL_ENV_DISABLE_PROMPT=1
#if [[ -n "${TMUX:-}" ]]; then
PS1='%{%F{1}%}%#%{%F{default}%} '
#PS1='$(prinrc_prompt $?)${pchar_error}@:${pchar_blank}$(printf "%s" ${PWD##*/})/]\[\033[01;31m\]$\[\033[00m\] '
#else
#PS1='$(prinrc_prompt $?)\u${pchar_error}@${pchar_blank}\h${pchar_error}:${pchar_blank}$(printf "%s" ${PWD##*/})/]\[\033[01;31m\]$\[\033[00m\] '
#fi
# Nice colors
if [ ${TERM} = "xterm" ]; then
export TERM="xterm-256color"
fi
# Various utilities
source "${UNIX_TOOLS_CONFIG_PATH}/shellisms.sh"
# No history dups
export HISTCONTROL=ignoreboth:erasedups
# Development
export CC=/usr/bin/clang
export CXX=/usr/bin/clang++
#export PIPENV_VENV_IN_PROJECT=1
| true |
da3b5b36494097c8ac1855e175df4e56eb8a19fd | Shell | ErikBorra/contropedia-sprint-scripts | /discussions_match/get_page_wikitools_infos.sh | UTF-8 | 660 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# To run on the wikitools server
# UNUSED in the end
# $1 is a Wikipedia article title; spaces become underscores per MediaWiki
# page_title convention.
page=$(echo $1 | sed 's/ /_/g')
# Backslash-escape quote characters before splicing the title into SQL.
# NOTE(review): this only escapes quotes, not other SQL metacharacters —
# acceptable here because titles come from a trusted operator, but it is not
# general injection protection.
escpage=$(echo $page | sed 's/\(["'"'"']\)/\\\1/g')
echo "$escpage"
# Resolve the article's page_id in the enwiki DB (namespace 0 = articles);
# grep drops the column header emitted by the `sql` wrapper.
pageid=$(sql en "select page_id from page where page_title='$escpage' and page_namespace=0" | grep -v "^page_id")
if [ -z "$pageid" ]; then
echo "No page found in Wikipedia en DB with this title « $page »"
exit 1
fi
mkdir -p "data/$page"
# Dump the full revision history plus the ids of the matching discussion
# pages (odd namespaces are talk namespaces, hence `page_namespace % 2 = 1`).
sql en "select * from revision where rev_page=$pageid" > "data/$page/revisions.tsv"
sql en "select page_id from page where (page_title LIKE '$escpage/%' OR page_title = '$escpage') AND page_namespace % 2 = 1" > "data/$page/discussions_pageids.tsv"
| true |
0cd817dde3edf96a3cf126aa36913143db5c9631 | Shell | lexandr0s/hiveos-linux | /hive/miners/noncerpro-cuda/h-run.sh | UTF-8 | 394 | 3.0625 | 3 | [] | no_license | #!/usr/bin/env bash
#!/usr/bin/env bash
# Launch the noncerpro-cuda miner unless an instance is already running.
# MINER_DIR, MINER_VER, MINER_NAME, MINER_API_PORT, MINER_LOG_BASENAME and
# the RED/NOCOLOR color codes are expected to be exported by the caller
# (HiveOS h-run framework) — TODO confirm; none are set here.
# NOTE(review): the ps|grep|grep -v grep idiom could be pgrep -c, kept as-is.
[[ `ps aux | grep "./noncerpro-cuda" | grep -v grep | wc -l` != 0 ]] &&
echo -e "${RED}$MINER_NAME miner is already running${NOCOLOR}" &&
exit 1
cd $MINER_DIR/$MINER_VER
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${MINER_DIR}/${MINER_VER}
# miner.conf holds the command-line flags; tee mirrors all output to the log.
./noncerpro-cuda $(< ./miner.conf) --api=true --apiport=${MINER_API_PORT} 2>&1 | tee $MINER_LOG_BASENAME.log
| true |
749dc0a476b6b71ded552102cbb553fd90c4ec2f | Shell | VertexOS/android_vendor_vertex | /tools/build.sh | UTF-8 | 1,388 | 3.15625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Interactive VertexOS build helper: pick a build type/variant, set up the
# Android build environment, lunch the device target, and build "bacon".
echo "Choose build type:"
select choice in OFFICIAL-user TEST-user TEST-userdebug TEST-eng EXPERIMENTAL-user EXPERIMENTAL-userdebug EXPERIMENTAL-eng
do
case "$choice" in
    "OFFICIAL-user")
        export VERTEX_BUILDTYPE=OFFICIAL
        export BUILD_VARIANT=user
        # Official builds must not ship Development apks; remove any left
        # over from previous builds. -print0/-0 keeps paths containing
        # whitespace intact and -r avoids running a bare `rm` (which errors
        # with "missing operand") when nothing matches.
        find . -name "*Development*.apk" -print0 | xargs -0 -r rm -f --
        break;;
    "TEST-user")
        export VERTEX_BUILDTYPE=TEST
        export BUILD_VARIANT=user
        break;;
    "TEST-userdebug")
        export VERTEX_BUILDTYPE=TEST
        export BUILD_VARIANT=userdebug
        break;;
    "TEST-eng")
        export VERTEX_BUILDTYPE=TEST
        export BUILD_VARIANT=eng
        break;;
    "EXPERIMENTAL-user")
        export VERTEX_BUILDTYPE=EXPERIMENTAL
        export BUILD_VARIANT=user
        break;;
    "EXPERIMENTAL-userdebug")
        export VERTEX_BUILDTYPE=EXPERIMENTAL
        export BUILD_VARIANT=userdebug
        break;;
    "EXPERIMENTAL-eng")
        export VERTEX_BUILDTYPE=EXPERIMENTAL
        export BUILD_VARIANT=eng
        break;;
    *) echo "Invalid option. Try again!"
        ;;
esac
done
# Default device unless the caller exported one.
if [[ -z "$BUILD_DEVICE" ]]; then
    export BUILD_DEVICE=oneplus3
fi
. build/envsetup.sh
lunch vertex_${BUILD_DEVICE}-${BUILD_VARIANT}
time make bacon -j16
| true |
c777b7fc72ce03ebf58f5382b51b07b772f7b0c2 | Shell | LyleHenkeman/dotfiles | /install.sh | UTF-8 | 5,736 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env bash
set -e
# Symlink $PWD/$1 into $2 (as $2/$1) or, by default, into $HOME (as ~/.$1).
# If the target already points at the source, nothing is done; any other
# existing target is first moved aside to a timestamped backup.
function link_file {
  local source="${PWD}/$1"
  local target backup

  if [[ "${2}" ]]; then
    target="${2}/${1}"
  else
    target="${HOME}/.${1}"
  fi

  if [ -e "${target}" ]
  then
    # Already linked to the right place? Nothing to do.
    if [ "_$(readlink "${target}")" == "_${source}" ]
    then
      echo "[SKIP] Existing target ${target}, skipping file $1"
      return
    else
      backup=$target.backup$(date +%s)
      echo "[BACKUP] Saving backup of exising file ${target} as ${backup}"
      # Quote both paths so names containing whitespace survive.
      mv "$target" "$backup"
    fi
  fi

  echo "[ OK ] Creating link to ${source}"
  ln -sf "${source}" "${target}"
}
# Clone the powerline fonts repo, run its installer (copies fonts into
# ~/.fonts and refreshes the font cache), then delete the working copy.
# Network access to GitHub is required.
function install_powerline_fonts {
  mkdir -p ~/.fonts
  git clone https://github.com/powerline/fonts.git powerline-fonts
  cd powerline-fonts
  ./install.sh
  cd ..
  rm -rf powerline-fonts
}
# Runs unconditionally on every invocation (no "already installed" check,
# unlike the awesome-fonts install below).
install_powerline_fonts
# Install the patched awesome-terminal-fonts into ~/.fonts from the
# repository's "patching-strategy" branch, then delete the working copy.
function install_awesome_fonts {
  mkdir -p ~/.fonts
  git clone https://github.com/gabrielelana/awesome-terminal-fonts.git
  cd awesome-terminal-fonts
  git checkout patching-strategy
  cd patched
  cp *.ttf ~/.fonts
  cd ../..
  rm -rf awesome-terminal-fonts
}
# Link every tracked dotfile from this repo into $HOME as a hidden file.
link_file vim
link_file vimrc
link_file bashrc
link_file zshrc
link_file screenrc
link_file pylintrc
link_file eslintrc
link_file gitconfig
link_file gitignore_global
link_file hgrc
link_file ackrc
link_file tmux.conf
link_file tmux.django.conf
link_file dput.cf
link_file mancolor
link_file xbindkeysrc
link_file shell_functions.sh

# flake8 lives under ~/.config rather than as a dotfile in $HOME.
mkdir -p $HOME/.config
link_file flake8 $HOME/.config

# Make zsh the login shell if it is installed and not already the default.
# NOTE(review): `-a` inside [ ] is obsolescent; kept as-is.
if [ "$(basename "$SHELL")" != "zsh" -a -f "$(which zsh)" ]; then
  echo "Switching default shell to zsh, please enter your password:"
  chsh -s $(which zsh)
fi

# Bootstrap oh-my-zsh with whichever downloader is available.
if [ ! -d "$HOME/.oh-my-zsh" ]
then
  echo "[INSTALL] Installing oh-my-zsh"
  if [ "$(which curl)" ]; then
    curl -L http://install.ohmyz.sh | bash
  elif [ "$(which wget)" ]; then
    wget --no-check-certificate http://install.ohmyz.sh -O - | bash
  fi
  # Check that OhMyZsh as been installed and create custom plugin dir
  if [ -f "$HOME/.oh-my-zsh/oh-my-zsh.sh" ]; then
    mkdir -p ~/.oh-my-zsh/custom/plugins
  fi
else
  echo "[SKIP] oh-my-zsh is already installed"
fi
# Install each oh-my-zsh custom plugin/theme only if its directory is absent.
if [ -d $HOME/.oh-my-zsh/custom ]; then
  if [ ! -d "$HOME/.oh-my-zsh/custom/plugins/grunt" ]; then
    echo "[INSTALL] zsh Grunt plugin"
    mkdir -p ~/.oh-my-zsh/custom/plugins
    git clone https://github.com/yonchu/grunt-zsh-completion.git ~/.oh-my-zsh/custom/plugins/grunt
  fi
  if [ ! -d "$HOME/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting" ]; then
    echo "[INSTALL] zsh syntax highlighting"
    mkdir -p ~/.oh-my-zsh/custom/plugins
    git clone git://github.com/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting/
  fi
  if [ ! -d "$HOME/.oh-my-zsh/custom/plugins/git-flow-completion" ]; then
    echo "[INSTALL] zsh git flow completion"
    git clone https://github.com/bobthecow/git-flow-completion ~/.oh-my-zsh/custom/plugins/git-flow-completion
  fi
  if [ ! -d "$HOME/.oh-my-zsh/custom/themes/powerlevel9k" ]; then
    echo "[INSTALL] powerlevel9k theme"
    mkdir -p ~/.oh-my-zsh/custom/themes/
    git clone https://github.com/bhilburn/powerlevel9k.git ~/.oh-my-zsh/custom/themes/powerlevel9k
  fi
fi

# Terminator config is symlinked (not via link_file — different target name).
mkdir -p ~/.config/terminator
if [ ! -e ~/.config/terminator/config ]; then
  ln -s $(pwd)/terminator/config ~/.config/terminator/config
fi

# fzf fuzzy finder, installed from source without touching rc files.
if [ ! -d "$HOME/.fzf" ]; then
  git clone --depth 1 https://github.com/junegunn/fzf.git $HOME/.fzf
  ~/.fzf/install --no-update-rc
fi

# Patched awesome fonts, keyed on one representative ttf being present.
if [ ! -f "$HOME/.fonts/Droid+Sans+Mono+Awesome.ttf" ]; then
  install_awesome_fonts
fi

# Fix broken multi-monitor window positioning in Gnome Shell
# "When true, the new windows will always be put in the center of the active screen of the monitor."
if [ "$(which gsettings)" ]; then
  gsettings set org.gnome.mutter center-new-windows true
fi
# Check that the running system has enough inotify watches
# 524288 is half the recommended 1048576; below that, file watchers start
# failing, so shout at the user with a banner (warn only — do not abort).
watches=$(cat /proc/sys/fs/inotify/max_user_watches)
if [ $watches -lt 524288 ]; then
  echo "*********************************************************************"
  echo "*********************************************************************"
  echo "*********************************************************************"
  echo "*********************************************************************"
  echo "*****                                                           *****"
  echo "*****                                                           *****"
  echo "*****     YOUR NUMBER OF INOTIFY WATCHES IS DANGEROUSLY LOW.    *****"
  echo "*****     SEVERAL TOOLS SUCH AS TAIL, LIVERELOAD AND DROPBOX    *****"
  echo "*****     WON'T WORK PROPERLY.                                  *****"
  echo "*****                                                           *****"
  echo "*****     PLEASE FIX THIS ASAP!! RUN AS ROOT:                   *****"
  echo "*****                                                           *****"
  echo "*****     echo 1048576 > /proc/sys/fs/inotify/max_user_watches  *****"
  echo "*****echo fs.inotify.max_user_watches=1048576 >> /etc/sysctl.conf*****"
  echo "*****                                                           *****"
  echo "*****                                                           *****"
  echo "*********************************************************************"
  echo "*********************************************************************"
  echo "*********************************************************************"
  echo "*********************************************************************"
fi
| true |
d647f8eda9eba8f6584e427c3490cb70c6be156a | Shell | liuxiaoping2020/asap_reproducibility | /CD4_CRISPR_asapseq/code/62_bam_sort_MACS2_hg38.sh | UTF-8 | 1,246 | 3.28125 | 3 | [] | no_license | #!/bin/sh
#$ -S /bin/sh
########################################################
# Per-sgRNA ATAC/ASAP-seq peak calling: index the BAM, call peaks with
# MACS2, export a bigwig pileup track, then blacklist-filter the peaks.
# $1 = path to an input BAM file; COND_TMP = its basename without extension.
FILE_NAME=$1;
COND_TMP=$(basename ${FILE_NAME} | cut -d "." -f 1);
PEAK=output/each_sgRNA/MACS2/${COND_TMP}_peaks.narrowPeak;
FILTERED_PEAK=output/each_sgRNA/MACS2_peak/${COND_TMP}_peaks.narrowPeak.filt.bed
GENOME_FILE=data/UCSC_hg38_chromInfo.txt;
BLACKLIST=data/hg38-blacklist.v2.bed
echo COND_TMP : ${COND_TMP};
mkdir -p output/each_sgRNA/MACS2 output/each_sgRNA/bigwig output/each_sgRNA/MACS2_peak;
########################################################
samtools index ${FILE_NAME}
########################################################
##MACS2
# Center each cut site: shift reads left by half the smoothing window and
# extend to the full window, the standard MACS2 recipe for Tn5 insertions.
smooth_window=150;
shiftsize=$((-$smooth_window/2));
macs2 callpeak \
-t ${FILE_NAME} \
-f BAM -g hs \
--outdir output/each_sgRNA/MACS2 \
-n ${COND_TMP} \
-B --nomodel \
--extsize ${smooth_window} \
--shift ${shiftsize} \
--SPMR --call-summits --keep-dup 1\
-q 0.1
bedGraphToBigWig output/each_sgRNA/MACS2/${COND_TMP}_treat_pileup.bdg ${GENOME_FILE} output/each_sgRNA/bigwig/${COND_TMP}_treat_pileup.bw
########## Blacklist filtering
# Drop blacklist overlaps, cap scores at the BED maximum of 1000, and keep
# only primary chromosomes (chr1..22, X, Y — no _alt/_random contigs).
bedtools intersect -v -a ${PEAK} -b ${BLACKLIST} \
| awk 'BEGIN {OFS="\t"} {if($5>1000)$5=1000;print $0}' \
| grep -P 'chr[0-9XY]+(?!_)' > ${FILTERED_PEAK}
| true |
3a9c210014579eee736be7ebadefb850bd5d015a | Shell | saltstack-formulas/citrix-linuxvda-formula | /linuxvda/files/vdasetup.sh | UTF-8 | 4,266 | 3.046875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
###########################################################
# This script is salt managed. Manual changes may be lost.
# Configuration is stored at the bottom for quick reference.
############################################################
#configure the Linux VDA single command
# The {{ ... }} placeholders are Jinja2 variables rendered by Salt; each
# CTX_XDL_* value is passed as an environment variable to ctxsetup, which is
# Citrix's unattended Linux VDA configuration tool (see the variable
# reference in the comment block below). The script exits with ctxsetup's
# own status.
CTX_XDL_SUPPORT_DDC_AS_CNAME="{{ CTX_XDL_SUPPORT_DDC_AS_CNAME }}" \
CTX_XDL_DDC_LIST="{{ CTX_XDL_DDC_LIST }}" \
CTX_XDL_VDA_PORT="{{ CTX_XDL_VDA_PORT }}" \
CTX_XDL_REGISTER_SERVICE="{{ CTX_XDL_REGISTER_SERVICE }}" \
CTX_XDL_ADD_FIREWALL_RULES="{{ CTX_XDL_ADD_FIREWALL_RULES }}" \
CTX_XDL_AD_INTEGRATION="{{ CTX_XDL_AD_INTEGRATION }}" \
CTX_XDL_HDX_3D_PRO="{{ CTX_XDL_HDX_3D_PRO }}" \
CTX_XDL_VDI_MODE="{{ CTX_XDL_VDI_MODE }}" \
CTX_XDL_SITE_NAME="{{ CTX_XDL_SITE_NAME }}" \
CTX_XDL_LDAP_LIST="{{ CTX_XDL_LDAP_LIST }}" \
CTX_XDL_SEARCH_BASE="{{ CTX_XDL_SEARCH_BASE }}" \
CTX_XDL_START_SERVICE="{{ CTX_XDL_START_SERVICE }}" \
CTX_XDL_FAS_LIST="{{ CTX_XDL_FAS_LIST }}" \
{{ ctxsetup }}
exit $?
#
# Supported environment variables for quick reference.
#
#CTX_XDL_SUPPORT_DDC_AS_CNAME = Y | N
# The Linux VDA supports specifying a Delivery Controller name
# using a DNS CNAME record. This is typically set to N.
#CTX_XDL_DDC_LIST = list-ddc-fqdns
# – The Linux VDA requires a space-separated list of Delivery
# Controller Fully Qualified Domain Names. (FQDNs) to use for
# registering with a Delivery Controller. At least one FQDN
# or CNAME alias must be specified.
#CTX_XDL_VDA_PORT = port-number
# – The Linux VDA communicates with Delivery Controllers using
# a TCP/IP port. This is typically port 80.
#CTX_XDL_REGISTER_SERVICE = Y | N
# - The Linux Virtual Desktop services support starting during
# boot. This is typically set to Y.
#CTX_XDL_ADD_FIREWALL_RULES = Y | N
# – The Linux Virtual Desktop services require incoming network
# connections to be allowed through the system firewall. You
# can automatically open the required ports (by default ports
# 80 and 1494) in the system firewall for the Linux Virtual
# Desktop. This is typically set to Y.
#CTX_XDL_AD_INTEGRATION = 1 | 2 | 3 | 4
# – The Linux VDA requires Kerberos configuration settings to
# authenticate with the Delivery Controllers. The Kerberos
# configuration is determined from the installed and configured
# Active Directory integration tool on the system. Specify the
# supported Active Directory integration method to use:
#1 – Samba Winbind
#2 – Quest Authentication Service
#3 – Centrify DirectControl
#4 – SSSD
#CTX_XDL_HDX_3D_PRO = Y | N
# – Linux Virtual Desktop supports HDX 3D Pro, a set of graphics
# acceleration technologies designed to optimize the virtualization
# of rich graphics applications. HDX 3D Pro requires a compatible
# NVIDIA GRID graphics card to be installed. If HDX 3D Pro is
# selected, the Virtual Delivery Agent is configured for VDI
# desktops (single-session) mode – (i.e. CTX_XDL_VDI_MODE=Y).
# This is not supported on SUSE. Ensure that this value is set to N.
#CTX_XDL_VDI_MODE = Y | N
# - Whether to configure the machine as a dedicated desktop delivery
# model (VDI) or hosted shared desktop delivery model. For HDX 3D Pro
# environments, set this to Y. This is typically set to N.
#CTX_XDL_SITE_NAME = dns-name
# – The Linux VDA discovers LDAP servers using DNS, querying for LDAP
# service records. To limit the DNS search results to a local site,
# specify a DNS site name. This is typically empty [none].
#CTX_XDL_LDAP_LIST = list-ldap-servers
# – The Linux VDA by default queries DNS to discover LDAP servers.
# However if DNS cannot provide LDAP service records, you can provide
# a space-separated list of LDAP Fully Qualified Domain Names (FQDNs)
# with LDAP port (e.g. ad1.mycompany.com:389). This is typically
# empty [none].
#CTX_XDL_SEARCH_BASE = search-base
# – The Linux VDA by default queries LDAP using a search base set to
# the root of the Active Directory Domain (e.g. DC=mycompany,DC=com).
# However to improve search performance, you can specify a search base
# (e.g. OU=VDI,DC=mycompany,DC=com). This is typically empty [none].
#CTX_XDL_START_SERVICE = Y | N
# - Whether or not the Linux VDA services are started when the Linux
# VDA configuration is complete. This is typically set to Y.
| true |
4c25dbe686389b67e5a33522d05cef365a5efd5c | Shell | oanhnn/docker-laravel-echo-server | /bin/docker-entrypoint | UTF-8 | 2,306 | 3.46875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
set -e
# Docker entrypoint for laravel-echo-server. Rewrites bare sub-commands
# ("init", "start", …) into full `laravel-echo-server <cmd>` invocations and
# templates laravel-echo-server.json from environment variables on first run.
# NOTE(review): this file uses [[ ]] and == under #!/bin/sh — works on
# busybox ash / bash-as-sh, but not strictly POSIX; confirm the base image.
# laravel-echo-server init
if [[ "$1" == 'init' ]]; then
    set -- laravel-echo-server "$@"
fi

# A stale lock file from an unclean shutdown would prevent startup.
if [ "$1" == 'start' ] && [ -f '/app/laravel-echo-server.lock' ]; then
    rm /app/laravel-echo-server.lock
fi

# laravel-echo-server <sub-command>
if [[ "$1" == 'start' ]] || [[ "$1" == 'client:add' ]] || [[ "$1" == 'client:remove' ]]; then
    if [[ "${LARAVEL_ECHO_SERVER_GENERATE_CONFIG:-true}" == "false" ]]; then
        # wait for another process to inject the config
        echo -n "Waiting for /app/laravel-echo-server.json"
        while [[ ! -f /app/laravel-echo-server.json ]]; do
            sleep 2
            echo -n "."
        done
    elif [[ ! -f /app/laravel-echo-server.json ]]; then
        cp /usr/local/src/laravel-echo-server.json /app/laravel-echo-server.json
        # Replace with environment variables
        # Placeholder tokens in the template are substituted in place; each
        # falls back through a chain of defaults when unset.
        sed -i "s|LARAVEL_ECHO_AUTH_HOST|${LARAVEL_ECHO_AUTH_HOST:-${LARAVEL_ECHO_SERVER_HOST:-localhost}}|i" /app/laravel-echo-server.json
        sed -i "s|LARAVEL_ECHO_ALLOW_ORIGIN|${LARAVEL_ECHO_ALLOW_ORIGIN:-${LARAVEL_ECHO_AUTH_HOST:-${LARAVEL_ECHO_SERVER_HOST:-localhost}}}|i" /app/laravel-echo-server.json

        LARAVEL_ECHO_CLIENTS="[]"
        if [ ! -z "${LARAVEL_ECHO_CLIENT_APP_ID}" ]; then
            LARAVEL_ECHO_CLIENTS="[{\"appId\": \"${LARAVEL_ECHO_CLIENT_APP_ID}\",\"key\": \"${LARAVEL_ECHO_CLIENT_APP_KEY}\"}]"
        fi
        sed -i "s|\"LARAVEL_ECHO_CLIENTS\"|${LARAVEL_ECHO_CLIENTS}|i" /app/laravel-echo-server.json

        sed -i "s|LARAVEL_ECHO_SERVER_DB|${LARAVEL_ECHO_SERVER_DB:-redis}|i" /app/laravel-echo-server.json
        sed -i "s|REDIS_HOST|${REDIS_HOST:-redis}|i" /app/laravel-echo-server.json
        sed -i "s|REDIS_PORT|${REDIS_PORT:-6379}|i" /app/laravel-echo-server.json
        sed -i "s|REDIS_PASSWORD|${REDIS_PASSWORD}|i" /app/laravel-echo-server.json
        sed -i "s|REDIS_PREFIX|${REDIS_PREFIX-laravel_database_}|i" /app/laravel-echo-server.json
        sed -i "s|REDIS_DB|${REDIS_DB:-0}|i" /app/laravel-echo-server.json

        # Remove password config if it is empty
        sed -i "s|\"password\": \"\",||i" /app/laravel-echo-server.json
    fi

    set -- laravel-echo-server "$@"
fi

# first arg is `-f` or `--some-option`
if [[ "${1#-}" != "$1" ]]; then
    set -- laravel-echo-server "$@"
fi

# Replace PID 1 with the requested command so signals are delivered directly.
exec "$@"
| true |
9ef510df29b7fbf05aa6651b4457d832f078a84f | Shell | wdahl/MyCat | /mycat3.sh | UTF-8 | 772 | 4.125 | 4 | [] | no_license | #!/bin/sh
# Prints out a help message if no arguments are given
if [ "$#" = "0" ]
then
    echo "Usage: mycat3.sh [-u|-l|-t] FILE ..."
    echo
    echo "Description: concatenates FILE(s) to standard output."
fi

# Concatenates the text of the given files, optionally case-transformed.
# With an option, files start at $2; without one, they start at $1.
if [ "$1" = "-u" ]; then # makes output uppercase
    while [ "$#" -gt "1" ]
    do
        cat "$2" | awk '{print toupper($0)}'
        shift
    done
elif [ "$1" = "-l" ]; then # makes output lowercase
    while [ "$#" -gt "1" ]
    do
        cat "$2" | awk '{print tolower($0)}'
        shift
    done
elif [ "$1" = "-t" ]; then # title case: lowercase all, then capitalize words
    # NOTE(review): \L and \u are GNU sed extensions — TODO confirm on BSD.
    while [ "$#" -gt "1" ]
    do
        cat "$2" | sed 's/.*/\L&/; s/[a-z]*/\u&/g'
        shift
    done
else # does not change output when no option is given
    while [ "$#" -gt "0" ]
    do
        cat "$1"
        shift
    done
fi
d1cec727c70e3cb3c206d8b2058c9ddb2bda3893 | Shell | TheCBaH/docker_host | /gcloud/test | UTF-8 | 1,695 | 3.265625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -eux

# Delete every existing compute instance in the project. The URI is split on
# "/" and field 11 is the instance name; --no-run-if-empty skips deletion
# when the project has no instances.
cleanup () {
gcloud compute instances list --uri|awk -F / '{print $11}'|xargs --no-run-if-empty -n 1 gcloud --verbosity info compute instances delete --quiet
}
cleanup
# The trap is intentionally only echoed (disabled), so the instance survives
# the run for inspection.
echo trap cleanup EXIT
echo --machine-type f1-micro
gcloud --verbosity info compute instances create --image-project ubuntu-os-cloud --image-family ubuntu-minimal-1804-lts --boot-disk-size=10GB --custom-cpu=1 --custom-memory=1024MB test
echo gcloud compute instances describe test
# Poll up to 10 times (5 s apart) until SSH to the new instance succeeds.
for t in $(seq -s ' ' 1 10); do
if gcloud compute ssh test -- id ; then
break
fi
sleep 5
done
gcloud compute instances attach-disk test --disk=mirror
# Provision the instance: swap, packages, docker group, and a fresh ext4
# filesystem on the attached mirror disk mounted at /data. The quoted
# heredoc delimiter prevents local variable expansion.
gcloud compute ssh test -- sudo bash <<"_EOF_"
set -eux
dd if=/dev/zero of=/swapfile bs=1M count=512
mkswap /swapfile
chmod 0600 /swapfile
swapon -f /swapfile
apt-get update
apt-get install -y --no-install-recommends apt-utils
apt-get install -y --no-install-recommends \
docker.io\
git-core\
less\
make\
sysstat\
vim-tiny\
;
usermod -aG docker $SUDO_USER
mkfs.ext4 -m 0 -F -O ^has_journal /dev/sdb
mkdir -p /data
mount /dev/sdb /data
_EOF_
# Unquoted delimiter: $USER below expands LOCALLY before being sent.
gcloud compute ssh test -- bash <<_EOF_
set -eux
git clone https://github.com/TheCBaH/aosp-builder.git
cd aosp-builder
git config --global user.name $USER
git config --global user.email "$USER@gmail.com"
make user
make volumes
sudo mkdir -p /data/aosp/aosp_mirror-master
make mirror.master SYNC_JOBS=1
_EOF_
gcloud compute ssh test -- sudo bash <<_EOF_
set -eux
sudo umount /data
_EOF_
gcloud compute instances detach-disk test --disk=mirror
gcloud compute ssh test -- sudo dd if=/dev/sda of=/dev/null bs=1M count=1024
gcloud compute ssh test -- lsblk -m
# Stop is also only echoed; run it manually to stop the instance.
echo gcloud --verbosity info compute instances stop test
| true |
b1311fc96a784f4ad432acab50698e45b7145afc | Shell | DIA-SWATH-Course/Tutorials | /run_IPF.sh | UTF-8 | 1,147 | 2.78125 | 3 | [] | no_license | # Change directory to the Tutorial6 folder for the IPF workflow
cd /c/DIA_Course/Tutorial6_IPF/
# Library generation
# Decoy generation
# File conversion to pqp format
# OpenSWATH
# PyProphet merging of separate OpenSWATH output files
# Note: the * again denotes a wildcard meaning that
# all files ending on .osw are used
# Score features on MS1 level
# Score features on MS2 level
# Score features on transition level
# Export a pdf with all the separate subscore distributions
# Run the IPF specific score integration
# Export the final quantification matrix
# Note: In the standard OpenSWATH tutorial we have exported the
# a tsv file. Here we now use the option to directly export a
# quantification matrix. If you want to see the difference you
# open both files in excel to compare the different formats.
# Run mapDIA on data to identify most perturbed proteins
# First some data curation to only extract information that mapDIA requires
# Reorder the merged.tsv columns into the 8-column layout mapDIA expects.
cat merged.tsv | awk -F'\t' '{OFS="\t"; print $4, $1, $5, $7, $9, $6, $8, $10}' > mapDIA_input.tsv
# Actually run mapDIA
./mapDIA_win64.exe mapDIA.params
| true |
d21f2e68635420d2426c62442a92cf232294a76c | Shell | elnavarrete/docs | /script/cibuild | UTF-8 | 144 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# CI check: build the site with Jekyll and verify the expected entry page
# was produced; the job must go red when it was not.
set -e # halt script on error

bundle exec jekyll build
if test -e "./_site/index.html";then
  echo "It builds!"
else
  # Previously a missing index.html was silently ignored and CI passed;
  # treat it as a build failure instead.
  echo "Build did not produce _site/index.html" >&2
  rm -Rf _site
  exit 1
fi
rm -Rf _site
| true |
1a362dfafc3c1df9e52376570790c77a7f50254e | Shell | Danny02/kafka-cli | /test/test-boot-order.sh | UTF-8 | 391 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | test_start_required_services() {
kafka start kafka 1>/dev/null
assert "kafka status zookeeper | grep UP" "zookeeper should be up"
}
# bash_unit test: stopping zookeeper must also take down kafka, which
# depends on it.
test_stop_depending_services() {
    kafka start kafka 1>/dev/null
    kafka stop zookeeper 1>/dev/null
    assert "kafka status kafka | grep DOWN" "kafka should be down"
}

# Run once before the suite: start from a clean slate.
setup_suite() {
    teardown
}

# Destroy all managed containers/state between tests.
teardown() {
    kafka destroy 1>/dev/null
}
a63384ff6cb0228e2b76acbcbde6f97685c220ba | Shell | iklues17/mariadb-playbooks | /roles/mariadb/templates/replication-manager | UTF-8 | 1,509 | 3.6875 | 4 | [] | no_license | #!/bin/sh
#
# replication-manager Replication HA Management for MariaDB 10.x
#
# chkconfig: - 90 10
# description: replication-manager
# processname: replication-manager
# config: /etc/replication-manager/config.toml
# SysV init script: start/stop/status/restart the replication-manager
# daemon. Relies on the RHEL init helpers (status, echo_success, …).
# Source function library.
. /etc/rc.d/init.d/functions

RETVAL=0
PROCNAME=replication-manager

# See how we were called.
case "$1" in
  start)
        echo -n "Starting replication-manager: "
        /usr/bin/replication-manager monitor --daemon > /dev/null 2>&1 &
        sleep 1
        #pkill -0 $PROCNAME
        # Signal 0 only probes that the PID is alive; nothing is delivered.
        kill -n 0 $(pidof $PROCNAME)
        RETVAL=$?
        if [ $RETVAL -eq 0 ]
        then
            echo_success
            # Lock file marks the service as running for the rc system.
            touch /var/lock/subsys/replication-manager
        else
            echo_failure
        fi
        echo
        ;;
  stop)
        echo -n "Stopping replication-manager: "
        status $PROCNAME
        RETVAL=$?
        # Only attempt the kill when the daemon is actually running.
        if [ $RETVAL -eq 0 ]
        then
            kill $(pidof replication-manager)
            RETVAL=$?
            if [ $RETVAL -eq 0 ]
            then
                echo_success
                rm -f /var/lock/subsys/replication-manager
            else
                echo_failure
            fi
            echo
        fi
        echo
        ;;
  status)
        status $PROCNAME
        RETVAL=$?
        ;;
  restart|reload)
        $0 stop
        $0 start
        RETVAL=$?
        ;;
  *)
        echo "Usage: replication-manager {start|stop|status|restart}"
        exit 1
esac

exit $RETVAL
313d98035ea8a615651ffc83a2292378f53c58ba | Shell | silentlexx/android_ffmpeg_buildtool | /openh264.sh | UTF-8 | 626 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# Cross-compile openh264 as a static library for Android. Expects $CPU
# (arm/arm64/x86/x86_64), $NDK, $PREFIX and $J (make -j flags) to be
# exported by the calling build script — TODO confirm against caller.
cd openh264

#make clean
export LIBS="-lm -lpthread -lc"
export arch=$CPU

# ABI names per architecture. NOTE(review): arch_abis is never referenced
# below — apparently dead, kept for reference.
declare -A arch_abis=(
  ["arm"]="armeabi-v7a"
  ["arm64"]="arm64-v8a"
  ["x86"]="x86"
  ["x86_64"]="x86_64"
)

# Minimum Android API level per architecture (64-bit needs 21+).
declare -A ndk_levels=(
  ["arm"]="18"
  ["arm64"]="21"
  ["x86"]="18"
  ["x86_64"]="21"
)

# 32-bit x86 needs position-independent assembly.
if [ "$CPU" == "x86" ]; then
  export ASMFLAGS=-DX86_32_PICASM
else
  export ASMFLAGS=
fi

make OS=android NDKROOT=$NDK TARGET=android-${ndk_levels[$arch]} clean
make $J PREFIX=$PREFIX OS=android NDKROOT=$NDK TARGET=android-${ndk_levels[$arch]} NDKLEVEL=${ndk_levels[$arch]} ARCH=$arch -C ./ NDK_TOOLCHAIN_VERSION=gcc install-static
| true |
011c0a4243a27ab0a014e53cc920d704c324effa | Shell | rosesonfire/netlify-tic-tac-toe | /api-server/docker/build-for-production | UTF-8 | 207 | 2.625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Trace each command and abort on the first failure so CI output shows
# exactly which step broke.
set -x
set -e

echo "Starting in 'production' mode ..."

# Check for linting errors
yarn run lint

# Build for production
yarn run build

# Start the server in production (port 5000)
yarn start
| true |
0791ee2082526c453c9308ef714374144707232a | Shell | WinterMute1000/Spurs | /spurs.sh | UTF-8 | 2,189 | 3.546875 | 4 | [] | no_license | #! /bin/bash
# Global state shared by all functions below:
#   env_name  - name of the env var the shellcode is exported under
#   is_option - set to 1 once a shellcode-selecting option was handled
#   get_addr  - when non-zero, ./get_env_addr is run at the end
env_name="EGGSHELL"
is_option=0
get_addr=1
# Print the usage summary for all supported options.
help()
{
	echo "spurs [OPTIONS]"
	echo "n arg	arg shellcode number"
	echo "-1) simple shellcode"
	echo "-2) simple shellcode containing exit()"
	echo "-3) shellcode containing setreuid() and exit()"
	echo "-4) execve /bin/sh shellcode"
	echo "d arg	arg is your custom shellcode"
	echo "f arg	arg read file and put env"
	echo "s arg	arg shellcode name(default:EGGSHELL)"
}
# Export one of the built-in shellcode payloads (selected by number $1) into
# the environment variable named by $env_name. python -c print turns the \xNN
# escapes into raw bytes. An unknown number prints the help and disables the
# final address lookup.
setenvbynumber ()
{
	local shell_code_num=$1
	case $shell_code_num in
	1)
		# execve("/bin//sh") via int 0x80
		export $env_name=$(python -c 'print ("\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x53\x89\xe1\x89\xc2\xb0\x0b\xcd\x80")')
		;;
	2)
		# same, followed by exit(0)
		export $env_name=$(python -c 'print ("\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x53\x89\xe1\x89\xc2\xb0\x0b\xcd\x80\x31\xc0\xb0\x01\xcd\x80")')
		;;
	3)
		# setreuid() first, then execve and exit
		export $env_name=$(python -c 'print("\x31\xc0\xb0\x31\xcd\x80\x89\xc3\x89\xc1\x31\xc0\xb0\x46\xcd\x80\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x53\x89\xe1\x89\xc2\xb0\x0b\xcd\x80\x31\xc0\xb0\x01\xcd\x80")')
		;;
	4)
		# minimal execve /bin/sh
		export $env_name=$(python -c 'print("\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x53\x89\xe1\xb0\x0b\xcd\x80")')
		;;
	*)
		help
		is_option=1
		get_addr=0
		;;
	esac
}
# Export user-supplied shellcode ($1, with \xNN escapes) via python.
# NOTE(review): $1 is spliced directly into the python source, so quotes in
# the argument would break (or inject into) the command — acceptable for a
# local exploitation helper, but not general-purpose.
setenvbydirect ()
{
	export $env_name=$(python -c 'print("'$1'")')
}
# Read shellcode (with \xNN escapes) from file $1 and export it, expanded via
# python, into the environment variable named by $env_name. On a missing
# file, print an error and clear get_addr so the address lookup is skipped.
setenvbyfile()
{
	# The original test was written "[-f" (missing spaces), which always
	# failed and made every file — even existing ones — look missing.
	if [ -f "$1" ];then
		file_content=$(<"$1")
		export $env_name=$(python -c 'print("'$file_content'")')
	else
		echo "File does not exist"
		get_addr=0
	fi
}
# Option parsing: -s name, -n number, -d shellcode, -f file, -h help.
# Only the FIRST shellcode-selecting option (-n/-d/-f) takes effect; later
# ones are ignored via the is_option latch.
while getopts "s:n:d:f:h" opt
do
	case $opt in
	s) env_name=$OPTARG
	;;
	n)
		if [ $is_option -eq 0 ];then
			setenvbynumber $OPTARG
			is_option=1
		fi
	;;
	d)
		if [ $is_option -eq 0 ];then
			setenvbydirect $OPTARG
			is_option=1
		fi
	;;
	f)
		if [ $is_option -eq 0 ];then
			setenvbyfile $OPTARG
			is_option=1
		fi
	;;
	h)
		help
		is_option=1
		get_addr=0
	;;
	?)
		help
		is_option=1
		get_addr=0
	;;
	esac
done
# No selection made: default to built-in shellcode #1.
if [ $is_option -eq 0 ];then
	setenvbynumber 1
fi
# Locate the exported shellcode's address on the stack (helper binary).
if [ $get_addr -ne 0 ];then
	./get_env_addr $env_name
fi
exit
| true |
0403c3b3896546641d298da4be8b97979431c46d | Shell | latifkabir/Computation_using_Fortran90 | /fem2d_poisson_sparse/fem2d_poisson_sparse.sh | UTF-8 | 275 | 2.734375 | 3 | [] | no_license | #!/bin/bash
#
# Compile fem2d_poisson_sparse.f90 with debug info and stash the object file
# in the per-architecture library directory. Compiler diagnostics are
# captured in compiler.txt and kept only when compilation fails.
gfortran -c -g fem2d_poisson_sparse.f90 >& compiler.txt
if [ $? -ne 0 ]; then
  echo "Errors compiling fem2d_poisson_sparse.f90"
  exit
fi
rm compiler.txt
#
mv fem2d_poisson_sparse.o ~/lib/$ARCH
#
echo "Object code stored as ~/lib/$ARCH/fem2d_poisson_sparse.o"
| true |
9c9ff96fe8c0ba2d927a0249df024b6048538afc | Shell | bobmaerten/docker-wallabag | /99_change_wallabag_config_salt.sh | UTF-8 | 258 | 2.734375 | 3 | [] | no_license | #!/bin/bash
set -e

# Default salt, overridden by the WALLABAG_SALT value injected by the
# container environment (baseimage-docker convention) when present.
SALT='34gAogagAigJaurgbqfdvqQergvqer'
if [ -f /etc/container_environment/WALLABAG_SALT ] ; then
  SALT=`cat /etc/container_environment/WALLABAG_SALT`
fi

# Rewrite the SALT constant in wallabag's PHP config in place.
# NOTE(review): a salt containing sed metacharacters (/, &, ') would break
# this substitution — assumed alphanumeric, confirm upstream.
sed -i "s/'SALT', '.*'/'SALT', '$SALT'/" /var/www/wallabag/inc/poche/config.inc.php
| true |
0c232d1c478878948f9d4e6261d608d40d7f3286 | Shell | Praju97/EmpWage | /hello.sh | UTF-8 | 126 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# Toy attendance check: greet, then flip a coin and report the result.
echo "Hello"
ISPRESENT=1
# $RANDOM modulo 2 gives a 0/1 coin flip.
rand=$((RANDOM % 2))
case "$rand" in
  1)
    echo "Present"
    ;;
  *)
    echo "Absent"
    ;;
esac
| true |
28311acc380d8ae0697af9a69bf19940071bc855 | Shell | Jokler/dotfiles | /scripts/rofi-workspaces | UTF-8 | 547 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# With --move, the selected workspace receives the focused container instead
# of being switched to. Remaining args are forwarded to rofi.
moveto=0
if [ "$1" = "--move" ]; then
  moveto=1
  shift
fi

# Emit one i3 workspace name per line, parsed out of `i3-msg -t
# get_workspaces` JSON by splitting on commas and then on double quotes.
# NOTE(review): this hand-rolled parse breaks on names containing ',' or
# '"'; jq would be robust — left as-is to avoid a new dependency.
get_workspaces() {
  workspace_json="$(i3-msg -t get_workspaces)"
  IFS=','
  read -ra split <<< "$workspace_json"
  for entry in "${split[@]}"; do
    if [[ "$entry" == *"name"* ]]; then
      IFS='"'
      read -ra split <<< "$entry"
      echo "${split[3]}"
    fi
  done
}

# Let the user pick a workspace in rofi, then switch or move accordingly.
selection=$(get_workspaces | rofi -dmenu "$@")
if [[ $moveto -eq 0 ]]; then
  i3-msg workspace "$selection"
else
  i3-msg move container to workspace "$selection"
fi

# vim: expandtab sw=2 ts=2
| true |
c8e142e0f6ca819f81acf0281c53a78310d8e00e | Shell | openbmc/openbmc-tools | /ipkdbg/ipkdbg.in | UTF-8 | 12,573 | 3.609375 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-dco-1.1"
] | permissive | #!/bin/sh
# Strict mode: abort on any unhandled error or unset variable reference.
set -eu

# User-overridable knobs; `: ${VAR:=default}` assigns only when unset/empty.
: ${IPKDBG_OPKG_CACHE:=}
: ${IPKDBG_CONF_HOST:=host.local}
: ${IPKDBG_CONF_MNT:=mountpoint}
: ${IPKDBG_CONF_LOC:=themoon}
: ${IPKDBG_CONF_ROOT:=path}
: ${IPKDBG_CONF_USER:=$USER}
: ${IPKDBG_WGET_OPTS:="--quiet"}
: ${IPKDBG_ZSTD:=zstd}
# Emit a diagnostic on stderr, expanding backslash escapes (-e) and wrapping
# long lines with fold(1) for terminal readability.
ipkdbg_error() {
    { /bin/echo -e "$@" | fold; } >&2
}

# Same formatting as ipkdbg_error, but on stdout for informational output.
ipkdbg_info() {
    /bin/echo -e "$@" | fold
}
# Render the man-page-style help text. \033[1m/\033[0m are ANSI bold
# on/off; \t indents; the current values of the IPKDBG_* variables are
# interpolated into the ENVIRONMENT section.
ipkdbg_help() {
    /bin/echo -e "\033[1mNAME\033[0m"
    /bin/echo -e "\tipkdbg - debug OpenBMC applications from an (internally) released firmware"
    /bin/echo -e
    /bin/echo -e "\033[1mSYNOPSIS\033[0m"
    /bin/echo -e "\tipkdbg [-q] RELEASE FILE CORE [PACKAGE...]"
    /bin/echo -e
    /bin/echo -e "\033[1mDESCRIPTION\033[0m"
    /bin/echo -e "\tRELEASE is the firmware release whose packages to install"
    /bin/echo -e "\tFILE is the absolute path to the binary of interest in the target environment"
    /bin/echo -e "\tCORE is an optional core file generated by FILE. Pass '-' for no core file"
    /bin/echo -e "\tPACKAGES will be used to populate a temporary rootfs for debugging FILE"
    /bin/echo -e
    /bin/echo -e "\033[1mOPTIONS\033[0m"
    /bin/echo -e "\t\033[1m-h\033[0m"
    /bin/echo -e "\tPrint this help."
    /bin/echo -e
    /bin/echo -e "\t\033[1m-q\033[0m"
    /bin/echo -e "\tQuit gdb once done. Intended for use in a scripting environment in combination"
    /bin/echo -e "\twith a core file, as the backtrace will be printed as an implicit first command."
    /bin/echo -e
    /bin/echo -e "\033[1mENVIRONMENT\033[0m"
    /bin/echo -e "\tThere are several important environment variables controlling the behaviour of"
    /bin/echo -e "\tthe script:"
    /bin/echo -e
    /bin/echo -e "\t\033[1mIPKDBG_OPKG_CACHE\033[0m"
    /bin/echo -e "\tA package cache directory for opkg. Defaults to empty, disabling the cache."
    /bin/echo -e
    /bin/echo -e "\t\033[1mIPKDBG_CONF_HOST\033[0m"
    /bin/echo -e "\tHostname for access to opkg.conf over the web interface"
    /bin/echo -e
    /bin/echo -e "\tDefaults to '${IPKDBG_CONF_HOST}'"
    /bin/echo -e
    /bin/echo -e "\t\033[1mIPKDBG_CONF_MNT\033[0m"
    /bin/echo -e "\tMount-point for access to opkg.conf"
    /bin/echo -e
    /bin/echo -e "\tDefaults to '${IPKDBG_CONF_MNT}'"
    /bin/echo -e
    /bin/echo -e "\t\033[1mIPKDBG_CONF_LOC\033[0m"
    /bin/echo -e "\tGeo-location for access to opkg.conf"
    /bin/echo -e
    /bin/echo -e "\tDefaults to '${IPKDBG_CONF_LOC}'"
    /bin/echo -e
    /bin/echo -e "\t\033[1mIPKDBG_CONF_ROOT\033[0m"
    /bin/echo -e "\tPath to the directory containing build artifacts, for access to opkg.conf"
    /bin/echo -e
    /bin/echo -e "\tDefaults to '${IPKDBG_CONF_ROOT}'"
    /bin/echo -e
    /bin/echo -e "\t\033[1mIPKDBG_CONF_USER\033[0m"
    /bin/echo -e "\tUsername for access to opkg.conf over the web interface"
    /bin/echo -e
    /bin/echo -e "\tDefaults to \$USER ($USER)"
    /bin/echo -e
    /bin/echo -e "\t\033[1mIPKDBG_GDB\033[0m"
    /bin/echo -e "\tThe gdb(1) binary to invoke. Automatically detected if unset."
    /bin/echo -e
    /bin/echo -e "\t\033[1mIPKDBG_WGET_OPTS\033[0m"
    /bin/echo -e "\tUser options to pass to wget(1) when fetching opkg.conf. Defaults to"
    /bin/echo -e "\t'$IPKDBG_WGET_OPTS'"
    /bin/echo -e
    /bin/echo -e "\t\033[1mIPKDBG_ZSTD\033[0m"
    /bin/echo -e "\tThe zstd(1) binary to extract the compressed core dump. Automatically"
    /bin/echo -e "\tdetected if unset."
    /bin/echo -e
    /bin/echo -e "\033[1mEXAMPLE\033[0m"
    /bin/echo -e "\tipkdbg 1020.2206.20220208a \\"
    /bin/echo -e "\t\t/usr/bin/nvmesensor - \\"
    /bin/echo -e "\t\tdbus-sensors dbus-sensors-dbg"
}
# Parse flags: -q (quit gdb after the implicit backtrace) and -h (help).
# Any unknown option also prints the help and exits.
IPKDBG_OPT_QUIT=0
while getopts hq f
do
    case $f in
    q) IPKDBG_OPT_QUIT=1;;
    h|\?) ipkdbg_help ; exit 1;;
    esac
done
shift $(expr $OPTIND - 1)

# Any abnormal exit from here on prints the usage text.
trap ipkdbg_help EXIT
# Normalise the CORE argument: "-" (no core) passes through unchanged;
# otherwise decompress a .zst core next to itself and print the
# decompressed path on stdout.
ipkdbg_core_extract()
{
    if [ "-" = "$1" ]
    then
        echo -
    else
        local src="$(realpath "$1")"
        # Strip a .zst suffix if present; already-raw cores keep their name.
        local dst="${src%.zst}"
        # Fails the script (set -e) early when the zstd binary is missing.
        command -v $IPKDBG_ZSTD > /dev/null
        # `|| true`: decompression of an already-extracted/non-zstd file is
        # tolerated; gdb will complain later if $dst is genuinely unusable.
        $IPKDBG_ZSTD --decompress --quiet --quiet --force -o "$dst" "$src" || true
        echo "$dst"
    fi
}
# Positional arguments: build identifier, target binary path, core dump path.
IPKDBG_BUILD=$1; shift
IPKDBG_FILE=$1; shift
# '-' disables core handling; otherwise a .zst core is decompressed in place
IPKDBG_CORE=$(ipkdbg_core_extract "$1"); shift
# Any remaining arguments are extra packages to install into the sysroot
IPKDBG_PKGS=$@
# Default IPKDBG_GDB to the empty string when unset
: ${IPKDBG_GDB:=}
# Pick a gdb binary: honour an explicit IPKDBG_GDB, otherwise choose by
# host distribution (plain gdb on distros whose gdb supports all targets,
# gdb-multiarch elsewhere).  Exits with guidance if the tool is missing.
if [ -n "$IPKDBG_GDB" ]
then
    ipkdbg_info "Using provided gdb command '$IPKDBG_GDB'"
else
    os_id=$(. /etc/os-release; echo ${ID}-${VERSION_ID})
    case $os_id in
    rhel-8.6 | fedora*)
        IPKDBG_GDB=gdb
        if [ -z "$(command -v $IPKDBG_GDB)" ]
        then
            ipkdbg_error "Please install the gdb package:"
            ipkdbg_error
            ipkdbg_error "\tsudo dnf install gdb"
            ipkdbg_error
            exit 1
        fi
        ;;
    rhel*)
        IPKDBG_GDB=gdb-multiarch
        if [ -z "$(command -v $IPKDBG_GDB)" ]
        then
            ipkdbg_error "Please install the gdb-multiarch package:"
            ipkdbg_error
            ipkdbg_error "\tsudo dnf install gdb-multiarch"
            ipkdbg_error
            exit 1
        fi
        ;;
    ubuntu*)
        IPKDBG_GDB=gdb-multiarch
        if [ -z "$(command -v $IPKDBG_GDB)" ]
        then
            ipkdbg_error "Please Install the gdb-multiarch package"
            ipkdbg_error
            ipkdbg_error "\tsudo apt install gdb-multiarch"
            ipkdbg_error
            exit 1
        fi
        ;;
    *)
        # Bug fix: this message previously interpolated $release_id, which is
        # not defined at this point in the script; os_id holds the detection.
        ipkdbg_error "Unrecognised distribution $os_id. Please set IPKDBG_GDB or " \
            "install an appropriate gdb binary to invoke"
        exit 1
        ;;
    esac
    ipkdbg_info "Using gdb command ${IPKDBG_GDB} ($(command -v $IPKDBG_GDB))"
fi
ipkdbg_archive_extract() {
    # Extract the base64-encoded tarball embedded at the tail of this script.
    # $1: 1-based line number in $0 where the base64 payload begins
    # $2: destination directory for extraction
    local offset=$1
    local work=$2
    # Quote expansions so a whitespace-containing TMPDIR cannot break the
    # pipeline (the work dir comes from mktemp under TMPDIR)
    tail -n+"$offset" "$0" | base64 --decode - | tar -xz -C "$work"
}
ipkdbg_opkg_path() {
    # Resolve the path of the bundled opkg binary matching the host
    # architecture and distribution; exits if no matching binary is shipped.
    # $1: root of the extracted tool tree
    local root=$1
    local arch
    arch=$(uname -m)
    # Source os-release once and split, rather than spawning two subshells
    local release
    release=$(. /etc/os-release; echo "${ID}:${VERSION_ID}")
    local release_id=${release%%:*}
    local release_version_id=${release#*:}
    local p=${root}/bin/${arch}/${release_id}/${release_version_id}/opkg
    if [ ! -x "$p" ]
    then
        ipkdbg_error "Unsupported environment:"
        ipkdbg_error
        ipkdbg_error "Architecture:\t$arch"
        ipkdbg_error "Distribution ID:\t$release_id"
        ipkdbg_error "Distribution Version:\t$release_version_id"
        exit 1
    fi
    echo "$p"
}
# Self-extraction requires re-reading this script's own file content, so it
# must have been invoked via a real path (not, say, piped into the shell)
if [ ! -f $0 ]
then
    ipkdbg_error "Please execute the script with a relative or absolute path"
    exit 1
fi
# Line number where the embedded base64 tool archive starts (line after marker)
IPKDBG_DATA=$(awk '/^__ARCHIVE_BEGIN__$/ { print NR + 1; exit 0 }' $0)
# Scratch layout: extracted tools, debug sysroot, opkg config and database
IPKDBG_WORK=$(mktemp -t --directory ipkdbg.XXX)
IPKDBG_BINS=${IPKDBG_WORK}/tools
IPKDBG_ROOT=${IPKDBG_WORK}/root
IPKDBG_CONF=${IPKDBG_WORK}/opkg.conf
IPKDBG_DB=${IPKDBG_WORK}/database
# Remove the entire scratch tree on any exit
cleanup() {
    rm -rf $IPKDBG_WORK
}
# NOTE(review): SIGKILL cannot be trapped, so 'KILL' here is a no-op; also
# the INT/QUIT handlers run cleanup without exiting — confirm intent
trap cleanup EXIT INT QUIT KILL
mkdir $IPKDBG_BINS $IPKDBG_DB
# Unpack the bundled opkg binaries and locate the one for this host
ipkdbg_archive_extract $IPKDBG_DATA $IPKDBG_BINS
IPKDBG_OPKG_BIN=$(ipkdbg_opkg_path $IPKDBG_BINS)
ipkdbg_build_gen_path() {
    # Compose the artifact-mount filesystem path for a build component.
    # $1: build identifier; $2: component path relative to the build dir
    local build="$1"
    local component="$2"
    printf '/%s/%s/%s/%s/%s\n' \
        "$IPKDBG_CONF_MNT" "$IPKDBG_CONF_LOC" "$IPKDBG_CONF_ROOT" \
        "$build" "$component"
}
ipkdbg_build_gen_url() {
    # Compose the HTTPS URL of a build component on the web interface.
    # $1: build identifier; $2: component path relative to the build dir
    local build="$1"
    local component="$2"
    printf 'https://%s/%s/%s/%s/%s/%s\n' \
        "$IPKDBG_CONF_HOST" "$IPKDBG_CONF_MNT" "$IPKDBG_CONF_LOC" \
        "$IPKDBG_CONF_ROOT" "$build" "$component"
}
ipkdbg_build_gen_cache() {
    # Compose the per-user cache location for a build component.
    # $1: build identifier; $2: component file name
    local build="$1"
    local component="$2"
    printf '%s/.cache/ipkdbg/builds/%s/%s\n' "$HOME" "$build" "$component"
}
ipkdbg_opkg_conf_gen_path() {
    # Artifact-mount path of a build's opkg.conf.
    ipkdbg_build_gen_path "$1" bmc_ipk/opkg.conf
}
ipkdbg_opkg_fetch_path() {
    # Fetch a resource via the filesystem: silent copy, cp's exit status
    # is the function's status (non-zero signals "not available locally").
    local src=$1
    local dst=$2
    cp "$src" "$dst" > /dev/null 2>&1
}
ipkdbg_opkg_conf_gen_url() {
    # Web-interface URL of a build's opkg.conf.
    ipkdbg_build_gen_url "$1" bmc_ipk/opkg.conf
}
ipkdbg_opkg_fetch_url() {
    # Download $1 to $2 over HTTP basic auth, prompting for the password.
    # Exits the whole script on failure, as nothing can proceed without it.
    local url=$1
    local output=$2
    # We don't want URL to wrap
    ipkdbg_info "Authenticating as user $IPKDBG_CONF_USER"
    # $IPKDBG_WGET_OPTS stays unquoted on purpose: it word-splits into
    # multiple user-supplied wget options; url/output are quoted to
    # survive whitespace.
    if ! wget --http-user="$IPKDBG_CONF_USER" \
            --ask-password \
            --output-document "$output" \
            $IPKDBG_WGET_OPTS \
            "$url"
    then
        ipkdbg_error "Failed to fetch resource"
        exit 1
    fi
}
ipkdbg_opkg_conf_gen_cache() {
    # Per-user cache location of a build's opkg.conf.
    ipkdbg_build_gen_cache "$1" opkg.conf
}
ipkdbg_opkg_conf_fetch_cache() {
    # Copy the cached opkg.conf for build "$1" to "$2".
    # Fails silently (non-zero status) when the cache entry is absent.
    local cached
    cached="$(ipkdbg_opkg_conf_gen_cache "$1")"
    cp "$cached" "$2" > /dev/null 2>&1
}
ipkdbg_opkg_conf_install() {
    # Install the opkg.conf for a build into $2, populating the per-user
    # cache on a miss: try a direct filesystem copy of the artifact first,
    # then fall back to fetching it over HTTPS.
    # $1: build identifier; $2: destination path for opkg.conf
    local build=$1
    local output=$2
    mkdir -p "$(dirname "$output")"
    if ! ipkdbg_opkg_conf_fetch_cache "$build" "$output"
    then
        local cache
        cache="$(ipkdbg_opkg_conf_gen_cache "$build")"
        mkdir -p "$(dirname "$cache")"
        # (Removed a stray dead 'url=' assignment that was never read.)
        ipkdbg_opkg_fetch_path "$(ipkdbg_opkg_conf_gen_path "$build")" "$cache" ||
            (echo "Configuring opkg via $(ipkdbg_opkg_conf_gen_url "$build")" &&
                ipkdbg_opkg_fetch_url "$(ipkdbg_opkg_conf_gen_url "$build")" "$cache")
        ipkdbg_opkg_conf_fetch_cache "$build" "$output"
    fi
}
ipkdbg_opkg_db_gen_path() {
    # Artifact-mount path of a build's opkg status-database archive.
    ipkdbg_build_gen_path "$1" bmc_ipk/opkg-database.tar.xz
}
ipkdbg_opkg_db_gen_url() {
    # Web-interface URL of a build's opkg status-database archive.
    ipkdbg_build_gen_url "$1" bmc_ipk/opkg-database.tar.xz
}
ipkdbg_opkg_db_gen_cache() {
    # Per-user cache location of a build's opkg status-database archive.
    ipkdbg_build_gen_cache "$1" opkg-database.tar.xz
}
ipkdbg_opkg_db_install() {
    # Fetch (caching per-user) and unpack the opkg status database for a
    # build into the sysroot at $2.
    # Returns non-zero when the archive cannot be obtained or unpacked, so
    # the caller's 'if' guard actually works (previously a failed tar was
    # masked by the trailing mkdir/ln succeeding).
    # $1: build identifier; $2: database sysroot directory
    local build=$1
    local root=$2
    local state=${root}/var/lib/opkg
    local cache
    cache="$(ipkdbg_opkg_db_gen_cache "$build")"
    mkdir -p "$state"
    if ! [ -f "$cache" ]
    then
        mkdir -p "$(dirname "$cache")"
        # Filesystem copy first, HTTPS fallback; drop partial files on failure
        ipkdbg_opkg_fetch_path "$(ipkdbg_opkg_db_gen_path "$build")" "$cache" ||
            ipkdbg_opkg_fetch_url "$(ipkdbg_opkg_db_gen_url "$build")" "$cache" ||
            rm -f "$cache"
    fi
    tar -xf "$cache" -C "$state" 2> /dev/null || return 1
    # opkg expects /usr/local/var to mirror /var inside the offset root
    mkdir -p "${root}/usr/local"
    ln -s "${root}/var" "${root}/usr/local/var"
}
ipkdbg_opkg() {
    # Invoke the bundled opkg against the scratch config and sysroot,
    # forwarding all arguments.  The unquoted command substitution is
    # deliberate: it word-splits into either nothing or the pair of
    # '--cache-dir <dir> --host-cache-dir' options when a cache is set.
    $IPKDBG_OPKG_BIN \
        $([ -z "$IPKDBG_OPKG_CACHE" ] ||
            echo --cache-dir $IPKDBG_OPKG_CACHE --host-cache-dir) \
        -V1 -f $IPKDBG_CONF -o $IPKDBG_ROOT $@
}
ipkdbg_gdb_extract_bin() {
    # Recover the crashed executable's path from a core file by parsing the
    # "Core was generated by `...'" banner gdb prints on load; the command
    # line's arguments are dropped, leaving only the binary path.
    local core=$1
    $IPKDBG_GDB --core "$core" -ex quit 2> /dev/null |
        awk -F "[\`']" '/Core was generated by/ { split($2, argv, " "); print argv[1] }'
}
ipkdbg_opkg_find() {
    # Emit just the package names (first column) from `opkg find` output.
    ipkdbg_opkg find "$@" | awk '{ print $1 }'
}
ipkdbg_opkg_find_extra() {
    # Locate the -dbg and -src companion packages for a binary package.
    # $1: binary package name
    # Outputs the companion package names (possibly none) on stdout.
    local pkg=$1
    local dbg_pkgs src_pkgs
    # Try appending -dbg and -src to the binary package name
    dbg_pkgs=$(ipkdbg_opkg_find "${pkg}-dbg")
    src_pkgs=$(ipkdbg_opkg_find "${pkg}-src")
    # If that fails, we probably have a split binary package: strip the last
    # name component and retry.  Bug fix: the original joined the two results
    # as "$a $b", which always contains a space and is therefore never empty,
    # so this fallback branch was unreachable.
    if [ -z "${dbg_pkgs}${src_pkgs}" ]
    then
        dbg_pkgs=$(ipkdbg_opkg_find "${pkg%-*}-dbg")
        src_pkgs=$(ipkdbg_opkg_find "${pkg%-*}-src")
    fi
    # Unquoted on purpose: collapses whitespace so empty results print nothing
    echo $dbg_pkgs $src_pkgs
}
# Materialise the opkg configuration for the requested build
ipkdbg_opkg_conf_install $IPKDBG_BUILD $IPKDBG_CONF
# Extract the binary path from the core
if [ '-' = "$IPKDBG_FILE" -a '-' != "$IPKDBG_CORE" ]
then
    IPKDBG_FILE=$(ipkdbg_gdb_extract_bin $IPKDBG_CORE)
fi
# Update the package database before potentially looking up the debug packages
ipkdbg_opkg update
# Extract the package name for the binary
if [ '-' != "$IPKDBG_CORE" ]
then
    if ipkdbg_opkg_db_install $IPKDBG_BUILD $IPKDBG_DB
    then
        # Look up the package for the binary (search runs against the
        # downloaded status database rather than the debug sysroot)
        IPKDBG_CORE_PKG="$(IPKDBG_ROOT=$IPKDBG_DB ipkdbg_opkg search ${IPKDBG_DB}${IPKDBG_FILE} | awk '{ print $1 }')"
        if [ -n "$IPKDBG_CORE_PKG" ]
        then
            # Look up the extra (debug, source) packages for the binary package
            IPKDBG_PKGS="$IPKDBG_PKGS $IPKDBG_CORE_PKG"
            IPKDBG_PKGS="$IPKDBG_PKGS $(ipkdbg_opkg_find_extra $IPKDBG_CORE_PKG)"
        fi
    fi
    # With a core but no deducible packages, the user must name them explicitly
    if [ -z "$IPKDBG_PKGS" ]
    then
        ipkdbg_error "Unable to determine package-set to install, please specify" \
            "appropriate packages on the command line"
        exit 1
    fi
fi
# Force installation of gcc-runtime-dbg to give us debug symbols for libstdc++
IPKDBG_PKGS="gcc-runtime-dbg $IPKDBG_PKGS"
if [ -n "$IPKDBG_OPKG_CACHE" ]
then
    # Warm the package cache first so the subsequent install hits local files
    mkdir -p "$IPKDBG_OPKG_CACHE"
    ipkdbg_opkg install --download-only $IPKDBG_PKGS
fi
# Filter a known-noisy opkg warning out of the install output
ipkdbg_opkg install $IPKDBG_PKGS | grep -vF 'Warning when extracting archive entry'
# Drop an 'opkg' wrapper on PATH so packages can be inspected or added from
# inside the gdb session (heredoc expands the scratch paths now; \$@ is
# escaped so it expands when the wrapper runs)
cat <<EOF > ${IPKDBG_BINS}/opkg
#!/bin/sh
exec $IPKDBG_OPKG_BIN -f $IPKDBG_CONF -o $IPKDBG_ROOT \$@
EOF
chmod +x ${IPKDBG_BINS}/opkg
# Launch gdb against the extracted sysroot; print a backtrace when a core was
# supplied, and quit immediately afterwards if -q was given
PATH=${IPKDBG_BINS}:${PATH} $IPKDBG_GDB -q \
    -iex "set solib-absolute-prefix $IPKDBG_ROOT" \
    -iex "add-auto-load-safe-path $IPKDBG_ROOT" \
    -iex "set directories $IPKDBG_ROOT" \
    -iex "cd $IPKDBG_ROOT" \
    $([ '-' = "$IPKDBG_CORE" ] || echo -ex bt) \
    $([ 0 -eq $IPKDBG_OPT_QUIT ] || echo -ex quit) \
    ${IPKDBG_ROOT}${IPKDBG_FILE} \
    $([ '-' = "$IPKDBG_CORE" ] || echo $IPKDBG_CORE)
# Exit explicitly so the shell never falls through into the appended
# __ARCHIVE_BEGIN__ payload data below
exit 0
__ARCHIVE_BEGIN__
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.