blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
3c0819ac396db10699c1f6a355bcf8c46a867bc1
|
Shell
|
greenboxal/korekube
|
/lib/helpers
|
UTF-8
| 1,056
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Exit the whole script successfully if component "$1" was already
# bootstrapped, i.e. its guard file exists under /etc/kubernetes.
verify_guard() {
    NAME=/etc/kubernetes/.${1}_bootstrapped
    # Quote the path so a name containing spaces or glob chars cannot
    # break (or subvert) the -f test.
    [ -f "$NAME" ] && exit 0
}
# Mark component "$1" as bootstrapped by creating its guard file,
# the same file verify_guard checks for.
touch_guard() {
    NAME=/etc/kubernetes/.${1}_bootstrapped
    # Quoted for the same reason as in verify_guard.
    touch "$NAME"
}
# Print the credstash secret for key "$1" on stdout.
# Secrets are namespaced per cluster: the stored name is
# "<CLUSTER_NAME>.<key>", looked up in the region from $EC2_REGION.
credstash_get() {
    KEY=$1
    credstash -r "$EC2_REGION" get "${CLUSTER_NAME}.${KEY}"
}
# Fetch secret "$1" from credstash into file "$2", world-readable (0644).
# Use for non-sensitive material such as certificates.
credstash_get_file() {
    KEY=$1
    NAME=$2
    # Quote both expansions so paths/keys with spaces don't word-split.
    credstash_get "$KEY" > "$NAME"
    chmod 644 "$NAME"
}
# Fetch secret "$1" from credstash into file "$2", owner-only (0600).
# Use for private keys and other sensitive material.
credstash_get_secret() {
    KEY=$1
    NAME=$2
    # Quote both expansions so paths/keys with spaces don't word-split.
    credstash_get "$KEY" > "$NAME"
    chmod 600 "$NAME"
}
# Write stdin to the static-pod manifest /etc/kubernetes/manifests/<$1>.yaml.
write_manifest() {
    NAME=$1
    # Quote the target so a manifest name with spaces cannot split the path.
    cat > "/etc/kubernetes/manifests/${NAME}.yaml"
}
# Generate /etc/kubernetes/<$1>-kubeconfig.yaml for user "$1", pointing at
# $KUBE_API_ENDPOINT and using the client cert/key pair named after the user
# under /etc/kubernetes/ssl/. Relies on $CLUSTER_NAME and $KUBE_API_ENDPOINT
# being exported by the caller.
write_kubeconfig() {
    NAME=$1
    # Quote the redirect target; the heredoc body below is emitted verbatim
    # (with variable expansion) and is intentionally left untouched.
    cat > "/etc/kubernetes/${NAME}-kubeconfig.yaml" <<EOF
apiVersion: v1
kind: Config
clusters:
- name: $CLUSTER_NAME
cluster:
certificate-authority: /etc/kubernetes/ssl/ca.pem
server: $KUBE_API_ENDPOINT
users:
- name: $NAME
user:
client-certificate: /etc/kubernetes/ssl/$NAME.pem
client-key: /etc/kubernetes/ssl/$NAME-key.pem
contexts:
- context:
cluster: $CLUSTER_NAME
user: $NAME
name: default
current-context: default
EOF
}
| true
|
473d5fb4a38fb3b2794fb6530f402b2753558f21
|
Shell
|
christiangda/PuppetMaster
|
/Vagrant/scripts/provision-couchdb.sh
|
UTF-8
| 4,210
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Vagrant provisioner: build CouchDB 2.0.0 from source on a CentOS/RHEL box,
# install it under /home/couchdb, register it as a systemd service, and seed
# the default + hiera test databases. Must run as root (yum, adduser,
# systemctl). Steps are strictly ordered; later steps assume earlier ones
# succeeded.
################################################################################
# Install requirements
yum -y install epel-release
yum -y install autoconf autoconf-archive automake curl-devel erlang gcc-c++
yum -y install help2man js-devel libicu-devel libtool perl-Test-Harness unzip zip
yum -y install python-devel python-setuptools python-pip wget gem
################################################################################
# Install dependencies
pip install --upgrade pip
pip install -U Sphinx
################################################################################
# Install more dependencies
# SpiderMonkey 1.8.5 is the JS engine CouchDB 2.0 requires.
cd /usr/local/src
wget http://ftp.mozilla.org/pub/js/js185-1.0.0.tar.gz
tar -xvf js185-1.0.0.tar.gz
cd js-1.8.5/js/src/
./configure
make && sudo make install
################################################################################
# Compile and install CouchDB
cd /usr/local/src
wget http://www-us.apache.org/dist/couchdb/source/2.0.0/apache-couchdb-2.0.0.tar.gz
tar -xvf apache-couchdb-2.0.0.tar.gz
cd apache-couchdb-2.0.0
./configure
make release
################################################################################
# Create user
adduser --system --create-home --shell /bin/bash --user-group couchdb
cd /usr/local/src/apache-couchdb-2.0.0
cp -R rel/couchdb /home/couchdb/
chown -R couchdb:couchdb /home/couchdb/
find /home/couchdb/ -type d -exec chmod 0770 {} \;
chmod 0644 /home/couchdb/couchdb/etc/*
# Put the couchdb binaries on everyone's PATH (note the escaped \$PATH so
# expansion happens at login time, not here).
cat > /etc/profile.d/couchdb.sh << _EOF
export PATH=\$PATH:/home/couchdb/couchdb/bin
_EOF
source /etc/profile.d/couchdb.sh
################################################################################
# prepare setting
# start couchdb to configure default databases
sudo -i -u couchdb couchdb/bin/couchdb > /dev/null 2>&1 &
# NOTE(review): fixed 60s wait for startup — no readiness polling; confirm
# this is long enough on slow VMs.
sleep 60
# Create defaults databases
curl -X PUT http://127.0.0.1:5984/_users
curl -X PUT http://127.0.0.1:5984/_replicator
curl -X PUT http://127.0.0.1:5984/_global_changes
# stop couchdb after setting
pkill -u couchdb
# permanent auto start
cat > /lib/systemd/system/couchdb.service << _EOF
[Unit]
Description=the system-wide CouchDB instance
After=network.target
[Service]
Type=simple
User=couchdb
Group=couchdb
#ExecStart=/bin/su - couchdb couchdb/bin/couchdb &
ExecStart=/home/couchdb/couchdb/bin/couchdb
PIDFile=/run/couchdb/couchdb.pid
ExecStop=pkill -u couchdb
[Install]
WantedBy=multi-user.target
_EOF
touch /home/couchdb/couchdb/var/log/couch.log
chown couchdb.couchdb /home/couchdb/couchdb/var/log/couch.log
ln -sf /lib/systemd/system/couchdb.service /etc/systemd/system/couchdb.service
ln -s /home/couchdb/couchdb/etc /etc/couchdb
ln -s /home/couchdb/couchdb/var/log /var/log/couchdb
# Listen on all interfaces and log to the file created above.
sed -i 's/bind_address = 127.0.0.1/bind_address = 0.0.0.0/g' /home/couchdb/couchdb/etc/default.ini
sed -i '/\[log\]/a file \= \/home\/couchdb\/couchdb\/var\/log\/couch\.log' /home/couchdb/couchdb/etc/default.ini
################################################################################
# start service
systemctl enable couchdb.service
systemctl start couchdb.service
sleep 60
# Set user admin's password
#curl -X PUT http://127.0.0.1:5984/_config/admins/admin -d '"admin"'
# Create defaults hiera hierarchy to test from the agent.puppet.local
# See: /etc/puppet/hiera.yaml
curl -X PUT http://127.0.0.1:5984/default -d '{}'
curl -X PUT http://127.0.0.1:5984/default/common -d '{
"profile::common::packages::redhat_family": [
"nmap"
],
"profile::common::motd::content": " hostname: %{::fqdn}\n node_group: %{::node_group}\n node_environment: %{::node_environment}\n node_location: %{::node_location}\n puppet env: %{::environment}\n\n",
"profile::common::packages::debian_family": [
"nmap"
]
}'
curl -X PUT http://127.0.0.1:5984/location -d '{}'
curl -X PUT http://127.0.0.1:5984/location/vagrant -d '{
"profile::common::packages::redhat_family": [
"git"
],
"profile::common::packages::debian_family": [
"git"
]
}'
curl -X PUT http://127.0.0.1:5984/vagrant -d '{}'
curl -X PUT http://127.0.0.1:5984/vagrant/common -d '{}'
curl -X PUT http://127.0.0.1:5984/vagrant/vagrant -d '{}'
curl -X PUT http://127.0.0.1:5984/nodes -d '{}'
| true
|
b168283da5094c5c79140f4b48f2829fbe5fd8ec
|
Shell
|
pepaslabs/deploy.sh
|
/lib/deploy.sh/utils.bash
|
UTF-8
| 538
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
# Copy ${1} to ${2}. If the destination is missing, copy outright; if it
# exists and differs, show a diff (colordiff when available) and let
# 'cp -i' prompt before overwriting.
# Relies on helpers defined elsewhere in this library: echo_step,
# echo_step_warning, files_differ, has_cmd, and the color_* variables.
function cp_diff()
{
    local a="${1}"
    local b="${2}"
    if [ ! -e "${b}" ]
    then
        # Quote the args of basename/dirname so paths with spaces survive.
        echo_step "Copying ${color_yellow}$( basename "${a}" )${color_off} into $( dirname "${b}" )."
        cp -a "${a}" "${b}"
    elif files_differ "${a}" "${b}"
    then
        echo_step_warning "Files differ: ${color_yellow}${a}${color_off} ${b}"
        if has_cmd colordiff
        then
            # '|| true' because diff exits non-zero when files differ.
            colordiff -urN "${a}" "${b}" || true
        else
            diff -urN "${a}" "${b}" || true
        fi
        cp -ai "${a}" "${b}"
    fi
}
| true
|
2f2a70ab178685a5f10f5b107dfa180bae05807e
|
Shell
|
simshaun/bash
|
/.bash_aliases
|
UTF-8
| 1,153
| 2.765625
| 3
|
[] |
no_license
|
# Core coreutils aliases: classify suffixes (-F), force color, and make
# cp/rm/mv interactive so destructive operations prompt first.
# NOTE(review): --color=always emits ANSI codes even when piped — confirm
# that is wanted (vs --color=auto).
alias ls='ls -F --color=always'
alias dir='dir -F --color=always'
alias cp='cp -iv'
alias rm='rm -i'
alias mv='mv -iv'
# NOTE(review): -in makes EVERY interactive grep case-insensitive and
# line-numbered — confirm intended as a global default.
alias grep='grep --color=auto -in'
alias ..='cd ..'
#
# Shortcuts
#
# Jump into project "$1" under /e/_projects.
# Each cd is guarded so that if the base directory is missing we bail out
# instead of cd-ing into "$1" relative to the current directory.
function proj () {
    cd /e/_projects || return
    cd "$1" || return
}
# Jump into work project "$1" under /e/work.
# Guarded like proj(): never fall through to a relative cd on failure.
function work () {
    cd /e/work || return
    cd "$1" || return
}
#
# Docker
#
# Talk to a Docker daemon exposed on localhost:2375 (e.g. Docker Desktop /
# a VM) instead of the local socket.
export DOCKER_HOST=tcp://localhost:2375
#
# Xdebug
#
# Enable xdebug for CLI
alias phpx='php -dzend_extension=xdebug.so'
#
# PHPUnit
#
# PHPUnit needs xdebug for coverage.
alias phpunit='php $(which phpunit)'
#
# Symfony (global)
#
alias sftest="php backend/bin/console --env=test"
alias sfdev="php backend/bin/console --env=dev"
alias sfprod="php backend/bin/console --env=prod"
alias sfunit='backend/vendor/bin/simple-phpunit'
#
# Symfony (app-specific)
#
# Drop, recreate and re-seed the dev database in one go.
alias dbrel='sfdev doctrine:schema:drop --force && sfdev doctrine:schema:create && sfdev ha:fixtures:load -n'
alias pun='backend/vendor/bin/phpunit'
# Run the test suite in parallel via 'fastest'.
# NOTE(review): 'read files' consumes only the FIRST line of find's output,
# and $* is unquoted — confirm this matches how fastest expects its
# command template / extra args.
function fastest () {
find backend/src/*/Tests/ -name '*Test.php' | \
(read files; SYMFONY_DEPRECATIONS_HELPER="disabled" backend/vendor/bin/fastest "backend/vendor/bin/phpunit $files;" $*)
}
# Make CLI xdebug sessions attach to PhpStorm.
export XDEBUG_CONFIG="idekey=PHPSTORM"
| true
|
353f00aed143a7b98ceaf28722c5361ca6247d9a
|
Shell
|
jrhopper/poMMo
|
/library/tools/buildscripts/lang_merge.sh
|
UTF-8
| 200
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
####
### After generating a fresh .pot file, merges its changes into existing translation files
#
# Fixes over the original: the -name pattern is quoted so the shell cannot
# expand *.po against files in the current directory, and find -exec replaces
# the word-splitting `for i in \`find ...\`` loop so .po paths containing
# whitespace are handled correctly. cd is checked so msgmerge never runs
# against the wrong tree.
cd poMMo || exit 1
find language/ -name '*.po' -exec msgmerge -U {} ../pommo.new.pot \;
| true
|
ef8e9ab462d9828c7d4000608a3a8832e896336c
|
Shell
|
eranas97/synthsis_code
|
/modeltech_examples/examples/ucis/doit.sh
|
UTF-8
| 6,185
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Copyright 1991-2016 Mentor Graphics Corporation
#
# All Rights Reserved.
#
# THIS WORK CONTAINS TRADE SECRET AND PROPRIETARY INFORMATION WHICH IS THE
# PROPERTY OF MENTOR GRAPHICS CORPORATION OR ITS LICENSORS AND IS SUBJECT TO
# LICENSE TERMS.
#
# Simple UCIS Examples - Simulation shell script
#
# Usage: Help/usage ..................... doit.sh
# Run all examples ............... doit.sh run
# Clean directory ................ doit.sh clean
#
# "clean" mode: delete every artifact the examples produce, then stop.
if [ "$1" = "clean" ] ; then
rm -f transcript *.wlf core* *.log workingExclude.cov
rm -f *.dll *.exp *.lib *.obj *.sl *.o *.so *.ucis
rm -f vsim_stacktrace.vstf
rm -f create_ucis create_filehandles find_object test_bin_assoc
rm -f increment_cover traverse_scopes_rs read_attrtags read_coverage formal
rm -f read_coverage2 traverse_scopes remove_data create_ucis_ws dump_UIDs
rm -rf work
exit 0
fi
# Any argument other than "run" prints the usage banner.
# NOTE(review): there is no exit after this banner, so a plain "doit.sh"
# continues into the run steps below — confirm that is intended.
if [ "$1" != "run" ] ; then
echo ""
echo "### Help/Usage ..................... doit.sh"
echo "### Run ucis examples .............. doit.sh run"
echo "### Clean directory ................ doit.sh clean"
echo ""
echo "Some files have minor modifications to reduce compile warnings"
echo "Some tests rely on results from earlier tests"
echo ""
fi
# The rest of the script is "run"
# NOTE(review): exits 0 even on this error path — confirm callers don't
# rely on a non-zero status here.
if [ -z "$MTI_HOME" ] ; then
echo "ERROR: Environment variable MTI_HOME is not set"
exit 0
fi
# Detect a 64-bit vsim. The backticks execute grep's (redirected, hence
# empty) output as a no-op command; only grep's exit status in $? is used.
`vsim -version | grep "64 vsim" > /dev/null`
if [ $? -eq 0 ]; then
MTI_VCO_MODE=64
else
MTI_VCO_MODE=32
fi
export MTI_VCO_MODE
# An optional second argument overrides the platform autodetected by vco.
if [ "X$2" != "X" ] ; then
platform=$2
echo "Platform set to $platform"
else
platform=`$MTI_HOME/vco`
fi
rm -f *.o *.dll *.so
# Select compiler/linker command lines per host OS (and, on Linux, per the
# 32/64-bit mode detected above). CC/LD are for C, CPP/LPP for C++.
case `uname` in
Linux)
# NOTE(review): gccversion/machine are computed but not used in this
# chunk — confirm they are needed at all.
gccversion=`gcc -dumpversion | awk -F. '{print $1}'`
machine=`uname -m`
if [ "$MTI_VCO_MODE" = "64" ] ; then
CC="gcc -g -c -m64 -Wall -I. -I$MTI_HOME/include"
LD="gcc -m64 -Wl,-export-dynamic -o "
CPP="g++ -g -c -m64 -Wall -I. -I$MTI_HOME/include"
LPP="g++ -m64 -Wl,-export-dynamic -o "
else
CC="gcc -g -c -m32 -Wall -I. -I$MTI_HOME/include"
LD="gcc -m32 -Wl,-export-dynamic -o "
CPP="g++ -g -c -m32 -Wall -I. -I$MTI_HOME/include"
LPP="g++ -m32 -Wl,-export-dynamic -o "
fi
LDLIB="$MTI_HOME/$platform/libucis.so"
;;
Win*|CYGWIN_NT*)
CC="cl -c -Ox -Oy /MD -I $MTI_HOME/include "
LD="link -INCREMENTAL:NO -DEBUG -subsystem:console"
LDLIB="-DEFAULTLIB:$MTI_HOME/win32/ucis.lib"
CPP="cl -c -Ox -Oy /EHsc /MD -I $MTI_HOME/include "
LPP="link -INCREMENTAL:NO -DEBUG -subsystem:console"
;;
*)
echo "Script not configured for `uname`, see User's manual."
exit 0
;;
esac
echo ""
echo "### NOTE: Compiling ..."
echo ""
$CC ./src/create_filehandles.c
$CC ./src/dump_UIDs.c
$CC ./src/read_attrtags.c
$CC ./src/test_bin_assoc.c
$CC ./src/create_ucis.c
$CC ./src/find_object.c
$CC ./src/read_coverage2.c
$CC ./src/traverse_scopes.c
$CC ./src/create_ucis_ws.c
$CC ./src/read_coverage.c
$CC ./src/traverse_scopes_rs.c
$CC ./src/increment_cover.c
$CC ./src/remove_data.c
$CPP ./src/formal.cpp
echo ""
echo "### NOTE: Linking ..."
echo ""
case `uname` in
Linux)
$LD create_filehandles create_filehandles.o $LDLIB
$LD dump_UIDs dump_UIDs.o $LDLIB
$LD read_attrtags read_attrtags.o $LDLIB
$LD test_bin_assoc test_bin_assoc.o $LDLIB
$LD create_ucis create_ucis.o $LDLIB
$LD find_object find_object.o $LDLIB
$LD read_coverage2 read_coverage2.o $LDLIB
$LD traverse_scopes traverse_scopes.o $LDLIB
$LD create_ucis_ws create_ucis_ws.o $LDLIB
$LD read_coverage read_coverage.o $LDLIB
$LD traverse_scopes_rs traverse_scopes_rs.o $LDLIB
$LD increment_cover increment_cover.o $LDLIB
$LD remove_data remove_data.o $LDLIB
$LPP formal formal.o $LDLIB
;;
Win*|CYGWIN_NT*)
$LD -OUT:create_filehandles create_filehandles.obj $LDLIB
$LD -OUT:dump_UIDs dump_UIDs.obj $LDLIB
$LD -OUT:read_attrtags read_attrtags.obj $LDLIB
$LD -OUT:test_bin_assoc test_bin_assoc.obj $LDLIB
$LD -OUT:create_ucis create_ucis.obj $LDLIB
$LD -OUT:find_object find_object.obj $LDLIB
$LD -OUT:read_coverage2 read_coverage2.obj $LDLIB
$LD -OUT:traverse_scopes traverse_scopes.obj $LDLIB
$LD -OUT:create_ucis_ws create_ucis_ws.obj $LDLIB
$LD -OUT:read_coverage read_coverage.obj $LDLIB
$LD -OUT:traverse_scopes_rs traverse_scopes_rs.obj $LDLIB
$LD -OUT:increment_cover increment_cover.obj $LDLIB
$LD -OUT:remove_data remove_data.obj $LDLIB
$LPP -OUT:formal formal.obj $LDLIB
;;
esac
# Run the examples in order. Order matters: the usage banner above states
# that some tests rely on results from earlier tests (e.g. create_ucis
# produces test_API.ucis, which most later examples read).
echo ""
echo "### NOTE: Running create_ucis (A15.1) ..."
echo ""
./create_ucis
echo ""
echo "### NOTE: Running create_filehandles (A15.2) ..."
echo ""
./create_filehandles
echo ""
echo "### NOTE: Running dump_UIDs test_API.ucis (A15.3)..."
echo ""
./dump_UIDs test_API.ucis
echo ""
echo "### NOTE: Running find_object test_API.ucis /top/cg (A15.4) ..."
echo ""
./find_object test_API.ucis /top/cg
echo ""
echo "### NOTE: Running increment_cover test_API.ucis /4:top/:5:#stmt#6#1# (A15.5) ..."
echo ""
./increment_cover test_API.ucis /4:top/:5:#stmt#6#1#
echo ""
echo "### NOTE: Running read_attrtags test_API.ucis /4:top/:5:#stmt#6#1# (A15.6) ..."
echo ""
./read_attrtags test_API.ucis /4:top/:5:#stmt#6#1#
echo ""
echo "### NOTE: Running read_coverage test_API.ucis (A15.7) ..."
echo ""
./read_coverage test_API.ucis
echo ""
echo "### NOTE: Running read_coverage2 test_API.ucis (A15.8) ..."
echo ""
./read_coverage2 test_API.ucis
echo ""
echo "### NOTE: Running traverse_scopes_rs test_API.ucis (A15.9) ..."
echo ""
./traverse_scopes_rs test_API.ucis
echo ""
echo "### NOTE: Running remove_data test_API.ucis /4:top/:5:#stmt#6#1# (A15.10) ..."
echo ""
./remove_data test_API.ucis /4:top/:5:#stmt#6#1#
echo ""
echo "### NOTE: Running traverse_scopes test_API.ucis (A15.11) ..."
echo ""
./traverse_scopes test_API.ucis
echo ""
echo "### NOTE: Running test_bin_assoc (A15.12)..."
echo ""
./test_bin_assoc
echo ""
echo "### NOTE: Running create_ucis_ws (A15.13) ..."
echo ""
./create_ucis_ws
echo ""
echo "### NOTE: Running dump_UIDs test_ws.ucis -p . (A15.3) ..."
echo ""
./dump_UIDs test_ws.ucis -p .
echo ""
echo "### NOTE: Running formal test_API.ucis (A15.14) ..."
echo ""
./formal test_API.ucis
exit 0
| true
|
27787c790d892ae67b20c5329b12f7bf5e096d30
|
Shell
|
weaselp/test-travis
|
/add
|
UTF-8
| 207
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
# Sum the command-line arguments by building an SQL expression
# ("0 + a + b + ...") and letting PostgreSQL evaluate it.
query="0"
while [ "$#" -gt 0 ]; do
query="$query + $1"
shift
done
s=$(psql -c "select $query" -t -U postgres)
# NOTE(review): $s is deliberately left unquoted so word-splitting trims the
# leading whitespace 'psql -t' prints — confirm before quoting it.
echo $s
exit
# NOTE(review): everything below this 'exit' is dead code — a pure-shell
# implementation of the same sum, presumably kept for comparison.
s=0
while [ "$#" -gt 0 ]; do
s=$(( s + $1 ))
shift
done
echo $s
| true
|
1dab8abef66c3d62971a29a32fc02715473c95a1
|
Shell
|
sh4ka/installsf
|
/new_project.sh
|
UTF-8
| 1,086
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Scaffold a new Symfony project:
#   1. create ~/Proyectos/<name> and install Symfony via composer
#   2. grant www-data and the current user rwx ACLs on app/cache and app/logs
#   3. generate an nginx vhost from nginx_skel.txt and enable it
#   4. register <name>.loc in /etc/hosts and restart nginx
# Usage: new_project.sh <project_name>
# (Change vs original: every expansion of the project name/dir is quoted so
# names containing spaces or glob characters cannot word-split, and the
# whoami backticks are replaced by $( ).)
if [ $# -lt 1 ]
then
echo 'You must provide a project name.'
else
project_name=$1
# Tilde expands at assignment time (unquoted), yielding an absolute path.
project_dir=~/Proyectos/$1
echo "Creating dir $project_dir"
mkdir "$project_dir"
echo 'Installing symfony with composer'
composer.phar create-project symfony/framework-standard-edition "$project_dir"
echo 'Setup permissions for logs and cache'
sudo setfacl -R -m u:www-data:rwx -m u:"$(whoami)":rwx "$project_dir/app/cache" "$project_dir/app/logs"
sudo setfacl -dR -m u:www-data:rwx -m u:"$(whoami)":rwx "$project_dir/app/cache" "$project_dir/app/logs"
echo 'Generating nginx host file'
cp nginx_skel.txt "$1.loc"
echo "Editing $1.loc"
# ':' is used as the sed delimiter because the replacement contains '/'.
sed -i.bak "s:{{ project_dir }}:$project_dir/web:g" "$1.loc"
sed -i.bak "s:{{ project_name }}:$project_name:g" "$1.loc"
echo 'Moving host file to nginx dir'
sudo mv "$1.loc" /etc/nginx/sites-enabled/
echo 'Removing backup file'
rm "$1.loc.bak"
echo 'Restarting nginx'
sudo service nginx restart
echo 'Editing hosts file'
sudo -- sh -c "echo 127.0.0.1 $1.loc >> /etc/hosts"
echo 'All jobs done'
fi
| true
|
3948f470c8ebc4913b4cbd125252ff944e3d7e72
|
Shell
|
aim11/PanGenSpark
|
/compile.sh
|
UTF-8
| 2,388
| 2.859375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Build PanGenSpark and all of its native dependencies (CHIC, PanVC,
# radixSA, samtools), install the binaries under /opt/chic, and fan the
# results out to worker nodes node-1..node-25 over ssh/scp.
# Assumes: root on a yum-based distro, passwordless ssh to the workers.
# NOTE(review): none of the cd/make steps are error-checked — a failed
# clone or build silently cascades; confirm acceptable for this pipeline.
CUR=$( pwd )
# compile needed accessories
mkdir modules
cd modules
git clone "https://gitlab.com/dvalenzu/CHIC.git"
git clone "https://gitlab.com/dvalenzu/PanVC.git"
git clone "https://github.com/mariusmni/radixSA.git"
yum install -y maven2
yum install -y gcc gcc-c++ autoconf automake
yum install -y cmake
yum install -y tbb-devel
yum install -y unzip
yum install -y mmv
cd ${CUR}
cd modules/CHIC/
make
cd ${CUR}
cd modules/CHIC/ext
./get_relz.sh
cd ${CUR}
cd modules/CHIC/src
make all
cd ${CUR}
mv modules/PanVC/components/normalize_vcf/ components/
mv modules/PanVC/components/sam_to_pos/ components/
cd modules/PanVC/components/pan_genome_index_real/ChicAligner-beta/ext
./compile.sh
cd ${CUR}
cd modules/radixSA/
make
cd ${CUR}
yum install -y curl-devel
yum install -y bzip2
wget https://github.com/samtools/samtools/releases/download/1.10/samtools-1.10.tar.bz2
tar -xf samtools-1.10.tar.bz2
cd samtools-1.10
make
#Tools for mapping variants to original reference genome positions if needed
#cd ${CUR}
#cd components/normalize_vcf
#git clone "https://github.com/lindenb/jvarkit.git"
#cd jvarkit
#make msa2vcf
#cd ${CUR}
#cd components/normalize_vcf/projector
#make
cd ${CUR}
pip install numpy
pip install biopython
#compile PanSpark
mvn package
#Copy needed binaries
cp -f modules/CHIC/src/chic_index components/index/chic_index
cp -f modules/CHIC/src/chic_align components/index/chic_align
cp -f modules/radixSA/radixSA ./
cp -f modules/PanVC/components/pan_genome_index_real/ChicAligner-beta/ext/LZ/RLZ_parallel/src/rlz_for_hybrid ./
mkdir -p /opt/chic/
cp -r components/index/ /opt/chic/
mkdir -p /mnt/tmp
#Setup worker nodes
for i in {1..25}; do
ssh -tt -o "StrictHostKeyChecking no" node-$i mkdir /opt/chic/
scp -o "StrictHostKeyChecking no" -r components/index/ node-$i:/opt/chic/
ssh -tt -o "StrictHostKeyChecking no" node-$i mkdir -p $CUR/modules/CHIC/ext/
scp -o "StrictHostKeyChecking no" -r modules/CHIC/ext/BOWTIE2 node-$i:$CUR/modules/CHIC/ext/
ssh -tt -o "StrictHostKeyChecking no" node-$i mkdir -p /opt/samtools
scp -o "StrictHostKeyChecking no" samtools-1.10/* node-$i:/opt/samtools/
ssh -tt -o "StrictHostKeyChecking no" node-$i mkdir /mnt/tmp
#scp -o "StrictHostKeyChecking no" /opt/hadoop/etc/hadoop/* node-$i:/opt/hadoop/etc/hadoop/ &
#scp /etc/hosts node-$i:/etc/hosts
done
| true
|
fd562bed0b5b1d674b612737bf313ba0307b3650
|
Shell
|
Rtlzoz2012/FBz-v2
|
/FBz v2.sh
|
UTF-8
| 3,654
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Termux menu script: shows a numbered menu and clones/runs the selected
# Facebook-related tool. Option 0 installs the prerequisites.
# Fixes vs original: the first line was "#usr/bin/bash" (a plain comment,
# not a shebang); $pil is quoted in every test so an empty/space-containing
# answer can no longer produce a '[: =: unary operator expected' error; and
# option 8 was missing its 'git clone' / 'cd' (it listed the URL as a bare
# command). All user-facing text is left byte-for-byte unchanged.
clear
bi='\033[34;1m' # blue
i='\033[32;1m' # green
pur='\033[35;1m' # purple
cy='\033[36;1m' # cyan
me='\033[31;1m' # red
pu='\033[37;1m' # white
ku='\033[33;1m' # yellow
# [SUBSCRIBE] Am p5
figlet -f standard Rtlzoz Tools | lolcat
echo
echo " ____________________-=[Private Tools]=-____________________" | lolcat
echo "| |" | lolcat
echo "| Root®Autor : Rtlzoz |" | lolcat
echo "| Youtube : Am p5 |" | lolcat
echo "| Blog : Lagibobo.com |" | lolcat
echo "|___________________________________________________________|" | lolcat
date | lolcat
echo
echo $i"========================================"
echo $i"|"$me" 0"$i" |"$pu" Install dulu bahannya bro "
echo $i"========================================"
echo $i"|"$me" 1"$i" |"$pu" BRUTEFORCE FB PHP"
echo $i"|"$me" 2"$i" |"$pu" BRUTEFORCE FB v.1.3"
echo $i"|"$me" 3"$i" |"$pu" BRUTEFORCE FB "$me"[Root Access]"
echo $i"────────────────────────────────"
echo $i"|"$me" 4"$i" |"$pu" PROFILE GUARD FACEBOOK"
echo $i"|"$me" 5"$i" |"$pu" AUTO REPORT FACBOOK"
echo $i"────────────────────────────────"
echo $i"|"$me" 6"$i" |"$pu" PHISING FACEBOOK 1"
echo $i"|"$me" 7"$i" |"$pu" PHISING FACEBOOK 2"
echo $i"|"$me" 8"$i" |"$pu" PHISING FACEBOOK 4"
echo $i"========================================"
echo $i"|"$me" 9"$i" |"$pu" EXIT"
echo $i"========================================"
echo
echo $me"┌==="$bi"["$i"Rtlzoz"$bi"]"$me"======"$bi"["$i""Masukan Nomor Bro""$bi"]"
echo $me"¦"
read -p"└──# " pil
# Option 0: install every package the tools below depend on.
if [ "$pil" = 0 ]
then
clear
apt update && apt upgrade
apt install python2
pip2 install urllib3 chardet certifi idna requests
pkg install git
pip2 install mechanize
pkg install curl
pkg install ruby
pkg install gem
gem install lolcat
pkg install git
pkg install php
pkg install ruby cowsay toilet figlet
pkg install neofetch
pkg install nano
figlet -f slant " SUCCESS "|lolcat
fi
if [ "$pil" = 1 ]
then
clear
figlet -f slant "Bentar bro"|lolcat
sleep 1
git clone https://github.com/FR13ND8/fbbrute
cd fbbrute
php fb.php
fi
if [ "$pil" = 2 ]
then
clear
figlet -f slant "Bentar bro"|lolcat
sleep 1
git clone https://github.com/FR13ND8/mbf
cd mbf
python2 MBF.py
fi
if [ "$pil" = 3 ]
then
clear
figlet -f slant "Bentar bro"|lolcat
sleep 1
git clone https://github.com/thelinuxchoice/facebash.git
cd facebash
bash facebash.sh
fi
if [ "$pil" = 4 ]
then
clear
figlet -f slant "Bentar bro"|lolcat
sleep 1
git clone https://github.com/FR13ND8/ProfileGuardFb
cd ProfileGuardFb
php guard.php
fi
if [ "$pil" = 5 ]
then
clear
figlet -f slant "Bentar bro"|lolcat
sleep 1
git clone https://github.com/IlayTamvan/Report
cd Report
unzip Report.zip
python2 Report.py
fi
if [ "$pil" = 6 ]
then
clear
figlet -f slant "Bentar bro"|lolcat
sleep 1
git clone https://github.com/thelinuxchoice/shellphish.git
cd shellphish
bash shellphish.sh
fi
if [ "$pil" = 7 ]
then
clear
figlet -f slant "Bentar bro"|lolcat
sleep 1
git clone https://github.com/thelinuxchoice/blackeye.git
cd blackeye
bash blackeye.sh
fi
if [ "$pil" = 8 ]
then
clear
figlet -f slant "Bentar bro"|lolcat
sleep 1
# Bug fix: the original listed the URL as a bare command (no git clone,
# no cd), so weeman.py could never be found.
git clone https://github.com/evait-security/weeman.git
cd weeman
chmod +x *
python2 weeman.py
fi
if [ "$pil" = 9 ]
then
clear
figlet -f slant "SHUTDOWN"|lolcat
sleep 2
echo $cy"Thanks For Using This Tools"
sleep 2
echo $i"If u find the bug please contact to me"
sleep 2
echo $ku"Youtube :"$i" Am p5"
sleep 2
echo $pur"STAY THE CRIME MY BOSS, SEE U !"
exit
fi
| true
|
b52ee8d5053cfa491697248f1d9065b434c52964
|
Shell
|
coderbradlee/iotex-core-rosetta-gateway
|
/tests/test.sh
|
UTF-8
| 2,751
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# End-to-end test driver for the iotex-core Rosetta gateway: starts the
# iotex server + gateway, then runs rosetta-cli construction/data/view
# checks against them. Strict mode: any failing command aborts the run.
set -o nounset -o pipefail -o errexit
# Kill all dangling processes on exit.
cleanup() {
# Reset terminal color, then kill every child of this shell.
# NOTE(review): printf uses ${OFF} as its *format* string — fine for this
# escape sequence, but printf '%s' "${OFF}" would be the safe form.
printf "${OFF}"
pkill -P $$ || true
}
trap "cleanup" EXIT
# ANSI escape codes to brighten up the output.
GRN=$'\e[32;1m'
OFF=$'\e[0m'
ROSETTA_PATH=$(pwd)
# Run "rosetta-cli check:construction" in the background, parse the address
# it waits on out of its log, fund that address via the Go injector, then
# confirm the transfer was seen and the checker is still alive (the final
# 'ps -p' aborts the script under errexit if it died).
function constructionCheckTest() {
cd $ROSETTA_PATH/rosetta-cli-config
printf "${GRN}### Run rosetta-cli check:construction${OFF}\n"
rosetta-cli check:construction --configuration-file testing/iotex-testing.json >rosetta-cli.log 2>&1 &
constructionCheckPID=$!
sleep 1
## TODO change this to sub process, sleep 1s, may not be right
# Last word of the "Waiting for funds on <addr>" log line is the address.
SEND_TO=$(grep -o "Waiting for funds on \w\+" rosetta-cli.log | rev | cut -d' ' -f 1 | rev)
cd $ROSETTA_PATH/tests/inject
printf "${GRN}### Starting transfer, send to: ${SEND_TO}${OFF}\n"
ROSETTA_SEND_TO=$SEND_TO go test -test.run TestInjectTransfer10IOTX
printf "${GRN}### Finished transfer funds${OFF}\n"
sleep 30
cd $ROSETTA_PATH/rosetta-cli-config
## TODO change this grep to a sub process, fail this grep in x sec should fail the test
COUNT=$(grep -c "Transactions Confirmed: 1" rosetta-cli.log)
printf "${GRN}### Finished check transfer, count: ${COUNT}${OFF}\n"
ps -p $constructionCheckPID >/dev/null
printf "${GRN}### Run rosetta-cli check:construction succeeded${OFF}\n"
}
# Run "rosetta-cli check:data" in the background while the Go test suite
# injects actions, then verify the checker process survived (errexit turns
# a failed 'ps -p' into a test failure).
function dataCheckTest() {
cd $ROSETTA_PATH/rosetta-cli-config
printf "${GRN}### Run rosetta-cli check:data${OFF}\n"
rosetta-cli check:data --configuration-file testing/iotex-testing.json &
dataCheckPID=$!
cd $ROSETTA_PATH/tests/inject
printf "${GRN}### Inject some actions...${OFF}\n"
go test
sleep 10 #wait for the last candidate action
ps -p $dataCheckPID >/dev/null
printf "${GRN}### Run rosetta-cli check:data succeeded${OFF}\n"
}
# Smoke-test the read-only endpoints: view one account and one block.
# Under errexit, a non-zero exit from either rosetta-cli call fails the run.
function viewTest(){
cd $ROSETTA_PATH/rosetta-cli-config
printf "${GRN}### Run rosetta-cli view:account and view:block...${OFF}\n"
rosetta-cli view:account '{"address":"io1ph0u2psnd7muq5xv9623rmxdsxc4uapxhzpg02"}' --configuration-file testing/iotex-testing.json
rosetta-cli view:block 10 --configuration-file testing/iotex-testing.json
printf "${GRN}### Run rosetta-cli view succeeded${OFF}\n"
}
# Launch the iotex server (with the gateway plugin) and then the Rosetta
# gateway itself, each in the background with a fixed 3s settle time.
# NOTE(review): no readiness check — confirm 3s is enough on CI machines.
function startServer(){
cd $ROSETTA_PATH
printf "${GRN}### Starting the iotex server...${OFF}\n"
GW="iotex-server -config-path=./tests/config_test.yaml -genesis-path=./tests/genesis_test.yaml -plugin=gateway"
${GW} &
sleep 3
printf "${GRN}### Starting the Rosetta gateway...${OFF}\n"
GW="iotex-core-rosetta-gateway"
${GW} &
sleep 3
}
# Main flow: bring up the servers, run the construction check concurrently
# with the data/view checks, and wait for the construction check to finish.
printf "${GRN}### Start testing${OFF}\n"
startServer
constructionCheckTest &
constructionCheckTestPID=$!
dataCheckTest
viewTest
wait $constructionCheckTestPID
printf "${GRN}### Tests finished.${OFF}\n"
| true
|
c9e0fed6320da0739368ee82f8dc62ef7d374cdf
|
Shell
|
AlexVoip/py_auto
|
/tests/scripts/forwork/ssw350/bug53916/run
|
UTF-8
| 2,893
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Test of the Callback service. Software version 3.3.
#
# Test precondition: two SIP subscribers have been created.
# Command-line variables
# $DEV_USER SSW_cocon_user
# $DEV_PASS SSW_cocon_password
# $DEV_DOMAIN SSW_domain
# $EXTER_IP SSW_IP_address
# $IP SIPP_IP_address
. conf
#. tests/scripts/ssw342/conf
# Libraries
source $LIB_PATH/screen_lib.sh
source $LIB_PATH/create_lib.sh
# Path to the scripts
ROOT_DIR=$(pwd)
SRC_PATH=$ROOT_DIR/tests/scripts/ssw350/bug53916
create_dir $SRC_PATH/temp
TEMP_PATH=$SRC_PATH/temp
create_file $TEMP_PATH/ex_code.txt
create_dir log/ssw350_bug53916
LOG_PATH=log/ssw350_bug53916
create_file $LOG_PATH/log.txt
# Path to sipp
#SIPP_PATH=../../../opt/sipp
# Counters for successful/failed calls and the current test number
FAIL_COUNT=0
SUCC_COUNT=0
COUNT=0
# Create the data files (CSV injected into sipp via -inf)
echo "SEQUENTIAL;
$USER_A;$DOM_A;" > $TEMP_PATH/$USER_A.csv
# Unregister, collect per-UAS exit codes, report totals and set the
# script's final exit status (0 only if nothing failed).
FINISH ()
{
# Unregistration
COUNT=$(($COUNT+1))
T_NAME=UnRegistration
sudo $SIPP_PATH/sipp $EXTER_IP:$EXTER_PORT -sf $SRC_PATH/reg_auth_all0.xml -inf $TEMP_PATH/$USER_A.csv -mi $IP -s $AUTH_A -ap $PASS_A -rtp_echo -l 1 -r 1 -m 1 -nd -i $IP -p $PORT_A -recv_timeout 200s -timeout_error
REZULT_A
# Remove the data files
sudo rm $TEMP_PATH/$USER_A.csv
# Read the exit codes to make sure every UAS finished correctly.
EX_FAIL=0
while read line;do
excode=`echo $line | cut -d ' ' -f 3`
name=`echo $line | cut -d ' ' -f 1`
echo "[DEBUG] Exit code from test $name equal $excode" >> $LOG_PATH/log.txt
if [ "$excode" != "0" ];then
EX_FAIL=1
FAIL_COUNT=$(($FAIL_COUNT+1))
fi
done < $TEMP_PATH/ex_code.txt
sudo rm $TEMP_PATH/ex_code.txt
# Print the result
echo "Success $SUCC_COUNT, Failed $FAIL_COUNT"
if [[ $EX_FAIL -eq 0 && $FAIL_COUNT -eq 0 ]]
then
exit 0
else
exit 1
fi
}
# Counts successful and failed calls. Inspects $? of the command that ran
# immediately before the call, so it must be invoked right after sipp.
REZULT_A ()
{
if test $? -eq 0
then
SUCC_COUNT=$(($SUCC_COUNT+1))
echo "[DEBUG] Test $COUNT $T_NAME side A passed" >> $LOG_PATH/log.txt
else
FAIL_COUNT=$(($FAIL_COUNT+1))
echo "[DEBUG] Test $COUNT $T_NAME side A failed" >> $LOG_PATH/log.txt
fi
}
# Function finishing script
# NOTE(review): FATAL is defined but never called in this script — confirm
# whether a trap/handler was meant to invoke it.
FATAL ()
{
sudo killall -9 screen
screen -wipe
sudo killall -9 sipp
FAIL_COUNT=$(($FAIL_COUNT+999))
}
# Registration users
T_NAME=Registration
sudo $SIPP_PATH/sipp $EXTER_IP:$EXTER_PORT -sf $SRC_PATH/reg_auth_exp.xml -inf $TEMP_PATH/$USER_A.csv -mi $IP -s $AUTH_A -ap $PASS_A -rtp_echo -l 1 -r 1 -m 1 -nd -i $IP -p $PORT_A -recv_timeout 200s -timeout_error
REZULT_A
sleep 10
FINISH
| true
|
11dfcad588fa288210e67d72936dd9a780cb62cc
|
Shell
|
chandr20/devstack-plugin-container
|
/devstack/plugin.sh
|
UTF-8
| 981
| 3.53125
| 4
|
[] |
no_license
|
# container - Devstack extras script to install container engine
# Sourced by devstack with $1=phase (stack/unstack/clean) and $2=sub-phase;
# dispatches to the docker helpers from lib/docker accordingly.
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set -o xtrace
echo_summary "container's plugin.sh was called..."
source $DEST/devstack-plugin-container/devstack/lib/docker
(set -o posix; set)
if is_service_enabled container; then
if [[ "$1" == "stack" && "$2" == "install" ]]; then
echo_summary "Installing container engine"
if [[ ${CONTAINER_ENGINE} == "docker" ]]; then
# Install only when not already present.
check_docker || install_docker
fi
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
echo_summary "Configuring container engine"
if [[ ${CONTAINER_ENGINE} == "docker" ]]; then
configure_docker
fi
fi
if [[ "$1" == "unstack" ]]; then
if [[ ${CONTAINER_ENGINE} == "docker" ]]; then
stop_docker
fi
fi
if [[ "$1" == "clean" ]]; then
# nothing needed here
:
fi
fi
# Restore xtrace
# $XTRACE re-executes the saved "set +o xtrace"/"set -o xtrace" command.
$XTRACE
| true
|
40af2d3cef8f712e233a035db2af8ceba1872992
|
Shell
|
gregpaton08/home_lock
|
/ble/lockStatus.sh
|
UTF-8
| 151
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Print 1 if the lock's HTTP endpoint reports "true" anywhere in its
# response body, else 0. The lock's URL is read from the file "lockaddr"
# in the current directory.
address=$(cat lockaddr)
# Bug fix: the original redirected curl's stdout to /dev/null *inside* the
# command substitution, so $res was always empty and this script always
# printed 0. Capture the body (-s silences the progress meter instead).
res=$(curl -s -X GET "$address" 2>/dev/null)
if [[ $res == *"true"* ]]; then
    echo 1
else
    echo 0
fi
| true
|
f0ae01af96087365454c067a2d89bbb5aaf98413
|
Shell
|
DigitKoodit/unix-mansion-adventure
|
/utils/generate-random-files.sh
|
UTF-8
| 279
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Fill a folder with files whose random alphanumeric names double as their
# contents.
#   $1 - target folder (must already exist)
#   $2 - length of each generated file name
#   $3 - number of files to create
FOLDER=$1
FILE_NAME_LENGTH=$2
FILE_COUNT=$3
if [[ ! -d $FOLDER ]]; then
echo "Folder doesn't exist"
exit 1
fi
# Quote the target and bail if cd fails so we never litter the cwd.
cd "$FOLDER" || exit 1
# Start at 1: the original 0..FILE_COUNT loop created FILE_COUNT+1 files.
for (( i=1; i<=FILE_COUNT; i++ )); do
RND=$(LC_ALL=C tr -dc 'A-Za-z0-9' < /dev/urandom | head -c "$FILE_NAME_LENGTH")
echo "$RND" > "$RND"
done
| true
|
ead11d707f191b18036f0e6490335a90bdcca423
|
Shell
|
hironeko/dotfiles
|
/install/functions/functions.sh
|
UTF-8
| 2,188
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Root of the dotfiles checkout; every helper below resolves files under it.
DOTPATH=$HOME/dotfiles
# Install/refresh prezto when the login shell is still bash (the installer
# in etc/prezto.sh presumably switches it to zsh); prints a banner before
# and after. Always returns 0.
prezto_set () {
cat <<EOF
#############################################
# #
# prezto setup... check #
# #
#############################################
EOF
if [ $SHELL = "/bin/bash" ]; then
. $DOTPATH/etc/prezto.sh
else
echo "alredy changed zsh"
fi
cat <<EOF
#############################################
# #
# prezto setup... done #
# #
#############################################
EOF
return 0
}
# Install spacemacs via etc/spacemacs.sh (the existence check is currently
# commented out, so it always reinstalls), then start emacs once with
# --insecure so it can pull its packages.
spacemacs_set () {
cat <<EOF
#############################################
# #
# spacemacs install ...start #
# #
#############################################
EOF
# spacemacs clone
# if [ ! -e $HOME/.emacs.d/spacemacs.mk ]; then
. $DOTPATH/etc/spacemacs.sh
# else
# echo "done"
# fi
cat <<EOF
#############################################
# #
# spacemacs install ...done #
# #
#############################################
EOF
emacs --insecure
}
# Symlink each tracked dotfile from $DOTPATH into $HOME, overwriting any
# existing link (-f). Prints a banner and returns 0.
symlink_set () {
    DOT_FILES=(
        .zshrc
        .tmux.conf
        .vimrc
        package.json
    )
    # Quote the array expansion and both link paths so entries survive
    # word-splitting intact.
    for file in "${DOT_FILES[@]}"; do
        ln -sf "$DOTPATH/$file" "$HOME/$file"
    done
    cat <<EOF
########################
# #
# done symlink #
# #
########################
EOF
    return 0
}
# Return 0 when command "$1" is available, non-zero otherwise.
# Usage:  if has some_tool; then ...   /   if ! has some_tool; then ...
has () {
    # 'command -v' is the portable existence check recommended over
    # 'which'/'type' for scripting.
    command -v "$1" > /dev/null 2>&1
}
# Install the starship prompt via its upstream installer, then replace the
# current shell with a fresh login shell so the prompt takes effect.
# NOTE(review): piping a remote script straight into bash runs unreviewed
# code — confirm that trust decision is acceptable here.
starship_set() {
curl -fsSL https://starship.rs/install.sh | bash
exec $SHELL -l
}
# Top-level entry point: run the macOS setup script and create the symlinks.
# The prezto/starship/spacemacs steps are currently disabled (commented out).
setup () {
. $DOTPATH/etc/mac.sh
# prezto_set
#starship_set
symlink_set
#if [ ! -e $HOME/.emacs.d/spacemacs.mk ]; then
# spacemacs_set
#fi
}
| true
|
e0f4866d84a82c5b16406777b49f73b4e7934fea
|
Shell
|
cmagina/layer-apache-zookeeper
|
/actions/start-rest
|
UTF-8
| 299
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Juju action: build ZooKeeper (ant) as the zookeeper user, then leave the
# contrib REST server running in the background.
# Expects ZOO_BIN_DIR / ZOO_REST_DIR to be provided by /etc/environment;
# each step is skipped when its directory is absent. set -e aborts on any
# failing command.
set -e
source /etc/environment
if [[ -d ${ZOO_BIN_DIR} ]]; then
    # The ZooKeeper home is the parent of bin/; hand the whole tree to the
    # zookeeper user before building. (Paths quoted vs the original so a
    # directory containing spaces cannot word-split the commands.)
    ZOO_HOME_DIR=${ZOO_BIN_DIR}/..
    chown -R zookeeper:zookeeper "${ZOO_HOME_DIR}"
    cd "${ZOO_HOME_DIR}"
    su zookeeper -c "ant"
fi
if [[ -d ${ZOO_REST_DIR} ]]; then
    cd "${ZOO_REST_DIR}"
    # nohup + & keep the REST server alive after this action exits.
    su zookeeper -c "nohup ant run &"
fi
| true
|
511c36f5a6cba68c3bd64ef82c7e9dacf0f49cfd
|
Shell
|
quantomb/MEM
|
/BETTS/run_scripts/run_list_SP
|
UTF-8
| 2,922
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# This bash script analytically continues the coarse-grained single particle
# density of states and coarse-grained spectra,
#
# N(w) and Abar(K,w) .
#
# dsigcluster employs a hilbert transform and a root finder to calculate
#
# Gbar(K,w) and Sigma(K,w)
#
# and Sigma is printed into the files SigmaFILE_ick.dat.
#
# Inputs to this script are read from the standard input file. Each line
# describes an argument
#
# $1 $2 $3 $4 $5 $6 $7 $8
# file job Ncw aflag Pmin coarse print cflag
#
# file is the file number
# job is the job type
# 1 symmetric fermion'
# 2 asymmetric fermion'
# Ncw is the number of K in the IR
# aflag determines the MEM algorithm used
# 0 Classic w/JP
# 1 Bryan w/JP
# 2 Classic wo/JP
# 3 Bryan wo/JP
# Pmin sets the minimum ftest probability for data to be used
# coarse set the coarse-graining size for rebinning sequential QMC data
# print sets the print level
# cflag determines whether the bin and sigma files are compressed
# 0 compress nothing
# 1 compress bins files
# 2 compress bins and sigma.dat files
#
# SET SOME PATHS
#
# mem_locs defines $runone_mem and $dsigcluster used below.
source ~/mem_locs
#
# start reading the parameters
#
imfirst=0
# Loop over stdin lines until 'read' yields an empty file field (EOF).
while [ true ]
do
read file job Ncw aflag pmin coarse idraw cflag
#
if [ ! "$file" ]
then
exit
fi
# First iteration only: seed the disposable model files.
if let "imfirst == 0"
then
#
# copy the models to some disposable files
#
echo 'setting models'
ick=1
while let "ick <= Ncw"
do
cp model_Akw$ick dump$ick
let "ick=ick + 1"
done
cp model_dos dumpdos
imfirst=1
fi
#
# Uncompress the bin and sigma files if necessary.
#
if [ -s "bins$file.bz2" ] ; then
bunzip2 "bins$file.bz2"
fi
if [ -s "sigma$file.dat.bz2" ] ; then
bunzip2 "sigma$file.dat.bz2"
fi
#
# DOS
#
echo 'working on the DOS of set ' $file
ick=0
$runone_mem $file $job $ick dumpdos $aflag $pmin $coarse 0 $idraw
cp -f model_next dumpdos
#
# Akw
#
ick=1
while let "ick <= Ncw"
do
echo 'working on the spectra of set ' $file 'with ick=' $ick
$runone_mem $file $job $ick dump$ick $aflag $pmin $coarse 0 $idraw
cp -f model_next dump$ick
let "ick=ick + 1"
done
#
# Now we have the DOS and Akw corresponding to file file. We
# calculate the self energy, Sigma(K,w), using dsigcluster.
#
ick=1
while let "ick <= Ncw"
do
echo 'Calculating Sigma(K,w) of set ' $file 'with ick=' $ick
cp dos"$file"_"$ick"QMC model_next
cat <<EOF> dummy
model_next
sigma$file.dat
$ick
0.00000001
0.0
EOF
$dsigcluster <dummy>out
rm dummy out
mv Sigma.dat Sigma"$file"_"$ick".dat
let "ick=ick + 1"
done
#
# Optionally, compress the data files
#
if let "cflag > 0"; then
bzip2 "bins$file"
if let "cflag > 1"; then
bzip2 "sigma$file.dat"
fi
fi
done
exit
| true
|
baac8531ed78d4bd96ca044bd1ef6f06cb90762a
|
Shell
|
Skylled/sdk
|
/tools/dartium/generate_patches.sh
|
UTF-8
| 1,819
| 3.578125
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
#
set -x
# generate_patches.sh [systems]
#
# Convenience script to generate patches for JsInterop under Dartium. Do not call from build steps or tests
# - call fremontcutbuilder and dartdomgenerator instead. Do not add 'real'
# functionality here - change the python code instead.
#
# I find it essential to generate all the systems so I know if I am breaking
# other systems. My habit is to run:
#
# ./go.sh
#
# 1. After running go.sh libraries in sdk/lib may change.
# 2. Build Dartium.
# 3. Run this script and sdk/lib/js/dartium/cached_patches will be created.
# 4. Rebuild Dartium.
# 5. Commit files in sdk/lib
#
# NOTE: If the Dart files generated from the IDLs may cause major changes which
# could cause the patch files to fail (missing classes, etc). If this
# happens delete the contents of the sdk/lib/js/dartium/cached_patches.dart
# build Dartium, run this script and build Dartium again with the newly
# generated patches.
ARG_OPTION="dartGenCachedPatches"
LOCATION_DARTIUM="../../../out/Release"
DARTIUM="$LOCATION_DARTIUM"
if [[ "$1" != "" ]] ; then
if [[ "$1" =~ ^--roll ]]; then
ARG_OPTION="dartGenCachedPatchesForRoll"
else
DARTIUM="$1"
fi
fi
if [[ "$2" != "" ]] ; then
if [[ "$2" =~ ^--roll ]]; then
ARG_OPTION="dartGenCachedPatchesForRoll"
else
DARTIUM="$2"
fi
fi
DART_APP_LOCATION="file://"$PWD"/generate_app/generate_cached_patches.html"
DARTIUM_ARGS=" --user-data-dir=out --disable-web-security --no-sandbox --enable-blink-features="$ARG_OPTION""
CACHED_PATCHES_FILE=""$PWD"/../../sdk/lib/js/dartium/cached_patches.dart"
cmd=""$DARTIUM"/chrome "$DARTIUM_ARGS" "$DART_APP_LOCATION" |
(sed -n '/START_OF_CACHED_PATCHES/,/END_OF_CACHED_PATCHES/p') > "$CACHED_PATCHES_FILE""
reset && eval "${cmd}"
| true
|
8ef82132907116a1ee5c71084d8d4f33034bec21
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/codesearch/PKGBUILD
|
UTF-8
| 1,197
| 2.671875
| 3
|
[] |
no_license
|
# Contributor: wenLiangcan <boxeed@gmail.com>
# Maintainer: aksr <aksr at t-com dot me>
pkgname=codesearch
pkgver=0.01
pkgrel=3
pkgdesc="A tool for indexing and then performing regular expression searches over large bodies of source code."
arch=('x86_64' 'i686')
url="https://code.google.com/p/codesearch/"
license=('BSD 3-Clause')
install=${pkgname}.install
if [[ ${CARCH} == "x86_64" ]]; then
source=("$pkgname-$pkgver::https://codesearch.googlecode.com/files/$pkgname-$pkgver-linux-amd64.zip")
md5sums=('cea96b90bbe51d13a9591f2ef1650502')
sha1sums=('976e9ce3a9a765539b0ea2333fe2c1d0f8c42f20')
sha256sums=('5bee62e322ad975072a32aff453d50f6e894ad529b4180baef1115c8c0af010e')
elif [[ ${CARCH} == "i686" ]]; then
source=("$pkgname-$pkgver::https://codesearch.googlecode.com/files/$pkgname-$pkgver-linux-386.zip")
md5sums=('5abee89270270b8bee447e4770a30ade')
sha1sums=('385c6662b7d055f2585052901d04e2b4cc100b1f')
sha256sums=('38e813c9dc6ab453920a55f8647c5767219df12dddb3c0cc111980af221f90e9')
fi
package() {
cd "$srcdir/$pkgname-$pkgver"
install -Dm755 cgrep "$pkgdir/usr/bin/cgrep"
install -Dm755 cindex "$pkgdir/usr/bin/cindex"
install -Dm755 csearch "$pkgdir/usr/bin/csearch"
}
| true
|
ce5712460226ba1e0f61b2f7eb993853353d90c3
|
Shell
|
heitor57/PhaserMultiMovement
|
/assets/tilemaps/export
|
UTF-8
| 163
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
echo $(pwd)
for file in *.tmx
do
SUB=$(echo $file | cut -d'.' -f 1)
tiled --export-map json $file $SUB.json
tmxrasterizer $file $SUB.png
done
time
| true
|
7d737496ce9f1ffb89029b04f723a2e14a478f4f
|
Shell
|
ajaymur91/nucl_chtc
|
/generate_child.sh
|
UTF-8
| 1,202
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
#set -e
#if [[ $# -ne 3 ]]; then
# echo " Pass the cluster size and trials and Number of samples" >&2
# echo " Usage: bash generate_child.sh <cluster_size> <trials> <samples>" >&2
# exit 2
#fi
eval "$(conda shell.bash hook)"
conda activate NUCL
N=$(($1+2))
T=100
Ns=100
Gm=0.1
eval echo volume_{0..$((T-1))}.txt | xargs -n 1 cat > Veff.txt
eval echo mratio{0..$((T-1))} | xargs -n 1 cat > mratio.txt
eval echo FE_{0..$((T-1))}.txt | xargs -n 1 tail -n 1 -q | awk '{print $2}' > TI.txt
[ -s Pr.txt ] || for i in `seq 1 $Ns`; do echo 1; done > Pr.txt
echo "Gm=$Gm"
mkdir -p results/N$N
cp Pr.txt results/N$N/
Rscript --vanilla Boltzmann.R $N $Ns $Gm
mkdir -p N_$(($N+1))
k=0
while read p; do
#echo "$p, $k"
cp FE_"$p".gro N_$(($N+1))/"$k".gro
k=$((k+1))
done <boltzmann.txt
mkdir -p results/N$N
mv TI.txt results/N$N/
mv FE*.txt results/N$N/
mv FE*.gro results/N$N/
mv mratio.txt results/N$N/
mv mratio* results/N$N/
mv Veff.txt results/N$N/
mv volume*.txt results/N$N/
mv boltzmann.txt results/N$N/
mv dG.txt results/N$N/
cp P.txt results/N$N/
RETRY=$1
if (( $RETRY < 10 ))
then {
echo "GO AGAIN"
exit 1
}
else {
echo "DONE"
exit 0
}
fi
| true
|
8fc8df04a355b9aa988180a5854fb48bb7c86358
|
Shell
|
irisnet/testnets
|
/nyancat/v0.16/upgrade_verify.sh
|
UTF-8
| 3,812
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
echo "Parsing... DO NOT QUIT"
declare -A hash_height_map
declare -A fva_hash_map
declare -A fva_vote_map
declare -A fva_moniker_map
declare -A testnet_identity_map
declare -A mainnet_identity_map
### acquire validator_address of validators who has signed one of blocks between 89500 and 89800 from lcd and save it to hash_height_map,
### the height of software upgrade switched to version v0.16.0-rc1 is 89500.
for i in {89501..89801}
do
# echo $i;
PreCommits=$(curl -s 'https://lcd.nyancat.irisnet.org/blocks/'$i | jq '.block.last_commit.precommits')
for row in $(echo "${PreCommits}" | jq -r '.[] | @base64'); do
_jq() {
echo ${row} | base64 --decode | jq -r ${1}
}
validator_address=$(_jq '.validator_address')
height=$(_jq '.height')
### filter validator_address of validator who already has signed last block.
if [ "$validator_address" != "null" -a -z "${hash_height_map[$validator_address]}" ]
then
hash_height_map["$validator_address"]=$height
fi
done
done
echo "count of hash_height_map: " ${#hash_height_map[@]}
### acquire info of validators from api of nyancat explorer and save it to fva_hash_map and testnet_identity_map.
curl -s 'https://nyancat.irisplorer.io/api/stake/validators?page=1&size=100&type=""&origin=browser' | jq '.Data[] | .proposer_addr+":"+.operator_address+":"+.description.identity' | sed 's/"//g' > nyancat-task-temp
while read row; do
# echo $row
hash=$(echo $row | cut -d':' -f1)
fva=$(echo $row | cut -d':' -f2)
identity=$(echo $row | cut -d':' -f3)
fva_hash_map[$fva]=$hash
testnet_identity_map[$fva]=$identity
done < nyancat-task-temp
### acquire info of validators from api of explorer of mainnet and save it to mainnet_identity_map.
curl -s 'https://www.irisplorer.io/api/stake/validators?page=1&size=100&type=""&origin=browser' | jq '.Data[] | .proposer_addr+":"+.operator_address+":"+.description.identity' | sed 's/"//g' > nyancat-task-temp
while read row; do
identity=$(echo $row | cut -d':' -f3)
# echo $identity
[ -n "$identity" ] && mainnet_identity_map[$identity]="ok"
done < nyancat-task-temp
### acquire voted info in proposal #1 & #3 from api of nyancat explorer ordered by time.
curl -s 'https://nyancat.irisplorer.io/api/gov/proposals/1/voter_txs?page=1&size=100' | jq '.items | reverse | .[] | .voter+":"+.moniker+":"+.option+"::"+.timestamp' | sed 's/"//g' > nyancat-task-temp
curl -s 'https://nyancat.irisplorer.io/api/gov/proposals/3/voter_txs?page=1&size=100' | jq '.items | reverse | .[] | .voter+":"+.moniker+":"+.option+"::"+.timestamp' | sed 's/"//g' >> nyancat-task-temp
index=0
while read row; do
# echo $row
fva=$(echo $row | cut -d':' -f1)
moniker=$(echo $row | cut -d':' -f2)
vote=$(echo $row | cut -d':' -f3)
timestamp=$(echo $row | awk -F:: '{print $2}')
### filter validators of duplicate vote and unsigned one of 300 blocks after software upgrade.
if [ -z "${fva_vote_map[$fva]}" -a -n "${fva_hash_map[$fva]}" ] && [ -n "${hash_height_map[${fva_hash_map[$fva]}]}" ]; then
if [ $index -lt 40 ]; then
index=$[$index + 1]
fva_vote_map[$fva]=$vote
echo -e $index"\t"$fva"\t"$timestamp"\t"${testnet_identity_map[$fva]}"\t"$moniker
### validators of mainnet beyond top 40 is passed by verifying their identity.
elif [ -n "${testnet_identity_map[$fva]}" ] && [ "${mainnet_identity_map[${testnet_identity_map[$fva]}]}" == "ok" ]; then
index=$[$index + 1]
fva_vote_map[$fva]=$vote
echo -e $index"\t"$fva"\t"$timestamp"\t"${testnet_identity_map[$fva]}"\t"$moniker"\t"ok
fi
fi
# echo $fva,$moniker,$vote
done < nyancat-task-temp
rm -f nyancat-task-temp
| true
|
953af8f5567478c8f0871653d81df57351a16bfe
|
Shell
|
AnotherOctopus/CITest
|
/enablei2c.sh
|
UTF-8
| 2,168
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# this takes the raspiconfig functions and hardcodes them to be turned
# on at run. WARNING, WE NEED TO KEEP UP TO DATE WITH RASPICONFIG
# raspiconfig can be found here
# https://github.com/asb/raspi-config/blob/master/raspi-config
do_i2c() {
DEVICE_TREE="yes" # assume not disabled
DEFAULT=
if [ -e $CONFIG ] && grep -q "^device_tree=$" $CONFIG; then
DEVICE_TREE="no"
fi
CURRENT_SETTING="off" # assume disabled
DEFAULT=--defaultno
if [ -e $CONFIG ] && grep -q -E "^(device_tree_param|dtparam)=([^,]*,)*i2c(_arm)?(=(on|true|yes|1))?(,.*)?$" $CONFIG; then
CURRENT_SETTING="on"
DEFAULT=
fi
if [ $DEVICE_TREE = "yes" ]; then
RET=0
if [ $RET -eq 0 ]; then
SETTING=on
STATUS=enabled
elif [ $RET -eq 1 ]; then
SETTING=off
STATUS=disabled
else
return 0
fi
TENSE=is
REBOOT=
if [ $SETTING != $CURRENT_SETTING ]; then
TENSE="will be"
REBOOT=" after a reboot"
ASK_TO_REBOOT=1
fi
sed $CONFIG -i -r -e "s/^((device_tree_param|dtparam)=([^,]*,)*i2c(_arm)?)(=[^,]*)?/\1=$SETTING/"
if ! grep -q -E "^(device_tree_param|dtparam)=([^,]*,)*i2c(_arm)?=[^,]*" $CONFIG; then
printf "dtparam=i2c_arm=$SETTING\n" >> $CONFIG
fi
echo "The ARM I2C interface $TENSE $STATUS$REBOOT" 20 60 1
if [ $SETTING = "off" ]; then
return 0
fi
fi
CURRENT_STATUS="yes" # assume not blacklisted
DEFAULT=
if [ -e $BLACKLIST ] && grep -q "^blacklist[[:space:]]*i2c[-_]bcm2708" $BLACKLIST; then
CURRENT_STATUS="no"
DEFAULT=--defaultno
fi
if ! [ -e $BLACKLIST ]; then
touch $BLACKLIST
fi
RET=0
if [ $RET -eq 0 ]; then
sed $BLACKLIST -i -e "s/^\(blacklist[[:space:]]*i2c[-_]bcm2708\)/#\1/"
modprobe i2c-bcm2708
echo "I2C kernel module will now be loaded by default" 20 60 1
elif [ $RET -eq 1 ]; then
sed $BLACKLIST -i -e "s/^#\(blacklist[[:space:]]*i2c[-_]bcm2708\)/\1/"
if ! grep -q "^blacklist i2c[-_]bcm2708" $BLACKLIST; then
printf "blacklist i2c-bcm2708\n" >> $BLACKLIST
fi
echo "I2C kernel module will no longer be loaded by default" 20 60 1
else
return 0
fi
}
do_i2c
| true
|
c8d93d0b4588100f39ed0bbcce6899860407f459
|
Shell
|
lululau/homebrew-emacsmacport
|
/build-scripts/build-emacs.sh
|
UTF-8
| 495
| 2.671875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
source_dir=$1
installprefix=`pwd`/$source_dir/tmproot
app_dir=$installprefix/Emacs.app/Contents/Resources
emacs_version=$2
compver=x86_64-apple-darwin`uname -r`
export PATH="/usr/local/opt/texinfo/bin:$PATH"
set -e
cd $source_dir
./autogen.sh
./configure --with-mac --enable-locallisppath=/usr/local/share/emacs/site-lisp:/opt/homebrew/share/emacs/site-lisp --enable-mac-app=$installprefix --prefix=$installprefix --enable-mac-self-contained --with-modules
make
make install
set +e
| true
|
c4894e8a1aa32d0ee0c25476cdba97c5e3306052
|
Shell
|
federatedcloud/WRFv4-Benchmarking
|
/Docker/docker-build.sh
|
UTF-8
| 840
| 3.40625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# A simple script for building the container
export BUILD_OUTPUT_FILE=./build_output.txt
echo "Build started, output in ${BUILD_OUTPUT_FILE}"
# Save build output log instead of printing to the screen
#docker build --network=host --tag cornellcac/wrf:4.2.2 ./ 2>&1 > $BUILD_OUTPUT_FILE
# Uncomment below to save build output log AND print to screen
docker build --network=host --tag cornellcac/wrf:4.2.2-intel-7415915e0b8e ./ 2>&1 | tee $BUILD_OUTPUT_FILE
# Some useful summary output
echo "-------------------------------------------------------------"
echo "Build complete. Check output file for final status. Errors:"
cat build_output.txt | grep ERROR
cat build_output.txt | grep Error
echo "-------------------------------------------------------------"
echo "Tail of build output:"
cat build_output.txt | tail
| true
|
9f4922e23e8bfd3ef2a46828f81aa822be061bc3
|
Shell
|
dgarnier/hub75_bb
|
/debian_fixups/add_fw_to_initramfs.sh
|
UTF-8
| 1,088
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
if [ ! -f /etc/initramfs-tools/hooks/pruss ] ; then
cat <<EOF > /etc/initramfs-tools/hooks/pruss
#!/bin/sh -e
# Copy the am335x-pru?-fw into the initramfs
if [ "\$1" = "prereqs" ]; then exit 0; fi
. /usr/share/initramfs-tools/hook-functions
if [ -d /lib/firmware/ ] ; then
mkdir -p \$DESTDIR/lib/firmware
if [ -f /lib/firmware/am335x-pru0-fw ] ; then
echo "copying pru0-fw"
cp -a /lib/firmware/am335x-pru0-fw \$DESTDIR/lib/firmware/
elif [ -f \$DESTDIR/lib/firmware/am335x-pru0-fw ] ; then
rm \$DESTDIR/lib/firmware/am335x-pru0-fw
fi
if [ -f /lib/firmware/am335x-pru1-fw ] ; then
echo "copying pru1-fw"
cp -a /lib/firmware/am335x-pru1-fw \$DESTDIR/lib/firmware/
elif [ -f \$DESTDIR/lib/firmware/am335x-pru1-fw ] ; then
rm \$DESTDIR/lib/firmware/am335x-pru1-fw
fi
fi
EOF
chmod a+x /etc/initramfs-tools/hooks/pruss
fi
update_initramfs () {
if [ -f /boot/initrd.img-$(uname -r) ] ; then
sudo update-initramfs -u -k $(uname -r)
else
sudo update-initramfs -c -k $(uname -r)
fi
}
update_initramfs
| true
|
55fbba45d475043645dc3fff289315e2faee798d
|
Shell
|
CW-Huang/gradescope-autograder-template
|
/make_assignment.sh
|
UTF-8
| 909
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# CHANGE THESE FOR YOUR REPO!
GITHUB_REPO='git@github.com:mnoukhov/gradescope-autograder-template.git'
REPO_NAME="gradescope-autograder-template"
# input the assignment number and the file that students have to fill out
name=$1
solution=${2:-"solution.py"}
# delete previous files if any
rm $name.zip
# copy all files necessary for assignment
# make sure you have copied your deploy key to gradescope_base/
mkdir -p zip_$name
cp gradescope_base/* zip_$name/
# add assignment name and solution filename to run_autograder
sed -i "2 i\NAME=$name" zip_$name/run_autograder
sed -i "2 i\SOLUTION=$solution" zip_$name/run_autograder
sed -i "2 i\REPO_NAME=$REPO_NAME" zip_$name/run_autograder
sed -i "2 i\GITHUB_REPO=$GITHUB_REPO" zip_$name/setup.sh
sed -i "2 i\REPO_NAME=$REPO_NAME" zip_$name/setup.sh
# zip the assignement and delete folder
zip -r -m -j $name.zip zip_$name/*
rmdir zip_$name
| true
|
e34e3f730f79c10ca550d06be8254a6696b946fd
|
Shell
|
heckfeld/cosy-pgwish
|
/tgmt/scripts/xtload
|
IBM852
| 385
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
#$Header$
#
# xtload - ldt Kernel, Applikation und Konfiguration in das Target
#
# Autor: Michael Simon
# Forschungszentrum Jlich, COSY AP
#
# Usage:
# xtload [-n] [-b] [-s]
#
TLOAD_PROG=`basename $0`
TLOAD_INFO=cat
TLOAD_USAGE="$PROGNAME [-n] [-b] [-o interface name] [-r] [-s]"
export TLOAD_PROG TLOAD_INFO TLOAD_USAGE
tload $*
exit # return exit status of tload
| true
|
df0ec5444fe93b0afad7dc52626e3479b4c7a930
|
Shell
|
mambrus/script3-util
|
/crypt.sh
|
UTF-8
| 1,402
| 4.34375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Will tar/untar and encrypt/decrypt file or directory
# Script will determine whichever itself by
# inspecting the extension (if any) of the first argument.
# Normal usage is:
# util.crypt.sh mydir yourkeystring
# util.crypt.sh mydir.tar.gz yourkeystring
if [ -z $CRYPT_SH ]; then
CRYPT_SH="crypt.sh"
# Tar the named directory in $1 with the key $2
# Optional 3:rd parameter adds a tag string to the filename generated.
function crypt_pack()
{
if [ -z $3 ]; then
tar -zcf - $1 | openssl des3 -salt -k $2 | \
dd of=$1.des3
else
tar -zcf - $1 | openssl des3 -salt -k $2 | \
dd of=$1_$3.des3
fi
}
# Un-tar the named file $1 with the key $2
function crypt_unpack()
{
echo "1=$1"
echo "2=$2"
dd if=$1 |openssl des3 -d -k $2 | tar zxf -
}
source s3.ebasename.sh
if [ "$CRYPT_SH" == $( ebasename $0 ) ]; then
#Not sourced, do something with this.
source s3.user_response.sh
set -e
#set -u
if [ "X$( which openssl )" == "X" ]; then
set +u
ask_user_continue \
"Detected that opnssl isn't installed. Continue by install it fist? (Y/n)" || exit $?
set -u
sudo apt-get install openssl
fi
if [ $# -lt 2 ]; then
echo "Snytax error: $( basename $0 ) [directory|package-file] [key] [<extra suffix>]"
exit 1
fi
EXT=$(echo $1 | sed -e 's/^.*\.//')
echo $EXT
if [ "x${EXT}" == "xdes3" ]; then
crypt_unpack "$@"
else
crypt_pack "$@"
fi
exit $?
fi
fi
| true
|
6f1e87f7b251fd49b8c8eecbb1ef6f4f76b6bc7c
|
Shell
|
2035themes/play-games-plugin-for-unity
|
/scripts/clean_repo
|
UTF-8
| 402
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
if ! [ -f README.md ]; then
echo "Run this script from the base of the repo."
exit 2
fi
for d in samples/CubicPilot samples/Minimal; do
rm -rvf $d/Assets/Editor $d/Assets/Editor.meta
rm -rvf $d/Assets/GooglePlayGames $d/Assets/GooglePlayGames.meta
rm -rvf $d/Assets/Plugins $d/Assets/Plugins.meta
rm -rvf $d/Library
find $d -name '*.apk' -exec rm -v {} \;
done
| true
|
0040a0710539b7e169b156be4bbaffb23e0e70cf
|
Shell
|
vzlatkov/myscripts
|
/MyScripts/TerraformAUTOMATION/InitialTests/TerraformInitUsingFucntions.sh
|
UTF-8
| 17,663
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
###This is a test to check if I can add JSON files in functions, create an array of those functions and then call the functions and substitude the variables in them.
##############################################################################################################################################################################################################
#
# THIS IS AN EXAMPLE/EXIBIT OF THE SCRIPT INSIDE OF THE SCRIPT, SO I DO NOT GET CONFUSED ( BECAUSE XZIBIT, THAT'S WHY!!! )
#
##############################################################################################################################################################################################################
### Define several "files" in here
##############################################
#example:
#fn1 () {
# cat >> /$HOME/testforVarArr.txt <<EOF
#" This is a pseudo JSON1
# {
# This line is called: "$var1"
# And this line is called: "$var2"
#} "
#EOF
#}
##############################################
### END OF THE fn's
###############################################################################################
##############################################
#example:
### create an array of functions
#fn1=JSON1pr
#fn2=JSON2pr
#declare -a arr
#
#arr=("fn1" "fn2")
##############################################
###END OF THE ARRAY DECLARATION
###############################################################################################
##############################################
#example:
### "For" loop for the prompts
#
#for i in "${arr[@]}"; do
#
#read -p "Do you want to use $i " yes
###PROMPT here
#
#if [ $yes == y ]
#then
#arr1=(${#arr[@]} "$i")
#fi
#for x in "${arr1[@]}"; do
#
# if [ $x == fn1 ]; then
# read -p "add name for var1" var1
# read -p "add name for var2" var2
#
# elif [ $x == fn2 ]; then
# read -p "add name for var3" var3
# read -p "add name for var4" var4
# fi
#done
#
#$x >> /$HOME/testforVarArr.txt
#done
##############################################
##############################################################################################################################################################################################################
#
# END OF THE EXAMPLE! END OF THE EXAMPLE! END OF THE EXAMPLE! END OF THE EXAMPLE! END OF THE EXAMPLE!
#
##############################################################################################################################################################################################################
##### ACTUAL SCRIPT STARTS HERE:
###HERE WE DEFINE SOME GLOB VARIABLES AND PRETTY COLOURS SO WE MAKE OUR LIFES EASIER AND PROVIDE THE SCRIPT WITH SOME BLING!
# THOSE ARE THE VARIABLES FOR THE COLORS:
GREEN="\e[32m"
RED="\e[31m"
ENDCOLOR="\e[0m"
# Get a carriage return into `cr`
cr=`echo $'\n.'`
cr=${cr%.}
#echo the ">> insert here:"
st=`echo '>> insert here: '`
#This Checks for the OS's package manager
#Declaring the most common package managers!
declare -a pkgs=("yum" "apt")
#For loop that checks which is the correct one
for i in "${pkgs[@]}"; do
echo which $i 1> /dev/null
done
echo '......................................................'
#This is an "if" statement to determine the package manager
if [ $? -eq 0 ]
then echo "The package in use is $i"
fi
echo '......................................................'
#The initialization starts from here
read -p "Would you like to update (y/n) $cr" update
if [ $update == y ]
then sudo $i -y update
fi
read -p "Would you like to upgrade (y/n) $cr" upgrade
if [ $upgrade == y ]
then sudo $i -y upgrade
fi
echo which terraform
if [ $? != 0 ]
then continue
elif [ $i == apt ]
then snap install --edge terraform
else $i install -y terraform
fi
############################################ END OF BLING-BLING ###############################################
############################################ THE TRANSPORTERS (JSON Statements) ###############################################
### Define Jason Stathams here
VPC () {
cat >> tf.tf <<EOF
###Assign a VPC Name
resource "aws_vpc" "$vpc_name" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "$vpc_name"
}
}
EOF
}
SUBNETS () {
cat >> tf.tf <<EOF
### I will leave all the subnets hardcoded, just for redundancy's sake
resource "aws_subnet" "subnet1" {
vpc_id = aws_vpc.$vpc_name.id
cidr_block = "10.0.1.0/24"
availability_zone = "us-east-2a"
tags = {
Name = "Subnet1 us-east-2a"
}
}
resource "aws_subnet" "subnet2" {
vpc_id = aws_vpc.$vpc_name.id
cidr_block = "10.0.2.0/24"
availability_zone = "us-east-2b"
tags = {
Name = "Subnet2 us-east-2b"
}
}
EOF
}
GATEWAY () {
cat >> tf.tf <<EOF
### Create Gateway
resource "aws_internet_gateway" "$gw" {
vpc_id = aws_vpc.$vpc_name.id
}
EOF
}
ROUTETBL () {
cat >> tf.tf <<EOF
### Create route table pointing both subnets to the gateway
resource "aws_route_table" "route_table" {
vpc_id = aws_vpc.$vpc_name.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.$gw.id
}
tags = {
Name = "Route Table for Subnets 1 & 2"
}
}
resource "aws_route_table_association" "subnet1_association" {
subnet_id = aws_subnet.subnet1.id
route_table_id = aws_route_table.route_table.id
}
resource "aws_route_table_association" "subnet2_association" {
subnet_id = aws_subnet.subnet2.id
route_table_id = aws_route_table.route_table.id
}
EOF
}
SGELB () {
cat >> tf.tf <<EOF
### create security group for the load balancer
### allow http traffic to the instances only from the LB
resource "aws_security_group" "$elb_sg" {
name = "$elb_sg"
description = "Allow http traffic through the Application Load Balancer"
vpc_id = aws_vpc.$vpc_name.id
ingress {
from_port = $elb_fp
to_port = $elb_tp
protocol = "$protocol"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = $elb_fp_out
to_port = $elb_tp_out
protocol = "$protocol"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "Allow http traffic through the Application Load Balancer"
}
}
EOF
}
SGASG () {
cat >> tf.tf <<EOF
}
### security group for ASG
resource "aws_security_group" "$asg_sg" {
name = "$asg_sg"
description = "Allow http traffic from load balancer"
vpc_id = aws_vpc.$vpc_name.id
ingress {
from_port = $ec2_fp
to_port = $ec2_tp
protocol = "$protocol_ec2"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 65535
protocol = "tcp"
# security_groups = [
# aws_security_group.$elb_sg.id
# ]
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "Allow http traffic from LB and ssh from the internet"
}
}
EOF
}
LAUNCHCONF () {
cat >> tf.tf <<EOF
}
resource "aws_launch_configuration" "asg_launch_config" {
name_prefix = "asg_launch-"
image_id = "ami-00399ec92321828f5" #ubuntu ### This should be a variable . I have to hardcode (in an array) the possible options, so you can provision all types of AMI's
instance_type = "t2.micro" #This should be a variable. I have to hardcode the instances available (in an array) and use them to allow the option to provision multiple instance types.
security_groups = [aws_security_group.$asg_sg.id]
associate_public_ip_address = true
### TEST - link iam instance profile here? ask ###
iam_instance_profile = aws_iam_instance_profile.test_profile.id
lifecycle {
create_before_destroy = true
}
user_data = <<-EOF2
#!/bin/bash
###This is a script for automatization of the installation and configuration of a Salt Stack master server. By VZlatkov (a.k.a. Xerxes)
###v1.0.1
### In this version we are going to set the basic installation of the Salt-master by updating and upgrading the OS and installing the needed dependencies. Then we are going to install the master server itself.
###v1.0.2
###Fixed the issue where if the conf file exists, it skips and does not add the local IP in it.
###v1.0.3
###This revision adds the missing "!" to the ShaBang, which was causing my userdata to fail whole day :D :D :D
###This is the initial dependency installation part of the script
apt -y update
apt -y upgrade
apt -y install python3
apt -y install python3-pip
pip install salt #You first install the Salt Stack, then you configure it as a Master or Slave
# Download key
curl -fsSL -o /usr/share/keyrings/salt-archive-keyring.gpg https://repo.saltproject.io/py3/ubuntu/20.04/amd64/latest/salt-archive-keyring.gpg
# Create apt sources list file
echo "deb [signed-by=/usr/share/keyrings/salt-archive-keyring.gpg] https://repo.saltproject.io/py3/ubuntu/20.04/amd64/latest focal main" | sudo tee /etc/apt/sources.list.d/salt.list
apt-get update
apt-get -y install salt-master
###Get the local IP and create a variable
IP=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1) #Assigns the local IP addr of the machine to a variable so we can use it
###Start the service
systemctl daemon-reload
systemctl start salt-master
systemctl restart salt-master
### Tests if the conf file/dir exists ###
if [ -d /etc/salt/master ]; #Checks if the dir exists
then
echo "interface: $IP" >> /etc/salt/master #Adds the local IP addr of the master in the confi file
#continue #If exists - skips the "elif/else" statement
else #If it doesn't exist - creates the path and file
mkdir /etc/salt/
(
cat > /etc/salt/master <<EOF3
interface: $IP
EOF3
)
fi
###Create a Salt-key
salt-key -F master
###Restart the service
systemctl daemon-reload
systemctl restart salt-master
systemctl status salt-master
EOF2
}
EOF
}
ASG () {
cat >> tf.tf <<EOF
### create auto scaling group, reference launch config, set load balancer as health monitor
resource "aws_autoscaling_group" "asg" {
name = "$asg_test"
max_size = $max
min_size = $min
health_check_grace_period = 300
health_check_type = "ELB"
desired_capacity = $cap
# force_delete = true
launch_configuration = aws_launch_configuration.asg_launch_config.name #.name??
vpc_zone_identifier = [aws_subnet.subnet1.id, aws_subnet.subnet2.id]
target_group_arns = ["${aws_lb_target_group.tgtest.arn}"]
}
EOF
}
MONITORSCALE () {
cat >> tf.tf <<EOF
###create scaling threshold of 65% CPU
resource "aws_autoscaling_policy" "asg_cpu_threshold" {
name = "asg-cpu-threshold-policy"
policy_type = "TargetTrackingScaling"
autoscaling_group_name = aws_autoscaling_group.asg.name
target_tracking_configuration {
predefined_metric_specification {
predefined_metric_type = "ASGAverageCPUUtilization"
}
target_value = 65.0
}
}
EOF
}
S3B () {
cat >> tf.tf <<EOF
### S3 bucket
resource "aws_s3_bucket" "s3_bucket" {
bucket = "s3alekstestbucket"
acl = "private"
tags = {
Name = "test_bucket"
}
}
EOF
}
INDEXS3B () {
cat >> tf.tf <<EOF
### upload index file to bucket
resource "aws_s3_bucket_object" "test_indexfile" {
bucket = aws_s3_bucket.s3_bucket.id
key = "indexfile"
source = "./s3file/index.html"
}
EOF
}
ELB () {
cat >> tf.tf <<GG
resource "aws_lb" "$LBname" {
name = "$LBname"
internal = false
load_balancer_type = "application"
security_groups = [aws_security_group.$elb_sg.id]
subnets = [
aws_subnet.subnet1.id,
aws_subnet.subnet2.id
]
enable_cross_zone_load_balancing = true
}
#create target group
resource "aws_lb_target_group" "$testtg" {
name = "$testtg"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.$vpc_name.id
target_type = "instance"
## TEST changing /index.html to /
health_check {
healthy_threshold = 2
interval = 30
protocol = "HTTP"
unhealthy_threshold = 2
path = "/"
}
}
resource "aws_lb_listener" "alb_listener" {
load_balancer_arn = aws_lb.$LBname.arn
port = "80"
protocol = "HTTP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.$testtg.arn
}
}
GG
}
###################################################################################
#TEST INIT SCRIPT
#initscript () {
#cat >> tf.tf <<EOF2
#!/bin/bash
###This is a script for automatization of the installation and configuration of a Salt Stack master server. By VZlatkov (a.k.a. Xerxes)
###v1.0.1
### In this version we are going to set the basic installation of the Salt-master by updating and upgrading the OS and installing the needed dependencies. Then we are going to install the master server itself.
###v1.0.2
###Fixed the issue where if the conf file exists, it skips and does not add the local IP in it.
###v1.0.3
###This revision adds the missing "!" to the ShaBang, which was causing my userdata to fail whole day :D :D :D
###This is the initial dependency installation part of the script
#apt -y update
#apt -y upgrade
#apt -y install python3
#apt -y install python3-pip
#pip install salt #You first install the Salt Stack, then you configure it as a Master or Slave
# Download key
#curl -fsSL -o /usr/share/keyrings/salt-archive-keyring.gpg https://repo.saltproject.io/py3/ubuntu/20.04/amd64/latest/salt-archive-keyring.gpg
# Create apt sources list file
#echo "deb [signed-by=/usr/share/keyrings/salt-archive-keyring.gpg] https://repo.saltproject.io/py3/ubuntu/20.04/amd64/latest focal main" | sudo tee /etc/apt/sources.list.d/salt.list
#apt-get update
#apt-get -y install salt-master
###Get the local IP and create a variable
#IP=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1) #Assigns the local IP addr of the machine to a variable so we can use it
###Start the service
#systemctl daemon-reload
#systemctl start salt-master
#systemctl restart salt-master
### Tests if the conf file/dir exists ###
#if [ -d /etc/salt/master ]; #Checks if the dir exists
# then
# echo "interface: $IP" >> /etc/salt/master #Adds the local IP addr of the master in the confi file
#continue #If exists - skips the "elif/else" statement
# else #If it doesn't exist - creates the path and file
# mkdir /etc/salt/
# (
#cat > /etc/salt/master<<EOF
#interface: $IP
#EOF
#)
#fi
###Create a Salt-key
#salt-key -F master
###Restart the service
#systemctl daemon-reload
#systemctl restart salt-master
#systemctl status salt-master
#EOF2
#}
##############################################################################################################################################################################################
#
# END OF JASON STATHAMS END OF JSON STATEMENTS END OF JASON STATHAMS END OF JSON STATEMENTS
#
##############################################################################################################################################################################################
##### DECLARE AN ARRAY OF THE STATEMENTS
declare -a arr
arr=("VPC" "SUBNETS" "GATEWAY" "ROUTETBL" "SGELB" "LAUNCHCONF" "ASG" "SGASG" "S3B" "MONITORSCALE" "INDEXS3B" "ELB")
echo -e "${GREEN}############################################################################################################################${ENDCOLOR}"
echo -e "${RED}############################################################################################################################${ENDCOLOR}"
echo ""
echo " THIS SCRIPT IS A SCRIPT FOR AUTOMATED DEPLOYMENT OF AWS RESOURCES USING TERRAFORM"
echo ""
echo -e "${RED}############################################################################################################################${ENDCOLOR}"
echo -e "${GREEN}############################################################################################################################${ENDCOLOR}"
echo $cr
echo $cr
echo $cr
### "For" loop for the prompts
for i in "${arr[@]}"; do
###PROMPT here
echo -e "##### ${GREEN} THIS IS THE CONFIG FOR $i ${ENDCOLOR} #####"
read -p "Do you want to use $i $cr $st" yes
if [ $yes == y ]
then
arr1=(${#arr[@]} "$i")
fi
for x in "${arr1[@]}"; do
if [ $x == VPC ]; then
read -p "add name for the VPC's name $cr $st" vpc_name
elif [ $x == GATEWAY ]; then
read -p "add name for the Gateway $cr $st" gw
elif [ $x == SGELB ]; then
read -p "add name for the ELB's security group $cr $st" elb_sg
read -p "choose INCOMING STARTING port $cr $st" elb_fp
read -p "choose INCOMING END port $cr $st" elb_tp
read -p "choose protocol for the ELB $cr $st" protocol
read -p "choose OUTGOING STARTING port $cr $st" elb_fp_out
read -p "choose OUTGOING END port $cr $st" elb_tp_out
elif [ $x == SGASG ]; then
read -p "add name for the ASG's security group $cr $st" asg_sg
read -p "choose INCOMING STARTING port for the EC2 instances" ec2_fp
read -p "choose INCOMING END port for the EC2 instances" ec2_tp
read -p "choose protocol for the EC2 instances$cr $st" protocol_ec2
elif [ $x == ASG ]; then
read -p "add name for the ASG $cr $st" asg_test
read -p "select minimum number of instances $cr $st" min
read -p "select desired number of instances $cr $st" cap
read -p "select maximum number of instances $cr $st" max
elif [ $x == ELB ]; then
read -p "add name for the ELB $cr $st" LBname
read -p "add name for the ELB's target group $cr $st" testtg
fi
done
$x >> /$PWD/tf.tf
done
terraform plan
| true
|
21bc82023446aba6ef90b4a822053773b9711ae1
|
Shell
|
hyliang96/serverENV
|
/local/bin/7z
|
UTF-8
| 242
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# get absoltae path to the dir this is in, work in bash, zsh
# if you want transfer symbolic link to true path, just change `pwd` to `pwd -P`
here=$(cd "$(dirname "${BASH_SOURCE[0]-$0}")"; pwd)
"$here/p7zip/7z" "$@"
unset -v here
| true
|
6b500302bde7d242f7dfead689b5758932c6f2e2
|
Shell
|
Huarong/zufang
|
/run.sh
|
UTF-8
| 1,559
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# zhangxiaoyang.hit[at]gmail.com
imgdir="../db/imgdir"
mongopath="../db"
imglist="data-aggregator/output/imglist"
pagedir="page-spider/output/douban"
function load_env()
{
source env/bin/activate
}
function clean_img_cache()
{
local imglist="$1"
local imgdir="$2"
while read url
do
imgname=`echo -n "$url" | cut -d "/" -f 8`
mv "$imgdir/$imgname" "$imgdir/$imgname.bak"
done < "$imglist"
for imgname in `ls $imgdir | grep -v "bak$"`
do
rm -f "$imgdir/$imgname"
done
for imgname in `ls $imgdir`
do
newname=`echo $imgname | sed 's/\.bak$//'`
mv "$imgdir/$imgname" "$imgdir/$newname"
done
}
function clean_page_cache()
{
local pagedir="$1"
rm -rf "$pagedir"
}
echo "START: $(date '+%Y-%m-%d %H:%M:%S')"
load_env
lastestCmd=`ps aux | grep "zufang.*run\|manage.sh" | grep -v grep`
if [ "$lastestCmd" == "" ]
then
echo "Updating ..."
bash manage.sh run all
echo "Moving images ..."
#mv data-aggregator/output/imgdir/* "$imgdir"
find data-aggregator/output/imgdir/ -name '*.*' | xargs -i mv {} "$imgdir"
echo "Cleaning img cache ..."
clean_img_cache "$imglist" "$imgdir"
echo "Cleaning page cache ..."
clean_page_cache "$pagedir"
echo "Cleaning DB ..."
"$mongopath/mongo" zufang --eval "db.dropDatabase()"
echo "Pushing DB ..."
"$mongopath/mongoimport" -d zufang -c house data-aggregator/output/zufang.house.json
else
echo "Stopped"
fi
echo "DONE: $(date '+%Y-%m-%d %H:%M:%S')"
| true
|
2e2109a61e35636586c2f9c1b84ba3610bcf00cb
|
Shell
|
Soulflare3/conf-files
|
/conf-(un)installers/install
|
UTF-8
| 3,546
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
# Create ~/GitCorp directory, if not exists
if [ ! -d ~/GitCorp ]; then
echo "creating ~/GitCorp"
mkdir ~/GitCorp;
echo "command executed: mkdir ~/GitCorp;"
echo "created ~/GitCorp"
fi
# Create ~/Sites directory, if not exists
if [ ! -d ~/Sites ]; then
echo "creating ~/Sites"
mkdir ~/Sites;
echo "command executed: mkdir ~/Sites;"
echo "created ~/Sites"
fi
# Create ~/Downloads directory, if not exists
if [ ! -d ~/Downloads ]; then
echo "creating ~/Downloads"
mkdir ~/Downloads;
echo "command executed: mkdir ~/Downloads;"
echo "created ~/Downloads"
fi
# Create ~/.vim/view directory, if not exists
if [ ! -d ~/.vim/view ]; then
echo "creating ~/.vim/view"
mkdir -p ~/.vim/view;
echo "command executed: mkdir -p ~/.vim/view;"
echo "created ~/.vim/view"
fi
# Create ~/.vim/autoload directory, if not exists
if [ ! -d ~/.vim/autoload ]; then
echo "creating ~/.vim/autoload"
mkdir -p ~/.vim/autoload;
echo "command executed: mkdir -p ~/.vim/autoload;"
echo "created ~/.vim/autoload"
fi
# Create ~/.vim/bundle directory, if not exists
if [ ! -d ~/.vim/bundle ]; then
echo "creating ~/.vim/bundle"
mkdir -p ~/.vim/bundle;
echo "command executed: mkdir -p ~/.vim/bundle;"
echo "created ~/.vim/bundle"
fi
# Create ~/.vim/autoload/pathogen.vim, if not exists
if [ ! -f ~/.vim/autoload/pathogen.vim ]; then
echo "creating ~/.vim/autoload/pathogen.vim"
curl -LSso ~/.vim/autoload/pathogen.vim https://tpo.pe/pathogen.vim;
echo "command executed: curl -LSso ~/.vim/autoload/pathogen.vim https://tpo.pe/pathogen.vim;"
echo "created ~/.vim/autoload/pathogen.vim"
fi
# Backup existing files
existingConfFileNames=('.common-shell-confs' '.bash-confs' '.zsh-confs' '.git-confs' '.vim-confs' '.bashrc' '.bash_profile' '.zshrc' '.zprofile' '.gitconfig' 'gitignore_global' '.vimrc' '.screenrc' '.docker');
for existingConfFileName in "${existingConfFileNames[@]}"
do
if [ -f ~/$existingConfFileName -o -d ~/$existingConfFileName -o -L ~/$existingConfFileName ]; then
modifiedConfFileName=$existingConfFileName-prev;
echo "backing up ~/$existingConfFileName as ~/$modifiedConfFileName"
mv ~/$existingConfFileName ~/$modifiedConfFileName
echo "command executed: mv ~/$existingConfFileName ~/$modifiedConfFileName"
echo "backed up ~/$existingConfFileName as ~/$modifiedConfFileName"
echo
fi
done
# Create SymLinks
# ToDo Make the path relative
confDirNames=('common-shell-confs' 'bash-confs' 'zsh-confs' 'git-confs' 'vim-confs' 'docker-confs');
for confDirName in "${confDirNames[@]}"
do
if [ -d ~/GitHub/conf-files/$confDirName ]; then
echo "creating SymLink ~/.$confDirName to ~/GitHub/conf-files/$confDirName"
ln -s ~/GitHub/conf-files/$confDirName ~/.$confDirName
echo "command executed: ln -s ~/GitHub/conf-files/$confDirName ~/.$confDirName"
echo "created SymLink ~/.$confDirName to ~/GitHub/conf-files/$confDirName"
echo
fi
done
confFileNames=('.bash-confs/bashrc' '.bash-confs/bash_profile' '.zsh-confs/zshrc' '.zsh-confs/zprofile' '.git-confs/gitconfig' '.git-confs/gitignore_global' '.vim-confs/vimrc' '.docker-confs/docker');
for confFileName in "${confFileNames[@]}"
do
if [ -f ~/$confFileName ]; then
symLinkName=.`echo ~/$confFileName | sed ' s!.*/!! ; s/"$// '`
echo "creating SymLink ~/$symLinkName to ~/$confFileName"
ln -s ~/$confFileName ~/$symLinkName
echo "command executed: ln -s ~/$confFileName ~/$symLinkName"
echo "created SymLink ~/$symLinkName to ~/$confFileName"
echo
fi
done
| true
|
08be606dabce3bf177055d98cd3beeff0b21e1fb
|
Shell
|
sclorg/s2i-php-container
|
/8.0/s2i/bin/usage
|
UTF-8
| 611
| 3.4375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
#!/bin/sh
DISTRO=`cat /etc/*-release | grep ^ID= | grep -Po '".*?"' | tr -d '"'`
NAMESPACE=centos
[[ $DISTRO =~ rhel* ]] && NAMESPACE=rhscl
cat <<EOF
This is a S2I PHP-${PHP_VERSION} ${DISTRO} base image:
To use it in Openshift, run:
oc new-app php:${PHP_VERSION}~https://github.com/sclorg/cakephp-ex.git
To access the application:
oc get pods
oc exec <pod> -- curl 127.0.0.1:8080
Alternatively, to run the image directly using podman or docker, or how to use it as a parent image in a Dockerfile, see documentation at
https://github.com/sclorg/s2i-php-container/blob/master/${PHP_VERSION}/README.md
EOF
| true
|
acd69576cc0751d7c344abb8284f51f2d4e420ec
|
Shell
|
laelbarlow/jackhmmerer
|
/scripts/determine_snakemake_env_options.sh
|
UTF-8
| 306
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Determine which environment management options to use.
env_options=""
if test "$(command -v conda)"; then
env_options="--use-conda"
elif test "$(command -v module)"; then
#env_options="--use-envmodules"
env_options=""
else
env_options="--use-conda --use-singularity"
fi
| true
|
c3fc732b2136df6afb722feebde48d90a141377e
|
Shell
|
chiminey/chiminey
|
/chiminey/PAYLOAD_ROOT/bootstrap_done.sh
|
UTF-8
| 172
| 3.203125
| 3
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/bin/sh
PID=`cat bootstrap.pid`
if [ `ps -p $PID | wc -l` -gt 1 ]
then
# program is still running
echo stillrunning
else
echo Environment Setup Completed
fi
| true
|
6d67518c7f1b3598488cd6864d6932a71bd31690
|
Shell
|
cmonr/.dotfiles
|
/setup.sh
|
UTF-8
| 566
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# TODO: If this file is not in ~/Projects/.dotfiles, exit
# Move to base directory
cd ~/Projects/.dotfiles
#
# Symlinks, symlinks everywhere!
#
# Bash
ln -s {`pwd`,~}/.bashrc
# Vim
ln -s {`pwd`,~}/.vim
ln -s {`pwd`,~}/.vimrc
# Screen
ln -s {`pwd`,~}/.screenrc
# Urxvt
ln -s {`pwd`,~}/.Xdefaults
ln -s {`pwd`,~}/.Xresources
# Git
ln -s {`pwd`,~}/.gitconfig
# Gtk
ln -s {`pwd`,~}/.gtkrc-2.0
# X11
ln -s {`pwd`,~}/.xinitrc
# Remap Caps Lock to Ctrl
ln -s {`pwd`,~}/.Xmodmap
sudo ln -s `pwd`/00-keyboard.conf /etc/X11/xorg.conf.d/00-keyboard.conf
| true
|
439d75df789519329782b792a447159d002153e1
|
Shell
|
nguyet04/entreeai
|
/servers/flaskserver/app/deployflask.sh
|
UTF-8
| 742
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
export SQLADDR="sql:3306"
export FLASKADDR="flaskapp:80"
export MYSQL_ROOT_PASSWORD="flasktest"
cd ../databases
./builddb.sh
cd ../app
./buildflask.sh
# docker push gebizar/flaskapp
# docker pull gebizar/flaskapp
# docker pull gebizar/flaskdb
docker rm -f flaskapp
docker rm -f flaskdb
docker network rm sharednetwork
docker network create sharednetwork
cd ../../gateway
./deploy.sh
docker run -d \
--name flaskdb \
--network sharednetwork \
-e MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD \
-e MYSQL_DATABASE=schema \
-p 3306:3306 \
gebizar/flaskdb
sleep 30
docker run -d \
--name flaskapp \
--network sharednetwork \
-e FLASKADDR=$FLASKADDR \
-e MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD \
-e SQLADDR=$SQLADDR \
gebizar/flaskapp
| true
|
e35bd5f7d5b3fc7e3188d613bd2ddb3a0e8ee812
|
Shell
|
crypdex/blackbox
|
/services/decred/scripts/dashboard.sh
|
UTF-8
| 2,367
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
printf "
${color_yellow}Decred
─────────────────────────────────────────────────────${color_clear}"
# Get the location of this script
__dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
blockchaininfo=$(${blackboxcmd} exec dcrctl getblockchaininfo 2>/dev/null)
if [[ $? -ne 0 ]]; then
echo "Not running"
return
fi
blocks=$(echo "${blockchaininfo}" | jq -r '.blocks')
verificationprogress=$(echo "${blockchaininfo}" | jq -r '.verificationprogress')
progress="$(echo ${verificationprogress} | awk '{printf( "%.2f", 100 * $1)}')%%"
#blockchaininfo=$(${blackboxcmd} exec dcrctl getblockchaininfo 2>/dev/null)
bestblockhash=$(echo "${blockchaininfo}" | jq -r '.bestblockhash')
chain=$(echo "${blockchaininfo}" | jq -r '.chain')
syncheight=$(echo "${blockchaininfo}" | jq -r '.syncheight')
stakeinfo=$(${blackboxcmd} exec dcrctl -- --wallet getstakeinfo 2>/dev/null)
voted=$(echo "${stakeinfo}" | jq -r '.voted')
immature=$(echo "${stakeinfo}" | jq -r '.immature // 0')
revoked=$(echo "${stakeinfo}" | jq -r '.revoked // 0')
expired=$(echo "${stakeinfo}" | jq -r '.expired // 0')
missed=$(echo "${stakeinfo}" | jq -r '.missed // 0')
live=$(echo "${stakeinfo}" | jq -r '.live // 0')
balances=$(${blackboxcmd} exec dcrctl -- --wallet getbalance 2>/dev/null)
total=$(echo "${balances}" | jq -r '.balances[0].total')
lockedbytickets=$(echo "${balances}" | jq -r '.balances[0].lockedbytickets')
spendable=$(echo "${balances}" | jq -r '.balances[0].spendable')
unconfirmed=$(echo "${balances}" | jq -r '.balances[0].unconfirmed')
balance=$(echo "${balances}" | jq -r '.balances[0]')
peerinfo=$(${blackboxcmd} exec dcrctl getpeerinfo 2>/dev/null)
peercount=$(echo "${peerinfo}" | jq '. | length')
printf "
${color_white}[chain]
${color_boldgreen}${chain}${color_boldwhite} ${progress}${color_clear} ${blocks}/${syncheight}
${color_gray}${bestblockhash}
${color_gray}${peercount} peers
${color_white}[tickets]
${color_gray}live: ${live}, voted: ${voted}, immature: ${immature}
${color_gray}missed: ${missed}, expired: ${expired}, revoked: ${revoked}
${color_white}[account]
${color_gray}${total} DCR
${color_gray}${lockedbytickets} locked, ${spendable} spendable, ${unconfirmed} unconfirmed
"
| true
|
7c4a854ceaa6cc0c5041167ab94a20f638f48cce
|
Shell
|
TarasKoval/BashScripts
|
/leap.sh
|
UTF-8
| 379
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
#Given a year, report if it is a leap year.
set -o errexit
set -o nounset
if [[ $# -ne 1 ]]; then
echo 'Usage: leap.sh <year>'
exit 1
fi
number=$1
if ! [[ $number =~ ^[0-9]+$ ]]; then
echo 'Usage: leap.sh <year>'
exit 1
fi
if [[ $(($number%4)) == 0 ]] && [[ $(($number%100)) != 0 ]] || [[ $(($number%400)) == 0 ]]; then
echo true
else
echo false
fi
exit 0
| true
|
ce428bba4c4060b43c553ac9ecc1b1ff42c9e5a1
|
Shell
|
theshubhamk/linuxEmer
|
/Assignments/07_largestWHIle.sh
|
UTF-8
| 405
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
#Written by SHUBHAM KUMAR SINGH
#script to compare larger integer values from a 'n' number of arguments using command line arguments
arr=($@)
count=$#
c=$(expr $count - 1)
p=0
q=0
n=0
l=0
echo ${arr[@]}
echo the count is $count
p=${arr[0]}
while [ $c -gt 0 ]
do
let n=$n+1
q=${arr[$n]}
if [[ $p -gt $q ]]
then
l=$p
else
l=$q
fi
p=$l
let c=$c-1
done
echo the largest number is $l
| true
|
61fc9f5da63e2e432494eec716595658a64d7c4e
|
Shell
|
MMTObservatory/pyINDI
|
/pyindi/www/static/js9/tests/testwait
|
UTF-8
| 1,093
| 3.671875
| 4
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/bin/bash
# set -x
if [ x$1 != x ]; then
cmd="$1";
else
echo "usage: $0 [CreateMosaic | DisplaySection | LoadCatalog | LoadRegions | RotateData | RunAnalysis]"
exit 0
fi
error() {
echo "$1" | egrep ERROR 1>/dev/null 2>&1
if [ $? = 0 ]; then
echo "$*"
else
echo "ERROR: $*"
fi
exit 1
}
case $cmd in
CreateMosaic)
file="../data/mosaic/megacam.fits"
args="current"
;;
DisplaySection)
file="data/fits/casa.fits"
args="full"
;;
LoadCatalog)
file="data/fits/casa.fits"
args="mycat casa/casa.cat"
;;
LoadRegions)
file="data/fits/casa.fits"
args="casa/casa.reg"
;;
RotateData)
file="data/fits/casa.fits"
args="45"
;;
RunAnalysis)
file="data/fits/casa.fits"
args="counts"
;;
*)
error "unknown cmd: $cmd"
;;
esac
echo "loading $file ..."
x=$(./js9load -v $file 2>&1)
echo "$x" | egrep ERROR 1>/dev/null 2>&1
if [ $? = 0 ]; then
error $x
else
echo $x
fi
echo "run: $cmd"
js9 -v $cmd $args
js9wait -v $cmd $file
| true
|
611f5be7d33249718064e685115cd9be42cd73ee
|
Shell
|
alice1017/setupscripts
|
/setup-zsh_linux.sh
|
UTF-8
| 1,054
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Load library
LIBPATH="$(dirname $(readlink -f $0))/lib"
source "$LIBPATH/load.sh"
# Display banner
display_banner_msg "Install zsh 5.5.1 from source"
# Define variables and function
URL="http://www.zsh.org/pub/zsh-5.5.1.tar.gz"
FILE="zsh-5.5.1.tar.gz"
DIR="zsh-5.5.1"
DIRPATH="${SRCDIR}/${DIR}"
DEPENDENCIES=( \
"libc6" "libcap2" "libtinfo5" \
"libncursesw5" "libncursesw5-dev" \
"libncurses5" "libncurses5-dev" \
"libpcre3"\
)
clean() {
echo -n "Cleaning temporary files..."
rm "$SRCDIR/$FILE"
rm -rf "$SRCDIR/zsh-5.5.1"
echo "$(ink "green" "done")"
}
# Install dependics
install_dependencies "(${DEPENDENCIES[*]})"
# Download & Extract tarball
download -np $URL && extract "${SRCDIR}/${FILE}"
# move source dir
echo "Moved directory from "$(pwd)" to "$(ink "yellow" "$DIRPATH")" "
cd "$DIRPATH"
# execute make commands
execute_cmd "./configure" "/tmp/zsh-configure.log"
execute_cmd "sudo make install" "/tmp/zsh-make-install.log"
execute_cmd "make clean" "/tmp/zsh-make-clean.log"
clean
exit 0
| true
|
ddeae08e16e39faf96314e399f02e4b622f6f06f
|
Shell
|
claudiojunior-me/linux-debian-based-setup
|
/scripts/config-so-theme.sh
|
UTF-8
| 640
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
echo ""
echo "#############################"
echo "# Styling the SO with Dracula Theme"
echo "#############################"
echo ""
echo " Cloning to Ant-Dracula...."
git clone https://github.com/Diego-Brocanelli/Ant-Dracula.git ~/.themes/Ant-Dracula
echo " Set to Ant-Dracula theme...."
gsettings set org.gnome.desktop.interface gtk-theme "Ant-Dracula"
gsettings set org.gnome.desktop.wm.preferences theme "Ant-Dracula"
echo " Set to Dracula wallpaper...."
cp img/dracula_wallpaper.png $HOME/Imagens/
gsettings set org.gnome.desktop.background picture-uri file://$HOME/Downloads/dracula_wallpaper.png
echo ""
| true
|
270be64abd01166130d0af831056bf1966008459
|
Shell
|
nanoxas/PintOS
|
/tarea2/src/compile.sh
|
UTF-8
| 1,051
| 3.75
| 4
|
[
"MIT-Modern-Variant",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/bash
function compile-dir {
A=$(pwd)
echo "Compiling $1"
cd $1
if make > /dev/null; then
cd $A
else
cd $A
echo "Compiling $1 failed"
exit $2
fi
}
function pintos-compile {
compile-dir filesys 102
compile-dir userprog 103
compile-dir examples 104
}
function pintos-cp {
echo " Copying $1 to the disk"
../../utils/pintos -p ../../examples/$1 -a $1 -- -q
}
function pintos-mkfs {
echo "Preparing the disk"
echo " Creating disk (2MiB!)"
# Create new disk image
rm $1 2> /dev/null
../../utils/pintos-mkdisk $1 --filesys-size=2 &> /dev/null
# Format
echo " Formating disk"
../../utils/pintos -- -f -q &> /dev/null
# copy programs
echo " Copying programs"
pintos-cp 'halt'
pintos-cp 'echo'
pintos-cp 'interecho'
pintos-cp 'cowsay'
pintos-cp 'recursor'
}
echo "Compiling $(pwd)"
if pintos-compile; then
echo "$(pwd) compiled!"
if cd userprog/build; then
pintos-mkfs 'filesys.dsk'
cd ../..
fi #cd userprog/build
else
echo "$(pwd) does not compile!"
fi
| true
|
504c86d0c2270092092dd766d11a134ce73ff73a
|
Shell
|
csu-anzai/Cloud-computing-with-AWS
|
/infrastructure/aws/cloudformation/csye6225-aws-cf-terminate-application-stack.sh
|
UTF-8
| 351
| 3.265625
| 3
|
[
"Apache-2.0"
] |
permissive
|
echo "stackname : $1"
echo "$stackname"
stack_delete=$(aws cloudformation delete-stack --stack-name $1)
echo "Stack is getting deleted...."
result=$(aws cloudformation wait stack-delete-complete --stack-name $1)
if [[ -z "result" ]]; then
echo "---Stack has not been deleted---"
else
echo "---Stack has been successfully deleted---"
fi
| true
|
7caaa846a86d2239cf6ae7672e5f199dbaa30c87
|
Shell
|
frebib/dotfiles-1
|
/mk-image/src/03-dotfiles.sh
|
UTF-8
| 724
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -o xtrace
set -e
DF_PATH=$HOME/dotfiles
REPO_HTTPS='https://github.com/mishajw/dotfiles.git'
REPO_SSH='git@github.com:mishajw/dotfiles.git'
[[ "$USER" == "misha" ]] || exit 1
sudo pacman -S git zsh python --noconfirm --needed
if [[ ! -e $DF_PATH ]]; then
echo "Cloning dotfiles"
git clone $REPO_HTTPS $DF_PATH
fi
cd $DF_PATH
git remote set-url origin $REPO_HTTP \
&& git fetch \
&& git remote set-url origin $REPO_SSH
git checkout master
source $HOME/dotfiles/init/scripting.sh
echo "Setting up yaourt"
$init/install/yaourt.sh
echo "Setting up python"
$init/install/python.sh
echo "Setting up zsh"
$init/install/zsh.sh
echo "Setting up config files"
$DF_PYTHON $scr/make-symlinks.py
| true
|
ef410b15ae0db087ddabb3af5d952c548a5f5e59
|
Shell
|
LittleHelicase/jumper
|
/apps/jumper/scripts/download-assets.sh
|
UTF-8
| 312
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# this script downloads sprite assets from github.
WORKING_DIR=$(pwd)
SCRIPT_DIR=$(dirname "$0")
cd "$SCRIPT_DIR"
mkdir ../assets/images/sprites/external
cd ../assets/images/sprites/external
curl -LO https://github.com/hastebrot/sprites/raw/master/pico8/38190_sprites.png
cd "$WORKING_DIR"
| true
|
0639af770070e0d38e56922fbaf11959df67c38f
|
Shell
|
fsmo/anasfullstack-dotfiles
|
/.atom/local-history/Users/anas/projects/anas-dotfiles-v2/2015-10-17_11-02-36 AM_brew.sh
|
UTF-8
| 5,305
| 2.796875
| 3
|
[] |
no_license
|
# Ask for the administrator password upfront.
sudo -v
# Keep-alive: update existing `sudo` time stamp until the script has finished.
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# Check for Homebrew,
# Install if we don't have it
if test ! $(which brew); then
echo "Installing homebrew..."
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
# Make sure we’re using the latest Homebrew.
brew update
# Upgrade any already-installed formulae.
brew upgrade --all
# Install Xcode command line tools
xcode-select --install
# Install GNU core utilities (those that come with OS X are outdated).
# Don’t forget to add `$(brew --prefix coreutils)/libexec/gnubin` to `$PATH`.
brew install coreutils
sudo ln -s /usr/local/bin/gsha256sum /usr/local/bin/sha256sum
# Install some other useful utilities like `sponge`.
brew install moreutils
# Install GNU `find`, `locate`, `updatedb`, and `xargs`, `g`-prefixed.
brew install findutils
# Install GNU `sed`, overwriting the built-in `sed`.
brew install gnu-sed --with-default-names
# Install Bash 4.
brew install bash
brew tap homebrew/versions
brew install bash-completion2
# We installed the new shell, now we have to activate it
echo "Adding the newly installed shell to the list of allowed shells"
# Prompts for password
sudo bash -c 'echo /usr/local/bin/bash >> /etc/shells'
# Change to the new shell, prompts for password
chsh -s /usr/local/bin/bash
# Install `wget` with IRI support.
brew install wget --with-iri
# Install Python
brew install python
brew install python3
# Install more recent versions of some OS X tools.
brew install vim --override-system-vi
brew install homebrew/dupes/grep
brew install homebrew/dupes/openssh
brew install homebrew/dupes/screen
brew install homebrew/php/php55 --with-gmp
# Install some CTF tools; see https://github.com/ctfs/write-ups.
brew install aircrack-ng
brew install bfg
brew install binutils
brew install binwalk
brew install cifer
brew install dex2jar
brew install dns2tcp
brew install fcrackzip
brew install foremost
brew install hashpump
brew install hydra
brew install john
brew install knock
brew install netpbm
brew install nmap
brew install pngcheck
brew install socat
brew install sqlmap
brew install tcpflow
brew install tcpreplay
brew install tcptrace
brew install ucspi-tcp # `tcpserver` etc.
brew install xpdf
brew install xz
# Install other useful binaries.
brew install ack
brew install dark-mode
#brew install exiv2
brew install git
brew install git-lfs
brew install git-flow
brew install imagemagick --with-webp
brew install lua
brew install lynx
brew install p7zip
brew install pigz
brew install pv
brew install rename
brew install rhino
brew install speedtest_cli
brew install ssh-copy-id
brew install tree
brew install webkit2png
brew install zopfli
brew install pkg-config libffi
brew install tmux
brew install ngrep
# Install Heroku
brew install heroku-toolbelt
heroku update
# Install Cask
brew install caskroom/cask/brew-cask
# Core casks
brew cask install --appdir="~/Applications" iterm2
brew cask install --appdir="~/Applications" java
brew cask install --appdir="~/Applications" xquartz
# Development tool casks
brew cask install --appdir="/Applications" sublime-text
brew cask install --appdir="/Applications" atom
brew cask install --appdir="/Applications" virtualbox
brew cask install --appdir="/Applications" vagrant
brew cask install --appdir="/Applications" heroku-toolbelt
# Misc casks
brew cask install --appdir="/Applications" google-chrome
brew cask install --appdir="/Applications" firefox
brew cask install --appdir="/Applications" skype
brew cask install --appdir="/Applications" slack
brew cask install --appdir="/Applications" dropbox
brew cask install --appdir="/Applications" evernote
brew cask install --appdir="/Applications" gimp
brew cask install --appdir="/Applications" inkscape
brew cask install --appdir="/Applications" macpass
brew cask install --appdir="/Applications" pgadmin3
brew cask install --appdir="/Applications" google-drive
brew cask install --appdir="/Applications" google-hangouts
brew cask install --appdir="/Applications" sourcetree
brew cask install --appdir="/Applications" adobe-reader
brew cask install --appdir="/Applications" utorrent
brew cask install --appdir="/Applications" spotify
brew cask install --appdir="/Applications" bankid
brew cask install --appdir="/Applications" diffmerge
brew cask install --appdir="/Applications" octave
brew cask install --appdir="/Applications" genymotion
brew cask install --appdir="/Applications" filezilla
brew cask install --appdir="/Applications" u-torrent
# android development packages
brew cask install --appdir="~/Applications" intellij-idea-ce
brew cask install --appdir="~/Applications" android-studio
brew install android-sdk
# Install data stores
brew install mysql
brew install postgresql
brew install mongo
brew cask install --appdir="/Applications" mysqlworkbench
# Web Development
brew install node
npm install -g coffee-script
npm install -g grunt-cli
npm install -g jshint
npm install -g less
npm install -g nodemon
npm install -g bower
curl https://install.meteor.com/ | sh
npm install -g mongodb
npm install -g mongoose
# Remove outdated versions from the cellar.
brew cleanup
| true
|
3761e2dc30c5446590b68184e677698ad2535662
|
Shell
|
NCAR/container-dtc-nwp
|
/components/scripts/common/met_point_verf_all.ksh
|
UTF-8
| 5,850
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/ksh -l
##########################################################################
#
# Script Name: met_point_verf_all.ksh
#
# Author: John Halley Gotway
# NCAR/RAL/DTC
#
# Released: 10/26/2010
#
# Description:
# This script runs the MET/Point-Stat tool to verify gridded output
# from the WRF PostProcessor using point observations. The MET/PB2NC
# tool must be run on the PREPBUFR observation files to be used prior
# to running this script.
#
# START_TIME = The cycle time to use for the initial time.
# FCST_TIME = The two-digit forecast that is to be verified.
# DOMAIN_LIST = A list of domains to be verified.
# GRID_VX =
# MET_EXE_ROOT = The full path of the MET executables.
# MET_CONFIG = The full path of the MET configuration files.
# DATAROOT = Directory containing /postprd and /metprd.
# RAW_OBS = Directory containing observations to be used.
# MODEL = The model being evaluated.
#
##########################################################################
LS=/usr/bin/ls
MKDIR=/usr/bin/mkdir
ECHO=/usr/bin/echo
CUT=/usr/bin/cut
DATE=/usr/bin/date
typeset -Z2 FCST_TIME
# Print run parameters/masks
${ECHO}
${ECHO} "met_point_verf_all.ksh started at `${DATE}`"
${ECHO}
${ECHO} " START_TIME = ${START_TIME}"
${ECHO} " FCST_TIME = ${FCST_TIME}"
${ECHO} " DOMAIN_LIST = ${DOMAIN_LIST}"
${ECHO} " GRID_VX = ${GRID_VX}"
${ECHO} " MET_EXE_ROOT = ${MET_EXE_ROOT}"
${ECHO} " MET_CONFIG = ${MET_CONFIG}"
${ECHO} " DATAROOT = ${DATAROOT}"
${ECHO} " RAW_OBS = ${RAW_OBS}"
${ECHO} " MODEL = ${MODEL}"
# Make sure $DATAROOT exists
if [ ! -d "${DATAROOT}" ]; then
${ECHO} "ERROR: DATAROOT, ${DATAROOT} does not exist"
exit 1
fi
# Make sure RAW_OBS directory exists
if [ ! -d ${RAW_OBS} ]; then
${ECHO} "ERROR: RAW_OBS, ${RAW_OBS}, does not exist!"
exit 1
fi
# Create output directories
PS_DIR=${DATAROOT}/metprd/point_stat
${MKDIR} -p ${PS_DIR}
PB2NC_DIR=${DATAROOT}/metprd/pb2nc
${MKDIR} -p ${PB2NC_DIR}
export MODEL
export FCST_TIME
########################################################################
# Compute VX date - only need to calculate once
########################################################################
# Compute the verification date
VDATE=` ${CALC_DATE} ${START_TIME} +${FCST_TIME}`
VYYYYMMDD=`${ECHO} ${VDATE} | ${CUT} -c1-8`
VHH=` ${ECHO} ${VDATE} | ${CUT} -c9-10`
${ECHO} 'valid time for ' ${FCST_TIME} 'h forecast = ' ${VDATE}
########################################################################
# Run pb2nc on prepbufr obs file - only need to run once
########################################################################
# Check for pb2nc output file name
OBS_FILE="${PB2NC_DIR}/prepbufr.ndas.${VYYYYMMDD}.t${VHH}z.nc"
if [ ! -e ${OBS_FILE} ]; then
# Specify the MET PB2NC configuration file to be used
export CONFIG_PB2NC="${MET_CONFIG}/PB2NCConfig_RefConfig"
# Make sure the MET configuration files exists
if [ ! -e ${CONFIG_PB2NC} ]; then
echo "ERROR: ${CONFIG_PB2NC} does not exist!"
exit 1
fi
# Process time information -- NDAS specific
if [[ ${VHH} == "00" || ${VHH} == "06" || ${VHH} == "12" || ${VHH} == "18" ]]; then
TMMARK="tm12"
elif [[ ${VHH} == "03" || ${VHH} == "09" || ${VHH} == "15" || ${VHH} == "21" ]]; then
TMMARK="tm09"
else
echo "ERROR: Valid hour is not compatible with using NDAS data."
exit 1
fi
# Determine the NDAS time stamps
TM_HR=`echo ${TMMARK} | cut -c3-4`
NDAS_YMDH=`${CALC_DATE} ${VDATE} +${TM_HR} -fmt %Y%m%d%H`
NDAS_HR=` ${CALC_DATE} ${VDATE} +${TM_HR} -fmt %H`
# List observation file to be run through pb2nc
PB_FILE=`${LS} ${RAW_OBS}/${NDAS_YMDH}/ndas.t${NDAS_HR}z.prepbufr.${TMMARK}.nr | head -1`
if [ ! -e ${PB_FILE} ]; then
echo "ERROR: Could not find observation file: ${PB_FILE}"
exit 1
fi
# Call PB2NC
${RUN_CMD} /usr/bin/time ${MET_EXE_ROOT}/pb2nc \
${PB_FILE} ${OBS_FILE} ${CONFIG_PB2NC} -v 2
if [ $? -ne 0 ]; then
exit 1
fi
fi
########################################################################
# Run point_stat for each domain
########################################################################
# Loop through the domain list
for DOMAIN in ${DOMAIN_LIST}; do
export DOMAIN
export ${GRID_VX}
${ECHO} "DOMAIN=${DOMAIN}"
# Get the forecast to verify
FCST_FILE=${DATAROOT}/postprd/wrfprs_${DOMAIN}.${FCST_TIME}
if [ ! -e ${FCST_FILE} ]; then
${ECHO} "ERROR: Could not find UPP output file: ${FCST_FILE}"
exit 1
fi
#######################################################################
# Run Point-Stat
#######################################################################
# Specify the MET Point-Stat configuration files to be used
PS_CONFIG_LIST="${MET_CONFIG}/PointStatConfig_ADPUPA \
${MET_CONFIG}/PointStatConfig_ADPSFC \
${MET_CONFIG}/PointStatConfig_ADPSFC_MPR \
${MET_CONFIG}/PointStatConfig_WINDS"
for CONFIG_FILE in ${PS_CONFIG_LIST}; do
# Only verify ADPUPA for 00 and 12
if [[ ${CONFIG_FILE} =~ "ADPUPA" && ${VHH} != "00" && ${VHH} != "12" ]]; then
continue
fi
# Make sure the configuration file exists
if [ ! -e ${CONFIG_FILE} ]; then
${ECHO} "ERROR: ${CONFIG_FILE} does not exist!"
exit 1
fi
${RUN_CMD} /usr/bin/time ${MET_EXE_ROOT}/point_stat \
${FCST_FILE} ${OBS_FILE} ${CONFIG_FILE} \
-outdir ${PS_DIR} -v 2
if [ $? -ne 0 ]; then
exit 1
fi
done # for CONFIG_FILE
done # for DOMAIN
##########################################################################
${ECHO} "met_point_verf_all.ksh completed at `${DATE}`"
exit 0
| true
|
23795bddd31089223213185879d34e6a7efd0ce4
|
Shell
|
erantapaa/do-tools
|
/setup
|
UTF-8
| 10,598
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Script to set up a droplet.
#
# ALL_COMMANDS is the whitelist of sub-commands accepted by main(); each
# entry is the underscore form of a "cmd_*" function defined below.
# NOTE(review): read -d '' always returns non-zero here (no NUL byte in the
# here-doc); harmless without `set -e`, but worth knowing.
read -d '' -r ALL_COMMANDS <<END
build_pandoc
build_pandoc_as_user
fedora_init
fedora_install_pkgs
fedora_test_all
fetch_ghc_7_10_1_bindist
fetch_ghc_7_8_4_bindist
git_clone_haskell_platform
git_clone_haskell_platform_erantapaa
git_clone_hl
install_haskell_platform
install_hp_2014_2_0_0
missing_libs
setup_swap
setup_user
debian_init
install_stack
usage
END
# Release tarball consumed by cmd_install_stack / cmd_test_stack_stuff.
STACK_URL="https://github.com/commercialhaskell/stack/releases/download/v1.1.2/stack-1.1.2-linux-x86_64.tar.gz"
# Echo a command line (prefixed with "+") and then execute it, returning
# the command's exit status.
run () {
  printf '+'
  printf ' %s' "$@"
  printf '\n'
  "$@"
}
# Execute a command at most once: skip it when the sentinel file $1 exists,
# otherwise run it (via run) and create the sentinel afterwards.
run_conditionally () {
  marker="$1"
  shift
  [ -e "$marker" ] && return
  run "$@"
  touch "$marker"
}
# Exit the script unless we can write to / (i.e. effectively root).
assert_running_as_root () {
  if [ ! -w / ]; then
    echo Must be running as root
    exit 1
  fi
}
# Succeed when $1 resolves to a command (binary, builtin, function, alias).
executable_exists() {
  type "$1" > /dev/null 2>&1
}
# Build and install redo under /usr/local when the helper script is present.
cmd_install_redo() {
  if [ -x "./setup.d/build-redo" ]; then
    PREFIX=/usr/local ./setup.d/build-redo
  fi
}
# Make this setup script and its setup.d payload world-readable so the
# unprivileged user created later can run them.
cmd_make_setup_readable() {
  if [ -d "./setup.d" ]; then
    chmod a+rX -R ./setup.d
  fi
  if [ -w "/root" ]; then
    chmod a+rx /root
    if [ -d "/root/setup.d" ]; then
      chmod a+rX -R /root/setup.d
    fi
    if [ -e "/root/setup" ]; then
      chmod a+rX /root/setup
    fi
  fi
}
# Debug helper: print the tarball leaf name and its stem (leaf minus the
# .tar.gz suffix), both derived from $STACK_URL.
cmd_test_stack_stuff() {
  local archive
  archive=$(basename "$STACK_URL")
  printf 'leaf: %s\n' "$archive"
  printf 'stem: %s\n' "${archive%.tar.gz}"
}
# Install the pinned stack binary at /usr/local/bin/stack: download
# $STACK_URL into /root/download when not cached, then extract only the
# "stack" member from the tarball.
cmd_install_stack() {
  local url="$STACK_URL"
  local prog=/usr/local/bin/stack
  local tmp="$prog.tmp"
  local dir="/root/download"
  if [ ! -e "$prog" ]; then
    [ -d "$dir" ] || run mkdir "$dir"
    fetch_url "$url" "$dir"
    local leaf stem
    leaf=$(basename "$url")
    stem=${leaf%.tar.gz}
    if [ -e "$dir/$leaf" ]; then
      # tar -O streams the member to stdout; go via a temp file so a
      # partial extraction never leaves a broken /usr/local/bin/stack.
      tar xOzf "$dir/$leaf" "$stem/stack" > "$tmp"
      mv "$tmp" "$prog"
    fi
  fi
  # NOTE(review): chmod runs even when the download failed and $prog is
  # absent, which makes chmod error out — confirm intended.
  run chmod a+rx "$prog"
}
# Install the lighttpd web server.
cmd_install_lighter() {
  run apt-get install -y lighttpd
}
# Write a minimal ~/.gitconfig for user $1 (full name $2, email $3)
# unless one already exists.
setup_gitconfig() {
  local user="$1"
  local fullname="$2"
  local email="$3"
  local path="/home/$user/.gitconfig"
  if [ ! -e "$path" ]; then
    cat > "$path" <<END
[user]
	email = $email
	name = $fullname
[core]
	editor = vi
END
  fi
}
# Create and enable a 4 GiB swap file at /swap (idempotent) and persist it
# in /etc/fstab.
cmd_setup_swap() {
  swap_path="/swap"
  swap_size=4096 # size in MB
  if ! test -e "$swap_path"; then
    if executable_exists fallocate; then
      run fallocate -l ${swap_size}M "$swap_path"
    else
      echo Using dd to create a swap file...
      run dd if=/dev/zero of=/swap bs=1024 count=$((swap_size*1024))
    fi
    run chmod 600 $swap_path
    run mkswap $swap_path
    run swapon $swap_path
  fi
  # Coarse check: any fstab line containing "swap" counts as configured.
  if ! grep -q swap /etc/fstab; then
    echo "adding $swap_path to /etc/fstab"
    echo "$swap_path none swap defaults 0 0" >> /etc/fstab
  fi
}
copy_file_to_user() {
  # Copy a file and give a user ownership and control over it.
  assert_running_as_root
  local src="$1"
  local dest="$2"
  local user="$3"
  run cp "$src" "$dest"
  run chown "$user.$user" "$dest"
}
# Create a login user with SSH access copied from root.
# Arguments: $1 - user name
# Globals:   sets $user and $home (read by later helpers)
# No-op when the user already exists in /etc/passwd.
cmd_setup_user() {
  user="$1"
  if [ -z "$user" ]; then
    echo "setup_user: no user name supplied"
    return
  fi
  # Match the whole login field ("user:") so e.g. "joe" does not match "joey".
  if ! grep -q "^${user}:" /etc/passwd; then
    home=/home/$user
    run useradd -m -d "$home" -s /bin/bash -k /etc/skel "$user"
    run mkdir "$home/.ssh"
    run chmod go-rwx "$home/.ssh"
    run chown "$user.$user" "$home/.ssh"
    # Reuse root's authorized_keys so the droplet's SSH key works for the user.
    run cp /root/.ssh/authorized_keys "$home/.ssh/"
    run chown "$user.$user" "$home/.ssh/authorized_keys"
    run chmod go-rwx "$home/.ssh/authorized_keys"
    run mkdir "$home/download"
    run chown "$user.$user" "$home/download"
    mkdir "$home/bash"
    chown "$user.$user" "$home/bash"
    mkdir "$home/bin"
    chown "$user.$user" "$home/bin"   # was previously left owned by root
    if [ -e "setup.d/fgm.bash" ]; then
      cp "setup.d/fgm.bash" "$home/bash/"
      chown -R "$user.$user" "$home/bash"
    fi
    if [ -e "$home/.bashrc" ]; then
      # BUG FIX: this used to append to "$home.bashrc" (/home/<user>.bashrc),
      # so the snippet never reached the real ~/.bashrc.
      cat >> "$home/.bashrc" << '__END__'
[ -r "$HOME/bash/fgm.bash" ] && source "$HOME/bash/fgm.bash"
__END__
    fi
  fi
}
# Set the globals $user and $home used by subsequent helpers.
set_user() {
  user="$1"
  home="/home/$user"
}
# Abort the whole script when a required value is empty.
# $1 - variable name (for the message), $2 - its value
check_var () {
  [ -n "$2" ] && return
  echo "variable $1 is not set"
  exit 1
}
# Ensure directory $1 exists, owned by $user with mode 0755.
# Requires the global $user to be set (aborts otherwise).
create_dir () {
  check_var user "$user"
  # create_dir dir
  local dir="$1"
  if [ ! -d "$dir" ]; then
    run mkdir -p "$dir"
  fi
  # Ownership and mode are (re)applied even when the directory existed.
  run chown "$user.$user" "$dir"
  run chmod 0755 "$dir"
}
# Clone a git repository into $home/repos/<repo-name>.
# Arguments: $1 - repository URL
# BUG FIX: the cmd_git_clone_* helpers call "git_clone", but only
# "get_clone" was defined — and it cloned straight into $home/repos, so a
# second clone always failed on the non-empty directory.
git_clone() {
  check_var home "$home"
  dir="$home/repos"
  create_dir "$dir"
  local name="${1##*/}"
  git clone "$1" "$dir/${name%.git}"
}
# Backward-compatible alias for the old (misnamed) entry point.
get_clone() {
  git_clone "$@"
}
# Cache the GHC 7.10.1 Linux bindist in ~/download.
cmd_fetch_ghc_7_10_1_bindist() {
  download_url https://www.haskell.org/ghc/dist/7.10.1/ghc-7.10.1-x86_64-unknown-linux-deb7.tar.xz
}
# Cache the GHC 7.8.4 Linux bindist in ~/download.
cmd_fetch_ghc_7_8_4_bindist() {
  download_url https://www.haskell.org/ghc/dist/7.8.4/ghc-7.8.4-x86_64-unknown-linux-deb7.tar.xz
}
# NOTE(review): these three call "git_clone", but the function defined in
# this file is named "get_clone" — confirm which name is intended.
cmd_git_clone_haskell_platform() {
  git_clone https://github.com/haskell/haskell-platform
}
cmd_git_clone_haskell_platform_erantapaa() {
  git_clone https://github.com/erantapaa/haskell-platform
}
cmd_git_clone_hl() {
  git_clone https://github.com/haskell-infra/hl
}
# Install Haskell Platform 2014.2.0.0 system-wide (under /usr/local/haskell),
# downloading the tarball to /tmp when it is not already cached, then update
# cabal and install cabal-install/hscolour.
cmd_install_hp_2014_2_0_0 () {
  assert_running_as_root
  dir=/tmp
  hptar="haskell-platform-2014.2.0.0-unknown-linux-x86_64.tar.gz"
  if [ ! -d /usr/local/haskell ]; then
    # BUG FIX: the download was guarded by [ -e ... ], i.e. it only ran when
    # the tarball was *already* present, so a fresh box always hit the
    # "failed to download" branch. Download when the file is missing.
    if [ ! -e "$dir/$hptar" ]; then
      run wget -P "$dir" https://www.haskell.org/platform/download/2014.2.0.0/haskell-platform-2014.2.0.0-unknown-linux-x86_64.tar.gz
    fi
    if [ ! -e "$dir/$hptar" ]; then
      echo "=== failed to download $hptar"
      exit 1
    fi
    # The tarball is rooted at /usr/local, so unpack from /.
    cd /
    run sudo tar xvf "$dir/$hptar"
    run sudo /usr/local/haskell/ghc-7.8.3-x86_64/bin/activate-hs
  fi
  run cabal update
  run cabal install cabal-install hscolour
}
# Ensure directory $1 exists; return 1 when it could not be created.
make_directory() {
  local target="$1"
  [ -d "$target" ] && return 0
  run mkdir "$target"
  [ -d "$target" ] || return 1
  return 0
}
# Fetch a URL into ~/download, creating the directory when needed.
download_url() {
  local dir="$HOME/download"
  if make_directory "$dir"; then
    fetch_url "$1" "$dir"
  else
    echo "unable to create directory $dir"
    exit 1
  fi
}
# Download $1 into directory $2 unless the file is already present.
# Side effect: sets the global $result to the destination path.
fetch_url() {
  local url="$1"
  local dir="$2"
  if [ -z "$dir" ]; then
    echo "fetch_url - no directory specified"
    echo "  url = $url"
    exit 1
  fi
  local dest
  dest="$dir/"$(basename "$url")
  if [ ! -e "$dest" ]; then
    run wget -P "$dir" "$url"
  fi
  result="$dest"
}
# Report shared-library resolution problems for the installed Haskell tree.
cmd_missing_libs () {
  cd /usr/local/haskell
  local out=/tmp/ldd-report-$$
  find . -name '*.so' | while read -r x; do echo "=== $x"; ldd "$x"; done > $out
  echo "Report written to $out"
  echo "Missing libraries:"
  # Haskell's own libHS* are expected to be unresolved; filter them out.
  grep -v libHS "$out" | grep -i 'not found'
}
# Fetch the 7.10.1 RC1 platform tarball into $1 (sets $result via fetch_url).
fetch_rc1() {
  local dir="$1"
  local url="http://www.ozonehouse.com/mark/platform/haskell-platform-7.10.1.20150612-x86_64-unknown-linux-deb7.tar.gz"
  fetch_url $url "$dir"
}
# Download the HP 7.10.1 RC tarball into $HOME/download and run its installer.
cmd_install_haskell_platform() {
  cd
  fetch_rc1 "download"
  run tar xf "$result"
  run ./install-haskell-platform.sh
}
# Point apt at the Ubuntu mirror list for xenial.
cmd_debian_xenial_init() {
  cat > /etc/apt/sources.list.d/offical-ubuntu-mirrors.list << 'END'
deb mirror://mirrors.ubuntu.com/mirrors.txt xenial main restricted universe multiverse
deb mirror://mirrors.ubuntu.com/mirrors.txt xenial-updates main restricted universe multiverse
deb mirror://mirrors.ubuntu.com/mirrors.txt xenial-backports main restricted universe multiverse
deb mirror://mirrors.ubuntu.com/mirrors.txt xenial-security main restricted universe multiverse
END
}
# Base Debian/Ubuntu droplet setup: swap, user, build packages, gitconfig.
cmd_debian_init () {
  assert_running_as_root
  cmd_setup_swap
  cmd_setup_user erantapaa
  cmd_make_setup_readable
  run_conditionally "/did-apt-get-update" apt-get update
  run apt-get install -y tmux
  run apt-get install -y build-essential libgmp10-dev zlib1g-dev libglu1-mesa freeglut3-dev
  run apt-get install -y libicu-dev
  run apt-get install -y lighttpd git tree
  run apt-get install -y python-dev python-setuptools python-pip python-requests
  setup_gitconfig erantapaa "Erik Rantapaa" erantapaa@gmail.com
}
# Debian init plus stack, redo and misc build tooling.
cmd_debian_stack_init() {
  cmd_debian_xenial_init
  cmd_debian_init
  cmd_install_stack
  cmd_install_redo
  apt-get install -y parallel
  apt-get install -y apt-file
  apt-file update
  apt-get install -y pkg-config
  apt-get install -y moreutils libtime-duration-perl libtimedate-perl
}
cmd_full_xenial_stack_init() {
  cmd_debian_stack_init
  cmd_install_xenial_packages
}
# Install the package list kept in ./setup.d/xenial-packages.
cmd_install_xenial_packages() {
  local xp
  xp="./setup.d/xenial-packages"
  if [ ! -e "$xp" ]; then
    echo "file does not exist: $xp"
    return 1
  fi
  apt-get install -y $(cat "$xp")
}
# Kick off cmd_build_pandoc as the unprivileged user, in the background,
# with the build log captured under /tmp.
cmd_build_pandoc_as_user() {
  chmod a+rx /root/setup
  local user_setup="/home/erantapaa/setup"
  if [ ! -e "$user_setup" ]; then
    copy_file_to_user /root/setup "$user_setup" erantapaa
    run chmod a+rx "$user_setup"
  fi
  tmp=/tmp/build-pandoc.$$
  echo "build output redirected to $tmp"
  su - erantapaa "$user_setup" build-pandoc > "$tmp" 2>&1 &
}
# Base Fedora droplet setup (partial port of cmd_debian_init).
cmd_fedora_init() {
  assert_running_as_root
  cmd_setup_swap
  cmd_setup_user erantapaa
  yum install -y tmux wget
  yum groupinstall -y 'Development Tools'
  yum install -y gmp-devel zlib-devel freeglut mesa-libGLU
  yum install -y lighttpd git
  setup_gitconfig erantapaa "Erik Rantapaa" erantapaa@gmail.com
  # to be figured out...
  # run apt-get install -y build-essential libgmp10-dev zlib1g-dev libglu1-mesa
  # run apt-get install -y libicu-dev
  # run apt-get install -y lighttpd git
  # run apt-get install -y python-dev python-setuptools python-pip
}
cmd_fedora_install_pkgs() {
  assert_running_as_root
  yum groupinstall -y 'Development Tools'
  yum install -y gmp-devel zlib-devel freeglut mesa-libGLU
}
# End-to-end smoke test on Fedora: init, packages, then a pandoc build.
cmd_fedora_test_all() {
  assert_running_as_root
  cmd_fedora_init
  cmd_fedora_install_pkgs
  cmd_build_pandoc_as_user
}
# Fetch pandoc 1.14.0.4 from Hackage and build it in a cabal sandbox.
cmd_build_pandoc () {
  pandoc_dir=pandoc-1.14.0.4
  if [ ! -d $pandoc_dir ]; then
    run_conditionally "$HOME/did-cabal-update" cabal update
    run cabal get pandoc-1.14.0.4
  fi
  cd "$pandoc_dir" || { echo "unable to cd into $pandoc_dir"; exit 1
  }
  run cabal sandbox init
  run cabal install --only-dependencies
  run cabal build
  echo "=== done building pandoc"
}
# Print the usage banner and the list of available sub-commands,
# with underscores shown as dashes.
cmd_usage () {
  printf '\nUsage: setup <command>\n\nCommands:\n'
  local name
  for name in $ALL_COMMANDS; do
    printf '  %s\n' "${name//_/-}"
  done
}
# Succeed (status 0) when the whitespace-separated list $1 contains
# the exact word $2.
list_contains() {
  local item
  for item in $1; do
    if [[ "$item" == "$2" ]]; then
      return 0
    fi
  done
  return 1
}
# Dispatch: no args -> usage; whitelisted command -> run cmd_<name>;
# otherwise fall back to any cmd_* function defined in this file (this
# covers commands not listed in ALL_COMMANDS, e.g. debian-stack-init).
main () {
  if [ "$#" == 0 ]; then
    cmd_usage
    exit 0
  elif list_contains "$ALL_COMMANDS" "${1//-/_}"; then
    local cmd="${1//-/_}"
    shift
    # Fail fast only once an actual command starts running.
    set -e
    "cmd_$cmd" "$@"
  else
    cmd="cmd_${1//-/_}"
    cmd_type=$(type -t "$cmd")
    if [ "$cmd_type" == "function" ]; then
      shift
      set -e
      "$cmd" "$@"
    else
      echo "unrecognized command: $1"
      exit 1
    fi
  fi
}
main "$@"
| true
|
eabc08856b64f7232462f426d0c83368d65397e5
|
Shell
|
oleskiewicz/bin
|
/tex2svg
|
UTF-8
| 699
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh -e
# tex2svg — render a TeX math expression as SVG on stdout.
# The formula is read from stdin when it is not a terminal; otherwise $1.
[ ! -t 0 ] && eq=$(cat); eq="${eq:-$1}"
workdir="$(mktemp -d)"
mkdir -p "$workdir"
# Always remove the scratch directory, whatever the exit path.
trap 'rm -r "$workdir"' EXIT
tex="${workdir}/0.tex"
dvi="${workdir}/0.dvi"
svg="${workdir}/0.svg"
if [ -z "$eq" ]
then
	cat <<EOF
usage:
  tex2svg "a+b=c"
  echo "a+b=c" | tex2svg
EOF
	exit 1
fi
# Minimal standalone document wrapping the formula in inline math.
cat <<EOF > "${tex}"
\\documentclass[border=1pt,varwidth]{standalone}
\\usepackage{standalone}
\\usepackage{amsmath}
\\usepackage{amssymb}
\\usepackage{cancel}
\\begin{document}
\\(${eq}\\)
\\end{document}
EOF
latex -output-directory="${workdir}" "${tex}" > /dev/null 2>&1 || exit 1
dvisvgm -v 0 -b2pt -Z1.2 -n -o "${svg}" "${dvi}" > /dev/null 2>&1 || exit 1
cat "${svg}"
exit 0
| true
|
2f6b80ffc0473d9a38f4110b541b2e0fa0a70d1c
|
Shell
|
rubencaro/gmail_checker
|
/check_gmail.sh
|
UTF-8
| 2,781
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Add to crontab as convenient, e.g.:
# * * * * * /bin/bash -l -c /home/user/check_gmail.sh
# Depends on yad, sox, python-keyring, firefox...
# Needs dbus session info when run outside X (as from crontab)
accounts=("ruben@elpulgardelpanda.com")
file_name="check_gmail.sh"
notify_flag="$HOME/.check_gmail_notified.flag"
directory=$(cd `dirname $0` && pwd)
log="$directory/check_gmail.log"
source $HOME/.Xdbus # dbus info
# Check one account ($1 = login): fetch the unread-mail Atom feed, and on
# unread mail send a desktop notification, play a chime, and (once) show a
# clickable tray icon that opens Gmail. The password is cached in the
# system keyring, prompting once via zenity on first use.
function check_account() {
  gmail_login="$1"
  gmail_password=$(python -c "import keyring;print keyring.get_password('""$file_name""', '""$gmail_login""')")
  if [ "None" = "$gmail_password" ]; then
    gmail_password=$(zenity --password --title="$gmail_login")
    python -c "import keyring;print keyring.set_password('""$file_name""', '""$gmail_login""', '""$gmail_password""')" > /dev/null
  fi
  gmail_xml=$(wget -q -O - https://mail.google.com/a/gmail.com/feed/atom --http-user=${gmail_login} --http-password=${gmail_password} --no-check-certificate)
  count=$(echo $gmail_xml | sed 's/^.*<fullcount>\([0-9]*\)<\/fullcount>.*$/\1/')
  if [ $count -gt 0 ]; then
    echo "[$(date)] $count emails in $gmail_login !" >> $log
    #extract entries one by one
    msg=""
    sep="\n"
    rest="$gmail_xml"
    for (( i=0 ; i < $count ; i++ )); do
      entry_xml=$(echo $rest | sed 's/\(.*\)\(<entry>.*<\/entry>\)\(.*\)/\2/')
      rest=$(echo $rest | sed 's/\(.*\)\(<entry>.*<\/entry>\)\(.*\)/\1/')
      title=$(echo $entry_xml | sed 's/.*<title>\(.*\)<\/title>.*/\1/')
      summary=$(echo $entry_xml | sed 's/.*<summary>\(.*\)<\/summary>.*/\1/')
      author=$(echo $entry_xml | sed 's/.*<author>.*<email>\(.*\)<\/email>.*<\/author>.*/\1/')
      name=$(echo $entry_xml | sed 's/.*<author>.*<name>\(.*\)<\/name>.*<\/author>.*/\1/')
      mail_url_params=$(echo $entry_xml | sed 's/.*<link \(.*\) \/>.*/\1/' | sed 's/.*href=".*?\(.*\)" .*/\1/' | sed 's/\&/\&/g')
      msg="$msg($name):$title$sep"
    done
    msg=$(echo $msg | sed 's/"//g')
    # NOTE(review): only the *last* entry's link survives the loop, so the
    # tray-icon click always opens the last parsed message — confirm intended.
    notify-send "$gmail_login ($count)" "\n$msg" -i emblem-ohno &>> $log
    play -q /usr/share/sounds/freedesktop/stereo/dialog-warning.oga reverse repeat repeat repeat vol 5 &>> $log
    if [ ! -f $notify_flag ]; then
      touch $notify_flag
      # add handler for tray icon left click
      function on_click() {
        url="http://mail.google.com?$1"
        firefox --new-tab $url
      }
      export -f on_click
      cmd="bash -c \"on_click '$mail_url_params'\""
      yad --notification --image=emblem-ohno --text="$gmail_login ($count)" --command="$cmd" # block until notification is cleaned
      rm $notify_flag
    fi
  fi
}
for account in "${accounts[@]}"
do
  echo -n '.' >> $log
  check_account $account &>> $log
  sleep 10 # yes, sleep between accounts, let notify do its job
done
| true
|
dd4734c197260809f4d19cd6204984bfdd68e328
|
Shell
|
wymcmh/monsys
|
/web/nginx-1.2.6/c.sh
|
UTF-8
| 379
| 2.90625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Control helper for the locally built nginx in ./out/sbin.
# Usage: c.sh {start|stop|status|test}
# Run relative to this script's directory so ./out/sbin/nginx resolves.
# (Quoted: the original unquoted `cd $(dirname $0)` broke on paths with spaces.)
cd "$(dirname "$0")"
case "$1" in
	start)
		echo "stopping...done"
		# BUG FIX: was `2>&1 >/dev/null`, which sends stderr to the terminal
		# and only stdout to /dev/null; the intent is to silence both.
		./out/sbin/nginx -s stop >/dev/null 2>&1
		echo "starting...done"
		./out/sbin/nginx
		;;
	stop)
		echo "stopping...done"
		./out/sbin/nginx -s stop
		;;
	status)
		ps -ef | grep nginx
		;;
	test)
		wget -q -O - --server-response http://127.0.0.1:8082/monsys
		;;
	*)
		./out/sbin/nginx -h
		;;
esac
| true
|
40dff0db6c6e0d7d73bfa7dd04ac4911b51daabc
|
Shell
|
eelregit/fftgal
|
/src/app/job_Psb.sh
|
UTF-8
| 611
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# SLURM batch job: run the Psb power-spectrum app once for each
# realization id given on the command line.
#SBATCH --ntasks=1
#SBATCH --partition=shared
#SBATCH --mem=5GB
#SBATCH --time=9:00:00
#SBATCH --job-name=Psb
#SBATCH --output=P%j.out
APP=$SCRATCH/fftgal/Psb
# Run parameters passed straight through to the Psb binary.
Ng=512
L=2560
Nsub=4
wisdom=${Ng}.wsdm
alpha=0.02
dK=0.01
indir=/project/projectdirs/boss/galaxy/QPM/dr12d_cubic_mocks
a=0.6452
outdir=$SCRATCH/ssm.d
echo ${SLURM_JOB_ID} starting $(date) on $(hostname)
module load gcc fftw gsl
make Psb
# $@ holds the realization ids; each one also seeds the GSL RNG.
for id in $@
do
    log=$outdir/a${a}_$(printf '%04d' $id)/Psb.log
    time GSL_RNG_SEED=$id $APP $Ng $L $Nsub $wisdom $alpha $dK $indir $a $id $outdir 2> $log
done
echo ${SLURM_JOB_ID} ending $(date)
| true
|
e3ab459dc77a5372a6516b6541501b423d2b406a
|
Shell
|
zgwldrc/repos01
|
/regex/regexlib.regex
|
UTF-8
| 411
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Library of reusable regular expressions (meant to be sourced).
#
# Example:
#   echo xx:xx:aa:bb:cc:dd_192.168.1.1 | sed -e "$regex_fetchIpaddrFormat"
#   # output: 192.168.1.1
# (the original example used `sed -i`, which requires a file operand and
# cannot be applied to a pipe)
# sed substitution extracting the last dotted-quad found in a line.
regex_fetchIpaddrFormat='s/.*[^0-9]\([0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\).*/\1/'
# ERE matching a single octet in the range 0-255.
regex_0to255='([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])'
# ERE matching an IPv4 address anchored at the start of a string.
# NOTE(review): the first-octet alternation starts at [1-9] (no leading 0),
# and the pattern has no end anchor, so "1.2.3.4567" still matches —
# confirm whether that is intended.
regex_ipv4address='^(([1-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])'
| true
|
76d9da072220561ac66e68ecf1e21638213f59a7
|
Shell
|
jusito/docker-vsftpd-alpine
|
/base/entrypointTestMode.sh
|
UTF-8
| 240
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Test-mode entrypoint: mirrors the real vsftpd entrypoint, but the daemon
# start is intentionally commented out.
if [ "${DEBUGGING}" = "true" ]; then
	set -o xtrace
fi
set -o errexit
set -o nounset
set -o pipefail
# start
#trap 'pkill -15 vsftpd' 15
child_pid=""
#/usr/sbin/vsftpd "$CONFIG_FILE" & child_pid=$!
echo "vsftpd started"
# BUG FIX: with the daemon start commented out, $! was unset, and
# `wait "$!"` tripped `set -o nounset`, aborting the script before the
# final message. Wait only when a background PID actually exists.
if [ -n "$child_pid" ]; then
	wait "$child_pid"
fi
echo "Vsftpd ended"
| true
|
c0503879c4d692130fe8d55f1c57087c77d89db0
|
Shell
|
binary-punks/Mail-Toaster-6
|
/provision-clamav.sh
|
UTF-8
| 5,840
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# Provision a "clamav" jail for Mail Toaster 6: install ClamAV (plus the
# optional "unofficial" signature set), configure clamd/freshclam, start
# the daemons, smoke-test, and promote the staged jail.
# Helpers such as stage_pkg_install/stage_exec/tell_status and variables
# like $STAGE_MNT and $ZFS_DATA_MNT come from mail-toaster.sh.
# shellcheck disable=1091
. mail-toaster.sh || exit
# Mount the shared clamav data set into the jail at /var/db/clamav.
export JAIL_CONF_EXTRA="
		mount += \"$ZFS_DATA_MNT/clamav \$path/var/db/clamav nullfs rw 0 0\";"
# Optionally install the eXtremeSHOK "unofficial" signature updater
# (prompts via dialog unless $CLAMAV_UNOFFICIAL is already set).
install_clamav_unofficial()
{
	if [ -z "$CLAMAV_UNOFFICIAL" ]; then
		local _es_mess="
eXtremeSHOK maintains the ClamAV UNOFFICIAL project at
https://github.com/extremeshok/clamav-unofficial-sigs
The project is a set of scripts that download and keep updated
a collection of unofficial ClamAV signatures that significantly
increase ClamAV's virus detection rate. However, they also
increase the False Positive hits.
Unofficial DBs are best used with Haraka's karma plugin (scoring)
and with the clamav plugin configured with [reject]virus=false.
Do you want to install ClamAV UNOFFICIAL?"
		dialog --yesno "$_es_mess" 18 74 || return
	fi
	tell_status "installing ClamAV unofficial 4.8"
	local CLAMAV_UV=4.8
	local STAGE_ETC="$STAGE_MNT/usr/local/etc"
	stage_pkg_install gnupg1 rsync bind-tools
	fetch -m -o "$STAGE_MNT/tmp/" \
		"https://github.com/extremeshok/clamav-unofficial-sigs/archive/$CLAMAV_UV.tar.gz"
	tar -xz -C "$STAGE_MNT/tmp/" -f "$STAGE_MNT/tmp/$CLAMAV_UV.tar.gz" || exit
	local _dist="$STAGE_MNT/tmp/clamav-unofficial-sigs-4.8"
	local _conf="$STAGE_ETC/clamav-unofficial-sigs.conf"
	# Config preference order: local file beside this script, preserved
	# copy from the existing jail, then a patched copy from the dist.
	if [ ! -f "$_conf" ]; then
		if [ -f "clamav-unofficial-sigs.conf" ]; then
			tell_status "installing local clamav-unofficial-sigs.conf"
			cp "clamav-unofficial-sigs.conf" "$STAGE_ETC/"
		fi
	fi
	if [ ! -f "$_conf" ]; then
		if [ -f "$ZFS_JAIL_MNT/clamav/usr/local/etc/clamav-unofficial-sigs.conf" ]; then
			tell_status "preserving clamav-unofficial-sigs.conf"
			cp "$ZFS_JAIL_MNT/clamav/usr/local/etc/clamav-unofficial-sigs.conf" \
				"$STAGE_ETC/"
		fi
	fi
	if [ ! -f "$_conf" ]; then
		tell_status "updating clamav-unofficial-sigs.conf"
		local _dist_conf="$_dist/clamav-unofficial-sigs.conf"
		# Adapt the upstream Linux defaults to FreeBSD paths and the
		# "clamav" user/group used by the FreeBSD port.
		sed -i .bak \
			-e 's/\/var\/lib/\/var\/db/' \
			-e 's/^clam_user="clam"/clam_user="clamav"/' \
			-e 's/^clam_group="clam"/clam_group="clamav"/' \
			"$_dist_conf"
		cp "$_dist_conf" "$_conf" || exit
	fi
	local _sigs_sh="$_dist/clamav-unofficial-sigs.sh"
	# Point the updater at FreeBSD's bash location.
	sed -i .bak -e 's/^#!\/bin\/bash/#!\/usr\/local\/bin\/bash/' "$_sigs_sh"
	chmod 755 "$_sigs_sh" || exit
	cp "$_sigs_sh" "$STAGE_MNT/usr/local/bin" || exit
	cp "$_dist/clamav-unofficial-sigs.8" "$STAGE_MNT/usr/local/man/man8" || exit
	mkdir -p "$STAGE_MNT/var/log/clamav-unofficial-sigs" || exit
	mkdir -p "$STAGE_ETC/periodic/daily" || exit
	# Daily periodic job that refreshes the unofficial signatures.
	tee "$STAGE_ETC/periodic/daily/clamav-unofficial-sigs" <<EOSIG
#!/bin/sh
/usr/local/bin/clamav-unofficial-sigs.sh -c /usr/local/etc/clamav-unofficial-sigs.conf
EOSIG
	chmod 755 "$STAGE_ETC/periodic/daily/clamav-unofficial-sigs" || exit
	mkdir -p "$STAGE_ETC/newsyslog.conf.d" || exit
	echo '/var/log/clamav-unofficial-sigs.log root:wheel 640 3 1000 * J' \
		> "$STAGE_ETC/newsyslog.conf.d/clamav-unofficial-sigs"
	stage_exec /usr/local/etc/periodic/daily/clamav-unofficial-sigs
	if [ -z "$CLAMAV_UNOFFICIAL" ]; then
		dialog --msgbox "ClamAV UNOFFICIAL is installed. Be sure to visit
https://github.com/extremeshok/clamav-unofficial-sigs and follow
the steps *after* the Quick Install Guide." 10 70
	fi
}
# Install the nagios check_clamav plugin when NRPE monitoring is enabled.
install_clamav_nrpe()
{
	if [ -z "$TOASTER_NRPE" ]; then
		echo "TOASTER_NRPE unset, skipping nrpe plugin"
		return
	fi
	tell_status "installing clamav nrpe plugin"
	stage_pkg_install nagios-check_clamav
}
install_clamav()
{
	stage_pkg_install clamav || exit
	echo "done"
	install_clamav_nrpe
	install_clamav_unofficial
}
# Enable TCP listening, syslog logging, and stricter scanning in clamd.conf.
configure_clamd()
{
	tell_status "configuring clamd"
	local _conf="$STAGE_MNT/usr/local/etc/clamd.conf"
	sed -i .bak \
		-e 's/^#TCPAddr 127.0.0.1/TCPAddr 0.0.0.0/' \
		-e 's/^#TCPSocket 3310/TCPSocket 3310/' \
		-e 's/^#LogFacility LOG_MAIL/LogFacility LOG_MAIL/' \
		-e 's/^#LogSyslog yes/LogSyslog yes/' \
		-e 's/^LogFile /#LogFile /' \
		-e 's/^#ExtendedDetectionInfo /ExtendedDetectionInfo /' \
		-e 's/^#DetectPUA/DetectPUA/' \
		-e 's/^#DetectBrokenExecutables/DetectBrokenExecutables/' \
		-e 's/^#StructuredDataDetection/StructuredDataDetection/' \
		-e 's/^#ArchiveBlockEncrypted no/ArchiveBlockEncrypted yes/' \
		-e 's/^#OLE2BlockMacros no/OLE2BlockMacros yes/' \
		-e 's/^#PhishingSignatures yes/PhishingSignatures yes/' \
		-e 's/^#PhishingScanURLs yes/PhishingScanURLs yes/' \
		-e 's/#HeuristicScanPrecedence yes/HeuristicScanPrecedence no/' \
		-e 's/^#StructuredDataDetection yes/StructuredDataDetection yes/' \
		-e 's/^#StructuredMinCreditCardCount 5/StructuredMinCreditCardCount 10/' \
		-e 's/^#StructuredMinSSNCount 5/StructuredMinSSNCount 10/' \
		-e 's/^#StructuredSSNFormatStripped yes/StructuredSSNFormatStripped no/' \
		-e 's/^#ScanArchive yes/ScanArchive yes/' \
		"$_conf" || exit
	echo "done"
}
# Route freshclam logging to syslog and pin the US database mirror.
configure_freshclam()
{
	tell_status "configuring freshclam"
	local _conf="$STAGE_MNT/usr/local/etc/freshclam.conf"
	sed -i .bak \
		-e 's/^UpdateLogFile /#UpdateLogFile /' \
		-e 's/^#LogSyslog yes/LogSyslog yes/' \
		-e 's/^#LogFacility LOG_MAIL/LogFacility LOG_MAIL/' \
		-e 's/^#SafeBrowsing yes/SafeBrowsing yes/' \
		-e 's/^#DatabaseMirror db.XY.clamav.net/DatabaseMirror db.us.clamav.net/' \
		"$_conf" || exit
	echo "done"
}
configure_clamav()
{
	configure_clamd
	configure_freshclam
}
# Fetch signature databases, then enable and start clamd + freshclam.
start_clamav()
{
	tell_status "downloading virus definition databases"
	stage_exec freshclam
	tell_status "starting ClamAV daemons"
	stage_sysrc clamav_clamd_enable=YES
	stage_exec service clamav-clamd start
	stage_sysrc clamav_freshclam_enable=YES
	stage_exec service clamav-freshclam start
}
# Smoke test: clamd must be listening on TCP port 3310 inside the jail.
test_clamav()
{
	echo "testing ClamAV..."
	stage_exec sockstat -l -4 | grep 3310 || exit
	echo "It works! (clamd is listening)"
}
base_snapshot_exists || exit
create_staged_fs clamav
start_staged_jail
install_clamav
configure_clamav
start_clamav
test_clamav
promote_staged_jail clamav
| true
|
d61c646896b6dafe7a96542d2c47a31d89072366
|
Shell
|
chelalita/operativ
|
/Original/Logep.sh
|
UTF-8
| 418
| 3.265625
| 3
|
[] |
no_license
|
# Append a timestamped entry to "$DIRLOG/<name>.log" and trim the log when
# it grows past 10 lines.
# Usage: logep <name> <message> [reason]   (reason defaults to INFO)
# Requires: $DIRLOG pointing at the log directory.
LOG=".log"
DIA=$(date +"%D")
HORA=$(date +"%H:%M:%S")
if [ $# -gt 3 ] || [ 2 -gt $# ]
then
	echo " Mal uso del log "
	exit
fi
# BUG FIX: RAZON was computed but never used (the raw "$3" was written to
# the log, so the "INFO" default never appeared in the file).
RAZON="${3:-INFO}"
FICHERO="$DIRLOG/$1$LOG"
echo "$DIA$HORA-$USER-$1-$RAZON-$2" >> "$FICHERO"
# BUG FIX: the line count used to read the wrong path ($DIRLOG/$1, without
# the .log suffix) and kept the file name in the `wc -l` output, so the
# numeric comparison below could never work. Reading via stdin yields the
# bare number.
if [ -f "$FICHERO" ]
then
	LINEAS=$(wc -l < "$FICHERO")
else
	LINEAS=0
fi
if [[ $LINEAS -gt 10 ]]
then
	# BUG FIX: `sed '1,5d'` only printed to stdout (the file was never
	# trimmed), and the second sed call was not a valid sed command at all.
	sed -i '1,5d' "$FICHERO"
	echo "$USER at $DIA $HORA: log reducido..." >> "$FICHERO"
fi
| true
|
3956ca59511c446c9017255d4d5c88086fb1c4d0
|
Shell
|
draca-be/atlassian
|
/update-jira.sh
|
UTF-8
| 228
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Bump the JIRA version in every atlassian-jira*/Dockerfile, commit the
# change on master, and (re)tag the commit as jira-<version>.
# Usage: update-jira.sh <version>
# BUG FIX: VERSION was assigned but never used, and the unquoted "$@" was
# interpolated straight into the sed program / commit message / tag name,
# which breaks with more than one argument or with spaces.
if [ $# -ne 1 ]; then
	echo "usage: $0 <version>" >&2
	exit 1
fi
VERSION="$1"
git checkout master &&
sed -i "s/JIRA_VERSION=.*/JIRA_VERSION=${VERSION}/g" atlassian-jira*/Dockerfile &&
git add atlassian-jira*/Dockerfile &&
git commit -m "Update JIRA ${VERSION}" &&
git tag -f "jira-${VERSION}"
| true
|
1992e22de8be8db6926240121457181838742aa2
|
Shell
|
luis-caldas/mydesktop
|
/programs/local/control/bluetooth.bash
|
UTF-8
| 6,947
| 4.09375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# {{{ Globals
# Cache directory for battery readings and popup cooldown markers.
BLUE_CACHE_FOLDER="${HOME}/.cache/neoblue"
# Battery percentage at or below which a low-battery popup is shown.
BLUE_WARNING_PERCENTAGE=25
BLUE_POP_COOLDOWN_TIME=60 # Seconds between popups for the same device
# Minimum age before a cached battery reading is refreshed.
BLUE_READ_COOLDOWN=$(( 60 * 2 )) # Seconds
# Set to 1 once this process has ensured the cache directory exists.
cache_created=0
# }}}
# {{{ Utils
# Absolute directory of this script (used to locate sibling helper scripts).
folder_now="$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")")"
# Creates the cache folder (at most once per process; cheap to call often)
check_cache_create() {
	if [ "$cache_created" -eq 0 ]; then # Check whether it was created by this same script
		if [ ! -d "$BLUE_CACHE_FOLDER" ]; then
			mkdir -p "${BLUE_CACHE_FOLDER}"
		fi
		cache_created=1
	fi
}
# Normalize a MAC address into a filename-safe token:
# uppercase it and replace ':' with '_'.
fix_mac() {
	local mac="${1^^}"
	printf '%s\n' "${mac//:/_}"
}
# Undo fix_mac's separator mapping: turn '_' back into ':'
# (case is left untouched).
revert_mac() {
	printf '%s\n' "${1//_/:}"
}
# Get full dbus bluez object
# List every object path known to the BlueZ D-Bus service, one per line,
# cleaned of dbus-send decoration, sorted and de-duplicated.
bluez_full() {
	dbus-send \
		--system \
		--print-reply \
		--type=method_call \
		--dest='org.bluez' '/' \
		org.freedesktop.DBus.ObjectManager.GetManagedObjects \
		| grep "object path" \
		| sed -e 's/variant//' \
			-e 's/object path//' \
			-e 's/^\s*//' \
			-e 's/^"//' \
			-e 's/"$//' \
		| sort -h \
		| uniq
}
# Return the first BlueZ object path from listing $2 that mentions the
# (underscore-normalized) device id $1; prints nothing when absent.
find_path_bluez_dbus() {
	grep "${1}" <<< "${2}" | sed -n '1p'
}
# }}}
# {{{ Devices
# Print the MAC of every currently-connected bluetooth device, one per
# line; prints nothing when the bluetooth service is down or nothing is
# connected.
devices() {
	# Create array for all devices
	all_connected_devices=()
	# Check if the bluetooth service is running (either unit name)
	if ! systemctl is-active --quiet bluetooth &&
		! systemctl is-active --quiet bluetoothd ; then
		return
	fi
	# Get every device known to bluetoothctl (second column is the MAC)
	all_devices="$(bluetoothctl devices | awk '{print $2}')"
	# Iterate devices and find connected ones
	while read -r each_mac; do
		is_conn="$(bluetoothctl info "${each_mac}" | grep Connected | awk '{print $2}')"
		# Check if device is connected
		if [ "${is_conn}" == "yes" ]; then
			all_connected_devices+=("${each_mac}")
		fi
	done <<< "${all_devices}"
	# If there are connected devices
	if [ "${#all_connected_devices[@]}" -ne 0 ]; then
		# Print the list of connected devices
		printf "%s\n" "${all_connected_devices[@]}"
	fi
}
# }}}
# {{{ Battery
# Query a device's battery over RFCOMM via the external bluetooth_battery
# tool, retrying while the device reports "busy". Prints the bare numeric
# percentage, or nothing when the tool fails for another reason.
check_battery_rfcomm() {
	while true; do
		# Run command and save both stdout and stderr separately
		{
			IFS=$'\n' read -r -d '' stderr_now;
			IFS=$'\n' read -r -d '' stdout_now;
		} < <((printf '\0%s\0' "$(bluetooth_battery "$1")" 1>&2) 2>&1)
		# Keep only the digits of the last whitespace-separated field
		output_value="$(awk '{print $NF}' <<< "${stdout_now}" | sed 's/[^0-9]*//g')"
		# Retry only when "device busy" was the cause of the empty result
		if [ -z "$output_value" ]; then
			if ! grep -q 'Device or resource busy' <<< "$stderr_now"; then
				break
			fi
		else
			echo "${output_value}"
			break
		fi
	done
}
# Query a device's battery percentage via the BlueZ Battery1 D-Bus
# property. $1 - device MAC, $2 - cached bluez_full object listing.
check_battery_dbus() {
	# Normalize the given MAC to the D-Bus path form
	fixed_mac="$(fix_mac "${1}")"
	# Find which adapter/object path belongs to the device
	dev_path="$(find_path_bluez_dbus "${fixed_mac}" "${2}")"
	# Get the battery percentage (empty on error)
	dbus-send \
		--print-reply=literal \
		--system \
		--dest=org.bluez \
		"${dev_path}" \
		org.freedesktop.DBus.Properties.Get \
		string:"org.bluez.Battery1" \
		string:"Percentage" 2> /dev/null | awk '{print $NF}'
}
# Print the battery level for device $1 (rfcomm first, then D-Bus),
# right-padded to 3 digits, or "?" when neither method worked.
check_battery() {
	# Try both backends; prefer the rfcomm reading
	rf_bat="$(check_battery_rfcomm "${1}")"
	db_bat="$(check_battery_dbus "${1}" "${2}")"
	# Create print number
	print_nr=0
	if [ -n "$rf_bat" ]; then
		print_nr="$rf_bat"
	elif [ -n "$db_bat" ]; then
		print_nr="$db_bat"
	else
		printf "?\n"
		return
	fi
	printf "%3d\n" "$print_nr"
}
# For each device MAC in $1 (newline separated): read the battery level,
# using/refreshing a per-device file cache (BLUE_READ_COOLDOWN). When $2
# is "print", emit "<mac> <level>"; otherwise fire a low-battery warning
# popup for devices at or below BLUE_WARNING_PERCENTAGE.
main_check() {
	# Snapshot the bluez object listing once for all devices
	bluez_cache="$(bluez_full)"
	# Iterate the devices
	while read -r each_dev; do
		# Clear variable for next iteration
		batt_now=""
		# Create fixed mac for it and cache name
		fixed_mac="$(fix_mac "${each_dev}")"
		cache_name="batt-check-${fixed_mac}"
		cache_path="${BLUE_CACHE_FOLDER}/${cache_name}"
		# Cache file format: "<unix-timestamp> <level>"
		timestamp_now="$(date +%s)"
		if [ -f "${cache_path}" ]; then
			# Get file
			cache_file="$(cat "${cache_path}")"
			# Get time within file
			time_before="$(awk '{print $1}' <<< "${cache_file}")"
			# Use the cached reading only while it is fresh
			if (( timestamp_now < (time_before + BLUE_READ_COOLDOWN) )); then
				batt_now="$(awk '{print $2}' <<< "${cache_file}")"
			fi
		fi
		# Cache miss or expired: query the device and refresh the cache
		if [ -z "$batt_now" ]; then
			# Check battery of current device
			batt_now="$(check_battery "${each_dev}" "${bluez_cache}")"
			# Check cache folder
			check_cache_create
			# Get updated timestamp
			timestamp_updated="$(date +%s)"
			# Save to cache the new information
			printf "%s %s\n" "${timestamp_updated}" "${batt_now}" > "${cache_path}"
		fi
		# Check if only a print is sufficient
		if [ "${2}" == "print" ]; then
			printf "%s %s\n" "${each_dev}" "${batt_now}"
			continue
		fi
		# Skip devices whose level could not be determined
		if [ "${batt_now}" == "?" ]; then
			continue
		fi
		# Low battery: warn in the background so iteration continues
		if (( "${batt_now}" <= "${BLUE_WARNING_PERCENTAGE}" )); then
			warning "${each_dev}" "${batt_now}" &
		fi
	done <<< "${1}"
}
# }}}
# {{{ Warning
# Show a low-battery popup for device $1 at level $2, rate-limited per
# device by a timestamp file (BLUE_POP_COOLDOWN_TIME).
warning() {
	# Get the timestamp now
	timestamp_now="$(date +%s)"
	# Create specific string for device
	fixed_mac="$(fix_mac "${1}")"
	pop_name="pop-${fixed_mac}"
	pop_path="${BLUE_CACHE_FOLDER}/${pop_name}"
	# Get previous timestamp
	# If it exists and is within time dont pop anything
	if [ -f "$pop_path" ]; then
		# Extract before time with the file
		time_before="$(cat "$pop_path")"
		# Check if time has not expired
		if (( timestamp_now < (time_before + BLUE_POP_COOLDOWN_TIME) )); then
			return
		fi
	fi
	# Update the timer it has been shown
	check_cache_create
	echo "$timestamp_now" > "$pop_path"
	# Show the popup via the sibling notification script
	"${folder_now}/../notifications/popup-bluetooth.bash" single "${1}" "${2}"
}
# }}}
# {{{ Printing
# Emit an xmobar-styled block showing the connection count $1.
# build_block comes from the sourced xmobar-style helper.
pretty_section() {
	source "${folder_now}/../visual/xmobar-style.bash"
	build_block "popneoblue" " ${1} " " "
}
# Wrap the (space-joined) arguments in square brackets.
cover() {
	cleft="["
	cright="]"
	printf '%s%s%s' "$cleft" "$*" "$cright"
}
# Print a status-bar segment with the connected-device count ($1 = "pretty"
# selects the xmobar style; anything else the plain "[B n]" form), then
# launch the battery check in the background.
print() {
	# Get devices
	devices_now="$(devices)"
	# Start number of connections as zero
	nr_conn=0
	# Get number of connected devices
	if [ -n "$devices_now" ]; then
		nr_conn="$(wc -l <<< "${devices_now}")"
	fi
	# If there are connected devices print them
	if [ "${nr_conn}" -gt 0 ]; then
		# Check if pretty or simple print
		if [ "$1" == "pretty" ]; then
			pretty_section "${nr_conn}"
		else
			simple_string="$(printf "B %d" "${nr_conn}")"
			cover "${simple_string}"
		fi
		# Start the battery checking in a new subprocess
		main_check "${devices_now}" &
	fi
}
# Only print devices and percentages (api)
print_devices() {
	devices_now="$(devices)"
	main_check "${devices_now}" print
}
# }}}
# {{{ Main
usage() {
	echo "Usage: $0 {pretty,simple,devices}"
}
# CLI dispatch: pretty/simple emit a status-bar segment; devices prints
# "<mac> <battery>" pairs for scripting; -h exits 64, unknown args exit 1.
case "$1" in
	pretty)
		print pretty
		;;
	simple)
		print
		;;
	devices)
		print_devices
		;;
	-h|--help)
		usage
		exit 64
		;;
	*)
		usage
		exit 1
		;;
esac
# }}}
| true
|
64f4faa8f1806ac32ce080c028816bd1425d2b67
|
Shell
|
aziz781/camunda-docker
|
/build-deploy.sh
|
UTF-8
| 1,035
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the Camunda DB and APP docker images from a multi-stage Dockerfile
# and start both containers on a dedicated bridge network.
echo "Setting CAMUNDA env variables"
export CAMUNDA_USER=camunda
export CAMUNDA_PASSWORD=camunda
export CAMUNDA_DB=process-engine
export CAMUNDA_DATA=$PWD/docker/volumes/postgres
echo "creating volume directory for postgres db"
mkdir -p $PWD/docker/volumes/postgres
# NOTE(review): 0777 makes the data dir world-writable; presumably needed
# for the postgres container user — confirm a tighter mode is not possible.
chmod 777 -R $PWD/docker
echo "Building Images.... [DB,APP]"
docker build --target DB -t camunda/db --build-arg DBUSER=$CAMUNDA_USER --build-arg \
DBPWD=$CAMUNDA_PASSWORD --build-arg DBNAME=$CAMUNDA_DB .
docker build --target APP -t camunda/app --build-arg DBUSER=$CAMUNDA_USER --build-arg \
DBPWD=$CAMUNDA_PASSWORD --build-arg DBNAME=$CAMUNDA_DB .
# create custom camunda DB bridge network
docker network create camunda-db-bridge
echo "DB Container ... Starting [DB]"
# db (removed on stop; data persists via the host volume)
docker run --rm --name db --net camunda-db-bridge \
	-d -p 5432:5432 \
	-v $CAMUNDA_DATA:/var/lib/postgresql/data \
	camunda/db
echo "APP Container...Starting [APP]"
# app (exposed on host port 8081)
docker run -d --name camunda --net camunda-db-bridge -p 8081:8080 \
	camunda/app
echo "DONE"
| true
|
24551a12260ed4f890e770ed1a1b2baa5ea18d62
|
Shell
|
codehunte/tools
|
/2fa.sh
|
UTF-8
| 1,699
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Enable Google Authenticator TOTP (2FA) for SSH logins on
# Ubuntu 16+ / Debian 8+ / CentOS 7+.
# Require root.
# NOTE(review): ${red}/${none}/${yellow} are never defined in this script,
# so they expand to nothing — confirm they were meant to be color codes.
[[ $(id -u) != 0 ]] && echo -e "\n 哎呀……请使用 ${red}root ${none}用户运行 ${yellow}~(^_^) ${none}\n" && exit 1
cmd="apt"
# Naive distro detection: systemd plus either apt-get or yum.
if [[ $(command -v apt-get) || $(command -v yum) ]] && [[ $(command -v systemctl) ]]; then
	if [[ $(command -v yum) ]]; then
		cmd="yum"
	fi
else
	echo -e "
 哈哈……这个 ${red}辣鸡脚本${none} 不支持你的系统。 ${yellow}(-_-) ${none}
 备注: 仅支持 Ubuntu 16+ / Debian 8+ / CentOS 7+ 系统
" && exit 1
fi
# Set timezone to Asia/Shanghai and enable NTP time sync.
timedatectl set-timezone Asia/Shanghai
timedatectl set-ntp true
echo "已将主机设置为Asia/Shanghai时区并通过systemd-timesyncd自动同步时间。"
# Install the google-authenticator PAM module and wire it into sshd's PAM.
if [[ $cmd == "yum" ]]; then
	echo "开始安装依赖"
	yum install gcc make pam-devel libpng-devel libtool wget git autoconf automake qrencode -y
	rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
	yum install google-authenticator -y
	# CentOS 7: insert the PAM rule after the "auth substack" line.
	sed -i "/auth[ ]*substack[ ]*pass*/a\auth required pam_google_authenticator.so" /etc/pam.d/sshd
else
	$cmd update && $cmd install libpam-google-authenticator -y
	echo 'auth required pam_google_authenticator.so' >>/etc/pam.d/sshd
fi
# Enable challenge-response authentication in sshd.
sed -i -r 's#(ChallengeResponseAuthentication) no#\1 yes#g' /etc/ssh/sshd_config
echo "安装完成,请执行google-authenticator进行配置"
echo "selinux状态:" && getenforce
echo "如果状态非disabled则运行sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config 关闭SELINUX"
| true
|
3ef244a0042ac8a39309cc1437f61755f0d8303d
|
Shell
|
KairosAerospace/terraform_wrapper_scripts
|
/terraform/private_cloud/deploy.sh
|
UTF-8
| 1,248
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Usage: deploy.sh <tfvars-file>
# Clone patched Terraform module repos and apply the private-cloud stack,
# target by target (EKS/RDS -> tiller -> astronomer -> DNS record).
#
# Fix: quoted "$1" and all "$DIR" expansions — DIR is derived from pwd and
# the script previously broke when the checkout path contained spaces.
set -xe

if [ ! -f "$1" ]; then
  echo "$1 is not a file"
  exit 1
fi

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# Some dependent AWS modules (EKS, RDS) do not yet support Terraform 0.12,
# So we need to clone the repo, which has patched versions of the dependent
# modules locally installed.
if [ ! -d "$DIR/modules/terraform-aws-astronomer-aws" ]; then
  mkdir -p "$DIR/modules" || true
  cd "$DIR/modules"
  git clone https://github.com/astronomer/terraform-aws-astronomer-aws.git
  cd "$DIR"
fi

# We still need to publish the top-level umbrella chart. Right now,
# are only publishing each subchart individually (chart - helm terminology)
if [ ! -d "$DIR/helm.astronomer.io" ]; then
  git clone https://github.com/astronomer/helm.astronomer.io.git
fi

terraform init
# deploy EKS, RDS
terraform apply -var-file="$1" --target=module.aws --auto-approve
# install Tiller in the cluster
terraform apply -var-file="$1" --target=module.system_components --auto-approve
# install astronomer in the cluster
terraform apply -var-file="$1" --target=module.astronomer --auto-approve
# write CNAME record based on the fetched internal LB name
terraform apply -var-file="$1" --target=aws_route53_record.astronomer --auto-approve
| true
|
53212656fcee03bae09d9aab8b08f8d045b208cd
|
Shell
|
jadbox/leaderboard-d
|
/test.sh
|
UTF-8
| 1,263
| 2.828125
| 3
|
[] |
no_license
|
#! /bin/bash
# Requires HTTPie.
# Description: Scenario test script for the leaderboard server
#
# If using the Redis server, del uniqueID and scores_list table between runs.
# Events: 1=create user, 2=set score, 3=get range, 4=get score, 5=delete user.
# `key:=value` sends a JSON number; `key=value` sends a JSON string.
echo "Creating users"
http --print=bB -f POST localhost:3000 name=Don event:=1 &&
http --print=bB -f POST localhost:3000 name=Bill event:=1 &&
echo "Assigning points to users" &&
http --print=bB -f POST localhost:3000 playerID="1" score:=10 event:=2 &&
http --print=bB -f POST localhost:3000 playerID="2" score:=20 event:=2 &&
echo "Getting current range" &&
http --print=bB -f POST localhost:3000 event:=3 range:=[0,1] &&
echo "Updating player's 1 score to outdo player 2" &&
http --print=bB -f POST localhost:3000 playerID="1" score:=30 event:=2 &&
echo "Getting update range" &&
http --print=bB -f POST localhost:3000 event:=3 range:=[0,1] &&
echo "Getting score by user ID" &&
http --print=bB -f POST localhost:3000 playerID="1" event:=4 &&
echo "Delete a user" &&
# NOTE(review): the delete calls send event=5 (JSON string) while every other
# call sends event:=N (JSON number) — verify the server accepts both forms.
http --print=bB -f POST localhost:3000 playerID="1" event=5 &&
echo "Getting update range after a user removed" &&
http --print=bB -f POST localhost:3000 event:=3 range:=[0,1] &&
echo "Cleaning up, removing other created user by script." &&
http --print=bB -f POST localhost:3000 playerID="2" event=5
| true
|
e167aa8ec968e60c2354e1a9745211a572a29dc3
|
Shell
|
whitfin/docker-geoipupdate
|
/src/exec.sh
|
UTF-8
| 635
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh -e
# Generate a GeoIP.conf from environment variables (unless GEOIP_CONFIG_FILE
# is already set) and run geoipupdate against it.
#
# Fix: the original used bash-only `[[ ... ]]` tests under a #!/bin/sh
# shebang, which fails on POSIX shells such as dash/ash (the usual /bin/sh
# in Alpine/Debian containers). Replaced with portable `[ ... ]` tests in
# plain `if` statements.

# set a configuration file if not already set
if ! (: "${GEOIP_CONFIG_FILE?}") 2>/dev/null; then
    GEOIP_CONFIG_FILE="/etc/GeoIP.conf"

    if [ -n "${GEOIP_USER_ID:-}" ]; then
        echo "UserId $GEOIP_USER_ID" > "$GEOIP_CONFIG_FILE"
    fi
    if [ -n "${GEOIP_LICENSE_KEY:-}" ]; then
        echo "LicenseKey $GEOIP_LICENSE_KEY" >> "$GEOIP_CONFIG_FILE"
    fi

    echo "ProductIds ${GEOIP_PRODUCT_IDS:-"GeoLite2-City GeoLite2-Country"}" >> "$GEOIP_CONFIG_FILE"
    echo "DatabaseDirectory ${GEOIP_DIRECTORY:-"/usr/local/share/GeoIP"}" >> "$GEOIP_CONFIG_FILE"
fi

# execute the updates with verbose output
geoipupdate -f "$GEOIP_CONFIG_FILE" -v
| true
|
71e6917b3e1809b7796186e5a2cfa9e9b44070ec
|
Shell
|
pbsinclair42/configs
|
/.githooks/prepare-commit-msg
|
UTF-8
| 764
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
# Git prepare-commit-msg hook.
# $1 = path of the commit message file; $2 = message source.
# For a fresh commit from a template: write the template followed by a
# commented branch status, diffstat and full staged diff.
# For --amend (source "commit"): strip old comments, re-append the template
# and a commented summary of the amended commit.
case "$2" in
template)
# Expand ~ in the configured template path, then copy the template in.
gittemplatefile=$(git config --get commit.template | sed "s|~|$HOME|g");
cat $gittemplatefile > $1;
printf "\n#\n" >> $1;
# Branch line of `git status -sb`, commented.
git status -sb | head -n 1 | sed 's/^##/#/g' >> $1;
git diff --cached --stat | sed 's/^/#/g' >> $1;
printf "#\n#\n" >> $1;
git diff --cached | sed 's/^/# /g' >> $1;;
commit)
# Keep only the non-comment lines of the existing message.
a=$(cat $1 | grep -v "^#");
echo "$a" > $1;
gittemplatefile=$(git config --get commit.template | sed "s|~|$HOME|g");
cat $gittemplatefile >> $1;
printf "\n#\n# Comitted " >> $1;
# Relative timestamp of the commit being amended.
git log --pretty=format:'%cr' -n 1 >> $1;
echo "" >> $1;
# Summary of changes relative to the amended commit's parent.
git diff --cached --stat HEAD^ | sed 's/^/#/g' >> $1;
echo "#\n#" >> $1;
git diff --cached HEAD^ | sed 's/^/# /g' >> $1;;
esac
| true
|
49ee4a21660ba2cc190a599f88a92974edcbf66f
|
Shell
|
Rowantatai/beyondz-platform
|
/docker-compose/scripts/run.bat
|
UTF-8
| 597
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Prepare the containerized Rails app for development and launch the server.

pid_file=/app/tmp/pids/server.pid

# When you stop the container, it doesn't clean itself up properly so it
# fails to start next time. Cleanup!
if [ -e "$pid_file" ]; then
  echo "Cleaning up previous server state"
  rm "$pid_file"
fi

# Overlay the docker-compose specific configuration onto the app.
cp -a /app/docker-compose/config/* /app/config/
cp -a /app/docker-compose/.env-docker /app/.env

echo ""
echo "Note: If this is the first time you're starting this container, you may have to run the following:"
echo ""
echo "  bundle exec rake db:create; bundle exec rake db:migrate; bundle exec rake db:seed"

bundle exec bin/rails s -p 3001 -b '0.0.0.0'
| true
|
cd1b4cc9d6f6e58b437101f322d7ed1ed69706c4
|
Shell
|
dapr/components-contrib
|
/.github/scripts/components-scripts/certification-pubsub.aws.snssqs-setup.sh
|
UTF-8
| 1,604
| 2.546875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/sh
# Set up AWS SNS/SQS certification-test infrastructure: export the resource
# names to the GitHub Actions environment, then provision via Terraform.
# Expects $GITHUB_ENV, $UNIQUE_ID and $CURRENT_TIME to be set by the workflow.

set -e

# append_env KEY VALUE
# Append KEY=VALUE to the GitHub Actions environment file so the value is
# visible to later workflow steps. (Factors out the previous 15 copies of
# the same `echo ... >> $GITHUB_ENV` line, with the redirect target quoted.)
append_env() {
  echo "$1=$2" >> "$GITHUB_ENV"
}

# Set variables for GitHub Actions
append_env AWS_REGION "us-east-1"
append_env PUBSUB_AWS_SNSSQS_QUEUE_1 "sqssnscerttest-q1-$UNIQUE_ID"
append_env PUBSUB_AWS_SNSSQS_QUEUE_2 "sqssnscerttest-q2-$UNIQUE_ID"
append_env PUBSUB_AWS_SNSSQS_QUEUE_3 "sqssnscerttest-q3-$UNIQUE_ID"
append_env PUBSUB_AWS_SNSSQS_TOPIC_3 "sqssnscerttest-t3-$UNIQUE_ID"
append_env PUBSUB_AWS_SNSSQS_QUEUE_MVT "sqssnscerttest-q-mvt-$UNIQUE_ID"
append_env PUBSUB_AWS_SNSSQS_TOPIC_MVT "sqssnscerttest-tp-mvt-$UNIQUE_ID"
append_env PUBSUB_AWS_SNSSQS_QUEUE_DLIN "sqssnscerttest-dlq-in-$UNIQUE_ID"
append_env PUBSUB_AWS_SNSSQS_QUEUE_DLOUT "sqssnscerttest-dlq-out-$UNIQUE_ID"
append_env PUBSUB_AWS_SNSSQS_TOPIC_DLIN "sqssnscerttest-dlt-in-$UNIQUE_ID"
append_env PUBSUB_AWS_SNSSQS_QUEUE_FIFO "sqssnscerttest-q-fifo-$UNIQUE_ID.fifo"
append_env PUBSUB_AWS_SNSSQS_TOPIC_FIFO "sqssnscerttest-t-fifo-$UNIQUE_ID.fifo"
append_env PUBSUB_AWS_SNSSQS_FIFO_GROUP_ID "sqssnscerttest-q-fifo-$UNIQUE_ID"
append_env PUBSUB_AWS_SNSSQS_QUEUE_NODRT "sqssnscerttest-q-nodrt-$UNIQUE_ID"
append_env PUBSUB_AWS_SNSSQS_TOPIC_NODRT "sqssnscerttest-t-nodrt-$UNIQUE_ID"

# Navigate to the Terraform directory
cd ".github/infrastructure/terraform/certification/pubsub/aws/snssqs"

# Run Terraform
terraform init
terraform validate -no-color
terraform plan -no-color -var="UNIQUE_ID=$UNIQUE_ID" -var="TIMESTAMP=$CURRENT_TIME"
terraform apply -auto-approve -var="UNIQUE_ID=$UNIQUE_ID" -var="TIMESTAMP=$CURRENT_TIME"
| true
|
821bc2a91998dfbf2bb1c5e8e583c709938c0419
|
Shell
|
loftuxab/alfresco-swedish
|
/updateAlfrescoSource.sh
|
UTF-8
| 4,331
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Pull Alfresco translation source files from the Alfresco SVN mirror and
# keep a local git history of them.

# Base location of source files; change if you need to pull from the
# enterprise branch.
baseurl=http://svn.alfresco.com/repos/alfresco-open-mirror/alfresco/HEAD/root

# svnget <dest-subdir> <repo-url> <revision>
# Check out (first run) or update (later runs) <repo-url> at <revision>
# into alfresco/sv_se/source/<dest-subdir>.
# Fix: all positional parameters are now quoted so URLs/paths/revisions
# cannot word-split or glob.
svnget() {
    if [ ! -d "alfresco/sv_se/source/$1" ]; then
        svn co -r "$3" "$2" "alfresco/sv_se/source/$1"
    else
        svn update -r "$3" "alfresco/sv_se/source/$1"
    fi
}
#Test if we have a specific revision (default: HEAD when $1 is absent/empty)
rev=${1:-HEAD}
#Extra commit message (default: empty when $2 is absent/empty)
message=${2:-}
#Add the ignore files
# One-time bootstrap: write .gitignore so that only the .properties message
# bundles (and selected resources) are tracked.
# NOTE(review): these glob patterns are unquoted echo arguments; if the cwd
# ever contains matching files the shell expands them before writing — the
# patterns should be quoted to be safe.
if [ ! -f ".gitignore" ]; then
echo .svn >> .gitignore
echo *.bak >> .gitignore
echo *.ftl >> .gitignore
echo *.png >> .gitignore
echo *.jar >> .gitignore
echo *.css >> .gitignore
echo *.acp >> .gitignore
echo *.json >> .gitignore
echo *.xml >> .gitignore
echo *.html >> .gitignore
echo *.gif >> .gitignore
echo *.jpg >> .gitignore
echo *.woff >> .gitignore
echo !alfresco/sv_se/*.xml >> .gitignore
echo !alfresco/sv_se/omegat/*.xml >> .gitignore
echo *.js >> .gitignore
echo !alfresco/sv_se/resources/tiny_mce/*.js >> .gitignore
echo !alfresco/sv_se/resources/tiny_mce/*/*.js >> .gitignore
echo !alfresco/sv_se/resources/tiny_mce/*/*/*.js >> .gitignore
echo !alfresco/sv_se/resources/tiny_mce/*/*/*/*.js >> .gitignore
echo !alfresco/sv_se/resources/tiny_mce/*/*/*/*/*.js >> .gitignore
echo *.inc >> .gitignore
#Add some share extras specific ignores
echo README.txt >> .gitignore
echo COPYING.txt >> .gitignore
echo MAINTAINERS.txt >> .gitignore
echo LICENSE.* >> .gitignore
echo _translationstatus.txt >> .gitignore
#end Share extras specific
# Ignore all non-Swedish locale bundles.
echo *_de.properties >> .gitignore
echo *_DE.properties >> .gitignore
echo *_es.properties >> .gitignore
echo *_ES.properties >> .gitignore
echo *_fr.properties >> .gitignore
echo *_FR.properties >> .gitignore
echo *_it.properties >> .gitignore
echo *_IT.properties >> .gitignore
echo *_IT.get.properties >> .gitignore
echo *_ja.properties >> .gitignore
echo *_JA.properties >> .gitignore
echo *_ru.properties >> .gitignore
echo *_RU.properties >> .gitignore
echo *_us.properties >> .gitignore
echo *_US.properties >> .gitignore
echo *_nl.properties >> .gitignore
echo *_NL.properties >> .gitignore
echo *_cn.properties >> .gitignore
echo *_CN.properties >> .gitignore
echo *_en.properties >> .gitignore
echo *_EN.properties >> .gitignore
echo *_no.properties >> .gitignore
echo *_NO.properties >> .gitignore
echo *_nb.properties >> .gitignore
echo *_NB.properties >> .gitignore
echo *_br.properties >> .gitignore
echo *_BR.properties >> .gitignore
# Ignore build/config .properties that are not message bundles.
echo *build.properties >> .gitignore
echo *module.properties >> .gitignore
echo *log4j.properties >> .gitignore
echo *alfresco-global.properties >> .gitignore
echo *file-mapping.properties >> .gitignore
#echo loadalf.sh >> .gitignore
echo .DS_Store >> .gitignore
#Ignor build and target from OmegaT project
echo build/ >> .gitignore
echo alfresco/sv_se/target/** >> .gitignore
echo !alfresco/sv_se/target/.empty >> .gitignore
echo !alfresco/sv_se/resources/** >> .gitignore
echo alfresco/sv_se/omegat/project_stats.txt >> .gitignore
echo alfresco/sv_se/sv_se-level1.tmx >> .gitignore
echo alfresco/sv_se/sv_se-level2.tmx >> .gitignore
echo alfresco/sv_se/sv_se-omegat.tmx >> .gitignore
fi
#Create git repo if not present (comment previously said "Mercurial")
if [ ! -d ".git" ]; then
git init
git add .
git commit -m "Initial commit"
fi
#Make sure we are on propsource branch
#BRANCHEXIST=`git branch|grep propsource|wc -l`
#if [ $BRANCHEXIST -eq 0 ]; then
# git checkout -b propsource
#else
# git checkout propsource
#fi
#Make sure source dir exist and is a svn
mkdir -p alfresco/sv_se/source
if [ ! -d "alfresco/sv_se/source/Repository/messages/.svn" ]; then
#We may have a populated source dir, but we need to start from scratch for svn co/update.
rm -rf alfresco/sv_se/source/Repository/*
fi
# Fetch/update the message bundle sources at revision $rev.
svnget Repository/messages $baseurl/projects/repository/config/alfresco/messages $rev
svnget Repository/scripts $baseurl/projects/remote-api/config/alfresco/ $rev
#svnget web-client $baseurl/projects/web-client/config/alfresco/messages $rev
svnget Repository/workflow $baseurl/projects/repository/config/alfresco/workflow $rev
# Stage all changes; remove files deleted upstream from the index too.
git add .
git ls-files --deleted | xargs git rm
# NOTE(review): the final commit is commented out — changes are staged but
# not committed automatically.
#git commit -m "Updated language source files $baseurl to revision $rev $message"
| true
|
a8b814b4e323d1ca2b2a05f69996a8123ecf52c9
|
Shell
|
MohanNalgire/ng7
|
/run.sh
|
UTF-8
| 211
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Verify that the required toolchain is installed: Node.js >= 12 and npm >= 6.
#
# Fixes two outright bugs in the original:
#   * `if node -v ge 12.0.0` ran `node` with bogus arguments instead of
#     comparing versions;
#   * `if npm -v ge 6.0.0 then` was a bash syntax error (missing `;`).

# major_version <version-string>
# Print the leading integer of a version string such as "v12.18.3" or
# "6.14.4" (strips an optional leading "v", takes the first dot field).
major_version() {
  echo "$1" | sed 's/^v//' | cut -d. -f1
}

# `2>/dev/null` on the test suppresses the error when node/npm is missing
# and the extracted major version is therefore empty (test then fails,
# taking the "install" branch).
if [ "$(major_version "$(node -v 2>/dev/null)")" -ge 12 ] 2>/dev/null; then
  echo "Node is present"
else
  echo "Install nodejs version 12 and greater"
fi

if [ "$(major_version "$(npm -v 2>/dev/null)")" -ge 6 ] 2>/dev/null; then
  echo ""
else
  echo "Install npm version 6.0.0 and greater"
fi
| true
|
acb59c25d2b269ff57230afdf06b74f62aff1e18
|
Shell
|
Ram-Z/ccr-tools
|
/ccr-tools
|
UTF-8
| 10,536
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# ccr-tools: vote/unvote/check/submit helper for the Chakra Community Repo.

ver=0.2

# Per-user scratch dir (cookie jar, downloaded pages) and config location.
tempdir="/tmp/ccr-tools-$UID"
configdir="$HOME/.config/ccr-tools"
configrc="$configdir/config"

# CCR endpoints: JSON RPC, package pages, vote form and upload form.
ccrbaseurl="http://chakra-project.org/ccr/"
rpcurl="${ccrbaseurl}rpc.php?type"
ccrurl="${ccrbaseurl}packages.php"
idurl="${ccrbaseurl}rpc.php?type=info&arg="
voteurl="${ccrbaseurl}packages.php"
checkurl="${ccrbaseurl}packages.php?ID="
submiturl="${ccrbaseurl}pkgsubmit.php"

mkdir -p "$configdir"
mkdir -p "$tempdir"

# Defining some functions needed by main program

# Map of CCR category name -> numeric form value used by pkgsubmit.php.
declare -A categories
categories=(
["daemons"]="2"
["devel"]="3"
["editors"]="4"
["educational"]="15"
["emulators"]="5"
["games"]="6"
["gnome"]="7"
["i18n"]="8"
["kde"]="9"
["lib"]="10"
["lib32"]="19"
["modules"]="11"
["multimedia"]="12"
["network"]="13"
["office"]="14"
["system"]="16"
["utils"]="18"
["x11"]="17"
)
# die <status>
# Remove the temporary working directory (unless the session is being
# remembered for cookie reuse) and terminate with the given exit status.
die() {
    [[ $rememberme != 1 && -d "$tempdir" ]] && rm -rf "$tempdir"
    exit $1
}
# err <message>
# Print <message> (interpreting backslash escapes) and abort via die(1).
err() {
    local msg="$1"
    echo -e "$msg"
    die 1
}
# Print the program name/version banner and the project URL, then exit.
version() {
    printf '%s\n' "ccr-tools - version $ver" "" " http://ccr-tools.github.com/"
    exit
}
# Print the full command-line help text, then exit.
usage() {
    cat <<EOF
ccr-tools - version $ver

usage: ccr-tools <option> <pkgname1> <pkgname2> ...

ccr-tools --version, -V shows version
ccr-tools --help, -h shows this help
ccr-tools --vote, -v vote for packages
ccr-tools --unvote, -u unvote packages
ccr-tools --check, -c check for voted packages
ccr-tools --submit, -s submit a new package
 --category, -C category for new packages
ccr-tools -sC list all valid categories
ccr-tools --maintainer, -m list all packages by maintainer
ccr-tools --forget, -f don't keep cookies

 example: ccr-tools --vote shake bfilter

You can create ~/.config/ccr-tools/config containing:
user=YOUR_CCR_USERNAME
pass=YOUR_CCR_PASS

To create a new account just go to:
http://chakra-linux.org/ccr/account.php
EOF
    exit
}
# Tests whether $1 exists on the ccr
existsinccr() {
retval=$(curl -LfGs --data-urlencode "arg=$pkgname" "$rpcurl=info" | \
jshon -Qe type -u)
[[ $retval = "info" && ! $category ]]
}
# Load CCR credentials ($user/$pass/$rememberme) from the config file,
# migrating legacy config locations first and creating a commented
# template config when none exists.
getcred() {
# Check config file
if [ -r ~/.ccr-toolsrc ] && [ ! -r "$configrc" ]; then
echo "Moving ~/.ccr-toolsrc to $configrc"
install -Dm644 ~/.ccr-toolsrc "$configrc" && rm ~/.ccr-toolsrc
fi
if [ -r ~/.config/ccr-toolsrc ] && [ ! -r "$configrc" ]; then
echo "Moving ~/.config/ccr-toolsrc to $configrc"
install -Dm644 ~/.config/ccr-toolsrc "$configrc" && rm ~/.config/ccr-toolsrc
fi
# No config anywhere: write a commented-out template.
if [ ! -r "$configrc" ]; then
cat << 'EOF' > "$configrc"
#user=
#pass=
#rememberme=1
EOF
fi
[[ -r "$configrc" ]] && source $configrc
rememberme=${rememberme:-1} # default to 1 if not set
}
# Echo 1 if the stored AURSID session cookie is absent, expired or marked
# deleted; echo 0 otherwise. Reads the Netscape-format cookie jar, where
# field 5 is the expiry epoch (0 = session cookie) and field 7 the value.
expired() {
# check to see if the session has expired
local etime
[[ -r "$tempdir/cjar" ]] && etime=$(awk '{print $5}' <<< $(grep AURSID "$tempdir/cjar"))
if [[ $etime == 0 ]]; then
echo 0
elif [[ $etime == "" || $etime -le $(date +%s) || "$(awk '{print $7}' <<< $(grep AURSID "$tempdir/cjar"))" == "deleted" ]]; then
echo 1
else
echo 0
fi
}
# Log in to the CCR, prompting for credentials when not configured, and
# keep the session cookie in $tempdir/cjar for subsequent requests.
# Aborts via die(1) on bad credentials or connection failure.
login() {
getcred
# logs in to ccr and keeps session alive
umask 077
failure=0
# Prime the cookie jar with a plain GET first.
curl -Ss --cookie-jar "$tempdir/cjar" --output /dev/null ${ccrbaseurl} || failure=1
# Only POST the login form when we have no live session.
if [[ $failure = 0 && ( ! -e "$tempdir/cjar" || $rememberme != 1 || $(expired) == 1 ) ]]; then
[[ $(expired) == 1 ]] && echo "Your session has expired."
while [[ -z $user ]]; do read -p "please enter your CCR username: " user; done
while [[ -z $pass ]]; do read -p "please enter your CCR password: " -s pass && echo; done
[[ $rememberme == 1 ]] && args=('-d' 'remember_me=on')
curl -Ss --cookie "$tempdir/cjar" --cookie-jar "$tempdir/cjar" \
--data "user=$user" --data-urlencode "passwd=$pass" \
--location --output "$tempdir/ccrvote.login" \
${args[@]} ${ccrbaseurl} || failure=1
# The site reports bad credentials inside the returned HTML.
if grep --quiet "'error'>Bad username or password" "$tempdir/ccrvote.login";then
echo "incorrect password: check $configrc file"
die 1
fi
fi
if [[ ! $failure = 0 ]]; then
echo "Failure (no connection?)"
die 1
fi
}
# getccrid <pkgname>
# Echo the numeric CCR package ID for <pkgname>, or nothing if not found.
# NOTE(review): scrapes the "ID" field out of the RPC JSON with sed rather
# than a JSON parser — fragile if the response format changes.
getccrid() {
wget --quiet "${idurl}${1}" -O - | sed -n 's/.*"ID":"\([^"]*\)".*/\1/p'
}
maintainer() {
if [[ ! $maintainer ]]; then
getcred
[[ $user ]] && maintainer=$user || err "You must specify a maintainer."
fi
# this will break the output if the description contains '@'
curl -LfGs --data-urlencode "arg=$maintainer" "$rpcurl=msearch" |
jshon -Q -e results -a -e OutOfDate -u -p -e Name -u -p -e Version -u -p -e Category -u -p -e NumVotes -u -p -e Description -u |
paste -s -d " @@@\n" | column -t -s '@' | sort -k2 |
sed "s/^1 \([^ ]* [^ ]*\)/$(printf "\033[1;31m")\1$(printf "\033[0m")/; s/^0 //" |
sed "/[^ ]* [^ ]* *none/s/\( *\)none/$(printf "\033[33m")\1none$(printf "\033[0m")/" |
cut -c1-$(tput cols)
exit
}
# Cast a vote for every package named in the global pkgargs array.
# Requires a valid CCR session (login); exits via die when done.
vote() {
login
[[ -z ${pkgargs[@]} ]] && echo "You must specify a package." && die 1
for pkgname in ${pkgargs[@]}; do
ccrid=$(getccrid $pkgname)
[[ -z "$ccrid" ]] && echo "$pkgname was not found on CCR" && continue
# POST the vote form for this package ID using the saved session cookie.
if curl -Ss --cookie "$tempdir/cjar" --cookie-jar "$tempdir/cjar" --data "IDs[${ccrid}]=1" \
--data "ID=${ccrid}" --data "do_Vote=1" \
--output /dev/null ${voteurl}; then
echo "$pkgname now voted"
else
echo "ERROR: Can't access $ccrurl"
fi
done
die 0
}
# Remove the user's vote from every package named in the global pkgargs
# array. Requires a valid CCR session (login); exits via die when done.
# Fix: corrected the "was not yound in CCR" typo to "found".
unvote() {
login
[[ -z ${pkgargs[@]} ]] && echo "You must specify a package." && die 1
for pkgname in ${pkgargs[@]}; do
ccrid=$(getccrid $pkgname)
[ -z "$ccrid" ] && echo "$pkgname was not found in CCR" && continue
# POST the un-vote form for this package ID using the saved session cookie.
if curl -Ss --cookie "$tempdir/cjar" --cookie-jar "$tempdir/cjar" --data "IDs[${ccrid}]=1" \
--data "ID=${ccrid}" --data "do_UnVote=1" \
--output /dev/null ${voteurl}; then
echo "$pkgname now unvoted"
else
echo "ERROR: Can't access $ccrurl"
fi
done
die 0
}
# Post the text in the global $comment on every package in pkgargs.
# Requires a valid CCR session (login); exits via die when done.
# Fix: the original `if curl ...; then else ...` had an EMPTY then-branch,
# which is a bash syntax error — the test is now inverted so only the
# failure path needs a body. Also corrected the "yound" typo.
comment() {
login
[[ -z ${pkgargs[@]} ]] && echo "You must specify a package." && die 1
for pkgname in ${pkgargs[@]}; do
ccrid=$(getccrid $pkgname)
[ -z "$ccrid" ] && echo "$pkgname was not found in CCR" && continue
if ! curl -Ss --cookie "$tempdir/cjar" --cookie-jar "$tempdir/cjar" \
--data "ID=${ccrid}" --data "comment=${comment}" \
--output /dev/null ${checkurl}${ccrid}; then
echo "ERROR: Can't access $ccrurl"
fi
done
die 0
}
# Report the voted/not-voted status of every package in pkgargs by
# downloading its package page and looking for the Vote/UnVote buttons.
# Fixes: "yound" typo corrected to "found"; quoted the grep target path.
check() {
login
for pkgname in ${pkgargs[@]}; do
ccrid=$(getccrid $pkgname)
[ -z "$ccrid" ] && echo "$pkgname was not found in CCR" && continue
# Fetch the package page with the session cookie so vote buttons render.
curl -Ss --cookie "$tempdir/cjar" --cookie-jar "$tempdir/cjar" \
--output "$tempdir/$pkgname" \
"${checkurl}${ccrid}"
echo -n "$pkgname: "
# An UnVote button means we have voted; a Vote button means we have not.
if grep -q "type='submit' class='button' name='do_UnVote'" "$tempdir/$pkgname"; then
echo "already voted"
elif grep -q "type='submit' class='button' name='do_Vote'" "$tempdir/$pkgname"; then
echo "not voted"
else
echo "voted status not found"
fi
done
die 0
}
# Upload a .src.tar.gz source package to the CCR.
# Uses the global pkgargs (path to the tarball) and $category; new packages
# must be submitted with an explicit category.
submit() {
# Resolve the category name to its numeric form value (1 = "none").
[[ $category == "" ]] && catvalue=1 || catvalue=${categories[$category]}
if [[ "${catvalue}" == "" ]]; then
echo "'$category' is not a valid category."
die 1
fi
# we don't even want to submit something different than source files
if [[ -z $pkgargs ]]; then
echo "You must specify a source package to upload."
die 1
elif [[ ! -f $pkgargs || $pkgargs != *.src.tar.gz ]]; then
echo "`basename ${pkgargs}` is not a source package!"
die 1
fi
# get the pkgname from the archive
pkgname=$(tar -xzOf $pkgargs --no-anchored 'PKGBUILD' | sed -n "s/^pkgname=['\"]\?\([^'\"]*\)['\"]\?/\1/p")
# Brand-new packages must come with a category.
if ! existsinccr $pkgname && [[ $catvalue == 1 ]]; then
err "Since $pkgname is not in CCR yet, you must provide a category."
die 2
fi
login
# TODO allow multiple files to be uploaded.
#+advantages: you can update lots of packages at once
#+drawback, only one category can be selected for all of them
local error
# Upload via multipart form; scrape any server-side error span from the
# returned HTML (first match wins).
error=$(curl -sS --cookie "$tempdir/cjar" --cookie-jar "$tempdir/cjar" \
--form "pkgsubmit=1" \
--form "category=${catvalue}" \
--form "pfile=@${pkgargs}" \
"${submiturl}" |
sed -n "s|.*<span class='error'>\(.*\)</span>.*|\1|p; s|^\(You must create an .* packages\.\).*|Sorry, you are not logged in.|p" |
head -1)
[[ $error ]] || error="Successfully uploaded."
echo "`basename ${pkgargs} ".src.tar.gz"`: $error"
die 0
}
# Print every valid CCR category name, one per line, sorted, then exit 0.
listcat() {
    local name
    for name in "${!categories[@]}"; do
        printf '%s\n' "$name"
    done | sort
    exit 0
}
### MAIN PROGRAM ###
pkgargs=()
while [ "$#" -ne "0" ]; do
case $1 in
--help|-h) usage ;;
--version|-V) version ;;
--check|-c) shift
pkgargs=$@
check
;;
--vote|-v) shift
pkgargs=$@
vote
;;
--unvote|-u) shift
pkgargs=$@
unvote
;;
--comment) shift
comment=$1; shift
pkgargs=$@
comment
;;
--submit|-s) shift
while [[ "$#" != 0 ]]; do
case $1 in
--category|-C) shift
category=$1; shift
[[ $category == "" ]] && listcat
;;
*) pkgargs+=$1; shift
;;
esac
done
submit
;;
-sC) shift
category=$1; shift
[[ "$category" == "" ]] && listcat
pkgargs=$@
submit
;;
--maintainer|-m) shift
maintainer=$1
maintainer
;;
--forget|-f) rememberme=0 ;;
--*|-*) echo "ccr-tools: Option \`$1' is not valid."; exit 5 ;;
esac
shift
done
usage
# vim: sts=4 ts=4 sw=4 et
| true
|
24aad6aa2a9935a7fc4633d9c8a8602917cc3f6f
|
Shell
|
hhadian/kaldi
|
/egs/wenetspeech/s5/local/wenetspeech_test_aishell.sh
|
UTF-8
| 2,588
| 3.1875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2021 ASLP, NWPU (Author: Hang Lyu)
# Mobvoi Inc (Author: Binbin Zhang)
# Apache 2.0
# To accommodate with the setups of other toolkits, we give up the techniques
# about SpecAug and Ivector in this script.
# 1c use multi-stream cnn model.
# Set -e here so that we catch if any executable fails immediately
set -euo pipefail
stage=15
train_nj=20
decode_nj=20
train_set="train_l"
nnet3_affix=_cleaned
# The rest are configs specific to this script. Most of the parameters
# are just hardcoded at this level, in the commands below.
affix=_1c # affix for the TDNN directory name
decode_iter=
# decode options
test_sets="test_aishell1"
test_online_decoding=false # if true, it will run the last decoding stage.
# End configuration section.
echo "$0 $@" # Print the command line for logging
. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh
# Acoustic-model directory for the multi-stream CNN-TDNN chain model.
dir=exp/${train_set}/chain${nnet3_affix}/cnn_tdnn${affix}_sp
# Stage 15: extract high-resolution MFCC features for the AISHELL-1 test set.
if [ $stage -le 15 ]; then
steps/make_mfcc.sh --nj $decode_nj --mfcc-config conf/mfcc_hires.conf \
--cmd "$decode_cmd" data/test_aishell1_hires || exit 1;
steps/compute_cmvn_stats.sh data/test_aishell1_hires || exit 1;
utils/fix_data_dir.sh data/test_aishell1_hires
fi
# Stage 16: extract online i-vectors for the test set with the trained extractor.
if [ $stage -le 16 ]; then
steps/online/nnet2/extract_ivectors_online.sh --cmd "$decode_cmd" \
--nj $decode_nj \
data/test_aishell1_hires exp/${train_set}/nnet3${nnet3_affix}/extractor \
exp/${train_set}/nnet3${nnet3_affix}/ivectors_test_aishell1_hires || exit 1;
fi
# Stage 17: decode each test set in parallel; a .error marker file collects
# failures from the background subshells.
if [ $stage -le 17 ]; then
rm $dir/.error 2>/dev/null || true
for part_set in $test_sets; do
(
steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \
--nj $decode_nj --cmd "$decode_cmd" \
--online-ivector-dir exp/${train_set}/nnet3${nnet3_affix}/ivectors_${part_set}_hires \
$dir/graph data/${part_set}_hires $dir/decode_${part_set}${decode_iter:+_$decode_iter} || exit 1
) || touch $dir/.error
done
wait
if [ -f $dir/.error ]; then
echo "$0: something went wrong in decoding"
exit 1
fi
fi
# Stage 19: rescore with an RNNLM. (NOTE(review): there is no stage 18 —
# presumably removed at some point; numbering kept for compatibility.)
if [ $stage -le 19 ]; then
# decode with rnnlm
# If an rnnlm has been provided, we should set the "stage" to 4 for testing.
./local/wenetspeech_run_rnnlm.sh --stage 4 \
--train-stage -10 \
--ngram-order 5 \
--num-epoch 8 \
--num-jobs-initial 1 \
--num-jobs-final 8 \
--words-per-split 400000 \
--text data/corpus/lm_text \
--ac-model-dir $dir \
--test-sets "$test_sets" \
--decode-iter "$decode_iter" \
--lang data/lang_test \
--dir exp/rnnlm
fi
exit 0;
| true
|
48f1a1018c87ba070cee7ba0aaaa55fed862c293
|
Shell
|
renowncoder/sysbox
|
/tests/helpers/k8s.bash
|
UTF-8
| 16,170
| 3.984375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash

load ../helpers/run
load ../helpers/docker
load ../helpers/systemd
load ../helpers/fs

#
# K8s Test Helper Functions
# (for tests using bats)
#

# kubeadm_get_token <master-container>
# Echo the full `kubeadm join ...` command generated on the master node.
function kubeadm_get_token() {
local k8s_master=$1
local join=$(__docker exec $k8s_master sh -c "kubeadm token create --print-join-command 2> /dev/null")
echo $join
}
# Sets up a proper k8s config in the node being passed.
function k8s_config() {
local cluster_name=$1
local node=$2
docker exec "$node" sh -c "mkdir -p /root/.kube && \
cp -i /etc/kubernetes/admin.conf /root/.kube/config && \
chown $(id -u):$(id -g) /root/.kube/config"
[ "$status" -eq 0 ]
}
# Checks that the host has sufficient storage to run K8s clusters
function k8s_check_sufficient_storage() {
# K8s requires nodes to have a decent amount of storage (otherwise the
# kubelet refuses to deploy pods on the node). Here, we specify that
# we need 6GB (~1.8GB for the k8s node image, plus plenty room for
# inner containers).
#
# Note that Sysbox does not yet support virtualizing the storage
# space allocated to the K8s node sys-container, so each node sees
# the storage space of the host.
local req_storage=$((6 * 1024 * 1024 * 1024))
local avail_storage=$(fs_avail "/")
[ "$avail_storage" -ge "$req_storage" ]
}
function k8s_node_ready() {
local node=$1
ret=$(kubectl get node $node | tail -n 1)
if [ $? -ne 0 ]; then
return 1
fi
echo $ret | awk '{print $2}' | grep -qw Ready
}
function k8s_node_ip() {
local node=$1
docker_cont_ip $node
}
# Apply a yaml manifest on the cluster and assert success.
# NOTE(review): cluster_name/k8s_master are accepted but unused here
# (kubectl runs with the ambient kubeconfig) — kept for call-site
# compatibility.
function k8s_apply() {
local cluster_name=$1
local k8s_master=$2
local yaml=$3
run kubectl apply -f $yaml
[ "$status" -eq 0 ]
}
# Delete the resources described by a yaml manifest and assert success.
function k8s_delete() {
local cluster_name=$1
local k8s_master=$2
local yaml=$3
run kubectl delete -f $yaml
[ "$status" -eq 0 ]
}
# Create a pod from a yaml spec and assert success (logs status/output).
function k8s_create_pod() {
local cluster_name=$1
local k8s_master=$2
local pod_yaml=$3
run kubectl apply -f $pod_yaml
echo "status = ${status}"
echo "output = ${output}"
[ "$status" -eq 0 ]
}
# Force-delete a pod (grace period 0), optionally in namespace $2.
function k8s_del_pod() {
local pod=$1
local ns
if [ $# -eq 2 ]; then
ns="-n $2"
fi
run kubectl delete pod $pod $ns --grace-period=0
[ "$status" -eq 0 ]
}
# Determines pod readiness (Running) state.
# $1 - K8s cluster pod belongs to
# $2 - k8s node to extract info from
# $3 - k8s pod to query
# $4 - k8s namespace where pod is expected (optional)
# NOTE(review): despite the header above, the implementation takes the pod
# name as $1 and an optional namespace as $2 — verify against callers.
function k8s_pod_ready() {
local pod=$1
local ns
if [ $# -eq 2 ]; then
ns="-n $2"
fi
run kubectl get pod $pod $ns
echo "status = ${status}"
echo "output = ${output}"
[ "$status" -eq 0 ]
local pod_status="${lines[1]}"
# Looking for:
#
# NAME READY STATUS RESTARTS
# pod x/x Running 0
local total=$(sh -c "echo '$pod_status' | awk '{print \$2}' | cut -d \"/\" -f 2")
echo "$pod_status" | awk -v OFS=' ' '{print $1, $2, $3, $4}' | egrep -q "$pod $total/$total Running 0"
}
# Determines readiness (Running) state of all pods within array.
# $1 - K8s cluster pods belong to
# $2 - k8s node to extract info from
# $3 - array of k8s pod to query
# $4 - k8s namespace where pods are expected (optional)
# NOTE(review): the implementation treats ALL arguments as pod names, and
# $ns is never assigned locally (it is empty unless inherited from the
# caller's scope) — the header above does not match; verify.
function k8s_pod_array_ready() {
local pod_array=("$@")
local pod
for pod in "${pod_array[@]}"; do
k8s_pod_ready $pod $ns
if [ $? -ne 0 ]; then
return 1
fi
done
return 0
}
# Succeeds when the pod does NOT exist (kubectl get returns status 1).
function k8s_pod_absent() {
local cluster_name=$1
local k8s_master=$2
local pod=$3
local ns
if [ $# -eq 4 ]; then
ns="-n $4"
fi
run kubectl get pod $pod $ns
echo "status = ${status}"
echo "output = ${output}"
[ "$status" -eq 1 ]
}
# Returns the IP address associated with a given pod
# (column 6 of `kubectl get pod -o wide`).
function k8s_pod_ip() {
local cluster_name=$1
local k8s_master=$2
local pod=$3
run kubectl get pod $pod -o wide
[ "$status" -eq 0 ]
local pod_status="${lines[1]}"
echo $pod_status | awk '{print $6}'
}
# Returns the node associated with a given pod
# (column 7 of `kubectl get pod -o wide`).
function k8s_pod_node() {
local cluster_name=$1
local k8s_master=$2
local pod=$3
run kubectl get pod $pod -o wide
[ "$status" -eq 0 ]
local pod_status="${lines[1]}"
echo $pod_status | awk '{print $7}'
}
# Checks if a pod is scheduled on a given node
function k8s_pod_in_node() {
local cluster_name=$1
local k8s_master=$2
local pod=$3
local node=$4
# TODO: Find out why function doesn't behave as expected when using 'kubectl'
# instead of 'docker exec' instruction; ideally, we want to avoid using
# 'docker exec' here.
run kubectl get pod "$pod" -o wide
[ "$status" -eq 0 ]
local cur_node=$(echo "${lines[1]}" | awk '{print $7}')
[[ "$cur_node" == "$node" ]]
}
# Returns the IP address associated with a given service
# (column 3, CLUSTER-IP, of `kubectl get svc`).
function k8s_svc_ip() {
local cluster_name=$1
local k8s_master=$2
local ns=$3
local svc=$4
run kubectl --namespace $ns get svc $svc
[ "$status" -eq 0 ]
local svc_status="${lines[1]}"
echo $svc_status | awk '{print $3}'
}
# Asserts that kube-proxy is running in the given proxy mode by grepping
# the "Using <mode> Proxier" line in a kube-proxy pod's log.
function k8s_check_proxy_mode() {
local cluster=$1
local controller=$2
local proxy_mode=$3
run sh -c "kubectl -n kube-system get pods | grep -m 1 kube-proxy | awk '{print \$1}'"
echo "status1 = ${status}"
echo "output1 = ${output}"
[ "$status" -eq 0 ]
local kube_proxy=$output
run sh -c "kubectl -n kube-system logs $kube_proxy 2>&1 | grep \"Using $proxy_mode Proxier\""
echo "status2 = ${status}"
echo "output2 = ${output}"
[ "$status" -eq 0 ]
}
function k8s_deployment_ready() {
local cluster_name=$1
local k8s_master=$2
local ns=$3
local deployment=$4
kubectl --namespace $ns get deployment $deployment
[ "$status" -eq 0 ]
local dpl_status="${lines[1]}"
# Looking for:
#
# NAME READY UP-TO-DATE AVAILABLE
# name x/x 1 1
local total=$(sh -c "echo $dpl_status | awk '{print \$2}' | cut -d \"/\" -f 2")
echo $dpl_status | awk -v OFS=' ' '{print $1, $2, $3, $4}' | grep -q "$deployment $total/$total $total $total"
}
function k8s_deployment_rollout_ready() {
local cluster_name=$1
local k8s_master=$2
local ns=$3
local deployment=$4
local i
kubectl --namespace $ns rollout status deployment.v1.apps/$deployment
[ "$status" -eq 0 ]
[[ "$output" == "deployment \"$deployment\" successfully rolled out" ]]
}
function k8s_daemonset_ready() {
local cluster_name=$1
local k8s_master=$2
local ns=$3
local ds=$4
kubectl --namespace $ns get ds $ds
[ "$status" -eq 0 ]
local dpl_status="${lines[1]}"
# Looking for:
#
# NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE
# $ds 2 2 2 2 2
local total=$(echo $dpl_status | awk '{print $2}')
echo $dpl_status | awk -v OFS=' ' '{print $1, $2, $3, $4, $5, $6}' | grep "$ds $total $total $total $total $total"
}
function k8s_cluster_is_clean() {
local cluster_name=$1
local k8s_master=$2
run kubectl get all
[ "$status" -eq 0 ]
# Looking for:
#
# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
# service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 24m
[ "${#lines[@]}" -eq "2" ]
echo ${lines[1]} | grep -q "service/kubernetes"
}
# Install Helm v2.
function helm_v2_install() {
local cluster_name=$1
local k8s_master=$2
docker exec "$k8s_master" sh -c "curl -Os https://get.helm.sh/helm-v2.16.3-linux-amd64.tar.gz && \
tar -zxvf helm-v2.16.3-linux-amd64.tar.gz && \
mv linux-amd64/helm /usr/local/bin/helm && \
kubectl create serviceaccount --namespace kube-system tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
helm init --service-account tiller --upgrade
helm repo add stable https://kubernetes-charts.storage.googleapis.com/ && \
helm repo update"
[ "$status" -eq 0 ]
sleep 5
# Identify tiller's pod name.
run sh -c "kubectl get pods -o wide --all-namespaces | egrep \"tiller\""
echo "status = ${status}"
echo "output = ${output}"
[ "$status" -eq 0 ]
local tiller_pod=$(echo ${output} | awk '{print $2}')
# Wait till tiller's pod is up and running.
retry_run 60 5 "k8s_pod_ready $tiller_pod kube-system"
}
# Uninstall Helm v2.
# Deletes the tiller deployment and waits until the tiller pod is gone.
#   $1 - cluster name   $2 - master node name (both forwarded to the
#        pod-absence check only)
function helm_v2_uninstall() {
local cluster_name=$1
local k8s_master=$2
# Obtain tiller's pod-name.
run sh -c "kubectl get pods -o wide --all-namespaces | egrep \"tiller\""
[ "$status" -eq 0 ]
local tiller_pod=$(echo ${lines[0]} | awk '{print $2}')
# Delete all tiller's deployments.
run kubectl delete deployment tiller-deploy --namespace kube-system
[ "$status" -eq 0 ]
# Wait till tiller pod is fully destroyed.
retry_run 40 2 "k8s_pod_absent $cluster_name $k8s_master $tiller_pod kube-system"
}
# Install Helm v3. Much simpler than the v2 version above, as there is no
# need to deal with 'tiller' complexities.
#   $1 - docker container of the K8s master node
function helm_v3_install() {
local k8s_master=$1
docker exec "$k8s_master" sh -c "curl -Os https://get.helm.sh/helm-v3.1.2-linux-amd64.tar.gz && \
tar -zxvf helm-v3.1.2-linux-amd64.tar.gz && \
mv linux-amd64/helm /usr/local/bin/helm && \
helm repo add stable https://kubernetes-charts.storage.googleapis.com/ && \
helm repo update"
# NOTE(review): $status is not set by a plain docker exec (only by bats
# 'run'); this asserts a previous command's status — confirm.
[ "$status" -eq 0 ]
}
# Uninstall Helm v3.
#   $1 - docker container of the K8s master node
# NOTE(review): "helm reset" is a Helm v2 command that was removed in v3;
# presumably a leftover — confirm against the Helm version on the node.
function helm_v3_uninstall() {
  local master_node=$1
  docker exec "$master_node" sh -c "helm reset"
}
# Installs Istio.
# Downloads the latest Istio release inside the master node container,
# applies the "demo" profile and enables sidecar injection on 'default'.
#   $1 - docker container of the K8s master node
function istio_install() {
local k8s_master=$1
# Bear in mind that the Istio version to download has not been explicitly defined,
# which has its pros (test latest releases) & cons (test instability).
docker exec "$k8s_master" sh -c "curl -L https://istio.io/downloadIstio | sh - && \
cp istio*/bin/istioctl /usr/local/bin/ && \
istioctl manifest apply --set profile=demo && \
kubectl label namespace default istio-injection=enabled"
# NOTE(review): $status is only set by bats 'run'; the plain docker exec
# above does not set it — confirm this check is intentional.
[ "$status" -eq 0 ]
}
# Uninstalls Istio.
# Runs the bookinfo cleanup script, removes the istio-system namespace,
# drops the injection label and deletes the downloaded release tree.
#   $1 - docker container of the K8s master node
function istio_uninstall() {
local k8s_master=$1
# Run uninstallation script.
docker exec "$k8s_master" sh -c "istio-*/samples/bookinfo/platform/kube/cleanup.sh"
[ "$status" -eq 0 ]
# Remove istio namespace.
run kubectl delete ns istio-system
[ "$status" -eq 0 ]
# Trailing '-' removes the istio-injection label from the namespace.
run kubectl label namespace default istio-injection-
[ "$status" -eq 0 ]
# Remove installation script
docker exec "$k8s_master" sh -c "rm -rf istio-*"
[ "$status" -eq 0 ]
}
# Verifies an nginx ingress controller works; this function assumes
# the nginx ingress-controller has been deployed to the cluster.
#   $1 - cluster name   $2 - controller node name
#   $3 - ingress-controller service name
#   $4 - worker node hosting the ingress controller
# NOTE(review): $test_dir is expected from the surrounding test suite —
# confirm it is set before this helper runs.
function verify_nginx_ingress() {
local cluster=$1
local controller=$2
local ing_controller=$3
local ing_worker_node=$4
# We need pods to serve our fake website / service; we use an nginx
# server pod and create a service in front of it (note that we could
# have chosen any other pod for this purpose); the nginx ingress
# controller will redirect traffic to these pods.
run kubectl create deployment nginx --image=${CTR_IMG_REPO}/nginx:1.16-alpine
[ "$status" -eq 0 ]
run kubectl expose deployment/nginx --port 80
[ "$status" -eq 0 ]
retry_run 40 2 "k8s_deployment_ready $cluster $controller default nginx"
# create an ingress rule that maps nginx.nestykube -> nginx service;
# this ingress rule is enforced by the nginx ingress controller.
cat >"$test_dir/nginx-ing.yaml" <<EOF
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx
  annotations:
    kubernetes.io/ingress.class: nginx
spec:
  rules:
  - host: nginx.nestykube
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx
          servicePort: 80
EOF
run kubectl apply -f $test_dir/nginx-ing.yaml
[ "$status" -eq 0 ]
# setup the ingress hostname in /etc/hosts
cp /etc/hosts /etc/hosts.orig
local node_ip=$(k8s_node_ip ${ing_worker_node})
echo "$node_ip nginx.nestykube" >>/etc/hosts
# verify ingress to nginx works
sleep 1
run sh -c "kubectl get service/$ing_controller -o json | jq '.spec.ports[0].nodePort'"
[ "$status" -eq 0 ]
local nodePort=$output
retry_run 10 2 "wget nginx.nestykube:$nodePort -O $test_dir/index.html"
grep "Welcome to nginx" $test_dir/index.html
rm $test_dir/index.html
# Cleanup
run kubectl delete ing nginx
[ "$status" -eq 0 ]
run kubectl delete svc nginx
[ "$status" -eq 0 ]
run kubectl delete deployment nginx
[ "$status" -eq 0 ]
# "cp + rm" because "mv" fails with "resource busy" as /etc/hosts is
# a bind-mount inside the container
cp /etc/hosts.orig /etc/hosts
rm /etc/hosts.orig
}
################################################################################
# KinD specific functions
################################################################################
# Polls until every KinD worker node of $1 reports Ready or $4 seconds
# have elapsed.  KinD names the first worker "<cluster>-worker" and the
# rest "<cluster>-workerN".
#   $1 - cluster name   $2 - controller (unused)
#   $3 - number of workers   $4 - timeout in seconds
# NOTE(review): the result is *echoed* ("0" ready / "1" timeout), not
# returned as an exit status — callers must capture stdout; confirm.
function kind_all_nodes_ready() {
local cluster=$1
local controller=$2
local num_workers=$3
local delay=$4
local timestamp=$(date +%s)
local timeout=$(($timestamp + $delay))
local all_ok
while [ $timestamp -lt $timeout ]; do
all_ok="true"
for i in $(seq 1 $num_workers); do
local worker
if [ $i -eq 1 ]; then
worker="${cluster}"-worker
else
worker="${cluster}"-worker$i
fi
run k8s_node_ready $worker
if [ "$status" -ne 0 ]; then
all_ok="false"
break
fi
done
if [[ "$all_ok" == "true" ]]; then
break
fi
sleep 2
timestamp=$(date +%s)
done
if [[ "$all_ok" != "true" ]]; then
echo 1
else
echo 0
fi
}
################################################################################
# KindBox specific functions
################################################################################
# Polls until every KindBox worker node ("<cluster>-worker-<i>") reports
# Ready or $3 seconds have elapsed.  Echoes "0" on success, "1" on
# timeout (stdout, not exit status).
#   $1 - cluster name   $2 - number of workers   $3 - timeout in seconds
function kindbox_all_nodes_ready() {
local cluster_name=$1
local num_workers=$2
local delay=$3
local timestamp=$(date +%s)
local timeout=$(($timestamp + $delay))
local all_ok
while [ $timestamp -lt $timeout ]; do
all_ok="true"
for ((i = 0; i < $num_workers; i++)); do
# NOTE(review): 'master' is assigned but never used in this loop.
master=${cluster_name}-master
worker=${cluster_name}-worker-${i}
run k8s_node_ready $worker
if [ "$status" -ne 0 ]; then
all_ok="false"
break
fi
done
if [[ "$all_ok" == "true" ]]; then
break
fi
sleep 2
timestamp=$(date +%s)
done
if [[ "$all_ok" != "true" ]]; then
echo 1
else
echo 0
fi
}
# Deploys a k8s cluster through KindBox tool. The cluster has one master node
# and the given number of worker nodes. The cluster uses the K8s flannel cni.
# The master node sys container is called k8s-master and the worker nodes are
# called k8s-worker-0, k8s-worker-1, etc.
#
# usage: k8s_cluster_setup <cluster_name> <num_workers> <network> <node_image> <k8s_version> [<cni>]
#
# cluster: name of the cluster; nodes in the cluster are named "<cluster_name>-master",
# "<cluster-name>-worker-0", "<cluster-name>-worker-1", etc.
# num_workers: number of k8s worker nodes
# network: docker network to which the k8s nodes are connected (e.g., bridge,
# user-defined, etc.)
function kindbox_cluster_setup() {
local cluster=$1
local num_workers=$2
local net=$3
local node_image=$4
local k8s_version=$5
local cni=$6
# NOTE(review): pod_net_cidr is declared but not passed to kindbox here.
local pod_net_cidr=10.244.0.0/16
# Only append --cni when a CNI was explicitly requested.
if [[ ${cni} == "" ]]; then
run tests/scr/kindbox create --num=$num_workers --image=$node_image --k8s-version=$k8s_version --net=$net $cluster
[ "$status" -eq 0 ]
else
run tests/scr/kindbox create --num=$num_workers --image=$node_image --k8s-version=$k8s_version --net=$net --cni=$cni $cluster
[ "$status" -eq 0 ]
fi
# Allow 30s per worker for all nodes to join and become Ready.
local join_timeout=$(($num_workers * 30))
kindbox_all_nodes_ready $cluster $num_workers $join_timeout
}
# Tears-down a k8s cluster created with kindbox_cluster_setup().
#
# usage: kindbox_cluster_teardown cluster_name network
#   $1 - cluster name   $2 - docker network the cluster was created on
function kindbox_cluster_teardown() {
local cluster=$1
local net=$2
if [[ $net == "bridge" ]]; then
run tests/scr/kindbox destroy $cluster
[ "$status" -eq 0 ]
else
# NOTE(review): '--net' is passed without a value here (setup used
# --net=$net) — confirm whether '--net $net' was intended.
run tests/scr/kindbox destroy --net $cluster
[ "$status" -eq 0 ]
fi
# Delete cluster config.
rm -rf /root/.kube/${cluster}-config
}
| true
|
34635be5cafc7c8be4961433f4c7fe2e48c70049
|
Shell
|
usdot-fhwa-stol/carma-platform
|
/carma/launch/utility/save_pcd_map.bash
|
UTF-8
| 1,028
| 2.859375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright (C) 2020-2021 LEIDOS.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
set -e
# Destination for the generated point-cloud map.  When no path is given
# as $1, default to a timestamped file under /tmp and tell the user.
PCD_FILE="$1"
if [ -z "$PCD_FILE" ]; then
FILENAME="$(date '+%Y-%m-%d_%H-%M-%S')_generated_map.pcd"
PCD_FILE="/tmp/$FILENAME"
echo "No file save path provided. File will be saved to:"
echo "$PCD_FILE"
fi
# Ask the ndt_mapping node to write the accumulated map to PCD_FILE.
# -l latches the message; filter_res 0.2 downsamples the saved cloud.
rostopic pub -l /config/ndt_mapping_output autoware_config_msgs/ConfigNDTMappingOutput \
"header:
  seq: 0
  stamp:
    secs: 0
    nsecs: 0
  frame_id: ''
filename: '$PCD_FILE'
filter_res: 0.2"
|
ce0cac9c59e4e7423ad28842ac0dccd7ac28d92f
|
Shell
|
Actat/flocking
|
/flocking.sh
|
UTF-8
| 1,990
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/sh
# Flocking simulation driver:
#   1. compile main.c
#   2. run it, emitting one dat/<frame>.dat file per animation frame
#   3. plot each frame with gnuplot (eps), convert eps -> png, then
#      encode all frames into out.mp4 with ffmpeg.
# Total frames = video_length (s) * frame_rate (fps).
video_length=30
frame_rate=10
frame_number=`expr $video_length \* $frame_rate`
# Simulation field half-extents and number of agents, forwarded to main.out.
field_x=50
field_y=50
fish_amount=200
# Remove artifacts from any previous run.
rm -f out.mp4
rm -rf dat
rm -rf eps
rm -rf png
mkdir dat
# Compile ("compiling"); abort when main.c is missing.
if [ -f main.c ]; then
echo "コンパイルします."
gcc -o main.out main.c -lm
else
echo "main.c が存在しないようです."
exit 0
fi
# Run the simulation to produce the per-frame dat files.
if [ -f main.out ]; then
echo "コンパイルに成功したようです."
echo "プログラムを実行し、連番ファイルを出力します."
./main.out $video_length $frame_rate $field_x $field_y $fish_amount
echo "プログラムの実行が終了しました."
else
echo "コンパイルに失敗したようです."
exit 0
fi
rm main.out
# Plot each frame as an eps vector field (one gnuplot run per frame).
echo "gnuplotで画像ファイルに変換します."
mkdir eps
touch plotimg.gp
i=0
while [ $i -lt $frame_number ]
do
if [ -e dat/${i}.dat ]; then
echo "set encoding iso_8859_1" >> plotimg.gp
echo "set term postscript color eps enhanced" >> plotimg.gp
echo "set output 'eps/$i.eps'" >> plotimg.gp
echo "set size ratio 1" >> plotimg.gp
echo "set xrange[-$field_x:$field_x]" >> plotimg.gp
echo "set yrange[-$field_y:$field_y]" >> plotimg.gp
echo "plot 'dat/${i}.dat' u 1:2:3:4:(sqrt(\$3*\$3+\$4*\$4)) w vector ti ''" >> plotimg.gp
else
echo "${i}は存在しないです."
fi
# NOTE(review): gnuplot/rm run even when the dat file was missing, so
# 'rm plotimg.gp' can fail on the second miss — confirm intended.
gnuplot plotimg.gp
rm plotimg.gp
i=`expr $i + 1`
done
mkdir png
# Rasterize each eps frame to png.
echo "epsからpngに変換します."
i=0
while [ $i -lt $frame_number ]
do
if [ -e eps/${i}.eps ]; then
convert -density 500x500 eps/${i}.eps png/${i}.png
else
echo "eps/${i}.eps は存在しません."
fi
i=`expr $i + 1`
done
# Encode the png sequence into an mp4 (only if frame 0 exists).
echo "pngから動画を作ります."
if [ -e png/0.png ]; then
ffmpeg -r $frame_rate -i png/%d.png -vcodec libx264 -pix_fmt yuv420p -r 60 out.mp4
fi
if [ -e out.mp4 ]; then
echo "出力が完了しました."
else
echo "出力に失敗したようです."
fi
|
d288158600599c61b71b79c0bc9ebe7f989f537b
|
Shell
|
eastmallingresearch/Metatranscriptomics_pipeline
|
/scripts/assemble.sh
|
UTF-8
| 1,588
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
#$ -S /bin/bash
#$ -cwd
#$ -l virtual_free=1G
# SGE (qsub) wrapper that dispatches assembly jobs to metaspades or
# megahit.  The usage banner below is captured into $HELP for print_help.
read -r -d '' HELP << EOM
#########################################################################
# #
# Wrapper script for assmbling data #
# #
# usage: assemble.sh -p <program> [options] #
# #
# -p <metaspades|megahit> #
# #
# assemble.sh -p metaspades Forward Reverse Output PREFIX #
# assemble.sh -p megahit Output PREFIX -1 Forward -2 Reverse #
# #
#########################################################################
EOM
function print_help {
    # Print the usage banner ($HELP) wrapped in blank lines, then stop.
    printf '\n%s\n\n' "$HELP" >&1
    exit 0
}
# With exactly two args (e.g. just "-p metaspades", nothing to assemble)
# show the help and exit.
if [ $# -eq 2 ];
then
print_help
fi
OPTIND=1
# NOTE(review): '-s' takes SCRIPT_DIR from $OPTARG but the optstring
# ":hsp:" declares it without an argument, so $OPTARG is empty — confirm
# whether ":hs:p:" was intended.  'break' on -p means -p must come last.
while getopts ":hsp:" options; do
case "$options" in
s)
SCRIPT_DIR=$OPTARG
;;
p)
program=$OPTARG
break
;;
h)
print_help
exit 0
;;
esac
done
shift $((OPTIND-1))
[ "$1" = "--" ] && shift
# Default SCRIPT_DIR to this script's own directory.
if [[ -z "$SCRIPT_DIR" ]]; then
SCRIPT_DIR=$(readlink -f ${0%/*})
fi
# Dispatch the remaining arguments to the matching qsub submit script.
case $program in
metaspades|spades)
qsub $SCRIPT_DIR/sub_metaspades.sh $@
exit 0
;;
megahit)
qsub $SCRIPT_DIR/sub_megahit.sh $@
exit 0
;;
*)
echo "Invalid assembly program: $program" >&2
exit 1
esac
| true
|
837e22ab4493796eb2335d4ff2a719da79f2da67
|
Shell
|
camm9909/pushbullet_ip_notify
|
/pb_ipcheck.sh
|
UTF-8
| 1,215
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Check the machine's public (WAN) IP against the value cached in $ipcon
# and push a PushBullet note when it has changed.
ipcon="ip.txt" # Path place to store IP
pubip="ifconfig.me" # WAN IP resolve host
pbapi="123456789ABCDEFGHIJKLMNOPQRSTUVWXY" # PushBullet token
title="New IP:" # Title of PushBullet Msg
#Check if IP store exists
if [ -f "$ipcon" ]; then
echo "file exists!"
else
echo "" > "$ipcon"
echo "file doesn't exist, creating"
fi
# Check hosts are reachable
ping -q -c 1 "$pubip" >/dev/null 2>&1; ping1=$?
# BUG FIX: the original 'if [[ $ping1 ]]' tested the *string* for
# non-emptiness, which is always true ("0" and "1" are both non-empty),
# so an unreachable resolver was never detected.  Compare the exit
# status against 0 instead.
if [ "$ping1" -eq 0 ] ; then
curip=$(curl -f -s "$pubip")
stoip=$(cat "$ipcon")
# IP Regex check
if [[ $curip =~ ^[0-9]+(\.[0-9]+){3}$ ]] ; then
if [ "$curip" != "$stoip" ]; then
echo "updating IP..."
echo "${curip}" > "$ipcon"
curl -s -u $pbapi: -X POST https://api.pushbullet.com/v2/pushes \
--header 'Content-Type: application/json' \
--data-binary '{"type": "note", "title": "'"$title"'", "body": "'"$curip"'"}' \
>/dev/null 2>&1
else
echo "no change in IP"
fi
else
echo "No valid IP to parse"
fi
else
# Either machine or resolver is offline
echo "Cannot resolve WAN IP"
fi
exit 0
| true
|
369786cdebcffb8fad991e0fe5f57cd5ab0eaaa2
|
Shell
|
phantasmicmeans/webrtc-native-docker
|
/startapp.sh
|
UTF-8
| 347
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/sh
# Launch the WebRTC native client when the requested app type matches.
#   $1 - application type to start   $2 - handle id for the client
echo "script start"

app_type=${1}
new_handle_id=${2}
webrtc_client="yolo"

echo "app_type = $app_type"
echo "new_handle_id = $new_handle_id"

case "$app_type" in
"$webrtc_client")
	echo "$app_type start now..."
	./WebRTC_SFU_OldLibs $new_handle_id
	;;
*)
	echo "app_type $app_type is not valid, please check your data"
	;;
esac
| true
|
30d9ef1f6e6259a9869191c7882bf08d4129b739
|
Shell
|
michaelcunningham/oracledba
|
/adhoc/gather_stats_npdb510.sh
|
UTF-8
| 1,804
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
# Nightly Oracle statistics gathering for the npdb510 server.
# Refuses to run anywhere else, then gathers schema stats for the
# tdcuat4 database (the tdcdv7 section is currently commented out)
# and appends a summary to a per-server log file.
target_server=npdb510
this_server=`uname -n | cut -f1 -d.`
# Guard: abort when launched on the wrong host.
if [ "$this_server" != "$target_server" ]
then
echo "You are trying to run this script on the wrong server."
echo "It is intended to only run on the "$target_server" server."
exit
fi
log_date=`date +%a`
adhoc_dir=/dba/adhoc
log_file=$adhoc_dir/log/gather_stats_$target_server.log
echo "Gather stats started on "$target_server" at "`date`"." > $log_file
echo >> $log_file
echo " Starting gather stats for tdcdv7 : "`date`"." >> $log_file
#/dba/admin/gather_sys_stats.sh tdcdv7
#/dba/admin/gather_schema_stats_100.sh tdcdv7 npic
#/dba/admin/gather_schema_stats_100.sh tdcdv7 ignite
#/dba/admin/gather_schema_stats_100.sh tdcdv7 novaprd
#/dba/admin/gather_schema_stats_100.sh tdcdv7 tdcglobal
#/dba/admin/gather_schema_stats_100.sh tdcdv7 vistaprd
#/dba/admin/gather_schema_stats_100.sh tdcdv7 fp_informix
#/dba/admin/gather_schema_stats_100.sh tdcdv7 fpicusr
echo " Starting gather stats for tdcuat4 : "`date`"." >> $log_file
#/dba/admin/gather_sys_stats.sh tdcuat4
/dba/admin/gather_schema_stats_100.sh tdcuat4 npic
/dba/admin/gather_schema_stats_100.sh tdcuat4 ignite
/dba/admin/gather_schema_stats_100.sh tdcuat4 novaprd
/dba/admin/gather_schema_stats_100.sh tdcuat4 ignite43
/dba/admin/gather_schema_stats_100.sh tdcuat4 tdcglobal
/dba/admin/gather_schema_stats_100.sh tdcuat4 vistaprd
/dba/admin/gather_schema_stats_100.sh tdcuat4 rein
/dba/admin/gather_schema_stats_100.sh tdcuat4 security
echo >> $log_file
echo "Gather stats finished on "$target_server" at "`date`"." >> $log_file
echo '' >> $log_file
echo '' >> $log_file
echo 'This report created by : '$0' '$* >> $log_file
# Mail notifications are currently disabled.
#mail -s "Database statistics for "$target_server mcunningham@thedoctors.com < $log_file
#mail -s "Database statistics for "$target_server swahby@thedoctors.com < $log_file
|
e6596a09bf92d07c508bcf9d71a06f951afc2598
|
Shell
|
CoolmanCZ/upp_cmake
|
/GenerateCMakeFiles-lib.sh
|
UTF-8
| 85,516
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Copyright (C) 2016-2023 Radek Malcic
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# --- Global configuration for the CMakeLists generator ---
GENERATE_CMAKE_VERSION="1.0"
GENERATE_DATE="$(export LC_ALL=C; date)"
OFN="CMakeLists.txt" # Output file name
# Names of the CMake list variables emitted into each CMakeLists.txt.
LINK_LIST="LINK_LIST"
DEPEND_LIST="DEPEND_LIST"
SOURCE_LIST_C="SOURCE_LIST_C"
SOURCE_LIST_CPP="SOURCE_LIST_CPP"
HEADER_LIST="HEADER_LIST"
INCLUDE_LIST="INCLUDE_LIST"
SOURCE_LIST_ICPP="SOURCE_LIST_ICPP"
SOURCE_LIST_RC="SOURCE_LIST_RC"
COMPILE_FLAGS_LIST="COMPILE_FLAGS_LIST"
TARGET_RENAME="TARGET_RENAME"
PCH_FILE="PCH_FILE"
PCH_INCLUDE_LIST="PCH_INCLUDE_LIST"
PCH_COMPILE_DEFINITIONS="PCH_COMPILE_DEFINITIONS"
# Suffixes appended to generated binary / library target names.
BIN_SUFFIX="-bin"
LIB_SUFFIX="-lib"
# Regular expressions used to classify files and .upp sections.
RE_BZIP2='[bB][zZ]2'
RE_ZIP='[zZ][iI][pP]'
RE_PNG='[pP][nN][gG]'
RE_C='\.([cC])$'
RE_CPP='\.([cC]+[xXpP]{0,2})$'
RE_ICPP='\.([iI][cC]+[xXpP]{0,2})$'
RE_RC='\.(rc)$'
RE_BRC='\.(brc)$'
RE_USES='^uses$'
RE_LINK='^link$'
RE_LIBRARY='^library$'
RE_PKG_CONFIG='^pkg_config$'
RE_STATIC_LIBRARY='^static_library$'
RE_OPTIONS='^options$'
RE_FILES='^file$'
RE_INCLUDE='^include$'
RE_TARGET='^target$'
RE_SEPARATOR='separator'
RE_IMPORT='import.ext'
RE_IMPORT_ADD='^files|^includes'
RE_IMPORT_DEL='^exclude'
RE_FILE_DOT='\.'
RE_FILE_SPLIT='(options|charset|optimize_speed|highlight)'
RE_FILE_EXCLUDE='(depends\(\))'
RE_FILE_PCH='(PCH)'
# Accumulators filled while walking package dependencies.
UPP_ALL_USES=()
UPP_ALL_USES_DONE=()
INCLUDE_SYSTEM_LIST=()
# Known .upp section keywords; sections matching RE_SKIP_SECTIONS are ignored.
SECTIONS=("acceptflags" "charset" "custom" "description" "file" "flags" "include" "library" "static_library" "link" "optimize_size" "optimize_speed" "options" "mainconfig" "noblitz" "target" "uses" "pkg_config")
RE_SKIP_SECTIONS='(acceptflags|mainconfig|charset|description|optimize_size|optimize_speed|noblitz)'
get_section_name()
{
    # Echo the first word of a .upp line (parentheses treated as spaces)
    # when it is one of the known SECTIONS keywords; echo nothing otherwise.
    local text="${1//\(/ }"
    text="${text//\)/ }"
    local -a words=(${text})
    local first
    first="$(string_trim_spaces_both "${words[0]}")"
    if [[ " ${SECTIONS[@]} " =~ " ${first} " ]]; then
        echo "${first}"
    fi
}
get_section_line()
{
    # Strip the leading section keyword $1 from line $2 and echo the
    # remainder with surrounding whitespace trimmed.
    local keyword="${1}"
    local rest
    rest="$(string_trim_spaces_both "${2}")"
    rest="${rest/#${keyword}/}"
    string_trim_spaces_both "${rest}"
}
test_required_binaries()
{
    # Verify that the external tools required to generate the CMakeLists
    # files (sort, date, find, xargs) are available in PATH; print an
    # error and abort the whole script otherwise.
    # 'command -v' replaces the non-portable 'which' (POSIX builtin,
    # works even on systems without a which(1) binary).
    local tool
    local missing=""
    for tool in sort date find xargs; do
        if ! command -v "${tool}" > /dev/null 2>&1; then
            missing+="${tool} "
        fi
    done
    if [ -n "${missing}" ]; then
        echo "ERROR - Requirement for generating the CMakeList files failed."
        echo "ERROR - Can not continue -> Exiting!"
        echo "Missing binaries: ${missing}"
        exit 1
    fi
}
string_trim_spaces_both()
{
    # Echo the argument with leading and trailing whitespace removed.
    local s="${1}"
    local no_lead="${s#"${s%%[![:space:]]*}"}"   # drop leading whitespace
    echo "${no_lead%"${no_lead##*[![:space:]]}"}" # drop trailing whitespace
}
string_remove_separators()
{
    # Echo the argument with every ',' and ';' stripped out.
    local s="${1//,/}"
    echo "${s//;/}"
}
string_remove_comma()
{
    # Like string_remove_separators, but also strips double quotes.
    local s
    s="$(string_remove_separators "${1}")"
    echo "${s//\"/}"
}
string_replace_dash()
{
    # Echo the argument with every '/' turned into '_'
    # (e.g. "plugin/z" -> "plugin_z"), used for CMake target names.
    echo "${1//\//_}"
}
string_get_in_parenthesis()
{
    # Echo the text between the first '(' and the last ')', with every
    # "& " sequence removed; echo an empty line when there is no '('.
    local s="${1}"
    case "${s}" in
    *\(*)
        s="${s#*\(}"   # drop everything up to and including the first '('
        s="${s%)*}"    # drop the last ')' and anything after it
        echo "${s//& /}"
        ;;
    *)
        echo
        ;;
    esac
}
string_get_after_parenthesis()
{
    # Echo whatever follows the last ") " in the argument
    # (unchanged when no ") " is present).
    echo "${1##*) }"
}
string_get_before_parenthesis()
{
    # Echo everything before the first '(' (unchanged when none).
    echo "${1%%\(*}"
}
if_options_replace()
{
    # Map one U++ flag token onto its CMake condition:
    #   OR     -> the literal operand "OR"
    #   SHARED -> BUILD_SHARED_LIBS
    #   WIN32  -> WIN32
    # Any other non-empty token becomes "DEFINED flag<token>".
    # Nothing is echoed for an empty/blank argument.
    local token
    token="$(string_trim_spaces_both "${1}")"
    [ -z "${token}" ] && return
    case "${token}" in
    "OR")     echo "OR" ;;
    "SHARED") echo "BUILD_SHARED_LIBS" ;;
    "WIN32")  echo "WIN32" ;;
    *)        echo "DEFINED flag${token}" ;;
    esac
}
# Translate a whitespace-separated U++ flag expression (tokens, optional
# '!' negation, '|' alternation) into a CMake boolean expression, joining
# tokens with AND/OR/NOT and mapping each via if_options_replace().
# NOTE(review): "${1/|/ | }" spaces out only the *first* '|'; later '|'
# characters must already be separated — confirm intended.
if_options_parse()
{
local operand=""
local next_operand=" AND "
local counter=0
local output=""
local list=""
local options_replacement="${1/|/ | }"
local OPTIONS=(${options_replacement})
if [ -n "${OPTIONS}" ]; then
for list in "${OPTIONS[@]}"; do
# Don't process alone '!' operand
if [[ "${list}" =~ '!' ]] && [ "${#list}" -eq 1 ]; then
list=""
fi
if [ -n "${list}" ]; then
(( counter++ ))
operand="${next_operand}"
# A bare '|' becomes the OR operand and suppresses AND around it.
if [ "${list}" = '|' ]; then
operand=" "
list="OR"
next_operand=" "
else
next_operand=" AND "
fi
# '!token' -> NOT token (prefix handling depends on position).
if [[ "${list}" =~ '!' ]]; then
list="${list//!}"
if [ "${counter}" -eq 1 ]; then
operand="NOT "
else
operand+="NOT "
fi
fi
# Don't insert 'AND operand as first option parameter
if [ "${counter}" -eq 1 ] && [[ "${operand}" = " AND " ]]; then
operand=""
fi
list="$(if_options_replace "${list}")"
output+="${operand}${list}"
fi
done
echo "${output}"
fi
}
# Split a U++ condition string on parentheses into sub-expressions, run
# each through if_options_parse(), wrap the results in parentheses and
# join them with AND.  Echoes the final CMake condition.
if_options_parse_all()
{
local line="${1}"
local ALL_OPTIONS=()
local list=""
local output=""
local result=""
# Split options
# Character walk collecting the spans between '(' / ')' boundaries.
local begin=0
local brace=0
for i in $( seq 0 $(( ${#line} )) ); do
if [ "${line:${i}:1}" == "(" ]; then
local length=$((i - begin))
if [ ${length} -gt 1 ]; then
ALL_OPTIONS+=("${line:${begin}:${length}}")
fi
begin=$((i + 1))
(( brace++ ))
fi
if [ ${brace} -gt 0 ] && [ "${line:${i}:1}" == ")" ]; then
local length=$((i - begin))
if [ ${length} -gt 1 ]; then
ALL_OPTIONS+=("${line:${begin}:${length}}")
fi
begin=$((i + 1))
(( brace-- ))
fi
done
# Trailing text after the last parenthesis.
if [ $begin -lt ${#line} ]; then
ALL_OPTIONS+=("${line:${begin}}")
fi
# No parentheses at all: treat the whole (trimmed) line as one expression.
if [ ${#ALL_OPTIONS[@]} -eq 0 ]; then
ALL_OPTIONS+=("$(string_trim_spaces_both "${line}")")
fi
# Process options
if [ -n "${ALL_OPTIONS}" ]; then
for list in "${ALL_OPTIONS[@]}"; do
result="("$(if_options_parse "${list}")")" # Parse options
result="${result//\(OR / OR \(}" # Move 'OR'
result="${result//\(\)}" # Delete empty parenthesis
output+="${result}"
done
fi
echo "${output//\)\(/\) AND \(}" # Put 'AND' between options
}
# Emit CMake code (into $OFN) that resolves library $2 and appends its
# include dirs and libraries to $INCLUDE_LIST and list variable $1.
#   $1 - name of the CMake link-list variable
#   $2 - library / pkg-config module name
#   $3 - "1" when the name is a pkg-config module
# When $3 != "1" no CMake lookup is generated and the name is simply
# echoed back so the caller links it verbatim.
add_require_for_lib()
{
local link_list="${1}"
local check_lib_name="${2}"
local pkg_config_module="${3}"
local req_lib_dir="DIRS"
local req_lib_name=""
local req_lib_param=""
local use_pkg="0"
if [ "${pkg_config_module}" == "1" ]; then
req_lib_name="${check_lib_name}"
req_lib_param="${check_lib_name}"
use_pkg="1"
fi
if [ -n "${req_lib_name}" ]; then
if [ "${use_pkg}" == "0" ]; then
echo " find_package ( ${req_lib_name} REQUIRED ${req_lib_param} )" >> "${OFN}"
else
echo " find_package ( PkgConfig REQUIRED )" >> "${OFN}"
echo " pkg_check_modules ( ${req_lib_name^^} REQUIRED ${req_lib_param})" >> "${OFN}"
fi
echo " if ( ${req_lib_name^^}_FOUND )" >> "${OFN}"
echo " list ( APPEND ${INCLUDE_LIST} \${${req_lib_name^^}_INCLUDE_${req_lib_dir}} )" >> "${OFN}"
echo " list ( APPEND ${link_list} \${${req_lib_name^^}_LIBRARIES} )" >> "${OFN}"
echo " # remove leading or trailing whitespace (e.g. for SDL2)" >> "${OFN}"
echo " if ( ${link_list} )" >> "${OFN}"
echo " string ( STRIP \"\${${link_list}}\" ${link_list} )" >> "${OFN}"
echo " endif()" >> "${OFN}"
# pthread additionally needs the flags CMake's FindThreads discovered.
if [ "${check_lib_name}" == "pthread" ]; then
echo " if ( CMAKE_THREAD_LIBS_INIT )" >> "${OFN}"
echo " list ( APPEND ${link_list} \${CMAKE_THREAD_LIBS_INIT} )" >> "${OFN}"
echo " endif()" >> "${OFN}"
fi
echo " endif()" >> "${OFN}"
else
echo "${check_lib_name}"
fi
}
add_all_uses() {
    # Append package $1 to the global UPP_ALL_USES array unless it is
    # already present (substring match against the space-joined list).
    local pkg="$1"
    if [[ " ${UPP_ALL_USES[@]} " =~ " ${pkg} " ]]; then
        return
    fi
    UPP_ALL_USES+=(${pkg})
}
# Parse one .upp section line and append the resulting CMake commands to
# $OFN: optional flag condition -> if(...), parameters -> list(APPEND $2 ...).
#   $1 - raw section line            $2 - CMake list variable to append to
#   $3 - target name (non-empty => run parameters through add_require_for_lib)
#   $4 - when set, $1 is a continuation line (no options part) of section $4
# Special cases: BUILDER_OPTION lines are handled by if_options_builder;
# DEPEND_LIST entries get the -lib suffix and are recorded via add_all_uses.
list_parse()
{
local line="${1}"
local list="${2}"
local target_name="${3}"
local list_append="${4}"
local options=""
local parameters=""
echo >> "${OFN}"
if [ -z "${list_append}" ]; then
echo "# ${line}" >> "${OFN}"
else
echo "# ${list_append} ${line}" >> "${OFN}"
fi
# echo "\"line: $line\""
if [[ "${line}" =~ BUILDER_OPTION ]]; then
$(if_options_builder "${line}")
else
# Full line: split "(flags) params"; continuation line: params only.
if [ -z "${list_append}" ]; then
options="$(string_get_in_parenthesis "${line}")"
# echo "\"options: $options\""
options=$(if_options_parse_all "${options}") # Parse options
# echo "\"options: $options\""
parameters="$(string_get_after_parenthesis "${line}")"
parameters="$(string_remove_comma "${parameters}")"
# echo "\"param : $parameters\""
else
# echo "\"options:\""
parameters="$(string_remove_comma "${line}")"
# echo "\"param : $parameters\""
fi
# echo "\"list : $list\""
if [ -n "${options}" ] ; then
echo "if (${options})" >> "${OFN}"
fi
# Add optional dependency target to generate CMakeLists.txt
if [[ "${list}" =~ "${DEPEND_LIST}" ]]; then
local -a new_parameters=("${parameters}")
parameters=""
for item in ${new_parameters[@]}; do
parameters+="$(string_replace_dash "${item}${LIB_SUFFIX}") "
add_all_uses "${item}"
done
local trim_link_parameters="$(string_trim_spaces_both "${parameters}")"
if [ -n "${trim_link_parameters}" ]; then
echo " list ( APPEND ${list} ${trim_link_parameters} )" >> "${OFN}"
fi
fi
local add_link_library=""
if [ -n "${target_name}" ]; then
local pkg_config_module="0"
if [[ "${line}" =~ ^pkg_config || "${list_append}" =~ ^pkg_config ]]; then
pkg_config_module="1"
fi
local -a check_library_array=(${parameters})
for check_library in "${check_library_array[@]}"; do
add_link_library+="$(add_require_for_lib "${list}" "${check_library}" "${pkg_config_module}") "
done
fi
local trim_link_library="$(string_trim_spaces_both "${add_link_library}")"
if [ -n "${trim_link_library}" ]; then
echo " list ( APPEND ${list} ${trim_link_library} )" >> "${OFN}"
fi
if [ -n "${options}" ] ; then
echo "endif()" >> "${OFN}"
fi
fi
}
# Parse a 'target(...)' section line and emit CMake code (into $OFN) that
# sets TARGET_RENAME in the parent scope — optionally guarded by an
# if(<flags>) block when the line carried flag options.
#   $1 - raw section line
# NOTE(review): this strips a leading "${section}" from the line; $section
# is expected from the caller's scope — confirm.
target_parse()
{
local line="${1}"
local options=""
local parameters=""
echo >> "${OFN}"
echo "#${1}" >> "${OFN}"
line="${line/#${section}/}"
options="$(string_get_in_parenthesis "${line}")"
if [ -n "${options}" ]; then
options="$(if_options_parse_all "${options}")" # Parse options
fi
parameters="$(string_get_after_parenthesis "${line}")"
parameters="${parameters//;}"
parameters="${parameters//\"}"
parameters="$(string_trim_spaces_both "${parameters}")"
if [ -n "${options}" ]; then
echo "if (${options})" >> "${OFN}"
echo " set ( ${TARGET_RENAME} \"${parameters}\" PARENT_SCOPE )" >> "${OFN}"
echo "endif()" >> "${OFN}"
else
echo "set ( ${TARGET_RENAME} \"${parameters}\" PARENT_SCOPE )" >> "${OFN}"
fi
}
# Parse a 'link(...)' section line and emit CMake code (into $OFN) that
# appends the parameters to MAIN_TARGET_LINK_FLAGS in the parent scope,
# guarded by if(<flags>).
#   $1 - raw section line
# NOTE(review): unlike target_parse() there is no else branch — a link
# line *without* flag options emits nothing; confirm this is intended.
link_parse()
{
local line="${1}"
local options=""
local parameters=""
echo >> "${OFN}"
echo "# ${1}" >> "${OFN}"
options="$(string_get_in_parenthesis "${line}")"
if [ -n "${options}" ]; then
options="$(if_options_parse_all "${options}")" # Parse options
fi
parameters="$(string_get_after_parenthesis "${line}")"
parameters="${parameters//;}"
parameters="${parameters//\"}"
if [ -n "${options}" ]; then
echo "if (${options})" >> "${OFN}"
echo " set ( MAIN_TARGET_LINK_FLAGS "\${MAIN_TARGET_LINK_FLAGS} ${parameters}" PARENT_SCOPE )" >> "${OFN}"
echo "endif()" >> "${OFN}"
fi
}
# Handle a BUILDER_OPTION line: currently only NOWARNINGS is recognized,
# which appends the compiler's "disable warnings" flag (-w for GCC/Clang,
# -W0 for MSVC) to the CMake C/C++ build-type flags in $OFN.
#   $1 - raw section line
if_options_builder()
{
local line="${1}"
local options="$(string_get_after_parenthesis "${line}")"
local parameters_gcc=""
local parameters_msvc=""
if [[ "${options}" =~ NOWARNINGS ]]; then
parameters_gcc="-w"
parameters_msvc="-W0"
fi
if [ -n "${parameters_gcc}" ]; then
echo 'if ( CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_CLANG )' >> "${OFN}"
echo " set ( CMAKE_CXX_FLAGS_\${CMAKE_BUILD_TYPE} \"\${CMAKE_CXX_FLAGS_\${CMAKE_BUILD_TYPE}} ${parameters_gcc}\")" >> "${OFN}"
echo " set ( CMAKE_C_FLAGS_\${CMAKE_BUILD_TYPE} \"\${CMAKE_C_FLAGS_\${CMAKE_BUILD_TYPE}} ${parameters_gcc}\")" >> "${OFN}"
echo 'elseif ( MSVC )' >> "${OFN}"
echo " set ( CMAKE_CXX_FLAGS_\${CMAKE_BUILD_TYPE} \"\${CMAKE_CXX_FLAGS_\${CMAKE_BUILD_TYPE}} ${parameters_msvc}\")" >> "${OFN}"
echo " set ( CMAKE_C_FLAGS_\${CMAKE_BUILD_TYPE} \"\${CMAKE_C_FLAGS_\${CMAKE_BUILD_TYPE}} ${parameters_msvc}\")" >> "${OFN}"
echo 'endif()' >> "${OFN}"
fi
}
binary_resource_parse()
{
local parse_file="${1}"
local binary_array_first_def=""
local binary_mask_first_def=""
if [ -n "${parse_file}" ] && [ -f "${parse_file}" ]; then
local line=""
local -a lines
local -a binary_array_names
local -a binary_array_names_library
mapfile -t lines < "${parse_file}"
for line in "${lines[@]}"; do
# Remove DOS line ending
line="${line//[$'\r']/}"
if [ -n "${line}" ]; then
local parameter="$(string_get_before_parenthesis "${line}")"
parameter="$(string_trim_spaces_both "${parameter}")"
local options="$(string_get_in_parenthesis "${line}")"
read -d '' -ra options_params < <(printf '%s\0' "${options}")
if [ "${parameter}" == "BINARY_ARRAY" ]; then
local symbol_name="$(string_trim_spaces_both "${options_params[0]//,}")"
local symbol_name_array="$(string_trim_spaces_both "${options_params[1]//,}")"
local symbol_file_name="$(string_trim_spaces_both "${options_params[2]//\"}")"
local symbol_file_compress="${options_params[4]}"
else
local symbol_name="$(string_trim_spaces_both "${options_params[0]//,}")"
local symbol_file_name="$(string_trim_spaces_both "${options_params[1]//\"}")"
local symbol_file_compress="${options_params[2]}"
fi
if [ -z "${symbol_file_compress}" ]; then
symbol_file_compress="none"
fi
# Parse BINARY resources
if [ "${parameter}" == "BINARY" ]; then
echo >> "${OFN}"
echo "# BINARY file" >> "${OFN}"
echo "create_brc_source ( ${symbol_file_name} ${symbol_name}.cpp ${symbol_name} ${symbol_file_compress} write )" >> "${OFN}"
echo "set_source_files_properties ( \${CMAKE_CURRENT_BINARY_DIR}/${symbol_name}.cpp PROPERTIES GENERATED TRUE )" >> "${OFN}"
echo "list ( APPEND ${SOURCE_LIST_CPP} \${CMAKE_CURRENT_BINARY_DIR}/${symbol_name}.cpp )" >> "${OFN}"
# parse BINARY_ARRAY resources
elif [ "${parameter}" == "BINARY_ARRAY" ]; then
local file_creation="append"
if [ -z "${binary_array_first_def}" ]; then
binary_array_first_def="done"
file_creation="write"
fi
binary_array_names+=("${symbol_name}_${symbol_name_array}")
echo >> "${OFN}"
echo "# BINARY_ARRAY file" >> "${OFN}"
echo "create_brc_source ( ${symbol_file_name} binary_array.cpp ${symbol_name}_${symbol_name_array} ${symbol_file_compress} ${file_creation} )" >> "${OFN}"
# parse BINARY_MASK resources
elif [ "${parameter}" == "BINARY_MASK" ]; then
local -a binary_mask_files=("$(eval echo "${symbol_file_name}")")
if [ -n "${binary_mask_files}" ]; then
local all_count=0
local binary_file=""
local -a all_array_files
for binary_file in "${binary_mask_files[@]}"; do
if [ -f "${binary_file}" ]; then
local file_creation="append"
if [ -z "${binary_mask_first_def}" ]; then
binary_mask_first_def="done"
file_creation="write"
fi
echo >> "${OFN}"
echo "# BINARY_MASK file" >> "${OFN}"
echo "create_brc_source ( ${binary_file} ${symbol_name}.cpp ${symbol_name}_${all_count} ${symbol_file_compress} ${file_creation} )" >> "${OFN}"
all_array_files+=("$(basename "${binary_file}")")
(( all_count++ ))
fi
done
# Generate cpp file for the BINARY_MASK
echo >> "${OFN}"
echo "# Append additional information of the BINARY_MASK binary resource (${symbol_name})" >> "${OFN}"
echo "file ( APPEND \${CMAKE_CURRENT_BINARY_DIR}/${symbol_name}.cpp \"" >> "${OFN}"
echo "int ${symbol_name}_count = ${all_count};" >> "${OFN}"
echo "int ${symbol_name}_length[] = {" >> "${OFN}"
for (( i=0; i<${all_count}; i++ )); do
echo " ${symbol_name}_${i}_length," >> "${OFN}"
done
echo "};" >> "${OFN}"
echo "unsigned char *${symbol_name}[] = {" >> "${OFN}"
for (( i=0; i<${all_count}; i++ )); do
echo " ${symbol_name}_${i}_," >> "${OFN}"
done
echo "};" >> "${OFN}"
echo "char const *${symbol_name}_files[] = {" >> "${OFN}"
local binary_filename=""
for binary_file_name in "${all_array_files[@]}"; do
echo " \\\"${binary_file_name}\\\"," >> "${OFN}"
done
echo "};" >> "${OFN}"
echo "\")" >> "${OFN}"
echo "set_source_files_properties ( \${CMAKE_CURRENT_BINARY_DIR}/${symbol_name}.cpp PROPERTIES GENERATED TRUE )" >> "${OFN}"
echo "list ( APPEND ${SOURCE_LIST_CPP} \${CMAKE_CURRENT_BINARY_DIR}/${symbol_name}.cpp )" >> "${OFN}"
else
echo >> "${OFN}"
echo "# BINARY_MASK file" >> "${OFN}"
echo "# No files match the mask: '${symbol_file_name}'" >> "${OFN}"
fi
fi # BINARY end
fi
done
# Generate cpp file for the BINARY_ARRAY
if [ -n "${binary_array_names}" ]; then
# echo "# ${binary_array_names[@]}" >> "${OFN}"
local test_first_iteration
local binary_array_name_count=0
local binary_array_name_test
local binary_array_name_first
local binary_array_name_second
echo >> "${OFN}"
echo "# Append additional information of the BINARY_ARRAY binary resource" >> "${OFN}"
echo "file ( APPEND \${CMAKE_CURRENT_BINARY_DIR}/binary_array.cpp \"" >> "${OFN}"
for binary_array_record in "${binary_array_names[@]}"; do
binary_array_name_split=(${binary_array_record//_[0-9]/ })
if [ ! "${binary_array_name_split[0]}" == "${binary_array_name_test}" ]; then
if [ -z "${test_first_iteration}" ]; then
test_first_iteration="done"
else
echo "int ${binary_array_name_test}_count = ${binary_array_name_count};" >> "${OFN}"
echo -e "${binary_array_name_first}" >> "${OFN}"
echo -e "};\n" >> "${OFN}"
echo -e "${binary_array_name_second}" >> "${OFN}"
echo -e "};\n" >> "${OFN}"
binary_array_name_count=0
fi
binary_array_name_test=${binary_array_name_split[0]};
binary_array_name_first="int ${binary_array_name_split[0]}_length[] = {"
binary_array_name_second="unsigned char *${binary_array_name_split[0]}[] = {"
fi
(( binary_array_name_count++ ))
binary_array_name_first+="\n ${binary_array_record}_length,"
binary_array_name_second+="\n ${binary_array_record}_,"
done
echo "int ${binary_array_name_test}_count = ${binary_array_name_count};" >> "${OFN}"
echo -e "${binary_array_name_first}" >> "${OFN}"
echo -e "};" >> "${OFN}"
echo -e "${binary_array_name_second}" >> "${OFN}"
echo -e "};" >> "${OFN}"
echo "\")" >> "${OFN}"
echo "set_source_files_properties ( \${CMAKE_CURRENT_BINARY_DIR}/binary_array.cpp PROPERTIES GENERATED TRUE )" >> "${OFN}"
echo "list ( APPEND ${SOURCE_LIST_CPP} \${CMAKE_CURRENT_BINARY_DIR}/binary_array.cpp )" >> "${OFN}"
fi
else
echo "File \"${parse_file}\" not found!"
fi
}
import_ext_parse()
{
    # Parse an U++ ".import"-style list file that declares files to add and
    # files to exclude, and print (stdout, space separated) the resulting
    # file list: every matched "added" file that is not also "excluded".
    #   $1 - path of the file to parse (a trailing ',' is stripped)
    # Section state machines (files_add / files_del):
    #   0 = before the section, 1 = inside the section,
    #   2 = on the section's closing line (the one containing ';'),
    #  -1 = section finished (never re-entered, since -1 is not > 0).
    # Relies on globals RE_IMPORT_ADD / RE_IMPORT_DEl and on helpers
    # string_remove_comma / string_remove_separators defined elsewhere.
    # NOTE(review): matching is done with `find` relative to the current
    # working directory — presumably the package directory; verify callers.
    local parse_file="$(string_remove_comma ${1})"
    local files_add=0
    local files_del=0
    local line=""
    local -a lines
    local -a added_files
    local -a excluded_files
    local -a result
    # Read the whole file, one element per line (newlines stripped).
    mapfile -t lines < "${parse_file}"
    for line in "${lines[@]}"; do
        # Remove DOS line ending
        line="${line//[$'\r']/}"
        # Begin of the add section
        if [[ "${line}" =~ $RE_IMPORT_ADD ]]; then
            files_add=1
        fi
        # Begin of the del section
        if [[ "${line}" =~ $RE_IMPORT_DEl ]]; then
            files_del=1
        fi
        if [ "${files_add}" -gt 0 ]; then
            # End of the add section (line with ';')
            if [[ ${line} =~ ';' ]]; then
                files_add=2
            fi
            # Remove ',' and ';'
            line="$(string_remove_separators "${line}")"
            # Convert line to array
            read -a line_array <<< "${line}"
            for list in "${line_array[@]}"; do
                list="$(string_remove_separators "${list}")"
                # Skip the section keyword itself; everything else is a
                # file name or a glob mask.
                if [[ ! "${list}" =~ $RE_IMPORT_ADD ]]; then
                    if [[ "${list}" =~ "*" ]]; then
                        # Glob mask: collect every matching file below cwd.
                        added_files+=("$(find -name "${list}")")
                    else
                        # Exact name: resolve via -samefile (also matches
                        # through links); errors are deliberately silenced.
                        added_files+=("$(find -nowarn -samefile "${list}" 2>/dev/null)")
                    fi
                fi
            done
            if [ "${files_add}" -eq 2 ]; then
                files_add=-1
            fi
        fi
        if [ "${files_del}" -gt 0 ]; then
            # End of the del section (line with ';')
            if [ "${files_del}" -gt 0 ] && [[ "${line}" =~ ';' ]]; then
                files_del=2
            fi
            # Remove ',' and ';'
            line="$(string_remove_separators "${line}")"
            # Convert line to array
            read -a line_array <<< "${line}"
            for list in "${line_array[@]}"; do
                list="$(string_remove_separators "${list}")"
                if [[ ! "${list}" =~ $RE_IMPORT_DEl ]]; then
                    if [[ "${list}" =~ "*" ]]; then
                        excluded_files+=("$(find -name "${list}")")
                    else
                        excluded_files+=("$(find -samefile "${list}" 2>/dev/null)")
                    fi
                fi
            done
            if [ "${files_del}" -eq 2 ]; then
                files_del=-1
            fi
        fi
    done
    # Set difference: keep each added file unless it is excluded
    # (substring match on space-delimited lists).
    for value in "${added_files[@]}"; do
        if [[ ! " ${excluded_files[@]} " =~ " ${value} " ]]; then
            result+=(${value})
        fi
    done
    echo "${result[@]}"
}
generate_cmake_header()
{
    # (Re)create the generated CMake file ${OFN} and write the common header:
    # a generation stamp, the minimum CMake version, an in-source build guard
    # and the default library output path.
    # Globals read: OFN (path of the generated file), GENERATE_DATE (stamp).
    # The '\${...}' sequences in the heredoc are escaped on purpose so they
    # end up as literal CMake variable references in the generated file.
    if [ -f "${OFN}" ]; then
        # Fix: the original used '""${OFN}""', which left the expansion
        # effectively unquoted (subject to word splitting and globbing).
        rm -f -- "${OFN}"
    fi

    cat > "${OFN}" << EOL
# ${OFN} generated ${GENERATE_DATE}
cmake_minimum_required ( VERSION 3.4.1 )
#################################################
# In-Source builds are strictly prohibited.
#################################################
if ( \${CMAKE_SOURCE_DIR} STREQUAL \${CMAKE_BINARY_DIR} )
  message ( FATAL_ERROR
  "\n****************************** ERROR ******************************\n"
  "In-source build are not allowed. "
  "Please do not polute the sources with binaries or any project unrelated files. "
  "To remove generated files run:\n"
  "'rm -rf CMakeCache.txt CMakeFiles'\n"
  "To build the project, please do the following:\n"
  "'mkdir build && cd build && cmake ..'"
  "\n****************************** ERROR ******************************\n")
endif()
# Set the default library directory to store built libraries
set ( LIBRARY_OUTPUT_PATH \${PROJECT_BINARY_DIR}/lib )
EOL
}
generate_cmake_from_upp()
{
    # Translate one Ultimate++ package description (.upp) into CMake
    # statements appended to the generated file ${OFN}.
    #   $1 - .upp file name (relative to the current working directory)
    #   $2 - object/package name used to derive the CMake target name
    #   $3 - non-empty when this package is the application's main target
    # Reads many globals defined elsewhere in the script (RE_* section
    # regexes, OFN, LIB_SUFFIX, HEADER_LIST, SOURCE_LIST_*, DEPEND_LIST,
    # LINK_LIST, COMPILE_FLAGS_LIST, PCH_* names, GENERATE_DEBUG) and the
    # helper functions get_section_name/get_section_line/target_parse/
    # list_parse/link_parse/add_all_uses/import_ext_parse/
    # binary_resource_parse/string_*. Resets the global INCLUDE_SYSTEM_LIST.
    local upp_ext="${1}"
    local object_name="${2}"
    local main_target="${3}"
    local USES=()
    local HEADER=()
    local SOURCE_C=()
    local SOURCE_CPP=()
    local SOURCE_RC=()
    local SOURCE_ICPP=()
    local OPTIONS=()
    local tmp=""
    local line=""
    local dir_array=()
    INCLUDE_SYSTEM_LIST=()
    if [ -f "${upp_ext}" ]; then
        local target_name="$(string_replace_dash "${object_name}")"
        local name=""
        local content=()
        local section_name=()
        local section_content=()
        # parse upp file
        # First pass: split the .upp file into named sections; each section's
        # lines are flattened into one quoted string in section_content.
        while read -r line; do
            # Replace '\' to '/'
            line="${line//\\//}"
            # Remove DOS line ending
            line="${line//[$'\r']/}"
            test_name="$(get_section_name "${line}")"
            if [ ! "${test_name}" == "" ]; then
                # A new section begins: flush the previous one first.
                if [ ! "${name}" == "" ]; then
                    section_name+=("${name}")
                    section_content+=("$(printf " \'%s\' " "${content[@]}")")
                    content=()
                fi
                name="${test_name}"
            fi
            section_line="$(get_section_line "${name}" "${line}")"
            if [ "${section_line}" == "" ]; then
                continue;
            fi
            content+=("${section_line}")
        done < "${upp_ext}"
        # Flush the last (still open) section.
        section_name+=("${name}")
        section_content+=("$(printf " \'%s\' " "${content[@]}")")
        # process sections
        # Second pass: dispatch each section to the matching emitter.
        for index in ${!section_name[@]}; do
            local section="${section_name[$index]}"
            if [[ "${section}" =~ $RE_SKIP_SECTIONS ]]; then
                continue;
            fi
            content=()
            # Re-split the flattened section string back into one word per
            # element (xargs -n 1 honors the quoting added above).
            while read word; do
                content+=("$word")
            done < <(echo "${section_content[$index]}" | xargs -n 1)
            # echo "section: ${section} (${#content[@]})"
            # echo "content: ${content[@]}"
            # echo "data : ${section_content[$index]}"
            # echo "===================================================================="
            # Parse target options
            # (only honored for the main target)
            if [ -n "${main_target}" ] && [[ "${section}" =~ $RE_TARGET ]]; then
                for LINE in "${content[@]}"; do
                    target_parse "target ${LINE}"
                done
            fi
            # Parse compiler options
            if [[ "${section}" =~ $RE_USES ]]; then
                for LINE in "${content[@]}"; do
                    # "(...);" form carries per-builder conditions and is
                    # handled by list_parse; plain names go straight to USES.
                    if [[ "${LINE:0:1}" == "(" ]] && [[ ${LINE} =~ ';' ]]; then
                        list_parse "uses${LINE}" ${target_name}_${DEPEND_LIST}
                    else
                        tmp="$(string_remove_separators "${LINE}")"
                        USES+=(${tmp})
                        add_all_uses "${tmp}"
                    fi
                done
            fi
            # Parse library list options
            if [[ "${section}" =~ $RE_LIBRARY ]] || [[ "${section}" =~ $RE_PKG_CONFIG ]] || [[ "${section}" =~ $RE_STATIC_LIBRARY ]]; then
                for LINE in "${content[@]}"; do
                    if [[ "${LINE:0:1}" == "(" ]] && [[ "${LINE}" =~ ';' ]]; then
                        if [[ "${section}" =~ $RE_PKG_CONFIG ]]; then
                            list_parse "pkg_config${LINE}" "${LINK_LIST}" "${target_name}"
                        else
                            list_parse "library${LINE}" "${LINK_LIST}" "${target_name}"
                        fi
                    else
                        if [[ "${section}" =~ $RE_PKG_CONFIG ]]; then
                            list_parse "${LINE}" "${LINK_LIST}" "${target_name}" "pkg_config"
                        else
                            list_parse "${LINE}" "${LINK_LIST}" "${target_name}" "append library"
                        fi
                    fi
                done
            fi
            # Parse options section
            if [[ "${section}" =~ $RE_OPTIONS ]]; then
                for LINE in "${content[@]}"; do
                    if [[ "${LINE:0:1}" == "(" ]] && [[ "${LINE}" =~ ';' ]]; then
                        list_parse "options${LINE}" "${COMPILE_FLAGS_LIST}" "${target_name}"
                    else
                        tmp="$(string_remove_separators "${LINE}")"
                        OPTIONS+=(${tmp})
                    fi
                done
            fi
            # Parse include options
            if [[ "${section}" =~ $RE_INCLUDE ]]; then
                for LINE in "${content[@]}"; do
                    LINE="$(string_remove_separators "${LINE}")"
                    INCLUDE_SYSTEM_LIST+=("${LINE}")
                done
            fi
            # Parse link options
            if [[ "${section}" =~ $RE_LINK ]]; then
                for LINE in "${content[@]}"; do
                    link_parse "link${LINE}"
                done
            fi
            # Parse files
            # Classify each listed file by extension into the proper list.
            if [[ "${section}" =~ $RE_FILES ]]; then
                local list=""
                local line_array=()
                for LINE in "${content[@]}"; do
                    # Skip lines with "separator" mark
                    if [[ "${LINE}" =~ $RE_SEPARATOR ]]; then
                        continue
                    fi
                    # Find precompiled header option
                    if [[ "${LINE}" =~ $RE_FILE_PCH ]] && [[ "${LINE}" =~ BUILDER_OPTION ]]; then
                        local pch_file=${LINE// */}
                        echo >> "${OFN}"
                        echo '# Precompiled headers file' >> "${OFN}"
                        echo "set ( ${PCH_FILE} "\${CMAKE_CURRENT_SOURCE_DIR}/${pch_file}" )" >> "${OFN}"
                    fi
                    # Split lines with charset, options, ...
                    # (keep only the file name, drop everything after the
                    # first space)
                    if [[ "${LINE}" =~ $RE_FILE_SPLIT ]]; then
                        LINE="${LINE// */}"
                    fi
                    if [[ "${LINE}" =~ $RE_IMPORT ]]; then
                        # ".import" entry: expand it to the real file list and
                        # remember the distinct directories for include paths.
                        line_array=("$(import_ext_parse "${LINE}")")
                        dir_array=("$(dirname ${line_array[@]} | sort -u)")
                    else
                        line_array+=(${LINE})
                    fi
                done
                for list in "${line_array[@]}"; do
                    list="$(string_remove_separators "${list}")"
                    if [[ "${list}" =~ $RE_FILE_EXCLUDE ]]; then
                        continue;
                    fi
                    if [ -d "${list}" ]; then
                        if [ "${GENERATE_DEBUG}" == "1" ]; then
                            echo "WARNING - skipping the directory \"${list}\". Directory can't be added to the source list."
                        fi
                    elif [ ! -f "${list}" ]; then
                        if [ "${GENERATE_DEBUG}" == "1" ]; then
                            echo "WARNING - file \"${list}\" doesn't exist! It was not added to the source list."
                        fi
                    else
                        if [[ "${list}" =~ $RE_C ]]; then # C/C++ source files
                            SOURCE_C+=(${list})
                        elif [[ "${list}" =~ $RE_CPP ]]; then # C/C++ source files
                            SOURCE_CPP+=(${list})
                        elif [[ "${list}" =~ $RE_RC ]]; then # Windows resource config files
                            SOURCE_RC+=(${list})
                        elif [[ "${list}" =~ $RE_ICPP ]]; then # icpp C/C++ source files
                            SOURCE_ICPP+=(${list})
                        elif [[ "${list}" =~ $RE_BRC ]]; then # BRC resource files
                            # NOTE(review): the command substitution runs
                            # binary_resource_parse and then executes its
                            # (expected empty) stdout — presumably intentional;
                            # verify the helper never prints anything.
                            $(binary_resource_parse "$list")
                            HEADER+=("${list}")
                        elif [[ "${list}" =~ $RE_FILE_DOT ]]; then # header files
                            HEADER+=(${list})
                        fi
                    fi
                done
            fi
        done
        # Create include directory list
        # ([ -n "${dir_array}" ] tests only the first element — enough here,
        # because dirname never produces an empty first entry)
        if [ -n "${dir_array}" ]; then
            echo >> "${OFN}"
            echo "include_directories (" >> "${OFN}"
            for list in "${dir_array[@]}"; do
                if [[ " ${list} " != " . " ]]; then
                    echo " ${list}" >> "${OFN}"
                fi
            done
            echo ")" >> "${OFN}"
        fi
        # Create project option definitions
        if [ -n "${OPTIONS}" ]; then
            echo >> "${OFN}"
            echo "add_definitions (" >> "${OFN}"
            for list in "${OPTIONS[@]}"; do
                echo "${list}" >> "${OFN}"
            done
            echo ")" >> "${OFN}"
        fi
        # Create header files list
        if [ -n "${HEADER}" ]; then
            echo >> "${OFN}"
            echo "list ( APPEND ${HEADER_LIST}" >> "${OFN}"
            for list in "${HEADER[@]}"; do
                echo " ${list}" >> "${OFN}"
            done
            echo ")" >> "${OFN}"
        fi
        # Create C source files list
        if [ -n "${SOURCE_C}" ]; then
            echo >> "${OFN}"
            echo "list ( APPEND ${SOURCE_LIST_C}" >> "${OFN}"
            for list in "${SOURCE_C[@]}"; do
                echo " ${list}" >> "${OFN}"
            done
            echo ")" >> "${OFN}"
        fi
        # Create CPP source files list
        if [ -n "${SOURCE_CPP}" ]; then
            echo >> "${OFN}"
            echo "list ( APPEND ${SOURCE_LIST_CPP}" >> "${OFN}"
            for list in "${SOURCE_CPP[@]}"; do
                echo " ${list}" >> "${OFN}"
            done
            echo ")" >> "${OFN}"
        fi
        # Create icpp source files list
        if [ -n "${SOURCE_ICPP}" ]; then
            echo >> "${OFN}"
            echo "list ( APPEND ${SOURCE_LIST_ICPP}" >> "${OFN}"
            for list in "${SOURCE_ICPP[@]}"; do
                echo " ${list}" >> "${OFN}"
            done
            echo ")" >> "${OFN}"
        fi
        # Create dependency list
        if [ -n "${USES}" ]; then
            echo >> "${OFN}"
            echo "list ( APPEND ${target_name}_${DEPEND_LIST}" >> "${OFN}"
            for list in "${USES[@]}"; do
                local dependency_name="$(string_replace_dash "${list}")"
                echo " ${dependency_name}${LIB_SUFFIX}" >> "${OFN}"
            done
            echo ")" >> "${OFN}"
        fi
        # Copy Windows resource config file
        # (scans the .rc file for an ICON entry; the 4th word of that entry
        # is the icon file name to copy alongside the .rc file)
        if [ -n "${main_target}" ] && [ -n "${SOURCE_RC}" ] ; then
            for list in "${SOURCE_RC[@]}"; do
                if [ -f "${list}" ]; then
                    echo >> "${OFN}"
                    echo "# Copy Windows resource config file to the main program build directory" >> "${OFN}"
                    local line_rc_params=()
                    while read line_rc; do
                        if [[ "${line_rc}" =~ ICON ]]; then
                            line_rc_params=(${line_rc})
                            echo "file ( COPY \"${list}\" DESTINATION \${PROJECT_BINARY_DIR}/\${CMAKE_PROJECT_NAME} )" >> "${OFN}"
                            echo "file ( COPY ${line_rc_params[3]} DESTINATION \${PROJECT_BINARY_DIR}/\${CMAKE_PROJECT_NAME} )" >> "${OFN}"
                            break
                        fi
                    done < "${list}"
                fi
            done
        fi
        # Emit the per-module library target, its link rule and the
        # precompiled-header plumbing into the generated CMake file.
        echo >> "${OFN}"
        echo "# Module properties" >> "${OFN}"
        echo "create_cpps_from_icpps()" >> "${OFN}"
        echo "set_source_files_properties ( \${$HEADER_LIST} PROPERTIES HEADER_FILE_ONLY ON )" >> "${OFN}"
        echo "add_library ( ${target_name}${LIB_SUFFIX} \${LIB_TYPE} \${$SOURCE_LIST_CPP} \${$SOURCE_LIST_C} \${$HEADER_LIST} )" >> "${OFN}"
        echo "target_include_directories ( ${target_name}${LIB_SUFFIX} PUBLIC \${${INCLUDE_LIST}} )" >> "${OFN}"
        echo "set_property ( TARGET ${target_name}${LIB_SUFFIX} APPEND PROPERTY COMPILE_OPTIONS \"\${${COMPILE_FLAGS_LIST}}\" )" >> "${OFN}"
        echo >> "${OFN}"
        echo "# Module link" >> "${OFN}"
        echo "if ( ${target_name}_${DEPEND_LIST} OR ${LINK_LIST} )" >> "${OFN}"
        echo " target_link_libraries ( ${target_name}${LIB_SUFFIX} \${${target_name}_${DEPEND_LIST}} \${${LINK_LIST}} )" >> "${OFN}"
        echo "endif()" >> "${OFN}"
        echo >> "${OFN}"
        echo '# Precompiled headers settings' >> "${OFN}"
        echo "get_directory_property ( ${PCH_COMPILE_DEFINITIONS} COMPILE_DEFINITIONS )" >> "${OFN}"
        echo "set_target_properties ( ${target_name}${LIB_SUFFIX} PROPERTIES ${COMPILE_FLAGS_LIST} \"\${${COMPILE_FLAGS_LIST}}\" )" >> "${OFN}"
        echo "set_target_properties ( ${target_name}${LIB_SUFFIX} PROPERTIES ${PCH_FILE} \"\${${PCH_FILE}}\" )" >> "${OFN}"
        echo "set_target_properties ( ${target_name}${LIB_SUFFIX} PROPERTIES ${PCH_INCLUDE_LIST} \"\${${INCLUDE_LIST}}\" )" >> "${OFN}"
        echo "set_target_properties ( ${target_name}${LIB_SUFFIX} PROPERTIES ${PCH_COMPILE_DEFINITIONS} \"\${${PCH_COMPILE_DEFINITIONS}}\" )" >> "${OFN}"
        echo >> "${OFN}"
        echo "list ( LENGTH ${PCH_FILE} ${PCH_FILE}_LENGTH )" >> "${OFN}"
        echo "if ( ${PCH_FILE}_LENGTH GREATER 1 )" >> "${OFN}"
        echo ' message ( FATAL_ERROR "Precompiled headers list can contain only one header file!" )' >> "${OFN}"
        echo 'endif()' >> "${OFN}"
        echo >> "${OFN}"
        echo "if ( ${PCH_FILE} AND DEFINED flagPCH )" >> "${OFN}"
        echo " get_filename_component ( PCH_NAME \${${PCH_FILE}} NAME )" >> "${OFN}"
        echo " set ( PCH_DIR \${PROJECT_PCH_DIR}/${target_name}${LIB_SUFFIX} )" >> "${OFN}"
        echo ' set ( PCH_HEADER ${PCH_DIR}/${PCH_NAME} )' >> "${OFN}"
        echo ' if ( ${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU" )' >> "${OFN}"
        echo ' if ( ${CMAKE_VERBOSE_MAKEFILE} EQUAL 1 )' >> "${OFN}"
        echo ' set ( PCH_INCLUDE_PARAMS " -H -Winvalid-pch -include ${PCH_HEADER}" )' >> "${OFN}"
        echo ' else()' >> "${OFN}"
        echo ' set ( PCH_INCLUDE_PARAMS " -Winvalid-pch -include ${PCH_HEADER}" )' >> "${OFN}"
        echo ' endif()' >> "${OFN}"
        echo ' endif()' >> "${OFN}"
        echo ' if ( ${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang" )' >> "${OFN}"
        echo ' set ( PCH_INCLUDE_PARAMS " -Winvalid-pch -include-pch ${PCH_HEADER}.pch" )' >> "${OFN}"
        echo ' endif()' >> "${OFN}"
        echo ' if ( MSVC )' >> "${OFN}"
        echo " set_target_properties ( ${target_name}${LIB_SUFFIX} PROPERTIES COMPILE_FLAGS \"-Yu\${PCH_NAME} -Fp\${PCH_HEADER}.pch\" )" >> "${OFN}"
        echo " set_source_files_properties ( \${$SOURCE_LIST_CPP} PROPERTIES COMPILE_FLAGS \"Yc\${PCH_NAME} -Fp\${PCH_HEADER}.pch\" )" >> "${OFN}"
        echo ' endif()' >> "${OFN}"
        echo ' if ( PCH_INCLUDE_PARAMS )' >> "${OFN}"
        echo " set_source_files_properties ( \${$SOURCE_LIST_CPP} PROPERTIES COMPILE_FLAGS \"\${PCH_INCLUDE_PARAMS}\" )" >> "${OFN}"
        echo ' endif()' >> "${OFN}"
        echo 'endif()' >> "${OFN}"
        echo >> "${OFN}"
    fi
}
generate_cmake_file()
{
    # Generate a per-package CMakeLists.txt from an Ultimate++ package file.
    #   $1 - path of the .upp file (a trailing ',' is tolerated and stripped)
    #   $2 - object/package name passed through to generate_cmake_from_upp
    #   $3 - optional compile definitions; "flagMAIN" marks the main target
    # Globals read: GENERATE_VERBOSE, OFN (output file written inside the
    # package directory).
    local param1="$(string_remove_comma "${1}")"
    local param2="$(string_remove_comma "${2}")"
    local cur_dir="$(pwd)"
    local sub_dir="$(dirname "${param1}")"
    local upp_name="$(basename "${param1}")"
    local object_name="${param2}"
    local cmake_flags="${3}"

    if [ "${GENERATE_VERBOSE}" == "1" ]; then
        echo "full path: ${cur_dir}"
        echo "sub_dir: ${sub_dir}"
        echo "upp_name: ${upp_name}"
        echo "object_name: ${object_name}"
    fi

    if [ -f "${sub_dir}/${upp_name}" ]; then
        # Work inside the package directory; bail out if it is unreachable
        # (the original ran the generators in the wrong directory on failure).
        cd "${sub_dir}" || { echo "ERROR: can't enter directory \"${sub_dir}\"!"; return 1; }
        generate_cmake_header
        if [ -n "${cmake_flags}" ]; then
            echo >> "${OFN}"
            echo "# Module definitions" >> "${OFN}"
            # Keep the expansion inside the quotes — the original's
            # '"...( "${cmake_flags}" )..."' left it unquoted and subject
            # to word splitting and globbing.
            echo "add_definitions ( ${cmake_flags} )" >> "${OFN}"
        fi
        local main_target=""
        if [[ "${cmake_flags}" =~ (flagMAIN) ]]; then
            main_target="true"
        fi
        generate_cmake_from_upp "${upp_name}" "${object_name}" "${main_target}"
        cd "${cur_dir}" || return 1
    else
        echo "ERROR: file \"${sub_dir}/${upp_name}\" doesn't exist!"
    fi

    if [ "${GENERATE_VERBOSE}" == "1" ]; then
        echo "--------------------------------------------------------------------"
    fi
}
get_upp_to_process()
{
    # Print (stdout) the first package from the global UPP_ALL_USES array
    # that is not yet listed in the global UPP_ALL_USES_DONE array.
    # Prints nothing when every package has already been processed.
    local -a pending=()
    local item=""
    local done_item=""
    local seen=""
    for item in "${UPP_ALL_USES[@]}"; do
        seen=""
        for done_item in "${UPP_ALL_USES_DONE[@]}"; do
            if [ "${item}" = "${done_item}" ]; then
                seen="yes"
                break
            fi
        done
        if [ -z "${seen}" ]; then
            pending+=("${item}")
        fi
    done
    if [ -n "${pending}" ]; then
        echo "${pending[0]}"
    fi
}
generate_package_file()
{
    # Create a source archive "<basename PROJECT_NAME>.tar.bz2" in the
    # current directory, containing CMakeLists.txt, the whole project tree
    # (minus .svn/.git), the U++ config headers and every used package from
    # UPP_SRC_DIR / PROJECT_EXTRA_INCLUDE_DIR.
    # Globals read: PROJECT_NAME (required), UPP_SRC_DIR,
    # UPP_ALL_USES_DONE[], PROJECT_EXTRA_INCLUDE_DIR.
    # Exits the whole script with status 1 when PROJECT_NAME is unset.
    if [ -z "${PROJECT_NAME}" ]; then
        echo "ERROR - BASH variable \$PROJECT_NAME is not defined! Can't create archive package!"
        exit 1
    else
        echo -n "Creating archive "
        # Unique sorted list of processed packages; kept as a scalar that is
        # word-split in the loop below (package names contain no spaces).
        # (The original declared this 'local -a' but assigned a scalar.)
        local sorted_UPP_ALL_USES_DONE="$(printf "%s\n" "${UPP_ALL_USES_DONE[@]}" | sort -u)"
        local package_src_name_archive="$(basename "${PROJECT_NAME}").tar.bz2"
        local package_src_name_archive_list="package_archive_list.txt"
        echo "CMakeLists.txt" > "${package_src_name_archive_list}"
        # List the project files. The explicit '-print' is attached to the
        # '-type f' branch: without any action, find's implicit -print wraps
        # the WHOLE expression, so the pruned .svn/.git directory entries
        # themselves were printed and tar then archived them recursively.
        find -H "$(dirname "${PROJECT_NAME}")" -type d '(' -name .svn -o -name .git ')' -prune -o -type f -print >> "${package_src_name_archive_list}"
        echo "${UPP_SRC_DIR}/uppconfig.h" >> "${package_src_name_archive_list}"
        echo "${UPP_SRC_DIR}/guiplatform.h" >> "${package_src_name_archive_list}"
        for pkg_name in ${sorted_UPP_ALL_USES_DONE}; do
            if [ -d "${UPP_SRC_DIR}/${pkg_name}" ]; then
                find "${UPP_SRC_DIR}/${pkg_name}" -type f -print >> "${package_src_name_archive_list}"
            elif [ -d "${PROJECT_EXTRA_INCLUDE_DIR}/${pkg_name}" ]; then
                find "${PROJECT_EXTRA_INCLUDE_DIR}/${pkg_name}" -type f -print >> "${package_src_name_archive_list}"
            fi
        done
        tar -c -j -f "${package_src_name_archive}" -T "${package_src_name_archive_list}"
        rm -f -- "${package_src_name_archive_list}"
        echo "... DONE"
    fi
}
generate_main_cmake_file()
{
local main_target="${1}"
local main_definitions="${2//\"}"
local main_target_dirname="$(dirname "${1}")"
local main_target_basename="$(basename "${1}")"
local main_target_name="${main_target_basename%%.*}"
if [ ! -f "${main_target}" ]; then
echo "Usage: generate_main_cmake_file <full path to the ultimate++ project file> [build flags]"
echo
echo "ERROR - Target \"${main_target}\" doesn't exist!"
exit 1
fi
test_required_binaries
generate_cmake_file "${main_target}" "${main_target_name}" "-DflagMAIN"
generate_cmake_header
if [ -z "${GENERATE_NOT_Cxx}" ] || [ "${GENERATE_NOT_Cxx}" != "1" ]; then
main_definitions+=" -DflagGNUC14"
fi
if [ -z "${GENERATE_NOT_PARALLEL}" ] || [ "${GENERATE_NOT_PARALLEL}" != "1" ]; then
main_definitions+=" -DflagMP"
fi
if [ -z "${GENERATE_NOT_PCH}" ] || [ "${GENERATE_NOT_PCH}" != "1" ]; then
main_definitions+=" -DflagPCH"
fi
REMOVE_UNUSED_CODE="OFF"
if [ -z "${GENERATE_NOT_REMOVE_UNUSED_CODE}" ] || [ "${GENERATE_NOT_REMOVE_UNUSED_CODE}" != "1" ]; then
REMOVE_UNUSED_CODE="ON"
fi
if [ -n "${PROJECT_EXTRA_INCLUDE_DIR}" ]; then
PROJECT_EXTRA_INCLUDE="${PROJECT_EXTRA_INCLUDE_DIR}"
if [ "${PROJECT_EXTRA_INCLUDE_SUBDIRS}" == "1" ]; then
subdirs="$(ls -d -- ${PROJECT_EXTRA_INCLUDE_DIR}/*)"
PROJECT_EXTRA_INCLUDE="${PROJECT_EXTRA_INCLUDE} ${subdirs//$'\n'/$' '}"
fi
fi
# Begin of the cat (CMakeFiles.txt)
cat >> "${OFN}" << EOL
# Overwrite cmake verbose makefile output
# (e.g. do not generate cmake verbose makefile output even when the debug flag is set)
# not set - do not overwrite settings
# 0 - do not generate cmake verbose makefile output
# 1 - always generate cmake verbose makefile output
set ( CMAKE_VERBOSE_OVERWRITE ${CMAKE_VERBOSE_OVERWRITE} )
# Project name
project ( ${main_target_name} )
# Set the project common path
set ( UPP_SOURCE_DIRECTORY ${UPP_SRC_DIR} )
set ( UPP_EXTRA_INCLUDE ${PROJECT_EXTRA_INCLUDE} )
set ( PROJECT_INC_DIR \${PROJECT_BINARY_DIR}/inc )
set ( PROJECT_PCH_DIR \${PROJECT_BINARY_DIR}/pch )
# Set the default include directory for the whole project
include_directories ( BEFORE \${UPP_SOURCE_DIRECTORY} )
include_directories ( BEFORE \${PROJECT_INC_DIR} \${UPP_EXTRA_INCLUDE} )
include_directories ( BEFORE \${CMAKE_CURRENT_SOURCE_DIR} )
EOL
# End of the cat (CMakeFiles.txt)
# include directories relevant to the package
local include_dirname="${main_target_dirname}"
while [ ! "${include_dirname}" == "." ]; do
echo "include_directories ( BEFORE \${CMAKE_SOURCE_DIR}/${include_dirname} )" >> "${OFN}"
include_dirname="$(dirname "${include_dirname}")"
done
# Begin of the cat (CMakeFiles.txt)
cat >> "${OFN}" << EOL
# Set the default path for built executables to the bin directory
set ( EXECUTABLE_OUTPUT_PATH \${PROJECT_BINARY_DIR}/bin )
# Project definitions
add_definitions ( ${main_definitions} )
# Option to distinguish whether to build binary with removed unused code and functions
option ( REMOVE_UNUSED_CODE "Build binary with removed unused code and functions." ${REMOVE_UNUSED_CODE} )
# Option to enable static analysis with include-what-you-use
option ( ENABLE_INCLUDE_WHAT_YOU_USE "Enable static analysis with include-what-you-use" OFF )
if ( ENABLE_INCLUDE_WHAT_YOU_USE )
find_program( INCLUDE_WHAT_YOU_USE include-what-you-use )
if ( INCLUDE_WHAT_YOU_USE )
set( CMAKE_CXX_INCLUDE_WHAT_YOU_USE \${INCLUDE_WHAT_YOU_USE} )
else()
message( WARNING "include-what-you-use requested but executable not found" )
set( CMAKE_CXX_INCLUDE_WHAT_YOU_USE "" CACHE STRING "" FORCE )
endif()
endif()
# Option to enable static analysis with cppcheck
option ( ENABLE_CPPCHECK "Enable static analysis with cppcheck" OFF )
if ( ENABLE_CPPCHECK )
find_program( CPPCHECK cppcheck)
if ( CPPCHECK )
set( CMAKE_CXX_CPPCHECK
\${CPPCHECK}
--suppress=missingInclude
--enable=all
--inline-suppr
--inconclusive
-i
\${CMAKE_SOURCE_DIR}/imgui/lib )
else()
message( WARNING "cppcheck requested but executable not found" )
set( CMAKE_CXX_CPPCHECK "" CACHE STRING "" FORCE )
endif()
endif()
# Option to enable static analysis with clang-tidy
option ( ENABLE_CLANG_TIDY "Run clang-tidy with the compiler." OFF )
set( CLANG_TIDY_OPTIONS "" CACHE STRING "Extra clang-tidy options separated by \`;\`" )
if ( ENABLE_CLANG_TIDY )
if ( CMake_SOURCE_DIR STREQUAL CMake_BINARY_DIR )
message ( FATAL_ERROR "ENABLE_CLANG_TIDY requires an out-of-source build!" )
endif()
if ( CMAKE_VERSION VERSION_LESS 3.5 )
message ( WARNING "ENABLE_CLANG_TIDY is ON but CMAKE_VERSION is less than 3.5!" )
set( CMAKE_C_CLANG_TIDY "" CACHE STRING "" FORCE )
set( CMAKE_CXX_CLANG_TIDY "" CACHE STRING "" FORCE )
else()
find_program ( CLANG_TIDY_COMMAND NAMES clang-tidy )
if ( NOT CLANG_TIDY_COMMAND )
message ( WARNING "ENABLE_CLANG_TIDY is ON but clang-tidy is not found!" )
set( CMAKE_C_CLANG_TIDY "" CACHE STRING "" FORCE )
set( CMAKE_CXX_CLANG_TIDY "" CACHE STRING "" FORCE )
else()
set( CMAKE_C_CLANG_TIDY "\${CLANG_TIDY_COMMAND}" \${CLANG_TIDY_OPTIONS} )
set( CMAKE_CXX_CLANG_TIDY "\${CLANG_TIDY_COMMAND}" \${CLANG_TIDY_OPTIONS} )
endif()
endif()
endif()
# Extra compilation and link flags
set ( PROJECT_EXTRA_COMPILE_FLAGS "${PROJECT_EXTRA_COMPILE_FLAGS}" )
message ( STATUS "Extra compilation flags: \${PROJECT_EXTRA_COMPILE_FLAGS}" )
set ( PROJECT_EXTRA_LINK_FLAGS "${PROJECT_EXTRA_LINK_FLAGS}" )
message ( STATUS "Extra link flags: \${PROJECT_EXTRA_LINK_FLAGS}" )
# Remove flags which are set by CMake
remove_definitions( -DflagLINUX )
remove_definitions( -DflagBSD )
remove_definitions( -DflagFREEBSD )
remove_definitions( -DflagNETBSD )
remove_definitions( -DflagOPENBSD )
remove_definitions( -DflagSOLARIS )
remove_definitions( -DflagOSX )
remove_definitions( -DflagDRAGONFLY )
remove_definitions( -DflagANDROID )
# Read compiler definitions - used to set appropriate modules
get_directory_property ( FlagDefs COMPILE_DEFINITIONS )
# Platform flags settings
if ( WIN32 )
remove_definitions( -DflagPOSIX )
remove_definitions( -DflagOSX11 )
if ( NOT "\${FlagDefs}" MATCHES "flagWIN32(;|$)" )
add_definitions ( -DflagWIN32 )
endif()
if ( CMAKE_SYSTEM_VERSION STREQUAL "10.0" AND NOT "\${FlagDefs}" MATCHES "flagWIN10(;|$)" )
add_definitions ( -DflagWIN10 )
endif()
else()
remove_definitions( -DflagWIN32 )
if ( NOT "\${FlagDefs}" MATCHES "flagSHARED(;|$)" )
add_definitions ( -DflagSHARED )
endif()
if ( NOT "\${FlagDefs}" MATCHES "POSIX(;|$)" )
add_definitions ( -DflagPOSIX )
endif()
if ( \${CMAKE_SYSTEM_NAME} STREQUAL "Linux" AND NOT "\${FlagDefs}" MATCHES "flagLINUX(;|$)" )
add_definitions ( -DflagLINUX )
endif()
if ( \${CMAKE_SYSTEM_NAME} STREQUAL "BSD" AND NOT "\${FlagDefs}" MATCHES "flagBSD(;|$)" )
add_definitions ( -DflagBSD )
endif()
if ( \${CMAKE_SYSTEM_NAME} STREQUAL "FreeBSD" AND NOT "\${FlagDefs}" MATCHES "flagFREEBSD(;|$)" )
add_definitions ( -DflagFREEBSD )
if ( NOT "\${FlagDefs}" MATCHES "flagBSD(;|$)" )
add_definitions ( -DflagBSD )
endif()
endif()
if ( \${CMAKE_SYSTEM_NAME} STREQUAL "NetBSD" AND NOT "\${FlagDefs}" MATCHES "flagNETBSD(;|$)" )
add_definitions ( -DflagNETBSD )
if ( NOT "\${FlagDefs}" MATCHES "flagBSD(;|$)" )
add_definitions ( -DflagBSD )
endif()
endif()
if ( \${CMAKE_SYSTEM_NAME} STREQUAL "OpenBSD" AND NOT "\${FlagDefs}" MATCHES "flagOPENBSD(;|$)" )
add_definitions ( -DflagOPENBSD )
if ( NOT "\${FlagDefs}" MATCHES "flagBSD(;|$)" )
add_definitions ( -DflagBSD )
endif()
endif()
if ( \${CMAKE_SYSTEM_NAME} STREQUAL "Solaris" AND NOT "\${FlagDefs}" MATCHES "flagSOLARIS(;|$)" )
add_definitions ( -DflagSOLARIS )
set ( REMOVE_UNUSED_CODE OFF )
endif()
if ( \${CMAKE_SYSTEM_NAME} STREQUAL "SunOS" AND NOT "\${FlagDefs}" MATCHES "flagSOLARS(;|$)" )
add_definitions ( -DflagSOLARIS )
set ( REMOVE_UNUSED_CODE OFF )
endif()
if ( \${CMAKE_SYSTEM_NAME} STREQUAL "Darwin" AND NOT "\${FlagDefs}" MATCHES "flagOSX(;|$)" )
add_definitions ( -DflagOSX )
endif()
if ( \${CMAKE_SYSTEM_NAME} STREQUAL "DragonFly" AND NOT "\${FlagDefs}" MATCHES "flagDRAGONFLY(;|$)" )
add_definitions ( -DflagDRAGONFLY )
endif()
if ( \${CMAKE_SYSTEM_NAME} STREQUAL "Android" AND NOT "\${FlagDefs}" MATCHES "flagANDROID(;|$)" )
add_definitions ( -DflagANDROID )
endif()
endif()
get_directory_property ( FlagDefs COMPILE_DEFINITIONS )
# Check supported compilation architecture environment
if ( "\${FlagDefs}" MATCHES "flagGCC32(;|$)" OR NOT CMAKE_SIZEOF_VOID_P EQUAL 8 )
set ( STATUS_COMPILATION "32" )
set ( EXTRA_GCC_FLAGS "\${EXTRA_GCC_FLAGS} -m32 -msse2 \${PROJECT_EXTRA_COMPILE_FLAGS}" )
else()
set ( STATUS_COMPILATION "64" )
set ( EXTRA_GCC_FLAGS "\${EXTRA_GCC_FLAGS} -m64 \${PROJECT_EXTRA_COMPILE_FLAGS}" )
set ( MSVC_ARCH "X64" )
endif()
message ( STATUS "Build compilation: \${STATUS_COMPILATION} bits" )
# Set GCC builder flag
if ( \${CMAKE_CXX_COMPILER_ID} MATCHES "GNU" )
set ( CMAKE_COMPILER_IS_GNUCC TRUE )
if ( "\${FlagDefs}" MATCHES "flagGNUC14(;|$)" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9 )
message ( FATAL_ERROR "GNU GCC version 4.9+ is required to use -std=c++14 parameter!" )
endif()
remove_definitions ( -DflagMSC )
remove_definitions ( -DflagCLANG )
if ( NOT "\${FlagDefs}" MATCHES "flagGCC(;|$)" )
add_definitions ( -DflagGCC )
endif()
get_directory_property ( FlagDefs COMPILE_DEFINITIONS )
endif()
# Set CLANG builder flag
if ( \${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang" )
set ( CMAKE_COMPILER_IS_CLANG TRUE )
set ( EXTRA_GCC_FLAGS "\${EXTRA_GCC_FLAGS} -Wno-logical-op-parentheses" )
remove_definitions ( -DflagMSC )
remove_definitions ( -DflagGCC )
if ( NOT "\${FlagDefs}" MATCHES "flagCLANG(;|$)" )
add_definitions ( -DflagCLANG )
endif()
get_directory_property ( FlagDefs COMPILE_DEFINITIONS )
endif()
# Set MSVC builder flags
if ( \${CMAKE_CXX_COMPILER_ID} STREQUAL "MSVC" )
remove_definitions ( -DflagGCC )
remove_definitions ( -DflagCLANG )
if ( NOT "\${FlagDefs}" MATCHES "flagUSEMALLOC(;|$)" )
add_definitions ( -DflagUSEMALLOC )
endif()
if ( NOT "\${FlagDefs}" MATCHES "flagMSC(;|$)" )
add_definitions ( -DflagMSC )
endif()
if ( \${MSVC_VERSION} EQUAL 1200 )
add_definitions ( -DflagMSC6\${MSVC_ARCH} )
endif()
if ( \${MSVC_VERSION} EQUAL 1300 OR \${MSVC_VERSION} EQUAL 1310)
add_definitions ( -DflagMSC7\${MSVC_ARCH} )
add_definitions ( -DflagMSC71\${MSVC_ARCH} )
endif()
if ( \${MSVC_VERSION} EQUAL 1400 )
add_definitions ( -DflagMSC8\${MSVC_ARCH} )
endif()
if ( \${MSVC_VERSION} EQUAL 1500 )
add_definitions ( -DflagMSC9\${MSVC_ARCH} )
endif()
if ( \${MSVC_VERSION} EQUAL 1600 )
add_definitions ( -DflagMSC10\${MSVC_ARCH} )
endif()
if ( \${MSVC_VERSION} EQUAL 1700 )
add_definitions ( -DflagMSC11\${MSVC_ARCH} )
endif()
if ( \${MSVC_VERSION} EQUAL 1800 )
add_definitions ( -DflagMSC12\${MSVC_ARCH} )
endif()
if ( \${MSVC_VERSION} EQUAL 1900 )
add_definitions ( -DflagMSC14\${MSVC_ARCH} )
endif()
if ( (\${MSVC_VERSION} GREATER_EQUAL 1910) AND (\${MSVC_VERSION} LESS_EQUAL 1919) )
add_definitions ( -DflagMSC15\${MSVC_ARCH} )
endif()
if ( (\${MSVC_VERSION} GREATER_EQUAL 1920) AND (\${MSVC_VERSION} LESS_EQUAL 1929) )
add_definitions ( -DflagMSC16\${MSVC_ARCH} )
endif()
if ( "\${FlagDefs}" MATCHES "flagMP(;|$)" AND NOT \${MSVC_VERSION} LESS 1400 )
set ( EXTRA_MSVC_FLAGS "\${EXTRA_MSVC_FLAGS} -MP" )
endif()
get_directory_property ( FlagDefs COMPILE_DEFINITIONS )
endif()
# Set Intel builder flag
if ( \${CMAKE_CXX_COMPILER_ID} STREQUAL "Intel" AND NOT "\${FlagDefs}" MATCHES "flagINTEL(;|$)" )
add_definitions ( -DflagINTEL )
get_directory_property ( FlagDefs COMPILE_DEFINITIONS )
endif()
# Set link directories on BSD systems
if ( \${CMAKE_SYSTEM_NAME} MATCHES BSD )
link_directories ( /usr/local/lib )
endif()
# Set debug/release compiler options
if ( "\${FlagDefs}" MATCHES "flagDEBUG(;|$)" )
set ( CMAKE_VERBOSE_MAKEFILE 1 )
set ( CMAKE_BUILD_TYPE DEBUG )
add_definitions ( -D_DEBUG )
set ( EXTRA_GCC_FLAGS "\${EXTRA_GCC_FLAGS} -O0" )
if ( NOT "\${FlagDefs}" MATCHES "flagDEBUG(;|$)" )
add_definitions ( -DflagDEBUG )
endif()
if ( MSVC )
if ( "\${FlagDefs}" MATCHES "flagMSC(8|9|10|11|12|14|15|16|17|19)" OR "\${FlagDefs}" MATCHES "flagMSC(8|9|10|11|12|14|15|16|17|19)X64" )
set ( CMAKE_EXE_LINKER_FLAGS "\${CMAKE_EXE_LINKER_FLAGS} -debug -OPT:NOREF" )
else()
set ( CMAKE_EXE_LINKER_FLAGS "\${CMAKE_EXE_LINKER_FLAGS} -incremental:yes -debug -OPT:NOREF" )
endif()
endif()
get_directory_property ( FlagDefs COMPILE_DEFINITIONS )
else()
set ( CMAKE_VERBOSE_MAKEFILE 0 )
set ( CMAKE_BUILD_TYPE RELEASE )
add_definitions ( -D_RELEASE )
set ( EXTRA_GCC_FLAGS "\${EXTRA_GCC_FLAGS} -O2" )
set ( EXTRA_MSVC_FLAGS "\${EXTRA_MSVC_FLAGS} -GS-" )
if ( NOT "\${FlagDefs}" MATCHES "flagRELEASE(;|$)" )
add_definitions ( -DflagRELEASE )
endif()
if ( MSVC )
if ( "\${FlagDefs}" MATCHES "flagMSC(8|9|10|11|12|14|15|16|17|19)" OR "\${FlagDefs}" MATCHES "flagMSC(8|9|10|11|12|14|15|16|17|19)X64" )
set ( CMAKE_EXE_LINKER_FLAGS "\${CMAKE_EXE_LINKER_FLAGS} -release -OPT:REF,ICF" )
else()
set ( CMAKE_EXE_LINKER_FLAGS "\${CMAKE_EXE_LINKER_FLAGS} -incremental:no -release -OPT:REF,ICF" )
endif()
endif()
get_directory_property ( FlagDefs COMPILE_DEFINITIONS )
endif()
message ( STATUS "Build type: " \${CMAKE_BUILD_TYPE} )
if ( REMOVE_UNUSED_CODE AND ( CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_CLANG ) )
message ( STATUS "Build with remove unused code: TRUE" )
set ( EXTRA_GCC_FLAGS "\${EXTRA_GCC_FLAGS} -ffunction-sections -fdata-sections" )
set ( CMAKE_EXE_LINKER_FLAGS "\${CMAKE_EXE_LINKER_FLAGS} -Wl,-s,--gc-sections" )
else()
message ( STATUS "Build with remove unused code: FALSE" )
endif()
if ( CMAKE_VERBOSE_OVERWRITE EQUAL 0 OR CMAKE_VERBOSE_OVERWRITE EQUAL 1 )
set ( CMAKE_VERBOSE_MAKEFILE \${CMAKE_VERBOSE_OVERWRITE} )
endif()
if ( "\${FlagDefs}" MATCHES "flagDEBUG_MINIMAL(;|$)" )
if ( NOT MINGW )
set ( EXTRA_GCC_FLAGS "\${EXTRA_GCC_FLAGS} -ggdb" )
endif()
set ( EXTRA_GCC_FLAGS "\${EXTRA_GCC_FLAGS} -g1" )
set ( EXTRA_MSVC_FLAGS "\${EXTRA_MSVC_FLAGS} -Zd" )
endif()
if ( "\${FlagDefs}" MATCHES "flagDEBUG_FULL(;|$)" )
if ( NOT MINGW )
set ( EXTRA_GCC_FLAGS "\${EXTRA_GCC_FLAGS} -ggdb" )
endif()
set ( EXTRA_GCC_FLAGS "\${EXTRA_GCC_FLAGS} -g2" )
set ( EXTRA_MSVC_FLAGS "\${EXTRA_MSVC_FLAGS} -Zi" )
endif()
# Set static/shared compiler options
if ( "\${FlagDefs}" MATCHES "flagSO(;|$)" )
set ( BUILD_SHARED_LIBS ON )
set ( LIB_TYPE SHARED )
if ( NOT "\${FlagDefs}" MATCHES "flagSHARED(;|$)" )
add_definitions ( -DflagSHARED )
get_directory_property ( FlagDefs COMPILE_DEFINITIONS )
endif()
endif()
if ( "\${FlagDefs}" MATCHES "flagSHARED(;|$)" )
set ( STATUS_SHARED "TRUE" )
set ( EXTRA_GXX_FLAGS "\${EXTRA_GXX_FLAGS} -fuse-cxa-atexit" )
else()
set ( STATUS_SHARED "FALSE" )
set ( BUILD_SHARED_LIBS OFF )
set ( LIB_TYPE STATIC )
set ( EXTRA_GCC_FLAGS "\${EXTRA_GCC_FLAGS} -static -fexceptions" )
if ( MINGW AND WIN32 AND "\${CMAKE_HOST_WIN32}" STREQUAL "")
# This link options are put at the end of link command. Required for MinGW cross compilation.
# There can be an error: "rsrc merge failure: duplicate leaf: type: 10 (VERSION) name: 1 lang: 409" => it is OK, win32 version information of libwinpthread-1 is skipped
set ( CMAKE_CXX_STANDARD_LIBRARIES "\${CMAKE_CXX_STANDARD_LIBRARIES} -Wl,-Bstatic,--whole-archive -lpthread -Wl,--no-whole-archive" )
# This link options are put at the beginning of link command.
# Disadvantage of using linker flags => win32 version information of libwinpthread-1 are used in the output binary instead of win32 version information of main target
#set ( CMAKE_EXE_LINKER_FLAGS "\${CMAKE_EXE_LINKER_FLAGS} -Wl,-Bstatic,--whole-archive -lpthread -Wl,--no-whole-archive" )
endif()
endif()
message ( STATUS "Build with flagSHARED: \${STATUS_SHARED}" )
# Precompiled headers support
if ( "\${FlagDefs}" MATCHES "flagPCH(;|$)" )
if ( CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_CLANG )
if ( \${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.4 )
message ( WARNING
"Precompiled headers are introduced with GCC 3.4.\n"
"No support of the PCH in any earlier releases. (current version \${CMAKE_CXX_COMPILER_VERSION})." )
remove_definitions ( -DflagPCH )
endif()
if ( \${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5 )
message ( WARNING
"There are some problems with precompiled headers in Clang version less 3.5.\n"
"No support of the PCH in any earlier releases. (current version \${CMAKE_CXX_COMPILER_VERSION})." )
remove_definitions ( -DflagPCH )
endif()
else()
remove_definitions ( -DflagPCH )
endif()
get_directory_property ( FlagDefs COMPILE_DEFINITIONS )
endif()
if ( "\${FlagDefs}" MATCHES "flagPCH(;|$)" )
message ( STATUS "Build with flagPCH: TRUE" )
else()
message ( STATUS "Build with flagPCH: FALSE" )
endif()
# Set compiler options
get_directory_property ( FlagDefs COMPILE_DEFINITIONS )
if ( CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_CLANG )
if ( "\${FlagDefs}" MATCHES "flagGNUC14(;|$)" )
set ( EXTRA_GXX_FLAGS "\${EXTRA_GXX_FLAGS} -std=c++14" )
endif()
if ( CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.9 OR CMAKE_COMPILER_IS_CLANG )
set ( EXTRA_GXX_FLAGSS "\${EXTRA_GXX_FLAGS} -fdiagnostics-color")
endif()
if ( MINGW )
# Set the minimum supported (API) version to Windows 7
# add_definitions(-DWINVER=0x0601)
# add_definitions(-D_WIN32_WINNT=0x0601)
# get_directory_property ( FlagDefs COMPILE_DEFINITIONS )
if ( "\${FlagDefs}" MATCHES "flagDLL(;|$)" )
set ( BUILD_SHARED_LIBS ON )
set ( CMAKE_EXE_LINKER_FLAGS "\${CMAKE_EXE_LINKER_FLAGS} -shared" )
string ( REGEX REPLACE "-static " "" CMAKE_EXE_LINKER_FLAGS \${CMAKE_EXE_LINKER_FLAGS} )
endif()
if ( "\${FlagDefs}" MATCHES "flagGUI(;|$)" )
list ( APPEND main_${LINK_LIST} mingw32 )
endif()
# The workaround to avoid 'error: duplicate symbol: std::__throw_bad_alloc()'
if ( CMAKE_COMPILER_IS_CLANG AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11.0 )
add_definitions ( -DflagUSEMALLOC )
get_directory_property ( FlagDefs COMPILE_DEFINITIONS )
endif()
if ( CMAKE_COMPILER_IS_GNUCC )
# The optimalization might be broken on MinGW - remove optimalization flag (cross compile).
#string ( REGEX REPLACE "-O2" "" EXTRA_GCC_FLAGS \${EXTRA_GCC_FLAGS} )
if( "\${FlagDefs}" MATCHES "flagGUI(;|$)" )
list ( APPEND main_${LINK_LIST} mingw32 )
set ( EXTRA_GCC_FLAGS "\${EXTRA_GCC_FLAGS} -mwindows" )
else()
set ( EXTRA_GCC_FLAGS "\${EXTRA_GCC_FLAGS} -mconsole" )
endif()
if( "\${FlagDefs}" MATCHES "flagMT(;|$)" )
set ( EXTRA_GCC_FLAGS "\${EXTRA_GCC_FLAGS} -mthreads" )
endif()
endif()
endif()
set ( CMAKE_CXX_FLAGS_\${CMAKE_BUILD_TYPE} "\${CMAKE_CXX_FLAGS_\${BUILD_TYPE}} \${EXTRA_GXX_FLAGS} \${EXTRA_GCC_FLAGS}" )
set ( CMAKE_C_FLAGS_\${CMAKE_BUILD_TYPE} "\${CMAKE_C_FLAGS_\${BUILD_TYPE}} \${EXTRA_GCC_FLAGS}" )
set ( CMAKE_CXX_ARCHIVE_CREATE "<CMAKE_AR> rs <TARGET> <LINK_FLAGS> <OBJECTS>" )
set ( CMAKE_CXX_ARCHIVE_APPEND "<CMAKE_AR> rs <TARGET> <LINK_FLAGS> <OBJECTS>" )
set ( CMAKE_C_ARCHIVE_CREATE "<CMAKE_AR> rs <TARGET> <LINK_FLAGS> <OBJECTS>" )
set ( CMAKE_C_ARCHIVE_APPEND "<CMAKE_AR> rs <TARGET> <LINK_FLAGS> <OBJECTS>" )
elseif ( MSVC )
set ( CMAKE_EXE_LINKER_FLAGS "\${CMAKE_EXE_LINKER_FLAGS} -nologo" )
if ( "\${FlagDefs}" MATCHES "flagEVC(;|$)" )
if ( NOT "\${FlagDefs}" MATCHES "flagSH3(;|$)" AND NOT "\${FlagDefs}" MATCHES "flagSH4(;|$)" )
# disable stack checking
set ( EXTRA_MSVC_FLAGS "\${EXTRA_MSVC_FLAGS} -Gs8192" )
endif()
# read-only string pooling, turn off exception handling
set ( EXTRA_MSVC_FLAGS "\${EXTRA_MSVC_FLAGS} -GF -GX-" )
elseif ( "\${FlagDefs}" MATCHES "flagCLR(;|$)" )
set ( EXTRA_MSVC_FLAGS "\${EXTRA_MSVC_FLAGS} -EHac" )
elseif ( "\${FlagDefs}" MATCHES "flagMSC(8|9|10|11|12|14|15|16|17|19)" OR "\${FlagDefs}" MATCHES "flagMSC(8|9)ARM" OR "\${FlagDefs}" MATCHES "flagMSC(8|9|10|11|12|14|15|16|17|19)X64" )
set ( EXTRA_MSVC_FLAGS "\${EXTRA_MSVC_FLAGS} -EHsc" )
else()
set ( EXTRA_MSVC_FLAGS "\${EXTRA_MSVC_FLAGS} -GX" )
endif()
if ( \${CMAKE_BUILD_TYPE} STREQUAL DEBUG )
set ( EXTRA_MSVC_FLAGS_Mx "d" )
endif()
if ( "\${FlagDefs}" MATCHES "flagSHARED(;|$)" OR "\${FlagDefs}" MATCHES "flagCLR(;|$)" )
set ( EXTRA_MSVC_FLAGS "\${EXTRA_MSVC_FLAGS} -MD\${EXTRA_MSVC_FLAGS_Mx}" )
else()
if ( "\${FlagDefs}" MATCHES "flagMT(;|$)" OR "\${FlagDefs}" MATCHES "flagMSC(8|9|10|11|12|14|15|16|17|19)" OR "\${FlagDefs}" MATCHES "flagMSC(8|9)ARM" OR "\${FlagDefs}" MATCHES "flagMSC(8|9|10|11|12|14|15|16|17|19)X64" )
set ( EXTRA_MSVC_FLAGS "\${EXTRA_MSVC_FLAGS} -MT\${EXTRA_MSVC_FLAGS_Mx}" )
else()
set ( EXTRA_MSVC_FLAGS "\${EXTRA_MSVC_FLAGS} -ML\${EXTRA_MSVC_FLAGS_Mx}" )
endif()
endif()
#,5.01 needed to support WindowsXP
if ( NOT "\${FlagDefs}" MATCHES "(flagMSC(8|9|10|11|12|14|15|16|17|19)X64)" )
set ( MSVC_LINKER_SUBSYSTEM ",5.01" )
endif()
if ( "\${FlagDefs}" MATCHES "flagMSC(8|9)ARM" )
set ( CMAKE_EXE_LINKER_FLAGS "\${CMAKE_EXE_LINKER_FLAGS} -subsystem:windowsce,4.20 /ARMPADCODE -NODEFAULTLIB:\"oldnames.lib\"" )
else()
if ( "\${FlagDefs}" MATCHES "flagGUI(;|$)" OR "\${FlagDefs}" MATCHES "flagMSC(8|9)ARM" )
set ( CMAKE_EXE_LINKER_FLAGS "\${CMAKE_EXE_LINKER_FLAGS} -subsystem:windows\${MSVC_LINKER_SUBSYSTEM}" )
else()
set ( CMAKE_EXE_LINKER_FLAGS "\${CMAKE_EXE_LINKER_FLAGS} -subsystem:console\${MSVC_LINKER_SUBSYSTEM}" )
endif()
endif()
if ( "\${FlagDefs}" MATCHES "flagDLL(;|$)" )
set ( CMAKE_EXE_LINKER_FLAGS "\${CMAKE_EXE_LINKER_FLAGS} -dll" )
endif()
set ( CMAKE_CXX_FLAGS_\${CMAKE_BUILD_TYPE} "\${CMAKE_CXX_FLAGS_\${BUILD_TYPE}} \${EXTRA_MSVC_FLAGS}" )
set ( CMAKE_C_FLAGS_\${CMAKE_BUILD_TYPE} "\${CMAKE_C_FLAGS_\${BUILD_TYPE}} \${EXTRA_MSVC_FLAGS}" )
endif()
# Function to generate precompiled header
function ( generate_pch TARGET_NAME ${PCH_FILE} PCH_INCLUDE_DIRS )
set ( PCH_OUTPUT_DIR \${PROJECT_PCH_DIR}/\${TARGET_NAME} )
get_filename_component ( PCH_NAME \${${PCH_FILE}} NAME )
get_filename_component ( TARGET_DIR \${${PCH_FILE}} PATH )
file ( COPY \${PCH_FILE} DESTINATION \${PCH_OUTPUT_DIR} )
# Prepare compile flag definition
get_target_property ( ${COMPILE_FLAGS_LIST} \${TARGET_NAME} ${COMPILE_FLAGS_LIST} )
string ( REGEX REPLACE ";" " " ${COMPILE_FLAGS_LIST} "\${${COMPILE_FLAGS_LIST}}" )
set ( compile_flags "\${CMAKE_CXX_FLAGS_\${CMAKE_BUILD_TYPE}} \${${COMPILE_FLAGS_LIST}}" )
# Add main target defined include directories
get_directory_property ( include_directories DIRECTORY \${CMAKE_CURRENT_SOURCE_DIR} INCLUDE_DIRECTORIES )
foreach ( include_dir \${include_directories} )
list ( APPEND compile_flags "-I\${include_dir}" )
endforeach()
# Add source directory of the precompiled header file - for quoted include files
list ( APPEND compile_flags "-iquote\${TARGET_DIR}" )
# Add included directories of the external packages collected from defintions of all targets
foreach ( include_dir \${PCH_INCLUDE_DIRS} )
list ( APPEND compile_flags "-I\${include_dir}" )
endforeach()
# Add target compile definitions
get_target_property ( ${PCH_COMPILE_DEFINITIONS} \${TARGET_NAME} ${PCH_COMPILE_DEFINITIONS} )
foreach ( compile_def \${${PCH_COMPILE_DEFINITIONS}} )
list ( APPEND compile_flags "-D\${compile_def}" )
endforeach()
list ( REMOVE_DUPLICATES compile_flags )
separate_arguments ( compile_flags )
# Prepare compilations options
set ( PCH_BINARY_SUFFIX ".pch" )
if ( \${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU" )
set ( PCH_BINARY_SUFFIX ".gch" )
endif()
set ( PCH_HEADER "\${PCH_OUTPUT_DIR}/\${PCH_NAME}" )
set ( PCH_BINARY "\${PCH_HEADER}\${PCH_BINARY_SUFFIX}" )
set ( PCH_COMPILE_PARAMS -x c++-header -o \${PCH_BINARY} \${PCH_HEADER} )
# Generate precompiled header file
add_custom_command ( OUTPUT \${PCH_BINARY}
COMMAND \${CMAKE_CXX_COMPILER} \${compile_flags} \${PCH_COMPILE_PARAMS}
COMMENT "PCH for the file \${PCH_HEADER}"
)
add_custom_target ( \${TARGET_NAME}_gch DEPENDS \${PCH_BINARY} )
add_dependencies ( \${TARGET_NAME} \${TARGET_NAME}_gch )
endfunction()
# Function to create cpp source from icpp files
function ( create_cpps_from_icpps )
file ( GLOB icpp_files RELATIVE "\${CMAKE_CURRENT_SOURCE_DIR}" "\${CMAKE_CURRENT_SOURCE_DIR}/*.icpp" )
foreach ( icppFile \${icpp_files} )
set ( output_file "\${PROJECT_BINARY_DIR}/\${CMAKE_PROJECT_NAME}/\${icppFile}.cpp" )
file ( WRITE "\${output_file}" "#include \"\${CMAKE_CURRENT_SOURCE_DIR}/\${icppFile}\"\n" )
endforeach()
endfunction()
# Function to create cpp source file from binary resource definition
function ( create_brc_source input_file output_file symbol_name compression symbol_append )
if ( NOT EXISTS \${CMAKE_CURRENT_SOURCE_DIR}/\${input_file} )
message ( FATAL_ERROR "Input file does not exist: \${CMAKE_CURRENT_SOURCE_DIR}/\${input_file}" )
endif()
message ( STATUS "Creating cpp source file \"\${output_file}\" from the binary resource \"\${input_file}\"" )
file ( REMOVE \${CMAKE_CURRENT_BINARY_DIR}/\${symbol_name} )
if ( \${compression} MATCHES "[bB][zZ]2" )
find_program ( BZIP2_EXEC bzip2 )
if ( NOT BZIP2_EXEC )
message ( FATAL_ERROR "BZIP2 executable not found!" )
endif()
set ( COMPRESS_SUFFIX "bz2" )
set ( COMMAND_COMPRESS \${BZIP2_EXEC} -k -f \${CMAKE_CURRENT_BINARY_DIR}/\${symbol_name} )
elseif ( \${compression} MATCHES "[zZ][iI][pP]" )
find_program ( ZIP_EXEC zip )
if ( NOT ZIP_EXEC )
message ( FATAL_ERROR "ZIP executable not found!" )
endif()
set ( COMPRESS_SUFFIX "zip" )
set ( COMMAND_COMPRESS \${ZIP_EXEC} \${CMAKE_CURRENT_BINARY_DIR}/\${symbol_name}.\${COMPRESS_SUFFIX} \${symbol_name} )
elseif ( \${compression} MATCHES "[lL][zZ][mM][aA]" )
find_program ( LZMA_EXEC lzma )
if ( NOT LZMA_EXEC )
message ( FATAL_ERROR "LZMA executable not found!" )
endif()
set ( COMPRESS_SUFFIX "lzma" )
set ( COMMAND_COMPRESS \${LZMA_EXEC} \${CMAKE_CURRENT_BINARY_DIR}/\${symbol_name} )
elseif ( \${compression} MATCHES "[lL][zZ]4" )
find_program ( LZ4_EXEC lz4c )
if ( NOT LZ4_EXEC )
message ( FATAL_ERROR "LZ4 executable not found!" )
endif()
set ( COMPRESS_SUFFIX "lz4" )
set ( COMMAND_COMPRESS \${LZ4_EXEC} -f \${CMAKE_CURRENT_BINARY_DIR}/\${symbol_name} \${CMAKE_CURRENT_BINARY_DIR}/\${symbol_name}.\${COMPRESS_SUFFIX} )
elseif ( \${compression} MATCHES "[zZ][sS][tT[dD]" )
find_program ( ZSTD_EXEC zstd )
if ( NOT ZSTD_EXEC )
message ( FATAL_ERROR "ZSTD executable not found!" )
endif()
set ( COMPRESS_SUFFIX "zst" )
set ( COMMAND_COMPRESS \${ZSTD_EXEC} \${CMAKE_CURRENT_BINARY_DIR}/\${symbol_name} -o \${CMAKE_CURRENT_BINARY_DIR}/\${symbol_name}.\${COMPRESS_SUFFIX} )
endif()
file ( COPY \${CMAKE_CURRENT_SOURCE_DIR}/\${input_file} DESTINATION \${CMAKE_CURRENT_BINARY_DIR} )
get_filename_component ( input_file_name \${CMAKE_CURRENT_SOURCE_DIR}/\${input_file} NAME )
file ( RENAME \${CMAKE_CURRENT_BINARY_DIR}/\${input_file_name} \${CMAKE_CURRENT_BINARY_DIR}/\${symbol_name} )
if ( COMMAND_COMPRESS )
execute_process ( COMMAND \${COMMAND_COMPRESS} WORKING_DIRECTORY \${CMAKE_CURRENT_BINARY_DIR} OUTPUT_VARIABLE XXXX )
file ( RENAME \${CMAKE_CURRENT_BINARY_DIR}/\${symbol_name}.\${COMPRESS_SUFFIX} \${CMAKE_CURRENT_BINARY_DIR}/\${symbol_name} )
endif()
file ( READ \${CMAKE_CURRENT_BINARY_DIR}/\${symbol_name} hex_string HEX )
set ( CUR_INDEX 0 )
string ( LENGTH "\${hex_string}" CUR_LENGTH )
math ( EXPR FILE_LENGTH "\${CUR_LENGTH} / 2" )
set ( \${hex_string} 0)
string ( REGEX REPLACE "([0-9a-f][0-9a-f])" "0x\\\\1, " hex_converted \${hex_string} )
set ( output_string "static unsigned char \${symbol_name}_[] = {\n" )
set ( output_string "\${output_string} \${hex_converted}0x00 }\;\n\n" )
set ( output_string "\${output_string}unsigned char *\${symbol_name} = \${symbol_name}_\;\n\n" )
set ( output_string "\${output_string}int \${symbol_name}_length = \${FILE_LENGTH}\;\n\n" )
if ( \${symbol_append} MATCHES "append" )
file ( APPEND \${CMAKE_CURRENT_BINARY_DIR}/\${output_file} \${output_string} )
else()
file ( WRITE \${CMAKE_CURRENT_BINARY_DIR}/\${output_file} \${output_string} )
endif()
endfunction()
# Initialize definition flags (flags are used during targets compilation)
get_directory_property ( FlagDefs COMPILE_DEFINITIONS )
foreach( comp_def \${FlagDefs} )
message ( STATUS " initialize flag " \${comp_def} )
set ( \${comp_def} 1 )
endforeach()
message ( STATUS "Project compilation flags: \${EXTRA_GCC_FLAGS}" )
EOL
# End of the cat (CMakeFiles.txt)
local PKG_DIR=""
local dir=""
local dir_include=()
local dir_add=()
while [ ${#UPP_ALL_USES_DONE[@]} -lt ${#UPP_ALL_USES[@]} ]; do
local process_upp="$(get_upp_to_process)"
# echo "num of elements all : ${#UPP_ALL_USES[@]} (${UPP_ALL_USES[@]})"
# echo "num of elements done: ${#UPP_ALL_USES_DONE[@]} (${UPP_ALL_USES_DONE[@]})"
# echo "process_upp=\"${process_upp}\""
if [ -n "${process_upp}" ]; then
if [ -d "${UPP_SRC_DIR}/${process_upp}" ]; then
PKG_DIR=${UPP_SRC_DIR}
elif [ -d "${PROJECT_EXTRA_INCLUDE_DIR}/${process_upp}" ]; then
PKG_DIR="${PROJECT_EXTRA_INCLUDE_DIR}"
else
PKG_DIR=""
echo "ERROR"
echo "ERROR - package \"${process_upp}\" was not foud!"
echo "ERROR"
fi
if [ -d "${PKG_DIR}/${process_upp}" ]; then
if [[ "${process_upp}" =~ '/' ]]; then
tmp_upp_name="$(basename "${process_upp}").upp"
generate_cmake_file "${PKG_DIR}/${process_upp}/${tmp_upp_name}" "${process_upp}"
else
generate_cmake_file "${PKG_DIR}/${process_upp}/${process_upp}".upp "${process_upp}"
fi
# include directories from packages
for dir in "${INCLUDE_SYSTEM_LIST[@]}"; do
dir_include+=("include_directories ( \${PROJECT_SOURCE_DIR}/${PKG_DIR}/${process_upp}/${dir} )")
done
dir_add+=("add_subdirectory ( ${PKG_DIR}/${process_upp} \${CMAKE_CURRENT_BINARY_DIR}/${process_upp} )")
fi
fi
UPP_ALL_USES_DONE+=("${process_upp}")
done
echo '# Include dependent directories of the project' >> "${OFN}"
for dir in "${dir_include[@]}"; do
echo "$dir" >> "${OFN}"
done
for dir in "${dir_add[@]}"; do
echo "$dir" >> "${OFN}"
done
echo "add_subdirectory ( ${main_target_dirname} \${CMAKE_CURRENT_BINARY_DIR}/${main_target_name} )" >> "${OFN}"
local -a array_library=$(printf "%s\n" "${UPP_ALL_USES_DONE[@]}" | sort -u );
local library_dep="${main_target_name}${LIB_SUFFIX};"
for list_library in ${array_library[@]}; do
library_dep+="${list_library//\//_}${LIB_SUFFIX};"
done
# Link dependecy correction
library_dep="${library_dep/Core-lib;Core_SSL-lib/Core_SSL-lib;Core-lib}"
library_dep="${library_dep/Core-lib;Core_Rpc-lib/Core_Rpc-lib;Core-lib}"
library_dep="${library_dep//plugin_zstd-lib}"
library_dep="${library_dep/ZstdTest-lib/ZstdTest-lib;plugin_zstd-lib}"
# Beginning of the cat (CMakeFiles.txt)
cat >> "${OFN}" << EOL
# Creation of the file build_info.h
set ( BUILD_INFO_H \${PROJECT_INC_DIR}/build_info.h )
string ( TIMESTAMP bmYEAR %Y )
string ( TIMESTAMP bmMONTH %m )
string ( TIMESTAMP bmDAY %d )
string ( TIMESTAMP bmHOUR %H )
string ( TIMESTAMP bmMINUTE %M )
string ( TIMESTAMP bmSECOND %S )
string ( REGEX REPLACE "^0(.*)" \\\\1 bmMONTH \${bmMONTH} )
string ( REGEX REPLACE "^0(.*)" \\\\1 bmDAY \${bmDAY} )
string ( REGEX REPLACE "^0(.*)" \\\\1 bmHOUR \${bmHOUR} )
string ( REGEX REPLACE "^0(.*)" \\\\1 bmMINUTE \${bmMINUTE} )
string ( REGEX REPLACE "^0(.*)" \\\\1 bmSECOND \${bmSECOND} )
cmake_host_system_information ( RESULT bmHOSTNAME QUERY HOSTNAME )
file ( WRITE \${BUILD_INFO_H} "#define bmYEAR \${bmYEAR}\n#define bmMONTH \${bmMONTH}\n#define bmDAY \${bmDAY}\n" )
file ( APPEND \${BUILD_INFO_H} "#define bmHOUR \${bmHOUR}\n#define bmMINUTE \${bmMINUTE}\n#define bmSECOND \${bmSECOND}\n" )
file ( APPEND \${BUILD_INFO_H} "#define bmTIME Time(\${bmYEAR}, \${bmMONTH}, \${bmDAY}, \${bmHOUR}, \${bmMINUTE}, \${bmSECOND})\n" )
file ( APPEND \${BUILD_INFO_H} "#define bmMACHINE \"\${bmHOSTNAME}\"\n" )
if ( WIN32 )
file ( APPEND \${BUILD_INFO_H} "#define bmUSER \"\$ENV{USERNAME}\"\n" )
else()
file ( APPEND \${BUILD_INFO_H} "#define bmUSER \"\$ENV{USER}\"\n" )
endif()
find_package(Subversion)
if ( SUBVERSION_FOUND AND EXISTS "\${CMAKE_SOURCE_DIR}/.svn" )
Subversion_WC_INFO(\${CMAKE_SOURCE_DIR} SVN)
endif()
find_package(Git)
if ( GIT_FOUND AND EXISTS "\${CMAKE_SOURCE_DIR}/.git" )
# Get the current working branch
execute_process(
COMMAND git rev-parse --abbrev-ref HEAD
WORKING_DIRECTORY \${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE GIT_BRANCH
OUTPUT_STRIP_TRAILING_WHITESPACE
)
# Get the latest abbreviated commit hash of the working branch
execute_process(
COMMAND git log -1 --format=%h
WORKING_DIRECTORY \${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE GIT_COMMIT_HASH
OUTPUT_STRIP_TRAILING_WHITESPACE
)
# Get remote tracking of actual branch
execute_process(
COMMAND git config --local branch.\${GIT_BRANCH}.remote
WORKING_DIRECTORY \${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE GIT_REMOTE_TRACKING
OUTPUT_STRIP_TRAILING_WHITESPACE
)
# Get remote tracking URL of actual branch
execute_process(
COMMAND git config --local remote.\${GIT_REMOTE_TRACKING}.url
WORKING_DIRECTORY \${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE GIT_REMOTE_URL
OUTPUT_STRIP_TRAILING_WHITESPACE
)
endif()
if ( GIT_COMMIT_HASH )
file (APPEND \${BUILD_INFO_H} "#define bmGIT_REVISION \"\${GIT_COMMIT_HASH}\"\n" )
file (APPEND \${BUILD_INFO_H} "#define bmGIT_BRANCH \"\${GIT_BRANCH}\"\n" )
file (APPEND \${BUILD_INFO_H} "#define bmGIT_URL \"\${GIT_REMOTE_URL}\"\n" )
elseif ( SVN_WC_REVISION )
file (APPEND \${BUILD_INFO_H} "#define bmSVN_REVISION \"\${SVN_WC_REVISION}\"\n" )
endif()
# Collect icpp files
file ( GLOB_RECURSE cpp_ini_files "\${PROJECT_BINARY_DIR}/\${CMAKE_PROJECT_NAME}/*.icpp.cpp" )
# Collect windows resource config file
if ( WIN32 )
file ( GLOB rc_file "\${PROJECT_BINARY_DIR}/\${CMAKE_PROJECT_NAME}/*.rc" )
endif()
# Main program definition
file ( WRITE \${PROJECT_BINARY_DIR}/\${CMAKE_PROJECT_NAME}/null.cpp "" )
if ( "\${FlagDefs}" MATCHES "(flagSO)(;|$)" )
add_library ( ${main_target_name}${BIN_SUFFIX} \${PROJECT_BINARY_DIR}/\${CMAKE_PROJECT_NAME}/null.cpp \${rc_file} \${cpp_ini_files} )
if ( WIN32 )
include ( GenerateExportHeader )
generate_export_header ( ${main_target_name}${BIN_SUFFIX}
BASE_NAME ${main_target_name}${BIN_SUFFIX}
EXPORT_MACRO_NAME ${main_target_name}${BIN_SUFFIX}_EXPORT
EXPORT_FILE_NAME ${main_target_name}${BIN_SUFFIX}_Export.h
STATIC_DEFINE ${main_target_name}${BIN_SUFFIX}_BUILT_AS_STATIC
)
endif()
else()
add_executable ( ${main_target_name}${BIN_SUFFIX} \${PROJECT_BINARY_DIR}/\${CMAKE_PROJECT_NAME}/null.cpp \${rc_file} \${cpp_ini_files} )
endif()
# Main program dependecies
set ( ${main_target_name}_${DEPEND_LIST} "${library_dep}" )
add_dependencies ( ${main_target_name}${BIN_SUFFIX} \${${main_target_name}_${DEPEND_LIST}} )
if ( DEFINED MAIN_TARGET_LINK_FLAGS )
set_target_properties ( ${main_target_name}${BIN_SUFFIX} PROPERTIES LINK_FLAGS \${MAIN_TARGET_LINK_FLAGS} )
endif()
# Precompiled headers processing
if ( "\${FlagDefs}" MATCHES "flagPCH(;|$)" )
if ( CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_CLANG )
# Collect included directories of the external packages from all targets
foreach ( target \${${main_target_name}_${DEPEND_LIST}} )
get_target_property ( ${PCH_INCLUDE_LIST} \${target} ${PCH_INCLUDE_LIST} )
list ( APPEND PCH_INCLUDE_DIRS \${${PCH_INCLUDE_LIST}} )
endforeach()
if ( PCH_INCLUDE_DIRS )
list ( REMOVE_DUPLICATES PCH_INCLUDE_DIRS )
endif()
foreach ( target \${${main_target_name}_${DEPEND_LIST}} )
get_target_property ( ${PCH_FILE} \${target} ${PCH_FILE} )
if ( ${PCH_FILE} )
generate_pch ( \${target} \${${PCH_FILE}} "\${PCH_INCLUDE_DIRS}" )
endif()
endforeach()
endif()
endif()
# Main program link
target_link_libraries ( ${main_target_name}${BIN_SUFFIX} \${main_$LINK_LIST} \${${main_target_name}_${DEPEND_LIST}} \${PROJECT_EXTRA_LINK_FLAGS} )
if ( ${TARGET_RENAME} )
set_target_properties ( ${main_target_name}${BIN_SUFFIX} PROPERTIES OUTPUT_NAME \${${TARGET_RENAME}} )
else()
set_target_properties ( ${main_target_name}${BIN_SUFFIX} PROPERTIES OUTPUT_NAME ${main_target_name} )
endif()
EOL
# End of the cat (CMakeFiles.txt)
# Show used plugins
if [ "${GENERATE_DEBUG}" == "1" ]; then
declare -A sorted_UPP_ALL_USES=$(printf "%s\n" "${UPP_ALL_USES[@]}" | sort -u);
declare -A sorted_UPP_ALL_USES_DONE=$(printf "%s\n" "${UPP_ALL_USES_DONE[@]}" | sort -u);
echo "Plugins used : " ${sorted_UPP_ALL_USES[@]}
echo "CMake generated: " ${sorted_UPP_ALL_USES_DONE[@]}
fi
# Generate package file
if [ "${GENERATE_PACKAGE}" == "1" ]; then
generate_package_file
fi
UPP_ALL_USES=()
UPP_ALL_USES_DONE=()
}
| true
|
f52f2c3a914735f4adb7c9dd312b45397e93443a
|
Shell
|
brandonjones085/os
|
/transpose.sh
|
UTF-8
| 278
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Transpose a whitespace/tab-delimited file given as $1: each input column
# becomes one output row (fields joined by tabs, as `paste -s` does).

transpose() {
  local file=$1
  local cols i
  # Column count comes from the first row only; the original recomputed the
  # same value once per input line inside a while-read loop.
  cols=$(head -n 1 "$file" | wc -w)
  for (( i = 1; i <= cols; i++ )); do
    # cut reads the file directly; piping it through cat was redundant
    # (and the original passed the file to cut anyway, ignoring the pipe).
    cut -f"$i" -- "$file" | paste -s
  done
}

transpose "$@"
| true
|
4f9922e7fd160c42b216bc6d8e97a453b7caa659
|
Shell
|
resslerruntime/rice
|
/cat-cat/tint2/volume.sh
|
UTF-8
| 1,024
| 2.875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Render the current master volume as a five-row vertical bar meter
# (one glyph per line) for a tint2 executor.

muted=$(amixer get Master | grep "\[" | cut -d[ -f4 | grep -o "[a-z]*")
cur=$(amixer get Master | grep "\[" | cut -d[ -f2 | grep -o "[0-9]*")

# Map 0-100 onto deciles: every pair of deciles fills one solid bar from
# the bottom, an odd decile adds a half bar on top of the filled ones.
decile=$(( (cur + 9) / 10 ))

case $decile in
  0)  vol="│\n│\n│\n│\nX" ;;
  1)  vol="│\n│\n│\n│\n╽" ;;
  2)  vol="│\n│\n│\n│\n┃" ;;
  3)  vol="│\n│\n│\n╽\n┃" ;;
  4)  vol="│\n│\n│\n┃\n┃" ;;
  5)  vol="│\n│\n╽\n┃\n┃" ;;
  6)  vol="│\n│\n┃\n┃\n┃" ;;
  7)  vol="│\n╽\n┃\n┃\n┃" ;;
  8)  vol="│\n┃\n┃\n┃\n┃" ;;
  9)  vol="╽\n┃\n┃\n┃\n┃" ;;
  10) vol="┃\n┃\n┃\n┃\n┃" ;;
esac

# A muted mixer always shows the X marker, whatever the level is.
[ "$muted" = "off" ] && vol="│\n│\n│\n│\nX"

echo -e "$vol\n"
| true
|
7b14a3ab05e5c82e527456e843f7ee97d5ba3938
|
Shell
|
drewgrubb0/dev-tools
|
/scripts/github-clone.sh
|
UTF-8
| 293
| 3.703125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Clone one of drewgrubb0's GitHub repositories into ~/Projects,
# prompting for the project name when it is not given as $1.

PROJECT_NAME=$1

if [ -z "$PROJECT_NAME" ]; then
  read -r -p "Please enter the project name: " PROJECT_NAME
fi

# Bug fix: the original created ~/Projects when the cd failed but never
# cd'd into it afterwards, so the clone landed in the current directory.
mkdir -p "$HOME/Projects"
cd "$HOME/Projects" || exit 1

# Only report success when the clone actually succeeded.
if git clone "git@github.com:drewgrubb0/${PROJECT_NAME}.git"; then
  echo "Cloned ${PROJECT_NAME} into Projects"
  exit 0
else
  echo "Failed to clone ${PROJECT_NAME}" >&2
  exit 1
fi
| true
|
ae377926f7a029e6bd3b52fc226629653666dedf
|
Shell
|
JonasLaksen/deeptsa
|
/pushing.bash
|
UTF-8
| 145
| 2.515625
| 3
|
[] |
no_license
|
# Copy a local result file/directory to a "server_"-prefixed copy and
# push the server results to the remote repository.
FILE=$1
# Options go before the operands: a trailing "-R" is a GNU extension and
# fails on BSD/macOS cp; quoting protects paths containing spaces.
cp -R -- "$FILE" "server_${FILE}"
# NOTE(review): the copy target is "server_${FILE}" but only server_results/
# is staged — confirm whether the copy should land inside server_results/.
git add server_results/
git commit -m "Automatic commit, adding server result ${FILE}"
git push origin HEAD
| true
|
304a72d43141fa508a7426eb894ac326829eb783
|
Shell
|
jasonLaster/dotfiles
|
/aliases.zsh
|
UTF-8
| 2,226
| 2.609375
| 3
|
[] |
no_license
|
alias g="git"
# NOTE(review): aliases cannot take arguments; the "$1" below is expanded
# (to nothing) when the alias is defined, so `s foo` runs `git grep -e "" foo`.
# Confirm whether this was meant to be a shell function instead.
alias s="git grep -e \"$1\""

# Use $HOME rather than a quoted "~": tilde is not expanded inside double
# quotes, so these exports previously held a literal "~" character that
# broke any quoted use such as `cd "$GECKO"`.
export OS="$HOME/src/_os"
export JS="$OS/_js"
export MOZ="$HOME/src/moz"
export GECKO="$HOME/src/moz/gecko-dev"
export NIGHTLY="/Applications/Firefox Nightly.app/Contents/MacOS/firefox"

alias os="cd $OS"
alias moz="cd $MOZ"
alias gecko="cd $GECKO"
alias dbg="cd $GECKO/devtools/client/debugger"

alias m="$GECKO/mach"
alias mb="cd $GECKO; $GECKO/mach build; cd -"
alias mbf="cd $GECKO; $GECKO/mach build faster; cd -"
alias mcb="cd $GECKO; ./mach clobber; ./mach build; cd -"
alias mbr="cd $GECKO; ./mach build faster; ./mach run -P dev1 --jsdebugger; cd -"
alias mr="cd $GECKO; ./mach run -P dev5; cd -"
alias mrd="cd $GECKO; ./mach run -P dev5 --jsdebugger; cd -"

alias launch="cd ~/src/moz/devtools-core/packages/devtools-launchpad"

alias rc="vim ~/.zshrc"
alias src="ruby ~/src/dotfiles/setup.rb; source ~/.zshrc"

# post a gist file to github if the gist tool is in the path
alias gist-diff="gist --type diff"

alias v="vim"
alias fix-camera="sudo killall VDCAssistant"
alias python-server="python -m SimpleHTTPServer 8000"
# Git aliases
alias ga="git a"
alias gac="git ac"
alias gb="git b"
alias gba="git ba"
alias gbc="git bc"
alias gbd="git bd"
alias gci="git ci --no-verify"
alias gcia="git cia --no-verify"
alias gcim="git cim --no-verify"
alias gciv="git civ"
alias gcl="git cl"
alias gclf="git clf"
alias gco="git co"
alias gcob="git cob"
alias gcom="git com"
alias gcop="git cop"
alias gd="git d"
alias gdc="git dc"
alias gdcs="git dcs"
alias gdm="git dm"
alias gdms="git dms"
alias gds="git ds"
alias gdw="git dw"
alias gf="git f"
alias gfp="git fp"
alias gg="git g"
alias gl="git l"
alias glo="git lo"
alias glh="git lh"
alias glhs="git lhs"
alias glp="git lp"
alias glpj="git lpj"
alias gls="git ls"
alias gmv="git mv"
alias gr="git r"
alias gra="git ra"
alias grc="git rc"
alias gre="git re"
alias greh="git reh"
alias grehh="git rehh"
alias gri="git ri"
alias grih2="git rih2"
alias grih3="git rih3"
alias grih4="git rih4"
alias grih5="git rih5"
# Bug fix: a stray space after '=' ("alias grih= ...") defined an empty
# alias named grih and then tried to look up a nonexistent alias "git rih".
alias grih="git rih"
alias grmc="git rmc"
alias grpull="git rpull"
alias grs="git rs"
alias gs="git s"
alias gsa="git sa"
alias gsh="git sh"
alias gshs="git shs"
alias gsnp="git snp"
alias gst="git st"
alias gstm="git stm"
| true
|
7bd9d96bbbd4e08cc55345196da0a4f357963917
|
Shell
|
Cloudxtreme/xztech
|
/work/peer1/addons/p1post/src/p1post-1.0/usr/local/p1post/script.d/999mh.sh
|
UTF-8
| 2,251
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# File: 999mh.sh
# Package: p1post
# Install Location: /usr/local/p1post/script.d
# Name: Managed Hosting Legacy Configurator
#
# Supported Platforms:
#   Redhat Enterprise Linux
#
# Description: Perform standard managed hosting server configuration
#   and package installation of legacy options and addons.
#
# Author: Adam Hubscher <ahubscher AT peer1 DOT com>
# Version: 1.0

# If this is not a redhat based machine, exit now.
[ -f /etc/redhat-release ] || exit 0

# Set Path Variable
PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin

# Postconf contains customer information and any selected addons
# (defines SMARTKEY, RPSMON, PATCHING, ...).
. /usr/local/p1post/postconf.info

# KS Library contains functions and required variables (provides postlog).
. /usr/local/p1post/lib/p1ks_lib.sh

postlog "INFO" "Beginning Managed Hosting Configuration"
postlog "INFO" "Please note: This does nothing unless MH Legacy Addons are Selected"

# Base package set kept for parity with the legacy RHEL5 kickstart builds;
# this parity was requested by Managed Hosting so no re-education is needed.
postlog "INFO" "Installing base packages."
yum install -y chkraid autoconf automake boost busybox caching-nameserver compat-gcc-34 compat-gcc-34-c++ compat-libgcc-296 compat-libstdc++-296 compat-libstdc++-33 dialog expat emacs-nox expect gcc gcc-c++ gnutls libtool libtool-ltdl lockdev ltrace lynx ntp openssl097a p1mhqa perl-Crypt-SSLeay perl-Date-Calc perl-DateManip perl-LDAP perl-libxml-perl perl-XML-Dumper perl-XML-LibXML perl-XML-Simple redhat-rpm-config rpm-build ruby sharutils strace sysstat system-config-securitylevel-tui vim-enhanced x86info xinetd

# Addon selections come from postconf.info:
#   RPSMON = "p1rps", SMARTKEY = "Y", PATCHING = "managed_patching"
# Bug fix: the original 'if' statements were missing 'then' and did not parse.
if [[ "${SMARTKEY}" == "Y" ]]; then
  yum install -y smartkey
fi

if [[ "${RPSMON}" == "p1rps" ]]; then
  yum install -y rpsmonitor
fi

if [[ "${PATCHING}" == "managed_patching" ]]; then
  yum install -y yum-p1mh-autoupdates yum-p1mh-repo
fi
| true
|
c63142792687cd686a00128f5a63b3248d4bf80d
|
Shell
|
mogenson/eco-speech-detect
|
/run_me.sh
|
UTF-8
| 260
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Serve the current directory over HTTP, open it in Chrome, and keep the
# server alive until the user presses Ctrl+C.

handle_interrupt()
{
    printf '\n'  # move past the ^C on the terminal
    echo "killing web server"
    kill "$SERVER_PID"
    exit 0
}

trap handle_interrupt SIGINT

python -m SimpleHTTPServer &
SERVER_PID=$!

sleep 1
google-chrome http://0.0.0.0:8000

# Idle until interrupted; the SIGINT trap performs the cleanup.
while :; do
    sleep 1
done
| true
|
2126dd116a77893f9e08273c2500fd0863dd4fa1
|
Shell
|
A2795/Week10
|
/Week10.sh
|
UTF-8
| 614
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Week 10 networking lab: demonstrate common Linux network inspection tools.
# Show what your IP address is by using the ifconfig command
ifconfig
# Show the kernel routing table (numeric addresses, no DNS lookups)
route -n
# Ping ourselves; press Ctrl+C to stop (ping runs indefinitely without -c)
ping 127.0.0.1
# Trace the network path taken to reach google.com
traceroute google.com
# Look up the IP address of google.com with the host command
host google.com
# Display TCP connections numerically (-n numeric, -t TCP)
netstat -nt
# Show the tail of /etc/services, where well-known port numbers are listed
cat /etc/services | tail
# List the current firewall rules (iptables requires root)
sudo iptables -L
| true
|
89159ba2ce083506a7e12476288939fbfafb3789
|
Shell
|
joebernard/rn-diff-purge
|
/multiple-versions-diffs.sh
|
UTF-8
| 299
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Generate a pairwise diff between every pair of React Native versions
# listed (one per line) in the VERSIONS file.
set -euxo pipefail

# Read one version per line. mapfile replaces the legacy
# IFS/GLOBIGNORE/`command eval` trick and never glob-expands the input.
mapfile -t versions < VERSIONS

# Make sure the output directory exists before writing into it.
mkdir -p diffs

for vfrom in "${versions[@]}"
do
    [[ -n "$vfrom" ]] || continue  # skip blank lines kept by mapfile
    echo "from $vfrom"
    for vto in "${versions[@]}"
    do
        [[ -n "$vto" ]] || continue
        git diff "origin/version/${vfrom}..origin/version/${vto}" \
            > "diffs/${vfrom}..${vto}.diff"
    done
done
| true
|
ec4d9b648826cf36187c6a1b4715f13ca292d80b
|
Shell
|
tnakaicode/jburkardt
|
/opengl/yellow_window.sh
|
UTF-8
| 419
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Compile the yellow_window OpenGL demo and install it to ~/bincpp/$ARCH.
# assumes $ARCH is exported by the calling environment — TODO confirm
#
g++ -c yellow_window.cpp
if [ $? -ne 0 ]; then
  echo "Errors compiling yellow_window.cpp" >&2
  # Bug fix: a bare 'exit' here returned the status of the preceding echo
  # (0), so a failed compile looked like success to callers.
  exit 1
fi
#
# macOS frameworks; the commented-out line is the Linux equivalent.
g++ yellow_window.o -framework GLUT -framework OpenGL
#g++ yellow_window.o -lGL -lGLU -lglut -lm
if [ $? -ne 0 ]; then
  echo "Errors linking and loading yellow_window.o" >&2
  exit 1
fi
#
rm yellow_window.o
mv a.out "$HOME/bincpp/$ARCH/yellow_window"
#
echo "Executable installed as ~/bincpp/$ARCH/yellow_window"
| true
|
c05767829c272069b5dbbc2866986855c8170c6f
|
Shell
|
GsDevKit/GsDevKit_home
|
/tests/errorTests.sh
|
UTF-8
| 6,345
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#=========================================================================
# Copyright (c) 2015, 2016 GemTalk Systems, LLC. All Rights Reserved <dhenrich@gemtalksystems.com>.
#
# MIT license: https://github.com/GsDevKit/GsDevKit_home/blob/master/license.txt
#=========================================================================
set -x # print commands
# the exit status should be 1 for all error conditions
# the error message should either be
# "The appropriate install script has not been run..."
# "The stone travis2 does not exist..."
# "Perhaps a stone has not been created..."
# "the GsDevKit_server project has not bee installed..."
# "The reqewst client: tode1 does not exist"
#######################################
# Run a GsDevKit command that is expected to fail *gracefully*.
# Arguments: $1 - command line, executed via `bash -c`
# Outputs:   the command's combined stdout/stderr (from /tmp/gsDevKit_test)
# Exits 1 if the output contains an unhandled "Error on or near line"
# message, or (via test_exit_status) if the exit status is not 1.
#######################################
run_test() {
  local status
  bash -c "$1" &> /tmp/gsDevKit_test
  status=$?
  # grep reads the capture file directly (was `cat file | grep ...`);
  # it still prints any matching line, preserving the original output.
  grep "Error on or near line" /tmp/gsDevKit_test
  if [ "$?" -ne 1 ] ; then
    # should not get an 'Error on or near line' during these tests
    cat /tmp/gsDevKit_test
    echo "Unexpected unhandled error"
    exit 1
  fi
  cat /tmp/gsDevKit_test
  test_exit_status "$status"
}
# Run a command that is expected to exit with status 1.
# $1 is intentionally unquoted so it word-splits into command + arguments
# (callers pass e.g. "stones x").
run_simple_test() {
  $1
  test_exit_status $?
}
# Assert that the given exit status is exactly 1; any other value prints a
# red diagnostic and aborts the whole test run with exit 1.
# NOTE: `status` is assigned without `local`, matching the original's
# (global) behavior.
test_exit_status() {
  local red="\033[91;1m"
  local plain="\033[0m"
  status="$1"
  if [ "$status" -eq 1 ] ; then
    return
  fi
  printf "${red}Unexpected exit status (%s)${plain}\n" "$status"
  exit 1
}
# test that these commands fail gracefully if installServer has not been run yet
# (every command below should exit 1 with a helpful message, never crash)
set +e
run_test "createStone ${STONENAME2} $GS_VERSION"
run_test "createClient tode"
run_simple_test "devKitCommandLine --list"
run_simple_test "$GS_HOME/bin/status"
run_test "startStatmonitor ${STONENAME2}"
run_test "startStone ${STONENAME2}"
run_test "startNetldi ${STONENAME2}"
run_test "stopStone ${STONENAME2}"
run_test "updateGsDevKit"
run_test "newExtent ${STONENAME2}"
run_test "stopNetldi ${STONENAME2}"
run_test "upgradeStone ${STONENAME1} ${STONENAME2} 3.2.9"
run_test "startTopaz ${STONENAME1} -l"
run_test "todeBackup ${STONENAME2} backup.dbf"
run_test "todeRestore ${STONENAME2} backup.dbf"
run_test "todeUpdate ${STONENAME2}"
run_test "startClient tode1"
set -e
# Now actually install the server+client, then exercise argument errors.
installServerClient
set +e
# Now test argument error conditions
run_test "attachForeignStone"
run_test "attachOldDevKitStone"
run_test "attachOldDevKitStone -Z"
run_test "attachOldDevKitStone -dm"
run_test "attachOldDevKitStone -md"
run_test "clients x"
run_test "createClient"
run_test "createClient -Z"
run_test "createClient -t xxx"
run_test "createStone"
run_test "createStone -Z"
run_test "deleteClient"
run_test "deleteSession"
run_test "deleteStone"
run_test "downloadGemStone"
run_test "installClient x"
run_test "installClient -Z"
run_test "installServer x"
run_test "installServer -Z"
run_test "installServerClient x"
run_test "installServerClient -Z"
run_test "newExtent"
run_test "newExtent -Z"
run_test "newExtent foo"
run_test "products x"
run_test "sessions x"
run_test "setupGsDevKit"
run_test "setupGsDevKit -Z"
run_test "smalltalkCI"
run_test "smalltalkCI -Z"
run_test "startClient"
run_test "startNetldi"
run_test "startNetldi foo"
run_test "startStatmonitor"
run_test "startStatmonitor foo"
run_test "startStone"
run_test "startStone foo"
run_test "startStone -NC"
run_test "startStone -NR"
run_test "startStone -CN"
run_test "startStone -CR"
run_test "startStone -RC"
run_test "startStone -RN"
run_test "startStone -Z"
run_test "startTopaz"
run_test "startTopaz foo"
run_test "$GS_HOME/bin/status x"
run_test "$GS_HOME/bin/status -Z"
run_simple_test "stones x"
run_test "stopNetldi"
run_test "stopNetldi foo"
run_test "stopStone"
run_test "stopStone -Z"
run_test "stopStone foo"
run_test "todeBackup"
run_test "todeBackup -Z"
run_test "todeBackup foo"
run_test "todeIt -Z"
run_test "todeLoad"
run_test "todeRestore"
run_test "todeRestore foo"
run_test "todeRestore -Z"
run_test "todeUpdate"
run_test "todeUpdate foo"
run_test "updateGsDevKit -Z"
run_test "upgradeStone"
run_test "upgradeStone foo"
run_test "upgradeStone -Z"
# Argument-error tests for the internal utility and private scripts.
run_test "$GS_HOME/bin/utils/cloneGsDevKitProjects"
run_test "$GS_HOME/bin/utils/cloneGsDevKitProjects -Z"
run_test "$GS_HOME/bin/utils/installOsPrereqs x"
run_test "$GS_HOME/bin/private/attachStone"
run_test "$GS_HOME/bin/private/attachStone -fm"
run_test "$GS_HOME/bin/private/attachStone -df"
run_test "$GS_HOME/bin/private/attachStone -md"
run_test "$GS_HOME/bin/private/attachStone -Z"
run_test "$GS_HOME/bin/private/clone_gs_client_dev -Z"
run_test "$GS_HOME/bin/private/clone_gs_server -Z"
run_test "$GS_HOME/bin/private/clone_sys_local -Z"
run_test "$GS_HOME/bin/private/clone_todeClient -Z"
run_test "$GS_HOME/bin/private/cloneGitHubProject"
run_test "$GS_HOME/bin/private/cloneGitHubProject -Z"
run_test "$GS_HOME/bin/private/downloadSmalltalkClient"
run_test "$GS_HOME/bin/private/downloadSmalltalkClient foo"
run_test "$GS_HOME/bin/private/downloadSmalltalkClient -Z"
run_test "$GS_HOME/bin/private/downloadSmalltalkClient -v XXX -d $GS_HOME/shared/pharo"
run_test "mkdir /tmp/pharo$$; $GS_HOME/bin/private/downloadSmalltalkClient -v Pharo0.5 -d /tmp/pharo$$"
run_test "$GS_HOME/bin/private/installGci"
run_test "$GS_HOME/bin/private/installGci -d /tmp -t xxx 3.3.0"
run_test "$GS_HOME/bin/private/installGci -t pharo 3.3.0"
run_test "$GS_HOME/bin/private/installGci -Z"
run_test "$GS_HOME/bin/private/scanProductsForInstallingGciLibs"
run_test "$GS_HOME/bin/private/scanProductsForInstallingGciLibs -Z"
run_test "$GS_HOME/bin/patches/patchIssue15"
run_test "$GS_HOME/bin/patches/patchIssue15 -Z"
run_test "$GS_HOME/bin/patches/patchIssue7 x"
run_test "$GS_HOME/shared/bin/create_gs_server_image"
run_test "$GS_HOME/shared/bin/create_gs_server_image -d /tmp xx"
run_test "$GS_HOME/shared/bin/create_gs_server_image -v Pharo3.0"
run_test "$GS_HOME/shared/bin/create_gs_server_image -Z"
run_test "$GS_HOME/shared/bin/installClientGciLibraries"
run_test "$GS_HOME/sys/bin/todeLoad"
run_test "$GS_HOME/sys/default/gsdevkit_bin/cloneSharedTodeProjects"
run_test "$GS_HOME/sys/default/gsdevkit_bin/cloneSharedTodeProjects -Z"
run_test "$GS_HOME/sys/default/gsdevkit_bin/cloneSharedTodeProjects foo"
run_test "$GS_HOME/sys/default/gsdevkit_bin/cloneUpdate"
run_test "$GS_HOME/sys/default/gsdevkit_bin/cloneUpdate -Z"
run_test "$GS_HOME/sys/default/gsdevkit_bin/cloneUpdate foo"
#======= full test complement above the line
| true
|
95d4f86d68eb2e50c1c1a1da14f00319b7b85283
|
Shell
|
realtymaps/map
|
/scripts/app/test
|
UTF-8
| 1,610
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# CI test entry point: on CircleCI, bootstrap the Postgres databases
# (schema + dbsync caches + mock data) before running the test suite.
set -e
set -o pipefail
# Resolve the scripts/ directory relative to this script's real location.
SCRIPT_DIR="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )/.."
if [[ "$CIRCLECI" != "" ]]
then
  # when on CircleCI, there's stuff we have to do to prep the dbs before we run the tests
  echo "***** Creating DBs:"
  $SCRIPT_DIR/database/createDbs ubuntu
  for DB in main normalized
  do
    echo "===== Setting up $DB db:"
    DB_UPCASE=`echo "$DB" | tr '[:lower:]' '[:upper:]'`
    echo "~~~~~ Creating extensions:"
    $SCRIPT_DIR/database/createExtensions $DB ubuntu
    # Download and load pre-built schema/dbsync dumps to avoid running
    # every migration from scratch on each CI build.
    echo "~~~~~ Downloading db schema cache file:"
    curl -# -o /tmp/ci_data_${DB}_schema.db ${CI_DB_SECRET_URL}_${DB_UPCASE}_SCHEMA
    echo "~~~~~ Loading db schema cache file:"
    pg_restore -d realtymaps_${DB} -U ubuntu -e -O /tmp/ci_data_${DB}_schema.db
    echo "~~~~~ Downloading dbsync cache file:"
    curl -# -o /tmp/ci_data_${DB}_dbsync.db ${CI_DB_SECRET_URL}_${DB_UPCASE}_DBSYNC
    echo "~~~~~ Loading dbsync cache files:"
    pg_restore -d realtymaps_${DB} -U ubuntu -e -O /tmp/ci_data_${DB}_dbsync.db
    echo "~~~~~ Setting DB url var:"
    DB_URL="postgres://ubuntu@localhost:5432/realtymaps_${DB}"
    # eval is used to export a dynamically-named variable (MAIN_/NORMALIZED_);
    # inputs are CI-controlled, not user-supplied.
    eval "export ${DB_UPCASE}_DATABASE_URL=${DB_URL}"
    # need to put it in the .env file so foreman will use it
    echo "${DB_UPCASE}_DATABASE_URL=${DB_URL}" >> ./.env
    echo "~~~~~ Applying new migrations:"
    $SCRIPT_DIR/database/syncDb $DB
    echo "~~~~~ Inserting mock data:"
    $SCRIPT_DIR/database/syncDb $DB --ci
  done
fi
echo "***** Running tests:"
# Install foreman on the fly if it's missing from the CI image.
if [[ `which foreman` == "" ]]
then
  gem install foreman
fi
foreman run gulp rspec
| true
|
211128e1d369cf867ee99b1372429911ad2cf292
|
Shell
|
feroze/.dotfiles
|
/git_scripts/terminalColors.sh
|
UTF-8
| 885
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# ======================
# TERMINAL OUTPUT COLORS
# Shared tput-based escape sequences for colored script output.
# (Command substitutions modernized from legacy backticks to $( ).)
# text formats
RESET=$(tput sgr0)
BOLD=$(tput bold)
DIM=$(tput dim)
REVERSE=$(tput rev)
NORMAL="\033[21m"
# text colors
BLACK=$(tput setaf 0)
RED=$(tput setaf 1)
GREEN=$(tput setaf 2)
YELLOW=$(tput setaf 3)
BLUE=$(tput setaf 4)
MAGENTA=$(tput setaf 5)
CYAN=$(tput setaf 6)
LIGHTGRAY=$(tput setaf 7)
DARKGRAY=$(tput setaf 0; tput bold)
WHITE=$(tput setaf 7; tput bold)
# TERMINAL OUTPUT COLORS
# ======================
# ======================
# MARKS — colored status symbols for log lines
INFOMARK="$DARKGRAY$BOLD❖$RESET"
CHECKMARK="$GREEN$BOLD✔$RESET"
ALERTMARK="$YELLOW$BOLD❗$RESET"
CROSSMARK="$RED$BOLD✖$RESET"
# MARKS
# ======================
# ======================
# PREFIXES — colored result words for log lines
YEP="$BOLD${GREEN}YEP$RESET"
NOPE="$BOLD${RED}NOPE$RESET"
EHH="$BOLD${RED}EHH$RESET"
ERR="$BOLD${RED}ERROR$RESET"
MEH="$BOLD${MAGENTA}MEH$RESET"
# PREFIXES
# ======================
| true
|
8766770e97553fbf0f29734ba6fa3e4fe8758b0f
|
Shell
|
neil78duan/apollolib
|
/tool/liveupdate-node/liveupdate-ubuntu.sh
|
UTF-8
| 898
| 3.5625
| 4
|
[] |
no_license
|
#! /bin/sh
#
# LSB init script for the Node.js live-update daemon.
#
PATH=/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/bin/
WORKING=/home/liveupdate/liveupdate-node
CONFIG_FILE=$WORKING/configuration.json
DAEMON=$WORKING/index.js
PIDFILE=/var/run/liveupdate.pid
PROG="liveupdate"
# OPTS is intentionally unquoted at the call sites so it word-splits.
OPTS="--path $WORKING --pidfile $PIDFILE --config $CONFIG_FILE "

# Nothing to do if the daemon script is missing or not executable.
test -x $DAEMON || exit 0

. /lib/lsb/init-functions

case "$1" in
  start)
    log_daemon_msg "Starting deferred execution scheduler" "$PROG"
    # BUGFIX: was `start_daemon $DAEMON $OPTS & > /dev/null`, which
    # backgrounded the daemon with its output still attached and then ran
    # `> /dev/null` as a separate no-op. Redirect first, then background.
    start_daemon $DAEMON $OPTS > /dev/null &
    log_end_msg $?
    ;;
  stop)
    log_daemon_msg "Stopping deferred execution scheduler" "$PROG"
    killproc -p $PIDFILE $DAEMON
    log_end_msg $?
    rm -rf $PIDFILE
    ;;
  force-reload|restart)
    $0 stop
    $0 start
    ;;
  status)
    status_of_proc -p $PIDFILE $DAEMON $PROG && exit 0 || exit $?
    ;;
  *)
    echo "Usage: /etc/init.d/liveupdate {start|stop|restart|force-reload|status}"
    exit 1
    ;;
esac

exit 0
| true
|
aefe24960e9ac09aeec91008f5a6ac377794e5c7
|
Shell
|
anynines/broker-registrar-boshrelease-OLD
|
/jobs/broker-deregistrar/templates/run.sh
|
UTF-8
| 1,216
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# BOSH job: deregister a Cloud Foundry service broker.
# NOTE: this is an ERB template — <%= ... %> / <% ... %> are rendered at
# deploy time from job properties/links before the script runs.
exec 2>&1

export PATH=$PATH:/var/vcap/packages/jq/bin
export PATH=$PATH:/var/vcap/packages/cf-cli/bin

set -eu

CF_API_URL='<%= p("cf.api_url") %>'
CF_ADMIN_USERNAME='<%= p("cf.username") %>'
CF_ADMIN_PASSWORD='<%= p("cf.password") %>'
CF_SKIP_SSL_VALIDATION='<%= p("cf.skip_ssl_validation") %>'
<%
broker_name = p("servicebroker.name", nil)
unless broker_name
broker = link("servicebroker")
broker_name = broker.p("name")
end
%>
BROKER_NAME='<%= broker_name %>'

echo "CF_API_URL=${CF_API_URL}"
echo "CF_SKIP_SSL_VALIDATION=${CF_SKIP_SSL_VALIDATION}"
echo "CF_ADMIN_USERNAME=${CF_ADMIN_USERNAME}"
echo "BROKER_NAME=${BROKER_NAME}"

if [[ ${CF_SKIP_SSL_VALIDATION} == "true" ]]; then
cf api ${CF_API_URL} --skip-ssl-validation
else
cf api ${CF_API_URL}
fi

cf auth \
${CF_ADMIN_USERNAME} \
${CF_ADMIN_PASSWORD}

# Look up the broker, purge every service offering it provides, then
# delete the broker itself.
BROKER_GUID=$(cf curl /v2/service_brokers\?q=name:${BROKER_NAME} | jq -r ".resources[0].metadata.guid")
SERVICE_NAMES=($(cf curl /v2/services\?q=service_broker_guid:${BROKER_GUID} | jq -r ".resources[].entity.label"))
for service_name in "${SERVICE_NAMES[@]}"; do
cf purge-service-offering $service_name -f
done

cf delete-service-broker \
${BROKER_NAME} \
-f
| true
|
069bbfa0a7e8838c80bc6b5ae6d7a8da5676e569
|
Shell
|
andyneff/esxi-nut
|
/internal_package/etc/init.d/install-upsmon
|
UTF-8
| 2,264
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
#
# NUT:
# Install/Upgrade/Remove the network UPS tools client
#
# description: NUT client post-install script
#
#
# Log action
#
NUT_log() {
  # Echo the message to the console and mirror it into syslog (tag: NUT).
  echo "$1"
  logger -t NUT "$1"
  # Always succeed so callers' && / || chains behave predictably.
  return 0
}
# Create a string advanced-config variable under /UserVars on ESXi.
# $1 - variable name, $2 - description, $3 - default value.
# If the variable already exists (query exit != 1), it is left untouched.
add_advcfg_string() {
  esxcfg-advcfg -q -g "/UserVars/$1" 2>/dev/null
  if [ $? = 1 ]
  then
    esxcfg-advcfg -A "$1" -T string -E "$2" -F "$3" && NUT_log "Created string advcfg $1" || NUT_log "Failed to create string advcfg $1"
  else
    NUT_log "Using existing string advcfg $1"
  fi
}
# Create an integer advanced-config variable under /UserVars on ESXi.
# $1 - name, $2 - description, $3 - min, $4 - max, $5 - default value.
# If the variable already exists (query exit != 1), it is left untouched.
add_advcfg_int() {
  esxcfg-advcfg -q -g "/UserVars/$1" 2>/dev/null
  if [ $? = 1 ]
  then
    esxcfg-advcfg -A "$1" -T int -E "$2" -N "$3" -M "$4" -F "$5" && NUT_log "Created int advcfg $1" || NUT_log "Failed to create int advcfg $1"
  else
    NUT_log "Using existing int advcfg $1"
  fi
}
# Delete the advanced-config variable named $1, logging the outcome.
del_advcfg() {
  esxcfg-advcfg -L "$1" && NUT_log "Deleted advcfg $1" || NUT_log "Failed to delete advcfg $1"
}
# Register all NUT client configuration variables (connection, shutdown
# delay, and mail-notification settings) as ESXi advanced-config entries.
upsmon_install() {
  add_advcfg_string NutUpsName 'NUT remote ups name (eg: upsname@nutserver) use space as a separator for multiple upses' upsname@nutserver
  add_advcfg_string NutUser 'NUT username to connect to remote ups' upsuser
  add_advcfg_string NutPassword 'NUT password to connect to remote ups' upspassword
  add_advcfg_string NutMailTo 'NUT send mail notification to this address' root@domain
  add_advcfg_int NutFinalDelay 'NUT seconds to wait on low battery event before shutting down' 0 3600 5
  add_advcfg_int NutSendMail 'NUT send mail notification (1=yes 0=no)' 0 1 0
  add_advcfg_string NutSmtp 'NUT send mail smtp server' 'smtp.example.com'
  add_advcfg_string NutMailFrom 'NUT send mail notification from this address' 'admin@example.com'
}
# Remove every NUT client advanced-config entry created by upsmon_install.
upsmon_remove() {
  del_advcfg NutUpsName
  del_advcfg NutUser
  del_advcfg NutPassword
  del_advcfg NutMailTo
  del_advcfg NutFinalDelay
  del_advcfg NutSendMail
  del_advcfg NutSmtp
  del_advcfg NutMailFrom
}
# Dispatch on the init action ($1) and VIB operation ($2) passed by ESXi.
# FIX: replaced bashism [[ ]] with POSIX [ ] — this script's shebang is
# #!/bin/sh (busybox ash on ESXi), where [[ ]] is not guaranteed.
if [ "${1}" = "start" ] && [ "${2}" = "install" ]
then
  NUT_log "Installing NUT client"
  upsmon_install
fi

if [ "${1}" = "start" ] && [ "${2}" = "upgrade" ]
then
  NUT_log "Upgrading NUT client"
  upsmon_install
fi

if [ "${1}" = "stop" ] && [ "${2}" = "remove" ]
then
  NUT_log "Removing NUT client"
  upsmon_remove
fi

exit 0
| true
|
adae6fb9e6d538d9fc647002d58fb887be6036a2
|
Shell
|
ChameleonCloud/chi-in-a-box
|
/roles/chameleon_mariadb/templates/mariadb.j2
|
UTF-8
| 2,397
| 3.953125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# MySQL Backup Script
# NOTE: this is a Jinja2 template ({{ ... }} / {% ... %}) rendered by
# Ansible before deployment; the result is a plain bash script.
#
ROTATE="{{ mariadb_backup_file_age }}"
DIR="{{ backup_location }}"
set -o pipefail
{% if enable_mariabackup|bool == false %}
# Dumps mysql databases to a file
##### START CONFIG ###################################################
USER="{{ backup_database_user }}"
PASS="{{ backup_database_password }}"
MAX_ALLOWED_PACKET="{{ mariadb_backup_max_packet }}"
# Create temporary mysql cnf file.
TMPFILE=`mktemp /tmp/backup.XXXXXX` || exit 1
cat >"$TMPFILE" <<EOF
[client]
password=$PASS
user=$USER
max_allowed_packet=$MAX_ALLOWED_PACKET
EOF
PREFIX=mysql_backup_
ADDITIONAL_OPTIONS="--ignore-table=mysql.event"
##### STOP CONFIG ####################################################
backup_file_name=`date +%Y%m%d-%H%M%S`.sql.bz2
backup_file=${DIR}/${PREFIX}${backup_file_name}
# Remove the temp cnf and prune backups older than $ROTATE days.
cleanup() {
rm -f "$TMPFILE"
# Rotate backups
find "${DIR}/" -maxdepth 1 -type f -name "${PREFIX}*.sql*" -mtime +${ROTATE} -print0 | xargs -0 -r rm -f
}
mkdir -p $DIR
# Dump all databases and compress; `bzcat -zc` forces compression
# (-z overrides bzcat's implied -d) — NOTE(review): `bzip2 -zc` would be
# the clearer spelling; confirm before changing.
mysqldump --defaults-extra-file="$TMPFILE" --opt --flush-logs --single-transaction \
${ADDITIONAL_OPTIONS} \
--all-databases | bzcat -zc > "$backup_file"
{% endif %}
{% if enable_mariabackup|bool == true %}
# mariadb backup is handled by kolla. just do cleanups here.
PREFIX=mysqlbackup-
backup_file_name=$(docker run --rm -v ${DIR}:/backups "{{ kolla_toolbox_image_full }}" /bin/bash -c "ls -Art /backups/${PREFIX}* | tail -n 1")
backup_file_name=$(basename $backup_file_name)
# Prune old kolla-produced backups inside the toolbox container.
cleanup() {
# Rotate backups
docker run -u 0 --rm -v ${DIR}:/backup "{{ kolla_toolbox_image_full }}" \
/bin/bash -c \
"find /backup -maxdepth 1 -type f -name "${PREFIX}*" -mtime +${ROTATE} -print0 | xargs -0 -r rm -f"
}
{% endif %}
# Run whichever cleanup() was defined above on every exit path.
trap cleanup EXIT
{% if mariadb_backup_s3_endpoint is defined and mariadb_backup_s3_key_id is defined and mariadb_backup_s3_key is defined %}
# Additionally copy backup to S3 bucket
_aws() {
docker run --rm \
-e AWS_ACCESS_KEY_ID="{{ mariadb_backup_s3_key_id }}" \
-e AWS_SECRET_ACCESS_KEY="{{ mariadb_backup_s3_key }}" \
-v "{{ mariadb_backup_s3_conf_dir }}":/root/.aws \
-v ${DIR}:/backups \
amazon/aws-cli \
--endpoint="{{ mariadb_backup_s3_endpoint }}" \
--region=us-east-1 \
"$@"
}
_aws s3 mb s3://"{{ mariadb_backup_container_name }}"
_aws s3 cp /backups/"$backup_file_name" s3://"{{ mariadb_backup_container_name }}"
{% endif %}
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.