blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
c03bfaf1590c56bd6e5233834dc97d03830cfe5e
|
Shell
|
Mattlk13/bsd
|
/script/vmware.sh
|
UTF-8
| 138
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh -u
# Packer provisioner stub: among the BSDs only FreeBSD has VMware tools
# support, so under the vmware-iso builder we just report that and move on.
case "$PACKER_BUILDER_TYPE" in
  vmware-iso)
    echo "==> No VMware tools support except for FreeBSD, continuing"
    ;;
esac
| true
|
ba8829d5654ee31700d4f22e2e2e3255054a2284
|
Shell
|
jrafaelsantana/Useless-Scripts
|
/fibonacci.sh
|
UTF-8
| 265
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
function fibonacci(){
    # Print the n-th term of this script's Fibonacci variant to stdout.
    # Terms 0 and 1 (and any smaller input) evaluate to 1, so the series
    # runs 1 1 2 3 5 8 13 ...
    local n=$1
    if (( n > 1 )); then
        # Each recursive call runs in a command-substitution subshell,
        # so 'n' cannot be clobbered across the two branches.
        local lo hi
        hi=$(fibonacci $(( n - 1 )))
        lo=$(fibonacci $(( n - 2 )))
        echo $(( hi + lo ))
    else
        echo 1
    fi
}
# Interactive driver: prompt (Portuguese: "type a number"), read the index,
# print the corresponding term. The trailing read keeps the window open
# when the script is double-clicked from a file manager.
echo "digite um numero: "
read numero
val=$(fibonacci $numero)
echo "fibonacci($numero) = $val"
read x
| true
|
61dff846dc4fe4f727b2ec83db2f16400cb711ac
|
Shell
|
NicolasTr/docker-odoo
|
/etc/service/openerp-server/run
|
UTF-8
| 336
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash -ex
# runit-style 'run' script (path /etc/service/openerp-server/run): starts
# openerp-server in the foreground under the service supervisor.
# 'setuser' presumably comes from the phusion/baseimage toolset — confirm.
echo "Starting openerp-server..."
# Wait for postgres
# NOTE(review): a fixed 30s sleep is a race, not a readiness check; the DB
# may still not be up. Connection parameters come from docker link env vars.
sleep 30
# exec replaces this shell so the supervisor manages openerp-server directly;
# 2>&1 folds stderr into stdout for the service log.
exec setuser openerp openerp-server \
--database=${DB_1_ENV_POSTGRESQL_DB} \
--db_user=${DB_1_ENV_POSTGRESQL_USER} \
--db_password=${DB_1_ENV_POSTGRESQL_PASS} \
--db_host=${DB_1_PORT_5432_TCP_ADDR} \
--db_port=${DB_1_PORT_5432_TCP_PORT} \
2>&1
| true
|
6361ce5ff60f32331e6083a90991553e7b73e538
|
Shell
|
kba/shlog
|
/src/shlog-main.bash
|
UTF-8
| 211
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
# detect if being sourced and
# export if so else execute
# main function with args
#
# shlog::reload, shlog, shlog::dump and shlog::profile are defined in the
# other shlog source files this fragment is concatenated with.
shlog::reload
# BASH_SOURCE[0] != $0 exactly when this file is being sourced rather than
# executed directly.
if [[ ${BASH_SOURCE[0]} != "$0" ]]; then
# Sourced: export the API functions to child shells.
export -f shlog shlog::dump shlog::profile
else
# Executed: behave as a CLI and forward all arguments.
shlog "${@}"
fi
| true
|
47915d32b1db0d322ec03095eb9f7cdf5c60595f
|
Shell
|
cartershanklin/structor
|
/modules/benchmetrics/files/tpc/tpcds.postgres/tpcds-datagen.sh
|
UTF-8
| 1,355
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Print the command synopsis and abort the script with status 1.
usage() {
  printf '%s\n' "Usage: tpcds-setup.sh scale_factor [temp_directory]"
  exit 1
}
# Preflight: the TPC-DS data generator must have been built, and psql must
# be on PATH (data is loaded into a local Postgres).
TARGET_DIR=target/v1.4.0/tools
TARGET=$TARGET_DIR/dsdgen
if [ ! -f $TARGET ]; then
echo "Please build the data generator with ./tpcds-build.sh first"
exit 1
fi
which psql > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Script must be run where Postgres is installed"
exit 1
fi
# Get the parameters.
# $1 = scale factor (GB of data to generate), $2 = optional staging dir.
SCALE=$1
DIR=${2-/tmp/tpcds-generate}
# Trace execution when DEBUG_SCRIPT is set to anything non-empty.
if [ "X$DEBUG_SCRIPT" != "X" ]; then
	set -x
fi
# Sanity checking.
if [ X"$SCALE" = "X" ]; then
	usage
fi
# BUG FIX: the original tested '-eq 1', which matched the message for an
# input of exactly 1 but silently accepted 0 or negative scale factors.
# Reject everything not strictly greater than 1, as the message states.
if [ "$SCALE" -le 1 ]; then
	echo "Scale factor must be greater than 1"
	exit 1
fi
# Do the actual data load.
# Probing the 'reason' table doubles as an "already loaded?" check: the load
# only runs when the query fails (table absent).
psql -c 'select count(*) from reason'
if [ $? -ne 0 ]; then
echo "Generating data at scale factor $SCALE in $DIR."
mkdir -p $DIR
cp $TARGET $DIR
cp $TARGET_DIR/tpcds.idx $DIR
cp ddl/text/alltables.sql $DIR
cd $DIR
# Fixed RNG seed so repeated runs produce identical data.
./dsdgen -sc $SCALE -rngseed 12345 -force Y
for table in call_center catalog_page catalog_sales catalog_returns customer customer_address customer_demographics date_dim household_demographics income_band inventory item promotion reason ship_mode store store_sales store_returns time_dim warehouse web_page web_sales web_returns web_site; do
# dsdgen terminates every row with a trailing field delimiter; strip the
# last character of each line so COPY sees the right column count.
sed -i s'/.$//' $table.dat
done
echo "Loading data to Postgres."
psql -f alltables.sql
fi
echo "TPC-DS data generation complete."
| true
|
57130cf99867adb482d5542cf1062a4d73f026a2
|
Shell
|
ChristopherKotthoff/aphros
|
/examples/105_sharp/tools/sharp
|
UTF-8
| 528
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Sharpen a volume-fraction image: img2raw -> ap.sharpen -> raw2img.
# Usage: sharp INPUT OUTPUT [extra ap.sharpen args...]
# Tunables are taken from the environment (cfl, steps, np) with defaults.
set -eu
base=$(dirname $(readlink -f $0))
: ${cfl=0.5}
: ${steps=5}
: ${np=1}
i=$1
o=$2
shift 2
# Per-process temp files; $$ keeps concurrent runs from colliding.
input=/tmp/input.$$.raw
xmf=/tmp/input.$$.xmf
output=/tmp/output.$$.raw
# Clean the temp files up on HUP/INT/QUIT/ILL/TERM.
trap 'rm $input $output $xmf; exit 1' 1 2 3 4 15
# NOTE(review): $xmf is only read below — presumably img2raw writes the
# .xmf sidecar next to the .raw it produces; confirm.
$base/img2raw -i $i -o $input
# Pull "rows cols" out of the XDMF DataItem Dimensions attribute (swapped
# to "cols rows" by printing $3 then $2).
shape=`awk 'sub(/^     <DataItem Dimensions="/, "") && sub(/" Seek=.*/, "") {print $3, $2}' $xmf`
ap.sharpen "$@" --cfl $cfl --steps $steps --outtype UShort $input $output
$base/raw2img -d $shape -i $output -o $o
# NOTE(review): under 'set -e' a raw2img failure exits before this line,
# so status is effectively always 0 here.
status=$?
rm $input $output $xmf
exit $status
| true
|
3fe803c0797853e95511bef55a642540f1461a19
|
Shell
|
spavel1/Ticket
|
/changeFirefox.sh
|
UTF-8
| 982
| 3.015625
| 3
|
[] |
no_license
|
##################################################################################################
# Downgrade Firefox to 45.7.0esr: the stock build of this Ubuntu server image                    #
# (ubuntu-18.04.3) does not support the application, so an older ESR is installed.               #
##################################################################################################
# Check the currently installed Firefox version.
# BUG FIX: the original ran `cd /etc | firefox -version` — a pipeline, so the
# cd executed in a throwaway subshell and did nothing; only firefox mattered.
firefox -version
# Install the GTK2 runtime dependency the old build links against.
sudo apt-get install libgtk2.0-0 -y
# Download the 45.7.0esr SDK tarball from Mozilla's release archive.
wget https://ftp.mozilla.org/pub/firefox/releases/45.7.0esr/firefox-45.7.0esr.linux-x86_64.sdk.tar.bz2
# Extract it (produces ./firefox-sdk).
tar xvf firefox-45.7.0esr.linux-x86_64.sdk.tar.bz2
# Back up the existing launcher, then make sure the path is really free.
# BUG FIX: the original `rm /usr/bin/firefox` ran without sudo and AFTER the
# mv had already relocated the file, so it always failed; -f makes the
# leftover-symlink cleanup a harmless no-op when nothing remains.
sudo mv /usr/bin/firefox /usr/bin/firefox-backup
sudo rm -f /usr/bin/firefox
# Install the SDK under /usr/lib and link it back into PATH.
sudo mv firefox-sdk/ /usr/lib/firefox
# NOTE(review): this links the *directory* as /usr/bin/firefox (as the
# original did) — verify whether the binary inside it was intended instead.
sudo ln -s /usr/lib/firefox /usr/bin/firefox
# Confirm the downgraded version.
firefox -version
| true
|
42f5b4168de12cacd17604bf135a43cde81415b3
|
Shell
|
RGillespie64/CSC364-Setup-Files
|
/samba_setup.sh
|
UTF-8
| 2,147
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# created by: Richard Gillespie
# purpose: CSC-364 Final Project - Samba Setup and Default Configuration
#
# Installs Samba on a dnf-based system, writes a minimal smb.conf, opens the
# firewall, creates one user-gated share and (re)starts the smb/nmb services.

# install samba
dnf install -y samba samba-common samba-client

# keep the stock config as a reference, then write ours from scratch
mv /etc/samba/smb.conf /etc/samba/smb.conf.orig

# Set File Server NETBIOS name
echo "NETBIOS name: "
read NETBIOS

# Global section of the new configuration (heredoc instead of the original
# fourteen echo-appends; content is identical, including the blank line).
cat >> /etc/samba/smb.conf <<EOF
[global]
workgroup = WORKGROUP
server string = Samba Server %v
netbios name = $NETBIOS
security = user
map to guest = never
dns proxy = no

EOF

# run test on the the configuration file
testparm

# add samba service to the firewall and reload rules
firewall-cmd --permanent --add-service=samba
firewall-cmd --reload

# start the smb/nmb services now and enable them on startup
systemctl start smb
systemctl start nmb
systemctl enable nmb
systemctl enable smb

# Get smb username
echo "Samba Username: "
read USER

# create the first secure group and put the samba user in it
groupadd smbgrp
usermod -aG smbgrp $USER
# create the smb password for user
smbpasswd -a $USER

# Get secure folder name
echo "Initial Share Name: "
read FOLDER
# create initial share folder and lock its permissions down
mkdir -p /share/samba/$FOLDER
chmod -R 0770 /share/samba/$FOLDER
# BUG FIX: the original chowned to 'secure_group', a group this script never
# creates (groupadd above makes 'smbgrp', and smb.conf grants @smbgrp), so
# the chown always failed and the share stayed root:root.
chown -R root:smbgrp /share/samba/$FOLDER
# change SELinux security context so smbd may serve the directory
chcon -t samba_share_t /share/samba/$FOLDER

# append the share definition to smb config
cat >> /etc/samba/smb.conf <<EOF
[$FOLDER]
path = /share/samba/$FOLDER
valid users = @smbgrp
browsable = yes
writable = yes
guest ok = no
EOF

# restart smb/nmb so the new share is picked up
systemctl restart smb
systemctl restart nmb
| true
|
37d4eb9963955630d3bf737cca8b8d2833931a30
|
Shell
|
jmarichal/Multiphysics
|
/build_nic4.sh
|
UTF-8
| 804
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/sh
# Build script for the NIC4 cluster: fetch the gmsh SDK and Eigen once,
# load the toolchain modules, export search paths, then cmake+make.
if [ ! -d "gmsh-4.1.5-Linux64-sdk" ]; then
wget http://gmsh.info/bin/Linux/gmsh-4.1.5-Linux64-sdk.tgz
tar -xf gmsh-4.1.5-Linux64-sdk.tgz
rm -rf gmsh-4.1.5-Linux64-sdk.tgz
fi
if [ ! -d "eigen-eigen-323c052e1731" ]; then
wget http://bitbucket.org/eigen/eigen/get/3.3.7.tar.gz
tar -xf 3.3.7.tar.gz
rm -rf 3.3.7.tar.gz
fi
# Cluster environment-modules toolchain.
module load cmake/3.11.1
module load gcc/4.9.2
export CC=gcc
export CXX=g++
# Expose the gmsh SDK binaries/headers/libs to the build.
cd gmsh-4.1.5-Linux64-sdk/
export FC=gfortran
export PATH=${PWD}/bin:${PWD}/lib:${PATH}
export INCLUDE=${PWD}/include:${INCLUDE}
export LIB=${PWD}/lib:${LIB}
export PYTHONPATH=${PWD}/lib:${PYTHONPATH}
cd ../
# Eigen is header-only: just add its root to INCLUDE.
cd eigen-eigen-323c052e1731/
export INCLUDE=${PWD}:${INCLUDE}
cd ../
# Clean out-of-source build.
rm -rf build/
mkdir build
cd build/
cmake ../ -DCMAKE_BUILD_TYPE=Release -G "Unix Makefiles"
make
| true
|
f461314e6b9610ae0b8d0e877a46eac97ea98673
|
Shell
|
Koronen/dotfiles
|
/bash/rc.d/dircolors.bash
|
UTF-8
| 203
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
# Configure ls color output via GNU dircolors, preferring the user's
# ~/.dircolors palette over the built-in defaults. Skipped on SunOS
# (presumably its ls ignores LS_COLORS — this file is sourced, so the
# bare 'return' is valid there).
# FIX: use the POSIX builtin 'command -v' instead of the non-standard
# external 'which' (ShellCheck SC2230).
if command -v dircolors > /dev/null; then
    [ "$(uname)" = "SunOS" ] && return
    if [ -r ~/.dircolors ]; then
        eval "$(dircolors -b ~/.dircolors)"
    else
        eval "$(dircolors -b)"
    fi
fi
| true
|
d4dcca0a4d21db6d292b3de58992ebeefa588c39
|
Shell
|
johny-coder/pandomium
|
/pandomium-linux/linux-build.sh
|
UTF-8
| 2,193
| 3.109375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#
# Linux x64
#
# Builds the JCEF (Java Chromium Embedded Framework) natives and a fat jar
# for the Pandomium distribution, then packs the native libraries.
#
# Requirements:
# Git
# CMake
# Python 2.7
# Oracle JDK 8 with JAVA_HOME
# libnss3, libx11-xcb-dev, libxss1
# libasound2, libxtst6, unzip
#
mkdir JCEF67 && cd JCEF67
git clone https://bitbucket.org/chromiumembedded/java-cef.git src && cd src
# Modify sources
# Widen JCEF's visibility so Pandomium can subclass/reach internals.
find ./java/org -type f -exec sed -i 's/ protected / public /g' {} +
find ./java/org -type f -exec sed -i 's/ private / public /g' {} +
find ./java/org -type f -exec sed -i 's/ final / /g' {} +
# Modification fixes
# Undo the blanket rewrites where they broke specific declarations.
find ./java/org -type f -exec sed -i 's/public TransitionFlags(/TransitionFlags(/g' {} +
find ./java/org -type f -exec sed -i 's/public TransitionType(/TransitionType(/g' {} +
find ./java/org -type f -exec sed -i 's/static int MENU_ID/static final int MENU_ID/g' {} +
# Build natives
mkdir jcef_build && cd jcef_build
cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release ..
# Required dependencies
sudo apt-get install libnss3
sudo apt-get install libx11-xcb-dev
sudo apt-get install libxss1
sudo apt-get install libasound2
sudo apt-get install libxtst6
sudo apt-get install unzip
# Build sources
make -j4
cd ../tools/
# Modification fixes
./compile.sh linux64
# Test
# NOTE(review): run.bat on a Linux build looks suspect — confirm against the
# upstream java-cef tools directory (a run.sh may be intended).
./run.bat linux64 Release detailed
# Create binary distrib
./make_distrib.sh linux64
cd ../..
mkdir linux64
cd src/binary_distrib/linux64/bin
# Create fat jar
# Explode every runtime jar into one directory, then re-jar it as a single
# jcef-linux64.jar.
mkdir jcef-linux64
(cd jcef-linux64; unzip -uo ../gluegen-rt.jar)
(cd jcef-linux64; unzip -uo ../gluegen-rt-natives-linux-amd64.jar)
(cd jcef-linux64; unzip -uo ../jcef.jar)
(cd jcef-linux64; unzip -uo ../jogl-all.jar)
(cd jcef-linux64; unzip -uo ../jogl-all-natives-linux-amd64.jar)
jar -cvf jcef-linux64.jar -C jcef-linux64 .
# Move output
cd ../../../../
cp src/binary_distrib/linux64/bin/jcef-linux64.jar linux64/jcef-linux64.jar
cp -r src/binary_distrib/linux64/bin/lib/linux64 linux64/natives
mkdir -p linux64/src/org && cp -r src/java/org linux64/src/org
# Remove symbols libcef.so (500MB -> 100MB)
strip linux64/natives/libcef.so
#
# Download 'liblinuxenv.so' from github.com/dzikoysk/LinuxEnv and put in the natives directory
#
# Pack natives
cd linux64/natives/ && tar -cf - . | xz -9e -c - > ../../linux64/linux64-natives.tar.xz && cd -
| true
|
14541c0ed901e22ca6f19306f1756057e494b7e6
|
Shell
|
dragonde/docker-baseimage
|
/download_baseimage
|
UTF-8
| 507
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Download an ubuntu-core rootfs tarball and import it as a Docker base image.
#
# Usage: download_baseimage [release]
#   no argument -> current trusty daily build
#   release     -> that release's tarball (e.g. 14.04.3)
if [ $# -le 0 ]
then
# url="http://cdimage.ubuntu.com/ubuntu-core/daily/current"
	url="http://cdimage.ubuntu.com/ubuntu-core/trusty/daily/current"
else
	url="http://cdimage.ubuntu.com/ubuntu-core/releases/$1/release"
fi
# First amd64 tarball listed on the index page.
# FIXES vs. original: deprecated 'egrep' replaced by 'grep -E'; expansions
# quoted so unusual release strings cannot be word-split or globbed.
image=$(curl -sSL "$url" | grep -E -o "[.a-z0-9\-]+-amd64.tar.gz" | head -n 1)
echo URL... $image
# Strip the -amd64.tar.gz suffix to use as the image name.
name=$(echo "$image" | sed -re "s|^(.+)-amd64.tar.gz$|\1|")
#curl -O -SL $url/$image
echo Creando Imagen $name
docker import "$url/$image" "$name-0"
docker build -t "$name" .
| true
|
91654b85f8ad99dbfab3e51ef3edb255a3e99728
|
Shell
|
Loreton/RaspBerryConfig
|
/pi23/home/.Loreto
|
UTF-8
| 1,884
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Personal login environment for a Raspberry Pi: PATH/PYTHONPATH setup,
# convenience aliases and prompt configuration. Deployed by copying:
# ------------------------------
# - scp .Loreto LnPi22:/pi/home
# - scp .Loreto /pi/home
# ------------------------------
# Build the personal PATH prefix, most-specific entries first.
myPATH="$HOME/Loreto/pythonProjects/LnRSync/bin"
myPATH="$myPATH:$HOME/LnProd/shProc"
myPATH="$myPATH:$HOME/LnProd/pyProc"
myPATH="$myPATH:$HOME/LnProd/bin"
myPATH="$myPATH:$HOME/GIT-REPO/gitUtils"
myPATH="$myPATH:/opt/arduino-1.8.1"
myPATH="$myPATH:/usr/local/go/bin:$HOME/GIT-REPO/GO/bin"
# export PATH=$PATH:/usr/local/go/bin:~/go/bin
myPATH="$myPATH:$PATH"
# NOTE(review): PATH ends up containing $PATH twice (via myPATH and here),
# and a leading "." — convenient but a known security caveat.
export PATH=".:$myPATH:$PATH"
export PYTHONPATH=.:$HOME/Loreto/pythonProjects/LnRSync/bin:$HOME/PiProd/pyModule
export EDITOR=vi
export GOPATH="$HOME/GIT-REPO/GO"
# alias lnmount='python3 $HOME/Loreto/etc/LnMount.py'
alias lnmount='bash /home/pi/PiProd/shProc/LnMount.sh'
alias ld='ls -la | grep dr'
alias ltr='ls -ltr'
alias lt='ls -lt'
alias gitcommit='git add --all && git commit -a -m'
alias pycharm='bash /home/pi/LnProducts/pycharm-community-2016.1.4/bin/pycharm.sh'
alias ealias='vim "$HOME/.Loreto"'
alias sr='python /home/pi/GIT-REPO/gitUtils/ReplaceTextInFiles.py'
alias piomake='bash /home/pi/GIT-REPO/LnProtocol/Arduino485/pioMake.sh'
# Two alternative prompts, switchable by hand.
alias ps1="export PS1='\[\033[01;32m\][\u@\h] \[\033[01;34m\][\W]\$:\[\033[00m\] '"
alias ps2="export PS1='\[\033[01;32m\][\u@\h\[\033[00m\]] \[\033[01;34m\][\w] \$:\[\033[00m\] '"
# Colored prompt when the terminal supports it, plain fallback otherwise.
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
export PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\h\[\033[00m\] \[\033[01;34m\]\w \$:\[\033[00m\] '
else
export PS1='${debian_chroot:+($debian_chroot)}\h\w\$:'
fi
############# PYTHON #########################
# to change the default python configuration:
# sudo update-alternatives --config python
# better to keep 2.7 as the default, otherwise many things stop working
# Activate python3.4 env
# /opt/python3.4/bin/pyvenv env
# source env/bin/activate
| true
|
e0d4bdfc16d18729702ed29a406875e14c76d53d
|
Shell
|
drescherjm/metro
|
/scripts/mklivemedia.sh
|
UTF-8
| 3,533
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/sh
# Copyright 2010 Daniel Cordero
# Licensed under the GPLv2
# Build a bootable Funtoo live ISO from a metro stage5 squashfs: unpack the
# kernel/initramfs from the squashfs, lay out an isolinux tree, and wrap it
# with mkisofs (optionally hybrid-ized and GPG-signed).
# R e q u i r e m e n t s
# sys-boot/syslinux (for isolinux.bin)
# app-cdr/cdrtools
# sys-fs/squashfs-tools
# a stage5 squashfs from metro
usage() {
echo -e "$0 [out: <output file>] [bin: <isolinux binary>] \\"
echo -e "\t\t[cd: <cd directory root>] [hybrid: <yes/no>] \\"
echo -e "\t\t[mem: <memtest>] [sqfs: ] <squashfs image> \\"
echo -e "\t\t[sign: <gpg key>] \\"
echo -e "\t\t[<extra files or directories>...]"
echo
echo "Create bootable live media for experimenting with or installing"
echo "Funtoo Linux."
echo
# Remember: Square brackets mean optional; sharp brackets mean
# required arguments. Elipsis means they can be repeated.
}
binfile=/usr/share/syslinux/isolinux.bin
cdroot=cd_root
# Keyword-style argument parser: options look like "out: value". Anything
# unrecognized becomes the squashfs (first) or an extra file to copy (rest).
while test "$#" -gt 0; do
# This could get really ugly.
# Hopefully you read this message and the following code.
case "$1" in
-h) usage; exit 0;;
bin:) shift; binfile=$1;;
cd:) shift; cdroot=$1;;
hybrid:)shift; hybri=$1;;
mem:) shift; memfile=$1;;
out:) shift; outfile=$1;;
sign:) shift; gpgkey=$1;;
sqfs:) shift; sqfs=$1;;
*) if test "$sqfs"; then
flist="$flist $1"
else
sqfs=$1
fi;;
esac
shift
done
if test x"$sqfs" = "x"; then
usage
echo "Squashfs image required - aborting" >&2
exit 1
fi
test -d "$cdroot" || mkdir $cdroot
test -d "$cdroot/isolinux" || mkdir -p $cdroot/isolinux
# The isolinux binary may already be staged in the cd root from a prior run.
if test -f "$binfile"; then
cp "$binfile" $cdroot/isolinux/
elif test -f $cdroot/isolinux/$(basename ${binfile:-isolinux.bin}); then
: # pass
else
usage
echo "ISOLINUX required - aborting" >&2
echo "  (set with 'bin: /path/to/isolinux.bin')" >&2
exit 1
fi
# Extract kernel, initramfs and System.map out of the squashfs /boot.
if test -f "$sqfs"; then
cp "$sqfs" $cdroot/image.squashfs
test -d "squashfs-root" && rm -fr "squashfs-root"
unsquashfs $cdroot/image.squashfs boot/*
## Bug: if this matches more than one.
cp squashfs-root/boot/kernel* $cdroot/isolinux/kernel
cp squashfs-root/boot/initramfs* $cdroot/isolinux/initramfs
cp squashfs-root/boot/System.map* $cdroot/isolinux/System.map
rm -fr squashfs-root
else
usage
echo "Squashfs image required - aborting" >&2
exit 1
fi
# Copy any extra payload files/directories into the cd root.
for f in $flist; do
if test -f "$f"; then
cp "$f" "$cdroot"
elif test -d "$f"; then
cp -r "$f" "$cdroot"
fi
done
touch $cdroot/livecd
cat << EOF >$cdroot/isolinux/isolinux.cfg
prompt 1
default funtoo
label funtoo
kernel kernel
append root=/dev/ram0 looptype=squashfs loop=/image.squashfs cdroot
initrd initramfs
EOF
# Optional memtest boot entry.
if test "$memfile" && test -f "$memfile"; then
cp "$memfile" $cdroot/isolinux/$(basename "$memfile")
cat <<EOF >>$cdroot/isolinux/isolinux.cfg
label memtest
kernel $(basename "$memfile")
EOF
fi
# Helper executed by the initramfs at boot (quoted heredoc: no expansion).
cat >$cdroot/cdupdate.sh <<'EOF'
#!/bin/sh
# This is not very robust. Sorry.
. /etc/initrd.defaults
(test -d $NEW_ROOT/libexec/rc/init.d || mkdir -p $NEW_ROOT/libexec/rc/init.d) \
|| exit 1
cp -ar $NEW_ROOT/mnt/livecd/libexec/rc $NEW_ROOT/libexec || exit 1
#mount -t tmpfs -o nosuid,nodev,noexec,relatime,size=1024k,mode=755 \
# tmpfs $NEW_ROOT/libexec/rc/init.d || exit 1
EOF
test "$outfile" && volid="-V ${outfile%%.iso}" # BUG: spaces in name
# $volid is intentionally unquoted so "-V name" splits into two arguments.
mkisofs -l -o ${outfile:-funtoo.iso} \
-b isolinux/$(basename ${binfile:-isolinux.bin}) \
-c isolinux/boot.cat -no-emul-boot -boot-load-size 4 \
-boot-info-table $volid \
$cdroot/
test x"${hybri:-no}" = "xyes" && isohybrid ${outfile:-funtoo.iso}
# Detached ASCII signature, then verify it.
if test x"$gpgkey" != "x"; then
gpg --detach-sign --armor --local-user "$gpgkey" ${outfile:-funtoo.iso}
gpg --verify ${outfile:-funtoo.iso}.asc ${outfile:-funtoo.iso}
fi
| true
|
a096054c6b0d2c3ba858755989aa7e5de68e1cad
|
Shell
|
petronny/aur3-mirror
|
/soem-lib/PKGBUILD
|
UTF-8
| 1,223
| 2.53125
| 3
|
[] |
no_license
|
# Maintainer: Florian Bruhin (The Compiler) <archlinux.org@the-compiler.org>
# Arch Linux PKGBUILD: builds the SOEM EtherCAT master shared libraries
# from the upstream tarball plus local patches and a replacement Makefile.
pkgname=soem-lib
pkgver=1.3.0
pkgrel=5
pkgdesc="Simple Open EtherCAT Master (SOEM) shared libraries"
arch=('i686' 'x86_64')
url="http://sourceforge.net/projects/soem.berlios/"
depends=('glibc')
license=('gpl2')
options=('!strip')
# Upstream source plus local patches and a custom Makefile.
source=("http://downloads.sourceforge.net/project/soem.berlios/SOEM${pkgver}.tar.bz2"
'osal.patch'
'ethercattype.patch'
'nicdrv.patch'
'printf.patch'
'Makefile')
sha1sums=('0e58b8cf1876dc2e506e43a7c3327d76acecfe70'
'ca261467d1034e0dd69a77e9c77d3e2170171cbb'
'd538d6e9b1061705e220730eb866128c99503455'
'a90479e427163169ff22c4e869a369f0cb49275c'
'10fd02ed9ebd7aedb2f429437159bac64fe90b61'
'178d8002f589201bd3cd921424832f79e8397f8e')
# Apply the bundled patches against the unpacked source tree.
prepare() {
cd "${srcdir}/SOEM${pkgver}"
patch -p0 -i "${srcdir}/osal.patch"
patch -p0 -i "${srcdir}/ethercattype.patch"
patch -p0 -i "${srcdir}/nicdrv.patch"
patch -p0 -i "${srcdir}/printf.patch"
}
# Build with the replacement Makefile shipped alongside this PKGBUILD.
build() {
cd "${srcdir}/SOEM${pkgver}"
make -f "${srcdir}/Makefile"
}
# Install into the package staging directory.
package() {
cd "${srcdir}/SOEM${pkgver}"
make -f "${srcdir}/Makefile" DESTDIR="$pkgdir" PREFIX=/usr install
}
# vim:set ts=2 sw=2 et:
| true
|
ca75c10ee29b00e170c342a0f47a01d4549b62d1
|
Shell
|
jodyhuntatx/dap-demo-env
|
/HFTOKEN_demo/demo/0_init_demo.sh
|
UTF-8
| 1,472
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo bootstrap: points this host at a Conjur master, loads the webapp
# policy/secrets, and writes host identity files under /etc so the app
# authenticates as host/webapp/tomcat_host. Expects CONJUR_* env vars.
# fake DNS IP resolution with /etc/hosts entry
echo "$CONJUR_MASTER_HOST_IP  $CONJUR_MASTER_HOST_NAME" >> /etc/hosts
APP_HOSTNAME=webapp/tomcat_host
# delete old identity stuff
rm -f /root/.conjurrc /root/conjur*.pem
# initialize client environment
echo yes | conjur init -u https://$CONJUR_MASTER_HOST_NAME:$CONJUR_MASTER_PORT -a $CONJUR_ACCOUNT --force=true
sleep 2
# Admin password is pulled out of the keyring via summon rather than typed.
conjur authn login -u admin -p $(sudo summon -p keyring.py --yaml 'xx: !var app/at' bash -c "echo \$xx")
conjur policy load root ./policy/webapp-policy.yml
conjur variable values add webapp-secrets/database_username DatabaseUser
conjur variable values add webapp-secrets/database_password $(openssl rand -hex 12)
# create configuration and identity files (AKA conjurization)
cp ~/conjur-$CONJUR_ACCOUNT.pem /etc
# generate api key
api_key=$(conjur host rotate_api_key --host $APP_HOSTNAME)
# copy over identity file (netrc format read via netrc_path below)
echo "Generating identity file..."
cat <<IDENTITY_EOF | tee /etc/conjur.identity
machine $CONJUR_APPLIANCE_URL/authn
login host/$APP_HOSTNAME
password $api_key
IDENTITY_EOF
echo
echo "Generating host configuration file..."
cat <<CONF_EOF | tee /etc/conjur.conf
---
appliance_url: $CONJUR_APPLIANCE_URL
account: $CONJUR_ACCOUNT
netrc_path: "/etc/conjur.identity"
cert_file: "/etc/conjur-$CONJUR_ACCOUNT.pem"
CONF_EOF
# The identity file holds the API key: no access for group/other.
chmod go-rw /etc/conjur.identity
# delete user identity files to force use of /etc/conjur* host identity files.
rm ~/.conjurrc ~/.netrc
| true
|
921331308260843a3049f5bd9446e48899d3c909
|
Shell
|
d33pc/guessNumFile
|
/guessinggame.sh
|
UTF-8
| 736
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive game: guess how many files `ls` sees in the current
# directory. Keeps prompting until the guess is right, then reports how
# many attempts it took.
#
# FIXES vs. original: the shebang was '#!bin/bash' (missing leading '/',
# so direct execution failed with "bad interpreter"); 'read' now uses -r;
# 'let' replaced by POSIX arithmetic; the duplicated re-prompt/re-read in
# both branches is shared.
start_game(){
	echo "Welcome in the game."
	echo "Guess how many files are present in the current directory......."
	# Count of (non-hidden) entries as ls reports them.
	total_file=$(ls | wc -l)
	itr=0
	read -r usr_in
	while [[ $usr_in -ne $total_file ]]
	do
		itr=$((itr + 1))
		if [[ $usr_in -gt $total_file ]]
		then
			echo "Your guess was higher than actual number of files."
		else
			echo "There are more files than you thought."
		fi
		echo "Guess Again ......."
		read -r usr_in
	done
	if [[ $itr -eq 0 ]]
	then
		echo "[BULL'S EYE] You guessed it correctly in the first try."
	else
		# itr counted wrong guesses; +1 converts it to total guesses.
		itr=$((itr + 1))
		echo "Yeah, you guessed correct number of file in $itr chance."
	fi
}
# Kick off the interactive guessing loop.
start_game
| true
|
4ff444dfb189a28c4df78ce6147d943da91561d7
|
Shell
|
rashenok/joomla-dev
|
/templates/redcomponent/wright/build/build.sh
|
UTF-8
| 2,568
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Wright template build: for every style variant and every supported Joomla
# version, generate a LESS entry file in .cache, compile it with recess into
# the template's css/ directory, then bundle Bootstrap's JS.
# reset final css and temp folders
rm -f ../../css/joomla[0-9]*-[a-z]*.css
rm -rf .cache
mkdir .cache
#iterate styles
for f in ../../less/variables-[a-z]*.less
do
# Extract the style name from the path: offset 21 skips the literal
# prefix "../../less/variables-" (21 chars); length = total - 26
# (prefix 21 + suffix ".less" 5).
c=${#f};
c=$((c-26));
f=${f:21:$c};
#iterate Joomla versions
for j in less/joomla[[:digit:]][[:digit:]].less
do
# Two-digit Joomla version, sliced past the "less/joomla" prefix (11 chars).
jv=${j:11:2};
#create bootstrap file for each style/Joomla version
df=".cache/style-joomla"$jv"-"$f".less";
ds=""
ds=$ds"@import \"../../../less/variables-"$f".less\"; ";
ds=$ds"@import \"../less/bootstrap.less\"; ";
ds=$ds"@import \"../less/typography.less\"; ";
ds=$ds"@import \"../less/joomla"$jv".less\"; ";
ds=$ds"@import \"../../../less/template.less\"; ";
if [ -f ../../less/style-$f.less ];
then
ds=$ds"@import \"../../../less/style-"$f".less\"; ";
fi
echo $ds > $df;
node_modules/.bin/recess --compress $df > ../../css/joomla$jv-$f.css
#create responsive file for each style/Joomla version
df=".cache/style-joomla"$jv"-"$f"-responsive.less";
ds=""
ds=$ds"@import \"../../../less/variables-"$f".less\"; ";
ds=$ds"@import \"../less/responsive.less\"; ";
ds=$ds"@import \"../less/joomla"$jv"-responsive.less\"; ";
if [ -f ../../less/template-responsive.less ];
then
ds=$ds"@import \"../../../less/template-responsive.less\"; ";
fi
if [ -f ../../less/style-$f-responsive.less ];
then
ds=$ds"@import \"../../../less/style-"$f"-responsive.less\"; ";
fi
echo $ds > $df;
node_modules/.bin/recess --compress $df > ../../css/joomla$jv-$f-responsive.css
done
done
#copy images
cp -f libraries/bootstrap/img/* ../images/
#javascript files
# Concatenate the individual Bootstrap plugins in dependency order, then minify.
rm -rf ../js/bootstrap.min.js
cat libraries/bootstrap/js/bootstrap-transition.js libraries/bootstrap/js/bootstrap-alert.js libraries/bootstrap/js/bootstrap-button.js libraries/bootstrap/js/bootstrap-carousel.js libraries/bootstrap/js/bootstrap-collapse.js libraries/bootstrap/js/bootstrap-dropdown.js libraries/bootstrap/js/bootstrap-modal.js libraries/bootstrap/js/bootstrap-tooltip.js libraries/bootstrap/js/bootstrap-popover.js libraries/bootstrap/js/bootstrap-scrollspy.js libraries/bootstrap/js/bootstrap-tab.js libraries/bootstrap/js/bootstrap-typeahead.js libraries/bootstrap/js/bootstrap-affix.js > .cache/bootstrap.js
./node_modules/.bin/uglifyjs -nc .cache/bootstrap.js > .cache/bootstrap.min.tmp.js
# Write the Bootstrap license banner for the minified bundle.
# BUG FIX: the original used bash's echo WITHOUT -e, so the \n escapes were
# emitted literally instead of as newlines; printf interprets them.
printf '/*!\n* Bootstrap.js by @fat & @mdo\n* Copyright 2012 Twitter, Inc.\n* http://www.apache.org/licenses/LICENSE-2.0.txt\n*/\n' > .cache/copyright.js
# Prepend the banner to the minified JS and clean up the work directory.
cat .cache/copyright.js .cache/bootstrap.min.tmp.js > ../js/bootstrap.min.js
#remove caches
rm -rf .cache
| true
|
2e2f396cedbe0a3eb81d52233059bf5db51156ce
|
Shell
|
GucciGerm/AirBnB_clone_v2
|
/0-setup_web_static.sh
|
UTF-8
| 957
| 3.59375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# This script will prepare our webservers for deployment
# first update + install nginx
sudo apt-get -y update
sudo apt-get -y install nginx
# create folders if not already existent
sudo mkdir -p /data/web_static
sudo mkdir -p /data/web_static/releases
sudo mkdir -p /data/web_static/shared/
sudo mkdir -p /data/web_static/releases/test/
# create a fake html page
# (the string deliberately spans two source lines, so the written file
# contains a literal newline before </html>)
echo -e "<html>\n<head>\n</head>\n<body>\nHolberton School\n</body>\n
</html>" | sudo tee /data/web_static/releases/test/index.html
# now create a symbolic link for current linked to releases/test
# (-sf replaces any existing link so re-runs are idempotent)
sudo ln -sf /data/web_static/releases/test/ /data/web_static/current
# now change ownership of folder /data/ to ubuntu
sudo chown -R ubuntu:ubuntu /data/
# lastly create an alias and update the nginx configuration
# (inserts a location block at line 35 of the default site config;
# assumes the stock Ubuntu nginx config layout)
sudo sed -i "35i location /hbnb_static {\nalias /data/web_static/current/;\n}" /etc/nginx/sites-enabled/default
# restart nginx
sudo service nginx restart
| true
|
7529978464339b05feadbc063ff6204c05b75433
|
Shell
|
Aerolyzer/Aerolyzer_App
|
/setup.sh
|
UTF-8
| 5,588
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# ------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ------------------------------------------------------------
# This script is used to install all the dependency programs
# Assumption that it is run as sudo
clear
echo "Script to setup Aerolyzer environment."
echo "This script assumes JAVA verion 7 or higher, pip and Python 2.7.9 are installed."
echo "  Engage number 1!"
# Directory layout: the install dir and image cache live NEXT TO the
# checkout ($APP/..), not inside it.
APP="$PWD"
INST_DIR="$APP/../installDir"
if [ ! -d $INST_DIR ]; then
mkdir $INST_DIR
fi
MEDIA_DIR="$APP/../installDir/aerolyzerImgs"
if [ ! -d $MEDIA_DIR ]; then
mkdir $MEDIA_DIR
fi
cd $INST_DIR
# Check for installation machine distro and package manager
# (empty string when the tool is absent; used to branch below)
APT_GET_CMD=$(which apt-get)
BREW_CMD=$(which brew)
YUM_CMD=$(which yum)
DISTRO_VER=$(uname -v)
DISTRO_LONG=(${DISTRO_VER//;/ })
if [[ $DISTRO_LONG =~ .*Ubuntu* ]]; then
DISTRO="Ubuntu"
elif [[ $DISTRO_LONG =~ .*Darwin* ]]; then
DISTRO="Darwin"
elif [[ $DISTRO_LONG =~ .*Debian* ]]; then
DISTRO="Debian"
fi
# install PostgreSQL
# Branch on whichever package manager was found; each branch also creates
# the 'aerolyzer' database and sets the postgres superuser password.
if [ ! -z $APT_GET_CMD ]; then
echo "Ubuntu OS config ..."
$APT_GET_CMD install postgresql postgresql-contrib
$APT_GET_CMD install python-psycopg2
$APT_GET_CMD install libpq-dev
$APT_GET_CMD install python-qt4 # Install PyQt4
echo "configure PostgreSQL"
# configure PostgreSQL
sudo -u postgres bash -c "psql postgres -c \"CREATE DATABASE aerolyzer\""
sudo -u postgres bash -c "psql postgres -c \"ALTER USER postgres WITH PASSWORD 'Aerolyzer_1'\""
elif [[ ! -z $YUM_CMD ]]; then
echo "Red Hat OS config ..."
$YUM_CMD install postgresql-server
service postgresql initdb
chkconfig postgresql on
sudo -u postgres bash -c "psql postgres -c \"CREATE DATABASE aerolyzer\""
sudo -u postgres bash -c "psql postgres -c \"ALTER USER postgres WITH PASSWORD 'Aerolyzer_1'\""
elif [ $DISTRO == "Darwin" ]; then
echo "Mac OS config ..."
# Install Homebrew first if it is missing.
if [ -z $BREW_CMD ]; then
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
# ref: https://gist.github.com/lxneng/741932
$BREW_CMD update
$BREW_CMD doctor
$BREW_CMD install postgresql
$BREW_CMD cleanup PostgreSQL
echo "initalize PostgreSQL"
initdb /usr/local/var/postgres
echo "start the postgres server"
$BREW_CMD services start postgresql
postgres -D /usr/local/var/postgres
echo "set up postgres user pwd"
bash -c "psql -U postgres -c \"ALTER USER postgres WITH PASSWORD 'Aerolyzer_1'\""
echo "create aerolyzer database"
# NOTE(review): the quoting below is odd (closing quote before the escaped
# one) but the resulting command string is still well-formed.
bash -c "psql -U postgres -c \"CREATE DATABASE aerolyzer"\"
else
echo "Error: Cannot identify package manager, thus cannot install PostgreSQL. Exiting!!!!!"
exit 1;
fi
echo "End PostgreSQL config"
# solr stuff
# Download Solr 5.5.4 once into the install dir.
if [ ! -d "$INST_DIR/solr-5.5.4" ]; then
wget http://archive.apache.org/dist/lucene/solr/5.5.4/solr-5.5.4.tgz
tar -xvzf solr-5.5.4.tgz
rm solr-5.5.4.tgz
fi
# configure Solr and start
# Seed the 'aerolyzer' core config from the data-driven configset, then
# overlay the app's own schema.xml and solrconfig.xml.
if [ ! -d "$INST_DIR/solr-5.5.4/server/solr/aerolyzer" ]; then
mkdir solr-5.5.4/server/solr/aerolyzer
mkdir solr-5.5.4/server/solr/aerolyzer/data
cp -R solr-5.5.4/server/solr/configsets/data_driven_schema_configs/conf solr-5.5.4/server/solr/aerolyzer
cp $APP/schema.xml solr-5.5.4/server/solr/aerolyzer/conf
cp $APP/solrconfig.xml solr-5.5.4/server/solr/aerolyzer/conf
fi
# start Solr
$INST_DIR/solr-5.5.4/bin/solr start
# add aerolyzer core
$INST_DIR/solr-5.5.4/bin/solr create -c aerolyzer
$INST_DIR/solr-5.5.4/bin/solr restart
# update Solr index every hr
# (append to the current crontab via a scratch file, then remove it)
crontab -l > $INST_DIR/mycron
echo "0 * * * * $APP/Aerolyzer/manage.py update_index" >> $INST_DIR/mycron
crontab $INST_DIR/mycron
rm $INST_DIR/mycron
# Create the virtualenv parent directory on first run.
if [ ! -d "$INST_DIR/.virtualenvs/" ]; then
    mkdir "$INST_DIR/.virtualenvs/"
fi
# Create the 'aerolyzer' virtualenv if it does not exist yet.
if [ ! -d "$INST_DIR/.virtualenvs/aerolyzer" ]; then
    cd "$INST_DIR/.virtualenvs/"
    # BUG FIXES vs. original: the pip package is 'virtualenv' (the original
    # installed the unrelated 'virtualenvs'), and after installing it the
    # env was never created, so the 'activate' below failed on fresh boxes.
    if ! command -v virtualenv > /dev/null; then
        pip install virtualenv
    fi
    virtualenv aerolyzer
    echo "Virtualenv env created at $INST_DIR/.virtualenvs/aerolyzer"
fi
# start aerolyzer virtual env
source "$INST_DIR/.virtualenvs/aerolyzer/bin/activate"
cd "$APP/Aerolyzer"
# install requirements
pip install -r requirements.txt
echo "Requirements installed into aerolyzer virtualenv"
# BUG FIX: 'source deactivate' is conda syntax; virtualenv's activate
# defines a 'deactivate' shell function that is called directly.
deactivate
echo "Successful setup environment for Aerolyzer App."
echo "**********************************************************"
echo "Please choose option to deploy Aerolyzer App: "
# NOTE(review): the quoting puts the ':' inside the prompt word and 'mode'
# as the variable name — it works, but reads oddly.
read -p "1 - in local mode OR 2 - production mode ": mode
# Local mode: run Django's dev server inside the virtualenv.
if [ $mode = "1" ]; then
# start aerolyzer virtual env
source $INST_DIR/.virtualenvs/aerolyzer/bin/activate
# update Aerolyzer
cd $APP/Aerolyzer
python manage.py migrate
echo "Running Aerolyzer App at http://127.0.0.1:8000/app"
# start Aerolyzer
python manage.py runserver
fi
# Production mode: delegate to the production launcher.
if [ $mode = "2" ]; then
sudo $APP/production/run_production.sh
fi
echo "We out fam!"
| true
|
3055e410ec3f856e7f07af7b075519436fa4f36f
|
Shell
|
BrickBot/Bound-T-H8-300
|
/boundt/platform/unix/run_oc
|
UTF-8
| 439
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
#
# run_oc: Run oc (Omega Calculator), capturing input and output into log-files.
#
# A component of the Bound-T Worst-Case Execution Time Tool.
# Copyright (c) 2004 Tidorum Ltd.
#
# $RCSfile: run_oc,v $
# $Revision: 1.1 $
# $Date: 2004-05-15 13:37:37 $
# $Name: $
#
# $Log: run_oc,v $
# Revision 1.1 2004-05-15 13:37:37 niklas
# First Tidorum version.
#
# With two arguments, stdin is duplicated into the input log ($1) and oc's
# output into the output log ($2); otherwise oc runs on the bare streams.
# FIX: quote the log-file paths so names with spaces are not word-split
# (ShellCheck SC2086).
if [ "$#" = "2" ]; then
	tee "$1" | oc | tee "$2"
else
	oc
fi
| true
|
ce367900fff72d00b7a3c9a7189cef351be02eaf
|
Shell
|
l-lin/dev-cheat-sheet
|
/shell/capslock-to-ctrl.sh
|
UTF-8
| 682
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/sh
# Turn Caps Lock into an extra Ctrl key via setxkbmap (X11 only; resets
# when the session restarts unless autostarted as described below).
# http://askubuntu.com/questions/462021/how-do-i-turn-caps-lock-into-an-extra-control-key#answer-521734
# -----------------------------------------
# To swap the keys go:
# Xubuntu → Settings Manager → Session and Startup
# Then in the Sessions and Startup configurator go
# Application Autostart (tab at the top) → Add (bottom button)
# Now on the Add Application screen
# Name: Control and CapsLk swap
# Description: Swap the two keys
# Command: /usr/bin/setxkbmap -option "ctrl:nocaps"
# -----------------------------------------
/usr/bin/setxkbmap -option "ctrl:nocaps"
# This one will swap ctrl and capslock
#/usr/bin/setxkbmap -option "ctrl:swapcaps"
| true
|
f3779ee7d7f3d7b82b6d53bb008de41008682e10
|
Shell
|
garabik/tidbits
|
/9wi/rotate_desktop.sh
|
UTF-8
| 2,817
| 3.96875
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
#
# rotate_desktop.sh
#
# Rotates modern Linux desktop screen and input devices to match. Handy for
# convertible notebooks. Call this script from panel launchers, keyboard
# shortcuts, or touch gesture bindings (xSwipe, touchegg, etc.).
#
# Using transformation matrix bits taken from:
# https://wiki.ubuntu.com/X/InputCoordinateTransformation
#
# based on https://gist.github.com/mildmojo/48e9025070a2ba40795c
# tuned for UMAX Visionbook 9wi pro
# usage: rotate_desktop.sh [rotation]
# where rotation is one of left, right, normal, inverted
# without arguments, switch between normal/right or inverted/left
# Configure these to match your hardware (names taken from `xinput` output).
TOUCHPAD='pointer:HS-M962-CS-A3-19-00 USB KEYBOARD'
TOUCHSCREEN='FTSC0001:00 2808:1015'
if [ "$1" = '-h' ]; then
echo "Usage: $0 [normal|inverted|left|right]"
echo
exit 1
fi
# where to keep last rotation status
# use /tmp only on single user machines, otherwise use something like ~/.cache/.umax-last-rot
LAST_ROT_FILE=/tmp/.umax-last-rot-$(id -u)
# Rotate an output and realign input devices.
#   $1 - xrandr output name (e.g. eDP-1)
#   $2 - orientation: normal | inverted | left | right
# Each xinput matrix is a 3x3 coordinate transformation (row-major, 9 values)
# per the Ubuntu wiki cited in the header. NOTE(review): the touchpad and
# touchscreen get different matrices for the same orientation — presumably
# the touchpad on this device is mounted rotated; confirm on other hardware.
# Empty TOUCHPAD/TOUCHSCREEN names are skipped via the -z guards.
function do_rotate
{
    xrandr --output $1 --rotate $2

    TRANSFORM='Coordinate Transformation Matrix'

    case "$2" in
        normal)
            [ ! -z "$TOUCHPAD" ] && xinput set-prop "$TOUCHPAD" "$TRANSFORM" 0 -1 1 1 0 0 0 0 1 || true
            [ ! -z "$TOUCHSCREEN" ] && xinput set-prop "$TOUCHSCREEN" "$TRANSFORM" 1 0 0 0 1 0 0 0 1
            ;;
        inverted)
            [ ! -z "$TOUCHPAD" ] && xinput set-prop "$TOUCHPAD" "$TRANSFORM" 0 1 0 -1 0 1 0 0 1 || true
            [ ! -z "$TOUCHSCREEN" ] && xinput set-prop "$TOUCHSCREEN" "$TRANSFORM" -1 0 1 0 -1 1 0 0 1
            ;;
        left)
            [ ! -z "$TOUCHPAD" ] && xinput set-prop "$TOUCHPAD" "$TRANSFORM" -1 0 1 0 -1 1 0 0 1 || true
            [ ! -z "$TOUCHSCREEN" ] && xinput set-prop "$TOUCHSCREEN" "$TRANSFORM" 0 -1 1 1 0 0 0 0 1
            ;;
        right)
            [ ! -z "$TOUCHPAD" ] && xinput set-prop "$TOUCHPAD" "$TRANSFORM" 1 0 0 0 1 0 0 0 1 || true
            [ ! -z "$TOUCHSCREEN" ] && xinput set-prop "$TOUCHSCREEN" "$TRANSFORM" 0 1 0 -1 0 1 0 0 1
            ;;
    esac
}
# Detect the first connected output reported by xrandr.
# ($() replaces backticks; expansions quoted against odd output.)
XDISPLAY=$(xrandr --current | grep ' connected' | head -1 | sed -e 's/ .*//g')
echo "Detected display $XDISPLAY"

# Last recorded rotation, defaulting to "none" on the first run.
last_rot=none
[ -f "$LAST_ROT_FILE" ] && last_rot=$(cat "$LAST_ROT_FILE")
echo "Last rotation status: $last_rot"

if [ -z "$1" ]; then
    # No explicit rotation requested: toggle based on the previous state
    # (normal <-> right, left <-> inverted).
    case $last_rot in
        none)
            # First ever run: pick right or normal at random.
            if [ $((RANDOM%2)) = 1 ]; then
                ROT=right
            else
                ROT=normal
            fi
            ;;
        right)
            ROT=normal
            ;;
        normal)
            ROT=right
            ;;
        left)
            ROT=inverted
            ;;
        inverted)
            ROT=left
            ;;
        *)
            ROT=normal
            ;;
    esac
else
    ROT=$1
fi
echo "New rotation: $ROT"

do_rotate "$XDISPLAY" "$ROT"

# Persist the rotation unless the path exists but is not a regular file.
# (Replaces the deprecated, ambiguous `[ A -o B ]` form with `||`, and the
# unnecessary subshell around the redirect; printf avoids echo -n quirks.)
if [ -f "$LAST_ROT_FILE" ] || [ ! -e "$LAST_ROT_FILE" ]; then
    printf '%s' "$ROT" > "$LAST_ROT_FILE"
fi
| true
|
d40d576f2e52c0c552f517c25bb5b00a74dc1426
|
Shell
|
jhermann/waif
|
/svg/svg2png.sh
|
UTF-8
| 3,058
| 4.15625
| 4
|
[
"Unlicense"
] |
permissive
|
#! /bin/bash
# Create bitmaps from SVG files.
#
# Converts all SVG files in the current directory according
# to passed parameters:
#
#   -n: dry run option (just show the commands).
#
#   --layer n name: select the ‹n›th layer below the top one,
#       delete the others (for ‹n›=1…3); the ‹name› is
#       used as part of the rendered PNG file.
#
#   positional arguments: widths of the rendered PNGs.
#
# File dates are used to skip already rendered images.
#
# To use this as a function library, source it:
#
#   source svg2png.sh funcdef
#
set -e

# Run inkscape under a virtual X server so it works headless.
use_xvfb=true
inkscape=/usr/bin/inkscape
# xvfb-run error log, named after this script
log=/tmp/$(basename "$0")"-xvfb.log"
# Set to "echo " by the -n option to print commands instead of running them.
DRY_RUN=""

if $use_xvfb; then
    command which xvfb-run >/dev/null || { echo 'Please execute "sudo aptitude install xvfb"!'; exit 1; }
fi
# Print the modification time of $1 in epoch seconds, or "0" when the path
# does not exist (or stat fails), so callers can compare ages numerically.
_svg2png_mtime() {
    local target="$1"
    if test -e "$target"; then
        stat --format "%Y" "$target" || echo "0"
    else
        echo "0"
    fi
}
# Print the usage line, optionally followed by an error message given in the
# arguments, then abort the script with exit status 1.
_svg2png_usage() {
    echo "Usage: $0 [-n] [--layer # name] <width>..."
    if test -n "${1:-}"; then
        echo "$@"
    fi
    exit 1
}
# Render one SVG file to PNG at the requested widths.
#   $1   - the SVG file
#   rest - options (--layer n name) followed by one or more widths
# When --layer is used, the SVG is copied to a temp file, the unwanted
# layers are deleted via inkscape verbs, and rendering happens from that
# temp copy. Already up-to-date PNGs (newer than the SVG) are skipped.
svg2png() {
    local svg_file="$1"; shift
    local inkscape_opts=( )
    # name fragment appended to output files when layers are selected
    local name_traits=""

    # Consume leading options.
    while test "${1:0:1}" = '-'; do
        case "$1" in
            --layer)
                name_traits="${name_traits}_$3"
                # Show all layers, then walk down 3 layers deleting all but
                # the requested one; save and quit afterwards.
                inkscape_opts+=( --verb=LayerShowAll )
                for i in $(seq 1 3); do
                    inkscape_opts+=( --verb=LayerPrev )
                    test $i = $2 || inkscape_opts+=( --verb=LayerDelete )
                done
                inkscape_opts+=( --verb=FileSave --verb=FileQuit )
                shift; shift
                ;;
            *)
                _svg2png_usage "ERROR: Unknown option '$1'"
                ;;
        esac
        shift
    done

    # Without layer selection, render directly from the original file.
    svg_tmp="${svg_file}"
    test -z "$name_traits" || svg_tmp="/tmp/$USER-$(basename "${svg_file/%.svg/}")${name_traits}.svg"

    # NOTE(review): $@ is unquoted here; widths are plain numbers so this
    # works, but "$@" would be the safe form.
    for width in $@; do
        png_file="${svg_file/%.svg/}${name_traits}_$width.png"
        # Skip PNGs that are at least as new as the source SVG.
        if test $(_svg2png_mtime "$png_file") -lt $(_svg2png_mtime "$svg_file"); then
            # Prepare the (layer-stripped) temp copy once, on first use.
            if test ! -f "$svg_tmp"; then
                # Allow local X connections unless running under CI
                # (BUILD_URL is presumably set by Jenkins — TODO confirm).
                if $use_xvfb && test -z "$BUILD_URL"; then
                    xhost +localhost >/dev/null
                    xhost +$(hostname -f) >/dev/null
                fi
                cp "$svg_file" "$svg_tmp"
                if $use_xvfb; then
                    $DRY_RUN xvfb-run -a -n 42 -s " -extension RANDR " -e "$log" \
                        $inkscape -g -f "$svg_tmp" "${inkscape_opts[@]}"
                else
                    $DRY_RUN >>"$log" \
                        $inkscape -g -f "$svg_tmp" "${inkscape_opts[@]}"
                fi
            fi
            # Render the PNG at the requested width (-z: no GUI).
            $DRY_RUN $inkscape -z -w $width -e "$png_file" "$svg_tmp"
        fi
    done

    # Remove the temp copy if one was created; never fail the script here.
    test "$svg_tmp" = "$svg_file" || $DRY_RUN rm -- "$svg_tmp" 2>/dev/null || :
}
# When sourced with "funcdef", stop here and only provide the functions.
if test "$1" = "funcdef"; then
    return 0
fi

test -n "$1" || _svg2png_usage

# -n: dry run — prefix every external command with "echo".
if test "$1" = "-n"; then
    DRY_RUN="echo "
    shift
fi

#set -x
# Convert every SVG below the current directory. IFS= and -r keep leading
# whitespace and backslashes in filenames intact (the original bare `read`
# mangled backslashes).
find . -name '*.svg' | while IFS= read -r svg_file; do
    svg2png "$svg_file" "$@"
done
| true
|
6da12683bbfb26488f112a7a0820527946541940
|
Shell
|
davidosomething/dotfiles
|
/bin/enginx
|
UTF-8
| 406
| 3.9375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# enginx
#
# Locate and edit the main nginx configuration file as root
#

_enginx() {
  local conf_path
  # `nginx -t` reports the configuration file it tested; pull the path out
  # of that message (the last field of the first matching line).
  conf_path="$(sudo nginx -t 2>&1 | grep -Eom1 "configuration file (\S*) " | awk 'END { print $NF }')"
  if [ ! -f "$conf_path" ]; then
    >&2 echo "No nginx.conf file found"
    exit 1
  fi
  printf 'Editing %s...\n' "$conf_path"
  # sudo -e (sudoedit) opens the file with the user's editor as root.
  sudo -e "$conf_path"
}

_enginx
| true
|
934bd22e8a2657b6294a106df490206bf04a962d
|
Shell
|
abtreece/confd
|
/test/integration/file/test_yaml.sh
|
UTF-8
| 758
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Integration test for confd's "file" backend with YAML sources: create two
# backend directories holding YAML documents (with and without .yml/.yaml
# extensions), run confd once over both, then remove the fixtures.

export HOSTNAME="localhost"
mkdir backends1 backends2

# Extension-less YAML file — the file backend should still read it.
cat <<EOT >> backends1/1
key: foobar
database:
  host: 127.0.0.1
  password: p@sSw0rd
  port: "3306"
  username: confd
EOT

cat <<EOT >> backends1/2.yml
upstream:
  app1: 10.0.1.10:8080
  app2: 10.0.1.11:8080
EOT

cat <<EOT >> backends2/1.yaml
nested:
  app1: 10.0.1.10:8080
  app2: 10.0.1.11:8080
EOT

# Keys under "prefix" overlap with backends1 — exercises merging across
# multiple --file sources.
cat <<EOT >> backends2/2.yaml
prefix:
  database:
    host: 127.0.0.1
    password: p@sSw0rd
    port: "3306"
    username: confd
  upstream:
    app1: 10.0.1.10:8080
    app2: 10.0.1.11:8080
EOT

# Run confd
confd --onetime --log-level debug --confdir ./test/integration/confdir --backend file --file backends1/ --file backends2/ --watch

# Clean up after
rm -rf backends1 backends2
| true
|
81f8cce2b2dd192aa29ad22fe68f0dafae3a3fe8
|
Shell
|
composition-project/border-gateway
|
/test/test_ws_and_mqtt.sh
|
UTF-8
| 4,706
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Integration test driving the gateway over generic websockets,
# MQTT-over-websockets, and plain MQTT (mosquitto_pub), with bad and good
# credentials and with OAuth access tokens.
#
# Positional parameters:
#   $1  CA certificate file          $2  gateway host
#   $3  "true" for TLS (wss/--cafile), anything else for plain ws
#   $4  MQTT port                    $5  websocket port
#   $6  username                     $7  password
#   $8  OAuth token endpoint         $9  OAuth audience
#   $10 OAuth client id              $11 OAuth client secret
CA=$1
host=$2
if $3
then
    wsProtocol="wss"
    mqttSecureParams="--debug --cafile $CA"
else
    wsProtocol="ws"
    mqttSecureParams="--debug"
fi
echo "cat $CA"
cat "$CA"
mqttPort=$4
wsPort=$5
user=$6
pass=$7
tokenEndpoint=$8
audience=$9
client_id="${10}"
client_secret="${11}"

# Run from the script's own directory so the relative node client paths work.
scriptDir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
echo "scriptDir = $scriptDir"
cd $scriptDir

testWebsockets="./mqtt_over_websocket/index.js"
testWebsocketsGeneric="./generic_websocket/index.js"

# Generic websocket client: must be rejected (exit 1) without credentials.
# NOTE(review): in every `if [ $? -ne N ]` block below, the inner
# `echo "exit code = $?"` prints the status of the [ ] test itself (0),
# not the client's actual exit code — the logged value is misleading.
echo "generic websockets no token"
node "$testWebsocketsGeneric" "$CA" "$wsProtocol://$host:$wsPort/"
if [ $? -ne 1 ]; then
    echo "exit code = $?"
    exit 1
fi
echo "generic websockets wrong token"
node "$testWebsocketsGeneric" "$CA" "$wsProtocol://$host:$wsPort/?access_token=123"
if [ $? -ne 1 ]; then
    echo "exit code = $?"
    exit 1
fi
echo "generic websockets wrong password"
node "$testWebsocketsGeneric" "$CA" "$wsProtocol://$host:$wsPort/?basic_auth=123"
if [ $? -ne 1 ]; then
    echo "exit code = $?"
    exit 1
fi
# Correct basic-auth credentials (base64 of user:pass) must be accepted.
echo "generic websockets correct password"
basedPassword=$(echo -n "$user:$pass" | base64)
node "$testWebsocketsGeneric" "$CA" "$wsProtocol://$host:$wsPort/?basic_auth=$basedPassword"
if [ $? -ne 0 ]; then
    echo "exit code = $?"
    exit 1
fi

# Obtain an OAuth access token via the resource-owner password grant.
access_token=$(curl --cacert $CA --silent -d "client_id=$client_id" -d "client_secret=$client_secret" -d "username=$user" -d "password=$pass" -d "grant_type=password" -d "audience=$audience" -L "$tokenEndpoint" | jq -r ".access_token")
echo "access_token: $access_token"
echo "generic websockets correct token"
node "$testWebsocketsGeneric" "$CA" "$wsProtocol://$host:$wsPort/?access_token=$access_token"
if [ $? -ne 0 ]; then
    echo "exit code = $?"
    exit 1
fi

# MQTT-over-websocket client authenticated with username/password.
echo "websockets with user/pass qos 2"
node "$testWebsockets" "$CA" "$wsProtocol"://"$host:$wsPort/?access_token=$access_token" $user $pass 2
if [ $? -ne 0 ]; then
    echo "exit code = $?"
    exit 1
fi
echo "websockets with user/pass qos 0"
node "$testWebsockets" "$CA" "$wsProtocol"://"$host:$wsPort/?access_token=$access_token" $user $pass 0
if [ $? -ne 0 ]; then
    echo "exit code = $?"
    exit 1
fi
# Anonymous MQTT credentials must be rejected.
echo "websockets with user/pass qos 0 anonymous"
node "$testWebsockets" "$CA" "$wsProtocol"://"$host:$wsPort/?access_token=$access_token" anonymous anonymous 0
if [ $? -ne 1 ]; then
    echo "exit code = $?"
    exit 1
fi

# Fresh token, then authenticate MQTT using the token as username
# (empty password).
access_token=$(curl --cacert $CA --silent -d "client_id=$client_id" -d "client_secret=$client_secret" -d "username=$user" -d "password=$pass" -d "grant_type=password" -d "audience=$audience" -L "$tokenEndpoint" | jq -r ".access_token")
echo "access_token: $access_token"
echo "websockets with user/pass qos 2"
node "$testWebsockets" "$CA" "$wsProtocol"://"$host:$wsPort/?access_token=$access_token" $access_token "" 2
if [ $? -ne 0 ]; then
    echo "exit code = $?"
    exit 1
fi
echo "websockets with user/pass qos 0"
node "$testWebsockets" "$CA" "$wsProtocol"://"$host:$wsPort/?access_token=$access_token" $access_token "" 0
if [ $? -ne 0 ]; then
    echo "exit code = $?"
    exit 1
fi

# Plain MQTT via mosquitto_pub.
# NOTE(review): every `eval "$command$"` below evaluates the command with a
# stray trailing literal '$' — presumably a typo for `eval "$command"`;
# verify it is harmless for mosquitto_pub before relying on it.
echo "mosquitto_pub anonymous"
command="mosquitto_pub $mqttSecureParams -h $host -p $mqttPort -d -t LS/test -m \"hello there\" -q 0"
echo "$command"
eval "$command$"
exitCode=$?
# formerly exit code 5 was returned!
if [ "$exitCode" -ne 5 ] && [ "$exitCode" -ne 0 ]; then
    echo "exit code = $exitCode"
    exit 1
fi
echo "mosquitto_pub user/pass qos 2"
command="mosquitto_pub $mqttSecureParams -h $host -p $mqttPort -d -t LS/test -m \"hello there\" -u \"$user\" -P \"$pass\" -q 2"
echo "$command"
eval "$command$"
if [ $? -ne 0 ]; then
    echo "exit code = $?"
    exit 1
fi
# Repeat QoS-0 publishes; their exit codes are intentionally not checked.
echo "mosquitto_pub user/pass qos 0"
for var in 1 2 3 4 5 6 7 8 9 10
do
    command="mosquitto_pub $mqttSecureParams -h $host -p $mqttPort -d -t LS/test -m \"hello there\" -u \"$user\" -P \"$pass\" -q 0"
    echo "$command"
    eval "$command$"
done

access_token=$(curl --cacert $CA --silent -d "client_id=$client_id" -d "client_secret=$client_secret" -d "username=$user" -d "password=$pass" -d "grant_type=password" -d "audience=$audience" -L "$tokenEndpoint" | jq -r ".access_token")
echo "access token: $access_token"
echo "mosquitto pub access token qos 2"
command="mosquitto_pub $mqttSecureParams -h $host -p $mqttPort -d -t LS/test -m \"hello there\" -u $access_token -q 2"
echo "$command"
eval "$command$"
if [ $? -ne 0 ]; then
    echo "exit code = $?"
    exit 1
fi
echo "mosquitto_pub access token qos 0"
for var in 1 2 3 4 5 6 7 8 9 10
do
    command="mosquitto_pub $mqttSecureParams -h $host -p $mqttPort -d -t LS/test -m \"hello there\" -u $access_token -q 0"
    echo "$command"
    eval "$command$"
done

printf "\n"
echo "Test run successful!"
| true
|
9b824586597c3403257f7f228b02c89a3cf1c64c
|
Shell
|
sarahgarcin/flux-ecran-papier
|
/printing-tools/print-deamon.sh
|
UTF-8
| 429
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# set -x

# Print daemon: sends every PDF found in $printinbox to the printer, then
# moves it to $archivebox.
#   find the printer:  lpstat -p
#   install watch:     brew install watch
#   launch:            watch -n 10 bash print-deamon.sh
printer=Canon_LBP7100C_7110C
archivebox="archivebox/"
printinbox="printbox/"

# main loop (-p keeps repeated runs under `watch` from erroring when the
# directories already exist)
mkdir -p "$archivebox" "$printinbox"

# NUL-delimited find/read handles filenames containing spaces or newlines;
# the original `for step in $(find ...)` word-split such names.
find "$printinbox" -iname "*.pdf" -type f -print0 |
while IFS= read -r -d '' step; do
    lpr -P "$printer" -o media=A4 -o fit-to-page "$step"
    mv -v "$step" "$archivebox"   # copy in outbox (archives)
done
| true
|
f2884e79eafbe0fdd3a5339ad6a7b9652f1a1510
|
Shell
|
TrongTan124/OpenStack-Mitaka-Scripts
|
/OPS-Mitaka-OVS-Ubuntu/scripts/start-install-compute1.sh
|
UTF-8
| 270
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash -ex
# Pre-flight for the compute1 node install: load the shared configuration
# and helper functions, then echo the key settings for verification.
source config.cfg
source functions.sh

WORK_DIR=$(dirname "$0")

# Re-check the config file values.
# FIX: the original labels said "Controller" (copy-paste from the
# controller script) and swapped the ext/management addresses.
echo "IP Compute1 mgnt is $COM1_MGNT_IP"
echo "IP Compute1 ext is $COM1_EXT_IP"
echo "Hostname Compute1 is $HOST_COM1"
echo "Password default is $DEFAULT_PASS"
| true
|
57761cdf3250bdcb6fbc8fa00255357b98deb853
|
Shell
|
akatbo/s2i-micros-build
|
/S2iScripts/assemble
|
UTF-8
| 955
| 3.328125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
#
# S2I assemble script for the 'book-custom-s2i' image.
# The 'assemble' script currently only supports binary builds: it runs the
# Maven build staged under /opt/app/src and stages the single resulting jar
# as /opt/app/output/service-server.jar.
#
# For more information refer to the documentation:
#	https://github.com/openshift/source-to-image/blob/master/docs/builder_image.md
#

# Quoted and defaulted: the original unquoted `[ ! -z ${S2I_DEBUG} ]` breaks
# when the variable contains whitespace.
if [ -n "${S2I_DEBUG:-}" ]; then
    echo "turning on assembly debug";
    set -x
fi

# Legacy single-war binary deployment, kept for reference:
# if [ $(ls /opt/app/src/*.jar | wc -l) -eq 1 ]; then
#   mv /opt/app/src/*.jar /opt/app/openshift-app.jar
# else
#   echo "Jar not found in /opt/app/src/"build
#   exit 1
# fi
# ls /opt/app/src

cd /opt/app/src
mvn clean package -U

# Binary deployment is a single jar: stage it under /opt/app/output.
if [ "$(ls /opt/app/src/target/*.jar | wc -l)" -eq 1 ]; then
    rm -rf /opt/app/src/output
    mkdir -p /opt/app/output
    cd /opt/app/output
    mv /opt/app/src/target/*.jar service-server.jar
else
    echo "Jar not found in /opt/app/src/target"
    exit 1
fi
# ls -l
# mvn clean package -u
| true
|
6df165b7b79f8ffb4a5e7a7d1b3bbf49188bcdac
|
Shell
|
ajeddeloh/scripts
|
/sort_music_collection.sh
|
UTF-8
| 1,441
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
#Sample script showing how to use muprint to generate paths and use those paths
#To sort music. This version uses copy instead of move for safety
#Run from the project root or change ./muprint to be approriate

#TODO: make these arguments
# $1 - source directory to scan; $2 - destination root for the sorted tree
SRC_DIR="$1"
DST_DIR="$2"
# Copy one audio file into the sorted collection.
#   $1 - source file
#   $2 - destination root directory
# Files in containers that confuse mime detection (.ogg, .mp4) are first
# remuxed with ffmpeg into a scratch file so muprint reads the right type.
handle_file() {
    # scratch file to use when transcoding
    local TMPFILE='/tmp/scratch'
    # $1 - file to move
    local file="$1"
    # $2 - where to move it
    local dest_root="$2"
    # extension to force in the destination name when remuxing
    local EXT=""

    # fix some broken containers so mimedb will work properly
    if [[ $file =~ \.ogg ]]; then
        # need to convert to an ogg audio container
        rm -f "$TMPFILE"
        ffmpeg -i "$file" -acodec copy -f oga "$TMPFILE" -hide_banner -loglevel fatal
        file="$TMPFILE"
        EXT='ogg'
    elif [[ $file =~ \.mp4 ]]; then
        # need to convert to an m4a container
        # BUG FIX: was `rm -f "TMPFILE"` (a literal string, not the variable)
        rm -f "$TMPFILE"
        ffmpeg -i "$file" -acodec copy -f ipod "$TMPFILE" -hide_banner -loglevel fatal
        file="$TMPFILE"
        # BUG FIX: was lowercase `ext='m4a'`, so the extension override
        # never took effect for mp4 inputs
        EXT='m4a'
    fi

    # Ask muprint for the destination path built from the tags.
    local dst="$(muprint -s 'A%ua' -s 't%uf' -r _ "$dest_root/%uA/%ub/%ut.%ue$EXT" "$file")"
    #debug echo
    #echo "$dst"
    mkdir -p "$(dirname "$dst")"
    cp "$file" "$dst"

    #cleanup
    rm -f "$TMPFILE"
}
# Export handle_file so the per-file subshells spawned by find can call it.
# NOTE(review): exported bash functions are only visible to child *bash*
# shells — on systems where /bin/sh is not bash (e.g. dash) the `sh -c`
# below will not see handle_file; confirm, or use `bash -c` instead.
#make move_file available to subshells
export -f handle_file
find "$SRC_DIR" -type f -exec sh -c 'handle_file "$1" "$2"' _ {} "$DST_DIR" \;
| true
|
b742edb044b009a9d05309df1ea5350b0ef8fa50
|
Shell
|
kestarumper/jftt
|
/kompilator/singleTest.sh
|
UTF-8
| 235
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Compile one source file with ./kompilator into TESTS_OUT/<name>.mr and,
# on success, run the produced code on the register machine. The compiler's
# exit status is propagated on failure.
filename="$1"
# Quoted (the original unquoted basename broke on paths with spaces);
# computed once and reused for both steps.
out="TESTS_OUT/$(basename "$filename").mr"

./kompilator "$filename" "$out"
rc=$?
if [[ $rc != 0 ]]
then
    exit $rc
fi

echo "TEST [$filename]"
./maszyna_rejestrowa/maszyna-rejestrowa "$out"
| true
|
46ece74110b32110c42d9c51fe9bf0d5f1914186
|
Shell
|
DirkSun99/git-auto-commit
|
/gitcommit.sh
|
UTF-8
| 1,037
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Replay a list of commits described in git.log: for each record, append to
# the data file, set the system clock to the recorded timestamp, and commit
# as the recorded author with the recorded message.
#
# git.log record format (one per line, fields joined by $SEPARATOR):
#   name SEP email SEP timestamp SEP comment

# Path of the commit log file
GIT_LOG_PATH="/opt/shell/git.log"
# Field separator; must match the separator used inside git.log
SEPARATOR="AAAA"
# File modified before every commit
COMMIT_FILE_PATH="/opt/gitrep/data.txt"
# Git repository path
GIT_REPOSITORY="/opt/gitrep"

# Read one record per line. (The original `for line in $(cat ...)` split on
# any whitespace, corrupting records that contain spaces.)
while IFS= read -r line
do
    # Split the record on the separator into name/email/time/comment.
    array=(${line//${SEPARATOR}/ })
    # committer identity
    commit_user_name=${array[0]}
    commit_user_email=${array[1]}
    # commit timestamp
    commit_time=${array[2]}
    # commit message
    commit_comments=${array[3]}
    echo "提交人姓名${commit_user_name},提交人邮箱${commit_user_email},提交时间${commit_time},提交备注${commit_comments}"
    # Append content so there is always a change to commit.
    echo "aaa" >> ${COMMIT_FILE_PATH}
    # Set the system time to the recorded commit time.
    date ${commit_time}
    # Enter the git repository.
    cd ${GIT_REPOSITORY}
    # Configure the committer.
    # BUG FIX: the original read ${commit_user_emaill} (typo with an extra
    # 'l'), so the configured email was always empty.
    git config user.email "${commit_user_email}"
    git config user.name "${commit_user_name}"
    # Stage the modified file.
    git add ${COMMIT_FILE_PATH}
    # Commit with the recorded message.
    git commit -m "${commit_comments}"
done < "${GIT_LOG_PATH}"
| true
|
b2d9f90247efac1803eb0f22ee3899a22fd0f8e7
|
Shell
|
kjsanger/qcat-ncov2019-artic-nf
|
/recipe/build.sh
|
UTF-8
| 404
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Conda recipe build script: builds the artic nanopore Singularity image
# inside the bundled ncov2019-artic-nf checkout, then stages the wrapper
# script and the nextflow tree (including the checkout) into $PREFIX.

set -euo pipefail

mkdir -p $PREFIX/bin
mkdir -p $PREFIX/share

# Build the container image from the pipeline's Singularity definition.
pushd ncov2019-artic-nf
singularity build --fakeroot artic-ncov2019-nanopore.sif \
./environments/nanopore/Singularity
popd

cp $RECIPE_DIR/../bin/qcat-ncov2019-artic-nf.sh $PREFIX/bin
cp -r $RECIPE_DIR/../nextflow -t $PREFIX/share

# Drop CI metadata before packaging the checkout.
rm -r ncov2019-artic-nf/.github
cp -r ncov2019-artic-nf -t $PREFIX/share/nextflow
| true
|
aab5dbd54fa5fddc9c4856de71e14ed811c290b2
|
Shell
|
ecmwf/eccodes
|
/tests/grib_ecc-600.sh
|
UTF-8
| 1,158
| 2.953125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# (C) Copyright 2005- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities granted to it by
# virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction.
#

# Provides ${data_dir}, ${tools_dir} and the test harness settings —
# presumably including `set -e`, so a failing grep fails the test; confirm
# against include.ctest.sh.
. ./include.ctest.sh

# ---------------------------------------------------------
# This is the test for the JIRA issue ECC-600.
# Nearest neighbour for rotated lat/lon grids.
# ---------------------------------------------------------
label="grib_ecc-600_test"
tempOut=temp.${label}.out
tempGrib=temp.${label}.grib
input=${data_dir}/tigge/tiggelam_cnmc_sfc.grib

# Extract the 4th message, then ask grib_ls for the nearest grid points to
# two lat/lon locations and assert on the expected neighbours.
${tools_dir}/grib_copy -w count=4 $input $tempGrib
${tools_dir}/grib_ls -l 40,0,1 $tempGrib > $tempOut
grep -q "Grid Point chosen #3 index=54294 latitude=39.98 longitude=0.00 distance=2.03 (Km)" $tempOut

${tools_dir}/grib_ls -l 50,-10,1 $tempGrib > $tempOut
grep -q "Grid Point chosen #3 index=145684 latitude=49.99 longitude=-9.97 distance=2.57 (Km)" $tempOut

# Clean up
rm -f $tempOut $tempGrib
| true
|
a1d400c7c0a3b532c00eac107c8fee5400e9e92f
|
Shell
|
dmolina/template_course_pandoc
|
/common/createma.sh
|
UTF-8
| 654
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
# Create a new topic directory for the course from the template.
#   $1 - topic (tema) name
#   $2 - course (asignatura) name
# Error paths now exit with status 1 (the bare `exit` returned 0).
if [ "$#" -ne 2 ]; then
    echo "Missing argument: <topic> <asig>"
    exit 1
fi

tema=$1
asig=$2
dir=../$tema

if [ -d "$dir" ]; then
    echo "Directory '$dir' already exists"
    exit 1
fi

# BUG FIX: the original used `mkdir $dir || (echo ... && exit)` — the exit
# ran inside a subshell, so the script carried on after a failed mkdir.
mkdir "$dir" || { echo "Cannot create directory '$dir'"; exit 1; }

cp template.md "$dir/${tema}.md"
# Seed the topic makefile with its variables, then append the shared rules.
echo "TEMA=$tema" > "$dir/makefile"
echo "ASIG=$asig" >> "$dir/makefile"
cat makefile >> "$dir/makefile"

# Link the shared minted helper files into the new topic directory.
for file in *minted*; do
    ln -s "$PWD/$file" "$dir/$file"
done
mkdir "$dir/figures"
# NOTE(review): no prior cd has happened, so OLDPWD may be unset here and
# `cd -` will just print an error — confirm this line is intentional.
cd -

# Install packages
for package in pandoc-beamer-block pandoc-include pandoc-latex-fontsize; do
    # Same subshell-exit fix as above.
    pip install "$package" || { echo "Error installing package '$package' with pip"; exit 1; }
done
| true
|
80b57fc9a7c41c56e622be8a10aed6ac74a72542
|
Shell
|
carlosival/aws_metadata_expose
|
/user-data.sh
|
UTF-8
| 1,258
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Publishes this EC2 instance's public IP and public DNS name as system-wide
# environment variables by writing an export script into /etc/profile.d.
#
# NOTE(review): the two aws CLI lines below are visibly truncated (they end
# mid-argument, e.g. --region 'us$ and --r$), leaving unbalanced quotes and
# unclosed $( ... ) substitutions — as committed this script cannot parse.
# Restore the complete commands before use.
# Use `set` to trace the script line by line, for debugging.
#set -e -x
# ID of the instance running this script; TODO automate obtaining it.
instance_ids="i-08x58x4x478x1"
# Variables declared under /etc/profile.d are visible to all system users.
expose_file="/etc/profile.d/expose.sh"
# File for the script's log output.
log_file="/opt/init/ec2/user-data.log"
# Obtain the instance's public IP (it changes on every stop/start).
ip_public=$(aws ec2 describe-instances --instance-ids $instance_ids --query 'Reservations[*].Instances[*].PublicIpAddress' --region 'us$
dns_public_name=$(aws ec2 describe-instances --instance-id i-0xa5x64xx8a4x1 --query 'Reservations[*].Instances[*].PublicDnsName' --r$
# Create the file holding the export commands picked up via /etc/profile.
touch $expose_file
chmod +x $expose_file
echo export PUBLIC_IP_ADDRESS="$ip_public" > $expose_file
echo export PUBLIC_DNS_NAME="$dns_public_name" >> $expose_file
echo "$ip_public" > $log_file
echo "$dns_public_name" >> $log_file
# Source /etc/profile without requiring a user login. Review the security
# implications.
source "/etc/profile"
| true
|
07272a76fd1ebe453229f92c2beea29925900322
|
Shell
|
hyperledgerkochi/kobman-env-repo
|
/test-kobman-java-dev.sh
|
UTF-8
| 972
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Smoke test for the KOBman java-dev environment: checks that KOBman is
# installed, sources its helpers, runs the java-dev validator, and reports
# success/failure. $KOBMAN_DIR is expected from the caller's environment.

# Verify the KOBman installation, load its init/util scripts, and ensure a
# java binary is on the PATH.
function __test_kob_init
{
    if [[ ! -d $KOBMAN_DIR ]]; then
        echo "kob not found"
        echo "Please install KOBman and try again"
        echo "Exiting!"
        exit 1
    else
        source $KOBMAN_DIR/bin/kobman-init.sh
        source $KOBMAN_DIR/src/kobman-utils.sh
    fi

    if [[ -z $(which java) ]]; then
        echo "java packages not found"
        exit 1
    fi
}

##function __test_kob_execute

# Run the java-dev validator (provided by the sourced KOBman utils) and
# record a failure in $test_status.
function __test_kob_validate
{
    __kobman_validate_java-dev
    local ret_value=$?
    if [[ $ret_value == "1" ]]; then
        test_status="failed"
        return 1
    fi
    unset ret_value
}

# Orchestrate the test run and print the colored verdict.
function __test_kob_run
{
    test_status="success"
    __test_kob_init
    # __test_kob_execute
    __test_kob_validate
    # __test_kob_cleanup

    if [[ $test_status == "success" ]]; then
        __kobman_echo_green "test-kob-java-dev success"
    else
        __kobman_echo_red "test-kob-java-dev failed"
    fi
}

__test_kob_run
| true
|
7f6648f93183f0e1fdeb5d91ec2a08c8378dd8b7
|
Shell
|
notalexa/proj_vysper
|
/update.sh
|
UTF-8
| 299
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Patch the Vysper distribution: re-zip each compiled module under bin/ into
# the matching jar in vysper-0.7/lib, refresh the launcher scripts, and
# repackage everything as vysper-jitsi-0.7.zip.
for f in bin/*; do
    name=$(basename "$f")
    echo "Patch $name"
    # && guards the zip: the original `cd $f ; zip ...` would zip from the
    # current directory if the cd failed.
    (cd "$f" && zip -r "../../vysper-0.7/lib/$name.jar" *) > /dev/null
done

echo "Copy binaries"
cp -r src/bin vysper-0.7

echo "Create vysper-jitsi-0.7.zip"
(cd vysper-0.7 && zip -r ../vysper-jitsi-0.7.zip *) > /dev/null
echo "Patched."
| true
|
49cc94f2e15e202b87094bbb27de98caabc4c4be
|
Shell
|
davidruizrodri/dotfiles
|
/zsh/custom/aliases.zsh
|
UTF-8
| 420
| 2.578125
| 3
|
[] |
no_license
|
alias c='clear'
# Edit /etc/hosts with the user's editor under sudo.
# BUG FIX: the original value 'sudo \"\$EDITOR\" /etc/hosts' kept the
# backslashes literal, so the shell tried to run the command `"$EDITOR"`
# verbatim and failed; expand $EDITOR normally at use time instead.
alias hosts='sudo "$EDITOR" /etc/hosts'
alias zshrc='$EDITOR ~/.zshrc'
alias update-zsh='source ~/.zshrc'
alias gitka='gitk --all'
alias overwrite-dotfiles='cd $HOME/.dotfiles; make clean; make'

# Interactive diff browser: pick changed files in fzf with a diff preview.
gdiff() {
  preview="git diff $@ --color=always -- {-1}"
  # BUG FIX: $preview must be quoted so fzf receives the whole preview
  # command as one argument; unquoted it word-split into garbage.
  git diff $@ --name-only | fzf -m --ansi --preview "$preview"
}

alias run-elasticsearch-head="docker run -p 9102:9100 mobz/elasticsearch-head:2"
| true
|
27e3dfbf0045e01bb30c35fee9fe9d006e82533f
|
Shell
|
majduk/SiteMonitor
|
/bin/store_result.sh
|
UTF-8
| 414
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Import a site-monitor test-result file into MySQL via LOAD DATA.
#   $1 - main config file (defines LOGDATEFORMAT, SQLFILE and SQL* settings)
#   $2 - site config file (defines TESTRESULT)

#get main config
source "$1"
#get site config
source "$2"

file=$TESTRESULT
# $() replaces backticks and is quoted so the date output keeps its spacing;
# LOGDATEFORMAT stays unquoted on purpose (it may carry date(1) arguments).
echo "$(date $LOGDATEFORMAT) Import data from $file"
# Fields in the result file are ';'-separated.
echo "LOAD DATA LOCAL INFILE '"$file"' IGNORE INTO TABLE \`"$SQLDATABASE"\`.\`"$SQLTABLE"\` FIELDS TERMINATED BY ';';" > "$SQLFILE"
/usr/bin/mysql -f -h "$SQLHOST" -u"$SQLUSER" -p"$SQLPASSWORD" "$SQLDATABASE" < "$SQLFILE"
echo "$(date $LOGDATEFORMAT) Import data from $file - done"
| true
|
e0476e4fcd1dba69ecbde9f82a641406b617f038
|
Shell
|
anivar/fonts-smc-chakra
|
/fonts-smc-raghumalayalamsans/PKGBUILD
|
UTF-8
| 1,045
| 2.578125
| 3
|
[] |
no_license
|
# Contributor: Aashik S aashiks at gmail dot com
# Maintainer: http://smc.org.in
# Contributor: Akshay S Dinesh asdofindia at gmail dot com
# Contributor: Anivar Aravind aanivar dot aravind at gmail dot com

# PKGBUILD for the RaghuMalayalamSans Malayalam TrueType font: installs the
# .ttf and its fontconfig snippet.
pkgname=fonts-smc-raghumalayalamsans
pkgver=2.0.1
pkgrel=1
pkgdesc="This is RaghuMalayalamSans, a font belonging to a set of TrueType and OpenType fonts released under the GNU General Public License for Malayalam Language. GPL2"
arch=(any)
url="http://smc.org.in/fonts/"
# NOTE(review): pkgdesc says GPL2 while license says GPL3 — confirm which
# applies and make them agree.
license="GPL3"
depends=(fontconfig xorg-font-utils)
source=("http://smc.org.in/downloads/fonts/raghumalayalamsans/RaghuMalayalamSans.ttf"
"https://gitlab.com/smc/raghumalayalamsans/raw/master/67-smc-raghumalayalamsans.conf")
md5sums=('9417236366dbe7514d70ec681911874d'
'3ea0148691b9cbfe12bf17a1f0456bad')
install=$pkgname.install

# Install the font into the TTF directory and the fontconfig snippet into
# /etc/fonts/conf.d inside the package root.
package() {
    mkdir -p "${pkgdir}/usr/share/fonts/TTF" || return 1
    install -m644 *.ttf "${pkgdir}/usr/share/fonts/TTF"
    mkdir -p "${pkgdir}/etc/fonts/conf.d" || return 1
    install *.conf "${pkgdir}/etc/fonts/conf.d" || return 1
}
| true
|
ff0fa74c4d388a88674dd1c2b4ef4c311c4792ef
|
Shell
|
hn-88/OCVvid2fulldome
|
/ocv430installforactions.sh
|
UTF-8
| 2,459
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# install script for OpenCV-4.3.0
# using pre-built binaries
# from https://github.com/hn-88/opencvdeb/

# Fetch and unpack the pre-built OpenCV tree, then move it to ~/OpenCVLocal.
wget https://github.com/hn-88/opencvdeb/releases/download/v4.3.0.1/OpenCVbuild.zip
unzip OpenCVbuild.zip
mv home/travis/build/hn-88/opencvdeb/opencv/build/OpenCVLocal ~/OpenCVLocal -v
rm -Rvf home

# install all the opencv dependencies
sudo apt-get install -y build-essential
sudo apt-get install -y cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev
sudo apt-get install -y libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev

# doing this copy so that ldd can find the libs for linuxdeployqt
# https://www.hpc.dtu.dk/?page_id=1180
sudo cp -R /home/runner/OpenCVLocal/lib/* /usr/local/lib

# Re-point the versioned .so links at /usr/local/lib (instead of
# OpenCVLocal) for linuxdeployqt's ldd call. The 24 hand-written rm/ln
# lines are collapsed into loops over the module names — same modules,
# same order, identical effect.
modules="core highgui imgcodecs imgproc videoio video"

# Remove the links that still point into OpenCVLocal.
for m in $modules; do
    sudo rm /usr/local/lib/libopencv_${m}.so.4.3
    sudo rm /usr/local/lib/libopencv_${m}.so
done

# Recreate the version-chain links against the copied libraries.
for m in $modules; do
    sudo ln /usr/local/lib/libopencv_${m}.so.4.3.0 /usr/local/lib/libopencv_${m}.so.4.3
    sudo ln /usr/local/lib/libopencv_${m}.so.4.3 /usr/local/lib/libopencv_${m}.so
done
| true
|
3ca49c972b507a58eba09927b75b348c2bad3448
|
Shell
|
mrtryhard/ginger
|
/sample.sh
|
UTF-8
| 210
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build sample.cpp (preferring clang++ with libc++, falling back to g++),
# feed sample.html to the resulting binary on stdin, then clean up.

# command -v replaces the non-builtin `which` probe.
if command -v clang++ > /dev/null 2>&1; then
    clang++ sample.cpp -o sample -std=c++11 -stdlib=libc++
else
    g++ sample.cpp -o sample -std=c++11
fi

# Redirect directly instead of the useless `cat file | ./sample`.
./sample < sample.html
rm sample
| true
|
83aa6ff03989986859e4c3bb9df8c608f344d072
|
Shell
|
kbrock/dotfiles
|
/bashrc.d/shell.bash
|
UTF-8
| 2,075
| 3.265625
| 3
|
[] |
no_license
|
# Locale / color settings for interactive shells.
export LC_CTYPE=en_US.UTF-8
export GREP_COLOR='1;32'
export CLICOLOR=1

#unix commands
alias ls='ls -G'
alias ll='ls -lh'
alias la='ls -lah'
alias dir='ls -lh'
alias rrm='rm -rf'
# disk usage of each entry, largest first
alias ds='du -sk * |sort -nr'
#-r for more allows "raw" to come through -i.e.: tty color
alias more='less -r'
alias mroe='less -r'
alias wget='curl -O'
# would alias to which, but
alias whereis='type -a'
# gem install terminal-notifier
alias growl='terminal-notifier -message'

#mac only
# not_defined / add_to_path are presumably helpers defined by an earlier
# bashrc.d file — TODO confirm.
not_defined 'ldd' && alias ldd='otool -L'

add_to_path ~/bin

# Set the terminal window/tab title via the xterm escape sequence.
function title() { echo -e "\033]0;${1:?please specify a title}\007" ; }

#javascript alert dialog (macOS, via AppleScript)
#   $1 - message body; $2 - title (defaults to "Alert")
function alert() {
  message="$1"
  title="${2-Alert}"
  osascript -e "tell app \"System Events\" to display alert \"${title}\" message \"${message}\""
}
# Post a macOS notification-center banner via AppleScript.
#   $1 - message body
#   $2 - title (defaults to "Alert")
# BUG FIX: the original line ended with `" }` plus a second closing brace,
# so a stray literal '}' was passed to osascript as an extra argument.
function notify {
  message="${1}"
  title="${2-Alert}"
  osascript -e "display notification \"${message}\" with title \"${title}\""
}
# Helpers that operate on the file behind a command name:
#   mw  - page it        catw - cat it        lw - ls -l it
#   sw  - open in subl   vw   - open in vi    cw - pushd to its directory
function mw() { more `which $1` ; }
function catw() { cat `which $1` ; }
function lw() { ls -l `which $1` ; }
function sw() { subl `which $1` ; }
function vw() { vi `which $1` ; }
function cw() { local dn="$(dirname $(which $1))" ; [ -d "$dn" ] && pushd $dn ; }

#copy in the background (add an & in there?)
# rings via `ding` (presumably a notifier helper defined elsewhere — TODO
# confirm) when the copy finishes or fails.
function bgcp { cp "$@" && ding copied || ding failed ; }

# Open all files matching an ag search in Sublime Text.
function sag() {
  subl $(ag -l "$@")
}

# Search across the ManageIQ-related source trees.
function mag() {
  # ignore: bluecf, gems, plugins/providers files
  ag "$@" ~/src/{agrare,amazon,container,dbus,httpd,ibm,inventory,manageiq,miq,perf_utils,ui,vmware}* ~/src/gems/{more_core_extensions,manage}*
}

# Same as mag, excluding spec files.
function magg() {
  # -G says only ruby files (^c = spec)
  mag "$@" --ignore '*_spec.rb'
}

# Search only the core manageiq app/lib trees.
function marge() {
  ag "$@" ~/src/manageiq/{app,lib/vmdb} ~/src/manageiq-*/lib
}

# Safer rm (when ~/.Trash exists, i.e. macOS): moves targets into ~/.Trash
# instead of deleting. Option-looking arguments (-f, -r, ...) are silently
# ignored rather than forwarded.
if [ -d ~/.Trash ] ; then
  function rm () {
    local path
    for path in "$@"; do
      # ignore any arguments
      if [[ "$path" = -* ]]; then :
      else
        local dst=${path##*/}
        # append the time if necessary to avoid clobbering an earlier trash
        # entry with the same name
        while [ -e ~/.Trash/"$dst" ]; do
          dst="$dst "$(date +%H-%M-%S)
        done
        mv "$path" ~/.Trash/"$dst"
      fi
    done
  }
fi
| true
|
55300201295f4bdd3f2341b1a5c157653c7ad32e
|
Shell
|
mkzzn/eurotech_jekyll
|
/deploy
|
UTF-8
| 574
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Deploy the site: clone a fresh copy of the repository into /tmp and rsync
# it to the remote web root.

app_name=zopio-site
tmp_path=/tmp/$app_name
remote_path=/var/www/zopio
remote_host=doblock

# Add the necessary ssh key to the ssh-agent if it hasn't been added in this
# session (the probe ssh fails when the agent has no usable key).
# The useless `` `echo $app_name` `` subshell is replaced by a direct
# parameter expansion.
ssh "$remote_host" "echo 2>&1" || ssh-add ~/.ssh/"${app_name}"_dsa

# Remove the temporary repository if one already exists
[[ -d $tmp_path ]] && rm -rf "$tmp_path"

# Clone the Git repository to a temporary path
git clone "zopio_github:zopio/$app_name.git" "$tmp_path"

# Sync the temporary repository clone with the remote repository
rsync -avz -e ssh "$tmp_path" "$remote_host:$remote_path"
| true
|
38eafa319fd55ba3e02d6e5adff9614b334b556f
|
Shell
|
enesutku07/lfscript
|
/scripts/blfs-18652/x7font
|
UTF-8
| 1,950
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# The instructions in this file are extracted from
# 'Beyond Linux From Scratch' (2017-04-29 / r18652) but are modified for use
# with LFScript 4 which installs the software to a fake root directory.
#
# Beyond Linux From Scratch is released under the MIT license.
# Copyright (C) 2001-2017, The BLFS Development Team

# Source tarballs (Xorg fonts and font utilities); MD5SUMLIST entries pair
# with WGETLIST by position. NOTE(review): WGETLIST has 9 URLs but only 9
# md5 entries are listed without one per line matching visually — verify
# the pairing before release.
WGETLIST="https://www.x.org/pub/individual/font/font-util-1.3.1.tar.bz2
https://www.x.org/pub/individual/font/encodings-1.0.4.tar.bz2
https://www.x.org/pub/individual/font/font-alias-1.0.3.tar.bz2
https://www.x.org/pub/individual/font/font-adobe-utopia-type1-1.0.4.tar.bz2
https://www.x.org/pub/individual/font/font-bh-ttf-1.0.3.tar.bz2
https://www.x.org/pub/individual/font/font-bh-type1-1.0.3.tar.bz2
https://www.x.org/pub/individual/font/font-ibm-type1-1.0.3.tar.bz2
https://www.x.org/pub/individual/font/font-misc-ethiopic-1.0.3.tar.bz2
https://www.x.org/pub/individual/font/font-xfree86-type1-1.0.4.tar.bz2"
MD5SUMLIST="23756dab809f9ec5011bb27fb2c3c7d6
0f2d6546d514c5cc4ecf78a60657a5c1
6d25f64796fef34b53b439c2e9efa562
fcf24554c348df3c689b91596d7f9971
e8ca58ea0d3726b94fe9f2c17344be60
53ed9a42388b7ebb689bdfc374f96a22
bfb2593d2102585f45daa960f43cb3c4
6306c808f7d7e7d660dfb3859f9091d2
3eeb3fb44690b477d510bbd8f86cf5aa"
REQUIRES="xcursor-themes xorg-env"
TAGS="multi"

###############################################
# Build and fake-root-install one unpacked source tree; presumably invoked
# once per tarball by the LFScript harness (TAGS="multi") — confirm against
# the harness. $XORG_CONFIG, $XORG_PREFIX and $FAKEROOT come from the
# environment set up by LFScript.
installation() { # INSTALLING SYSTEM SOFTWARE #
###############################################

./configure $XORG_CONFIG
make
make DESTDIR=${FAKEROOT} install

# Expose the Xorg OTF/TTF font directories under /usr/share/fonts.
install -v -d -m755 ${FAKEROOT}/usr/share/fonts
ln -svfn $XORG_PREFIX/share/fonts/X11/OTF ${FAKEROOT}/usr/share/fonts/X11-OTF
ln -svfn $XORG_PREFIX/share/fonts/X11/TTF ${FAKEROOT}/usr/share/fonts/X11-TTF

#################
} # END OF FILE #
#################
| true
|
3c4dc7158d6b9982d27497bf90e19a6014566ddb
|
Shell
|
pronoun66/serverless-emails-backend
|
/scripts/unitTest.sh
|
UTF-8
| 194
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the mocha unit-test suite. An optional first argument overrides the
# default test path (tests/unit).
set -e

export IS_TEST=true

TEST_PATH=$1
# Quoted test: the original unquoted `[ -z $1 ]` misbehaves when $1
# contains whitespace.
if [ -z "$TEST_PATH" ]
then
    TEST_PATH=tests/unit
fi

./node_modules/mocha/bin/mocha "$TEST_PATH" --recursive --timeout 10000 --bail --errors-only
| true
|
8032093d34a5de2c76c3516372bcd3686e0ee4ed
|
Shell
|
nicolasi31/public
|
/racine/home/.profile.d/perso-tipsnewinstall.sh
|
UTF-8
| 4,456
| 2.921875
| 3
|
[] |
no_license
|
# Only define the helper when the personal profile extras are enabled via
# PERSO_ENABLED (set elsewhere in the profile environment).
if [ ${PERSO_ENABLED} = 1 ] ; then
# Print (do NOT execute) a cheat-sheet of post-install commands for a fresh
# Debian-style server: hostname, sysctl, locale, time, SSH, Postfix, DNS,
# NTP and network/bridge/wifi setup.  The whole body is a single-quoted
# literal, so nothing inside is expanded or run -- it is copy/paste material.
# NOTE(review): the snippet mixes variable names (MY_DNS_NEW1/MY_NTP_NEW1 and
# MY_IFACE are referenced but never assigned; MY_NEW_DNS1 is assigned
# instead) -- verify names before pasting any of these commands.
tipsnewinstall () {
echo '
MY_NEW_HOSTNAME="newserver"
MY_NEW_DOMAIN="mydomain.net"
MY_NEW_NETIF="eth0"
MY_NEW_CONNAME="LAN"
MY_NEW_NETIP="10.71.88.100/24"
MY_NEW_NETRBIDGE="brwan"
MY_NEW_NETGW="10.71.88.254"
MY_NEW_DNS1="10.71.88.254"
MY_NEW_DNS2="10.71.86.252"
MY_FILE_SYSCTL="/etc/sysctl.conf"
MY_FILE_TSYNCD="/etc/systemd/timesyncd.conf"
MY_FILE_RESOLVD="/etc/systemd/resolved.conf"
MY_FILE_SSHD="/etc/ssh/sshd_config"
MY_FILE_HOSTS="/etc/hosts"
### Hosts file update ###
sed -i "s/\(127.0.0.1 localhost \).*/\1${MY_NEW_HOSTNAME} ${MY_NEW_HOSTNAME}.${MY_NEW_DOMAIN}/g" ${MY_FILE_HOSTS}
### Sysctl configuration ###
sed -i "s/\(kernel.hostname *= *\).*/\1${MY_NEW_HOSTNAME}/" ${MY_FILE_SYSCTL}
sed -i "s/\(kernel.domainname *= *\).*/\1${MY_NEW_DOMAIN}/" ${MY_FILE_SYSCTL}
sed -i "s/\(.*\)eth0\(.*\)/\1${MY_IFACE}\2/g" ${MY_FILE_SYSCTL}
sed -i "s/\(.*\)ens3\(.*\)/\1${MY_IFACE}\2/g" ${MY_FILE_SYSCTL}
sysctl -p
### Language configuration ###
echo "locales locales/locales_to_be_generated multiselect en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8" | debconf-set-selections ;
echo "locales locales/default_environment_locale select fr_FR.UTF-8" | debconf-set-selections ;
sed -i "s/^# fr_FR.UTF-8 UTF-8/fr_FR.UTF-8 UTF-8/" /etc/locale.gen ;
dpkg-reconfigure --frontend=noninteractive locales ;
### Time and Date configuration ###
ln -fs /usr/share/zoneinfo/Europe/Paris /etc/localtime ;
echo "tzdata tzdata/Areas select Europe" | debconf-set-selections ;
echo "tzdata tzdata/Zones/Europe select Paris" | debconf-set-selections ;
dpkg-reconfigure --frontend=noninteractive tzdata
### OpenSSH configuration ###
sed -i "s/^#\{0,1\}\(AddressFamily \).*/\1any/" ${MY_FILE_SSHD}
dpkg-reconfigure --frontend=noninteractive openssh-server
systemctl restart sshd
### Postfix configuration ###
postconf -e "mydestination = localhost, localhost.localdomain, ${MY_NEW_HOSTNAME}, ${MY_NEW_HOSTNAME}.${MY_NEW_DOMAIN}"
postconf -e "inet_protocols=ipv4"
### Resolved configuration ###
sed -i "s/^#\{0,1\}\(DNS=\).*/\1${MY_DNS_NEW1}/" ${MY_FILE_RESOLVD}
sed -i "s/^#\{0,1\}\(FallbackDNS=\).*/\1${MY_DNS_NEW2}/" ${MY_FILE_RESOLVD}
sed -i "s/^#\{0,1\}\(Domains=\).*/\1${MY_NEW_DOMAIN}/" ${MY_FILE_RESOLVD}
sed -i "s/^#\{0,1\}\(LLMNR=\).*/\1no/" ${MY_FILE_RESOLVD}
sed -i "s/^#\{0,1\}\(MulticastDNS=\).*/\1no/" ${MY_FILE_RESOLVD}
sed -i "s/^#\{0,1\}\(DNSSEC=\).*/\1no/" ${MY_FILE_RESOLVD}
sed -i "s/^#\{0,1\}\(DNSOverTLS=\).*/\1no/" ${MY_FILE_RESOLVD}
sed -i "s/^#\{0,1\}\(DNSStubListener=\).*/\1no/" ${MY_FILE_RESOLVD}
sed -i "s/^#\{0,1\}\(Cache=\).*/\1yes/" ${MY_FILE_RESOLVD}
sed -i "s/^#\{0,1\}\(ReadEtcHosts=\).*/\1yes/" ${MY_FILE_RESOLVD}
systemctl restart systemd-resolved
### Timesyncd configuration ###
sed -i "s/^#\{0,1\}\(NTP=\).*/\1${MY_NTP_NEW1}/" ${MY_FILE_TSYNCD}
sed -i "s/^#\{0,1\}\(FallbackNTP=\).*/\1${MY_NTP_NEW2}/" ${MY_FILE_TSYNCD}
timedatectl set-timezone Europe/Paris
timedatectl set-ntp 1
systemctl restart systemd-timesyncd
### Network configuration ###
nmcli con show
nmcli con add type ethernet con-name ${MY_NEW_CONNAME} ifname ${MY_NEW_NETIF} ip4 ${MY_NEW_NETIP} gw4 ${MY_NEW_NETGW}
nmcli con mod ${MY_NEW_CONNAME} ipv4.dns "${MY_NEW_DNS1} ${MY_NEW_DNS2}"
nmcli con up ${MY_NEW_CONNAME} ifname ${MY_NEW_NETIF}
nmcli con add type bridge ifname ${MY_NEW_NETRBIDGE} ip4 ${MY_NEW_NETIP} gw4 ${MY_NEW_NETGW}
nmcli con add type bridge-slave ifname ${MY_NEW_NETIF} master ${MY_NEW_NETRBIDGE}
nmcli con up bridge-${MY_NEW_NETRBIDGE}
nmcli connection delete System\ ${MY_NEW_NETIF}
ip addr del ${MY_NEW_NETIP} dev ${MY_NEW_NETIF}
ip link add name ${MY_NEW_NETRBIDGE} type bridge
ip link set dev ${MY_NEW_NETRBIDGE} up
ip link set dev ${MY_NEW_NETIF} master ${MY_NEW_NETRBIDGE}
ip link set dev ${MY_NEW_NETRBIDGE} up
ip link set dev ${MY_NEW_NETIF} up
ip addr add ${MY_NEW_NETIP} dev ${MY_NEW_NETRBIDGE}
ip route add default via ${MY_NEW_NETGW}
sed -i "1s/^/nameserver ${MY_NEW_DNS1}\n/" /etc/resolv.conf
MY_NEW_CONNAME="MYWIFINET"
MY_NEW_NETIF="wlan0"
MY_NEW_SSID="MYSSID"
MY_NEW_PSK="MYPSKKEY"
rfkill unblock wlan
nmcli radio wifi on
nmcli device wifi list
nmcli device wifi connect ${MY_NEW_SSID} password ${MY_NEW_PSK}
nmcli connection add type wifi con-name ${MY_NEW_CONNAME} ifname ${MY_NEW_NETIF} ssid ${MY_NEW_SSID}
nmcli connection modify ${MY_NEW_CONNAME} wifi-sec.key-mgmt wpa-psk wifi-sec.psk ${MY_NEW_PSK}
nmcli connection up ${MY_NEW_CONNAME}
'
}
fi
| true
|
7f61d7f2ed43cd189d9cb26cef8de20e5094f1b7
|
Shell
|
praveengowdaballa/awsplayground
|
/useradd.sh
|
UTF-8
| 585
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a CCoE admin account with SSH key access, passwordless sudo and a
# cron-driven helper script.  Must run as root.
#
# NOTE(review): the account and home-directory names below are mutually
# inconsistent (ccoe-n, ccoe-a, ccoe-ain, ccoe-an, ccoe-in, ccoe-admin, ccon,
# ccoe-adn, ccomin).  They look like corrupted variants of a single name
# (presumably "ccoe-admin"); as written most of these paths will not exist
# and the chmod/chown/chattr steps will fail -- confirm the intended name.
useradd -m -d /var/lib/ccoe-n -s /bin/bash -c "CCoE team's default " -U ccoe-a
mkdir /var/lib/ccoe-ain/.ssh
touch /var/lib/ccoe-an/.ssh/authorized_keys
# Install the team's public key (heredoc content must stay verbatim).
cat > /var/lib/ccoe-in/.ssh/authorized_keys <<EOF
ssh-rsa Test668hjb jnjlk.jljkljkln hghggBi5Oa3iEjqbV1jqgcn8Mj8QOmGEd ccoe-admin
EOF
# Standard OpenSSH permission requirements: 700 for ~/.ssh, 600 for the keys.
chmod 700 /var/lib/ccoe-admin/.ssh
chmod 600 /var/lib/ccon/.ssh/authorized_keys
chown ccoe-n.ccoe-a /var/lib/ccoe-an/.ssh -R
# Passwordless sudo for the new account.
echo "ccoe-adn ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
# Make the helper immutable, then schedule it every minute.
# NOTE(review): the cron line runs /home/centos/traceami.sh while chattr
# targets /var/lib/ccomin/traceami.sh -- likely meant to be the same file.
chattr +i /var/lib/ccomin/traceami.sh
echo "* * * * * bash /home/centos/traceami.sh" | crontab -
| true
|
db02858f45821188577fd850d19c7da982b6e482
|
Shell
|
fteychene/dokku-alt
|
/plugins/dokku-mariadb/pre-deploy
|
UTF-8
| 285
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# dokku-mariadb pre-deploy hook: make sure the shared MariaDB container is up
# before an app that has linked databases gets deployed.
# `vars` provides verify_app_name, ensure_database_container_is_running and
# the DB_* path variables used below.
source "$(dirname $0)/vars"
# Validates $1 and sets $APP (behavior defined in the sourced vars file).
verify_app_name "$1"
DB_LINKS="$DB_APP_DATABASES$APP/"
DB_APP_PASSWORD="$DB_APP_PASSWORDS$APP"
# Start the container only when this app actually has at least one linked
# database (link dir exists, is non-empty, and a password file is present).
# NOTE(review): DB_LINKS/DB_APP_PASSWORD may also be read by the sourced
# helper functions, so they are intentionally left as globals.
if [[ -d "$DB_LINKS" ]] && [[ -f "$DB_APP_PASSWORD" ]] && [[ $(ls -1 "$DB_LINKS" | wc -l) -gt 0 ]]; then
ensure_database_container_is_running
fi
| true
|
8a507bf3b73eb42844617d3a6a4ea00d4a7684a0
|
Shell
|
estantevirtual/opsworks-cookbooks
|
/solr-config/templates/default/solr_init.d.sh.erb
|
UTF-8
| 1,909
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/sh
### BEGIN INIT INFO
# Provides: solr
# Required-Start: $local_fs $remote_fs
# Required-Stop: $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start/stop Solr Server
# Description: Start/stop Solr Server
### END INIT INFO
SOLR_DIR="/opt/solr-<%= @version %>"
NEWRELIC_PATH="$SOLR_DIR/newrelic/newrelic.jar"
SOLR_BIN="$SOLR_DIR/bin/solr"
START_OPT="<%= @solr_java_mem %><%= " -Dsolr.data.dir=#{@solr_data_dir} -Duser.timezone=America/Sao_Paulo" unless @solr_data_dir.empty? %>"
# Exit gracefully if the package is not installed
[ -x "$SOLR_BIN" ] || exit 0
# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
do_start()
{
    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started

    # Attach the New Relic java agent only when its jar is actually present.
    if [ -f "$NEWRELIC_PATH" ]; then
        NEWRELIC=" -javaagent:$NEWRELIC_PATH "
    fi

    # $START_OPT stays unquoted on purpose: it carries several whitespace
    # separated JVM options that must reach the launcher as separate words.
    SOLR_OPTS="$NEWRELIC" "$SOLR_BIN" start $START_OPT

    # Propagate the launcher's exit status.  The original `exit $!` expanded
    # to the PID of the last *background* job (none is started here) and
    # terminated the whole init script, so the `case "$?"` handling in the
    # start) branch below could never run.
    return $?
}
#
# Function that stops the daemon/service
#
do_stop()
{
$SOLR_BIN stop
}
case "$1" in
start)
[ "$VERBOSE" != no ] && log_daemon_msg "Starting"
do_start
case "$?" in
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
esac
;;
stop)
[ "$VERBOSE" != no ] && log_daemon_msg "Stopping"
do_stop
case "$?" in
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
esac
;;
status)
$SOLR_BIN status;;
restart|force-reload)
log_daemon_msg "Restarting $DESC" "$NAME"
do_stop
do_start
;;
*)
echo "Usage: $0 {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac
:
| true
|
a144be4b2b2a72800886540742db1d7395206c77
|
Shell
|
repmyblock/devsetup
|
/scripts/RenamePics.sh
|
UTF-8
| 235
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# Strip the second numeric component from a picture's name:
#   Img-<n>-<m>.jpg  ->  Img-<n>.jpg
# Usage: RenamePics.sh FILE
# The rename is skipped when the target name already exists.

if [ -z "$1" ]; then
    echo "Usage: $0 FILE" >&2
    exit 1
fi

FILE=$1
echo "FILE: $FILE"

# $(...) replaces the legacy backticks; expansions are quoted so file names
# containing whitespace survive intact.
NEWFILE=$(echo "$FILE" | sed 's/Img-\([0-9]*\)-\([0-9]*\).jpg/Img-\1.jpg/g')
echo "Newfile: $NEWFILE"

if [ -f "$NEWFILE" ]
then
    # Target already exists: leave the source untouched.
    # (The original echoed the undefined lowercase $file, printing nothing.)
    echo "$FILE found."
else
    mv "$FILE" "$NEWFILE"
    echo "$FILE not found."
fi
| true
|
ea49da4f5d0e2741cb06ec1353594ae67db2e2e7
|
Shell
|
teuben/teunix
|
/Env/skel/.login
|
UTF-8
| 275
| 2.515625
| 3
|
[] |
no_license
|
# tcsh login script: optionally start an X session when logging in on the
# physical console.
source ~/.tcshrc
# Only prompt for interactive logins ($prompt is set for those).
if ($?prompt) then
if ("$tty" == "console" || "$tty" == "tty1") then
# $<:q reads one line from stdin, quoted to preserve it verbatim.
printf "Start graphical X session? [y] "; set ok=$<:q
if ("$ok" == "" || "$ok" == "y" || "$ok" == "Y") then
# Run X under ssh-agent so the session inherits the agent socket;
# stdout+stderr are captured in ~/.xerrors.
# NOTE(review): `-auth` normally takes a file argument -- here it is
# passed bare to the server; confirm this is intentional.
ssh-agent startx -- -auth >& ~/.xerrors
endif
endif
endif
| true
|
fb6c7d2b6359a575d21462931578ad50f1ff9f3a
|
Shell
|
longlonghash/fluidex-backend
|
/run.sh
|
UTF-8
| 4,870
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Orchestrates a full local fluidex backend: circuit setup, contracts,
# docker-compose stacks and all service binaries.
set -uex
# assume already install: libgmp-dev nasm nlohmann-json3-dev snarkit plonkit
# common.sh provides helpers (retry_cmd_until_ok, OS, ...); envs/small sets
# the circuit size parameters NTXS/BALANCELEVELS/ORDERLEVELS/ACCOUNTLEVELS.
source ./common.sh
source ./envs/small
export VERBOSE=false
export RUST_BACKTRACE=full
# Absolute path of this script's directory, independent of the caller's cwd.
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
STATE_MNGR_DIR=$DIR/rollup-state-manager
CIRCUITS_DIR=$DIR/circuits
# Per-configuration circuit output dir, e.g. .../Block_2_2_2_2.
TARGET_CIRCUIT_DIR=$CIRCUITS_DIR/testdata/Block_$NTXS"_"$BALANCELEVELS"_"$ORDERLEVELS"_"$ACCOUNTLEVELS
PROVER_DIR=$DIR/prover-cluster
EXCHANGE_DIR=$DIR/dingir-exchange
FAUCET_DIR=$DIR/regnbue-bridge
CONTRACTS_DIR=$DIR/contracts
# Used to date-stamp the per-service nohup log files.
CURRENTDATE=$(date +"%Y-%m-%d")
# Sync git submodules; additionally pull the superproject (recursing into
# submodules) when not running under CI.
handle_submodule() {
    git submodule update --init --recursive
    # ${CI+x} expands to "x" only when the CI variable is set, so the pull
    # runs exclusively outside CI environments.
    if [ -z "${CI+x}" ]; then
        git pull --recurse-submodules
    fi
}
function prepare_circuit() {
rm -rf $TARGET_CIRCUIT_DIR
#cd $STATE_MNGR_DIR
#cargo run --bin gen_export_circuit_testcase
mkdir -p $TARGET_CIRCUIT_DIR
CIRCUITS_DIR=$CIRCUITS_DIR envsubst > $TARGET_CIRCUIT_DIR/circuit.circom << EOF
include "${CIRCUITS_DIR}/src/block.circom"
component main = Block(${NTXS}, ${BALANCELEVELS}, ${ORDERLEVELS}, ${ACCOUNTLEVELS})
EOF
echo 'circuit source:'
cat $TARGET_CIRCUIT_DIR/circuit.circom
cd $CIRCUITS_DIR
npm i
# TODO: detect and install snarkit
snarkit compile $TARGET_CIRCUIT_DIR --verbose --backend=auto 2>&1 | tee /tmp/snarkit.log
plonkit setup --power 20 --srs_monomial_form $TARGET_CIRCUIT_DIR/mon.key
plonkit dump-lagrange -c $TARGET_CIRCUIT_DIR/circuit.r1cs --srs_monomial_form $TARGET_CIRCUIT_DIR/mon.key --srs_lagrange_form $TARGET_CIRCUIT_DIR/lag.key
plonkit export-verification-key -c $TARGET_CIRCUIT_DIR/circuit.r1cs --srs_monomial_form $TARGET_CIRCUIT_DIR/mon.key -v $TARGET_CIRCUIT_DIR/vk.bin
}
function prepare_contracts() {
rm -f $CONTRACTS_DIR/contracts/verifier.sol
plonkit generate-verifier -v $TARGET_CIRCUIT_DIR/vk.bin -s $CONTRACTS_DIR/contracts/verifier.sol
cd $CONTRACTS_DIR/
git update-index --assume-unchanged $CONTRACTS_DIR/contracts/verifier.sol
yarn install
npx hardhat compile
}
function config_prover_cluster() {
cd $PROVER_DIR
PORT=50055 WITGEN_INTERVAL=2500 N_WORKERS=10 TARGET_CIRCUIT_DIR=$TARGET_CIRCUIT_DIR envsubst < $PROVER_DIR/config/coordinator.yaml.template > $PROVER_DIR/config/coordinator.yaml
TARGET_CIRCUIT_DIR=$TARGET_CIRCUIT_DIR envsubst < $PROVER_DIR/config/client.yaml.template > $PROVER_DIR/config/client.yaml
}
# TODO: send different tasks to different tmux windows
# Tear down, wipe the state of, and recreate one docker-compose project.
#   $1 - project directory (must contain docker/docker-compose.yaml)
#   $2 - docker-compose project name
function restart_docker_compose() {
    # `local` keeps the helper from clobbering globals when several projects
    # are restarted in sequence (the original leaked dir/name as globals).
    local dir=$1
    local name=$2
    docker-compose --file "$dir/docker/docker-compose.yaml" --project-name "$name" down --remove-orphans
    # NOTE(review): `docker_rm` is not a standard command; it is presumably a
    # guarded rm helper sourced from common.sh -- confirm, otherwise this
    # line silently fails to clean $dir/docker/data.
    docker_rm -rf "$dir/docker/data"
    docker-compose --file "$dir/docker/docker-compose.yaml" --project-name "$name" up --force-recreate --detach
}
function run_docker_compose() {
restart_docker_compose $EXCHANGE_DIR exchange
restart_docker_compose $PROVER_DIR prover
restart_docker_compose $STATE_MNGR_DIR rollup
restart_docker_compose $FAUCET_DIR faucet
}
function run_matchengine() {
cd $EXCHANGE_DIR
make startall
#cargo build --bin matchengine
#nohup $EXCHANGE_DIR/target/debug/matchengine >> $EXCHANGE_DIR/matchengine.$CURRENTDATE.log 2>&1 &
}
function run_ticker() {
cd $EXCHANGE_DIR/examples/js/
npm i
nohup npx ts-node tick.ts >> $EXCHANGE_DIR/tick.$CURRENTDATE.log 2>&1 &
}
function run_rollup() {
cd $STATE_MNGR_DIR
mkdir -p circuits/testdata/persist
cargo build --release --bin rollup_state_manager
export DATABASE_URL=postgres://postgres:postgres_AA9944@127.0.0.1:5434/rollup_state_manager
retry_cmd_until_ok sqlx migrate run
nohup $STATE_MNGR_DIR/target/release/rollup_state_manager >> $STATE_MNGR_DIR/rollup_state_manager.$CURRENTDATE.log 2>&1 &
}
function run_prove_master() {
# run coordinator because we need to init db
cd $PROVER_DIR
cargo build --release
nohup $PROVER_DIR/target/release/coordinator >> $PROVER_DIR/coordinator.$CURRENTDATE.log 2>&1 &
}
function run_prove_workers() {
cd $PROVER_DIR # need to switch into PROVER_DIR to use .env
if [ ! -f $PROVER_DIR/target/release/client ]; then
cargo build --release
fi
if [ $OS = "Darwin" ]; then
(nice -n 20 nohup $PROVER_DIR/target/release/client >> $PROVER_DIR/client.$CURRENTDATE.log 2>&1 &)
else
nohup $PROVER_DIR/target/release/client >> $PROVER_DIR/client.$CURRENTDATE.log 2>&1 &
sleep 1
cpulimit -P $PROVER_DIR/target/release/client -l $((50 * $(nproc))) -b -z # -q
fi
}
function run_faucet() {
cd $FAUCET_DIR
cargo build --release --bin faucet
nohup "$FAUCET_DIR/target/release/faucet" >> $FAUCET_DIR/faucet.$CURRENTDATE.log 2>&1 &
}
# Launch every long-running service binary in the background.
function run_bin() {
run_matchengine
run_ticker
run_prove_master
run_prove_workers
run_rollup
run_faucet
}
# One-time preparation: submodules, circuit artifacts, contracts and the
# prover cluster configuration.
function setup() {
handle_submodule
prepare_circuit
prepare_contracts
config_prover_cluster
}
# Bring up the docker-compose stacks, then the service binaries on top.
function run_all() {
run_docker_compose
run_bin
}
# Entry point: prepare everything, then start the whole backend.
setup
run_all
| true
|
dd2612886fcc9abbfd89399d8a71849afb86466e
|
Shell
|
Deon-Trevor/scripts
|
/auto-git-push.sh
|
UTF-8
| 361
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# Automatically commit and push a frequently changing file.
# This script works best when you have specific files that change constantly
# and don't need dynamic commit messages

# `command -v` is the POSIX, built-in replacement for the external (and not
# universally installed) `which`; $(...) nests and reads better than backticks.
GIT=$(command -v git)
REPO_DIR=~/Documents/VSCode/phishfort/harvester-shodan/python # repo directory

# Abort instead of running git in whatever directory we happen to be in when
# the repository path is missing or inaccessible.
cd "${REPO_DIR}" || exit 1

"${GIT}" pull
"${GIT}" add phish.cache # file to auto add
"${GIT}" commit -m "Updated cache" # git commit message
"${GIT}" push
| true
|
8581b315bd7a2bbaa714e7161055214585ba5c8f
|
Shell
|
wjk940/pcf-pipelines
|
/tasks/config-mysql/task.sh
|
UTF-8
| 6,915
| 3
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
chmod +x tool-om/om-linux
CMD_PATH="./tool-om/om-linux"
# Expand a comma separated list of availability zones into the body of a
# JSON array:  "az1,az2"  ->  {"name":"az1"},{"name":"az2"}
# The awk program re-joins the comma separated fields with the JSON glue
# string (via OFS and the `$1=$1` record rebuild) and wraps the result in
# the outer object delimiters.
function fn_ert_balanced_azs {
    local azs_csv=$1
    # printf with a quoted expansion replaces the original unquoted
    # `echo $azs_csv`, which word-split the value and could mangle it via
    # pathname expansion.
    # NOTE: `$1=$1` doubles as the awk pattern, so an input whose first field
    # is empty or "0" prints nothing -- behavior preserved from the original.
    printf '%s\n' "$azs_csv" | awk -F "," -v braceopen='{' -v braceclose='}' -v name='"name":' -v quote='"' -v OFS='"},{"name":"' '$1=$1 {print braceopen name quote $0 quote braceclose}'
}
ERT_AZS=$(fn_ert_balanced_azs $TILE_OTHER_AVAILABILITY_ZONES)
TILE_NETWORK=$(cat <<-EOF
{
"singleton_availability_zone": {
"name": "$TILE_SINGLETON_AVAILABILITY_ZONE"
},
"other_availability_zones": [
$ERT_AZS
],
"network": {
"name": "$TILE_NETWORK"
}
}
EOF
)
echo "Configuring ${PRODUCT_NAME} network"
$CMD_PATH --target $OPSMAN_URI --username $OPSMAN_USERNAME --password $OPSMAN_PASSWORD --skip-ssl-validation \
configure-product --product-name "${PRODUCT_NAME}" \
--product-network "$TILE_NETWORK"
TILE_PROPERTIES=$(cat <<-EOF
{
".cf-mysql-broker.bind_hostname": {
"value": "$CF_MYSQL_BROKER_BIND_HOSTNAME"
},
".cf-mysql-broker.quota_enforcer_pause": {
"value": ${CF_MYSQL_BROKER_QUOTA_ENFORCER_PAUSE:-30}
},
".mysql.mysql_start_timeout": {
"value": ${MYSQL_MYSQL_START_TIMEOUT:-60}
},
".mysql.roadmin_password": {
"value": {
"secret": "$MYSQL_ROADMIN_PASSWORD"
}
},
".mysql.skip_name_resolve": {
"value": ${MYSQL_SKIP_NAME_RESOLVE:-true}
},
".mysql.wsrep_debug": {
"value": ${MYSQL_WSREP_DEBUG:-true}
},
".properties.optional_protections": {
"value": "${OPTIONAL_PROTECTIONS:-enable}"
},
".properties.optional_protections.enable.canary_poll_frequency": {
"value": ${OPTIONAL_PROTECTIONS_CANARY_POLL_FREQUENCY:-30}
},
".properties.optional_protections.enable.canary_write_read_delay": {
"value": ${OPTIONAL_PROTECTIONS_CANARY_WRITE_READ_DELAY:-20}
},
".properties.optional_protections.enable.notify_only": {
"value": ${OPTIONAL_PROTECTIONS_NOTIFY_ONLY:-false}
},
".properties.optional_protections.enable.prevent_auto_rejoin": {
"value": ${OPTIONAL_PROTECTIONS_PREVENT_AUTO_REJOIN:-true}
},
".properties.optional_protections.enable.recipient_email": {
"value": "$OPTIONAL_PROTECTIONS_RECIPIENT_EMAIL"
},
".properties.optional_protections.enable.replication_canary": {
"value": ${OPTIONAL_PROTECTIONS_REPLICATION_CANARY:-true}
},
".properties.server_activity_logging": {
"value": "${SERVER_ACTIVITY_LOGGING:-enable}"
},
".properties.server_activity_logging.enable.audit_logging_events": {
"value": "${SERVER_ACTIVITY_LOGGING_ENABLE_AUDIT_LOGGING_EVENTS:-connect,query}"
},
".proxy.shutdown_delay": {
"value": ${PROXY_SHUTDOWN_DELAY:-0}
},
".proxy.startup_delay": {
"value": ${PROXY_STARTUP_DELAY:-0}
},
".proxy.static_ips": {
"value": ${PROXY_STATIC_IPS:-null}
}
}
EOF
)
# ".properties.plan_collection": {
# "value": [
# {
# "name": {
# "value": "${PLAN_1_NAME:-100mb}"
# },
# "description": {
# "value": "${PLAN_1_DESCRIPTION:-100MB default}"
# },
# "max_storage_mb": {
# "value": ${PLAN_1_MAX_STORAGE_MB:-100}
# },
# "max_user_connections": {
# "value": ${PLAN_1_MAX_USER_CONNECTIONS:-40}
# },
# "private": {
# "value": ${PLAN_1_PRIVATE:-false}
# }
# }
# ]
# },
echo "Configuring ${PRODUCT_NAME} properties"
$CMD_PATH --target $OPSMAN_URI --username $OPSMAN_USERNAME --password $OPSMAN_PASSWORD --skip-ssl-validation \
configure-product --product-name "${PRODUCT_NAME}" \
--product-properties "$TILE_PROPERTIES"
if [ "$BACKUPS_SELECTOR" == "No Backups" ]; then
BACKUP_PREPARE_COUNT=0
BACKUP_PROPERTIES=$(cat <<-EOF
{
".properties.backups": {
"value": "$BACKUPS_SELECTOR"
},
".properties.backup_options": {
"value": "disable"
}
}
EOF
)
elif [ "$BACKUPS_SELECTOR" == "Ceph or Amazon S3" ]; then
BACKUP_PREPARE_COUNT=1
BACKUP_PROPERTIES=$(cat <<-EOF
{
".properties.backups": {
"value": "enable"
},
".properties.backup_options": {
"value": "enable"
},
".properties.backup_options.enable.backup_all_masters": {
"value": ${BACKUP_OPTIONS_ENABLE_BACKUP_ALL_MASTERS:-true}
},
".properties.backup_options.enable.cron_schedule": {
"value": "${BACKUP_OPTIONS_ENABLE_CRON_SCHEDULE:-0 0 * * *}"
},
".properties.backups.enable.access_key_id": {
"value": "$BACKUPS_SELECTOR_S3_ACCESS_KEY_ID"
},
".properties.backups.enable.bucket_name": {
"value": "$BACKUPS_SELECTOR_S3_BUCKET_NAME"
},
".properties.backups.enable.endpoint_url": {
"value": "$BACKUPS_SELECTOR_S3_ENDPOINT_URL"
},
".properties.backups.enable.bucket_path": {
"value": "$BACKUPS_SELECTOR_S3_PATH"
},
".properties.backups.enable.secret_access_key": {
"value": {
"secret": "$BACKUPS_SELECTOR_S3_SECRET_ACCESS_KEY"
}
}
}
EOF
)
elif [ "$BACKUPS_SELECTOR" == "SCP to a Remote Host" ]; then
BACKUP_PREPARE_COUNT=1
BACKUP_PROPERTIES=$(cat <<-EOF
{
".properties.backups": {
"value": "scp"
},
".properties.backup_options": {
"value": "enable"
},
".properties.backup_options.enable.backup_all_masters": {
"value": ${BACKUP_OPTIONS_ENABLE_BACKUP_ALL_MASTERS:-true}
},
".properties.backup_options.enable.cron_schedule": {
"value": "${BACKUP_OPTIONS_ENABLE_CRON_SCHEDULE:-0 0 * * *}"
},
".properties.backups_selector.scp.fingerprint": {
"value": "$BACKUPS_SELECTOR_SCP_FINGERPRINT"
},
".properties.backups.scp.scp_key": {
"value": "$BACKUPS_SELECTOR_SCP_KEY"
},
".properties.backups.scp.destination": {
"value": "$BACKUPS_SELECTOR_SCP_PATH"
},
".properties.backups.scp.port": {
"value": ${BACKUPS_SELECTOR_SCP_PORT:-22}
},
".properties.backups.scp.server": {
"value": "$BACKUPS_SELECTOR_SCP_SERVER"
},
".properties.backups.scp.user": {
"value": "$BACKUPS_SELECTOR_SCP_USER"
}
}
EOF
)
fi
echo "Configuring ${PRODUCT_NAME} ${BACKUPS_SELECTOR}"
$CMD_PATH --target $OPSMAN_URI --username $OPSMAN_USERNAME --password $OPSMAN_PASSWORD --skip-ssl-validation \
configure-product --product-name "${PRODUCT_NAME}" \
--product-properties "$BACKUP_PROPERTIES"
TILE_RESOURCES=$(cat <<-EOF
{
"backup-prepare": {
"instance_type": {"id": "automatic"},
"instances" : $BACKUP_PREPARE_COUNT
},
"cf-mysql-broker": {
"instance_type": {"id": "automatic"},
"instances" : ${CF_MYSQL_BROKER_COUNT:-2}
},
"monitoring": {
"instance_type": {"id": "automatic"},
"instances" : ${MONITORING_COUNT:-1}
},
"mysql": {
"instance_type": {"id": "automatic"},
"instances" : ${MYSQL_COUNT:-3}
},
"proxy": {
"instance_type": {"id": "automatic"},
"instances" : ${PROXY_COUNT:-2}
}
}
EOF
)
echo "Configuring ${PRODUCT_NAME} resources"
$CMD_PATH --target $OPSMAN_URI --username $OPSMAN_USERNAME --password $OPSMAN_PASSWORD --skip-ssl-validation \
configure-product --product-name "${PRODUCT_NAME}" \
--product-resources "$TILE_RESOURCES"
| true
|
10dc1723d9e6e42af79de129956723cb6db3a563
|
Shell
|
Kuniwak/tbf04-tests
|
/tests/test-reset-after-stash-u-recovery.sh
|
UTF-8
| 447
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrate that the commits created by `git stash -u` can still be
# recovered (as dangling commits) after the stash is cleared and the work
# tree is hard-reset.
#
# bash shebang: `set -o pipefail` is a bashism that errors out (and, with
# -e, aborts immediately) on plain POSIX shells such as dash, which many
# distributions install as /bin/sh.  Options live in `set` rather than the
# shebang so they survive `bash script.sh` invocation too.
set -eux
set -o pipefail

workspace=$(mktemp -d ./reset-after-stash-recovery.XXXXXX)

# The subshell keeps the cd local; the caller's working directory is intact.
(cd "$workspace"
git init

echo a > a
echo b > b
echo c > c
git add a
git commit -m "Add a"

echo aa > a

git stash -u
git stash clear
git reset --hard HEAD
git fsck

# Collect the hashes of all dangling commits left behind by the cleared
# stash.  $dangling_commit is intentionally unquoted below: there may be
# several hashes (one per line) and each must reach `git log` as a
# separate argument.
dangling_commit=$(git fsck | grep 'dangling commit' | sed -e 's/^dangling commit \([0-9a-f]*\)/\1/')
git log --graph --oneline $dangling_commit
)

rm -rf -- "$workspace"
| true
|
36bd5b1b4dec04458683686aa5d02f4b2e693f31
|
Shell
|
rmitra/PS-Dataset
|
/download_dataset.sh
|
UTF-8
| 633
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the PS-Dataset scenes (patch metadata, binary patch images, pair
# lists and the scene image directories) into ./PS-Dataset/<scene>/.

BASE_URL='https://www.cse.iitb.ac.in/PS-Dataset'
# Scene ids and per-scene file names; both lists are deliberately stored as
# whitespace separated strings and expanded unquoted in the loops below.
SCENE_LIST='4 11 13 14 15 16 20 23 24 30 31 34 36 41 44 49 50 51 53 65 66 67 71 74 76 88 89 90 91 95'
FILE_TYPES='patch_info.txt patchImg.bin pair_patch.txt'
BASE_DOWNLOAD_DIR=$(pwd)/PS-Dataset

for SC in $SCENE_LIST; do
    echo "Downloading scene $SC"
    REMOTE_SC_DIR=$BASE_URL/$SC
    DOWNLOAD_SC_DIR=$BASE_DOWNLOAD_DIR/$SC
    for FILE_TYPE in $FILE_TYPES; do
        FILE_PATH=$REMOTE_SC_DIR/$FILE_TYPE
        echo "$FILE_PATH"
        # Quoted paths: BASE_DOWNLOAD_DIR derives from $(pwd) and may
        # contain spaces; --cut-dirs 2 drops the site prefix directories.
        wget -P "$DOWNLOAD_SC_DIR" -nH --cut-dirs 2 "$FILE_PATH"
    done
    # Recursive fetch of the scene's image directory, images only.
    wget -r -np -nH -A jpeg,jpg,png -P "$DOWNLOAD_SC_DIR" --cut-dirs 2 "$REMOTE_SC_DIR/images/"
done
| true
|
3c768647a38b3c012b979da1181d663364c9a594
|
Shell
|
scmdlcll/gcloud
|
/gcloud_install.sh
|
UTF-8
| 2,398
| 3.125
| 3
|
[] |
no_license
|
# ensure system is updated and has basic build tools
sudo apt-get update
sudo apt-get --assume-yes upgrade
sudo apt-get --assume-yes install tmux build-essential gcc g++ make binutils
sudo apt-get --assume-yes install software-properties-common
# download and install GPU drivers
# see https://cloud.google.com/compute/docs/gpus/add-gpus#install-gpu-driver
echo "Checking for CUDA and installing."
# Check for CUDA and try to install.
if ! dpkg-query -W cuda; then
curl -O http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/cuda-repo-ubuntu1604_8.0.61-1_amd64.deb
dpkg -i ./cuda-repo-ubuntu1604_8.0.61-1_amd64.deb
sudo apt-get update
sudo apt-get install cuda -y
fi
# verify that GPU driver installed
sudo modprobe nvidia
nvidia-smi
sudo apt-get install libcupti-dev
# install Anaconda for current user
mkdir downloads
cd downloads
wget "https://repo.continuum.io/archive/Anaconda3-4.3.1-Linux-x86_64.sh" -O "Anaconda3-4.3.1-Linux-x86_64.sh"
bash "Anaconda3-4.3.1-Linux-x86_64.sh" -b
echo "export PATH=\"$HOME/anaconda3/bin:\$PATH\"" >> ~/.bashrc
export PATH="$HOME/anaconda3/bin:$PATH"
conda install -y bcolz
conda upgrade -y --all
# install and configure theano
conda install theano pygpu
echo "[global]
device = cuda0
floatX = float32
[cuda]
root = /usr/local/cuda" > ~/.theanorc
# install and configure keras
conda install keras
mkdir ~/.keras
echo '{
"epsilon": 1e-07,
"floatx": "float32",
"backend": "theano",
"image_data_format": "channels_first"
}' > ~/.keras/keras.json
# install cudnn libraries
wget "http://files.fast.ai/files/cudnn.tgz" -O "cudnn.tgz"
tar -zxf cudnn.tgz
cd cuda
sudo cp lib64/* /usr/local/cuda/lib64/
sudo cp include/* /usr/local/cuda/include/
# configure jupyter and prompt for password
jupyter notebook --generate-config
jupass=`python -c "from notebook.auth import passwd; print(passwd())"`
echo "c.NotebookApp.ip = '*'
c.NotebookApp.password = u'"$jupass"'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 9999" >> $HOME/.jupyter/jupyter_notebook_config.py
# clone the fast.ai course repo and prompt to start notebook
cd ~
git clone https://github.com/fastai/courses.git
echo "\"jupyter notebook\" will start Jupyter on port 9999"
echo "If you get an error instead, try restarting your session so your $PATH is updated"
| true
|
5635f5653abb76ed22f78ef7b610caab6e47d255
|
Shell
|
ecoreos/syno
|
/etc.defaults/rc.sysv/slapd.sh
|
UTF-8
| 1,846
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
#set -x
# Synology SysV wrapper around the slapd upstart jobs, with BDB recovery on
# failed starts.
VERSION=2
# Absolute tool paths (the init environment has a minimal PATH).
PIDOF=/bin/pidof
ECHO=/bin/echo
CAT=/bin/cat
SLEEP=/bin/sleep
LOGGER=/usr/bin/logger
DB_RECOVER=/usr/bin/db_recover
LDAPSEARCH=/usr/bin/ldapsearch
SLAPD=/usr/sbin/slapd
pidfiledir=/var/run
# Directory Server package layout: BDB data and the slapd.d config database.
NEWLDAP_ROOT=/var/packages/DirectoryServer/target
NEWDBDIR=$NEWLDAP_ROOT/etc/data
NEWLDAP_BDB=$NEWDBDIR/bdb
NEWLDAP_CONFDB=$NEWDBDIR/slapd.d
# Poll the local LDAP server (anonymous rootDSE search on the default port)
# until it accepts connections, retrying once per second, up to 5 attempts.
# Returns 0 as soon as the server answers, 1 when it never comes up.
TestCanConnect() {
    local i=0
    while [ "$i" -lt 5 ] ; do
        $LDAPSEARCH -LLLxh 0 -b '' -s base > /dev/null 2>&1
        # ldapsearch exits 255 when it cannot contact the server at all; any
        # other status (even an LDAP-level error) proves the listener is up.
        if [ $? -ne 255 ]; then
            return 0
        fi
        $SLEEP 1
        # POSIX arithmetic expansion replaces the external `expr` call.
        i=$((i + 1))
    done
    return 1
}
# Succeed when $1 names a pidfile of a currently running process.
#   $1 - path to a pidfile
# Returns 0 when the file exists, is non-empty and /proc/<pid> is present;
# 1 otherwise.  Pid is intentionally left global (original behavior).
CheckPidExist()
{
    if [ -f "$1" ]; then
        # $(...) replaces the legacy backticks; quoting protects odd paths.
        Pid=$($CAT "$1")
        # Two separate tests joined by && instead of the obsolescent `-a`.
        if [ -n "$Pid" ] && [ -d "/proc/$Pid" ]; then
            return 0
        fi
    fi
    return 1
}
case "$1" in
start|'')
if ! CheckPidExist $pidfiledir/slapd.pid && [ -x $SLAPD ]; then
$ECHO "#################"
$ECHO ' Starting Slapd'
$ECHO "#################"
/usr/bin/slapindex -F $NEWLDAP_CONFDB
/sbin/initctl start slapd #FIXME
if ! TestCanConnect ; then
/sbin/initctl stop slapd #FIXME
if [ -x $DB_RECOVER ] && ! pidof slapd > /dev/null 2>&1 ; then
$LOGGER -p user.err -t `basename $0` "start slapd failed. try to db_recover."
$DB_RECOVER -h $NEWLDAP_BDB
$ECHO "start go restart"
/sbin/initctl start slapd
else
$LOGGER -p user.err -t `basename $0` "db_recover not exists or slapd unexpected exists."
fi
fi
# Create default ppolicy entry.
/var/packages/DirectoryServer/target/tool/synoldapbrowser --migrate-olc-config
fi
;;
stop)
[ -e "/tmp/upgrade_stop_service" ] && exit 0
$ECHO
$ECHO "#################"
$ECHO ' Stoping Slapd'
$ECHO "#################"
/sbin/initctl stop slapd #FIXME
;;
restart)
$0 stop
$0 start
;;
reload)
/sbin/initctl reload slapd
;;
status)
/usr/syno/sbin/synoservicectl --status slapd
exit $?
;;
esac
| true
|
179a3fa535adb0de7515cf93d3a4cb6b676668a8
|
Shell
|
nautsio/nomad-ok
|
/packer/scripts/docker.sh
|
UTF-8
| 573
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Packer provisioning step: install Docker and prepare the host for
# memory-accounted containers.
# Quit on errors.
set -e
# Install Docker
# NOTE(review): piping a remote script into sh executes it unverified --
# acceptable for image builds from a trusted source, but worth flagging.
curl -sSL https://get.docker.com/ | sh
# maybe work around issue 'System error: The minimum allowed cpu-shares is 1024'
# Drop-in systemd override shipped by the packer file provisioner to /tmp.
install -o root -g root -m 755 -d /etc/systemd/system/docker.service.d
install -o root -g root -m 644 /tmp/etc/systemd/system/docker.service.d/exec.conf /etc/systemd/system/docker.service.d/exec.conf
systemctl daemon-reload
# Enable cgroups memory accounting
# Appends the kernel parameters inside the quoted GRUB_CMDLINE_LINUX value;
# takes effect after update-grub and a reboot.
perl -pi -e 's/^(GRUB_CMDLINE_LINUX=".*)"/$1 debian-installer=en_US cgroup_enable=memory swapaccount=1"/' /etc/default/grub
update-grub
| true
|
41740294056cafacb281dc34c655853ccd6ecc21
|
Shell
|
niuzhiqiang90/toolkit
|
/bin/wiznote-cli
|
UTF-8
| 3,788
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
################################################################################
# FILE: wiznote-cli
# USAGE:
# DESCRIPTION: this script is used to manage the wiznote
# OPTIONS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: niuzhiqiang, niuzhiqiang90@foxmail.com
# ORGANIZATION:
# CREATED: Sun 27 Aug 2017 08:36:17 PM CST
# REVISION: 1.0.0
################################################################################
set -o nounset
declare -r wiznote_cmd="/usr/local/bin/wiznote"
declare -r wiznote_log="/home/${LOGNAME}/.wiznote/log/wiznote.log"
################################################################################
# start wiznote
################################################################################
function start_wiznote()
{
local wiznote_pid=$(get_wiznote_pid)
if [[ $wiznote_pid -ne 0 ]]; then
echo "wiznote already running, PID=${wiznote_pid}"
exit
fi
echo "Starting wiznote ..."
local retry_times=1
local wiznote_pid=""
while [[ ! $wiznote_pid ]]; do
${wiznote_cmd} >> ${wiznote_log} 2>&1 &
sleep 1
wiznote_pid=$(get_wiznote_pid)
if [[ $wiznote_pid -ne 0 ]]; then
echo "wiznote started, PID=${wiznote_pid}"
break
fi
let retry_times+=1
if [[ $retry_times -gt 3 ]]; then
echo "wiznote start failed"
break
fi
done
}
################################################################################
# stop wiznote
################################################################################
function stop_wiznote()
{
local wiznote_pid=$(get_wiznote_pid)
if [[ $wiznote_pid -ne 0 ]]; then
kill ${wiznote_pid}
echo "quit wiznote ..."
else
echo "wiznote stopped"
fi
}
################################################################################
# get the pid of the running wiznote process
# Outputs: the PID on stdout when wiznote is running; nothing otherwise
# Returns: 0 when a wiznote process was found, 1 otherwise
################################################################################
function get_wiznote_pid()
{
    local wiznote_pid
    # Declaration split from the command substitution so `local` cannot mask
    # the pipeline's exit status; $wiznote_cmd quoted against splitting.
    wiznote_pid=$(ps -ef | grep -i "$wiznote_cmd" | grep -v grep | head -n 1 | awk '{print $2}')
    if [[ ${wiznote_pid} ]]; then
        echo "${wiznote_pid}"
        # The original `return $wiznote_pid` truncated the PID modulo 256
        # (shell return codes are 0-255), so e.g. PID 256 reported status 0.
        # Callers consume the PID via stdout, so a plain success status is
        # both correct and compatible.
        return 0
    fi
    return 1
}
################################################################################
# get the wiznote running status
################################################################################
function get_wiznote_status()
{
local wiznote_pid=$(get_wiznote_pid)
if [[ ${wiznote_pid} -ne 0 ]]; then
echo "wiznote is running, PID=${wiznote_pid}"
else
echo "wiznote is stopped"
fi
}
################################################################################
# print the usage
################################################################################
function usage()
{
echo "Usage: $0 {start|stop|restart|reload|status|clean}"
echo "1. run \`$0 start\`: start the wiznote"
echo "2. run \`$0 stop\`: stop the wiznote"
echo "3. run \`$0 restart\`: restart the wiznote"
echo "4. run \`$0 status\`: get the wiznote status"
exit 1
}
################################################################################
# main logic
################################################################################
function main()
{
if [[ $# -ne 1 ]]; then
usage
fi
case "$1" in
start)
start_wiznote
;;
stop)
stop_wiznote
;;
restart)
stop_wiznote
sleep 1
start_wiznote
;;
status)
get_wiznote_status
;;
*)
usage
;;
esac
}
main $@
| true
|
fb92098d661364c8077ae7026c0b56cb292be5d1
|
Shell
|
OpenDZ/deploy-vm
|
/deploy_k8s_cluster.sh
|
UTF-8
| 16,266
| 3.671875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
usage() {
echo "
Usage: $0 [options]
Options:
-c|--channel CHANNEL
channel name (stable/beta/alpha) [default: stable]
-r|--release RELEASE
CoreOS release [default: current]
-s|--size CLUSTER_SIZE
Amount of virtual machines in a cluster. [default: 2]
-p|--pub-key PUBLIC_KEY
Path to public key. Private key path will
be detected automatically. [default: ~/.ssh/id_rsa.pub]
-i|--master-config MASTER_CLOUD_CONFIG
Path to k8s master node cloud-config. [default: ./k8s_master.yaml]
-I|--node-config NODE_CLOUD_CONFIG
Path to k8s node cloud-config. [default: ./k8s_node.yaml]
-t|--tectonic TECTONIC
Spawns Tectonic cluster on top of k8s.
-m|--ram RAM
Amount of memory in megabytes for each VM. [default: 512]
-u|--cpu CPUs
Amount of CPUs for each VM. [default: 1]
-v|--verbose Make verbose
-h|--help This help message
This script is a wrapper around libvirt for starting a cluster of CoreOS virtual
machines.
"
}
# Print $1 to stdout wrapped in bright-red ANSI escape codes.
print_red() {
    # $'...' yields the literal ESC byte; %b keeps backslash-escape handling
    # of the message identical to the original implementation.
    printf '%b' $'\e[91m'"$1"$'\e[0m\n'
}
# Print $1 to stdout wrapped in bright-green ANSI escape codes.
print_green() {
    # $'...' yields the literal ESC byte; %b keeps backslash-escape handling
    # of the message identical to the original implementation.
    printf '%b' $'\e[92m'"$1"$'\e[0m\n'
}
# Abort the whole script with a red error message when the external tool
# named by $1 is not available on PATH.
check_cmd() {
    # `command -v` is POSIX and a shell builtin, unlike the external (and
    # occasionally absent) `which` utility the original relied on.
    command -v "$1" >/dev/null || { print_red "'$1' command is not available, please install it first, then try again" && exit 1; }
}
# Locate an ISO 9660 authoring tool, preferring genisoimage over mkisofs,
# and record its path in the global GENISOIMAGE.  Aborts when neither of
# the two tools is installed.
check_genisoimage() {
    # elif flattens the original nested if/else; `command -v` replaces the
    # external `which` (same path output for executables on PATH).
    if command -v genisoimage >/dev/null; then
        GENISOIMAGE=$(command -v genisoimage)
    elif command -v mkisofs >/dev/null; then
        GENISOIMAGE=$(command -v mkisofs)
    else
        print_red "Neither 'genisoimage' nor 'mkisofs' command is available, please install it first, then try again"
        exit 1
    fi
}
# Build the cloud-config drive for $VM_HOSTNAME, either as an ISO image (plus
# a rebuild helper script) or as a squashed 9p filesystem mount, and set the
# virt-install argument in the global CONFIG_DRIVE.
make_configdrive() {
# NOTE(review): `[ selinuxenabled 2>/dev/null ]` is a one-argument string
# test, which is ALWAYS true -- it never runs the selinuxenabled command.
# As written the ISO branch is taken unconditionally; actually invoking
# `selinuxenabled` would change behavior on non-SELinux hosts, so this is
# flagged rather than changed.
if [ selinuxenabled 2>/dev/null ] || [ "$LIBVIRT_DEFAULT_URI" = "bhyve:///system" ]; then
# We use ISO configdrive to avoid complicated SELinux conditions
$GENISOIMAGE -input-charset utf-8 -R -V config-2 -o "$IMG_PATH/$VM_HOSTNAME/configdrive.iso" "$IMG_PATH/$VM_HOSTNAME" || { print_red "Failed to create ISO image"; exit 1; }
# Drop a helper so the user can regenerate the ISO after editing the config.
echo -e "#!/bin/sh\n$GENISOIMAGE -input-charset utf-8 -R -V config-2 -o \"$IMG_PATH/$VM_HOSTNAME/configdrive.iso\" \"$IMG_PATH/$VM_HOSTNAME\"" > "$IMG_PATH/$VM_HOSTNAME/rebuild_iso.sh"
chmod +x "$IMG_PATH/$VM_HOSTNAME/rebuild_iso.sh"
# bhyve has no cdrom device support here; attach the ISO on the SATA bus.
if [ "$LIBVIRT_DEFAULT_URI" = "bhyve:///system" ]; then
DISK_TYPE="bus=sata"
else
DISK_TYPE="device=cdrom"
fi
CONFIG_DRIVE="--disk path=\"$IMG_PATH/$VM_HOSTNAME/configdrive.iso\",${DISK_TYPE}"
else
# Non-SELinux qemu path: share the config dir via a squashed 9p filesystem.
CONFIG_DRIVE="--filesystem \"$IMG_PATH/$VM_HOSTNAME/\",config-2,type=mount,mode=squash"
fi
}
# Pick a usable libvirt hypervisor connection: try qemu first, fall back to
# bhyve, and abort when neither socket answers.  Exports
# LIBVIRT_DEFAULT_URI so all later virsh/virt-install calls use it.
check_hypervisor() {
export LIBVIRT_DEFAULT_URI=qemu:///system
# `virsh list` doubles as a cheap connectivity probe.
if ! virsh list > /dev/null 2>&1; then
export LIBVIRT_DEFAULT_URI=bhyve:///system
if ! virsh list > /dev/null 2>&1; then
print_red "Failed to connect to the hypervisor socket"
exit 1
fi
fi
}
handle_channel_release() {
  # Resolve the release channel and version for the current OS image.
  # $1 - default channel ("" if the OS has no channel concept)
  # $2 - default release ("" if the OS has no release concept)
  # Command-line overrides (OPTVAL_CHANNEL / OPTVAL_RELEASE) win over the
  # defaults; the results are left in the globals CHANNEL and RELEASE.
  local ch_default="$1"
  local rel_default="$2"
  if [ -n "$ch_default" ]; then
    : "${CHANNEL:=$ch_default}"
    if [ -n "$OPTVAL_CHANNEL" ]; then
      CHANNEL=$OPTVAL_CHANNEL
    else
      print_green "Using default $CHANNEL channel for $OS_NAME"
    fi
  else
    print_green "$OS_NAME doesn't use channel"
  fi
  if [ -n "$rel_default" ]; then
    : "${RELEASE:=$rel_default}"
    if [ -n "$OPTVAL_RELEASE" ]; then
      RELEASE=$OPTVAL_RELEASE
    else
      print_green "Using default $RELEASE release for $OS_NAME"
    fi
  else
    print_green "$OS_NAME doesn't use release"
  fi
}
# Fail fast if any external tool required later in the script is missing.
check_cmd wget
check_cmd virsh
check_cmd virt-install
check_cmd qemu-img
check_cmd xzcat
check_cmd bzcat
check_cmd cut
check_cmd sed
check_genisoimage
check_hypervisor
# When invoked through sudo, resolve the *invoking* user's name and home
# directory so images and SSH keys land in that user's account.
USER_ID=${SUDO_UID:-$(id -u)}
USER=$(getent passwd "${USER_ID}" | cut -d: -f1)
HOME=$(getent passwd "${USER_ID}" | cut -d: -f6)
# Show the usage text if option parsing below terminates the script.
trap usage EXIT
while [ $# -ge 1 ]; do
  case "$1" in
    -c|--channel)
      OPTVAL_CHANNEL="$2"
      shift 2 ;;
    -r|--release)
      OPTVAL_RELEASE="$2"
      shift 2 ;;
    -s|--cluster-size)
      OPTVAL_CLUSTER_SIZE="$2"
      shift 2 ;;
    -p|--pub-key)
      OPTVAL_PUB_KEY="$2"
      shift 2 ;;
    -i|--master-config)
      OPTVAL_MASTER_CLOUD_CONFIG="$2"
      shift 2 ;;
    -I|--node-config)
      OPTVAL_NODE_CLOUD_CONFIG="$2"
      shift 2 ;;
    -m|--ram)
      OPTVAL_RAM="$2"
      shift 2 ;;
    -u|--cpu)
      OPTVAL_CPU="$2"
      shift 2 ;;
    -t|--tectonic)
      TECTONIC=true
      shift ;;
    -v|--verbose)
      set -x
      shift ;;
    -h|-help|--help)
      usage
      trap - EXIT
      trap
      exit ;;
    *)
      break ;;
  esac
done
# Parsing finished: clear the EXIT trap. NOTE(review): the bare `trap` merely
# lists the remaining traps on stdout -- presumably leftover debug output;
# confirm intended.
trap - EXIT
trap
# VM naming conventions and the default CoreOS SSH account.
OS_NAME="coreos"
PREFIX="k8s"
MASTER_PREFIX="${PREFIX}-master"
NODE_PREFIX="${PREFIX}-node"
SSH_USER="core"
# Refuse to run if a previous k8s-master/k8s-node VM is still defined.
virsh list --all --name | grep -q "^${PREFIX}-[mn]" && { print_red "'$PREFIX-*' VMs already exist"; exit 1; }
# Cluster size: default 2 (master + one node), overridable via -s.
: ${CLUSTER_SIZE:=2}
if [ -n "$OPTVAL_CLUSTER_SIZE" ]; then
  if [[ ! "$OPTVAL_CLUSTER_SIZE" =~ ^[0-9]+$ ]]; then
    print_red "'$OPTVAL_CLUSTER_SIZE' is not a number"
    usage
    exit 1
  fi
  CLUSTER_SIZE=$OPTVAL_CLUSTER_SIZE
fi
if [ "$CLUSTER_SIZE" -lt "2" ]; then
  echo "'$CLUSTER_SIZE' is lower than 2 (minimal k8s cluster size)"
  usage
  exit 1
fi
# SSH public key selection: -p argument, else ~/.ssh/id_rsa.pub; generate a
# key pair on the fly when nothing usable is found.
: ${INIT_PUB_KEY:="$HOME/.ssh/id_rsa.pub"}
if [ -n "$OPTVAL_PUB_KEY" ]; then
  INIT_PUB_KEY=$OPTVAL_PUB_KEY
fi
if [ -z "$INIT_PUB_KEY" ] || [ ! -f "$INIT_PUB_KEY" ]; then
  print_red "SSH public key path is not valid or not specified"
  if [ -n "$HOME" ]; then
    PUB_KEY_PATH="$HOME/.ssh/id_rsa.pub"
  else
    print_red "Can not determine home directory for SSH pub key path"
    exit 1
  fi
  print_green "Will use default path to SSH public key: $PUB_KEY_PATH"
  if [ ! -f "$PUB_KEY_PATH" ]; then
    print_red "Path $PUB_KEY_PATH doesn't exist"
    # NOTE(review): the unescaped dot in `s#.pub##` matches any character;
    # harmless for conventional key names, but confirm.
    PRIV_KEY_PATH=$(echo "${PUB_KEY_PATH}" | sed 's#.pub##')
    if [ -f "$PRIV_KEY_PATH" ]; then
      print_green "Found private key, generating public key..."
      # Run key generation as the invoking user when under sudo so the
      # generated files keep the right ownership.
      if [ -n "$SUDO_UID" ]; then
        sudo -u "$USER" ssh-keygen -y -f "$PRIV_KEY_PATH" | sudo -u "$USER" tee "${PUB_KEY_PATH}" > /dev/null
      else
        ssh-keygen -y -f "$PRIV_KEY_PATH" > "${PUB_KEY_PATH}"
      fi
    else
      print_green "Generating private and public keys..."
      if [ -n "$SUDO_UID" ]; then
        sudo -u "$USER" ssh-keygen -t rsa -N "" -f "$PRIV_KEY_PATH"
      else
        ssh-keygen -t rsa -N "" -f "$PRIV_KEY_PATH"
      fi
    fi
  fi
else
  PUB_KEY_PATH="$INIT_PUB_KEY"
  print_green "Will use following path to SSH public key: $PUB_KEY_PATH"
fi
# Directory layout expected by the CoreOS config-drive (OpenStack format).
OPENSTACK_DIR="openstack/latest"
# Enables automatic hostpath provisioner based on claim (test and development feature only)
# Experimental, see more here: https://github.com/kubernetes/kubernetes/pull/30694
K8S_AUTO_HOSTPATH_PROVISIONER=false # true or false
if [ "x$K8S_AUTO_HOSTPATH_PROVISIONER" = "xtrue" ]; then
  K8S_HOSTPATH_PROVISIONER_MOUNT_POINT="start"
else
  K8S_HOSTPATH_PROVISIONER_MOUNT_POINT="stop"
fi
PUB_KEY=$(cat "${PUB_KEY_PATH}")
# Derive the private key path by stripping ".pub" (NOTE(review): unescaped
# dot in the sed pattern, same caveat as above).
PRIV_KEY_PATH=$(echo ${PUB_KEY_PATH} | sed 's#.pub##')
CDIR=$(cd `dirname $0` && pwd)
IMG_PATH="${HOME}/libvirt_images/${OS_NAME}"
RANDOM_PASS=$(openssl rand -base64 12)
# Optional Tectonic license / Docker registry credentials, if present.
TECTONIC_LICENSE=$(cat "$CDIR/tectonic.lic" 2>/dev/null || true)
DOCKER_CFG=$(cat "$CDIR/docker.cfg" 2>/dev/null || true)
# Master cloud-config template: Tectonic variant when -t was given.
if [ "$TECTONIC" = "true" ]; then
  : ${MASTER_USER_DATA_TEMPLATE:="${CDIR}/k8s_tectonic_master.yaml"}
else
  : ${MASTER_USER_DATA_TEMPLATE:="${CDIR}/k8s_master.yaml"}
fi
if [ -n "$OPTVAL_MASTER_CLOUD_CONFIG" ]; then
  if [ -f "$OPTVAL_MASTER_CLOUD_CONFIG" ]; then
    MASTER_USER_DATA_TEMPLATE=$OPTVAL_MASTER_CLOUD_CONFIG
  else
    print_red "Custom master cloud-config specified, but it is not available"
    print_red "Will use default master cloud-config path (${MASTER_USER_DATA_TEMPLATE})"
  fi
fi
: ${NODE_USER_DATA_TEMPLATE:="${CDIR}/k8s_node.yaml"}
if [ -n "$OPTVAL_NODE_CLOUD_CONFIG" ]; then
  if [ -f "$OPTVAL_NODE_CLOUD_CONFIG" ]; then
    NODE_USER_DATA_TEMPLATE=$OPTVAL_NODE_CLOUD_CONFIG
  else
    print_red "Custom node cloud-config specified, but it is not available"
    print_red "Will use default node cloud-config path (${NODE_USER_DATA_TEMPLATE})"
  fi
fi
# Request an etcd discovery token sized for the whole cluster (network call).
ETCD_DISCOVERY=$(curl -s "https://discovery.etcd.io/new?size=$CLUSTER_SIZE")
handle_channel_release stable current
# Per-VM memory (MB) and vCPU counts, with numeric validation of overrides.
: ${RAM:=512}
if [ -n "$OPTVAL_RAM" ]; then
  if [[ ! "$OPTVAL_RAM" =~ ^[0-9]+$ ]]; then
    print_red "'$OPTVAL_RAM' is not a valid amount of RAM"
    usage
    exit 1
  fi
  RAM=$OPTVAL_RAM
fi
: ${CPUs:=1}
if [ -n "$OPTVAL_CPU" ]; then
  if [[ ! "$OPTVAL_CPU" =~ ^[0-9]+$ ]]; then
    print_red "'$OPTVAL_CPU' is not a valid amount of CPUs"
    usage
    exit 1
  fi
  CPUs=$OPTVAL_CPU
fi
# Kubernetes release to deploy and the hyperkube image that provides it.
K8S_RELEASE="v1.4.5"
K8S_IMAGE="gcr.io/google_containers/hyperkube:${K8S_RELEASE}"
#K8S_IMAGE="quay.io/coreos/hyperkube:${K8S_RELEASE}_coreos.0"
FLANNEL_TYPE=vxlan
# Build the etcd endpoint list: master first, then every worker node.
ETCD_ENDPOINTS=""
for SEQ in $(seq 1 $CLUSTER_SIZE); do
  if [ "$SEQ" = "1" ]; then
    ETCD_ENDPOINTS="http://k8s-master:2379"
  else
    # FIX: was `$[SEQ-1]` -- the deprecated bash arithmetic form; use $(( )).
    NODE_SEQ=$((SEQ-1))
    ETCD_ENDPOINTS="$ETCD_ENDPOINTS,http://k8s-node-$NODE_SEQ:2379"
  fi
done
# Cluster networking constants consumed by the cloud-config templates.
POD_NETWORK=10.100.0.0/16
SERVICE_IP_RANGE=10.101.0.0/24
K8S_SERVICE_IP=10.101.0.1
DNS_SERVICE_IP=10.101.0.254
K8S_DOMAIN=skydns.local
# CoreOS image download locations for the selected channel/release.
IMG_NAME="coreos_${CHANNEL}_${RELEASE}_qemu_image.img"
IMG_URL="https://${CHANNEL}.release.core-os.net/amd64-usr/${RELEASE}/coreos_production_qemu_image.img.bz2"
SIG_URL="https://${CHANNEL}.release.core-os.net/amd64-usr/${RELEASE}/coreos_production_qemu_image.img.bz2.sig"
GPG_PUB_KEY="https://coreos.com/security/image-signing-key/CoreOS_Image_Signing_Key.asc"
GPG_PUB_KEY_ID="07F23A2F63D6D4A17F552EF348F9B96A2E16137F"
# Best-effort GPG setup: import the CoreOS signing key if gpg is available;
# image verification is skipped (with a warning) otherwise.
set +e
if gpg --version > /dev/null 2>&1; then
  GPG=true
  if ! gpg --list-sigs $GPG_PUB_KEY_ID > /dev/null; then
    wget -q -O - $GPG_PUB_KEY | gpg --import --keyid-format LONG || { GPG=false && print_red "Warning: can not import GPG public key"; }
  fi
else
  GPG=false
  print_red "Warning: please install GPG to verify CoreOS images' signatures"
fi
set -e
# Pick a decompressor based on the image URL's file extension.
IMG_EXTENSION=""
if [[ "${IMG_URL}" =~ \.([a-z0-9]+)$ ]]; then
  IMG_EXTENSION=${BASH_REMATCH[1]}
fi
case "${IMG_EXTENSION}" in
  bz2)
    DECOMPRESS="bzcat";;
  xz)
    DECOMPRESS="xzcat";;
  *)
    DECOMPRESS="cat";;
esac
# Per-VM provisioning loop: render the cloud-config template, build the
# config drive, make sure the storage pool and base image exist, then
# define and start the VM with virt-install.
for SEQ in $(seq 1 $CLUSTER_SIZE); do
  if [ "$SEQ" = "1" ]; then
    VM_HOSTNAME=$MASTER_PREFIX
    COREOS_MASTER_HOSTNAME=$VM_HOSTNAME
    USER_DATA_TEMPLATE=$MASTER_USER_DATA_TEMPLATE
  else
    # FIX: was `$[SEQ-1]` -- the deprecated bash arithmetic form; use $(( )).
    NODE_SEQ=$((SEQ-1))
    VM_HOSTNAME="${NODE_PREFIX}-$NODE_SEQ"
    USER_DATA_TEMPLATE=$NODE_USER_DATA_TEMPLATE
  fi
  if [ ! -d "$IMG_PATH/$VM_HOSTNAME/$OPENSTACK_DIR" ]; then
    mkdir -p "$IMG_PATH/$VM_HOSTNAME/$OPENSTACK_DIR" || { print_red "Can not create $IMG_PATH/$VM_HOSTNAME/$OPENSTACK_DIR directory" && exit 1; }
    # Substitute every %PLACEHOLDER% in the cloud-config template.
    sed "s#%PUB_KEY%#$PUB_KEY#g;\
s#%HOSTNAME%#$VM_HOSTNAME#g;\
s#%DISCOVERY%#$ETCD_DISCOVERY#g;\
s#%RANDOM_PASS%#$RANDOM_PASS#g;\
s#%MASTER_HOST%#$COREOS_MASTER_HOSTNAME#g;\
s#%K8S_RELEASE%#$K8S_RELEASE#g;\
s#%K8S_IMAGE%#$K8S_IMAGE#g;\
s#%FLANNEL_TYPE%#$FLANNEL_TYPE#g;\
s#%POD_NETWORK%#$POD_NETWORK#g;\
s#%SERVICE_IP_RANGE%#$SERVICE_IP_RANGE#g;\
s#%K8S_SERVICE_IP%#$K8S_SERVICE_IP#g;\
s#%DNS_SERVICE_IP%#$DNS_SERVICE_IP#g;\
s#%K8S_DOMAIN%#$K8S_DOMAIN#g;\
s#%K8S_HOSTPATH_PROVISIONER_MOUNT_POINT%#$K8S_HOSTPATH_PROVISIONER_MOUNT_POINT#g;\
s#%K8S_AUTO_HOSTPATH_PROVISIONER%#$K8S_AUTO_HOSTPATH_PROVISIONER#g;\
s#%TECTONIC_LICENSE%#$TECTONIC_LICENSE#g;\
s#%DOCKER_CFG%#$DOCKER_CFG#g;\
s#%ETCD_ENDPOINTS%#$ETCD_ENDPOINTS#g" "$USER_DATA_TEMPLATE" > "$IMG_PATH/$VM_HOSTNAME/$OPENSTACK_DIR/user_data"
    make_configdrive
  else
    print_green "'$IMG_PATH/$VM_HOSTNAME/$OPENSTACK_DIR' directory exists, usigng existing data"
    make_configdrive
  fi
  # Ensure a libvirt storage pool backed by $IMG_PATH exists and is running.
  virsh pool-info $OS_NAME > /dev/null 2>&1 || virsh pool-create-as $OS_NAME dir --target "$IMG_PATH" || { print_red "Can not create $OS_NAME pool at $IMG_PATH target" && exit 1; }
  # Make this pool persistent
  (virsh pool-dumpxml $OS_NAME | virsh pool-define /dev/stdin)
  virsh pool-start $OS_NAME > /dev/null 2>&1 || true
  # Download (and, when possible, GPG-verify) the base CoreOS image once.
  if [ ! -f "$IMG_PATH/$IMG_NAME" ]; then
    trap 'rm -f "$IMG_PATH/$IMG_NAME"' INT TERM EXIT
    if [ "${GPG}" = "true" ]; then
      # NOTE(review): after the line continuations the two process
      # substitutions join without a separating space -- confirm against
      # upstream that gpg receives two distinct file arguments here.
      eval "gpg --enable-special-filenames \
--verify \
--batch \
<(wget -q -O - \"$SIG_URL\")\
<(wget -O - \"$IMG_URL\" | tee >($DECOMPRESS > \"$IMG_PATH/$IMG_NAME\"))" || { rm -f "$IMG_PATH/$IMG_NAME" && print_red "Failed to download and verify the image" && exit 1; }
    else
      eval "wget \"$IMG_URL\" -O - | $DECOMPRESS > \"$IMG_PATH/$IMG_NAME\"" || { rm -f "$IMG_PATH/$IMG_NAME" && print_red "Failed to download the image" && exit 1; }
    fi
    trap - INT TERM EXIT
    trap
  fi
  # Per-VM copy-on-write volume layered on the shared base image.
  if [ ! -f "$IMG_PATH/${VM_HOSTNAME}.qcow2" ]; then
    qemu-img create -f qcow2 -b "$IMG_PATH/$IMG_NAME" "$IMG_PATH/${VM_HOSTNAME}.qcow2" || \
      { print_red "Failed to create ${VM_HOSTNAME}.qcow2 volume image" && exit 1; }
    virsh pool-refresh $OS_NAME
  fi
  # FIX: the commented-out "--cpu=host" used to trail the command *after* a
  # line continuation, silently folding the comment into the eval'd command
  # line; keep the option as a comment outside the command instead.
  # --cpu=host
  eval virt-install \
    --connect $LIBVIRT_DEFAULT_URI \
    --import \
    --name $VM_HOSTNAME \
    --ram $RAM \
    --vcpus $CPUs \
    --os-type=linux \
    --os-variant=virtio26 \
    --disk path="$IMG_PATH/$VM_HOSTNAME.qcow2",format=qcow2,bus=virtio \
    $CONFIG_DRIVE \
    --vnc \
    --noautoconsole
done
# Unless SKIP_SSH_CHECK is set, wait for every VM to accept SSH, then for
# every Kubernetes node to report Ready before declaring success.
if [ "x${SKIP_SSH_CHECK}" = "x" ]; then
  MAX_SSH_TRIES=50
  MAX_KUBECTL_TRIES=300
  for SEQ in $(seq 1 $CLUSTER_SIZE); do
    if [ "$SEQ" = "1" ]; then
      VM_HOSTNAME=$MASTER_PREFIX
    else
      # NOTE(review): $[ ] is the deprecated arithmetic form; $(( )) preferred.
      NODE_SEQ=$[SEQ-1]
      VM_HOSTNAME="${NODE_PREFIX}-$NODE_SEQ"
    fi
    TRY=0
    # Retry SSH until the VM answers or MAX_SSH_TRIES is exhausted.
    while true; do
      TRY=$((TRY+1))
      if [ $TRY -gt $MAX_SSH_TRIES ]; then
        print_red "Can not connect to ssh, exiting..."
        exit 1
      fi
      echo "Trying to connect to ${VM_HOSTNAME} VM, #${TRY} of #${MAX_SSH_TRIES}..."
      set +e
      RES=$(LANG=en_US ssh -l $SSH_USER -o BatchMode=yes -o ConnectTimeout=1 -o PasswordAuthentication=no -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ${PRIV_KEY_PATH} $VM_HOSTNAME "uptime" 2>&1)
      RES_CODE=$?
      set -e
      if [ $RES_CODE -eq 0 ]; then
        break
      else
        # Sleep only on the known transient connection errors; any other
        # failure retries immediately.
        echo "$RES" | grep -Eq "(refused|No such file or directory|reset by peer|closed by remote host|authentication failure|failure in name resolution|Could not resolve hostname)" && sleep 1 || true
      fi
    done
  done
  print_green "Cluster of $CLUSTER_SIZE $OS_NAME nodes is up and running, waiting for Kubernetes to be ready..."
  for SEQ in $(seq 1 $CLUSTER_SIZE); do
    if [ "$SEQ" = "1" ]; then
      VM_HOSTNAME=$MASTER_PREFIX
    else
      NODE_SEQ=$[SEQ-1]
      VM_HOSTNAME="${NODE_PREFIX}-$NODE_SEQ"
    fi
    TRY=0
    # Poll `kubectl get nodes` on the master until this node reports Ready.
    while true; do
      TRY=$((TRY+1))
      if [ $TRY -gt $MAX_KUBECTL_TRIES ]; then
        print_red "Can not verify Kubernetes status, exiting..."
        exit 1
      fi
      echo "Trying to check whether ${VM_HOSTNAME} Kubernetes node is up and running, #${TRY} of #${MAX_KUBECTL_TRIES}..."
      set +e
      RES=$(LANG=en_US ssh -l $SSH_USER -o BatchMode=yes -o ConnectTimeout=1 -o PasswordAuthentication=no -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ${PRIV_KEY_PATH} $MASTER_PREFIX "/opt/bin/kubectl get nodes $VM_HOSTNAME | grep -q Ready" 2>&1)
      RES_CODE=$?
      set -e
      if [ $RES_CODE -eq 0 ]; then
        break
      else
        sleep 1
      fi
    done
  done
  print_green "Kubernetes cluster is up and running..."
fi
print_green "Use following command to connect to your cluster: 'ssh -i \"$PRIV_KEY_PATH\" core@$COREOS_MASTER_HOSTNAME'"
| true
|
9e41b20ac7f9bfd8d4d5e3aa8f8d45feab8638d2
|
Shell
|
apache/tvm
|
/docker/install/ubuntu_install_spike_sim.sh
|
UTF-8
| 2,003
| 3.34375
| 3
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# ubuntu_install_spike_sim.sh
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -e
set -u
set -o pipefail
set -x

function show_usage() {
    cat <<EOF
Usage: docker/install/ubuntu_install_spike_sim.sh <RISCV_PATH>
RISCV_PATH is the installation path of the risc-v gcc.
EOF
}

# BUG FIX: with zero arguments, a bare "$1" aborts under `set -u` with an
# "unbound variable" error *before* the usage message can print; use ${1:-}.
# Also: chained `||` tests replace the deprecated `[ a -o b ]` form, and the
# non-portable `exit -1` (which yields status 255) becomes a plain `exit 1`.
if [ "$#" -lt 1 ] || [ "${1:-}" == "--help" ] || [ "${1:-}" == "-h" ]; then
    show_usage
    exit 1
fi

# Install prefix for spike; the RISC-V gcc toolchain lives under it too.
export RISCV=$1
export PATH=$RISCV/bin:$PATH
shift

sudo apt-install-and-clear -y --no-install-recommends device-tree-compiler

# Install spike
mkdir /tmp/spike
cd /tmp/spike
# TODO: freeze version?
git clone https://github.com/riscv/riscv-isa-sim.git
pushd riscv-isa-sim
mkdir build
cd build
../configure --prefix="$RISCV" --with-isa=RV32IMAC
make -j"$(nproc)"
make install
popd

# Install pk (proxy kernel) for both rv32imac and rv64imac.
git clone https://github.com/riscv/riscv-pk.git
pushd riscv-pk

# rv32imac
mkdir build
pushd build
../configure --prefix="$(pwd)/install" --host=riscv64-unknown-elf --with-arch=rv32imac
make -j"$(nproc)"
make install
cp ./pk "$RISCV/riscv64-unknown-elf/bin/pk"
popd
git status

# rv64imac
mkdir build64
pushd build64
../configure --prefix="$(pwd)/install" --host=riscv64-unknown-elf --with-arch=rv64imac
make -j"$(nproc)"
make install
cp ./pk "$RISCV/riscv64-unknown-elf/bin/pk64"

# cleanup
rm -rf /tmp/spike
| true
|
956831a0a75299bcbfdde299194e0253eee6b643
|
Shell
|
concord-consortium/sensor-projects
|
/labquest-jna/create-2way-universal.sh
|
UTF-8
| 1,538
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# The Vernier 1.95 version of the NGIO SDK comes with 3 mac libraries:
# libNGIO (fat file with i386 ppc)
# libNGIOUniversal (fat file with i386 ppc740 x86_64)
# libNGIO64 (thin file x86_64)
#
# The script creates a single file that has all the architectures.
#
# Notes:
# If you extract the x86_64 file from libNGIOUniversal it is identical to libNGIO64
# But if you extract the i386 file from libNGIO and libNGIOUniversal they are not the same
# the libNGIO version of the i386 works better. the one in libNGIOUniversal can't reopen a device
# after closing it
#
# 2018-03-28 NGIO_LIB 1.100
# In the latest version of the SDK, the "Universal" lib is actually x86_64 only.
# We also drop PPC support in this version, so we're really only creating a two-way universal.
# Location of the Vernier-supplied mac dylibs inside the SDK.
export NGIO_MAC_DIR=NGIO_SDK/redist/NGIO_lib/mac
mkdir -p target/ngio
rm -f target/ngio/*
# Extract the i386 slice from libNGIO (its i386 build works better than the
# one inside libNGIOUniversal -- see the header notes above).
# lipo -extract ppc $NGIO_MAC_DIR/libNGIO.dylib -output target/ngio/libNGIO1.dylib
lipo -extract i386 $NGIO_MAC_DIR/libNGIO.dylib -output target/ngio/libNGIO2.dylib
# lipo -extract ppc7400 $NGIO_MAC_DIR/libNGIOUniversal.dylib -output target/ngio/libNGIO3.dylib
# lipo -extract x86_64 $NGIO_MAC_DIR/libNGIOUniversal.dylib -output target/ngio/libNGIO4.dylib
# Combine the extracted i386 slice with the x86_64-only "Universal" lib into a
# two-way universal binary, print its architectures, then clean up.
lipo -create target/ngio/* "$NGIO_MAC_DIR/libNGIOUniversal.dylib" -output src/main/resources/org/concord/sensor/labquest/jna/darwin/libNGIOUniversal2way.dylib
lipo -info src/main/resources/org/concord/sensor/labquest/jna/darwin/libNGIOUniversal2way.dylib
rm -r target/ngio
| true
|
50a942d8ac9bff6c76d61293806b3ff969fdfd10
|
Shell
|
travitch/whole-program-llvm
|
/.travis/store.sh
|
UTF-8
| 452
| 3.078125
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash -x
# CI check for the wllvm bitcode-store feature: build a test program with the
# store enabled, then verify that extract-bc can recover the bitcode.
# Make sure we exit if there is a failure
set -e
export PATH=/usr/lib/llvm-3.5/bin:${PATH}
export LLVM_COMPILER=clang
export WLLVM_OUTPUT=WARNING
wllvm-sanity-checker
#setup the store so we test that feature as well
export WLLVM_BC_STORE=/tmp/bc
mkdir /tmp/bc
cd ./test/test_files
make clean
CC=wllvm make one
# Keep the binary, then clean the build tree; extract-bc must still find the
# bitcode via the store.
mv main ../..
make clean
cd ../..
extract-bc main
if [ -s "main.bc" ]
then
    echo "main.bc exists."
else
    exit 1
fi
| true
|
ed2fcbe256f82d19559c8e832c37d218e658cde0
|
Shell
|
abhiomkar/publoy
|
/publoy
|
UTF-8
| 4,565
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# ~/Dropbox/Public/publoy/.publoy
# [{
# project: "save-martian",
# source: "~/code/save-martian",
# target: "~/Dropbox/Public/publoy/save-martian"
# },
# {
# project: "save-martian",
# source: "~/Sites/save-martian",
# target: "~/Dropbox/Public/publoy/save-martian-1"
# },
# {
# project: "save-martian",
# source: "~/www/save-martian",
# target: "~/Dropbox/Public/publoy/save-martian-2" <-- where '2' is calculated as - count('project:save-martian')
# }]
# ~/code/save-martian $ publoy --> ~/Dropbox/Public/publoy/save-martian
# ~/Sites/save-martian $ publoy --> ~/Dropbox/Public/publoy/save-martian-1
# ~/www/save-martian $ publoy --> ~/Dropbox/Public/publoy/save-martian-2
# check if dropbox is installed
# check if rsync command is available
# create a folder 'publoy' in ~/Dropbox/Public if not exists
# create a new file in the above folder called '.publoy' if not exists
# read .publoy, if project folder path in this file matches with the original project folder then proceed with sync
# if the folder path not found in .publoy file then create a new entry where source: original project path, target: ~/Dropbox/Public/publoy/<project_name>((project_name exists) ? '-$count('project:project_name') : '')
# create a new folder $target
# sync project folder $ rsync -av . $target
# get Dropbox share link of $target
# Good to have
# TODO
# able to clone & sync projects from publoy on other computers
ERROR_STATUS=0
function utime
{
    # Seconds since the Unix epoch. `date` already terminates its output with
    # a newline, so the original `echo $(date +%s)` wrapper was a useless
    # extra subshell; call date directly.
    date +%s
}
# create publoy supporting files & folders
DROPBOX_ROOT="$HOME/Dropbox"
if [ ! -d "$DROPBOX_ROOT" ]
then
    echo "Dropbox is not installed in your system? Exiting."
    ERROR_STATUS=1
    exit $ERROR_STATUS
fi

echo ">>> Publoying..."

# BUG FIX: PUBLOY_ROOT used to be tested/created *before* it was assigned,
# making that first check a no-op on an undefined variable; define all paths
# first, then create directories/files. All expansions are quoted.
PUBLIC_ROOT="$HOME/Dropbox/Public"
PUBLOY_ROOT="$HOME/Dropbox/Public/publoy"
PUBLOY_LINKFILE=$PUBLOY_ROOT'/publoy_link'
PUBLOY_METAFILE=$PUBLOY_ROOT'/.publoy'
CONFIG_FILE=$PUBLOY_ROOT'/.publoy_config'

[ ! -d "$PUBLIC_ROOT" ] && mkdir -p "$PUBLIC_ROOT"
[ ! -d "$PUBLOY_ROOT" ] && mkdir -p "$PUBLOY_ROOT"
[ ! -f "$PUBLOY_LINKFILE" ] && touch "$PUBLOY_LINKFILE"
[ ! -f "$PUBLOY_METAFILE" ] && touch "$PUBLOY_METAFILE"
[ ! -f "$CONFIG_FILE" ] && touch "$CONFIG_FILE"
# Info about Project Name, Source & Target - save it to .publoy file if necessary
PROJECT_NAME=$(basename `pwd`)
SOURCE=$(pwd)
PROJECT_NAME_COUNT=0
# TARGET=$PUBLOY_ROOT'/'$PROJECT_NAME
# Scan the metafile (lines of "project:source:target"): count entries sharing
# this project name, and pick up an existing target for this source dir.
while IFS='' read -r line || [[ -n "$line" ]]; do
    IFS=':' read -ra FIELDS <<< "$line"
    if [ ${FIELDS[0]} == $PROJECT_NAME ]
    then
        PROJECT_NAME_COUNT=$(($PROJECT_NAME_COUNT+1))
    fi
    # NOTE(review): this inner loop re-tests FIELDS[1] (not $i) on every
    # iteration, so it is effectively a single comparison -- confirm intended.
    for i in "${FIELDS[@]}"; do
        if [ ${FIELDS[1]} == $SOURCE ]
        then
            TARGET=${FIELDS[2]}
        fi
    done
    IFS=''
done < $PUBLOY_METAFILE
# No recorded target for this source dir: derive one (suffix -N when the
# project name is already taken) and append a new metafile entry.
if [ -z $TARGET ]
then
    if [ $PROJECT_NAME_COUNT -eq 0 ]
    then
        TARGET_SUFFIX=""
    else
        TARGET_SUFFIX="-$PROJECT_NAME_COUNT"
    fi
    TARGET=$PUBLOY_ROOT'/'$PROJECT_NAME$TARGET_SUFFIX
    echo "$PROJECT_NAME:$SOURCE:$TARGET" >> $PUBLOY_METAFILE
fi
if [ -f "$SOURCE/index.html" ]
then
    INDEX_FILE='index.html'
else
    # Fall back to the first *.html file in the current directory.
    INDEX_FILE=$(find . -type f -maxdepth 1 -iname '*.html' | sed 's/^.\///' | head -n 1)
fi
if [[ $INDEX_FILE == "" ]]
then
    ERROR_STATUS=1
    echo ""
    echo -ne "No 'index.html' file. Exiting."
    echo ""
    exit $ERROR_STATUS
fi
[ ! -d $TARGET ] && mkdir -p $TARGET
echo ">>> Syncing to Dropbox..."
rsync -a --stats . $TARGET > /dev/null && echo ">>> done."
# On first run, ask the user for the Dropbox public link of publoy_link so we
# can extract the numeric Dropbox uid and cache it in the config file.
if [ -s $CONFIG_FILE ]
then
    source "$CONFIG_FILE" 2>/dev/null
else
    # first time the user is running the script
    echo -ne "\nThis is the first time you run this script."
    echo -ne "\n\n Please go to $PUBLOY_ROOT folder, right click on publoy_link file, \n"
    echo -ne " click on 'Copy Public Link' and paste it here.\n"
    echo -ne "\nPublic Link: "
    read PUBLIC_LINK
    # Sample Public Link:
    # https://dl.dropboxusercontent.com/u/1234567/publoy/publoy_link
    # now get dropbox user id from PUBLIC_LINK
    re="https:\/\/dl.dropboxusercontent.com\/u\/([0-9]+)\/publoy\/publoy_link"
    if [[ $PUBLIC_LINK =~ $re ]]; then
        uid=${BASH_REMATCH[1]}
        echo "uid=$uid" >> "$CONFIG_FILE"
        echo -ne "> Saved uid in publoy config file.\n"
    else
        echo -ne " FAILED\n"
        ERROR_STATUS=1
    fi
fi
if [ $ERROR_STATUS -eq 0 ]
then
    echo ""
    echo "> Share link: (Syncing...)"
    echo "https://dl.dropboxusercontent.com/u/$uid/publoy/$PROJECT_NAME$TARGET_SUFFIX/$INDEX_FILE"
fi
exit $ERROR_STATUS
| true
|
b4b6b349aa7b3e49441d5c2cec3e31244010e4a0
|
Shell
|
flihp/twobit-buildbot
|
/masters/bin/perms-fix.sh
|
UTF-8
| 219
| 3.703125
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
# Recursively grant the group read access under the directory given as $1:
# directories get g+rx (so they can be listed and entered), files get g+r.
if [ ! -d "$1" ]; then
    echo "$0: $1 is not a directory"
    exit 1
fi
# FIX: quote "$1" and use `find -exec ... {} +` instead of the original
# `find | while read` pipeline, which mis-handles file names containing
# backslashes or leading/trailing whitespace (and forks chmod per entry).
find "$1" -type d -exec chmod g+rx {} +
find "$1" -type f -exec chmod g+r {} +
| true
|
69daa6c1fde430a5c3d55de55ce871c675458701
|
Shell
|
outpaddling/auto-admin
|
/Sys-scripts/auto-add-group
|
UTF-8
| 902
| 3.9375
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh -e

##########################################################################
#   Script description:
#       Create a new group
#
#   Arguments:
#       [-g gid]
#       groupname
#
#   History:
#   Date        Name        Modification
#   2018-11-09  J Bacon     Begin
##########################################################################

usage()
{
    printf "Usage: $0 [-g gid] groupname\n"
    exit 1
}


##########################################################################
#   Main
##########################################################################

# The optional "-g gid" pair must come first. The "0$1" prefix trick keeps
# the test from failing when $1 is unset under plain sh.
if [ 0$1 = 0-g ]; then
    gid_flags="-g $2"
    shift
    shift
fi
if [ $# != 1 ]; then
    usage
fi
group_name=$1

# Group-creation syntax differs per platform; auto-ostype resolves the OS.
case $(auto-ostype) in
FreeBSD)
    pw groupadd $group_name $gid_flags
    ;;
NetBSD|RHEL)
    groupadd $gid_flags $group_name
    ;;
*)
    auto-unsupported-os $0
    exit 1
    ;;
esac
| true
|
ab10930b453d3d1a230957193bb50b4e8a8ed062
|
Shell
|
mverleg/svsite
|
/dev/publish.sh
|
UTF-8
| 599
| 3.1875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Release helper: run the test suite, build the docs, and remind the user to
# bump the version and tag the release.
# run tests with py.test
py.test tests || exit 10
# check style with pylint, exit for any problem except convention messages
#pylint source
#if [ "$?" -ne 16 ]; then exit 20; fi
# create a file to run tests without needing py.test (or plugins)
py.test --genscript=tests/runtests.py || exit 30
# create sphix documentation
# NOTE(review): if `make dirhtml` fails, the `|| exit 40` fires while the cwd
# may still be `docs` -- confirm that is acceptable here.
cd docs && make dirhtml && cd .. || exit 40
# the user should not forget to update the version number, and create a tag in git
printf "\nThe current version is: %s\n" "$(cat 'dev/VERSION')"
printf "Do not forget to increment it, and add a git tag\n"
| true
|
415bae77e771379291b97ed72df75c7327ccb186
|
Shell
|
pay4UU/btcShell
|
/addNewBlock2Mongo.sh
|
UTF-8
| 485
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Poll bitcoind every 10 seconds and import each newly mined block into Mongo.
# Record our PID so an external supervisor can find and stop the poller.
echo $$ | tee /home/btccore/.bitcoin/bin/logs/my.$(basename $0).pid
# RPC credential arguments for bitcoin-cli, read from a sibling file.
rpcCred=$(cat basisRPC)
echo $rpcCred
while [ : ]
do
    index=$(bitcoin-cli $rpcCred getblockcount)
    echo "checking index " $index
    # NOTE(review): lastBlock is never initialised, so on the first pass this
    # comparison is `[ -eq N ]`, which errors and falls through to the
    # "new block" branch -- confirm that bootstrap behaviour is intended.
    if [ $lastBlock -eq $index ]
    then
        echo "we are still at: " $lastBlock
    else
        # Log the new height and pipe the block through the import helpers.
        echo $(date) "new block num and date:" $index| tee -a newBlock.txt
        bc.getblockNV.sh $index | mg.import.sh
        lastBlock=$index
    fi
    sleep 10
done
| true
|
b9273386b7325717351176a4e0b4333ef5261bdc
|
Shell
|
ccliuyang/RabbitThirdLibrary
|
/build_script/ci/build.sh
|
UTF-8
| 4,660
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# CI build driver: selects a group of third-party libraries by RABBIT_NUMBER
# and builds each one via its build_<name>.sh script for the configured target.
set -e

# TODO: when modifying these arrays, also update RABBIT_QT_NUMBER in
# appveyor.yml to the array index where the Qt-dependent groups start.
RABBIT_LIBRARYS[0]="change_prefix zlib expat libgpx openssl libsodium libcurl libqrencode " #dlib "
RABBIT_LIBRARYS[1]="boost"
RABBIT_LIBRARYS[2]="libpng jpeg libgif libtiff freetype libyuv libvpx libopus x264 speex ffmpeg"
RABBIT_LIBRARYS[3]="opencv" # geos gdal"
#RABBIT_LIBRARYS[3]="osg"
#RABBIT_LIBRARYS[4]="OsgQt osgearth "
RABBIT_LIBRARYS[4]="qxmpp qzxing"

export PATH=/usr/bin:$PATH
# AppVeyor MinGW builds: point PATH at the bundled toolchain, curl and CMake.
if [ "$BUILD_TARGERT" = "windows_mingw" \
    -a -n "$APPVEYOR" ]; then
    export RABBIT_TOOLCHAIN_ROOT=/C/Qt/Tools/mingw${RABBIT_TOOLCHAIN_VERSION}
    export PATH="${RABBIT_TOOLCHAIN_ROOT}/bin:/usr/bin:/c/Tools/curl/bin:/c/Program Files (x86)/CMake/bin"
fi

TARGET_OS=`uname -s`
case $TARGET_OS in
    MINGW* | CYGWIN* | MSYS*)
        export PKG_CONFIG=/c/msys64/mingw32/bin/pkg-config.exe
        ;;
    Linux* | Unix*)
        ;;
    *)
        ;;
esac

if [ "$BUILD_TARGERT" = "windows_msvc" ]; then
    export PATH=/C/Perl/bin:$PATH
    rm -fr /usr/include
fi

# Project root: first CLI argument, defaulting to the current directory.
PROJECT_DIR=`pwd`
if [ -n "$1" ]; then
    PROJECT_DIR=$1
fi
echo "PROJECT_DIR:${PROJECT_DIR}"
SCRIPT_DIR=${PROJECT_DIR}/build_script
if [ -d ${PROJECT_DIR}/ThirdLibrary/build_script ]; then
    SCRIPT_DIR=${PROJECT_DIR}/ThirdLibrary/build_script
fi
cd ${SCRIPT_DIR}
SOURCE_DIR=${SCRIPT_DIR}/../src

if [ -z "${LIBRARY_NUMBER}" ]; then
    LIBRARY_NUMBER=0
fi

# Download prebuilt libraries (optional cache of previous build artifacts).
if [ -n "$DOWNLOAD_URL" ]; then
    wget -c -q -O ${SCRIPT_DIR}/../${BUILD_TARGERT}.zip ${DOWNLOAD_URL}
fi

export RABBIT_BUILD_PREFIX=${SCRIPT_DIR}/../build #${BUILD_TARGERT}${RABBIT_TOOLCHAIN_VERSION}_${RABBIT_ARCH}_qt${QT_VERSION}_${RABBIT_CONFIG}
if [ ! -d ${RABBIT_BUILD_PREFIX} ]; then
    mkdir -p ${RABBIT_BUILD_PREFIX}
fi
# Normalise RABBIT_BUILD_PREFIX to an absolute path.
cd ${RABBIT_BUILD_PREFIX}
export RABBIT_BUILD_PREFIX=`pwd`
cd ${SCRIPT_DIR}

# Unpack the prebuilt archive and, where applicable, rewrite the pkg-config
# prefixes to the local build prefix via the bundled change_prefix.sh.
if [ -f ${SCRIPT_DIR}/../${BUILD_TARGERT}.zip ]; then
    echo "unzip -q -d ${RABBIT_BUILD_PREFIX} ${SCRIPT_DIR}/../${BUILD_TARGERT}.zip"
    unzip -q -d ${RABBIT_BUILD_PREFIX} ${SCRIPT_DIR}/../${BUILD_TARGERT}.zip
    if [ "$PROJECT_NAME" != "RabbitThirdLibrary" \
        -a "$BUILD_TARGERT" != "windows_msvc" \
        -a -f "${RABBIT_BUILD_PREFIX}/change_prefix.sh" ]; then
        cd ${RABBIT_BUILD_PREFIX}
        cat lib/pkgconfig/zlib.pc
        cat change_prefix.sh
        echo "bash change_prefix.sh"
        bash change_prefix.sh
        cat lib/pkgconfig/zlib.pc
        cd ${SCRIPT_DIR}
    fi
fi

# Android builds: locate the SDK/NDK/toolchain and the matching Qt build.
if [ "$BUILD_TARGERT" = "android" ]; then
    export ANDROID_SDK_ROOT=${SCRIPT_DIR}/../Tools/android-sdk
    export ANDROID_NDK_ROOT=${SCRIPT_DIR}/../Tools/android-ndk
    export RABBIT_TOOL_CHAIN_ROOT=${SCRIPT_DIR}/../Tools/android-ndk/android-toolchain-${RABBIT_ARCH}
    if [ -z "$APPVEYOR" ]; then
        export JAVA_HOME="/C/Program Files (x86)/Java/jdk1.8.0"
    fi
    QT_DIR=${SCRIPT_DIR}/../Tools/Qt/Qt${QT_VERSION}/${QT_VERSION}
    case $RABBIT_ARCH in
        arm*)
            export QT_ROOT=${QT_DIR}/android_armv7
            ;;
        x86*)
            export QT_ROOT=${QT_DIR}/android_$RABBIT_ARCH
            ;;
        *)
            echo "Don't arch $RABBIT_ARCH"
            ;;
    esac
    export PATH=$PATH:${SCRIPT_DIR}/../Tools/apache-ant/bin:$JAVA_HOME
fi

if [ "$BUILD_TARGERT" != "windows_msvc" ]; then
    RABBIT_MAKE_JOB_PARA="-j`cat /proc/cpuinfo |grep 'cpu cores' |wc -l`" # parallel make job count
    if [ "$RABBIT_MAKE_JOB_PARA" = "-j1" ];then
        RABBIT_MAKE_JOB_PARA="-j2"
    fi
    export RABBIT_MAKE_JOB_PARA
fi

echo "---------------------------------------------------------------------------"
echo "RABBIT_BUILD_PREFIX:$RABBIT_BUILD_PREFIX"
echo "QT_BIN:$QT_BIN"
echo "QT_ROOT:$QT_ROOT"
echo "PKG_CONFIG:$PKG_CONFIG"
echo "PKG_CONFIG_PATH:$PKG_CONFIG_PATH"
echo "PKG_CONFIG_SYSROOT_DIR:$PKG_CONFIG_SYSROOT_DIR"
echo "PATH=$PATH"
echo "RABBIT_BUILD_THIRDLIBRARY:$RABBIT_BUILD_THIRDLIBRARY"
echo "SCRIPT_DIR:$SCRIPT_DIR"
echo "---------------------------------------------------------------------------"

cd ${SCRIPT_DIR}
if [ "$PROJECT_NAME" = "rabbitim" ]; then
    echo "bash ./build_rabbitim.sh ${BUILD_TARGERT} $PROJECT_DIR $CMAKE"
    bash ./build_rabbitim.sh ${BUILD_TARGERT} $CMAKE $PROJECT_DIR
    exit 0
fi

# Build every library in the selected group; on AppVeyor keep the build log
# visible, otherwise silence stdout.
for v in ${RABBIT_LIBRARYS[$RABBIT_NUMBER]}
do
    if [ "$v" = "rabbitim" ]; then
        bash ./build_$v.sh ${BUILD_TARGERT} # > /dev/null
    else
        if [ "$APPVEYOR" = "True" ]; then
            bash ./build_$v.sh ${BUILD_TARGERT} ${SOURCE_DIR}/$v
        else
            bash ./build_$v.sh ${BUILD_TARGERT} ${SOURCE_DIR}/$v > /dev/null
        fi
    fi
done
| true
|
3104897598005bb40c38be68b41d2e19bab50327
|
Shell
|
taylor-marrion/security_scripts
|
/snort_rules_from_list/arrays_to_snort_rules.sh
|
UTF-8
| 571
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate local Snort rules from plain-text lists of known-bad IPs and
# domains, appending one rule per entry to the local rules file with a
# unique, monotonically increasing SID per category.

# write snort rules from array of IP's
mapfile -t bad_ips < /ip_list.txt
sid=1100000
for addr in "${bad_ips[@]}"
do
    printf 'alert ip any any <> %s any (msg:"Known Bad IP"; sid:%s;)\n' "$addr" "$sid" >> /etc/nsm/rules/local.rules
    sid=$((sid+1))
done

# write snort rules from array of domains
mapfile -t bad_domains < /domain_list.txt
sid=1200000
for host in "${bad_domains[@]}"
do
    printf 'alert udp any any -> any any (msg:"Known Bad domain"; content:"%s"; sid:%s;)\n' "$host" "$sid" >> /etc/nsm/rules/local.rules
    sid=$((sid+1))
done

#fin
| true
|
213b38894beb1e6dd7188a4d2a709eb2b6bcf530
|
Shell
|
robotil/robil
|
/C31_PathPlanner/tests/testing
|
UTF-8
| 269
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Regression harness: each test_result* file records a command on its first
# line followed by that command's expected output; rerun the command and diff
# the fresh output against the recorded file.
function test_it {
    tf=$(tempfile)
    c=$(head -1 $1)
    echo "$c" > $tf
    # NOTE(review): `2>&1 >> $tf` points stderr at the *caller's* stdout and
    # only appends stdout to the log (redirections apply left to right) --
    # confirm stderr capture is intended to work this way.
    $c 2>&1 >> $tf
    diff $1 $tf > $tf.diff && (echo "PASS"; rm $tf.diff) || echo "ERROR: log in $tf.diff"
    rm $tf
}
for f in test_result*; do echo -n "TEST: $f ... "; test_it $f; done
| true
|
fd19ca74067443e658a99ef605012365878740c4
|
Shell
|
richardhj/heroku-buildpack-contao
|
/bin/compile
|
UTF-8
| 2,209
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/sh
indent() {
    # Prefix every line read from stdin with seven spaces (the Heroku
    # build-log convention); -u keeps sed unbuffered so output streams
    # promptly during the build.
    sed -u "s/^/       /"
}
echo "-----> Installing Contao managed edition"

# Buildpack compile arguments: $1 = build dir, $2 = cache dir.
BUILD_DIR=$1
BIN_DIR=$(cd $(dirname $0); pwd)
ROOT_DIR=$(dirname $BIN_DIR)
CACHE_DIR=$2/php
mkdir -p "$CACHE_DIR"

# Move monorepo into subfolder named "repo". The composer.json loads this folder as repository.
cd $BUILD_DIR
mkdir repo && ls | grep -v repo | xargs mv -t repo

# Create the project by copying the skeleton files
rsync -a -v $ROOT_DIR/skeleton/ $BUILD_DIR

# Install a mini-PHP (for Composer) and Composer
STACK=${STACK:-heroku-18} # Anvil has none
s3_url="https://lang-php.s3.amazonaws.com/dist-${STACK}-stable/"
# NOTE(review): CACHE_DIR is recomputed and re-created here, duplicating the
# block above -- confirm which occurrence is intended to stay.
CACHE_DIR=$2/php
mkdir -p "$CACHE_DIR"

echo "-----> Creating composer.lock file"

# PHP expects to be installed in /app/.heroku/php because of compiled paths, let's set that up!
# Adapted from the original heroku/php buildpack
mkdir -p /app/.heroku
# all system packages live in there
mkdir -p $BUILD_DIR/.heroku/php

# set up Composer
export COMPOSER_HOME=$CACHE_DIR/.composer
mkdir -p $COMPOSER_HOME

# Install PHP
mkdir -p $BUILD_DIR/.heroku/php-min
ln -s $BUILD_DIR/.heroku/php-min /app/.heroku/php-min
curl --fail --silent --location -o $BUILD_DIR/.heroku/php-min.tar.gz "${s3_url}php-min-7.4.15.tar.gz"
tar xzf $BUILD_DIR/.heroku/php-min.tar.gz -C $BUILD_DIR/.heroku/php-min
rm $BUILD_DIR/.heroku/php-min.tar.gz

# Install Composer, verifying the installer against its published checksum.
EXPECTED_CHECKSUM="$(/app/.heroku/php-min/bin/php -r 'copy("https://composer.github.io/installer.sig", "php://stdout");')"
/app/.heroku/php-min/bin/php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
ACTUAL_CHECKSUM="$(/app/.heroku/php-min/bin/php -r "echo hash_file('sha384', 'composer-setup.php');")"

if [ "$EXPECTED_CHECKSUM" != "$ACTUAL_CHECKSUM" ]
then
    >&2 echo 'ERROR: Invalid installer checksum' | indent
    rm composer-setup.php
    exit 1
fi

/app/.heroku/php-min/bin/php composer-setup.php --quiet
rm composer-setup.php

# Create the composer.lock file
/app/.heroku/php-min/bin/php composer.phar update --no-dev --prefer-dist --no-plugins --no-install --no-scripts --no-interaction --no-progress --ignore-platform-reqs

# Done! The heroku/php buildpack MUST follow and will build the Symfony application.
| true
|
c0eecca604d7797e5a6b1ed66111e659f90bb805
|
Shell
|
uly55e5/uly55e5-overlay
|
/media-gfx/lightzone/files/lightzone-launcher
|
UTF-8
| 237
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# LightZone launcher: add the install dir to the native library path, build
# the Java class path from the bundled jars, and start the Linux launcher.
# (#LZPATH# is substituted with the real install path at package build time.)
export LD_LIBRARY_PATH="/opt/#LZPATH#:${LD_LIBRARY_PATH}"
LZCLASSPATH=""
# FIX: abort if the install directory is missing instead of launching java
# from whatever the current directory happens to be.
cd /opt/#LZPATH# || exit 1
# FIX: glob instead of parsing `ls` output; the -e guard skips the literal
# "*.jar" pattern when no jar is present.
for jar in *.jar; do
    [ -e "$jar" ] || continue
    LZCLASSPATH=${LZCLASSPATH}:${jar}
done
java -cp "${LZCLASSPATH}" com.lightcrafts.platform.linux.LinuxLauncher
| true
|
e4163679926a2b8a4f894c8b67a91af1b1dd8310
|
Shell
|
jariano88/data
|
/ayuda
|
UTF-8
| 942
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive ops-help menu: every pass records a netstat snapshot under
# /var/datatools, then serves deployment help, container help, or the most
# recent snapshot. Option 9 exits; anything else re-prompts.
function menu {
echo "1. Ayuda con despliegues"
echo "2. Ayuda con contenedores"
echo "3. Ayuda Netstat"
echo "9. Salir"
# Bail out on EOF instead of recursing forever on a closed stdin.
read OPTION || exit
# Snapshot listening sockets, timestamped to the minute.
netstat -ntpl >> "/var/datatools/netstat_$(date +%d%m%Y_%H%M)"
case "$OPTION" in
1)
cat /var/datatools/ayuda_despliegues
;;
2)
sh /var/datatools/contenedores.sh
;;
3)
# Show the newest snapshot (-1t sorts newest first); the old code used
# -1tr | head and displayed the oldest snapshot by mistake.
cat "$(ls -1t /var/datatools/netstat* | head -n 1)"
echo "Anteriores buscar en /var/datatools/netstat*"
# Retention: keep at most 5 snapshots, dropping the oldest one.
valida=$(ls /var/datatools/netstat* | wc -l)
if [ "$valida" -gt 5 ]; then
ls -1tr /var/datatools/netstat* | head -n 1 | xargs rm
fi
;;
9)
exit
;;
*)
echo "Opcion no valida"
menu
;;
esac
}
menu
| true
|
ab8ed6a0f62c501bdef850071308ae003a17cf9e
|
Shell
|
futurescube/ambari-astroserver
|
/package/templates/abDetection.sh.j2
|
UTF-8
| 258
| 2.578125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Run the GWAC detection pipeline forever, publishing the newest catalog
# path to the Squirrel named pipe after each pass. ({{master_node}} is a
# Jinja placeholder filled in by the Ambari template engine.)
rm -f /etc/gwac_dbgen.pid
echo $$ > /etc/gwac_dbgen.pid
while true
do
# NOTE(review): the trailing && only guards the `file=` assignment — the
# echo below still publishes the previous value if the pipeline fails.
# Confirm this best-effort behaviour is intended before tightening it.
python ./gwac_dbgen/pipeline.py 1 1 &&
file=$(ls /data/astrodb/latest/gwac/catalog.csv/*)
echo "{{master_node}} ${file}" > /tmp/Squirrel_pipe_test
sleep 2
done
| true
|
a868aae8e88700b778cdddf5fe23a11a509e7cc1
|
Shell
|
Carolis/cerberus
|
/linux-mint/virtualbox-install.sh
|
UTF-8
| 857
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# virtualbox install
# Semi-manual install walkthrough for VirtualBox 4.2.12 on Linux Mint.
# NOTE: several steps are instructions (download, log out/in, double-click),
# so this is meant to be read and run step by step, not executed blindly —
# the final command reboots the machine.
sudo apt-get install virtualbox-nonfree
# add your user to the vboxusers group
# then log out and log back in
# NOTE(review): "username" below is a placeholder — replace with the real
# account name before running.
sudo usermod -a -G vboxusers username
# then download and install the VirtualBox 4.2.12 Oracle VM VirtualBox Extension Pack
# https://www.virtualbox.org/wiki/Downloads
# double click the download file to install the extension pack
# in the guest download the latest guest additions
# http://download.virtualbox.org/virtualbox/4.2.12/VBoxGuestAdditions_4.2.12.iso
# mount the iso
mount VBoxGuestAdditions_4.2.12.iso /media/cdrom
# copy the VBoxLinuxAdditions.run to /root/
cp /media/cdrom/VBoxLinuxAdditions.run /root/
# make VBoxLinuxAdditions.run executable
cd /root/
chmod +x VBoxLinuxAdditions.run
# now run the script
./VBoxLinuxAdditions.run
# reboot and you should be able to go fullscreen
reboot
| true
|
150053e644674f91e00b4a083bc8ff0c2611a791
|
Shell
|
davebaird/z5.stdlib
|
/sh/dockertools
|
UTF-8
| 4,977
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
import errcho
import pidtools
import checks
import installer
import pathtools
dtools.ip () {
    # Print the first network IP of container $1, blocking until the
    # container is actually up (see dtools.waitfor).
    local dname="$1"
    dtools.waitfor "$dname"
    docker inspect --format '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$dname"
}
dtools.stop () {
    # Stop container $1 (if running) and force-remove it (if it exists).
    # With a leading --verbose flag, narrate each step to stderr.
    local -i verbose=0
    if [[ $1 == --verbose ]]; then
        verbose=1
        shift
    fi
    local dname="$1"

    if dtools.container.running "$dname"; then
        (( verbose )) && errcho "Stopping $dname"
        docker stop "$dname"
    else
        (( verbose )) && errcho "Not stopping: $dname is not running"
    fi

    if dtools.container.exists "$dname"; then
        (( verbose )) && errcho "Removing stopped container $dname"
        docker rm --force "$dname"
    fi
}
dtools.container.running () (
    # True iff container $1 has at least one running process.
    # Subshell body so `set +e` cannot leak into the caller.
    set +e
    docker top "$1" &> /dev/null
)
dtools.container.exists () (
    # True iff a container named $1 exists (running or stopped).
    set +e
    docker container inspect "$1" &> /dev/null
)
dtools.image.exists () (
    # True iff a local image named $1 exists.
    set +e
    docker image inspect "$1" &> /dev/null
)
dtools.volume.ls () {
    # List a docker volume's contents via a throwaway busybox container.
    # "$1" is either "volume" or "volume/sub/dir": everything after the
    # first slash is a path *inside* the volume.
    if [[ $1 = */* ]]
    then
        # %%/* keeps only the volume name even when the sub-path itself
        # contains slashes; the original %/* produced "vol/a" for "vol/a/b",
        # which is not a valid volume name.
        docker run --rm -i --volume "${1%%/*}":/tmp/myvolume busybox ls -al /tmp/myvolume/"${1#*/}"
    else
        docker run --rm -i --volume "${1}":/tmp/myvolume busybox ls -al /tmp/myvolume
    fi
}
dtools.volume.find () {
# Recursively list every path inside docker volume $1 via a throwaway
# busybox container (mirror of dtools.volume.ls, but using `find`).
docker run --rm -i --volume "$1":/tmp/myvolume busybox find /tmp/myvolume
}
# Wait for the container to be up, running, and functional, by checking it can
# generate log output. Then cleanup the log process.
dtools.waitfor () {
local dname; dname="$1"
# dtools.tail.log blocks until `docker logs -f` stays alive, i.e. the
# container is functional; we only want the side effect, so the spawned
# log follower (whose PID it prints) is killed immediately.
local pid; pid="$(dtools.tail.log "$dname" /dev/null)"
stop.pid "$pid"
}
# How to launch a process in background and wait for it to start before moving on
# https://stackoverflow.com/a/33564955/2334574
# Follow container $1's logs into file $2 in the background, retrying until
# the follower survives startup. Prints the follower's PID on stdout so the
# caller can stop it later; returns 1 if `docker logs` ran and exited.
dtools.tail.log () {
local dname="$1"
local logfile="$2"
local pid
local count
path.parent.exists "$logfile" || errxit "ERROR: $logfile parent folder does not exist"
while true
do
docker logs -f "$dname" >> "$logfile" 2>&1 &
pid=$!
# Sometimes, when called by dtools.waitfor, this next command kicks up an error but
# the script continues running. Until I can figure it out, it seems to happen
# intermittently I'm guessing when the container we just started is not ready.
# For something. I mean, that's what dtools.waitfor is checking, and it uses this function as a
# hack to do its testing, but I guess since it is a hack, these things are to be expected.
# Anyway, this note just to reassure a future me when I forget all about it and
# am trying to debug these unexpected, intermittent messages.
# shellcheck disable=2009
count=$(ps -A | grep --count $pid) # check whether process is still running
# if process is already terminated, then there can be two cases:
# the process executed and stopped successfully, or it terminated abnormally
if ((count == 0))
then
if wait $pid # checks if process executed successfully or not
then
# Follower exited cleanly: the log stream ended, which we treat as failure.
errcho "ERROR: docker logs $dname ran and exited"
return 1
else # process terminated abnormally
# Container not ready yet — back off briefly and retry the follower.
debug "docker $dname not ready - can't start logging yet (returned $?)"
sleep 0.1
fi
else # process is still running
# errcho "docker logs $dname is running"
break
fi
done
echo "$pid"
}
# https://docs.docker.com/engine/install/debian/
# https://docs.docker.com/engine/install/ubuntu/
# Install the docker CLI packages from Docker's apt repository, removing
# any distro-provided docker first. Requires root and network access.
# NOTE(review): apt-key is deprecated on current Debian/Ubuntu — consider
# migrating to a keyring file under /etc/apt/keyrings; verify before changing.
dtools.docker.install.cli () {
local os; os=$(get.os)
# Best-effort removal of conflicting packages; `|| :` keeps errexit happy.
apt-get remove docker docker-engine docker.io containerd runc > /dev/null 2>&1 || :
install.apt.pkgs software-properties-common curl gnupg2
curl -fsSL "https://download.docker.com/linux/$os/gpg" | apt-key add -
apt-add-repository --yes \
"deb [arch=amd64] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
install.apt.pkgs apt-transport-https ca-certificates curl gnupg-agent \
software-properties-common docker-ce docker-ce-cli
}
# Full docker engine install: CLI packages plus containerd, docker group,
# and systemd enablement. Requires root.
dtools.docker.install () {
dtools.docker.install.cli
install.apt.pkgs containerd.io
# Group may already exist; ignore the error.
groupadd docker > /dev/null 2>&1 || :
systemctl enable docker > /dev/null 2>&1
}
# 2021-01-05T22:13:39.12345+00:00
# Normalize docker's LastTagTime ("2021-01-05 22:12:29.766192404 +0100 CET")
# into an ISO-8601-ish timestamp ("2021-01-05T22:12:29.766192404+0100").
# Pure text transform, factored out so it is testable without docker.
dtools.LastTagTime.format () {
    # One sed pass instead of three subshell round-trips:
    #  1) first space -> "T"  2) drop the space before the UTC offset
    #  3) strip the trailing timezone name (" CET").
    sed -e 's/\s/T/' -e 's/\s//' -e 's/\s[[:upper:]]\+//' <<<"$1"
}

dtools.LastTagTime.iso8601 () {
    # 2021-01-05 22:12:29.766192404 +0100 CET
    local lttime
    lttime="$(docker image inspect --format '{{.Metadata.LastTagTime}}' "$1")"
    dtools.LastTagTime.format "$lttime"
}
| true
|
0445cfc87f72589386ecc218cdc0da8e2a06525a
|
Shell
|
COLDTURNIP/CDIP_config
|
/.zprofile
|
UTF-8
| 446
| 2.515625
| 3
|
[] |
no_license
|
# Note:
# - zshenv is sourced every shell initialization
# (thus it should only include envvar-related settings)
# - zprofile (current file) is sourced only in login shell, before zshrc
# - zshrc is source in every interactive shells
# - zlogin is sourced only in login shell, after zshrc
# ====================
# 3rd Party Tools
# ==================== {{{
# for Python
# Point pyenv at its install directory, but only when pyenv is actually
# installed (the directory exists); keeps the profile harmless elsewhere.
[[ -d "$HOME/.pyenv" ]] && export PYENV_ROOT="$HOME/.pyenv"
# }}}
| true
|
98ac54b15e2d995d1fe9b7eebd3938acb79319e3
|
Shell
|
jeffduda/brain-behavior-change-processing
|
/Reconstruction/dirtree.sh
|
UTF-8
| 253
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# DIRTREE - print a nice graphical directory tree structure
# Usage: dirtree [topfolder]   (defaults to the current directory)
if [ $# -gt 1 ]; then
    echo "usage: $(basename "$0") <topfolder>"
    exit 1
fi
# ls -R prints each directory as "path:"; keep those lines, strip the
# trailing colon, and turn every path component into "--" under a "|" gutter.
# "${1:-.}" keeps the zero-argument behaviour (current directory) while
# quoting the argument — the old unquoted $1 broke on paths with spaces.
ls -R "${1:-.}" | grep ":$" | sed -e 's/:$//' -e 's/[^-][^\/]*\//--/g' -e 's/^/ /' -e 's/-/|/'
exit 0
| true
|
a904057dade79823559fe508d3a8c4b86b1cfc25
|
Shell
|
sardankur-zz/graph-analytics
|
/run.sh
|
UTF-8
| 4,243
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the same graph algorithm through PGX (Java/Maven) and NetworkX
# (Python), then diff the two outputs. Usage: sh run.sh <Algorithm> <data_file> [src dst]
export MAVEN_HOME=/usr/share/maven
export PGX_HOME=/home/ankursarda/Software/pgx-2.2.1
export PGX_VERSION="2.2.1"
export PYTHON_EXEC_VER=/usr/bin/python3.5
# Required-environment sanity checks.
if [ -z "$JAVA_HOME" ]; then
echo "Need to set JAVA_HOME"
exit 1
fi
if [ -z "$MAVEN_HOME" ]; then
echo "Need to set MAVEN_HOME"
exit 1
fi
if [ -z "$PGX_HOME" ]; then
echo "Need to set PGX_HOME"
exit 1
fi
if [ -z "$PGX_VERSION" ]; then
echo "Need to set PGX_VERSION"
exit 1
fi
# No arguments: print the supported algorithms and exit.
if [ $# -eq 0 ]; then
echo "Enter one of the following arguments : sh run.sh [args]"
echo "TopologicalSort data_file"
echo "DagCheck data_file"
#echo "AverageNeighborDegree data_file"
#echo "AverageDegreeConnectivity data_file"
echo "CurrentFlowCloseness data_file"
echo "CurrentFlowBetweenness data_file"
echo "PreFlowPush data_file source destination"
echo "EdmondsKarp data_file source destination"
echo "CycleBasis data_file"
echo "FindCycle data_file"
echo "AverageClustering data_file"
#echo "LabelPropagation data_file"
#echo "MaximumIndependentSet data_file"
#echo "SSSP data_file"
exit
fi
dfile=$2
testfile=
pyfile=
pomfile=
extraargs=
# Pick the Maven POM matching the installed PGX version.
case $PGX_VERSION in
"2.2.1")
pomfile=pgx/pom221.xml
;;
"2.3.1")
pomfile=pgx/pom231.xml
;;
"2.4")
pomfile=pgx/pom24.xml
;;
"2.5")
pomfile=pgx/pom25.xml
;;
esac
# Map the algorithm name to its Java test class and Python script.
# Flow algorithms additionally take source/destination vertices ($3/$4).
# NOTE: "extrargspy" (sic) is the deliberate, consistently-used name for
# the Python-side extra arguments.
case $1 in
"TopologicalSort")
echo "The answers may be different, because there can be multiple ways of doing topological sort."
testfile=algorithms.topological_sort.TopologicalSortExtTest
pyfile=algorithms/topological_sort/topological_sort.py
;;
"DagCheck")
testfile=algorithms.dag_check.DagCheckExtTest
pyfile=algorithms/dag_check/dag_check.py
;;
# "AverageNeighborDegree")
# testfile=algorithms.average_neighbor_degree.AverageNeighborDegreeExtTest
# pyfile=algorithms/average_neighbor_degree/average_neighbor_degree.py
# ;;
# "AverageDegreeConnectivity")
# testfile=algorithms.average_degree_connectivity.AverageDegreeConnectivityExtTest
# pyfile=algorithms/average_degree_connectivity/average_degree_connectivity.py
# ;;
"CurrentFlowBetweenness")
testfile=algorithms.current_flow.current_flow_betweenness.CurrentFlowBetweennessExtTest
pyfile=algorithms/current_flow_betweenness_centrality/current_flow_betweenness_centrality.py
;;
"CurrentFlowCloseness")
testfile=algorithms.current_flow.current_flow_closeness.CurrentFlowClosenessExtTest
pyfile=algorithms/current_flow_closeness_centrality/current_flow_closeness_centrality.py
;;
"PreFlowPush")
testfile=algorithms.maximum_flow.PreFlowPushExtTest
pyfile=algorithms/preflow_push/preflow_push.py
extraargs="-Dsource=$3 -Ddest=$4"
extrargspy="$3 $4"
;;
"EdmondsKarp")
testfile=algorithms.maximum_flow.EdmondsKarpMaximumFlowExtTest
pyfile=algorithms/edmondskarp/edmondskarp.py
extraargs="-Dsource=$3 -Ddest=$4"
extrargspy="$3 $4"
;;
"CycleBasis")
testfile=algorithms.cycles.CycleBasisExtTest
pyfile=algorithms/cycle_basis/cycle_basis.py
;;
"FindCycle")
testfile=algorithms.cycles.FindCycleExtTest
pyfile=algorithms/find_cycle/find_cycle.py
;;
"AverageClustering")
testfile=algorithms.average_clustering.AverageClusteringExtTest
pyfile=algorithms/average_clustering/average_clustering.py
;;
# "LabelPropagation")
# testfile=algorithms.label_propagation.LabelPropagationExtTest
# ;;
# "MaximumIndependentSet")
# testfile=algorithms.maximal_independent_set.MaximalIndependentSetExtTest
# ;;
# "SSSP")
# testfile=algorithms.sssp.SsspExtTest
# ;;
esac
echo "Running python test"
pythonout=$($PYTHON_EXEC_VER networkx/$pyfile $dfile $extrargspy)
echo $pythonout
echo "---------------------------------------"
echo "Compiling JAVA sources"
$MAVEN_HOME/bin/mvn -f $pomfile clean compile test-compile -DskipTests
echo "---------------------------------------"
echo "Running java test"
# The Java test wraps its answer in "###...###"; grep it out, then strip
# the three leading and three trailing marker characters.
javares=$($MAVEN_HOME/bin/mvn -q -f $pomfile surefire:test -Dpgx.location=$PGX_HOME -Dtest=$testfile -DdFile=$dfile $extraargs | grep '###.*###')
javaout=$(echo "$javares" | cut -c1-3 --complement | rev | cut -c1-3 --complement | rev)
echo $javaout
echo "---------------------------------------"
echo "performing diff"
echo $javaout > javaout.txt
echo $pythonout > pythonout.txt
diff javaout.txt pythonout.txt
| true
|
337e952c0e77ac82fa6efbcda621f6f2c172568b
|
Shell
|
Eduardo1723/Shell-Script-2021-Lista1
|
/n3.sh
|
UTF-8
| 137
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Copy everything in the current directory into a dated folder under /tmp,
# then print the date used.
ls -l
DATA="$(date +%Y-%m-%d)"
PASTA="/tmp/$DATA"
# -p already makes an existing folder a no-op; real errors should be visible
# (the old 2>/dev/null hid them).
mkdir -p "${PASTA}"
cp * "$PASTA"
# Reuse the timestamp captured above instead of calling date a second time
# (avoids a mismatch if the script straddles midnight).
echo "$DATA"
| true
|
2a7e571b80ec0d6b501072581bd2685e04d2ac19
|
Shell
|
f1est/proxy
|
/build_crosscompile_dlink.sh
|
UTF-8
| 656
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
# Cross-compile the proxy for D-Link (Broadcom ARM) targets using the
# hndtools uclibc-4.5.3 toolchain. Run from the project root.
PROJECTDIR=$(pwd)
BCM_BUILDROOT=/opt/hndtools-arm-linux-2.6.36-uclibc-4.5.3
ALPHA_BUILDROOT=/opt/hndtools-arm-linux-2.6.36-uclibc-4.5.3
export TPATH_UC=$ALPHA_BUILDROOT
export TPATH_KC=$BCM_BUILDROOT
export TPATH_UCLIBC=$ALPHA_BUILDROOT
export TPATH_LIBTGZ=$ALPHA_BUILDROOT/lib.tgz
export PATH=/opt/make-3.81:$BCM_BUILDROOT/bin:$PATH
export LD_LIBRARY_PATH=$ALPHA_BUILDROOT/lib
# Fail fast instead of running make/cmake in the wrong directory.
cd "$PROJECTDIR/libs/libconfig/" || exit 1
make distclean
cd "$PROJECTDIR/build" || exit 1
export STAGING_DIR=/opt/hndtools-arm-linux-2.6.36-uclibc-4.5.3/
export PATH="/opt/hndtools-arm-linux-2.6.36-uclibc-4.5.3/bin:$PATH"
cmake -DCMAKE_TOOLCHAIN_FILE=../crosscompile/dlink.cmake ../
make
| true
|
c17c525176321385ba324871c09d5088cbcb8cc0
|
Shell
|
nileshdhokrat52586/PracticeProblem
|
/Day5Ass/NumberInWordcase.sh
|
UTF-8
| 659
| 3.71875
| 4
|
[] |
no_license
|
#! /bin/bash -x
# Read one digit from the user and print its English name; anything that
# is not exactly a single digit 0-9 is rejected.
echo "Enter A Single Digit Number "
read num
# Lookup table: index n holds the word for digit n.
words=(ZERO ONE TWO THREE FOUR FIVE SIX SEVEN EIGHT NINE)
if [[ $num =~ ^[0-9]$ ]]
then
echo "Entered Number is '${words[num]}' "
else
echo "Invalid Choice"
fi
| true
|
09be1cdf9d91d551299a79bdd75d0221a88fe8c8
|
Shell
|
Chandu2319/Shell_script_practice
|
/Dictionaryproblems_day8/die_roll.sh
|
UTF-8
| 1,403
| 4.25
| 4
|
[] |
no_license
|
#! /bin/bash -x
# The die roll is obtained from RANDOM number generator and the results of how
# many times each number is cast is stored in a dictionary.
#variable declaration
# Indexed array: die_roll_count[face] = how many times that face was rolled.
declare -a die_roll_count
#initialization
# Faces 1..6 start at zero (index 0 is deliberately unused).
die_roll_count[1]=0;
die_roll_count[2]=0;
die_roll_count[3]=0;
die_roll_count[4]=0;
die_roll_count[5]=0;
die_roll_count[6]=0;
max_num_of_cast=0 #this is the maximum of the number of times any of the numbers has been cast
function find_number_with_max_min_count() {
# Scan die_roll_count[1..N] and echo two numbers on one line: the face
# with the highest count and the face with the lowest count.
# Reads the global die_roll_count array; all working state is local now,
# so repeated calls cannot leak max/min into the caller's scope.
local max=0
local min=1000
local count die_roll num_at_max num_at_min
for ((count=1; count<=${#die_roll_count[@]}; count++))
do
die_roll=${die_roll_count[count]}
if [ "$die_roll" -gt "$max" ]
then
num_at_max=$count
max=$die_roll
fi
if [ "$die_roll" -lt "$min" ]
then
num_at_min=$count
min=$die_roll
fi
done
echo "$num_at_max" "$num_at_min"
}
# Keep rolling until some face has been cast 10 times, then report the
# per-face tallies plus the most- and least-frequent faces.
while [ $max_num_of_cast -lt 10 ]
do
die_roll=$((RANDOM%6+1)) #this is the result of each die roll
die_roll_count[die_roll]=$(( ${die_roll_count[die_roll]}+1 ))
# First field of the helper's output is the current most-frequent face.
max_num=$(echo $(find_number_with_max_min_count | awk '{print $1}')) ;
max_num_of_cast=${die_roll_count[max_num]}
done
echo The die rolls ${!die_roll_count[@]}
echo and their count ${die_roll_count[@]}
# Second field is the least-frequent face.
min_num=$(echo $(find_number_with_max_min_count | awk '{print $2}')) ;
echo "The number that reached 10 times(maximum) is " $max_num
echo "The number that has been cast minimum is " $min_num "which is only " ${die_roll_count[min_num]}
| true
|
3b1289c88fe403ed9d440c82ec3c3e4b490e57bd
|
Shell
|
stallman-cui/log_analytics
|
/shell/init.d_etcd
|
UTF-8
| 1,706
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
### BEGIN INIT INFO
# Provides: etcd
# Required-Start: $network $remote_fs $local_fs
# Required-Stop: $network $remote_fs $local_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Stop/start etcd
### END INIT INFO
# SysV init script for the etcd daemon.
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
NAME=etcd
DESC=etcd
DAEMON=/usr/bin/etcd
# Abort early when the etcd binary is missing.
test -x $DAEMON
if [ $? -eq 1 ]; then
echo "Missing binary file: $DAEMON"
exit 1
fi
get_pid() {
# Print the PID(s) of any running etcd process, or nothing if none.
# pgrep matches the process name, unlike the old "ps -A | grep etcd"
# which could intermittently match the grep process itself.
pgrep etcd
}
# Snapshot the etcd PID (if any) once, before enabling errexit — the
# start/stop helpers below all act on this snapshot.
PID=$(get_pid)
set -e
. /lib/lsb/init-functions
start_etcd() {
# Refuse to start a second instance. [ -n "$PID" ] replaces the old
# "[ ! -x $PID ]", which was a file-executability test that only gave
# the right answer by coincidence.
if [ -n "$PID" ]; then
echo -n "Already started " && return 1
fi
# NOTE(review): $CONFIG is never assigned in this script — confirm it is
# expected to come from the environment.
$DAEMON -config=$CONFIG &
}
stop_etcd() {
# Nothing to stop if etcd was not running when the script started.
# [ -z "$PID" ] replaces the accidental "[ -x $PID ]" file test.
[ -z "$PID" ] && echo -n "Not running " && return 1
# Graceful shutdown via SIGINT; $PID stays unquoted on purpose so that
# multiple PIDs (one word each) are all signalled.
[ -n "$PID" ] && kill -INT $PID &> /dev/null
if [ $? -ne 0 ]; then
return 1
fi
}
waitstart() {
# Poll (max ~10s) until get_pid reports a PID, i.e. etcd is up.
# -z "$(get_pid)" replaces the accidental "[ -x `get_pid` ]" file test.
i=0
while [ -z "$(get_pid)" ] ; do
echo -n .
sleep 0.2
if [ $i -gt 50 ]; then
return 1
fi
i=$(($i + 1))
done
}
waitstop() {
# Poll (max ~10s) until get_pid reports nothing, i.e. etcd has exited.
# -n "$(get_pid)" replaces the accidental "[ ! -x `get_pid` ]" file test.
i=0
while [ -n "$(get_pid)" ] ; do
echo -n .
sleep 0.2
if [ $i -gt 50 ]; then
return 1
fi
i=$(($i + 1))
done
}
_start() {
# Start the daemon and report; waitstart paints a dot-progress meter.
if ! start_etcd; then
echo "... FAILED."
return
fi
if waitstart; then
echo "... OK."
else
echo "... Waiting too long."
fi
}
_stop() {
# Stop the daemon and report; waitstop paints a dot-progress meter.
if ! stop_etcd; then
echo "... FAILED."
return
fi
if waitstop; then
echo "... OK."
else
echo "... Waiting too long."
fi
}
# Standard SysV action dispatcher: start | stop | restart.
case "$1" in
start)
echo -n "Starting $NAME: "
_start
;;
stop)
echo -n "Stopping $NAME: "
_stop
;;
restart)
echo "Restarting $NAME"
echo -n "Stopping: "
_stop
echo -n "Starting: "
_start
;;
*)
echo "Usage: $NAME {start|stop|restart}" >&2
exit 1
;;
esac
exit 0
| true
|
25bb53413442da094ad49ad1a40892a8b569d5ce
|
Shell
|
Iubalus/BashTestDummy_Presentation
|
/gaspump/states/state_pump_fuel
|
UTF-8
| 129
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Gas-pump state handler: announce that fuel is being pumped, then
# advance the state machine to the payment state.
function state_pump_fuel(){
header "Ready to pump fuel."
echo "pumping fuel"
# $STATE_PAY and $CURRENT_STATE are owned by the state-machine driver
# that sources this file.
CURRENT_STATE=$STATE_PAY
}
| true
|
4d2c0832418f38b3105fa902183b8d6e9e1d7cb7
|
Shell
|
hallettj/dot-xmonad
|
/home/bin/theme
|
UTF-8
| 677
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Switch the X11 color theme: symlink one of the files from
# ~/.config/X11/Xresources.avail into Xresources.d/theme and merge it
# into the running X resource database.
set -e

if [ "$#" -ne 1 ]; then
  echo "Usage: $0 <theme>"
  echo ""
  echo "The theme given must match a filename in ~/.config/X11/Xresources.avail"
  # Bug fix: the script previously fell through here and ran with an
  # empty theme name.
  exit 1
fi

THEME="$1"

CONFIG_DIR="$HOME/.config/X11"
THEME_PATH="$CONFIG_DIR/Xresources.d/theme"

# Replace (never stack) any previously selected theme symlink.
if [ -L "$THEME_PATH" ]; then
  rm "$THEME_PATH"
fi
ln -s "../Xresources.avail/$THEME" "$THEME_PATH"

xrdb -merge "$THEME_PATH"

# If config-reload is enabled, this will update colors in running urxvt terms.
# See https://github.com/regnarg/urxvt-config-reload
# killall --signal SIGUSR1 urxvt
# TODO: the `killall` command stopped working on upgrading to Debian Buster
ps -C urxvt -o pid= | xargs -r kill --signal SIGUSR1
| true
|
9db2c95c22b06c150443440df4ebf11ccaa103ad
|
Shell
|
himanshuhastak/dotfiles
|
/tmux.sh
|
UTF-8
| 2,246
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# shellcheck disable=SC2086
# Build (once) a set of tmux sessions — one window per remote host, with an
# extra minicom pane on serial-console hosts — then attach to the main one.
# Session Name
SESSION=$USER
SESSIONEXISTS=$(tmux list-sessions | grep $USER)
# Space-terminated host lists, one variable per work session.
WORK_HOSTLIST="host1 "
WORK1_HOSTLIST="host2 host3 "
WORK2_HOSTLIST="host4 host5 "
shopt -s extglob
#Note: there should be no spaces : like '@(host1 | host2)'
# extglob pattern matching the hosts that also get a minicom pane.
MINICOM_HOSTS='@(host4|host5)'
# Only create tmux session if it doesn't already exist
if [ "$SESSIONEXISTS" = "" ]; then
# Start main Session with our name
tmux new-session -d -s $SESSION
# Start multiple other sessions
for session in WORK WORK1 WORK2 ;do
tmux new-session -d -s $session
tmux switch -t $session
tmux rename-window -t 1 "$HOSTNAME" # 1st window is current host
SESSION_WINDOW_NUMBER=1
case $session in
WORK)
HOSTLIST=$WORK_HOSTLIST
;;
WORK1)
HOSTLIST=$WORK1_HOSTLIST
;;
WORK2)
HOSTLIST=$WORK2_HOSTLIST
;;
esac
# One window per remote host (skipping the local machine), each running
# an ssh login shell. NOTE(review): DISPLAY=:51 looks like a fixed X
# forwarding display — confirm against the site's X setup.
for host in $HOSTLIST; do
if [[ "$HOSTNAME" != "$host" ]]; then
SESSION_WINDOW_NUMBER=$((SESSION_WINDOW_NUMBER + 1))
tmux new-window -t $session:$SESSION_WINDOW_NUMBER -n "$host"
tmux send-keys -t "$host" "ssh -Yt $USER@$host \"export DISPLAY=:51 && exec bash -l\" " C-m
fi
case $host in
${MINICOM_HOSTS})
SPLIT_RATIO=40
tmux split-window -h -p $SPLIT_RATIO
#!if we lock minicom other people cant use it, be careful what you wish for or do not lock -o/--noinit
tmux send-keys "ssh -Yt $USER@$host \"export DISPLAY=:51 && minicom -b 115200 --noinit -D /dev/haps-serial || exec bash -l\" " C-m
## As per tmux conf : session pane base ,window base starts with 1; set it as default
tmux select-pane -t 1 # This is where we have original ssh'd host, not yhe new minicom'd pane
;;
# NOTE(review): "default)" is a literal pattern, not a fallback ("*)" is).
# Harmless here because the arm is empty and an unmatched case is a no-op.
default) ;;
esac
done
done
## always start with main host pane as default
tmux switch -t $SESSION
tmux select-window -t 1
tmux select-pane -t 1
fi
# Attach Session, on the Main window if exists
tmux attach-session -t $SESSION
| true
|
bfd257c42a9a87f597a760ef26c112635c0fb8ce
|
Shell
|
gpac/testsuite
|
/scripts/graphics_dump.sh
|
UTF-8
| 1,484
| 2.890625
| 3
|
[] |
no_license
|
graphics_test ()
{
 # Render $2 to PNG and JPG with gpac and validate the dumps.
 #  $1: test name suffix   $2: source scene file
 #  $3: 1 => hash-compare the output, otherwise just check it exists
 # Tests are quoted now: the old bare [ $test_skip = 1 ] / [ $3 = 1 ] /
 # [ ! -f $dump_file ] all broke when a variable was empty or had spaces.
 test_begin "graphics-$1"
 if [ "$test_skip" = 1 ] ; then
  return
 fi

 dump_file=$TEMP_DIR/dump.png
 do_test "$GPAC -font-dirs=$EXTERNAL_MEDIA_DIR/fonts/ -rescan-fonts -i $2 -o $dump_file --osize=512x512 --noback" "dump-png"
 #hash (2D) or test file presence
 if [ "$3" = 1 ] ; then
  do_hash_test "$dump_file" "dump-png"
 else
  if [ ! -f "$dump_file" ] ; then
   result="PNG output not present"
  fi
 fi

 dump_file=$TEMP_DIR/dump.jpg
 do_test "$GPAC -font-dirs=$EXTERNAL_MEDIA_DIR/fonts/ -rescan-fonts -i $2 -o $dump_file --osize=512x512" "dump-jpg"
 #hash (2D) or test file presence
 if [ "$3" = 1 ] ; then
  do_hash_test "$dump_file" "dump-jpg"
 else
  if [ ! -f "$dump_file" ] ; then
   result="JPG output not present"
  fi
 fi

 test_end
}
# Drive graphics_test over the 2D/3D/SVG sample scenes. Hash comparison is
# enabled only where rendering is bit-reproducible.
do_hash=1
#we don't have the same 2D renderer precision on 32 and 64 bits, disable hashes for linux 32
#note that we keep them for win32 since we run our tests on a 64 bit VM emuating 32 bit so no precision loss
if [ $GPAC_OSTYPE == "lin32" ] ; then
do_hash=0
fi
graphics_test "bt2d-simple" "$MEDIA_DIR/bifs/bifs-2D-painting-material2D.bt" $do_hash
graphics_test "bt2d-texture" "$MEDIA_DIR/bifs/bifs-2D-texturing-imagetexture-shapes.bt" $do_hash
#cannot do hash tests for 3D, GPUs will give different results...
graphics_test "bt3d-simple" "$MEDIA_DIR/auxiliary_files/nefertiti.wrl" 0
graphics_test "bt3d-texture" "$MEDIA_DIR/bifs/bifs-3D-texturing-box.bt" 0
graphics_test "svg-simple" "$MEDIA_DIR/svg/Ghostscript_Tiger.svg" $do_hash
| true
|
a424d122b31ba952d1862b3dff261307371d9f16
|
Shell
|
aur-archive/jp2a
|
/PKGBUILD
|
UTF-8
| 523
| 2.703125
| 3
|
[] |
no_license
|
# Contributor: Tim Yang <tdy@gmx.com>
# Contributor: Christoph Siegenthaler <csi@gmx.ch>
# Arch Linux PKGBUILD for jp2a (JPG -> ASCII converter).
pkgname=jp2a
pkgver=1.0.6
pkgrel=3
pkgdesc="A small utility for converting JPG images to ASCII"
arch=('i686' 'x86_64')
url="http://jp2a.sourceforge.net/"
license=('GPL')
depends=('curl' 'libjpeg')
source=(http://downloads.sourceforge.net/jp2a/jp2a-$pkgver.tar.gz)
md5sums=('eb6281eee29acf1c494dcaf7d745a5df')

# Standard autotools build in the unpacked source tree.
build(){
  cd "$srcdir/jp2a-$pkgver"
  ./configure --prefix=/usr
  make
}

# Stage the build into the package root via DESTDIR.
package(){
  cd "$srcdir/jp2a-$pkgver"
  make DESTDIR="$pkgdir" install
}
| true
|
497949130e3292f149e755d983d125733d278c91
|
Shell
|
BIMSBbioinfo/HOT-or-not-examining-the-basis-of-high-occupancy-target-regions
|
/Scripts/Align_bash/align_DRIP_seq_paired.sh
|
UTF-8
| 1,650
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Paired-end Bowtie2 alignment for DRIP-seq run $1 under dataset dir $2,
# writing SAM output plus per-run stdout/stderr logs.
#paths
# NOTE(review): the tilde in quixpath does not expand inside quotes; the
# variable (like several below) is currently unused here.
quixpath="~/.guix-profile/bin"
bedGraphToBigWigpath="/home/kwreczy/programs"
bedtoolspath="/home/kwreczy/programs/bedtools-2.17.0/bin"
bbdukpath="/home/kwreczy/programs/bbmap"
fastqcpath="/home/kwreczy/programs/FastQC"
#bowtiepath="/home/kwreczy/programs/bowtie-1.1.2/"
akalindir="/data/akalin/Projects/AAkalin_HOTRegions/Data/DRIP_seq/hg19/"
hg19index="/data/akalin/Base/GenomeIndices/hg19/Bowtie2/hg19"
hg19chromInfo="/data/akalin/kasia/projects/HOT_DATA/hg19.chromInfo.txt"
adapter="/home/kwreczy/programs/bbmap/resources/truseq.fa.gz"

id=$1 #run id
dirname=$2

##mapping using Bowtie
# SINGLE-read
m=1 # number of allowed hits
k=1 # number to report
n=1 #number of mismatches
procnum=3 # number of processors used

#Log file from STDOUT for mapping
LOG2=$akalindir/$dirname/MAPPED/Bowtie/$id.log
#Error messages from STDERR for mapping
ERR2=$akalindir/$dirname/MAPPED/Bowtie/$id.err

# Bug fix: the original ended with "> $LOG2 > $ERR2", which redirected
# stdout twice (LOG2 ended up empty, ERR2 got stdout, stderr hit the
# terminal). Send stdout to LOG2 and stderr to ERR2 as the comments say.
~/.guix-profile/bin/bowtie2 --un-conc $akalindir/$dirname/MAPPED/Bowtie/output_file_un_conc --al-conc $akalindir/$dirname/MAPPED/Bowtie/output_file_al_conc -p 5 -x $hg19index -1 $akalindir/$dirname/RAW/fastq/$id'_1'.fastq -2 $akalindir/$dirname/RAW/fastq/$id'_2'.fastq -S $akalindir/$dirname/MAPPED/Bowtie/$id.sam > "$LOG2" 2> "$ERR2"
| true
|
c0d3297fecf592b61c1633e46f7723ed2b3df645
|
Shell
|
kaduk/heimdal
|
/tests/gss/check-basic.in
|
UTF-8
| 5,713
| 2.703125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
#
# Copyright (c) 2007 Kungliga Tekniska Högskolan
# (Royal Institute of Technology, Stockholm, Sweden).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
#
# GSS-API acceptor/initiator smoke tests against a throwaway Heimdal KDC.
# @srcdir@/@objdir@/@port@ are substituted by the build system.
srcdir="@srcdir@"
objdir="@objdir@"

# If there is no useful db support compile in, disable test
# (exit 77 = automake "test skipped").
../db/have-db || exit 77

R=TEST.H5L.SE

port=@port@

keytabfile=${objdir}/server.keytab
keytab="FILE:${keytabfile}"
nokeytab="FILE:no-such-keytab"
cache="FILE:krb5ccfile"
cache2="FILE:krb5ccfile2"
nocache="FILE:no-such-cache"

# Command wrappers: run the in-tree tools against the test realm/cache.
kinit="${TESTS_ENVIRONMENT} ../../kuser/kinit -c $cache ${afs_no_afslog}"
kdestroy="${TESTS_ENVIRONMENT} ../../kuser/kdestroy -c $cache ${afs_no_unlog}"
klist="${TESTS_ENVIRONMENT} ../../kuser/klist -c $cache"
kadmin="${TESTS_ENVIRONMENT} ../../kadmin/kadmin -l -r $R"
kdc="${TESTS_ENVIRONMENT} ../../kdc/kdc --addresses=localhost -P $port"

acquire_cred="${TESTS_ENVIRONMENT} ../../lib/gssapi/test_acquire_cred"
test_kcred="${TESTS_ENVIRONMENT} ../../lib/gssapi/test_kcred"

KRB5_CONFIG="${objdir}/krb5.conf"
export KRB5_CONFIG
KRB5_KTNAME="${keytab}"
export KRB5_KTNAME
KRB5CCNAME="${cache}"
export KRB5CCNAME

# Clean state from any previous run.
rm -f ${keytabfile}
rm -f current-db*
rm -f out-*
rm -f mkey.file*

> messages.log

echo Creating database
${kadmin} \
    init \
    --realm-max-ticket-life=1day \
    --realm-max-renewable-life=1month \
    ${R} || exit 1

# Principals: two users, and a host service whose key goes in the keytab.
echo upw > ${objdir}/foopassword

${kadmin} add -p upw --use-defaults user@${R} || exit 1
${kadmin} add -p upw --use-defaults another@${R} || exit 1
${kadmin} add -p p1 --use-defaults host/host.test.h5l.se@${R} || exit 1
${kadmin} ext -k ${keytab} host/host.test.h5l.se@${R} || exit 1

echo "Doing database check"
${kadmin} check ${R} || exit 1

echo Starting kdc
${kdc} &
kdcpid=$!

sh ${srcdir}/../kdc/wait-kdc.sh
if [ "$?" != 0 ] ; then
    kill ${kdcpid}
    exit 1
fi

# Make sure the KDC dies on any premature exit path below.
trap "kill ${kdcpid}; echo signal killing kdc; exit 1;" EXIT

exitcode=0

echo "initial ticket"
${kinit} --password-file=${objdir}/foopassword user@${R} || exitcode=1

echo "keytab"
${acquire_cred} \
    --acquire-type=accept \
    --acquire-name=host@host.test.h5l.se || exit 1

echo "keytab w/ short-form name and name canon rules"
${acquire_cred} \
    --acquire-type=accept \
    --acquire-name=host@host || exit 1

echo "keytab w/o name"
${acquire_cred} \
    --acquire-type=accept || exit 1

# Negative test: acquiring for a name not in the keytab must fail.
echo "keytab w/ wrong name"
${acquire_cred} \
    --acquire-type=accept \
    --acquire-name=host@host2.test.h5l.se 2>/dev/null && exit 1

echo "init using keytab"
${acquire_cred} \
    --acquire-type=initiate \
    --acquire-name=host@host.test.h5l.se > /dev/null || exit 1

echo "init using keytab (loop 10)"
${acquire_cred} \
    --acquire-type=initiate \
    --loops=10 \
    --acquire-name=host@host.test.h5l.se > /dev/null || exit 1

echo "init using keytab (loop 10, target)"
${acquire_cred} \
    --acquire-type=initiate \
    --loops=10 \
    --target=host@host.test.h5l.se \
    --acquire-name=host@host.test.h5l.se > /dev/null || exit 1

echo "init using keytab (loop 10, kerberos)"
${acquire_cred} \
    --acquire-type=initiate \
    --loops=10 \
    --kerberos \
    --acquire-name=host@host.test.h5l.se > /dev/null || exit 1

echo "init using keytab (loop 10, target, kerberos)"
${acquire_cred} \
    --acquire-type=initiate \
    --loops=10 \
    --kerberos \
    --target=host@host.test.h5l.se \
    --acquire-name=host@host.test.h5l.se > /dev/null || exit 1

echo "init using existing cc"
${acquire_cred} \
    --name-type=user-name \
    --acquire-type=initiate \
    --acquire-name=user || exit 1

# Negative test: pointing at a missing ccache must fail.
KRB5CCNAME=${nocache}

echo "fail init using existing cc"
${acquire_cred} \
    --name-type=user-name \
    --acquire-type=initiate \
    --acquire-name=user 2>/dev/null && exit 1

echo "use gss_krb5_ccache_name for user"
${acquire_cred} \
    --name-type=user-name \
    --ccache=${cache} \
    --acquire-type=initiate \
    --acquire-name=user >/dev/null || exit 1

KRB5CCNAME=${cache}
KRB5_KTNAME=${nokeytab}

echo "kcred"
${test_kcred} || exit 1

${kdestroy}

KRB5_KTNAME="${keytab}"

echo "init using keytab"
${acquire_cred} \
    --acquire-type=initiate \
    --acquire-name=host@host.test.h5l.se 2>/dev/null || exit 1

echo "init using keytab (ccache)"
${acquire_cred} \
    --acquire-type=initiate \
    --ccache=${cache} \
    --acquire-name=host@host.test.h5l.se 2>/dev/null || exit 1

# Normal shutdown: clear the trap so the final kill is not treated as error.
trap "" EXIT

echo "killing kdc (${kdcpid})"
kill ${kdcpid} 2> /dev/null

exit $exitcode
| true
|
cca6c16df32975650946db3cf8b3e4de77013681
|
Shell
|
0atman/lit
|
/lit.sh
|
UTF-8
| 2,220
| 4.5625
| 5
|
[] |
no_license
|
#!/bin/bash
# given a filename ending in .md, return the base filename
function remove_extension {
  # Strip the trailing ".md" with parameter expansion. Callers guarantee
  # the suffix is present (see test_filename), so this is equivalent to —
  # and much clearer than — the old substring arithmetic, which relied on
  # bash evaluating the unevaluated string "len-3" inside ${file:0:...}.
  local file=$1
  echo "${file%.md}"
}
# make sure a filename is safe to process
function test_filename {
  # A path is processable when its basename ends in ".md" AND contains at
  # least one more dot before that (e.g. script.sh.md), so stripping the
  # ".md" still leaves a real extension behind.
  # The old version compared ${file_path: -3} unquoted, which emitted a
  # test error for names shorter than three characters or with spaces; a
  # single case pattern covers both checks safely.
  local file_path=$1
  local file_name=${file_path##*/}
  case "$file_name" in
    *.*.md) return 0 ;;
    *)      return 1 ;;
  esac
}
# strip Markdown
function process_lines {
  # Print only the lines inside ``` fenced code blocks of "$1".
  # Fence lines toggle a counter; lines seen while the counter is odd are
  # inside a block and get printed, the fences themselves are skipped.
  # "$file" is quoted now (the old bare $file broke on spacey names), and
  # awk writes straight to stdout instead of round-tripping through a
  # variable and echo.
  local file=$1
  awk '
    /^```/      { i++; next }
    i % 2 == 1  { print }
  ' "$file"
}
# compile Markdown code blocks in a file using awk
function compile {
  # Extract the fenced code blocks of "$1" into a sibling file with the
  # ".md" suffix removed. Declarations are split from the command
  # substitutions so a helper failure is not masked by `local`, and every
  # expansion is quoted so spacey filenames survive.
  local file=$1
  local new_filename
  new_filename=$(remove_extension "$file")
  # log message
  echo "compiling $file > $new_filename"
  local compiled
  compiled=$(process_lines "$file")
  # save results to file
  echo "$compiled" > "$new_filename"
}
# if the first argument exists, use it as the
# target directory
if [ "$1" ]; then
  files=$1
# otherwise load all files in current directory
else
  files="."
fi

# Loop with a glob instead of parsing `ls`: names with spaces survive, and
# files inside an explicit target directory now keep their directory prefix
# (the old $(ls $files) yielded bare names that compile could not open).
for file in "$files"/*
do
  # make sure it's a literate code file
  if test_filename "$file"; then
    # compile
    compile "$file"
  fi
done
| true
|
da645b23e9d8913633861d478c086a00a5ec819c
|
Shell
|
cmap/cmapM
|
/sig_tools/ext/mtocpp/test.sh
|
UTF-8
| 858
| 3.421875
| 3
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Run mtocpp over every test file for the given config suffix and
# diff the generated .cc output against the stored reference file.
# Arguments: $1 - config/file suffix ("" or "2")
# Globals:   FILES  - whitespace-separated list of .m glob patterns
#            MTOCPP - mtocpp invocation (may be "wine …/mtocpp.exe")
function doit() {
    local SUFFIX=$1
    local MFILE MBFILE CCFILE CCREFFILE DIFFFILE
    # FILES is deliberately unquoted: it holds several glob patterns
    # that must word-split and expand here
    for MFILE in $FILES
    do
        echo "Checking $MFILE ..."
        MBFILE=$(basename "$MFILE")
        CCFILE="${MBFILE%.m}${SUFFIX}.cc"
        CCREFFILE="${MBFILE%.m}${SUFFIX}_ref.cc"
        DIFFFILE="${MBFILE%.m}${SUFFIX}.diff"
        # NOTE: $MTOCPP stays unquoted -- it may be "wine …/mtocpp.exe"
        # and must word-split into command + argument
        $MTOCPP "$MFILE" "conf$SUFFIX" > "$CCFILE"
        # a clean diff means the test passed; keep the .diff only on failure
        if diff --strip-trailing-cr -u "$CCFILE" "$CCREFFILE" > "$DIFFFILE"
        then
            echo " (passed)"
            rm "$DIFFFILE"
        else
            echo " (failed)"
        fi
    done
}
# Test-file patterns; expanded when $FILES is word-split inside doit.
FILES="test/+grid/+rect/@rectgrid/*.m test/+grid/+rect/doxygen_in_namespace.m ./*.m"
# Root of the mtocpp checkout. Previously hard-coded four times; now
# overridable via the MTOCPP_ROOT environment variable, defaulting to
# the original path so existing setups keep working.
MTOCPP_ROOT=${MTOCPP_ROOT:-/Users/narayan/workspace/mortar/ext/mtocpp}
# abort if we cannot enter the test directory instead of running the
# suite from the wrong working directory
cd "$MTOCPP_ROOT/test" || exit 1
# prefer the native binary; fall back to the Windows build under wine
if [ -x "$MTOCPP_ROOT/mtocpp" ];
then
    MTOCPP=$MTOCPP_ROOT/mtocpp
else
    MTOCPP="wine $MTOCPP_ROOT/mtocpp.exe"
fi
doit ""
doit "2"
| true
|
6abfc3b7d1b848a4c6933bc77ff3222c733a931f
|
Shell
|
jimkil/update_git_repositories
|
/functions-misc.sh
|
UTF-8
| 944
| 4.25
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Misc functions file
show_message(){
    # Welcome banner: big figlet title, author credit, then an intro
    # line and a horizontal rule. Printed once before any work starts.
    figlet "UPDATE GIT"
    cat <<'BANNER'
by: sWallyx

Let me update the repositories for you
 - - - - - - - - - - - - - - - 

BANNER
}
enter_folder_and_print_inside_exit(){
    # Move from the given .git directory into its working tree,
    # announce which repository is being handled, then delegate the
    # actual work to update_repository.
    # $1 - path to a repository's .git directory
    local git_dir=$1
    # enter the .git directory; bail out of the script if that fails
    cd "$git_dir" || exit
    # step up one level into the repository's working tree
    cd ..
    # resolve the repository name from its top-level directory
    # (kept global on purpose -- presumably read elsewhere; TODO confirm)
    base_name=$(basename "$(git rev-parse --show-toplevel)")
    echo "Updating repository $base_name"
    update_repository
}
bye_message(){
    # Farewell banner printed once after every repository was updated:
    # a closing rule, a summary line, and a friendly sign-off.
    cat <<'BYE'

 - - - - - - - - - - - - - - - 
All repositories updated
Have a nice coding :)

BYE
}
| true
|
6d4238c729364e51fd9d3db78cc1d1aca6e4a3ea
|
Shell
|
rbalzs/m2-cli-aliases
|
/magento2_cli_aliases.sh
|
UTF-8
| 1,202
| 2.890625
| 3
|
[] |
no_license
|
# Root of the local Magento 2 project (adjust per machine).
export PROJECT="/home/user/m2project";
# run a command inside the Warden shell from the project root;
# $PROJECT is single-quoted so it expands when the alias is used
alias ws='cd $PROJECT; warden shell -c '
# reindexing
alias fre="ws 'bin/magento indexer:reindex'"
# DI compile
alias fco="ws 'bin/magento setup:di:compile'"
# runs a composer install & setup:upgrade
alias c="ws 'composer install'"
alias fsup="c; ws 'bin/magento setup:upgrade'"
# some aliases to list,create and update a given magento Admin user (using N98-magerun for M2)
alias adminl="ws 'n98-magerun admin:user:list'"
alias adminc="ws 'bin/magento admin:user:create'"
alias adminu="ws 'n98-magerun admin:user:change-password'"
# start/stop Warden & tail nginx log entries ..
alias wstart='sudo service apache2 stop; sudo service mysql stop; sudo service elasticsearch stop; cd $PROJECT; warden env start'
alias wstop='cd $PROJECT; warden env stop'
alias log='cd $PROJECT; warden env logs --tail 0 -f nginx'
# grep options excluding binary-ish file types from vendor searches.
# Single-quoted as one literal (the old nested double quotes left the
# *.map parts unquoted, which was confusing and fragile); the variable
# is expanded UNQUOTED in gv below so it word-splits into one
# --exclude=… option per pattern.
export EXCLUDE_FILE_TYPES='--exclude=*.map --exclude=*.patch --exclude=*.wsdl --exclude=*.md --exclude=*.pdf';
# grep over /vendor folder, excluding testing Classes and some specific 'file types'.. usage: gv 'stringToSearch'
alias gv='function _gv(){ grep -i -F -e "$1" $PROJECT/vendor -R $EXCLUDE_FILE_TYPES | grep -i -v "test"; };_gv'
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.