blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9ca669b846b0dd8a211f0eedece134ee9862241f
|
Shell
|
nii-gakunin-cloud/sinetstream
|
/docs/tutorial/all-in-one/init.sh
|
UTF-8
| 6,558
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Tutorial all-in-one broker initialisation: builds a private CA, issues
# broker/client certificates, configures Kafka and Mosquitto TLS
# listeners, and starts everything under supervisord.
# All values below are environment-overridable defaults
# (": ${VAR:=...}" assigns only when the variable is unset or empty).

# Private CA directory layout and certificate subject defaults.
: ${CATOP:=/etc/pki/CA}
: ${CA_COUNTRY:=JP}
: ${CA_STATE:=Default_Organization}
: ${CA_ORGANIZATION:=Default_Organization}
: ${CA_CNAME:=private-ca}
: ${CA_DAYS:=1095}
# Broker identity and Kafka keystore/truststore settings.
: ${BROKER_HOSTNAME:=$(hostname -f)}
: ${KAFKA_CERTS_DIR:=/srv/kafka/config/certs}
: ${TRUSTSTORE_PASSWORD:=ca-pass}
: ${KEYSTORE_PASSWORD:=broker-pass}
# Mosquitto (MQTT) configuration locations.
: ${MOSQUITTO_CONF_D_DIR:=/etc/mosquitto/conf.d}
: ${MOSQUITTO_CERTS_DIR:=/etc/mosquitto/certs}
# Common names for the generated "good" and "bad" client certificates.
: ${CLIENT_CNAME:=client0}
: ${BAD_CLIENT_CNAME:=badclient}
# Initialise a private CA under ${CATOP} (openssl "ca" directory layout)
# and create a self-signed CA certificate if one does not exist yet.
# Idempotent: both steps are guarded by marker files.
setup_ca() {
  # First run: create the canonical CA directory structure.
  if [[ ! -f ${CATOP}/serial ]]; then
    mkdir -p ${CATOP}
    mkdir -p ${CATOP}/certs
    mkdir -p ${CATOP}/crl
    mkdir -p ${CATOP}/newcerts
    mkdir -p ${CATOP}/private
    touch ${CATOP}/index.txt
  fi
  # Uncomment "unique_subject" and "copy_extensions" in the system
  # openssl config so CSR extensions (e.g. SANs) are copied into certs.
  sed -i -r -e '/unique_subject/s/^#//' -e '/copy_extensions/s/^#[ ]*//' \
    /etc/pki/tls/openssl.cnf
  if [[ ! -f ${CATOP}/private/cakey.pem ]]; then
    # Generate an unencrypted (-nodes) CA key plus a CSR ...
    openssl req -new -keyout ${CATOP}/private/cakey.pem \
      -out ${CATOP}/careq.pem -nodes \
      -subj /C=${CA_COUNTRY}/ST=${CA_STATE}/O=${CA_ORGANIZATION}/CN=${CA_CNAME}
    # ... then self-sign it as a v3 CA certificate valid ${CA_DAYS} days.
    openssl ca -create_serial -out ${CATOP}/cacert.pem -days ${CA_DAYS} \
      -batch -keyfile ${CATOP}/private/cakey.pem -selfsign -extensions v3_ca \
      -infiles ${CATOP}/careq.pem
  fi
}
# Issue the broker (server) certificate: enable req extensions, register
# ${BROKER_HOSTNAME} as a subjectAltName in the openssl config, then
# create and CA-sign the broker key. Idempotent.
generate_server_cert() {
  local openssl_conf=/etc/pki/tls/openssl.cnf
  # Uncomment "req_extensions" so the v3_req section (with our SAN)
  # is applied to generated CSRs.
  sed -i -r -e '/req_extensions/s/^#[ ]*//' ${openssl_conf}
  # Append the alt_names section only once (guarded by a marker grep).
  if ! grep -q alt_names ${openssl_conf}; then
    sed -i -r -e '/\[ v3_req \]/asubjectAltName = @alt_names' ${openssl_conf}
    cat >> ${openssl_conf} <<EOF
[ alt_names ]
DNS = ${BROKER_HOSTNAME}
EOF
  fi
  if [[ ! -f ${CATOP}/private/broker.key ]]; then
    # Unencrypted broker key + CSR, then sign it with the private CA.
    openssl req -new -keyout ${CATOP}/private/broker.key \
      -out ${CATOP}/broker.csr -nodes \
      -subj /C=${CA_COUNTRY}/CN=${BROKER_HOSTNAME}
    # policy_anything: accept whatever subject fields the CSR provides.
    openssl ca -batch -keyfile ${CATOP}/private/cakey.pem \
      -cert ${CATOP}/cacert.pem -in ${CATOP}/broker.csr \
      -out ${CATOP}/certs/broker.crt -policy policy_anything
  fi
}
# Format the Kafka KRaft storage directory on first boot.
# Idempotent: skipped when meta.properties already exists in the log dir.
setup_kafka() {
  # NOTE(review): the grep matches any key containing "log.dir" (it would
  # also hit "log.dirs"), and only the first comma-separated directory is
  # checked -- confirm this matches the shipped server.properties.
  logdir="$(grep log.dir /srv/kafka/config/kraft/server.properties | cut -d = -f 2 | cut -d , -f 1)"
  if [[ ! -f "${logdir}/meta.properties" ]]; then
    pushd /srv/kafka
    KAFKA_CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)"
    bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c config/kraft/server.properties
    popd
  fi
}
# Package the CA and broker credentials as PKCS#12 stores and enable the
# SSL listener in the Kafka server properties. Both steps are idempotent
# (guarded by the broker.p12 file and a marker grep, respectively).
setup_kafka_ssl() {
  # Fix: the path previously had a doubled leading slash ("//srv/...").
  local kafka_server_props=/srv/kafka/config/server.properties
  mkdir -p ${KAFKA_CERTS_DIR}
  if [[ ! -f ${KAFKA_CERTS_DIR}/broker.p12 ]]; then
    # Truststore: the CA certificate/key, protected by TRUSTSTORE_PASSWORD.
    openssl pkcs12 -export -in ${CATOP}/cacert.pem \
      -inkey ${CATOP}/private/cakey.pem \
      -out ${KAFKA_CERTS_DIR}/ca.p12 -name ${CA_CNAME} \
      -CAfile ${CATOP}/cacert.pem -caname ${CA_CNAME} \
      -passout pass:${TRUSTSTORE_PASSWORD}
    # Keystore: the broker certificate/key, protected by KEYSTORE_PASSWORD.
    openssl pkcs12 -export -in ${CATOP}/certs/broker.crt \
      -inkey ${CATOP}/private/broker.key \
      -out ${KAFKA_CERTS_DIR}/broker.p12 -name ${BROKER_HOSTNAME} \
      -CAfile ${CATOP}/cacert.pem -caname ${CA_CNAME} \
      -passout pass:${KEYSTORE_PASSWORD}
  fi
  # Append listener/keystore settings exactly once.
  # NOTE(review): SSL_CLIENT_AUTH has no default at the top of this file;
  # if unset the generated line is "ssl.client.auth=" -- confirm intended.
  if ! grep -q broker.p12 ${kafka_server_props}; then
    cat >> ${kafka_server_props} <<EOF
listeners=PLAINTEXT://:9092,SSL://:9093
advertised.listeners=PLAINTEXT://${BROKER_HOSTNAME}:9092,SSL://${BROKER_HOSTNAME}:9093
ssl.truststore.location=${KAFKA_CERTS_DIR}/ca.p12
ssl.truststore.password=${TRUSTSTORE_PASSWORD}
ssl.truststore.type=pkcs12
ssl.keystore.location=${KAFKA_CERTS_DIR}/broker.p12
ssl.keystore.password=${KEYSTORE_PASSWORD}
ssl.keystore.type=pkcs12
ssl.client.auth=${SSL_CLIENT_AUTH}
EOF
  fi
}
# Configure Mosquitto with a plaintext listener (1883) and a TLS listener
# (8883), and install the broker certificate material for the latter.
setup_mosquitto_ssl() {
  mkdir -p ${MOSQUITTO_CONF_D_DIR}
  mkdir -p /var/lib/mosquitto
  chown mosquitto:mosquitto /var/lib/mosquitto
  # Write the listener configs only once (guarded by 00-plain.conf).
  if [[ ! -f ${MOSQUITTO_CONF_D_DIR}/00-plain.conf ]]; then
    cat >> /etc/mosquitto/mosquitto.conf <<EOF
persistence true
persistence_location /var/lib/mosquitto
per_listener_settings true
include_dir ${MOSQUITTO_CONF_D_DIR}
EOF
    cat > ${MOSQUITTO_CONF_D_DIR}/00-plain.conf <<EOF
listener 1883
allow_anonymous true
EOF
    cat > ${MOSQUITTO_CONF_D_DIR}/01-ssl.conf <<EOF
listener 8883
cafile ${MOSQUITTO_CERTS_DIR}/cacert.pem
keyfile ${MOSQUITTO_CERTS_DIR}/broker.key
certfile ${MOSQUITTO_CERTS_DIR}/broker.crt
allow_anonymous true
EOF
  fi
  # Certificates are refreshed on every run (cp -p keeps timestamps).
  mkdir -p ${MOSQUITTO_CERTS_DIR}
  cp -p ${CATOP}/private/broker.key ${CATOP}/certs/broker.crt \
    ${CATOP}/cacert.pem ${MOSQUITTO_CERTS_DIR}
  chown -R mosquitto:mosquitto ${MOSQUITTO_CERTS_DIR}
}
# Publish the CA certificate in the HTTP server's docroot so clients
# can download and trust it.
expose_cert() {
  local docroot=/srv/http
  mkdir -p "${docroot}"
  cp "${CATOP}/cacert.pem" "${docroot}"
}
# Serve /srv/http (the CA cert and any generated client certs) over
# HTTP :8080 via a supervisord-managed python http.server.
setup_http_server() {
  expose_cert
  # Register the [program:http] section with supervisord only once.
  if ! grep -q program:http /etc/supervisord.conf; then
    cat >> /etc/supervisord.conf <<EOF
[program:http]
command=/usr/bin/python3 -m http.server 8080
directory=/srv/http
autostart=true
autorestart=true
stdout_logfile=/var/log/%(program_name)s.log
redirect_stderr=true
EOF
  fi
}
# Create and CA-sign a client certificate, published via the HTTP docroot.
# Idempotent: skipped when the client key already exists.
# NOTE(review): the output filenames are fixed to "client0.*" even though
# the CN comes from ${CLIENT_CNAME}; kept as-is since clients fetch these
# exact names.
generate_client_cert() {
  local cert_dir=/srv/http
  if [[ ! -f ${cert_dir}/client0.key ]]; then
    # Fix: dropped the no-op "-days 365" -- "openssl req" ignores -days
    # unless -x509 is given (a CSR carries no validity period); the signed
    # certificate's lifetime comes from the CA configuration instead.
    openssl req -new -nodes \
      -keyout ${cert_dir}/client0.key \
      -out ${cert_dir}/client0.csr \
      -subj /C=${CA_COUNTRY}/CN=${CLIENT_CNAME}
    openssl ca -batch -keyfile ${CATOP}/private/cakey.pem \
      -cert ${CATOP}/cacert.pem -in ${cert_dir}/client0.csr \
      -out ${cert_dir}/client0.crt -policy policy_anything
  fi
}
# Create a self-signed client certificate that the private CA did NOT
# issue; the tutorial uses it to demonstrate a rejected TLS client.
generate_bad_client_cert() {
  local out_dir=/srv/http
  [[ -f ${out_dir}/bad-client.key ]] && return 0
  openssl req -new -x509 -newkey rsa:2048 -nodes \
    -subj /C=${CA_COUNTRY}/CN=${BAD_CLIENT_CNAME} \
    -keyout ${out_dir}/bad-client.key \
    -out ${out_dir}/bad-client.crt
}
# Generate both tutorial client certificates: the CA-signed "good" one
# and the self-signed "bad" one.
generate_client_certs() {
  local generator
  for generator in generate_client_cert generate_bad_client_cert; do
    "$generator"
  done
}
# Map BROKER_HOSTNAME to this host in /etc/hosts when it differs from the
# machine's FQDN, so local clients can resolve the broker name.
setup_etc_hosts() {
  if [[ ${BROKER_HOSTNAME} != $(hostname -f) ]]; then
    # -w matches the whole word, not a substring of another entry.
    if ! grep -qw "${BROKER_HOSTNAME}" /etc/hosts; then
      # Fix: "hostname -I" prints ALL configured addresses; the original
      # wrote every address on one line, making the 2nd+ addresses parse
      # as hostname aliases. Use only the first address.
      echo "$(hostname -I | awk '{print $1}') ${BROKER_HOSTNAME}" >> /etc/hosts
    fi
  fi
}
# Generate any missing SSH host keys (rsa, ecdsa, ed25519) using the
# distribution's sshd-keygen helper.
setup_sshd() {
  local keytype
  for keytype in rsa ecdsa ed25519; do
    if [[ ! -f /etc/ssh/ssh_host_${keytype}_key ]]; then
      /usr/libexec/openssh/sshd-keygen ${keytype}
    fi
  done
}
# Full broker provisioning sequence. Order matters: the CA and server
# certificate must exist before the Kafka/Mosquitto/HTTP steps that
# package and publish them.
setup() {
  setup_ca
  generate_server_cert
  setup_etc_hosts
  setup_kafka
  setup_kafka_ssl
  setup_mosquitto_ssl
  setup_http_server
  # Client certificates are only produced when explicitly requested.
  if [[ ${GENERATE_CLIENT_CERTS} = "true" ]]; then
    generate_client_certs
  fi
}
# Entry point: provision the broker when both flags are set, optionally
# prepare sshd host keys, then hand the process over to supervisord.
if [[ ${ENABLE_BROKER} = "true" && ${ENABLE_SSL} = "true" ]]; then
  setup
fi
if [[ ${ENABLE_SSHD} = "true" ]]; then
  setup_sshd
fi
# exec replaces this shell so supervisord runs as the container's PID 1
# and receives signals directly.
exec /usr/bin/supervisord -n -c /etc/supervisord.conf
| true
|
9b1fffafa5b7dc21435f3116d092ba14e4a90de3
|
Shell
|
tetrapharmakon/yoneda-ontology
|
/II-spheres/MAKE
|
UTF-8
| 495
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build/maintenance dispatcher for II-spheres.tex:
#   Ip      - continuous preview build (latexmk -pvc), then clean aux files
#   I       - one-shot build, then clean aux files
#   clobber - commit/push for safety, delete conflict leftovers, deep clean
#   git     - commit and push everything
# Fix: shebang was "#!/bin/env bash"; env lives in /usr/bin on most systems.
case "$1" in
"Ip")
  latexmk -pdf -shell-escape -pvc II-spheres.tex
  latexmk -c ;;
"I")
  latexmk -pdf -shell-escape II-spheres.tex
  latexmk -c ;;
"clobber")
  git add -A
  git commit -m "safety update"
  git push
  # ^ do a push for safety reason: one never knows what rm deletes...
  # Fix: '-not -name ".tex"' only excluded files literally named ".tex";
  # the intent is to keep *.tex sources out of the deletion list.
  find . -regex ".*conflitto.+." -not -name "*.tex" | detox | xargs rm
  latexmk -C
  cd appmeta
  latexmk -C
  cd .. ;;
"git")
  git add -A
  git commit -m "automated update"
  git push ;;
esac
| true
|
a50c73f19b60316f717ceb431e155d295bb8cd44
|
Shell
|
ydongfang/imx28x-build
|
/configs/zlg_imx287_env.sh
|
UTF-8
| 3,240
| 3.4375
| 3
|
[] |
no_license
|
#
# build scripts for ZLG IMX28x Board
# Author: Han Pengfei <pengphei@foxmail.com>
#
# Source this file from the project root: it exports the source-tree
# layout, toolchain location and default u-boot/kernel configurations
# used by the build_* helpers below.
export BUILD_TRUNK=$(pwd)
export BUILD_TRUNK_OUT=${BUILD_TRUNK}/out_zlg_im28x
export BUILD_LINUX_PATH=${BUILD_TRUNK}/package/linux-2.6.35.3
export BUILD_UBOOT_PATH=${BUILD_TRUNK}/package/bootloader/u-boot-2009.08
export BUILD_BOOTLETS_PATH=${BUILD_TRUNK}/package/bootloader/imx-bootlets-src-10.12.01
export BUILD_TOOLS_PATH=${BUILD_TRUNK}/tools
export BUILD_TOOLCHAIN_PATH=${BUILD_TOOLS_PATH}/arm-2014.05/bin
# Cross-compilation target configuration.
export BUILD_ARCH=arm
export BUILD_CROSS_COMPILE=arm-none-linux-gnueabi-
export BUILD_UBOOT_CONFIG=mx28_evk_config
export BUILD_KERNEL_CONFIG=imx28_zlg_defconfig
# Make the toolchain, u-boot tools (mkimage) and elftosb reachable.
export PATH=$PATH:${BUILD_TOOLCHAIN_PATH}:${BUILD_UBOOT_PATH}/tools:${BUILD_TRUNK}/package/bootloader/elftosb
# Force the C locale for consistent tool output.
export LANG=C
export LC_ALL=C
# prepare building environment
# Create the build output directory if it is missing (idempotent).
function build_prepare()
{
    # Fix: "mkdir -p" replaces the test-then-mkdir dance, also creates
    # missing parents, and quoting protects paths containing spaces.
    mkdir -p "${BUILD_TRUNK_OUT}"
}
# build uboot
# Clean-build u-boot for the configured board and stage the results:
# u-boot.bin -> output dir; the ELF u-boot -> bootlets dir (for elftosb).
function build_uboot()
{
    cd ${BUILD_UBOOT_PATH}
    make ARCH=${BUILD_ARCH} CROSS_COMPILE=${BUILD_CROSS_COMPILE} distclean
    make ARCH=${BUILD_ARCH} CROSS_COMPILE=${BUILD_CROSS_COMPILE} ${BUILD_UBOOT_CONFIG}
    make ARCH=${BUILD_ARCH} CROSS_COMPILE=${BUILD_CROSS_COMPILE}
    cp ${BUILD_UBOOT_PATH}/u-boot.bin ${BUILD_TRUNK_OUT}
    cp ${BUILD_UBOOT_PATH}/u-boot ${BUILD_BOOTLETS_PATH}
    cd -
}
# build bootlets
# Assemble the i.MX28 bootlets (boot stream, *.sb) image. Requires
# u-boot and the kernel zImage to have been built first.
function build_bootlets()
{
    cd ${BUILD_BOOTLETS_PATH}
    # Fix: quote ${HOSTTYPE} -- when it is unset the unquoted test
    # collapsed to `[ "i386" = ]`, a test(1) syntax error.
    if [ "i386" = "${HOSTTYPE}" ]; then
        export BUILD_ELFTOSB=elftosb_32bit
    else
        export BUILD_ELFTOSB=elftosb_64bit
    fi
    # copy uboot to bootlets directory
    cp ${BUILD_UBOOT_PATH}/u-boot ${BUILD_BOOTLETS_PATH}
    # copy kernel zImage to bootlets directory
    cp ${BUILD_LINUX_PATH}/arch/${BUILD_ARCH}/boot/zImage ${BUILD_BOOTLETS_PATH}
    # build bootlets image
    make CROSS_COMPILE=${BUILD_CROSS_COMPILE} ELFTOSB=${BUILD_ELFTOSB} BOARD=iMX28_EVK
    # copy uboot final image to output
    cp ${BUILD_BOOTLETS_PATH}/*.sb ${BUILD_TRUNK_OUT}
    cd -
}
# build kernel
# Build the kernel uImage/zImage plus modules, install the modules into a
# staging "output" tree, and copy everything into ${BUILD_TRUNK_OUT}.
function build_kernel()
{
    cd ${BUILD_LINUX_PATH}
    # Recreate a clean module staging directory.
    if [ -d output ]; then
        rm -rf output
        mkdir output
    else
        mkdir output
    fi
    # check if we have config used
    if [ ! -e .config ]; then
        cp arch/${BUILD_ARCH}/configs/${BUILD_KERNEL_CONFIG} .config
    fi
    # build kernel uImage and modules
    make ARCH=${BUILD_ARCH} CROSS_COMPILE=${BUILD_CROSS_COMPILE} uImage modules
    # install modules to target directory
    make INSTALL_MOD_PATH=output ARCH=${BUILD_ARCH} CROSS_COMPILE=${BUILD_CROSS_COMPILE} modules_install
    # get kernel version (second double-quoted field of UTS_RELEASE)
    if [ -r include/generated/utsrelease.h ]; then
        KERNEL_VERSION=`cat include/generated/utsrelease.h |awk -F\" '{print $2}'`
    fi
    BUILD_KERNEL_MODULES_OUT=${BUILD_LINUX_PATH}/output/lib/modules
    cp ${BUILD_LINUX_PATH}/arch/${BUILD_ARCH}/boot/uImage ${BUILD_TRUNK_OUT}
    cp ${BUILD_LINUX_PATH}/arch/${BUILD_ARCH}/boot/zImage ${BUILD_TRUNK_OUT}
    rm -rf ${BUILD_TRUNK_OUT}/modules
    cp -r ${BUILD_KERNEL_MODULES_OUT} ${BUILD_TRUNK_OUT}
    # Drop the build/source symlinks, which point back into the tree.
    rm -rf ${BUILD_TRUNK_OUT}/modules/${KERNEL_VERSION}/build
    rm -rf ${BUILD_TRUNK_OUT}/modules/${KERNEL_VERSION}/source
    cd -
}
# Ensure the output directory exists whenever this file is sourced.
build_prepare
| true
|
d0dfc4544c59b42401afc44367b2386ce57f37f8
|
Shell
|
bigmontz/dev-env-maker
|
/git/hooks/pre-commit
|
UTF-8
| 1,135
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# pre-commit hook: warn about (and optionally block) commits that contain
# debug leftovers such as System.out.print / @deleteme / console.log.
# Fix: the script uses arrays, [[ ]] and =~ -- all bash features -- so the
# original "#!/bin/sh" shebang was incorrect.
if git rev-parse --verify HEAD >/dev/null 2>&1
then
	against=HEAD
else
	# Initial commit: a fixed baseline revision to diff against.
	# NOTE(review): `against` is never used below -- confirm intent.
	against=f737485baee8b0edc5ef4d00c498305578d630fd
fi

# Redirect output to stderr.
exec 1>&2

RED='\033[0;31m'
NC='\033[0m' # No Color

keywords=(System.out.print @deleteme console.log)
# Join the keywords into an alternation pattern ("a|b|c") for grep -E.
keywords_for_grep=$(printf "|%s" "${keywords[@]}")
keywords_for_grep=${keywords_for_grep:1}

files_changed=$(git diff-index --name-only HEAD --)

# Fix: the grep calls were wrapped in backticks, which (pointlessly)
# tried to execute grep's output; call them directly instead.
# $files_changed is deliberately unquoted so each file becomes its own
# argument (paths with spaces are not supported, as before).
git grep -q -E "$keywords_for_grep" $files_changed
number_of_results=$?

if [ $number_of_results -eq 0 ]
then
	echo -e "${RED}# Check following lines:${NC}"
	for FILE in $files_changed ; do
		for keyword in "${keywords[@]}" ; do
			if git grep -q "$keyword" "$FILE"
			then
				LINE=$(git grep -n "$keyword" "$FILE" | awk -F ":" '{print $2}')
				echo -e "${RED}$FILE contains $keyword at line $LINE.${NC}"
			fi
		done
	done
	# Ask on the controlling terminal whether to commit anyway.
	exec < /dev/tty
	read -p "Do you want to proceed anyway? (y/N)" answer
	if [[ $answer =~ ^[Yy]$ ]] ;
	then
		echo "Commit anyway"
	else
		exit 1
	fi
fi
| true
|
f142b97a86a589424e0444d934b2967c70660451
|
Shell
|
oracle/cloudtestdrive
|
/AppDev/wls/ll-wls-migration/WLS_deploy_scripts/source-12213-domain/container-scripts/setEnv.sh
|
UTF-8
| 2,663
| 3.65625
| 4
|
[
"UPL-1.0"
] |
permissive
|
#!/bin/bash ex
# Copyright (c) 2018, 2019 Oracle and/or its affiliates. All rights reserved.
# The Universal Permissive License (UPL), Version 1.0
#
# This example creates the BUILD_ARG environment variable as a string of --build-arg for
# the arguments passed on the docker build command. The variable file that is used for the WDT
# create domain step is the input to this script. This insures that the values persisted
# as environment variables in the docker image match the configured domain home.
#
# Usage: source setEnv.sh <properties-file>
# Each recognised KEY=value line in the properties file is exported and
# appended to BUILD_ARG as "--build-arg CUSTOM_<KEY>=<value>".
BUILD_ARG=''
if [ "$#" -eq "0" ]; then
  echo "A properties file with variable definitions should be supplied."
  exit 1
else
  PROPERTIES_FILE=$1
  echo Export environment variables from the ${PROPERTIES_FILE} properties file
fi
# DOMAIN_NAME: from the properties file, falling back to a pre-existing
# DOMAIN_NAME environment variable when the file omits it.
DOMAIN_DIR=`awk '{print $1}' $PROPERTIES_FILE | grep ^DOMAIN_NAME= | cut -d "=" -f2`
if [ ! -n "$DOMAIN_DIR" ]; then
  if [ -n "$DOMAIN_NAME" ]; then
    DOMAIN_DIR=$DOMAIN_NAME
  fi
fi
if [ -n "$DOMAIN_DIR" ]; then
  DOMAIN_NAME=$DOMAIN_DIR
  export DOMAIN_NAME
  echo DOMAIN_NAME=$DOMAIN_NAME
  BUILD_ARG="$BUILD_ARG --build-arg CUSTOM_DOMAIN_NAME=$DOMAIN_NAME"
fi
# The remaining keys all follow the same pattern: extract the value,
# export it, echo it, and append a CUSTOM_* docker build argument.
ADMIN_HOST=`awk '{print $1}' $PROPERTIES_FILE | grep ^ADMIN_HOST= | cut -d "=" -f2`
if [ -n "$ADMIN_HOST" ]; then
  export ADMIN_HOST
  echo ADMIN_HOST=$ADMIN_HOST
  BUILD_ARG="$BUILD_ARG --build-arg CUSTOM_ADMIN_HOST=$ADMIN_HOST"
fi
ADMIN_NAME=`awk '{print $1}' $PROPERTIES_FILE | grep ^ADMIN_NAME= | cut -d "=" -f2`
if [ -n "$ADMIN_NAME" ]; then
  export ADMIN_NAME
  echo ADMIN_NAME=$ADMIN_NAME
  BUILD_ARG="$BUILD_ARG --build-arg CUSTOM_ADMIN_NAME=$ADMIN_NAME"
fi
ADMIN_PORT=`awk '{print $1}' $PROPERTIES_FILE | grep ^ADMIN_PORT= | cut -d "=" -f2`
if [ -n "$ADMIN_PORT" ]; then
  export ADMIN_PORT
  echo ADMIN_PORT=$ADMIN_PORT
  BUILD_ARG="$BUILD_ARG --build-arg CUSTOM_ADMIN_PORT=$ADMIN_PORT"
fi
MANAGED_SERVER_PORT=`awk '{print $1}' $PROPERTIES_FILE | grep ^MANAGED_SERVER_PORT= | cut -d "=" -f2`
if [ -n "$MANAGED_SERVER_PORT" ]; then
  export MANAGED_SERVER_PORT
  echo MANAGED_SERVER_PORT=$MANAGED_SERVER_PORT
  BUILD_ARG="$BUILD_ARG --build-arg CUSTOM_MANAGED_SERVER_PORT=$MANAGED_SERVER_PORT"
fi
DEBUG_PORT=`awk '{print $1}' $PROPERTIES_FILE | grep ^DEBUG_PORT= | cut -d "=" -f2`
if [ -n "$DEBUG_PORT" ]; then
  export DEBUG_PORT
  echo DEBUG_PORT=$DEBUG_PORT
  BUILD_ARG="$BUILD_ARG --build-arg CUSTOM_DEBUG_PORT=$DEBUG_PORT"
fi
# IMAGE_TAG (if present) only sets TAG_NAME; it is not a build argument.
CUSTOM_TAG_NAME=`awk '{print $1}' $PROPERTIES_FILE | grep ^IMAGE_TAG= | cut -d "=" -f2`
if [ -n "$CUSTOM_TAG_NAME" ]; then
  TAG_NAME=${CUSTOM_TAG_NAME}
  export TAG_NAME
  echo "Set the image tag name to $TAG_NAME"
fi
echo BUILD_ARG=$BUILD_ARG
| true
|
f3c697fd404e85206c2b80ea20142a73bf0d903d
|
Shell
|
wlshiu/my_note
|
/ref_shell/my_add2line.sh
|
UTF-8
| 340
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Wrapper around the cross addr2line: resolve an address inside an ELF
# binary to a source file and line.

# Print usage information and abort with a non-zero status.
function help()
{
    printf '%s\n' \
        "usage: addr2line [execute file] [addr]" \
        " e.g. addr2line ../build/live555_server-1.0/liveMediaServer 0x406854 " \
        " ps. Need to add compiler options '-g -rdynamic' "
    exit 1
}
# Require exactly two positional arguments: the binary and the address.
if [ "$#" -ne 2 ]; then
    help
fi
# Fix: "$1"/"$2" are now quoted so binary paths containing spaces survive
# word splitting; -e selects the executable, -f also prints the function.
./host/usr/bin/aarch64-linux-gnu-addr2line -e "$1" -f "$2"
| true
|
dda328cbbf3f7b8876600c365645b84d60a0b110
|
Shell
|
tsony-tsonev/configs
|
/setup.sh
|
UTF-8
| 3,697
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/sh
# Fix: the first line was "#/bin/sh" (missing '!'), which is not a valid
# shebang, so the script ran under whichever shell happened to invoke it.
# Developer workstation bootstrap: build vim from source, switch to
# zsh/oh-my-zsh, build tmux from source, install plugin managers and
# fonts, then symlink the dotfiles kept in this repository.
# install vim
sudo apt-get -y remove vim vim-runtime gvim
# install deps for manually building vim
sudo apt-get -y install liblua5.1-dev luajit libluajit-5.1 python-dev ruby-dev libperl-dev libncurses5-dev libatk1.0-dev libx11-dev libxpm-dev libxt-dev python2.7 python3.6 python-pip python3-pip libgnome2-dev libgnomeui-dev libgtk2.0-dev libbonoboui2-dev libcairo2-dev
sudo mkdir /usr/include/lua5.1/include
sudo cp /usr/include/lua5.1/* /usr/include/lua5.1/include
git clone https://github.com/vim/vim.git
cd vim
sudo make uninstall # remove previous custom builds if any
# pay attention here check if directories are correct
make distclean
./configure --with-features=huge \
            --enable-multibyte \
            --enable-rubyinterp=yes \
            --enable-pythoninterp=yes \
            --enable-python3interp=yes \
            --with-python3-config-dir=/usr/lib/python3.6/config-3.6m-x86_64-linux-gnu \
            --enable-perlinterp=yes \
            --enable-luainterp=yes \
            --with-luajit \
            --with-lua-prefix=/usr/include/lua5.1 \
            --enable-gui=auto \
            --enable-largefile \
            --enable-fail-if-missing \
            --enable-cscope \
            --prefix=/usr/local
sudo make VIMRUNTIMEDIR=/usr/local/share/vim/vim81
sudo make install
cd ..
sudo rm -rf vim
#install neovim-bridge for the autocomplete
sudo apt-get install -y python3-setuptools
pip install wheel
pip install neovim
go get -u github.com/mdempsky/gocode
# install and change to zsh
sudo apt-get -y install zsh
chsh -s $(which zsh)
# zsh plugin manager
sh -c "$(wget https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh -O -)"
git clone git://github.com/zsh-users/zaw.git ~/.oh-my-zsh/custom/plugins/zaw
git clone https://github.com/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting
git clone https://github.com/JamesKovacs/zsh_completions_mongodb.git ~/.oh-my-zsh/custom/plugins/mongodb
# install tmux
sudo apt-get -y install curl xclip python-pygments
TMUX_VERSION=2.5
sudo apt-get -y remove tmux
sudo apt-get -y install wget tar libevent-dev libncurses-dev
wget https://github.com/tmux/tmux/releases/download/${TMUX_VERSION}/tmux-${TMUX_VERSION}.tar.gz
tar xf tmux-${TMUX_VERSION}.tar.gz
rm -f tmux-${TMUX_VERSION}.tar.gz
cd tmux-${TMUX_VERSION}
sudo make uninstall
./configure
make
sudo make install
cd -
sudo rm -rf /usr/local/src/tmux-*
sudo mv tmux-${TMUX_VERSION} /usr/local/src
# tmux plugin manager
git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm
git clone https://github.com/tmux-plugins/tmux-sensible ~/.tmux/plugins/tmux-sensible
# install tmux nord theme
git clone https://github.com/arcticicestudio/nord-tmux ~/.tmux/plugins/nord-tmux
# vim plugin manager
mkdir -p ~/.vim/autoload ~/.vim/bundle && curl -LSso ~/.vim/autoload/pathogen.vim https://tpo.pe/pathogen.vim
# install vim fonts
# note: do not forget to set terminal emulator font to DroidSansMono
mkdir ~/.local/share/fonts && cd ~/.local/share/fonts && curl -fLo "Droid Sans Mono for Powerline Nerd Font Complete.otf" https://github.com/ryanoasis/nerd-fonts/raw/master/patched-fonts/DroidSansMono/complete/Droid%20Sans%20Mono%20Nerd%20Font%20Complete.otf
# install vim-tagbar deps
sudo apt-get -y install exuberant-ctags
# simlink and proper files
rm ~/.vimrc
rm ~/.tmux.conf
rm ~/.zshrc
rm ~/vim-plugins
ln -s `pwd`/vim-plugins ~/vim-plugins
ln -s `pwd`/.vimrc ~/.vimrc
ln -s `pwd`/.tmux.conf ~/.tmux.conf
ln -s `pwd`/.zshrc ~/.zshrc
touch .zsh_private
# needed for the vim 8+ persistent undo history
mkdir ~/.vim/undo
| true
|
f858ed14c90fb65ede1c5c364b31afdf0113054f
|
Shell
|
n800sau/robotarr
|
/3wheeler/link_extern2ws.sh
|
UTF-8
| 184
| 2.578125
| 3
|
[] |
no_license
|
# Link the external ROS2 package (located relative to this script) into
# the user's ros2 workspace source tree.
# Fix: replaced backticks with $( ), and quoted "$0"/"${DIRNAME}" so
# paths containing spaces do not break dirname/realpath.
DIRNAME=$(dirname "$0")
#echo $DIRNAME
ROS2_PACK_PATH=$(realpath "${DIRNAME}")/ros2_ws_extern/wheeler_3_extern
#echo $ROS2_PACK_PATH
ln -s "${ROS2_PACK_PATH}" ~/ros2_ws/src/wheeler_3_extern
| true
|
ffcb32a2e548ed06fdb74419a3b624325f667282
|
Shell
|
yuankunzhang/gb
|
/entry.sh
|
UTF-8
| 1,114
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: wait for MySQL, install Drupal on first run,
# then start Apache in the foreground.
# Abort on any command failure.
set -e
DRUPAL_DIR=/var/www/html
# First-boot provisioning: install drush via composer, run the Drupal
# site installer against the linked MySQL service (credentials come from
# DB_*/SITE_NAME/ACCOUNT_* environment variables), disable CSS/JS
# aggregation, and hand ownership of the docroot to the web user.
setup_drupal() {
  cd $DRUPAL_DIR
  php composer.phar require drush/drush
  ./vendor/bin/drush site-install standard -y \
    --db-url="mysql://$DB_USER:$DB_PASS@$DB_HOST:3306/$DB_NAME" \
    --site-name="$SITE_NAME" \
    --account-name="$ACCOUNT_NAME" \
    --account-pass="$ACCOUNT_PASS"
  # Disable asset aggregation so edits show up without cache clears.
  ./vendor/bin/drush -y config-set system.performance css.preprocess 0
  ./vendor/bin/drush -y config-set system.performance js.preprocess 0
  # Change site owner.
  chown -R www-data:www-data $DRUPAL_DIR
}
# Wait for MySQL connection.
# Probe the database port up to ten times, pausing 6s between attempts.
connected=false
attempt=1
while [ "$attempt" -le 10 ]; do
  if nc -z "$DB_HOST" 3306; then
    connected=true
    break
  fi
  sleep 6
  attempt=$((attempt + 1))
done
if [ "$connected" = false ]; then
  echo "Unable to connect to $DB_HOST, quit"
  exit 1
fi
# A missing drush binary marks the very first start of this image.
if [ ! -f "$DRUPAL_DIR/vendor/bin/drush" ]; then
  echo "####################################################"
  echo "This is the first time you run this image, the setup"
  echo "process will take a while, please be patient :)"
  echo "####################################################"
  setup_drupal
fi
apache2-foreground
| true
|
5f5e70946c79e5fb07c74c89910a55544c8f792f
|
Shell
|
ilias500/xsd2pgschema
|
/example/lucene_index_uniprotkb.sh
|
UTF-8
| 1,250
| 3.671875
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Build/refresh the Lucene index for UniProtKB XML via xsd2pgschema.
# With sync_update=true the existing index is kept and only changed files
# are re-indexed (tracked by MD5 checksums); otherwise it is rebuilt.
sync_update=true
XML_DIR=uniprot_xml
XSD_SCHEMA=uniprot.xsd
IDX_DIR=lucene_index
# Ask for confirmation before touching an existing index.
if [ -d $IDX_DIR ] ; then
  echo
  echo "Do you want to update lucene index? (y [n]) "
  read ans
  case $ans in
    y*|Y*) ;;
    *) echo stopped.
      exit 1;;
  esac
  if [ $sync_update != "true" ] ; then
    rm -rf $IDX_DIR
  fi
fi
WORK_DIR=lucene_work
ERR_DIR=$WORK_DIR/err
rm -rf $WORK_DIR
mkdir -p $WORK_DIR
mkdir -p $ERR_DIR
if [ $sync_update = "true" ] ; then
  MD5_DIR=chk_sum_lucene
fi
err_file=$ERR_DIR/all_err
# Run the indexer; stderr is captured so an empty file signals success.
if [ $sync_update != "true" ] ; then
  java -classpath ../xsd2pgschema.jar xml2luceneidx --xsd $XSD_SCHEMA --xml $XML_DIR --idx-dir $IDX_DIR --attr-all --no-rel 2> $err_file
else
  java -classpath ../xsd2pgschema.jar xml2luceneidx --xsd $XSD_SCHEMA --xml $XML_DIR --idx-dir $IDX_DIR --attr-all --no-rel --sync $MD5_DIR 2> $err_file
fi
# $? here is the java exit status (last command of the if/else above).
if [ $? = 0 ] && [ ! -s $err_file ] ; then
  rm -f $err_file
else
  echo $0 aborted.
  exit 1
fi
red='\e[0;31m'
normal='\e[0m'
# Count any remaining per-file *_err logs left behind by the indexer.
errs=`ls $ERR_DIR/*_err 2> /dev/null | wc -l`
if [ $errs = 0 ] ; then
  echo "Lucene index (UniProtKB) is update."
  rm -rf $WORK_DIR
else
  echo
  echo -e "${red}$errs errors were detected. Please check the log files for more details.${normal}"
  exit 1
fi
date
| true
|
999300c8e77328365184490bd446c7be46f54593
|
Shell
|
oldcryptogeek/guild-operators
|
/files/grest/cron/jobs/asset-registry-update.sh
|
UTF-8
| 2,489
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Cron job: sync the Cardano token registry into grest.asset_registry_cache.
# Clones/pulls the registry repo, converts mapping files changed since the
# last processed commit to CSV, and upserts them into the DB via psql.
CNODE_VNAME=cnode
DB_NAME=cexplorer
TR_URL=https://github.com/cardano-foundation/cardano-token-registry
TR_SUBDIR=mappings
TR_DIR=${HOME}/git
TR_NAME=${CNODE_VNAME}-token-registry
echo "$(date +%F_%H:%M:%S) - START - Asset Registry Update"
# First run: clone the registry repository.
if [[ ! -d "${TR_DIR}/${TR_NAME}" ]]; then
  [[ -z ${HOME} ]] && echo "HOME variable not set, aborting..." && exit 1
  mkdir -p "${TR_DIR}"
  cd "${TR_DIR}" >/dev/null || exit 1
  git clone ${TR_URL} ${TR_NAME} >/dev/null || exit 1
fi
pushd "${TR_DIR}/${TR_NAME}" >/dev/null || exit 1
git pull >/dev/null || exit 1
# The last processed commit is stored in the DB; fall back to the repo's
# root commit so a fresh DB processes the full history.
last_commit="$(psql ${DB_NAME} -c "select last_value from grest.control_table where key='asset_registry_commit'" -t | xargs)"
[[ -z "${last_commit}" ]] && last_commit="$(git rev-list HEAD | tail -n 1)"
latest_commit="$(git rev-list HEAD | head -n 1)"
[[ "${last_commit}" == "${latest_commit}" ]] && echo "$(date +%F_%H:%M:%S) - END - Asset Registry Update, no updates necessary." && exit 0
asset_cnt=0
[[ -f '.assetregistry.csv' ]] && rm -f .assetregistry.csv
# Convert each changed mapping file to one CSV row; the subject splits
# into policy (first 56 hex chars) and asset name (rest).
# NOTE(review): read's -e flag enables readline editing -- presumably
# unintended for non-interactive input; verify before changing.
while IFS= read -re assetfile; do
  if ! asset_data_csv=$(jq -er '[
.subject[0:56],
.subject[56:],
.name.value,
.description.value // "",
.ticker.value // "",
.url.value // "",
.logo.value // "",
.decimals.value // 0
] | @csv' "${assetfile}"); then
    echo "Failure parsing '${assetfile}', skipping..."
    continue
  fi
  echo "${asset_data_csv}" >> .assetregistry.csv
  ((asset_cnt++))
done < <(git diff --name-only "${last_commit}" "${latest_commit}" | grep ^${TR_SUBDIR})
# Bulk-load the CSV through a temp table, then upsert into the cache.
cat << EOF > .assetregistry.sql
CREATE TEMP TABLE tmparc (like grest.asset_registry_cache);
\COPY tmparc FROM '.assetregistry.csv' DELIMITER ',' CSV;
INSERT INTO grest.asset_registry_cache SELECT DISTINCT ON (asset_policy,asset_name) * FROM tmparc ON CONFLICT(asset_policy,asset_name) DO UPDATE SET asset_policy=excluded.asset_policy, asset_name=excluded.asset_name, name=excluded.name, description=excluded.description, ticker=excluded.ticker, url=excluded.url, logo=excluded.logo,decimals=excluded.decimals;
EOF
psql ${DB_NAME} -qb -f .assetregistry.sql >/dev/null && rm -f .assetregistry.sql
# Record the processed commit so the next run only diffs newer changes.
psql ${DB_NAME} -qb -c "INSERT INTO grest.control_table (key, last_value) VALUES ('asset_registry_commit','${latest_commit}') ON CONFLICT(key) DO UPDATE SET last_value='${latest_commit}'"
echo "$(date +%F_%H:%M:%S) - END - Asset Registry Update, ${asset_cnt} assets added/updated for commits ${last_commit} to ${latest_commit}."
| true
|
2303b5db302e464b6e75d1e2fca22664f74df936
|
Shell
|
lyf-1/parallel
|
/project1/run.sh
|
UTF-8
| 728
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Dispatcher for the MPI project-1 benchmarks.
#   $1 - benchmark name   $2 - number of processes
#   $3 - elements / matrix dimension   $4 - number of trials
# Fix: "$1" (and the other positionals) are now quoted -- the unquoted
# `test $1 = ...` was a test(1) syntax error when run with no arguments.
chmod 777 ./gather/my_allgather
chmod 777 ./gemm/cannon
chmod 777 ./gemm/conv
# 1 - name
# 2 - num of procs
# 3 - num of elements
# 4 - num of trials
if test "$1" = "gather"; then
    mpirun -np "$2" ./gather/my_allgather "$3" "$4"
# 3 - matrix dimension
elif test "$1" = "matrixmat"; then
    mpiexec -n "$2" ./gemm/cannon "$3"
elif test "$1" = "matrixconv"; then
    mpiexec -n "$2" ./gemm/conv "$3" 0
elif test "$1" = "matrixpool"; then
    mpiexec -n "$2" ./gemm/conv "$3" 1
elif test "$1" = "wc_big"; then
    echo "you need to go to mpi_wordcount/src folder and run 'mpiexec -n <process_num> ./main 0'"
elif test "$1" = "wc_small"; then
    echo "you need to go to mpi_wordcount/src folder and run 'mpiexec -n <process_num> ./main 1'"
fi
| true
|
0980ebb000c70bb34cb6f4881f686327bcb37b8e
|
Shell
|
mmalenta/meertrap_shipyard
|
/tag_functions.sh
|
UTF-8
| 5,213
| 4.71875
| 5
|
[] |
no_license
|
#!/bin/bash
# Helpers for computing and validating docker image version tags.
# Relies on the INFO/WARNING/ERROR helpers provided by logging.sh.
source ./logging.sh
# Tag conventions: stable releases look like "0.1.8",
# beta releases like "0.1.8b2".
BETA_REG="^[0-9]+\.[0-9]+\.[0-9]+b[0-9]+$"
STABLE_REG="^[0-9]+\.[0-9]+\.[0-9]+$"
# Sets the global `latest_tag` to the highest tag of the given docker
# image, using a numeric sort on the three dotted version fields.
function get_latest_tag()
{
    # Check for the latest tag
    # This assumes we stick to the convention and don't create
    # any tags that deviate from it significantly
    local tag_image=$1
    latest_tag=$( docker image ls ${tag_image} --format {{.Tag}} | sort -t '.' -k 1,1 -k 2,2 -k 3,3 -rn | head -n 1 )
}
# Validate a user-supplied tag against the naming convention and verify
# it increments the current `latest_tag`. Exits on violations unless
# force_tag is "true". Reads globals: latest_tag, BETA_REG, STABLE_REG.
#   $1 - proposed new tag
#   $2 - force flag ("true" to override non-incrementing tags)
function check_version_tag()
{
    # Make sure that there are these components only
    # Strip any text other than 'b' for micro release,
    # which is used for marking beta candidates.
    # Use regexp to check the tag
    local new_tag=$1
    local force_tag=$2
    if [[ $new_tag =~ $BETA_REG ]]
    then
        echo
    elif [[ $new_tag =~ $STABLE_REG ]]
    then
        echo
    else
        ERROR "Non-conforming tag provided!"
        echo "Tags have to adhere to the following convention:"
        echo "Stable release 0.[minor].[micro]"
        echo "Beta release 0.[minor].[micro]b[beta]"
        echo
        exit 1
    fi
    # Version-sort the two tags; if new_tag is not strictly the highest,
    # it does not (numerically) increment the release.
    local head_tag=$( echo $new_tag $latest_tag | tr " " "\n" | sort -t '.' -k 1,1 -k 2,2 -k 3,3 -rn | head -n 1 )
    if [[ $new_tag == $latest_tag || $head_tag != $new_tag ]]
    then
        # Need to handle beta - > stable release move properly
        # 0.1.8b1 is higher on the sorted list than 0.1.8
        # but from the release sense 0.1.8 is higher
        if [[ $latest_tag =~ $BETA_REG ]]
        then
            # Stripping the bN suffix: equality means beta -> stable.
            if [[ $new_tag == $( echo $latest_tag | sed 's/\(.*\)b[0-9]/\1/g') ]]
            then
                echo "Provided tag moves from beta to the stable release"
                INFO "Requested release tag change:\033[0m \033[1;34m${latest_tag}\033[0m -> \033[1;32m${new_tag}\033[0m"
            else
                ERROR "Provided tag does not increment the beta release"
                INFO "Requested release tag change:\033[0m \033[1;34m${latest_tag}\033[0m -> \033[1;31m${new_tag}\033[0m"
                echo
                if [[ $force_tag == true ]]
                then
                    WARNING "Will force the use of the provided tag!"
                else
                    exit 1
                fi
            fi
        else
            ERROR "Provided tag does not increment the release"
            INFO "Requested release tag change:\033[0m \033[1;34m${latest_tag}\033[0m -> \033[1;31m${new_tag}\033[0m"
            echo
            if [[ $force_tag == true ]]
            then
                WARNING "Will force the use of the provided tag!"
            else
                exit 1
            fi
        fi
    else
        echo "Provided tag increments the release"
        INFO "Requested release tag change:\033[0m \033[1;34m${latest_tag}\033[0m -> \033[1;32m${new_tag}\033[0m"
    fi
    # NOTE(review): $major is never assigned in this function or in this
    # file's visible scope -- confirm where it is expected to come from.
    if [[ $major -gt 0 ]]
    then
        echo -e "Are you \033[1mreally\033[0m sure we are ready for a release version?"
    fi
}
# Interactively derive the next version tag from the global `latest_tag`
# and publish it in the global `version_tag`:
#   beta   -> move to stable, or bump the bN suffix
#   stable -> bump minor, or bump micro (micro rolls into minor at 20)
# Exits with status 1 on invalid input or an unrecognised latest_tag.
function generate_version_tag()
{
    local local_tag
    echo
    if [[ $latest_tag =~ $BETA_REG ]]
    then
        echo -e "Latest beta version detected: \033[0m \033[1;33m${latest_tag}\033[0m"
        read -p "Would you like to move to a [s]table release or [i]crement the beta release? " release_choice
        case $release_choice in
            s)
                # Drop the bN suffix to obtain the stable tag.
                local_tag=$( echo $latest_tag | sed 's/\(.*\)b[0-9]/\1/g')
                INFO "Moving to a stable release tag:\033[0m \033[1;33m${latest_tag}\033[0m -> \033[1;32m${local_tag}\033[0m"
                ;;
            i)
                # Increment the numeric beta component after 'b'.
                local beta_version=$(( $(echo $latest_tag | sed 's/.*b\([0-9]*\)/\1/g' ) + 1 ))
                local_tag=$( echo $latest_tag | sed 's/\(.*b\)[0-9]/\1/g')${beta_version}
                INFO "Incrementing the beta release tag:\033[0m \033[1;33m${latest_tag}\033[0m -> \033[1;32m${local_tag}\033[0m"
                ;;
            *)
                ERROR "Invalid option!"
                exit 1
                ;;
        esac
    elif [[ $latest_tag =~ $STABLE_REG ]]
    then
        echo -e "Latest stable version detected: \033[0m \033[1;33m${latest_tag}\033[0m"
        read -p "Would you like to increment mi[n]or or mi[c]ro release " release_choice
        case $release_choice in
            n)
                local minor=$(( $( echo $latest_tag | awk -F '.' '{print $2}' ) + 1 ))
                # Assumes we will not reach a major release for a while
                local_tag="0.${minor}.0"
                echo -e "\033[1mIncrementing the major release tag:\033[0m \033[1;33m${latest_tag}\033[0m -> \033[1;32m${local_tag}\033[0m"
                ;;
            c)
                local micro=$(( $( echo $latest_tag | awk -F '.' '{print $3}' ) + 1 ))
                # Micro versions are capped at 20; roll over into minor.
                if [[ $micro -eq 20 ]]
                then
                    WARNING "Maximum micro version reached!"
                    WARNING "Will increment the minor version!"
                    local minor=$(( $( echo $latest_tag | awk -F '.' '{print $2}' ) + 1 ))
                    local_tag="0.${minor}.0"
                    INFO "Incrementing the minor release tag:\033[0m \033[1;33m${latest_tag}\033[0m -> \033[1;32m${local_tag}\033[0m"
                else
                    local_tag=$( echo $latest_tag | sed -r 's/(^[0-9]+.[0-9]+.).*$/\1/g' )${micro}
                    INFO "Incrementing the micro release tag:\033[0m \033[1;33m${latest_tag}\033[0m -> \033[1;32m${local_tag}\033[0m"
                fi
                ;;
            *)
                ERROR "Invalid option!"
                echo
                exit 1
                ;;
        esac
    else
        # No usable tag at all, or it matches neither convention.
        if [[ -z $latest_tag ]]
        then
            ERROR "Did not find the pipeline image at all!"
            echo
            exit 1
        else
            ERROR "Latest image tag not recognised!"
            echo
            exit 1
        fi
    fi
    # Publish the result for the caller.
    version_tag=$local_tag
}
| true
|
ab8597476f4de21960ca683d2c18459a43fd5096
|
Shell
|
skyzyx/server-metadata
|
/get-server-info.sh
|
UTF-8
| 16,437
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# https://github.com/skyzyx/server-metadata
# By Ryan Parman
echo "#-------------------------------------------------------------------------------"
# Which version of sed do we have available?
# GNU sed supports --help; on macOS prefer Homebrew/MacPorts "gsed",
# otherwise fall back to the system BSD sed with gnused=false.
if [[ $(sed --help 2>&1) && $? -eq 0 ]]; then
    gnused=true
    sed=sed
elif [[ $(gsed --help 2>&1) && $? -eq 0 ]]; then
    gnused=true
    sed=gsed
else
    gnused=false
    sed=sed
fi;
function __uptime() {
    # Print the system uptime formatted as "Dd, Hh, Mm, Ss",
    # derived from the first (whole-seconds) field of /proc/uptime.
    uptime=$(cat /proc/uptime)
    uptime=${uptime%%.*}                 # strip fractional seconds
    seconds=$(( uptime % 60 ))
    minutes=$(( uptime / 60 % 60 ))
    hours=$(( uptime / 60 / 60 % 24 ))
    days=$(( uptime / 60 / 60 / 24 ))
    # printf instead of an unquoted `echo $uptime` (no word-splitting risk).
    printf '%sd, %sh, %sm, %ss\n' "$days" "$hours" "$minutes" "$seconds"
}
#-------------------------------------------------------------------------------
echo "OPERATING SYSTEM:"
if command -v sw_vers >/dev/null 2>&1; then
    # macOS: product name/version/build.
    echo "OS: $(sw_vers -productName) $(sw_vers -productVersion) ($(sw_vers -buildVersion))"
elif command -v python >/dev/null 2>&1; then
    # Linux: distribution via Python 2's platform module.
    echo "OS: $(python -c 'import platform; print platform.linux_distribution()[0] + " " + platform.linux_distribution()[1]')"
fi;
if command -v uname >/dev/null 2>&1; then
    echo "Kernel: $(uname) $(uname -r)"
fi;
echo "Active Shell: $SHELL"
# Bug fix: `$(echo $SHELL) --version` executed echo's *output*; invoke the
# shell binary directly instead.
"$SHELL" --version | head -n 1
#-------------------------------------------------------------------------------
echo ""
echo "NETWORK:"
if command -v scutil >/dev/null 2>&1; then
    # macOS
    echo "Hostname: $(scutil --get LocalHostName)"
elif command -v hostname >/dev/null 2>&1; then
    echo "Hostname: $(hostname)"
fi;
if command -v ifconfig >/dev/null 2>&1; then
    # All non-loopback IPv4 addresses as a comma-separated list.
    echo "Internal IP(s): $(ifconfig | awk -F "[: ]+" '/inet addr:/ { if ($4 != "127.0.0.1") print $4 }' | awk '{printf "%s, ", $0} END {print ""}' | awk '{sub(/, $/,""); print}')"
fi;
#-------------------------------------------------------------------------------
echo ""
echo "HARDWARE:"
if [ -f /proc/cpuinfo ]; then
    # Linux: read CPU facts straight from procfs (no useless cat | grep).
    echo "CPU Speed: $(grep 'model name' /proc/cpuinfo | sed -e "s/.*@ *//" | head -n 1)"
    echo "CPU Cores: $(nproc)"
elif command -v sysctl >/dev/null 2>&1; then
    # macOS/BSD
    echo "CPU Speed: $(sysctl -n machdep.cpu.brand_string | sed -e "s/.*@ *//")"
    echo "CPU Cores: $(sysctl -n hw.ncpu)"
fi;
if [ -f /proc/meminfo ]; then
    # kB -> MB via shell arithmetic (replaces the external `expr`).
    echo "Memory: $(( $(awk '/MemTotal:/ {print $2}' /proc/meminfo) / 1024 )) MB"
elif command -v sysctl >/dev/null 2>&1; then
    echo "Memory: $(( $(sysctl -n hw.memsize) / 1024 / 1024 )) MB"
fi;
if [ -f /proc/uptime ]; then
    echo "System Uptime: $( __uptime )"
fi;
if command -v uptime >/dev/null 2>&1; then
    echo "Load Average: $(uptime | awk -F'load average:' '{ print $2 }' | sed 's/^ *//g')"
fi;
#-------------------------------------------------------------------------------
echo ""
echo "SHELLS:"
# Report version + path for every installed shell. `command -v` replaces the
# brittle `$(which X 2>&1) != *"no X"*` pattern; it prints the same path.
if command -v bash >/dev/null 2>&1; then
    echo "Bash shell $(bash --version 2>&1 | head -n 1 | sed -e "s/-release.*//" | sed -e "s/GNU bash, version //") ($(command -v bash))"
fi;
if command -v csh >/dev/null 2>&1; then
    echo "C-shell $(csh --version | sed -e "s/ (.*)//g") ($(command -v csh))"
fi;
if command -v fish >/dev/null 2>&1; then
    echo "Fish shell $(fish --version 2>&1 | sed -e "s/.*version //") ($(command -v fish))"
fi;
if command -v ksh >/dev/null 2>&1; then
    echo "Korn shell $(ksh --version 2>&1 | sed -e "s/.*) //") ($(command -v ksh))"
fi;
if command -v zsh >/dev/null 2>&1; then
    echo "Z-shell $(zsh --version | sed -e "s/ (.*//" | sed -e "s/zsh //") ($(command -v zsh))"
fi;
#-------------------------------------------------------------------------------
echo ""
hhvm=false
echo "RUNTIMES/COMPILERS:"
if command -v gcc >/dev/null 2>&1; then
    echo "GCC: $(gcc --version 2>/dev/null | head -n 1 | sed -e "s/Apple //" | sed -e "s/version //") ($(command -v gcc))"
fi;
if command -v go >/dev/null 2>&1; then
    echo "Golang: $(go version 2>&1 | sed -e "s/version go//" | awk '{print $2}') ($(command -v go))"
fi;
if command -v hhvm >/dev/null 2>&1; then
    # Remembered so PHP below is not double-reported (HHVM ships a php shim).
    hhvm=true
    echo "HHVM $(hhvm --version | head -n 1 | sed -e "s/HipHop VM //" | sed -e "s/ (.*//") ($(command -v hhvm))"
fi;
if command -v java >/dev/null 2>&1; then
    echo "Java $(java -version 2>&1 | head -n 2 | tail -n 1 | sed -e "s/.*build //" | tr -d ")" ) ($(command -v java))"
fi;
if command -v clang >/dev/null 2>&1; then # LLVM
    echo "LLVM/Clang: $(clang --version 2>/dev/null | head -n 1 | sed -e "s/Apple //" | sed -e "s/version //") ($(command -v clang))"
fi;
if command -v node >/dev/null 2>&1; then
    echo "Node.js $(node --version 2>&1) ($(command -v node))"
fi;
if command -v php >/dev/null 2>&1 && [[ $hhvm == false ]]; then
    echo "$(php --version 2>&1 | head -n 1 | sed -e "s/(cli).*//") ($(command -v php))"
fi;
# Every CPython variant uses the identical probe; iterate instead of
# repeating the block ten times (same output, same order).
for py in python python26 python2.6 python27 python2.7 python3 python34 python3.4 python35 python3.5; do
    if command -v "$py" >/dev/null 2>&1; then
        echo "$("$py" --version 2>&1) ($(command -v "$py"))"
    fi
done
if command -v ruby >/dev/null 2>&1; then
    echo "$(ruby --version | sed -e "s/(.*//" | sed -e "s/ruby/Ruby/") ($(command -v ruby))"
fi;
if command -v scalac >/dev/null 2>&1; then
    echo "Scala $(scalac -version 2>&1 | sed -e "s/.*version //" | sed -e "s/ -- .*//") ($(command -v scalac))"
fi;
if command -v swift >/dev/null 2>&1; then
    echo "Swift $(swift -version | head -n 1 | sed -e "s/.*version //") ($(command -v swift))"
fi;
#-------------------------------------------------------------------------------
echo ""
echo "VERSION CONTROL:"
if command -v cvs >/dev/null 2>&1; then
    echo "CVS $(cvs --version | head -n 2 | tail -n 1 | sed -e "s/.*CVS) //" | sed -e "s/ (.*//") ($(command -v cvs))"
fi;
if command -v git >/dev/null 2>&1; then
    echo "$(git version | sed -e "s/git version/Git/" | head -n 1) ($(command -v git))"
fi;
if command -v hg >/dev/null 2>&1; then
    echo "Mercurial $(hg --version | head -n 1 | sed -e "s/.*version //" | sed -e "s/)//") ($(command -v hg))"
fi;
if command -v svn >/dev/null 2>&1; then
    echo "Subversion $(svn --version | head -n 1 | sed -e "s/.*version //") ($(command -v svn))"
fi;
#-------------------------------------------------------------------------------
echo ""
echo "EDITORS:"
if command -v emacs >/dev/null 2>&1; then
    echo "$(emacs --version | head -n 1) ($(command -v emacs))"
fi;
if command -v nano >/dev/null 2>&1; then
    echo "$(nano --version | head -n 1 | sed -e "s/ (.*)//" | sed -e "s/^ *//") ($(command -v nano))"
fi;
if command -v vi >/dev/null 2>&1; then
    echo "$(vi --version | head -n 1 | sed -e "s/ (.*)//") ($(command -v vi))"
fi;
#-------------------------------------------------------------------------------
echo ""
echo "SOFTWARE:"
if command -v awk >/dev/null 2>&1; then
    echo "$(awk --version 2>&1 | head -n 1) ($(command -v awk))"
fi;
if command -v curl >/dev/null 2>&1; then
    echo "$(curl --version 2>&1 | head -n 1 | sed -e "s/ ([^\)]*)/:/") ($(command -v curl))"
fi;
if command -v docker-compose >/dev/null 2>&1; then
    echo "Docker Compose $(docker-compose --version | head -n 1 | sed -e "s/.*version:* //") ($(command -v docker-compose))"
fi;
if command -v docker-machine >/dev/null 2>&1; then
    echo "Docker Machine $(docker-machine --version | head -n 1 | sed -e "s/.*version //") ($(command -v docker-machine))"
fi;
if command -v docker-swarm >/dev/null 2>&1; then
    echo "Docker Swarm $(docker-swarm --version | head -n 1 | sed -e "s/.*version //") ($(command -v docker-swarm))"
fi;
# OpenSSL: prefer the package manager's record of the installed version.
if command -v openssl >/dev/null 2>&1 && command -v apt-get >/dev/null 2>&1; then
    echo "$(apt-cache show openssl | grep 'Version:' | head -n 1 | sed 's/Version:/OpenSSL/') ($(command -v openssl))"
elif command -v openssl >/dev/null 2>&1 && command -v yum >/dev/null 2>&1; then
    echo "OpenSSL $(yum list openssl 2>&1 | grep -i "openssl.x86_64" | awk '{print $2}') ($(command -v openssl))"
elif command -v openssl >/dev/null 2>&1; then
    # Bug fix: the original else-branch ran `openssl version` even when
    # openssl was not installed.
    echo "$(openssl version) ($(command -v openssl))"
fi;
if [[ $gnused == true ]]; then
    echo "$($sed --version 2>&1 | head -n 1) ($(command -v $sed))"
fi;
if command -v vagrant >/dev/null 2>&1; then
    echo "$(vagrant --version 2>&1 | head -n 1) ($(command -v vagrant))"
fi;
#-------------------------------------------------------------------------------
echo ""
echo "SERVICES:"
if command -v docker >/dev/null 2>&1; then
    echo "Docker $(docker --version | sed -e "s/.*version //" | sed -e "s/,.*//") ($(command -v docker))"
fi;
if command -v etcd >/dev/null 2>&1; then
    echo "etcd $(etcd --version | sed -e "s/.*version //") ($(command -v etcd))"
fi;
if command -v fleet >/dev/null 2>&1; then
    echo "Fleet $(fleet --version | sed -e "s/.*version //") ($(command -v fleet))"
fi;
if command -v httpd >/dev/null 2>&1; then
    echo "$(httpd -v | grep -i "Server version:" | sed -e "s/Server version: *//" | sed -e "s/Apache\//httpd /") ($(command -v httpd))"
fi;
if command -v mongo >/dev/null 2>&1; then
    echo "$(mongo --version | sed -e "s/ shell version://") ($(command -v mongo))"
fi;
if command -v mysql >/dev/null 2>&1; then
    echo "MySQL $(mysql --version | sed -e "s/.*Distrib *//" | sed -e "s/,.*//") ($(command -v mysql))"
fi;
if command -v nginx >/dev/null 2>&1; then
    echo "Nginx $(nginx -v 2>&1 | sed -e "s/nginx version: //" | sed -e "s/nginx\///") ($(command -v nginx))"
fi;
if command -v psql >/dev/null 2>&1; then
    echo "PostgreSQL $(psql -V | sed -e "s/.*) *//") ($(command -v psql))"
fi;
if command -v redis-server >/dev/null 2>&1; then
    echo "$(redis-server --version | sed -e "s/ server v=/ /" | sed -e "s/sha=.*//" | sed -e "s/ server version//" | sed -e "s/ (.*//") ($(command -v redis-server))"
fi;
if command -v rsyslogd >/dev/null 2>&1; then
    echo "$(rsyslogd -v 2>&1 | head -n 1 | sed -e "s/,.*//") ($(command -v rsyslogd))"
fi;
if command -v unicorn >/dev/null 2>&1; then
    echo "$(unicorn -v | sed -e "s/unicorn v/Unicorn /") ($(command -v unicorn))"
fi;
#-------------------------------------------------------------------------------
echo ""
echo "LOGGERS:"
# Installed syslog flavors by config-file name (sans /etc/ and .conf),
# using a glob instead of parsing `ls` output.
for conf in /etc/*syslog*.conf; do
    [ -e "$conf" ] || continue
    basename "$conf" .conf
done
#-------------------------------------------------------------------------------
echo ""
echo "PACKAGE MANAGERS:"
if command -v apt-get >/dev/null 2>&1; then
    echo "APT $(apt-get --version | head -n 1 | sed -e "s/apt //" | sed -e "s/ .*//") ($(command -v apt-get))"
fi;
if command -v bundler >/dev/null 2>&1; then
    echo "$(bundler -v | sed -e "s/ version//") ($(command -v bundler))"
fi;
if command -v composer >/dev/null 2>&1; then
    # GNU sed is required to strip composer's ANSI color escapes.
    if [[ $gnused == true ]]; then
        echo "$(composer --version)" | sed -e "s/ version//" | sed -e "s/ (.*)//" | $sed -r "s/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g"
    else
        echo "$(composer --version)" | sed -e "s/ version//" | sed -e "s/ (.*)//"
    fi;
fi;
if command -v brew >/dev/null 2>&1; then
    echo "Homebrew $(brew --version) ($(command -v brew))"
fi;
if command -v npm >/dev/null 2>&1; then
    echo "npm $(npm --version) ($(command -v npm))"
fi;
# All pip variants share one probe; iterate in the original order.
for pip in pip pip2 pip2.6 pip2.7 pip3 pip3.4 pip3.5; do
    if command -v "$pip" >/dev/null 2>&1; then
        echo "$("$pip" --version 2>&1 | sed -e "s/from.*(/(/") ($(command -v "$pip"))"
    fi
done
if command -v gem >/dev/null 2>&1; then
    echo "RubyGems $(gem --version) ($(command -v gem))"
fi;
# All easy_install variants share one probe.
for ez in easy_install easy_install-2.6 easy_install-2.7 easy_install-3.4 easy_install-3.5; do
    if command -v "$ez" >/dev/null 2>&1; then
        echo "$("$ez" --version) ($(command -v "$ez"))"
    fi
done
if command -v yum >/dev/null 2>&1; then
    echo "YUM $(yum --version | head -n 1) ($(command -v yum))"
fi;
echo "#-------------------------------------------------------------------------------"
| true
|
5d0f81071995fcbfd36b0a7bacbaed65d18ddd87
|
Shell
|
tabulon-ext/ellipsis
|
/scripts/shim.sh
|
UTF-8
| 640
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
EOF
# shim for ellipsis install.bash
#
# This is used on ellipsis.sh to bootstrap the full installer, which you
# otherwise can't curl and pipe to sh (as it requires bash). Not meant to be
# run standalone.

# wait for curl output to finish
sleep 0.5

# Ensure dependencies are installed.
deps="bash curl git"
for dep in $deps; do
    hash "$dep" 2>/dev/null || { echo >&2 "ellipsis requires $dep to be installed."; exit 1; }
done

# Download the full installer and execute it with bash.
# Bug fix: abort if the download fails instead of executing an
# empty/partial file.
installer="install-$$.bash"
curl -sL https://raw.githubusercontent.com/ellipsis/ellipsis/master/scripts/install.bash > "$installer" || { rm -f "$installer"; exit 1; }
bash "$installer"

# Clean up
rm -f "$installer"
| true
|
afc98601ce3fbf13fd673dd1c8968cdb6fa9664a
|
Shell
|
seckcoder/.seck_config
|
/bin/sed.sh
|
UTF-8
| 593
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Recursively rewrite a source tree with sed scripts:
#   sub.sed    -> applied to *.h and *.cc files
#   ybuild.sed -> applied to files named YBUILD
source_script=$PWD/sub.sed
ybuild_script=$PWD/ybuild.sed

process() {
    # Apply the matching sed script in place to one file ($1, absolute path).
    local file=$1
    if [ "${file##*.}" = "h" ] || [ "${file##*.}" = "cc" ]; then
        echo "processing $file.."
        sed -i -f "$source_script" "$file"
    elif [ "${file##*\/}" = "YBUILD" ]; then
        echo "processing $file.."
        sed -i -f "$ybuild_script" "$file"
    fi
}

iterate() {
    # Walk the current directory: process regular files, skip symlinks,
    # recurse into subdirectories. Globbing replaces the fragile `ls`
    # parsing, so names with whitespace survive.
    local entry
    for entry in *; do
        if [ -f "$entry" ]; then
            process "$PWD/$entry"
        elif [ -L "$entry" ]; then
            echo "escape link: $PWD/$entry"
        elif [ -d "$entry" ]; then
            cd "$entry" || continue
            iterate
            cd ..
        fi
    done
}

iterate
| true
|
284e70330355ddef321273ddca856313e43a3a01
|
Shell
|
david50407/plurk-privatify
|
/tools/join.sh
|
UTF-8
| 81
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Join arguments with a separator: $1 is the separator, the rest are items.
#   join.sh , a b c  ->  a,b,c
IFS="$1"
shift
# printf is used instead of `echo -n` so an item that looks like an echo
# option (-n, -e) is still printed literally.
printf '%s' "$1"
shift
# Prefix every remaining item with the separator (first char of IFS).
printf '%s' "${@/#/$IFS}"
| true
|
cb7e2a8b3bd3dccbe2578c1cd0e0d07206afb64b
|
Shell
|
pocketgroovy/voltage
|
/witches-server-configuration/scripts/StartServers.sh
|
UTF-8
| 1,053
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
###################################################################################################
###### This script is responsible for setting up a server for an app "Kisses and Curses" ######
###################################################################################################

# mongod is started directly because `systemctl start mongod` does not pick
# up the dbpath configured in /etc/mongod.conf in this environment.
mongod --dbpath /var/lib/mongo/ --fork --syslog || NOMONGOSTART=$?
# The NO* variables are set only on failure; quote and default them so the
# test is well-formed even when they are unset.
if [ -n "${NOMONGOSTART:-}" ]; then
    echo "-LOG- ERROR on starting mongo"
    exit 1
else
    echo "-LOG- Mongo started"
fi

systemctl start redis.service || NOREDISSTART=$?
if [ -n "${NOREDISSTART:-}" ]; then
    echo "-LOG- ERROR on starting redis"
    exit 1
else
    echo "-LOG- Redis started"
fi

systemctl start httpd.service || NOHTTPD=$?
if [ -n "${NOHTTPD:-}" ]; then
    echo "-LOG- ERROR on starting apache"
    exit 1
else
    echo "-LOG- apache started"
fi
| true
|
8437144a23d469d069c49b7b39e3f87e70f907fe
|
Shell
|
DebmalyaRay9989/Movies-Analytics-in-Spark-and-Scala
|
/Spark_RDD/Distinct_Genres/execute.sh
|
UTF-8
| 308
| 2.984375
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Clear any previous Spark output directory, run the distinct-genres job,
# then print the resulting part file.
if [ -d "result" ]; then
  echo "Removing existing directory"
  rm -r result
else
  echo "Executing File"
fi

spark-shell -i list_of_distinct_genres.scala

printf '\n\n'
echo "The list of distinct genres are"
echo ""
cat "result/part-00000"
echo ""
| true
|
e9b237bb1e8532aa0486994b39e54dc81a9d9d7c
|
Shell
|
ertanden/enmasse
|
/systemtests/scripts/setup_tck.sh
|
UTF-8
| 1,870
| 3.015625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Set up an EnMasse address space, groups and a TCK test user.
CURDIR=$(readlink -f "$(dirname "$0")")
source "${CURDIR}/test_func.sh"

CLI_ID=$1
DEFAULT_ADDRESS_SPACE=$2

#install prerequisites
sudo yum -y install patch jq

# Keycloak admin credentials extracted from the cluster secret.
oc extract secret/keycloak-credentials --confirm
USER=$(cat admin.username)
PASSWORD=$(cat admin.password)

#setup environment
# Bug fix: `return 1` is invalid at the top level of an executed script
# ("can only `return' from a function or sourced script"); use exit.
create_addres_space "${DEFAULT_ADDRESS_SPACE}" './systemtests/templates/tckAddressSpace.json' || exit 1
create_addresses "${DEFAULT_ADDRESS_SPACE}" './systemtests/templates/tckAddresses.json'

#setup user and groups
for grp in "send_#" "recv_#" "manage_#" "view_#" "monitor" "admin" "manage"; do
    create_group "${CLI_ID}" "${USER}" "${PASSWORD}" "${DEFAULT_ADDRESS_SPACE}" "$grp"
done

create_user "${CLI_ID}" "${USER}" "${PASSWORD}" "${DEFAULT_ADDRESS_SPACE}" './systemtests/templates/tckUser.json'
TCK_USER=$(jq -r '.username' ./systemtests/templates/tckUser.json)

for grp in "manage" "admin" "monitor" "view_#" "manage_#" "recv_#" "send_#"; do
    join_group "${CLI_ID}" "${USER}" "${PASSWORD}" "${DEFAULT_ADDRESS_SPACE}" "${TCK_USER}" "$grp"
done
| true
|
22010e334cfb959dc408db90a5d01a7289c3c696
|
Shell
|
gtheys/dotfiles
|
/script/bootstrap.sh
|
UTF-8
| 2,341
| 3.78125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# bootstrap installs things.

DOTFILES_ROOT="$(pwd)"

# Test whether a command exists on PATH.
# $1 - cmd to test; returns 0 if found, 1 otherwise.
type_exists() {
  if [ "$(type -P "$1")" ]; then
    return 0
  fi
  return 1
}
echo ''
echo 'Bootstrapping local environment'
echo ''

# Check If I need to fetch this submodule
# Use this for my colorschemes
echo 'Install/Update submodules'
if [ -d "$DOTFILES_ROOT/.config/base16-shell" ]; then
  git submodule update --recursive --quiet
  echo "Update Base16 colorscheme"
else
  git submodule update --recursive --init --quiet
  echo "Initialized base 16 colorscheme"
fi

source script/link.sh

# Before relying on Homebrew, check that packages can be compiled
if ! type_exists 'gcc'; then
  echo "The XCode Command Line Tools must be installed first."
  echo "Run in a terminal gcc and follow instructions for installation"
  echo "Then run: bash ~/.dotfiles/script/bootstrap"
  exit 1
fi

# Check for Homebrew
if ! type_exists 'brew'; then
  echo "Installing Homebrew..."
  ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi

# Check if git comes from Homebrew and install if needed
if brew list | grep --quiet git; then
  echo "Homebrew version of git already installed!"
else
  echo "Updating Homebrew..."
  brew update
  echo "Installing Git..."
  brew install git
  echo "New git version will be used after you close the terminal :("
fi

# run brew bundle and install most of our tools
echo "Updating Homebrew..."
brew bundle

# Install App store application
source "$DOTFILES_ROOT/script/mas.sh"

# After install setup default gems (used in my git hooks)
for gem in puppet puppet-lint bundler; do
  echo "$gem" >> "$(brew --prefix rbenv)/default-gems"
done

# After the install, setup fzf
# Bug fix: plain `echo` does not interpret \n escapes, so the original
# printed literal backslashes; use -e like the Neovim message below.
echo -e "\\n\\nRunning fzf install script..."
/usr/local/opt/fzf/install --all --no-bash --no-fish

# after the install, install neovim python libraries
echo -e "\\n\\nRunning Neovim Python install"
pip2 install --user neovim
pip3 install --user neovim

# Install App store application
source "$DOTFILES_ROOT/script/vscode.sh"

# Switch to Oh My ZSH
echo "Install Oh my ZSH"
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"

echo ''
echo 'All installed!'
| true
|
81475c253d22a638ab8140749463dbfddca8089e
|
Shell
|
lynnagara/dotfiles
|
/script/symlink-dotfiles.sh
|
UTF-8
| 175
| 3.1875
| 3
|
[] |
no_license
|
# Symlink each dotfile bundle from ~/dotfiles into the home directory.
files="vimrc vim gitconfig"
dir=~/dotfiles

# Abort rather than linking from the wrong directory.
cd "$dir" || exit 1
for file in $files; do
  echo "Creating symlink to $file in home directory."
  ln -s "$dir/$file/.$file" ~/."$file"
done
| true
|
8bc9ec0465682869992335dd5daf15f31e3ebaf6
|
Shell
|
sanderboer/IfcOpenShell-devenv
|
/scripts/build.sh
|
UTF-8
| 559
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build the project inside BUILD_DIR (configured by env.sh), generating the
# CMake build tree first if it does not exist yet.
# NOTE(review): relies on BUILD_DIR and UNAME being exported by env.sh —
# confirm against that file.

# Absolute path of the directory containing this script (symlink-safe cwd).
DIR="$( cd -P "$( dirname "$0" )" && pwd )"
source $DIR/env.sh

# (Disabled) regenerate .clang_complete from compiler flags / include paths.
# pushd ${ROOT_DIR}
# rm .clang_complete
# for i in ${COMPILER_FLAGS}; do
#     echo ${i} >> .clang_complete
# done
# for i in ${INC_PATHS}; do
#     echo ${i} >> .clang_complete
# done
# popd

echo "BUILD_DIR: ${BUILD_DIR}"
# First-time setup: let cmake-init.sh create the build directory.
if [ ! -d "${BUILD_DIR}" ]; then
    echo "no cmake build dir present, regenerating..."
    $DIR/cmake-init.sh;
fi

pushd ${BUILD_DIR}
echo "--- building on ${UNAME}"
# cmake --build . -j ${IFCOS_NUM_BUILD_PROC} --target install
# NOTE(review): -j is hard-coded to 4; IFCOS_NUM_BUILD_PROC (above) looks
# like the intended knob — confirm before changing.
cmake --build . -j 4
popd
| true
|
56f88d1ede7b6e26b4f2abf1da3da13280a065fb
|
Shell
|
kblancato/theia-net
|
/theia-net/classification/make_run.sh
|
UTF-8
| 1,338
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generate the SLURM/disBatch job files for one classification run.

RUN=0
SAMPLE="balanced_classification"
DATA="ts"

# hyperparameter file id
HYPER=97
# number of hyperparam combinations
NHYPER=144

declare -a PARAMS=("evolve")

for n in "${!PARAMS[@]}"
do
    echo "${PARAMS[$n]}"

    CODE_DIR="<path to code directory>"
    HOME_DIR="<path to home directory>"
    DIR="<path to save models>/${DATA}/${SAMPLE}/${PARAMS[$n]}/run${RUN}"

    # make data.sh (overwrite any previous version)
    cat > data_${SAMPLE}_${PARAMS[$n]}.sh <<EOF
#!/bin/bash
#SBATCH -p cca
#SBATCH -J job
#SBATCH -o <path to save output file>/output_data_${SAMPLE}_${PARAMS[$n]}.%j
#SBATCH -e <path to save output file>/error_data_${SAMPLE}_${PARAMS[$n]}.%j
#SBATCH -t 00-00:30:00

python3 ../data.py "${RUN}" "${CODE_DIR}" "${HOME_DIR}" "${SAMPLE}" \
       "${PARAMS[$n]}" "${DATA}"
sleep 30s
#END
EOF

    # make disbatch: one main.py line per hyperparameter index 0..NHYPER-1.
    # Bug fixes vs. the original:
    #   * "${param}" was undefined (the loop variable is PARAMS[$n]), so the
    #     disbatch file name was wrong;
    #   * NHYPER was decremented in-place each iteration, so a second PARAMS
    #     entry would have generated one line too few;
    #   * the file is truncated first so reruns do not append duplicates.
    last=$(( NHYPER - 1 ))
    : > disbatch_${SAMPLE}_${PARAMS[$n]}
    for i in $(seq 0 $last)
    do
        cat >> disbatch_${SAMPLE}_${PARAMS[$n]} <<EOF
python3 ./run/main.py $RUN "${DIR}" "${PARAMS[$n]}" "${HYPER}" $i
EOF
    done

    # make select (overwrite; log file names use PARAMS[$n], not ${param})
    cat > select_${SAMPLE}_${PARAMS[$n]}.sh <<EOF
#!/bin/bash
#SBATCH -p cca
#SBATCH -J job
#SBATCH -o <path to save output file>/output_choose_${SAMPLE}_${PARAMS[$n]}.%j
#SBATCH -e <path to save output file>/error_choose_${SAMPLE}_${PARAMS[$n]}.%j
#SBATCH -t 00-01:00:00

python3 ./run/select.py $RUN "${DIR}" "${PARAMS[$n]}" "${HYPER}"
EOF

done
| true
|
44642fbf1121b55a0f22cb97eaf7b1f8231339ed
|
Shell
|
GhaiethZouaghi/tunihack-workshop
|
/schema-manager/script.sh
|
UTF-8
| 1,796
| 3.09375
| 3
|
[] |
no_license
|
#! /bin/bash
# Create the `tunihack` keyspace and demo tables in Cassandra, then seed data.

DB_HOST=cassandra
DB_PORT=9042
DB_USER=dse
# Bug fix: DB_PASS=\"\" passed the literal two-character string `""` as the
# password; an empty password is what was intended (confirm against broker).
DB_PASS=""

# Keep the connection arguments in an array so the empty password survives
# as a single argument instead of vanishing during word splitting.
ARGS=("$DB_HOST" "$DB_PORT" -u "$DB_USER" -p "$DB_PASS")

#### Create keyspace
echo "[INFO] Creating keyspace"
query="CREATE KEYSPACE IF NOT EXISTS tunihack WITH replication = {'class': 'SimpleStrategy','replication_factor': 3 };"
cqlsh "${ARGS[@]}" -e "$query"

#### Create tables
echo "[INFO] Creating student table"
student="CREATE TABLE IF NOT EXISTS tunihack.student (
id INT,
name TEXT,
age INT,
specialization TEXT,
class LIST<TEXT>,
PRIMARY KEY (id)
);"
cqlsh "${ARGS[@]}" -e "$student"

echo "[INFO] Creating teacher table"
teacher="CREATE TABLE IF NOT EXISTS tunihack.teacher (
id INT,
name TEXT,
age INT,
class TEXT,
students LIST<INT>,
PRIMARY KEY (id)
);"
cqlsh "${ARGS[@]}" -e "$teacher"
echo "[INFO] Tables created successfully"

echo "[INFO] Loading student data"
query="INSERT INTO tunihack.student(id, name, age, specialization, class)
VALUES (0, 'Mohamed', 20, 'Math Physics', ['Math', 'Physics', 'Programming']);
INSERT INTO tunihack.student(id, name, age, specialization, class)
VALUES (1, 'Ghaieth', 28, 'Computer Science',['Machine Learning', 'Physics', 'Programming']);
INSERT INTO tunihack.student(id, name, age, specialization, class)
VALUES (2, 'Zied', 23, 'Chemistry', ['Programming', 'Chemistry']);"
cqlsh "${ARGS[@]}" -e "$query"

echo "[INFO] Loading teacher data"
query="INSERT INTO tunihack.teacher(id, name, age, class, students)
VALUES (0, 'Mark', 40, 'Math', [0]);
INSERT INTO tunihack.teacher(id, name, age, class, students)
VALUES (1, 'Greg', 50, 'Machine Learning', [1]);
INSERT INTO tunihack.teacher(id, name, age, class, students)
VALUES (2, 'Elias', 45, 'Programming', [0, 1, 2]);"
cqlsh "${ARGS[@]}" -e "$query"
echo "[INFO] Data Loaded successfully"
| true
|
791c62cbfb9f92d6f4364af7f9c7c8283d4ebe27
|
Shell
|
Tushar-W/ShellScript-Programming
|
/ProgrammingConstruct/RepetitionStatementExamples/ProgramsOnForLoop/CheckNumberIsPrimeOrNot.sh
|
UTF-8
| 540
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# Trial-division primality check: a number is prime iff it has exactly two
# divisors (1 and itself). 0 and 1 are special-cased.
# (Removed the stray -x trace flag that was left enabled in the shebang.)
read -p "Enter Number :" number;
count=0;
if [ "$number" != 0 ] && [ "$number" != 1 ];
then
    # Count every divisor of $number in 1..number.
    for (( tempNum=1; tempNum <= number; tempNum++ ))
    do
        if [ $((number%tempNum)) == 0 ];
        then
            count=$((count + 1))
        fi
    done
    if [ "$count" == 2 ]
    then
        echo "$number Is prime"
    else
        echo "$number Is Not Prime"
    fi
else
    if [ "$number" == 0 ]
    then
        echo "0 Is Not Prime Number"
    else
        echo "1 Is Neither Prime Nor Composite Number"
    fi
fi
| true
|
33a0458a79dc00658d8ebe3b567f1fe0f9c53a25
|
Shell
|
facebook/buck
|
/bin/buck
|
UTF-8
| 784
| 3.578125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Resolve symlinks if necessary, otherwise madness will follow.
# On a GNU system, we could use "-f" to follow all symlinks. BSD based
# systems don't have this flag. *sigh*
# http://stackoverflow.com/a/246128
# https://gist.github.com/tvlooy/cbfbdb111a4ebad8b93e
function abs_script_dir_path {
  # Print the absolute directory of this script, following any chain of
  # symlinks to the real file.
  SOURCE=${BASH_SOURCE[0]}
  while [ -h "$SOURCE" ]; do
    # Quote the dirname expansion: unquoted it word-splits on spaces.
    DIR=$( cd -P "$( dirname "$SOURCE" )" && pwd )
    SOURCE=$(readlink "$SOURCE")
    # A relative link target is resolved against the directory of the link.
    [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
  done
  DIR=$( cd -P "$( dirname "$SOURCE" )" && pwd )
  echo "$DIR"
}
# Directory containing this wrapper script (symlinks resolved).
BUCK_DIR=$(abs_script_dir_path "$0")
# First available interpreter, preferring python2.7 over python2 over python.
PYTHON=$(command -v python2.7 python2 python | head -1)
# Put nailgun and the repo root on PYTHONPATH, then replace this shell with buck.py.
PYTHONPATH="$BUCK_DIR"/../third-party/nailgun:"$BUCK_DIR"/.. exec $PYTHON "$BUCK_DIR"/../programs/buck.py "$@"
| true
|
de31b1c6d9e46ccec45e2aa86f52a4000524acf9
|
Shell
|
seocahill/ember-fastboot-pwa
|
/scripts/make-dev-cert.sh
|
UTF-8
| 662
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/sh -
# docker run --rm -v /tmp/certs:/certs -e SSL_SUBJECT=test.seocahill.dev paulczar/omgwtfssl
# Generate a self-signed wildcard certificate for *.seocahill.dev in ./certs.

# Bug fix: when ./certs already existed, `mkdir certs && cd certs` skipped
# the cd and scattered the key material into the current directory. Also
# abort if the directory cannot be entered.
mkdir -p certs && cd certs || exit 1

cat > openssl.cnf <<-EOF
[req]
distinguished_name = req_distinguished_name
x509_extensions = v3_req
prompt = no
[req_distinguished_name]
CN = *.seocahill.dev
[v3_req]
keyUsage = keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = *.seocahill.dev
DNS.2 = seocahill.dev
EOF

# NOTE(review): -sha1 is obsolete for new certificates; kept to preserve
# existing behavior, but consider -sha256.
openssl req \
  -new \
  -newkey rsa:2048 \
  -sha1 \
  -days 3650 \
  -nodes \
  -x509 \
  -keyout ssl.key \
  -out ssl.crt \
  -config openssl.cnf

rm openssl.cnf
cd ..
| true
|
c1b3bdda8963331cc8ee9f71ad58820cb799114b
|
Shell
|
rohitraj29/example-project-nist-fingerprint-matching
|
/match.sh
|
UTF-8
| 1,605
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Driver for the NBIS fingerprint-matching Spark pipeline; runs one stage
# per invocation, stamping a hidden marker file so stages run only once.
source /etc/profile

usage() {
    cat <<EOF
Usage: $0 load-images|ridge-detection|subsample|match
EOF
}

# Load the NIST fingerprint images into HBase.
load-images() {
    spark-submit \
        --master yarn \
        --deploy-mode cluster \
        --driver-class-path $(hbase classpath) \
        --class LoadData \
        target/scala-2.10/NBIS-assembly-1.0.jar \
        /tmp/nist/NISTSpecialDatabase4GrayScaleImagesofFIGS/sd04/sd04_md5.lst
    touch .load-images
}

# Run mindtct (minutiae extraction / ridge detection) over the images.
ridge-detection() {
    spark-submit \
        --master yarn \
        --deploy-mode cluster \
        --driver-class-path $(hbase classpath) \
        --class RunMindtct \
        target/scala-2.10/NBIS-assembly-1.0.jar
    touch .ridge-detection
}

# Sample probe (0.1%) and gallery (1%) subsets for matching.
subsample() {
    spark-submit \
        --master yarn \
        --deploy-mode cluster \
        --driver-class-path $(hbase classpath) \
        --class RunGroup \
        target/scala-2.10/NBIS-assembly-1.0.jar \
        probe 0.001 \
        gallery 0.01
    touch .subsample
}

# Match the probe set against the gallery with BOZORTH3.
match() {
    spark-submit \
        --master yarn \
        --deploy-mode cluster \
        --driver-class-path $(hbase classpath) \
        --class RunBOZORTH3 \
        target/scala-2.10/NBIS-assembly-1.0.jar \
        probe gallery
}

# Run stage "$1" at most once: skip when its ".<stage>" marker exists, and
# create the marker only after the stage succeeds. (Renamed from _FIXME.)
run_once () {
    local fn=$1
    test -f ".$fn" && return
    "$fn" && touch ".$fn"
}

cd "$HOME"
pwd
ls

case "$1" in
    load-images)
        run_once load-images
        ;;
    ridge-detection)
        run_once ridge-detection
        ;;
    subsample)
        run_once subsample
        ;;
    match)
        run_once match
        ;;
    *)
        usage
        exit 1
        ;;
esac
| true
|
249b6ae656479414e3d05b82bffe5807db2cf768
|
Shell
|
more456/Android-sdk-setup-ubuntu
|
/scripts/android_sdk_frame.sh
|
UTF-8
| 2,847
| 3.90625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# set -e
# to run this script on remote server
# ssh root@host_ip_address 'bash -s' < android_sdk_framer.sh

# Running step counter for the progress banners printed below.
counter=0

function printAndSleep() {
    # Print a numbered section banner ($1), then pause for $2 seconds
    # (default 1) so the streamed remote output stays readable.
    ((counter++))
    echo "
########################################################
$counter: $1
########################################################"
    if [ -z "${2:-}" ]; then
        sleep 1
    else
        sleep "$2"
    fi
}
# update apt
printAndSleep "Updating apt"
apt-get -y update
# install jdk8
JAVA_VER=$(java -version 2>&1 | sed -n ';s/.* version "\(.*\)\.\(.*\)\..*"/\1\2/p;')
if [ "$JAVA_VER" -ge 18 ]; then
printAndSleep "Java $JAVA_VER Found"
else
printAndSleep "Installing OpenJdk 8"
apt-get -y install openjdk-8-jdk
fi
# First run: download and unpack the SDK tools, accept licenses, install the
# package set, and persist ANDROID_HOME_SCRIPT/PATH in ~/.bashrc.
# Subsequent runs: just check for sdkmanager updates.
if [ ! -d "$HOME/android-sdk/" ]; then
# install unzip tool
printAndSleep "installing unzip tool"
apt-get -y install unzip
# download android linux sdk tools
printAndSleep "Downloading linux android sdk tools"
wget -nc -O sdk-tools-linux.zip https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip
# create sdk directory
mkdir -p android-sdk
# unzip android linux sdk tools
printAndSleep "unzipping linux android sdk tools"
unzip -o sdk-tools-linux.zip -d android-sdk
# BUG FIX: ensure ~/.android exists before appending, otherwise the
# redirection below fails on a fresh machine.
mkdir -p "$HOME/.android"
[ ! -f "$HOME/.android/repositories.cfg" ] && echo "### User Sources for Android SDK Manager" >>"$HOME/.android/repositories.cfg" && echo "count=0" >>"$HOME/.android/repositories.cfg"
# navigate into our directory; abort rather than operate on the wrong dir
cd android-sdk/ || exit 1
printAndSleep "Accepting Android SDK licenses"
mkdir -p licenses/
echo -e "\n8933bad161af4178b1185d1a37fbf41ea5269c55" >"licenses/android-sdk-license"
echo -e "\n84831b9409646a918e30573bab4c9c91346d8abd" >"licenses/android-sdk-preview-license"
printAndSleep "Installing android sdk packages"
declare -a arr=("build-tools;25.0.3" "cmake;3.6.3155560" "extras;android;m2repository" "extras;google;google_play_services" "extras;google;m2repository" "extras;m2repository;com;android;support;constraint;constraint-layout-solver;1.0.2" "extras;m2repository;com;android;support;constraint;constraint-layout;1.0.2" "lldb;2.3" "patcher;v4" "platform-tools" "platforms;android-25")
for package in "${arr[@]}"; do
printAndSleep "Installing $package"
./tools/bin/sdkmanager "$package"
done
printAndSleep "Checking for Android SdkManager updates..."
./tools/bin/sdkmanager --update
printAndSleep "Adding ANDROID_HOME_SCRIPT to environmental variables"
ANDROID_HOME_SCRIPT=$(pwd)
echo "" >>~/.bashrc
echo "export ANDROID_HOME_SCRIPT=\"$(pwd)\"" >>~/.bashrc
echo "" >>~/.bashrc
echo "export PATH=\"${PATH}:${ANDROID_HOME_SCRIPT}/tools/:${ANDROID_HOME_SCRIPT}/platform-tools/\"" >>~/.bashrc
echo -n "You need to logout to environmental variables take effect."
read -n 1 -s -p "Press any key to continue"
else
printAndSleep "Checking for Android SdkManager updates..."
# BUG FIX: the original ran ".$ANDROID_HOME_SCRIPT/tools/bin/sdkmanager" —
# a stray leading dot, and ANDROID_HOME_SCRIPT is only exported via
# ~/.bashrc so it is unset in a fresh shell.  Use the known install path.
"$HOME/android-sdk/tools/bin/sdkmanager" --update
fi
| true
|
8b8da64b6a6e46e40a8956e258f163d402578273
|
Shell
|
wikimedia/WikiContrib
|
/deploy/deploy_frontend.sh
|
UTF-8
| 663
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Deploy the WikiContrib frontend to a Toolforge-style host: clone the repo,
# build the static site, publish it under ~/public_html/ and restart lighttpd.
# Requires DEPLOY_USERNAME, DEPLOY_HOST and DEPLOY_FRONTEND in the environment;
# the commands in the heredoc below execute on the remote host, not locally.
ssh -o "StrictHostKeyChecking=no" -i /tmp/deploy_rsa -A ${DEPLOY_USERNAME}@${DEPLOY_HOST} << EOT
become ${DEPLOY_FRONTEND}
rm -rf ~/public_html/
git clone https://github.com/wikimedia/WikiContrib
echo "Cloned the repo"
cp -r WikiContrib/frontend/WikiContrib-Frontend/. dep/
rm -rf WikiContrib/
cd dep/
mv dist/ src/dist/
npm run build
echo "Creating the build folder"
mv src/dist/ dist/
cd ../
mkdir public_html/
cp -r dep/build/. public_html/
cd dep/
rm -rf build/ package.json package-lock.json public/ README.md src/ Install.md
echo "Performed all the shell operations."
webservice --backend=gridengine --release buster lighttpd restart
EOT
| true
|
aff41289a873cf438f00f83a49254556751e7d22
|
Shell
|
fbaileyjr/high_availability_linux
|
/.svn/pristine/af/aff41289a873cf438f00f83a49254556751e7d22.svn-base
|
UTF-8
| 170
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Report whether the firewalld daemon appears in the process table.
service=firewalld
# Count matching processes; the extra "grep -v grep" drops our own grep.
matches=$(ps -ef | grep -v grep | grep $service | wc -l)
if (( matches > 0 )); then
    echo "$service is running!!!"
else
    echo "$service is NOT running!!!"
fi
| true
|
c74f15b4c0b504bda3f51dcf7ed9364c935303ed
|
Shell
|
lappsgrid-incubator/discovery-course
|
/scripts/clone
|
UTF-8
| 588
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch or update the set of lappsgrid-incubator repositories used by the
# discovery course, each checked out on a specific branch.
set -eu
GITHUB=https://github.com/lappsgrid-incubator
# Clone $2 into directory $1 on branch $3, or pull if the checkout exists.
#   $1 - local directory name
#   $2 - repository name (with .git suffix)
#   $3 - branch to check out / pull
function clone {
dir=$1
repo=$2
branch=$3
if [ -e $dir ] ; then
cd $dir
git pull origin $branch
else
git clone $GITHUB/$repo $dir
cd $dir
git checkout $branch
fi
# make
# make push
cd ../
}
clone vassar docker-vassar.git master
clone brandies docker-brandeis.git master
clone gigaword docker-gigaword.git discovery
clone manager docker-service-manager.git discovery
clone galaxy docker-galaxy-lappsgrid.git discovery
clone evaluation docker-oaqa.git master
echo "All Docker images have been built."
| true
|
1923380596cc34dee5592792c09815b9e3aec0a8
|
Shell
|
cbdavide/competitive-programming-tools
|
/runner
|
UTF-8
| 971
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Compile a competitive-programming solution and run it against its input
# files.  Expects ./<problem>/<problem>.cpp and test inputs named
# <problem>/<problem>_*.input.
PREPROCESSOR_VAR="CBDAVIDES"
BAR="----------------------------------------------------------------"
PROBLEM=$1
if [ ! -d "$PROBLEM" ]; then
echo "Error: Problem $PROBLEM does not exits in the current folder."
exit 1
fi
echo "Compilando source code of problem $PROBLEM..."
echo $BAR
BINARY="$PROBLEM/a.out"
CODE="$PROBLEM/$PROBLEM.cpp"
g++ "$CODE" --std=c++14 -D$PREPROCESSOR_VAR -o "$BINARY"
COMPILE_CODE=$?
if [[ $COMPILE_CODE -ne 0 ]]; then
echo 'Compilation error'
echo $BAR
exit 1
fi
echo "Running Code"
echo $BAR
# BUG FIX: the original glob "$PROBLEM/$PROBLEM_*.input" expanded the
# undefined variable PROBLEM_ and therefore matched every *.input file;
# braces make the intended "<problem>_" prefix explicit.
for test_case in "$PROBLEM"/"${PROBLEM}"_*.input; do
echo "$test_case"
timeout 1s ./"$BINARY" < "$test_case"
EXECUTION_CODE=$?
if [[ $EXECUTION_CODE -ne 0 ]]; then
echo -e '\nExecution interrupted, due some of the following reasons:'
echo -e ' * Runtime Error'
echo -e ' * Time Limit Exceeded'
echo $BAR
exit 1
fi
done
echo $BAR
echo "END OF OUTPUT"
| true
|
ed5391d4c817b1e390c391bbd554f1d857223b28
|
Shell
|
EMBL-EBI-TSI/veewee
|
/templates/Debian-5.0.8-amd64-netboot/cleanup.sh
|
UTF-8
| 375
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
# Post-install cleanup for a Debian VM template (run once after building).
# Remove items used for building, since they aren't needed anymore
apt-get -y remove linux-headers-$(uname -r) build-essential
apt-get -y autoremove
# Removing leftover leases and persistent rules
echo "cleaning up dhcp leases"
rm /var/lib/dhcp3/*
# Delay interface bring-up so dhclient does not race the link coming up.
echo "Adding a 2 sec delay to the interface up, to make the dhclient happy"
echo "pre-up sleep 2" >> /etc/network/interfaces
| true
|
8c8b8f41da1ac63f218477d9839cebc2687b2e38
|
Shell
|
wangjf8090/MIRTT
|
/bilinear_method/run.bash
|
UTF-8
| 248
| 3
| 3
|
[] |
no_license
|
# The name of this experiment: model_dataset
name=$1
# Save logs and models
output=result/$name
# Quote the path so an experiment name containing spaces cannot word-split.
mkdir -p "$output"
# See Readme.md for option details.
# "${@:6}" (quoted) forwards any extra options verbatim, preserving spacing.
CUDA_VISIBLE_DEVICES=$2 python ./main.py --mod $3 --dataset $4 --model $5 --output "$output" "${@:6}"
| true
|
e7e30d6509582322a076689de30bc744ffa53465
|
Shell
|
ivan-c/rpi4b-debootstrap
|
/debootstrap-rpi4.sh
|
UTF-8
| 2,025
| 3.203125
| 3
|
[] |
no_license
|
#! /bin/sh -e
set -x
# Build a Raspberry Pi 4 Debian buster image: allocate rpi4.img, attach it to
# a loop device, partition it (FAT32 boot + ext4 root), debootstrap, then
# provision via schroot/chroot.
if [ ! -f rpi4.img ]; then
fallocate --length 3GiB rpi4.img
fi
loopback_devices=$(losetup -l --noheading)
if [ -z "$loopback_devices" ]; then
losetup -f -P rpi4.img
fi
loopback_device=$(losetup -l | grep rpi4 | awk '{print $1}')
parted --script --align optimal "$loopback_device" -- \
mklabel msdos \
mkpart primary fat32 1 128MiB \
mkpart primary ext4 128MiB 100% set 1 boot
# BUG FIX: the filesystems and mounts previously hard-coded /dev/loop0p1 and
# /dev/loop0p2, which breaks whenever the image attaches to any other loop
# device; derive the partition nodes from the detected device instead.
boot_part="${loopback_device}p1"
root_part="${loopback_device}p2"
mkfs.vfat -F 32 "$boot_part"
mkfs.ext4 "$root_part"
test -d /mnt/sd || mkdir -p /mnt/sd
mount "$root_part" /mnt/sd
test -d /mnt/sd/boot || mkdir /mnt/sd/boot
mount "$boot_part" /mnt/sd/boot
# debootstrap --arch arm64 buster /mnt/sd
qemu-debootstrap --arch=arm64 --keyring /usr/share/keyrings/debian-archive-keyring.gpg --variant=buildd --exclude=debfoster buster /mnt/sd http://ftp.debian.org/debian
echo done running debootstrap
cat << EOF > /mnt/sd/etc/apt/sources.list
# deb http://http.us.debian.org/debian buster main
deb http://http.us.debian.org/debian buster main non-free
deb-src http://http.us.debian.org/debian buster main non-free
deb http://security.debian.org/debian-security buster/updates main non-free
deb-src http://security.debian.org/debian-security buster/updates main non-free
# buster-updates, previously known as 'volatile'
deb http://http.us.debian.org/debian buster-updates main non-free
deb-src http://http.us.debian.org/debian buster-updates main non-free
EOF
# mount -t devpts -o gid=5,mode=620 devpts /mnt/sd/dev/pts
mkdir -p /mnt/sd/tmp/
cp provision.sh /mnt/sd/usr/bin/
cp provision-ansible.sh /mnt/sd/usr/bin/
# pass proxy to chroot
if [ -n "$http_proxy" ]; then
proxy_vars="http_proxy=${http_proxy}"
fi
# reuse given http proxy
schroot --chroot debootstrap-rpi4 -u root -- sh -c "${proxy_vars} provision.sh"
schroot --chroot debootstrap-rpi4 -u root -- sh -c "${proxy_vars} provision-ansible.sh"
cp provision-boot.sh /mnt/sd/usr/bin/
chroot /mnt/sd/ /usr/bin/provision-boot.sh
# remove file identifying as chroot
rm /mnt/sd/etc/debian_chroot
| true
|
d3e737cae88fbc720aa6d36a5b867a673837351c
|
Shell
|
bestclover/mirrors.cug.edu.cn
|
/test/shell/stat_watcher.sh
|
UTF-8
| 365
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Spawn stat.sh every 10 seconds, using a lock file so that only one watcher
# instance runs at a time.  The lock is removed on exit via the trap below.
source $(cd `dirname $0`; pwd)/../sync.conf
STAT_WATCHER_LOCK="$VAR_DIR"stat.watching.lock
if [ -e "$STAT_WATCHER_LOCK" ] ; then
exit 0
else
touch "$STAT_WATCHER_LOCK"
chmod 600 "$STAT_WATCHER_LOCK"
fi
# BUG FIX: signal 9 (SIGKILL) can never be trapped, so it was dropped from
# the list; EXIT(0), HUP(1), INT(2), QUIT(3) and TERM(15) remain.
trap "rm -f ${STAT_WATCHER_LOCK}; exit 0" 0 1 2 3 15
while true
do
"$SYNC_ROOT"shell/stat.sh &
sleep 10s
done
| true
|
a1f3dd8c4af5db95a5700e8d4944fc87c0768cdb
|
Shell
|
caguerra/Burkardt-Fortran-90
|
/f_src/ornstein_uhlenbeck_test/ornstein_uhlenbeck_test.sh
|
UTF-8
| 546
| 2.765625
| 3
|
[] |
no_license
|
#! /bin/bash
#
# Compile ornstein_uhlenbeck_test.f90, link it against the prebuilt library
# object, run the test, and render the plots with gnuplot.
# BUG FIX: each error path previously used a bare "exit", which returns the
# status of the preceding echo (0) and so reported success on failure.
gfortran -c -Wall ornstein_uhlenbeck_test.f90
if [ $? -ne 0 ]; then
echo "Compile error."
exit 1
fi
#
gfortran ornstein_uhlenbeck_test.o $HOME/lib/ornstein_uhlenbeck.o
if [ $? -ne 0 ]; then
echo "Load error."
exit 1
fi
rm ornstein_uhlenbeck_test.o
#
mv a.out ornstein_uhlenbeck_test
./ornstein_uhlenbeck_test > ornstein_uhlenbeck_test.txt
if [ $? -ne 0 ]; then
echo "Run error."
exit 1
fi
rm ornstein_uhlenbeck_test
#
gnuplot < ou_euler_commands.txt
gnuplot < ou_euler_maruyama_commands.txt
#
echo "Normal end of execution."
| true
|
608b247ad4101493e3d2029a4f426bdfe0f55a47
|
Shell
|
petronny/aur3-mirror
|
/arm-linux-gnueabi-eglibc-headers/PKGBUILD
|
UTF-8
| 1,723
| 3
| 3
|
[] |
no_license
|
# Contributor: Alexander 'hatred' Drozdov <adrozdoff@gmail.com>
# Maintainer: Erico Nunes <nunes dot erico at gmail>
_target="arm-linux-gnueabi"
pkgname=${_target}-eglibc-headers
pkgver=2.19
pkgrel=1
pkgdesc="Embedded GNU C Library. Stage 1 - headers only. Needed for initialy build toolchain (${_target})"
arch=(any)
url="http://www.eglibc.org"
license=('GPL')
depends=("${_target}-linux-api-headers")
makedepends=("${_target}-gcc-stage1" 'subversion')
options=(!strip)
source=(eglibc-${pkgver}::svn://svn.eglibc.org/branches/eglibc-${pkgver/./_}/libc)
md5sums=('SKIP')
# Stage-1 headers-only eglibc build for the arm-linux-gnueabi cross toolchain:
# configure for the target, then build just the csu startup objects.
build() {
cd "$srcdir"
cd eglibc-${pkgver}
# Do not build timezones
sed -i 's/timezone rt/rt/' Makeconfig
rm -rf build
mkdir build
cd build
export CFLAGS="-U_FORTIFY_SOURCE -mlittle-endian -msoft-float -O2"
unset LD_LIBRARY_PATH
# BUILD_CC compiles host-side helpers; CC/AR/RANLIB target the cross tools.
export BUILD_CC=gcc
export CC=${_target}-gcc
export AR=${_target}-ar
export RANLIB=${_target}-ranlib
../configure \
--target=${_target} \
--host=${_target} \
--build=${CHOST} \
--prefix=/ \
--with-headers=/usr/${_target}/include \
--enable-add-ons=libidn,ports,nptl \
--disable-profile \
--without-gd \
--without-cvs
# make some libs and stubs
make csu/subdir_lib
}
# Install bootstrap headers plus the crt objects and a stub libc.so, which is
# all a stage-1 cross gcc needs to build the full libc later.
package() {
cd "$srcdir/eglibc-$pkgver/build"
make install-headers "install_root=${pkgdir}/usr/${_target}" \
install-bootstrap-headers=yes
mkdir -p "${pkgdir}/usr/${_target}/lib"
cp csu/crt1.o csu/crti.o csu/crtn.o "${pkgdir}/usr/${_target}/lib"
# create stub lib
${_target}-gcc -nostdlib -nostartfiles -shared -x c /dev/null \
-o "${pkgdir}/usr/${_target}/lib/libc.so"
mkdir -p "${pkgdir}/usr/${_target}/usr"
ln -s ../{include,lib} "${pkgdir}/usr/${_target}/usr"
}
| true
|
863cb2cbd95155acec787d10ccaeecf4d5c3fe9b
|
Shell
|
yanivyam/vm-bakery
|
/build_vm_image.sh
|
UTF-8
| 6,517
| 4.1875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# Creates VM image
#
# Requires the following environment variables to be set:
#
# PROVIDER - hypervisor to use: kvm, virtualbox, vmware
# OSNAME - target operating system name (CentOS, OracleLinux, etc)
# OSVERSION - target OS version (example: 7.1, 6.5)
# OSBUILDVERSION - target OS build version (example: 5123)
#
# Optional:
# VAGRANT - if set to true, will creates a vagrant box for VirtualBox
# SKIPBUILD - Set to 1 to skip running Packer. Useful for debugging
# Print an error message to stderr and abort the script with status 1.
#   $1 - message text
# Fix: quote the argument (printf '%s') so the message is not word-split or
# glob-expanded as it was with the original unquoted "echo $1".
error_msg()
{
printf '%s\n' "$1" >&2
exit 1
}
SCRIPT_DIR=$(dirname $0)
# Go to the script's directory
echo "Changing dir to ${SCRIPT_DIR}"
cd "${SCRIPT_DIR}" || error_msg "Unable to chdir to ${SCRIPT_DIR}"
if [ ${SCRIPT_DIR} == '.' ]; then
SCRIPT_DIR=`pwd`
fi
WORK_DIR=${SCRIPT_DIR}/work
if [ -z "${PROVIDER}" ]; then
error_msg "Please supply PROVIDER environment variable"
fi
if [ -z "${OSNAME}" ]; then
error_msg "Please supply OSNAME environment variable"
fi
if [ -z "${OSVERSION}" ]; then
error_msg "Please supply OSVERSION environment variable"
fi
if [ -z "${OSBUILDVERSION}" ]; then
error_msg "Please supply OSBUILDVERSION environment variable"
fi
source ./settings.sh
# print some info
echo "######################################################################"
echo ""
echo "Provider: ${PROVIDER}"
echo "Target OS: ${OSNAME}, version: ${OSVERSION}, build: ${OSBUILDVERSION}"
echo ""
if [ "${VAGRANT}" == 'true' ]; then
echo "Output: Vagrant box for Virtualbox"
else
echo "Output: image file for ${PROVIDER}"
fi
echo ""
echo "######################################################################"
echo "Note for running builds on KVM VM:"
echo "Please ensure that you have enabled running KVM inside the VM this"
echo "script runs on."
echo "For more details, see:"
echo "https://fedoraproject.org/wiki/How_to_enable_nested_virtualization_in_KVM"
echo ""
echo "The simplest way to do this is open the VM settings in virt-manager"
echo "click on Processor, and select Copy the host CPU configuration to the VM's CPU"
echo "######################################################################"
echo ""
# build version for the image by TMS
# the version will be the Jenkins build number if exists
# otherwise, will default to "SNAPSHOT"
if [ -z "${BUILD_NUMBER}" ]; then
IMAGE_BUILD_VERSION='SNAPSHOT'
else
IMAGE_BUILD_VERSION="${BUILD_NUMBER}"
fi
# Figure out the major OS version
IFS="." read -a osversion_array <<< "${OSVERSION}"
OSFAMILY="NA"
case "${OSNAME}" in
'CentOS')
OSFAMILY="redhat"
;;
'OracleLinux')
OSFAMILY="redhat"
;;
*)
# no OS family for this Linux distribution
OSFAMILY="NA"
;;
esac
echo "OS Family detected: ${OSFAMILY}"
OSMAJORVERSION=${osversion_array[0]}
echo "OS Major version detected: ${OSMAJORVERSION}"
# Figure out which Packer configuration file to use based on the OSNAME
# 1. ExactOSName-ExactVersion-Provider.json
# 2. ExactOSName-MajorVersion-Provider.json
# 3. OSFamilyName-ExactVersion-Provider.json
# 4. OSFamilyName-MajorVersion-Provicer.json
#
PACKER_CONFS_ARRAY[0]="${OSNAME}${OSVERSION}-${PROVIDER}.json"
PACKER_CONFS_ARRAY[1]="${OSNAME}${OSMAJORVERSION}.x-${PROVIDER}.json"
if [ ${OSFAMILY} != "NA" ]; then
PACKER_CONFS_ARRAY[2]="${OSFAMILY}${OSVERSION}-${PROVIDER}.json"
PACKER_CONFS_ARRAY[3]="${OSFAMILY}${OSMAJORVERSION}.x-${PROVIDER}.json"
fi
PACKER_CONF=""
for i in "${PACKER_CONFS_ARRAY[@]}"
do
echo "Searching for Packer configuration ${i}.."
if [ -f ${i} ]; then
PACKER_CONF="$i"
echo "found $i !"
break
fi
done
if [ -z "${PACKER_CONF}" ]; then
error_msg "Could not find suitable Packer configuration for this OS and version, or alternatively for the OS family"
fi
BUILD_DIR=${WORK_DIR}/build
VAGRANT_BUILD_DIR=${WORK_DIR}/buildvagrant
# name of OS iso file:
SOURCE_ISO_NAME="${OSNAME}-${OSVERSION}-${OSBUILDVERSION}.iso"
# location of OS iso file on disk (optional for installations from laptop)
SOURCE_ISO_PATH="${SCRIPT_DIR}/source_iso"
echo "Looking for ISO in ${SOURCE_ISO_PATH}/${SOURCE_ISO_NAME}..."
# location of OS iso file on repository (used by Jenkins when running on a builder server)
SOURCE_ISO_URL="${SETTINGS_SOURCE_ISO_HTTP_SERVER}/${OSNAME}/${OSVERSION}-${OSBUILDVERSION}/${SOURCE_ISO_NAME}"
echo "Detecting which MD5 location to use"
if [ -f "${SOURCE_ISO_PATH}/${SOURCE_ISO_NAME}" ]; then
# Full absolute path must be used here, otherwise Packer will not be able to find the md5 file
SOURCE_ISO_MD5_URL="file://${SOURCE_ISO_PATH}/${SOURCE_ISO_NAME}.md5"
else
SOURCE_ISO_MD5_URL="${SOURCE_ISO_URL}.md5"
fi
echo "Using MD5 from ${SOURCE_ISO_MD5_URL}"
# clean up the build directory
if [ "${SKIPBUILD}" != "1" ]; then
echo "Cleaning build directories"
rm -rf ${BUILD_DIR}
rm -rf ${VAGRANT_BUILD_DIR}
fi
# run Packer
# BUG FIX: the original assigned the literal string "SETTINGS_PACKER_BIN"
# (missing "$"), so the configured path from settings.sh was never used and
# the fallback "packer" was always taken.
PACKER_BIN="${SETTINGS_PACKER_BIN}"
if [ ! -x "${PACKER_BIN}" ]; then
# if Packer is not in its default path, then try to run it from somewhere else
# (good when running on laptop)
PACKER_BIN="packer"
fi
export OSNAME
export OSVERSION
export OSBUILDVERSION
export IMAGE_BUILD_VERSION
export SOURCE_ISO_NAME
export SOURCE_ISO_PATH
export SOURCE_ISO_URL
export SOURCE_ISO_MD5_URL
# Packer env vars
export PACKER_LOG=1
# we must change Packer temp dir, otherwise it will use /tmp which may not have enough space
export TMPDIR="${WORK_DIR}/tmp"
mkdir -p "${TMPDIR}" || error_msg "Unable to create temp dir in ${TMPDIR}"
if [ "${SKIPBUILD}" != "1" ]; then
${PACKER_BIN} build ./${PACKER_CONF}
RES=$?
if [ ${RES} != 0 ]; then
error_msg "Packer build failed"
fi
fi
if [ "${VAGRANT}" == 'true' ]; then
SRC_ARTIFACT_FILE="${VAGRANT_BUILD_DIR}/vagrant.box"
OUT_FILE="${OSNAME}-${OSVERSION}-${OSBUILDVERSION}-${PROVIDER}-${IMAGE_BUILD_VERSION}.box"
else
SRC_ARTIFACT_FILE="${BUILD_DIR}/out.img"
OUT_FILE="${OSNAME}-${OSVERSION}-${OSBUILDVERSION}-${PROVIDER}-${IMAGE_BUILD_VERSION}.img"
fi
DEST=${SETTINGS_ARTIFACTS_BASE_DIR}/${PROVIDER}/${OSNAME}/${OSVERSION}-${OSBUILDVERSION}-${PROVIDER}-${IMAGE_BUILD_VERSION}
# Copy the build to the master repository
echo "Creating destination path on remote repository ${DEST}"
mkdir -p ${DEST} || error_msg "Unable to create destination directory in the repository"
echo "Copying build ${SRC_ARTIFACT_FILE} to remote repository"
cp ${SRC_ARTIFACT_FILE} ${DEST}/${OUT_FILE} || error_msg "Cannot copy output image file to repository"
| true
|
752a495f39dc52cc6485526fdffe994aa848a023
|
Shell
|
GEResearch/build_with_docker
|
/build.sh
|
UTF-8
| 191
| 2.75
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Generic build_with_docker driver for a CMake project.
# Expects environment variables:
#   ARCH - target architecture, used as the output subdirectory
#   NAME - project directory to copy into the build tree and compile
echo "Running build_with_docker generic script for cmake project..."
mkdir -p "bin/$ARCH/src"
cp -R "$NAME" "bin/$ARCH/src"
#Compile cmake project
# Abort rather than running cmake/make in the wrong directory if cd fails.
cd "bin/$ARCH/src/$NAME" || exit 1
cmake .
make
| true
|
ee2ad00e5ddb6d4f84c1fde0726dea07947362c0
|
Shell
|
pbas4/mac-setup
|
/scripts/ssh-key.sh
|
UTF-8
| 328
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
# Ensure the user has an SSH key (generating one if ~/.ssh is missing), then
# prompt them to register the public key with GitHub.
echo "Running ssh-key.sh..."
# BUG FIX: the script declares #!/bin/sh but used the bash-only [[ ]] test,
# bash-only "read -p", and echo with "\n" escapes whose handling varies
# between sh implementations; use POSIX [ ], printf and plain read instead.
if [ ! -e ~/.ssh ]; then
echo "Creating an SSH key for you..."
ssh-keygen -t rsa
else
echo "Existing ssh key found, skipping..."
fi
# Github config
printf 'Please add this public key to Github \n\n'
printf 'https://github.com/account/ssh \n\n'
printf 'Press [Enter] key after this...'
read dummy
| true
|
81250c66ffba8b596b8eeaf64f5b084caf8e6d18
|
Shell
|
smasoka/ChpcSlurm
|
/roles/slurm/templates/init.d.slurmdbd.j2
|
UTF-8
| 4,037
| 3.84375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# chkconfig: 345 90 10
# description: SLURMDBD is a database server interface for \
# SLURM (Simple Linux Utility for Resource Management).
#
# processname: ${exec_prefix}/sbin/slurmdbd
# pidfile: /var/run/slurmdbd.pid
#
# config: /etc/sysconfig/slurm
#
### BEGIN INIT INFO
# Provides: slurmbd
# Required-Start: $remote_fs $syslog $network munge
# Required-Stop: $remote_fs $syslog $network munge
# Should-Start: $named
# Should-Stop: $named
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: SLURM database daemon
# Description: Start slurm to provide database server for SLURM
### END INIT INFO
HOMEDIR=/opt/slurm
CONFDIR="${HOMEDIR}/etc"
LIBDIR="${HOMEDIR}/lib"
SBINDIR="${HOMEDIR}/sbin"
#Source function library.
if [ -f /etc/rc.status ]; then
. /etc/rc.status
SUSE=1
STARTPROC=startproc
rc_reset
else
if [ ! -f /etc/rc.d/init.d/functions ]; then
echo "Could not find /etc/rc.d/init.d/functions. Is some other daemon launch mechanism used?"
exit 1
fi
. /etc/rc.d/init.d/functions
SUSE=0
STARTPROC=daemon
function rc_status() {
RETVAL=$?
}
function rc_exit () {
exit $RETVAL
}
RETVAL=0
fi
# We can not use a starter program without losing environment
# variables that are critical on Blue Gene systems
if [ -d /bgl/BlueLight/ppcfloor ]; then
STARTPROC=""
fi
# Source slurm specific configuration
# SLURMDBD_OPTIONS defines slurmdbd command line options. See "man slurmdbd"
if [ -f /etc/sysconfig/slurm ] ; then
. /etc/sysconfig/slurm
else
SLURMDBD_OPTIONS=""
fi
if [ ! -f $CONFDIR/slurmdbd.conf ]; then
echo "Could not find $CONFDIR/slurmdbd.conf. Bad path?"
exit 1
fi
# setup library paths for slurm and munge support
export LD_LIBRARY_PATH=$LIBDIR${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}
start() {
prog=$1
shift
echo -n "starting $prog: "
unset HOME MAIL USER USERNAME
$STARTPROC $SBINDIR/$prog $SLURMDBD_OPTIONS
rc_status -v
echo
touch /var/lock/subsys/slurmdbd
}
stop() {
echo -n "stopping $1: "
killproc $1 -TERM
rc_status -v
echo
rm -f /var/lock/subsys/slurmdbd
rm -f /var/run/slurm/slurmdbd.pid
}
# Report whether slurmdbd is running, LSB-status style.
#   $1 - path to the daemon binary (basename is used for matching/messages)
# Returns 0 if running, 1 if dead but pidfile remains, 3 if stopped.
slurmstatus() {
local base=${1##*/}
local pid
local rpid
local pidfile
# Read the PidFile setting from slurmdbd.conf, ignoring commented lines;
# fall back to the default path if the option is absent.
pidfile=`grep -i pidfile $CONFDIR/slurmdbd.conf | grep -v '^ *#'`
if [ $? = 0 ]; then
# Strip "key=", any trailing "#comment", and surrounding quotes.
pidfile=${pidfile##*=}
pidfile=${pidfile%#*}
pidfile=${pidfile//\"/}
else
pidfile=/var/run/slurmdbd.pid
fi
# Find live PIDs for the daemon, excluding this script and its parents;
# try the full path first, then the bare basename.
pid=`pidof -o $$ -o $$PPID -o %PPID -x $1 || \
pidof -o $$ -o $$PPID -o %PPID -x ${base}`
if [ -f $pidfile ]; then
read rpid < $pidfile
if [ "$rpid" != "" -a "$pid" != "" ]; then
# Running only if the recorded PID matches one of the live PIDs.
for i in $pid ; do
if [ "$i" = "$rpid" ]; then
echo $"${base} (pid $pid) is running..."
return 0
fi
done
elif [ "$rpid" != "" -a "$pid" = "" ]; then
echo $"${base} dead but pid file exists"
return 1
fi
fi
# No usable pidfile: for slurmdbd, any live PID still counts as running.
if [ "$base" = "slurmdbd" -a "$pid" != "" ] ; then
echo $"${base} (pid $pid) is running..."
return 0
fi
echo $"${base} is stopped"
return 3
}
#
# stop slurm daemons,
# wait for termination to complete (up to 10 seconds) before returning
#
slurmstop() {
stop $1
for i in 1 2 3 4
do
sleep $i
slurmstatus $1
if [ $? != 0 ]; then
break
fi
done
}
#
# The pathname substitution in daemon command assumes prefix and
# exec_prefix are same. This is the default, unless the user requests
# otherwise.
#
# Any node can be a slurm controller and/or server.
#
case "$1" in
start)
start slurmdbd
;;
stop)
slurmstop slurmdbd
;;
status)
slurmstatus slurmdbd
rc_status -v
;;
restart)
$0 stop
$0 start
;;
condrestart)
if [ -f /var/lock/subsys/slurm ]; then
stop slurmdbd
start slurmdbd
fi
;;
reconfig|reload)
echo -n $"Reloading slurmdbd daemon configuration: "
killproc slurmdbd -HUP
echo
;;
*)
echo "Usage: $0 {start|stop|status|restart|condrestart|reconfig}"
exit 1
;;
esac
rc_exit
| true
|
3b0fd17a5bb1c38a1be7fa65565cb1d13030a0f4
|
Shell
|
edwardzhu/git-mirror
|
/bash.sh
|
UTF-8
| 582
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Maintain local git mirrors: refresh every existing *.git mirror, then clone
# any repository listed in $GIT_LIST that is not present yet.
# Optional $PROXY configures git's global http/https proxy first.
input=$GIT_LIST
proxy=$PROXY
[ -z "$proxy" ] || (git config --global http.proxy "$proxy" && git config --global https.proxy "$proxy" && echo "Set the proxy: $proxy")
# Refresh all existing mirrors, including LFS objects and tags.
for f in *.git; do
if [ -d "$f" ]; then
cd $f && git fetch origin && git lfs fetch --all && git fetch --tags; cd ..
fi
done
# Clone any listed repository whose target directory is missing or empty.
while IFS= read -r line
do
echo "$line"
# basename of the URL is the mirror directory; tr strips CR from CRLF lists.
folder=$(basename $line | tr -d '\r')
if [ ! "$(find "$folder" -mindepth 1 -print -quit 2>/dev/null)" ]; then
git clone --mirror $line && \
cd $folder && git lfs fetch --all; cd .. && \
echo "Cloned: $line"
fi
done < "$input"
| true
|
dbba86f551266095f176537a00bb012d64b163f2
|
Shell
|
chingchangtw/systemdynamics
|
/publish_gitbook.sh
|
UTF-8
| 622
| 3
| 3
|
[] |
no_license
|
# Publish the GitBook build to GitHub Pages: build the static site on the
# current branch, then commit the output onto gh-pages and push.
# install the plugins and build the static site
gitbook install && gitbook build
# checkout to the gh-pages branch
if ! git checkout gh-pages
then
echo >&2 "Checkout to branch gh-pages failed!"
exit 1
fi
# pull the latest updates
git pull --rebase origin gh-pages
# copy the static site files into the current directory.
cp -R _book/* .
# remove 'node_modules' and '_book' directory
git clean -fx node_modules
git clean -fx _book
# add all files
git add .
# commit
git commit -a -m "Update docs"
# push to the origin
git push origin gh-pages
# checkout to the master branch
git checkout master
## - END -
| true
|
3b06a78f6d3b1832ffd729c9e1b4daeaf3689598
|
Shell
|
kubernetes-sigs/kind
|
/site/static/examples/kind-gcr.sh
|
UTF-8
| 1,582
| 4.1875
| 4
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
set -o errexit
# desired cluster name; default is "kind"
KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-kind}"
# create a temp file for the docker config
echo "Creating temporary docker client config directory ..."
DOCKER_CONFIG=$(mktemp -d)
export DOCKER_CONFIG
trap 'echo "Removing ${DOCKER_CONFIG}/*" && rm -rf ${DOCKER_CONFIG:?}' EXIT
echo "Creating a temporary config.json"
# This is to force the omission of credsStore, which is automatically
# created on supported system. With credsStore missing, "docker login"
# will store the password in the config.json file.
# https://docs.docker.com/engine/reference/commandline/login/#credentials-store
cat <<EOF >"${DOCKER_CONFIG}/config.json"
{
"auths": { "gcr.io": {} }
}
EOF
# login to gcr in DOCKER_CONFIG using an access token
# https://cloud.google.com/container-registry/docs/advanced-authentication#access_token
echo "Logging in to GCR in temporary docker client config directory ..."
gcloud auth print-access-token | \
docker login -u oauth2accesstoken --password-stdin https://gcr.io
# setup credentials on each node
echo "Moving credentials to kind cluster name='${KIND_CLUSTER_NAME}' nodes ..."
for node in $(kind get nodes --name "${KIND_CLUSTER_NAME}"); do
# the -oname format is kind/name (so node/name) we just want name
node_name=${node#node/}
# copy the config to where kubelet will look
docker cp "${DOCKER_CONFIG}/config.json" "${node_name}:/var/lib/kubelet/config.json"
# restart kubelet to pick up the config
docker exec "${node_name}" systemctl restart kubelet.service
done
echo "Done!"
| true
|
573d8d5973783110bd5a95f1066d9d7bca469c94
|
Shell
|
TAEKnical/ansible-study
|
/wordpress/wordpress_adhoc.sh
|
UTF-8
| 3,720
| 2.6875
| 3
|
[] |
no_license
|
# Install Apache, restart it, and open the firewall (node1 = web server)
ansible node1 -m yum -a "name=httpd state=latest" -b
ansible node1 -m firewalld -a "immediate=true permanent=true service=http state=enabled" -b
# Add the EPEL and Remi repositories
ansible all -m yum -a "name=https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm" -b
ansible all -m yum -a "name=https://rpms.remirepo.net/enterprise/remi-release-7.rpm" -b
ansible node1 -a "yum-config-manager --enable remi-php74" -b
# Install PHP 7.4
ansible node1 -m yum -a "name=php" -b
ansible node1 -m yum -a "name=php-mysql" -b
# Restart Apache
ansible node1 -m service -a "name=httpd state=restarted enabled=true" -b
# Download and install WordPress
ansible node1 -m get_url -a "dest=/tmp/latest.tar.gz url=https://wordpress.org/latest.tar.gz" -b
ansible node1 -m unarchive -a "remote_src=yes src=/tmp/latest.tar.gz dest=/tmp/" -b
ansible node1 -m copy -a "remote_src=yes src=/tmp/wordpress/ dest=/var/www/html/wordpress/" -b
# Create wp-config.php from the sample and fill in DB name/user/password/host
ansible node1 -m copy -a "src=/var/www/html/wordpress/wp-config-sample.php dest=/var/www/html/wordpress/wp-config.php remote_src=yes" -b
ansible node1 -m replace -a "path=/var/www/html/wordpress/wp-config.php regexp=database_name_here replace=wordpress" -b
ansible node1 -m replace -a "dest=/var/www/html/wordpress/wp-config.php regexp=username_here replace=wpadmin" -b
ansible node1 -m replace -a "dest=/var/www/html/wordpress/wp-config.php regexp=password_here replace=wpadmin" -b
ansible node1 -m replace -a "dest=/var/www/html/wordpress/wp-config.php regexp=localhost replace=192.168.122.52" -b
# Install and configure MariaDB (node2 = database server)
ansible node2 -m yum_repository -a "name=MariaDB description=MariaDB baseurl=http://yum.mariadb.org/10.5.4/centos7-amd64/ gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB gpgcheck=1" -b
ansible node2 -m yum -a "name=MariaDB-server" -b
ansible node2 -m service -a "name=mariadb state=started enabled=true" -b
ansible node2 -m firewalld -a "immediate=true permanent=true service=mysql state=enabled" -b
ansible node2 -m yum -a "name=MySQL-python state=latest" -b
ansible node2 -m mysql_user -a "name=root login_password='' login_host=localhost login_user=root password=toor" -b
ansible node2 -m mysql_db -a "name=wordpress login_user=root login_password=toor login_host=localhost" -b
ansible node2 -m mysql_user -a "name=wpadmin password=wpadmin login_user=root login_password=toor login_host=localhost priv=wordpress.*:ALL,GRANT host=% state=present" -b
# On a VM you could just delete the machine — but what if this is bare metal?
# The commented commands below tear the stack back down in that case.
# # Stop the service: mariadb
# ansible node2 -m service -a "name=mariadb state=stopped" -b
# # Remove DB files: /var/lib/mysql
# ansible node2 -m file -a "path=/var/lib/mysql state=absent" -b
# # SELinux boolean policy: allow_user_mysql_connect
# ansible node2 -m seboolean -a "name=allow_user_mysql_connect state=no persistent=no"
# # Remove packages: epel-release, MySQL-python, libsemanage-python, MariaDB-server
# ansible node2 -m yum -a "name=epel-release,MySQL-python,MariaDB-server,libsemanage-python state=absent" -b
# # Firewall: mysql
# ansible node2 -m firewalld -a "service=mysql state=disabled immediate=true" -b
# # Remove repository: MariaDB
# ansible node2 -m yum_repository -a "name=MariaDB state=absent" -b
# # Stop the service
# ansible node1 -m service -a 'name=httpd state=stopped' -b
# # Delete files
# ansible node1 -m file -a 'path=/var/www/html/wordpress state=absent' -b
# #seboolean -a 'name=httpd_can_network_connect_db state=no persistent=no'
# ansible node1 -m yum -a "name=epel-release,libsemanage-python,httpd,php,php-mysql,remi-release state=absent"
# # Firewall:
# ansible node1 -m firewalld -a "service=http state=disabled immediate=yes permanent=yes"
| true
|
c9f96fd8903693c63b569b3998a5dd8d9fb53fd2
|
Shell
|
leejw51crypto/chain-tx-enclave
|
/chain/integration-tests/docker/tendermint/bin/entrypoint.sh
|
UTF-8
| 1,036
| 3.40625
| 3
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
set -e
IFS=
# Emit a timestamped log line: "[YYYY-MM-DD|HH:MM:SS] <message>".
#   $1 - message text
print_message() {
    local stamp
    stamp=$(date +"%Y-%m-%d|%T")
    printf '[%s] %s\n' "$stamp" "$1"
}
# Log a progress step description.
print_step() {
    print_message "$1"
}
# Log a configuration key/value pair.
#   $1 - key, $2 - value
print_config() {
    print_message "[Config] ${1}=${2}"
}
# Log an error description.
print_error() {
    print_message "[ERROR] ${1}"
}
# --- Validate required environment -------------------------------------------
# FEE_SCHEMA selects which pre-baked Tendermint config tree to install;
# PROXY_APP is the ABCI application address handed to the node.
if [ -z "${FEE_SCHEMA}" ]; then
  print_error "Missing Fee Schema"
  exit 1
fi
if [ -z "${PROXY_APP}" ]; then
  print_error "Missing Tendermint Proxy App"
  exit 1
fi
print_config "FEE_SCHEMA" "${FEE_SCHEMA}"
print_config "PROXY_APP" "${PROXY_APP}"
# Install the config matching the requested fee schema into $TMHOME.
# The glob sources must stay unquoted so they expand; destination is quoted.
case "${FEE_SCHEMA}" in
  WITHFEE) cp -r ./tendermint-withfee/* "${TMHOME}" ;;
  ZEROFEE) cp -r ./tendermint-zerofee/* "${TMHOME}" ;;
  *)
    print_error "Unsupported Fee Schema: ${FEE_SCHEMA}"
    exit 1
    ;;
esac
print_step "Starting Tendermint"
# Bug fix: quote ${PROXY_APP} so the address can never be word-split.
/usr/bin/tendermint node --proxy_app="${PROXY_APP}" --rpc.laddr=tcp://0.0.0.0:26657 --consensus.create_empty_blocks=true
| true
|
78e44f6bfcfd57b80ca2d96c04c43f2019b1f62b
|
Shell
|
orgenbora/CutLang
|
/runs/showall.sh
|
UTF-8
| 590
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Plot all histograms for a CutLang analysis region with ROOT.
# Defaults below are overridden by positional args (see printUsage).
htxtfile=BP_1-histos.txt
rootfile=histoOut-CLA.root
regno=1
# Prints a usage banner (showing current defaults) and exits successfully.
printUsage() {
  printf 'Usage: %s [region_id=%s] [rootfile=%s]\n' "$0" "${regno}" "${rootfile}"
  exit 0
}
# Extracts histogram names from ${htxtfile} (2nd double-quoted field, up to
# the first comma, excluding "Basics" entries) into histoname.txt, then runs
# ROOT's showall.C macro for the requested region/file and exits the script.
showAll() {
  # Idiom fix: drop the useless 'cat |' and quote the file variable.
  cut -f2 -d'"' "${htxtfile}" | cut -f1 -d',' | grep -v Basics > histoname.txt
  root.exe -l -q -x showall.C"(${regno} , \"${rootfile}\")"
  exit 0
}
# --- Argument dispatch --------------------------------------------------------
# 0 args: plot with defaults; 1 arg: region id (or -h for help);
# 2 args: region id + ROOT file; anything else: usage.
if [ $# -eq 0 ]; then
  showAll
elif [ $# -eq 1 ]; then
  # Bug fix: quote "$1" so the comparison cannot word-split or glob.
  if [ "$1" = '-h' ]; then
    printUsage
  fi
  regno=$1
  htxtfile=BP_${regno}-histos.txt
  showAll
elif [ $# -eq 2 ]; then
  regno=$1
  rootfile=$2
  htxtfile=BP_${regno}-histos.txt
  showAll
else
  printUsage
fi
| true
|
ae7eb307f7633b2c3e29d78fe0fc73f70586f3c1
|
Shell
|
nakulp007/BigData-Mortality-Prediction-in-ICU
|
/code/scripts/process_labresults.sh
|
UTF-8
| 110
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Normalize a quoted CSV of lab results for downstream parsing:
#   * collapse empty quoted strings ("") into a single space
#   * inside every double-quoted field (even-numbered '"'-separated fields),
#     replace embedded commas with semicolons
# Usage: process_labresults.sh <input.csv> <output.csv>
# Bug fix: quote "$1"/"$2" so paths containing spaces work.
sed 's/\"\"/ /g' "$1" | awk -F'"' -v OFS='"' '{ for (i=2; i<=NF; i+=2) gsub(",", ";", $i) } 1' > "$2"
| true
|
a2d21f2a365e6bdebb70502332a6ae09ce897b2f
|
Shell
|
oamat/hyperledgerFabric
|
/MultiOrgDocker/scripts/scriptsSeparated/fabric-ca/gen-orderer-identity.sh
|
UTF-8
| 1,275
| 3.140625
| 3
|
[
"Unlicense"
] |
permissive
|
# Creates/Enrolls the Orderer's identity + sets up the MSP for the orderer.
# The script may be executed multiple times; since Register (step 2) can
# happen only once, the register error is deliberately ignored on reruns
# (which is also why this script does NOT use 'set -e').
# The identity of the orderer is created by the admin from the orderer org.
# 1. Set the identity context to the Orderer Admin
source set-client-home.sh orderer admin
# 2. Register the orderer identity
AFFILIATION=orderer
fabric-ca-client register --id.type orderer --id.name orderer --id.secret pw --id.affiliation "$AFFILIATION"
echo "======Completed: Registered orderer (can be done only once)===="
# 3. Hold the admin MSP location in a variable
ADMIN_CLIENT_HOME=$FABRIC_CA_CLIENT_HOME
# 4. Change the client context to the orderer identity
source set-client-home.sh orderer orderer
# 5. Enroll the orderer identity.
# The admin enrolls the orderer identity; the MSP is written into
# FABRIC_CA_CLIENT_HOME.
fabric-ca-client enroll -u http://orderer:pw@localhost:7054
echo "======Completed: Enrolled orderer ========"
# 6. Copy the admincerts into the appropriate folder.
# Bug fix: quote the MSP paths so directories containing spaces cannot
# word-split (the signcerts glob stays outside the quotes so it expands).
mkdir -p "$FABRIC_CA_CLIENT_HOME/msp/admincerts"
cp "$ADMIN_CLIENT_HOME"/msp/signcerts/* "$FABRIC_CA_CLIENT_HOME/msp/admincerts"
echo "======Completed: MSP setup for the orderer========"
| true
|
cf387d1bfa6139ebdd0eb85d55a744004da42425
|
Shell
|
sakilk130/shell-scripting-example-1-20
|
/ex18.sh
|
UTF-8
| 227
| 2.984375
| 3
|
[] |
no_license
|
# Prints every prime number from 2 up to a given bound, one per line,
# using trial division (as the original did).
print_primes_up_to() {
  limit=$1
  # Bug fix: the original echoed "2" unconditionally, even for bounds < 2.
  if [ "$limit" -ge 2 ]; then
    echo 2
  fi
  candidate=3
  while [ "$candidate" -le "$limit" ]; do
    divisor=2
    # Trial division by every integer in [2, candidate-1].
    while [ "$divisor" -lt "$candidate" ]; do
      if [ $((candidate % divisor)) -eq 0 ]; then
        break
      fi
      divisor=$((divisor + 1))
    done
    # The inner loop ran to completion -> no divisor found -> prime.
    if [ "$divisor" -eq "$candidate" ]; then
      echo "$candidate"
    fi
    candidate=$((candidate + 1))
  done
}

# Backward-compatible entry point: the bound may now also be passed as $1;
# otherwise prompt and read it from stdin exactly as before.
if [ $# -ge 1 ]; then
  range=$1
else
  echo enter a range
  read -r range
fi
print_primes_up_to "$range"
| true
|
251851583f0c9da9ce05679f66e44b178e65dbf0
|
Shell
|
jawshooah/dotfiles
|
/bash/bashrc.symlink
|
UTF-8
| 5,932
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Interactive bash configuration: shell options, aliases, prompt and tool init.
# set -x
# shell options
shopt -s extglob
shopt -s globstar
# Gradle shortcuts
alias gw='./gradlew --daemon --parallel --configure-on-demand'
alias gwi='gw --info'
# Common aliases
alias ..='cd ..'
alias cd..='cd ..'
alias l='ll'
alias ls='ls -G'
alias gp='grep -Hn --color=auto'
alias fgp='fgrep -Hn --color=auto'
alias egp='egrep -Hn --color=auto'
alias vlc='open -a Vlc'
alias wha='which -a'
alias bex='bundle exec'
# Colorize command output
alias colorize='python -m colorize'
alias svn='colorsvn'
# Lastpass aliases
alias lpcp='lpass show --clip --password'
# Homebrew aliases
# (the empty SSL_CERT_FILE/CURL_CA_BUNDLE override brew's TLS cert handling)
# alias brew='env PATH=${PATH//$(pyenv root)\/shims:/} SSL_CERT_FILE="" CURL_CA_BUNDLE="" brew'
alias brew='env SSL_CERT_FILE="" CURL_CA_BUNDLE="" brew'
alias bd='brew doctor'
alias bh='brew home'
alias bi='brew info'
alias bcl='brew cleanup'
alias bed='brew edit'
alias bis='brew install'
alias bud='brew update'
alias bug='brew upgrade'
alias budu='brew update && brew upgrade'
alias bus='brew uninstall'
# Homebrew-cask aliases
alias bc='brew cask'
alias bcc='brew cask cleanup'
alias bcd='brew cask doctor'
alias bce='brew cask edit'
alias bch='brew cask home'
alias bci='brew cask info'
alias bcu='brew cask update'
alias bcis='brew cask install'
alias bcug='brew cask upgrade'
alias bcus='brew cask uninstall'
alias bczp='brew cask zap'
alias buduc='brew update && brew upgrade && brew cleanup && brew cask cleanup'
# thefuck: re-run the previous command line with corrections applied
alias fuck='eval $(thefuck $(fc -ln -1))'
# Shorthand for colorized sdcv output: look up "$@" in the console
# dictionary and page the result (less -r keeps the color escapes).
dict() {
  local -a lookup=(sdcv --color "$@")
  "${lookup[@]}" | less -r
}
# Change iterm2 profile.
# Usage: it2prof ProfileName (case sensitive)
# https://coderwall.com/p/s-2_nw
it2prof() {
  # Emit iTerm2's proprietary OSC 50 escape sequence to switch profiles.
  printf '\033]50;SetProfile=%s\a\n' "$1"
}
# shellcheck disable=SC2034
setup_color_codes() {
# ANSI color codes
DEFAULT=$'\e[0;0m'
ANSI_BLK=$'\e[0;30;40m'
ANSI_RED=$'\e[0;31;40m'
ANSI_GRN=$'\e[0;32;40m'
ANSI_YLO=$'\e[0;33;40m'
ANSI_BLU=$'\e[0;34;40m'
ANSI_MGT=$'\e[0;35;40m'
ANSI_CYN=$'\e[0;36;40m'
ANSI_WHT=$'\e[0;37;40m'
# Bold colors
ANSI_BBLK=$'\e[1;30;40m'
ANSI_BRED=$'\e[1;31;40m'
ANSI_BGRN=$'\e[1;32;40m'
ANSI_BYLO=$'\e[1;33;40m'
ANSI_BBLU=$'\e[1;34;40m'
ANSI_BMGT=$'\e[1;35;40m'
ANSI_BCYN=$'\e[1;36;40m'
ANSI_BWHT=$'\e[1;37;40m'
}
# Builds a colorized Mercurial prompt segment via the hg-prompt extension's
# template language ({incoming|count}, {branch}, {bookmark}, {status}, ...).
# Prints nothing (errors suppressed) when hg or the extension is unavailable.
hg_prompt() {
  local prefix="${ANSI_BMGT}("
  local incoming="${ANSI_BYLO}{[+{incoming|count}]-->}"
  local branch="${ANSI_BMGT}{root|basename}{/{branch}}"
  local outgoing="${ANSI_BGRN}{-->[+{outgoing|count}]}"
  local full_branch="${incoming}${branch}${outgoing}"
  local bookmark="${ANSI_WHT}{ : ${ANSI_BBLU}{bookmark}}"
  local status="${ANSI_BYLO}{status}"
  local suffix="${ANSI_BMGT})"
  hg prompt "${prefix}${full_branch}${bookmark}${status}${suffix}" 2>/dev/null
}
# shellcheck disable=SC2016
# Assembles and exports PS1. The pieces are single-quoted ON PURPOSE so the
# $(...) substitutions and ${ANSI_*} expansions are deferred until each
# prompt render (requires the promptvars shell option, on by default).
setup_prompt() {
  local prefix='\n${ANSI_BMGT}['
  local user='${ANSI_BGRN}\u${ANSI_BBLK}@${ANSI_BCYN}\h '
  local cwd='${ANSI_BLU}\w${ANSI_BMGT}]'
  local git='${ANSI_BYLO}$(git_ps1 " (%s)")'
  local hg='$(hg_prompt)'
  local suffix='${ANSI_BBLK}\n\$ ${DEFAULT}'
  export PS1="${prefix}${user}${cwd}${git}${hg}${suffix}"
}
# macOS only: registers Sublime Text 3 as the LaunchServices handler for
# plain-text files.
setup_editor() {
  defaults write com.apple.LaunchServices/com.apple.launchservices.secure LSHandlers \
  -array-add '{LSHandlerContentType=public.plain-text;LSHandlerRoleAll=com.sublimetext.3;}'
}
# Thin indirection over git's __git_ps1 so the prompt references a stable name.
git_ps1() { __git_ps1; }
# Replaces every occurrence of old_version with new_version in all tracked
# files that mention it, staging each modified file with git.
# Arguments: $1 old_version, $2 new_version.
# Returns non-zero when arguments are missing or any rewrite/stage fails.
bump_version() {
  local old_version=$1
  local new_version=$2
  if [[ -z "${old_version}" && -z "${new_version}" ]]; then
    echo "This function requires two arguments: old_version new_version"
    return 1
  fi
  # Escape the dots so they match literally in both git-grep and sed.
  local escaped_old_version="${old_version//./\\.}"
  local sed_exp="s/${escaped_old_version}/${new_version}/g"
  local file tmp retval
  while read -r file; do
    # Bug fix: the original used 'sed -i -e', which BSD/macOS sed parses as
    # "-i with backup suffix -e", littering "<file>-e" backups. Rewriting
    # through a temp file is portable to both GNU and BSD sed.
    tmp=$(mktemp) || return 1
    if sed -e "${sed_exp}" "$file" > "$tmp" && mv "$tmp" "$file" && git add "$file"; then
      :
    else
      retval=$?
      rm -f "$tmp"
      return $retval
    fi
  done < <(git grep -Il "${escaped_old_version}")
}
#TODO fix weird bug where you can't delete to start of prompt
#setup_color_codes
#setup_prompt
# Static fallback prompt: cyan timestamp + user, then cwd on its own line.
PS1='\[\033[0;36m\]\D{%F %T} - \u\[\033[00m\] in \[\033[0;36m\]$( pwd )\n\[\033[1;36m\]\$\[\033[;m\] '
export SUDO_PS1="\[\h:\w\] \u\\$ "
export TERM="xterm-256color"
export CLICOLOR=1
# __git_ps1 feature toggles (dirty/stash/untracked/upstream markers).
export GIT_PS1_SHOWDIRTYSTATE=1
export GIT_PS1_SHOWSTASHSTATE=1
export GIT_PS1_SHOWUNTRACKEDFILES=1
export GIT_PS1_SHOWUPSTREAM="verbose"
export GIT_PS1_DESCRIBE_STYLE="branch"
export GIT_PS1_SHOWCOLORHINTS=1
export EDITOR='vim'
export GNUTERM='x11'
export HOMEBREW_CASK_OPTS='--appdir=/Applications'
export HOMEBREW_DEVELOPER='1'
export HOMEBREW_NO_AUTO_UPDATE='1'
# Change ls alias if we are using GNU
if ls --color -d . >/dev/null 2>&1; then alias ls='ls --color'; fi
# Sources the given file if it exists and is non-empty, reporting how long
# the sourcing took. Relies on GNU date ('gdate', from coreutils on macOS)
# for nanosecond-resolution timestamps.
source_if_exists() {
  if [[ -s "$1" ]]; then
    # Bug fix: never use a path as the printf FORMAT string -- a file name
    # containing '%' would previously corrupt the output.
    printf 'Sourcing %s ... ' "$1"
    local ts tt
    ts=$(gdate +%s%N)
    source "$1"
    tt=$(( ($(gdate +%s%N) - ts) / 1000000 ))
    printf '%s ms\n' "$tt"
  fi
}
# Optional add-ons, each sourced only when present (see source_if_exists).
SRC_BASH_COMPLETION="$(brew --prefix)/etc/bash_completion"
SRC_SCM_BREEZE="${HOME}/.scm_breeze/scm_breeze.sh"
SRC_OH_MY_GIT="${HOME}/.oh-my-git/prompt.sh"
SRC_TOP_SECRET="${HOME}/.bash_secret"
SRC_ITERM2_SHELL_INTEGRATION="${HOME}/.iterm2_shell_integration.bash"
source_if_exists "${SRC_BASH_COMPLETION}"
source_if_exists "${SRC_SCM_BREEZE}"
source_if_exists "${SRC_OH_MY_GIT}"
source_if_exists "${SRC_TOP_SECRET}"
source_if_exists "${SRC_ITERM2_SHELL_INTEGRATION}"
# Evaluates the shell snippet $2 only when command $1 is on PATH, timing the
# initialization (gdate = GNU date). Used for tool hooks like 'rbenv init -'.
eval_if_hash() {
  if hash "$1" 2>/dev/null; then
    # Bug fix: the tool name is no longer used as the printf format string.
    printf 'Initializing %s ... ' "$1"
    local ts tt
    ts=$(gdate +%s%N)
    eval "$2"
    tt=$(( ($(gdate +%s%N) - ts) / 1000000 ))
    printf '%s ms\n' "$tt"
  fi
}
# Initialize dev-environment hooks when the tools exist.
# NOTE(review): each "$(tool ...)" substitution runs unconditionally when
# this line is evaluated -- even if eval_if_hash then skips the eval because
# the tool is missing, the substitution already printed an error; confirm
# whether lazy evaluation was intended.
eval_if_hash direnv "$(direnv hook bash)"
eval_if_hash nodenv "$(nodenv init -)"
eval_if_hash rbenv "$(rbenv init -)"
eval_if_hash pyenv "$(pyenv init -)"
# Java and Gradle stuff
export JAVA_HOME="$(/usr/libexec/java_home)"
export GRADLE_OPTS="-Djavax.net.ssl.trustStore=$(/usr/libexec/java_home)/jre/lib/security/cacerts -Djavax.net.ssl.trustStorePassword=changeit -Djavax.net.ssl.keyStore=$(/usr/libexec/java_home)/jre/lib/security/cacerts -Djavax.net.ssl.keyStorePassword=changeit"
| true
|
d574c24437deacb9d42a40cb5ff92e72ef08cd5a
|
Shell
|
ShannonS-DS/nodecontroller
|
/scripts/configure-system.sh
|
UTF-8
| 1,967
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# One-time Waggle node-controller system configuration: recovery flags,
# /etc/hosts entries, SSH hardening, authorized keys and RabbitMQ config.
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# this will make sure that an empty eMMC card will get the waggle image
touch /root/do_recovery
# this will trigger a self test on the first full boot
touch /home/waggle/start_test
# Map the extension nodes (10.31.81.51-55) and this host in /etc/hosts.
echo -e "10.31.81.51\textensionnode1 extensionnode" >> /etc/hosts
for i in 2 3 4 5 ; do
  echo -e "10.31.81.5${i}\textensionnode${i}" >> /etc/hosts
done
echo -e "127.0.0.1\tnodecontroller" >> /etc/hosts
# Restrict SSH connections to local port bindings and ethernet card subnet
sed -i 's/^#ListenAddress ::$/ListenAddress 127.0.0.1/' /etc/ssh/sshd_config
sed -i 's/^#ListenAddress 0.0.0.0$/ListenAddress 10.31.81.10/' /etc/ssh/sshd_config
cp ./etc/interfaces /etc/network/interfaces
cp ./etc/waggle-test-service /etc/sudoers.d/
# set AoT_key
mkdir -p /home/waggle/.ssh/
echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsYPMSrC6k33vqzulXSx8141ThfNKXiyFxwNxnudLCa0NuE1SZTMad2ottHIgA9ZawcSWOVkAlwkvufh4gjA8LVZYAVGYHHfU/+MyxhK0InI8+FHOPKAnpno1wsTRxU92xYAYIwAz0tFmhhIgnraBfkJAVKrdezE/9P6EmtKCiJs9At8FjpQPUamuXOy9/yyFOxb8DuDfYepr1M0u1vn8nTGjXUrj7BZ45VJq33nNIVu8ScEdCN1b6PlCzLVylRWnt8+A99VHwtVwt2vHmCZhMJa3XE7GqoFocpp8TxbxsnzSuEGMs3QzwR9vHZT9ICq6O8C1YOG6JSxuXupUUrHgd AoT_key" >> /home/waggle/.ssh/authorized_keys
echo >> /home/waggle/.ssh/authorized_keys
# add AoT test cert (forced command: can only run /bin/date)
echo "command=\"/bin/date\" ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCedz4oU6YdvFjbWiJTpJiREplTizAk2s2dH0/aBMLmslSXzMXCgAh0EZOjsA3CW+P2SIn3NY8Hx3DmMR9+a1ISd3OcBcH/5F48pejK1MBtdLOnai64JmI80exT3CR34m3wXpmFbbzQ5jrtGFb63q/n89iVDb+BwY4ctrBn+J7BPEJbhh/aepoUNSG5yICWtjC0q8mDhHzr+40rYsxPXjp9HTaEzgLu+fNhJ0rK+4891Lr08MTud2n8TEntjBRlWQUciGrPn1w3jzIz+q2JdJ35a/MgLg6aRSQOMg6AdanZH2XBTqHbaeYOWrMhmDTjC/Pw9Jczl7S+wr0648bzXz2T AoT_key_test" >> /home/waggle/.ssh/authorized_keys
echo >> /home/waggle/.ssh/authorized_keys
# add AoT guest node cert (restricted to the extension-node subnet)
echo "from=\"10.31.81.5?\" ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ohQv1Qksg2sLIqpvjJuZEsIkeLfbPusEaJQerRCqI71g8hwBkED3BBv5FehLcezTg+cFJFhf2vBGV5SbV0NzbouIM+n0lAr6+Ei/XYjO0B1juDm6cUmloD4HSzQWv+cSyNmb7aXjup7V0GP1DZH3zlmvwguhMUTDrWxQxDpoV28m72aZ4qPH7VmQIeN/JG3BF9b9F8P4myOPGuk5XTjY1rVG+1Tm2mxw0L3WuL6w3DsiUrvlXsGE72KcyFBDiFqOHIdnIYWXDLZz61KXctVLPVLMevwU0YyWg70F9pb0d2LZt7Ztp9GxXBRj5WnU9IClaRh58RsYGhPjdfGuoC3P AoT_guest_node_key" >> /home/waggle/.ssh/authorized_keys
echo >> /home/waggle/.ssh/authorized_keys
# Setup RabbitMQ config files.
cp -r /usr/lib/waggle/nodecontroller/etc/rabbitmq /etc
# Just in case for now...ideally this would be in /etc/envinronment already.
# Derive the node id from the first ethernet MAC with the 00:1e:06 prefix.
WAGGLE_ID=$(ip link | awk '/ether 00:1e:06/ { print $2 }' | sed 's/://g')
sed -i -e "s/%WAGGLE_ID%/$WAGGLE_ID/" /etc/rabbitmq/rabbitmq.config
| true
|
a3021c531e2f1cb149af87661e2ca7227182529d
|
Shell
|
AymanAzzam/Mini-Reddit-Scripts
|
/Front-End-New.sh
|
UTF-8
| 515
| 2.53125
| 3
|
[] |
no_license
|
########## Front End Build ###########
# Jenkins build/deploy step for the Angular front end.
#install the packages
sudo npm install jquery --save && sudo npm install --save @types/jquery && sudo npm install rxjs
#Build the project
sudo ng build --base-href=http://35.204.169.121
#Run unit testing
#sudo ng test
#Remove the deployed version
# NOTE(review): the existence test uses an absolute Jenkins workspace path
# but the rm uses a relative ../ path -- confirm these refer to the same dir.
if [ -d "/var/lib/jenkins/workspace/Front-End-New_Deployed" ]; then
  sudo rm -rv ../Front-End-New_Deployed
fi
#Deploy the project (dev server in the background)
sudo ng serve --host=0.0.0.0 --port=4300 &
#
sudo mv ../Front-End-New ../Front-End-New_Deployed
| true
|
c967f705e68880ccb23da44016ee7424962fcd2f
|
Shell
|
lbrayner/lbrayner
|
/wm/bin/terminal
|
UTF-8
| 685
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
# Launches the st terminal detached, optionally light-themed and/or in a
# chosen working directory.
#   -c PATH  cd to PATH before launching
#   -d       dark background (default)
#   -l       light background (st's -u flag)
#   -h       print usage and exit
# Remaining arguments are passed through to st.
# set -x
print_usage() {
  printf '\n%s\n' "$(basename "${0}") [-d] [-l] [-c PATH] -- [ARGS...]"
}
path=""
background="" # dark by default
while getopts ":c:dlh" opt
do
  case ${opt} in
    c)
      path="${OPTARG}"
      ;;
    d)
      background=""
      ;;
    l)
      background="-u"
      ;;
    h)
      print_usage
      exit 0
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      exit 1
      ;;
  esac
done
shift $((OPTIND - 1))
if [ -n "${path}" ]
then
  # Bug fix: abort when the directory does not exist instead of silently
  # launching the terminal in the current directory.
  cd "${path}" || exit 1
fi
# ${background} is intentionally unquoted: when empty it must expand to no
# argument at all (quoting would pass an empty string to st).
/usr/local/bin/st ${background} -f "Iosevka:size=12" "${@}" >/dev/null 2>&1 &
| true
|
03d7670748eafc8ffeeafc2abde8d3d2da667f7c
|
Shell
|
jessestuart/dotfiles
|
/zsh/git/aliases.zsh
|
UTF-8
| 3,796
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# ======================================
# Git aliases -- because who needs GUIs?
# ======================================
alias g="git"
alias ga="git add -A && git status -s"
alias gacam="git add -u && git cam"
alias gacm="git add -A && gcm"
alias gag="git add . && git commit --amend --no-edit && git push -f"
alias gas="git add -A && git status -s"
alias gbr='git checkout -b jesse/$(date "+%Y%m%d")'
alias gbrd="git branch -D"
alias gca="git commit -a"
alias gcaa="git commit -a --amend -C HEAD"
alias gci="hub ci-status -v"
alias gcl="git clone"
alias gcm="git commit -s -m"
alias gco="git checkout"
alias gcob="git checkout -b"
alias gdc="git diff --cached"
alias gdm="git diff master"
alias gds="git add -A && git diff --staged"
alias ggo="git checkout -"
# alias gh="cd ~/github"
alias gl="git log --pretty=oneline -n20 --abbrev-commit --reverse"
alias glb="git_list_branches"
alias god="git checkout dev && git pull"
alias gom="git checkout master && git pull"
alias gp="git push"
alias gpf="git push --force-with-lease"
alias gpr="git pull --rebase"
alias grba="git rebase --abort"
alias grbc="git rebase --continue"
alias grHH="git add -u && git reset --hard HEAD"
alias gs="git status -s"
alias gst="git stash push"
alias gwip="git_push_work_in_progress"
alias hpr="hub pull-request"
alias hubci="hub ci-status -v"
alias pop="git stash pop"
alias pu="git pull"
alias pull="git pull"
alias grbid="git rebase -i dev"
alias rbim="git rebase -i master"
alias repush="gpr && git push"
alias retag="ctags -R --exclude=.svn --exclude=.git --exclude=log --exclude=tmp *"
alias review="git diff master"
alias track="git checkout -t"
alias hbr="hub browse && open /Applications/Google\ Chrome.app"
alias gqs="git-quick-stats"
alias gstats="git-quick-stats"
alias gr="git remotes"
alias gfa="git fetch --all"
alias gcanon="git commit --amend --no-edit --no-verify"
alias gwe="git reset --hard HEAD"
alias gh="ghcl"
# i.e., "git last committed" -- prints when the last commit to the current
# branch was, in relative format (requires git >= 2.22 for --show-current).
glc() {
  load_colors
  # Bug fix: don't combine 'local' with a command substitution -- the
  # original 'local x=$(cmd)' masked the command's exit status.
  local rel_date
  rel_date=$(git log --oneline --date-order --pretty=format:"%ad" --date=relative | head -n1)
  echo "${GREEN}$(git branch --show-current)${NORMAL} was last updated ${RED}${rel_date}${NORMAL}."
}
# https://unix.stackexchange.com/a/10065
# Exports terminal attribute and color variables (BOLD, RED, ...), but only
# when stdout is a terminal advertising at least 8 colors; otherwise leaves
# everything untouched.
load_colors() {
  # Only bother when stdout is a terminal.
  if test -t 1; then
    local colors
    colors=$(tput colors)
    # ...and only when it actually supports colors.
    if test -n "$colors" && test "$colors" -ge 8; then
      export BOLD="$(tput bold)"
      export UNDERLINE="$(tput smul)"
      export STANDOUT="$(tput smso)"
      export NORMAL="$(tput sgr0)"
      local -a names=(BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE)
      local i
      for i in "${!names[@]}"; do
        export "${names[i]}=$(tput setaf "$i")"
      done
    fi
  fi
}
# ===============================================
# Hub aliases.
# NB: some of these features require building
# from HEAD at time of writing.
# ===============================================
alias hcl="hub clone"
alias hcp="hub create -p"
alias ci="hub ci-status -v"
# "git add, commit, push": stages everything, commits with the given
# message, and pushes.
gacp() {
  if [[ "$#" == 0 ]]; then
    echo "Git commit message required."
    # Bug fix: the original printed the error but then fell through and ran
    # 'git commit -m ""' anyway; abort instead.
    return 1
  fi
  git add -A
  git commit -m "$1"
  git push
}
# Commits all tracked changes as an unreviewed WIP snapshot and pushes.
# Hooks are skipped (--no-verify) by design -- this is a checkpoint, not a
# reviewable commit.
git_push_work_in_progress() {
  git add -u .
  git commit --no-verify -m '[commit message skipped]'
  git push
}
# Deletes every remote branch on origin that is already merged, except
# master/dev and the HEAD pointer line ('>').
# DANGER: this pushes deletions to the remote -- review the pipeline before
# changing it.
function git_cleanup_merged_prs() {
  git fetch --prune
  git branch -r --merged |
  grep origin |
  grep -Ev 'master|dev|>' |
  sed 's/origin\///g' |
  xargs git push origin --delete
}
# Prints the most recent tag in the repository (tag reachable from the
# newest tagged commit).
git_latest_tag() {
  # Bug fix: quote the command substitution so an unusual ref name cannot be
  # word-split or glob-expanded before reaching 'git describe'.
  git describe --tags "$(git rev-list --tags --max-count=1)" --abbrev=0
}
| true
|
2d894e293fea93f603ebe701ad0f802bb9ec0a91
|
Shell
|
afunsten/oil
|
/spec/explore-parsing.test.sh
|
UTF-8
| 259
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
# Tests that explore parsing corner cases.
# (Oil spec-test format: '####' starts a case, '## stdout:' declares the
#  expected output. '${##}' is the length in digits of $#, the arg count.)
#### Length of length of ARGS!
func() { echo ${##}; }
func 0 1 2 3 4 5 6 7 8
## stdout: 1
#### Length of length of ARGS! 2 digit
func() { echo ${##}; }
func 0 1 2 3 4 5 6 7 8 9
## stdout: 2
| true
|
37b1af11d604fbec0be9e8412ee56c4c612387c7
|
Shell
|
saodasinei/shell_script
|
/d2.sh
|
UTF-8
| 3,766
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Path of the environment-variable file managed by this installer
ENV_DIR='/etc/profile.d/myenv.sh'
# Root directory for "green" (unpack-in-place) installs
SOFTWARE_DIR='/opt/software/'
# Directory holding the downloaded install archives
DOWNLOAD_DIR='/opt/download/'
# Deletes a previously appended config section from $ENV_DIR, identified by
# the marker comment in $1 and spanning the line count given in $2.
function removeEnvVarIfExists(){
  if [ $# -lt 2 ]
  then
    echo 'no sign when remove env variables'
    exit -1
  fi
  # NOTE(review): GNU sed parses '-rin' as -r plus -i with backup suffix
  # 'n', leaving a "myenv.shn" backup behind -- confirm plain '-i' was not
  # intended. Also '.*?' is NOT a lazy match in sed regexes.
  sed -rin "/^#.*?$1/,+$2d" $ENV_DIR
  echo "env variables [ $1 ] removed"
}
# Deletes any previously extracted install directory under $SOFTWARE_DIR
# whose name matches the prefix given in $1.
function removeSrcIfExists(){
  if [ $# -lt 1 ]
  then
    echo 'no sign when remove env src'
    exit -1
  fi
  RST=`ls $SOFTWARE_DIR|grep $1`
  if [[ $RST ]]
  then
    # NOTE(review): $RST may hold several matches (one per line) and is
    # expanded unquoted; only the first word gets the $SOFTWARE_DIR prefix,
    # later words become relative paths -- confirm this is intended.
    rm -rf $SOFTWARE_DIR$RST
    echo $SOFTWARE_DIR$1' removed'
  else
    echo "no [ $1 ] dir"
  fi
}
# Extracts the first archive in $DOWNLOAD_DIR whose name matches the prefix
# in $1 into $SOFTWARE_DIR, then renames the versioned directory (e.g.
# jdk1.8.0_x) to exactly $SOFTWARE_DIR/$1.
function tarxTo(){
  if [ $# -lt 1 ]
  then
    echo "no sign when tar -zxf $1"
    exit -1
  fi
  RST=(`ls $DOWNLOAD_DIR|grep $1`)
  if [ ${#RST[@]} -gt 0 ]
  then
    RST=${RST[0]}
    tar -zxf $DOWNLOAD_DIR$RST -C $SOFTWARE_DIR
    eval mv $SOFTWARE_DIR$1'*' $SOFTWARE_DIR$1
    echo "$1 decompressed"
  else
    echo "no $1 source in "$DOWNLOAD_DIR
  fi
}
# Appends the ';'-separated list of environment-variable lines in $1 to
# myenv.sh. Entries encode spaces as '-' (so they survive word-splitting);
# the final 's/-/ /g' turns them back into spaces.
function addEnvVar(){
  echo '========== add env variables ========='
  DIR="$1"
  DIR=${DIR//;/ }
  for item in $DIR
  do
    sed -in '$i'$item $ENV_DIR
    echo $item' appended'
  done
  sed -in '${x;p;x}' $ENV_DIR
  echo '======================================='
  # NOTE(review): this global replace also rewrites every literal '-' in the
  # stored values (e.g. option flags) -- confirm that is acceptable.
  sed -in 's/-/ /g' $ENV_DIR
  # Re-activate the environment after each batch of new variables.
  source /etc/profile
}
############################################ Check the myenv.sh file ############################################
# Check whether myenv.sh exists; create it if it does not.
ENV_DIR="/etc/profile.d/"
RST=`ls $ENV_DIR|grep myenv.sh`
if [[ -z $RST ]]
then
  ENV_DIR=$ENV_DIR'myenv.sh'
  eval touch $ENV_DIR
  # A freshly created empty file cannot be edited with sed's insert
  # commands, so seed it with one line via output redirection first.
  echo '#[end]'>$ENV_DIR
  echo $ENV_DIR' created'
else
  ENV_DIR=$ENV_DIR'myenv.sh'
  echo $ENV_DIR' existed'
fi
############################################### JDK install #####################################################
JAVA_ENV="#jdk;export-JAVA_HOME=$SOFTWARE_DIR"'jdk;export-PATH=$JAVA_HOME/bin:$PATH;export-CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar'
removeEnvVarIfExists jdk 4
removeSrcIfExists jdk
tarxTo jdk
addEnvVar $JAVA_ENV
############################################# Hadoop install ####################################################
HADOOP_ENV='#hadoop;export-HADOOP_HOME=/opt/software/hadoop;export-PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH;export-HDFS_NAMENODE_USER=root;export-HDFS_DATANODE_USER=root;export-HDFS_SECONDARYNAMENODE_USER=root;export-YARN_RESOURCEMANAGER_USER=root;export-YARN_NODEMANAGER_USER=root'
removeEnvVarIfExists hadoop 8
removeSrcIfExists hadoop
tarxTo hadoop
addEnvVar $HADOOP_ENV
################## Hadoop configuration files
cp /root/script/conf/* /opt/software/hadoop/etc/hadoop
cd /opt/software/hadoop
mkdir data
./bin/hdfs namenode -format
################################################ Hive install ########################################
HIVE_ENV='#hive;export-HIVE_HOME=/opt/software/hive312;export-PATH=$HIVE_HOME/bin:$PATH'
removeSrcIfExists hive
tarxTo hive
addEnvVar $HIVE_ENV
################# Hive configuration
cd /opt/software/hive/conf
mv hive-default.xml.template hive-default.xml
cp /root/script/conf/hive-site.xml ./
cd /opt/software/hive/lib
# Bug fix: the original line 'RST=ls|grep ...' ran the assignment in a
# subshell piped into grep, so RST was never set and the stale guava jar was
# never removed. Capture the lookup with a command substitution instead
# (and escape the '.' before "jar" so it matches literally).
RST=$(ls | grep '^guava.*\.jar')
rm -f $RST
cp /opt/software/hadoop/share/hadoop/common/lib/guava-27.0-jre.jar /opt/software/hive/lib
| true
|
9d3eafa22c1d94cbb2cf4ca4a4f2b50c4eb64cb3
|
Shell
|
c4s4/babel
|
/bin/vscodium-build
|
UTF-8
| 1,500
| 4.1875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Script to install VScodium in ${APP_DIR}/vscodium/${version} directory
set -e
TMP_DIR=/tmp
VSC_DIR=${APP_DIR}/vscodium
VSC_REPO=git@github.com:VSCodium/vscodium.git
# test if version was passed on command line; default to the latest tag
VERSION=$1
if [[ $VERSION == "" ]];
then
  VERSION=$(git ls-remote --tags ${VSC_REPO} | tail -n1 | sed -En '1!q;s/^[[:xdigit:]]+[[:space:]]+refs\/tags\/(.+)/\1/gp')
fi
# test if version already installed
if [ -d ${VSC_DIR}/${VERSION} ];
then
  echo "Version ${VERSION} already installed"
  exit 1
fi
# download archive
ARCHIVE=vscodium-${VERSION}.tar.gz
DOWNLOAD_URL="https://github.com/VSCodium/vscodium/releases/download/${VERSION}/VSCodium-linux-x64-${VERSION}.tar.gz"
wget -O "${TMP_DIR}/${ARCHIVE}" "${DOWNLOAD_URL}"
# untar archive
cd ${TMP_DIR}
mkdir vscodium-$VERSION
tar zxvf $ARCHIVE -C vscodium-$VERSION
rm $ARCHIVE
# install archive; chrome-sandbox must be root-owned and setuid (4755) for
# Chromium's sandbox to start
if [ -w "${VSC_DIR}" ]
then
  mv vscodium-${VERSION} ${VSC_DIR}/${VERSION}
  sudo chown -R root: ${VSC_DIR}/${VERSION}/chrome-sandbox
  sudo chmod 4755 ${VSC_DIR}/${VERSION}/chrome-sandbox
else
  sudo mv vscodium-${VERSION} ${VSC_DIR}/${VERSION}
  sudo chown -R root: ${VSC_DIR}/${VERSION}
  sudo chmod 4755 ${VSC_DIR}/${VERSION}/chrome-sandbox
fi
# delete older version if more than two
# NOTE(review): the loop runs while more than 3 entries exist, so THREE
# versions are kept -- confirm whether "more than two" (i.e. -gt 2) was meant.
while [ $(ls $VSC_DIR | wc -l) -gt 3 ];
do
  OLDER=${VSC_DIR}/$(ls ${VSC_DIR} | sort -V | head -n 1)
  if [ -w "${VSC_DIR}" ]
  then
    rm -rf $OLDER
  else
    sudo rm -rf $OLDER
  fi
done
echo "done"
| true
|
0a4e164d5ed5056b96bbc75e014ef99d6e81929e
|
Shell
|
Gavinok/scripts
|
/surf/mypassmenu
|
UTF-8
| 751
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# passmenu variant for surf: pick a pass(1) entry with dmenu, copy the
# password to the clipboard (or type it with --type) and put the login
# name on the X selection.
winid=$(xdotool getactivewindow)
shopt -s nullglob globstar
typeit=0
if [[ $1 == "--type" ]]; then
  typeit=1
  shift
fi
# Strip the store prefix and .gpg suffix to get display names.
prefix=${PASSWORD_STORE_DIR-~/.password-store}
password_files=( "$prefix"/**/*.gpg )
password_files=( "${password_files[@]#"$prefix"/}" )
password_files=( "${password_files[@]%.gpg}" )
password=$(printf '%s\n' "${password_files[@]}" | uniq | dmenu -w "$winid" -l 5 "$@" )
[[ -n $password ]] || exit 0
#copy username (first "login:" line of the entry) to clipboard
pass show "$password" | grep login: | sed 's/login: //' | awk 'NR==1' | xclip
if [[ $typeit -eq 0 ]]; then
  pass show -c "$password" 2>/dev/null
else
  # Type only the first line (the password itself), via xdotool.
  pass show "$password" | { IFS= read -r pass; printf %s "$pass"; } |
  xdotool type --clearmodifiers --file -
fi
#vim:ft=sh
| true
|
f570e1bfe3ee1e34e4ad149de5ddabef5927bdc2
|
Shell
|
NHKWorldForks/plugin.video.nhkworldtv
|
/build/copy_local_wsl.sh
|
UTF-8
| 561
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Copies the Plugin from the dist folder to the local Kodi directory - used for local development on Windows 10 WSL2 (Ubuntu)
# Build the Plugin (defines/uses $KODI_VERSION)
. ./build_matrix.sh
# Set the local path (you need to adjust this)
local_kodi=/mnt/c/Users/stefa/AppData/Roaming/Kodi
# Change the <reuselanguageinvoker> to false so code changes are picked up
# without restarting Kodi
sed -i "s/>true</>false</g" $KODI_VERSION/plugin.video.nhkworldtv/addon.xml
# Delete existing add on folder
rm -rf $local_kodi/addons/plugin.video.nhkworldtv
# Copy the new build
cp -r $KODI_VERSION/plugin.video.nhkworldtv $local_kodi/addons
| true
|
e62c53f8fe7e49855357e4dda22414eff415c564
|
Shell
|
danieljahn/dotfiles
|
/vscode/setup.sh
|
UTF-8
| 251
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Symlinks VS Code settings into place and installs every extension listed
# in extensions.txt (one extension id per line). Relies on the _link and
# echo_info helpers from helpers.sh.
. ../helpers.sh
_link settings.json "$HOME/Library/Application Support/Code/User/settings.json"
# Idiom fixes: drop the useless 'cat |' (which also ran the loop body in a
# subshell) and read with -r so backslashes in ids are not mangled.
while IFS= read -r extension
do
  echo_info "Installing extension $extension"
  code --install-extension "$extension"
done < extensions.txt
| true
|
c844bec7b1b9e6e2e4e1c32ff36bb2f56a7c82bf
|
Shell
|
geoneric/peacock
|
/script/build_all.sh
|
UTF-8
| 2,441
| 4.46875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
# The purpose of this script is to test the build of all versions of all
# packages with the current set compiler (default in PATH or explicitly set
# in CC/CXX environment variables).
# Resolve this script's directory so the repo root can be derived from it.
cwd=$(cd "$(dirname "${BASH_SOURCE[0]}" )" && pwd)
peacock_root=$cwd/..
# Prints command-line usage for this script. (The leading backslash-newline
# inside the double quotes is a line continuation, so output starts at
# "usage:".)
function print_usage()
{
echo -e "\
usage: $0 [-h] <download_dir> <prefix>
-h Show (this) usage information.
download_dir Directory to store downloaded files.
prefix Directory to install the resulting files."
}
# Parses options and positional arguments into the globals 'download_dir'
# and 'prefix'. Prints usage and exits for -h (status 0), for unknown
# options, or for a wrong argument count (status 2).
parse_commandline()
{
  local flag
  while getopts h flag; do
    case "$flag" in
      h)
        print_usage
        exit 0
        ;;
      *)
        print_usage
        exit 2
        ;;
    esac
  done
  shift $((OPTIND - 1))
  if [ "$#" -ne 2 ]; then
    print_usage
    exit 2
  fi
  download_dir=$1
  prefix=$2
}
# Configures and builds one package version with CMake in its own build dir.
# Arguments: $1 package name, $2 version.
# Reads the globals: download_dir, prefix, peacock_root.
function build_peacock_package()
{
    local package_name=$1
    local version=$2
    # Bug fix: 'options' used to be an implicit global that was only ever
    # appended to, so every package after the first inherited the previous
    # packages' -Dbuild_*/-D*_version flags. Make it local and start empty
    # on every call.
    local -a options=()
    echo "build package: $package_name-$version"
    if [[ $OSTYPE == "cygwin" ]]; then
        options+=("-GUnix Makefiles")
        options+=("-DCMAKE_MAKE_PROGRAM=mingw32-make")
    fi
    if [[ $OSTYPE == "linux-gnu" ]] && [[ "$CC" == *mingw* ]]; then
        # Cross-compiling, need toolchain file.
        options+=("-DCMAKE_TOOLCHAIN_FILE=$peacock_root/cmake/toolchain/mingw-linux.cmake")
    fi
    # TODO This partly overwrites earlier versions.
    # Add option to not install built packages.
    # options+=("-Dpeacock_build_only=true")
    options+=("-Dpeacock_download_dir=$download_dir")
    options+=("-Dpeacock_prefix=$prefix")
    # options+=("-DCMAKE_VERBOSE_MAKEFILE=ON")
    options+=("-Dbuild_$package_name=true")
    options+=("-D${package_name}_version=$version")
    build_root=$package_name-$version
    mkdir -p $build_root
    cd $build_root
    rm -fr *
    cmake "${options[@]}" $peacock_root
    cmake --build . --target all
    cd ..
}
# Iterates over every package/version directory under cmake/package and
# builds each one via build_peacock_package.
function build_peacock_packages()
{
    local package_root version_root package_name version
    # Idiom fix: iterate with globs instead of parsing 'ls' output, which
    # breaks on unusual file names.
    for package_root in "$peacock_root"/cmake/package/*; do
        if [ -d "$package_root" ]; then
            package_name=${package_root##*/}
            for version_root in "$package_root"/*; do
                if [ -d "$version_root" ]; then
                    version=${version_root##*/}
                    build_peacock_package "$package_name" "$version"
                fi
            done
        fi
    done
}
# Bug fix: pass arguments as "$@" so paths containing spaces survive intact
# (the original's unquoted $* re-split them).
parse_commandline "$@"
build_peacock_packages
| true
|
aca2715bcd6eb3c60ad9f2c90d4c92ad231afb18
|
Shell
|
dankamongmen/s-i
|
/scripts/git-setup
|
UTF-8
| 703
| 3.625
| 4
|
[] |
no_license
|
#!/bin/sh
set -e
# Echoes a command line, then executes it via eval -- a visible "trace"
# helper for the git-config commands below.
runshow () {
  echo "$@"
  eval "$@"
}
config_git_anon () {
  # rewrite urls to use git:// (anonymous, faster checkouts)
  runshow git config --global 'url.git://git.debian.org/d-i/.insteadOf' 'git+ssh://git.debian.org/git/d-i/'
}
config_git_auth () {
  config_git_anon # speeds up checkouts
  # use ssh for pushes
  # NOTE(review): this maps the git+ssh:// prefix onto itself (base and
  # pushInsteadOf value are identical), which is a no-op -- was the git://
  # base intended here?
  runshow git config --global 'url.git+ssh://git.debian.org/git/d-i/.pushInsteadOf' 'git+ssh://git.debian.org/git/d-i/'
}
# Pick anonymous vs authenticated URL rewrites based on the remote scheme.
# Note: $URL may hold several lines ("url (fetch)" / "url (push)"); the case
# patterns only inspect the leading characters, which suffices here.
URL=$(LANG=C git remote -v | cut -f2)
case "$URL" in
    git@*)
        config_git_auth
        ;;
    http:*|https:*)
        config_git_anon
        ;;
    *)
        # Bug fix: the message claimed "svn url", but this script inspects
        # git remotes.
        echo "unknown git url: $URL" >&2
        exit 1
        ;;
esac
echo ""
echo "Your git is now configured to checkout d-i optimally."
echo "Now run 'mr -p checkout'"
|
afc53d5609018d93bd2b6e2421626f5fb09659e2
|
Shell
|
loony-bean/textplots-rs
|
/run_examples.sh
|
UTF-8
| 178
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Runs every Rust example under examples/ with cargo, aborting on the first
# failure (set -e).
set -e
for d in examples/*
do
    # Idiom fix: derive the example name with parameter expansion instead of
    # spawning 'echo | sed' per file; quote expansions throughout.
    example=${d#examples/}
    example=${example%.rs}
    echo "Running example $example..."
    cargo run --example "$example"
done
| true
|
6b8f41de1193abe3bf798969ea44e9e6120e0865
|
Shell
|
PerinatalLab/SE_MFR_FAMILIES
|
/convert_cousinfamilies_topairs.sh
|
UTF-8
| 1,087
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Expands family rows (comma-separated member lists per family) into all
# within-family child pairs whose mothers (resp. fathers) differ, i.e.
# maternal- and paternal-side cousin-candidate pairs.
## remove 'next' from the first line if you have no header
## NOTE(review): no 'next' appears in the awk programs below as written, so
## the header row of the input is also processed -- confirm which version of
## the comment/code is current.
awk 'NR==1{print "fid" FS "mom1" FS "kid1" FS "mom2" FS "kid2" FS "dad1" FS "dad2" FS "birthyear1" FS "birthyear2" FS "dadage1" FS "dadage2" FS "momage1" FS "momage2"}
{split($3, m, ","); split($4, k, ","); split($5, d, ","); split($6, by, ",");
split($7, da, ","); split($8, ma, ",");
for(i in k){
for(j=i+1; j in k; j++){
if(m[i]!=m[j]){
print NR-1, m[i], k[i], m[j], k[j],
d[i], d[j], by[i], by[j],
da[i], da[j], ma[i], ma[j]
}
}
}
}' input_file_maternal.csv > cousins_pairs_maternal.csv
awk 'NR==1{print "fid" FS "dad1" FS "kid1" FS "dad2" FS "kid2" FS "mom1" FS "mom2" FS "birthyear1" FS "birthyear2" FS "dadage1" FS "dadage2" FS "momage1" FS "momage2"}
{split($3, d, ","); split($4, k, ","); split($5, m, ","); split($6, by, ",");
split($7, da, ","); split($8, ma, ",");
for(i in k){
for(j=i+1; j in k; j++){
if(d[i]!=d[j]){
print NR-1, d[i], k[i], d[j], k[j],
m[i], m[j], by[i], by[j],
da[i], da[j], ma[i], ma[j]
}
}
}
}' input_file_paternal.csv > cousins_pairs_paternal.csv
| true
|
179f47afe84d4c6fbd96081b61346149c2331719
|
Shell
|
scubasonar/openrov-software
|
/linux/arduino/firmware-upload.sh
|
UTF-8
| 1,405
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Name of this script, used as the error-message prefix by error_exit.
# Bug fix: quote "$0" so a path containing spaces survives word-splitting.
PROGNAME=$(basename "$0")
# ----------------------------------------------------------------
# Prints "<progname>: <message>" on stderr and terminates the
# script with status 1. The message defaults to "Unknown Error".
# ----------------------------------------------------------------
error_exit ()
{
	printf '%s: %s\n' "${PROGNAME}" "${1:-Unknown Error}" 1>&2
	exit 1
}
# Flash .build/uno/firmware.hex to the ATmega328 over /dev/ttyO1, retrying
# up to 9 times with a staggered board reset racing each attempt.
cd $1 || error_exit "$LINENO:"
#the first time you call 'ino upload' it has to configure itself. That takes longer and interferes with the reset. therefore we start it before hand and let it fail.
echo Setting up uploader 1>&2
#/opt/openrov/linux/reset.sh 1>&2
#ino upload -m atmega328 -p /dev/ttyO1 1>&2
COUNTER=0
OUTPUT=`ino upload -p /dev/ttyO1 2>&1`
#OUTPUT=`avrdude -P /dev/ttyO1 -c arduino -D -vvvv -p m328p -U flash:w: .build/uno/firmware.hex 2>&1`
while [ $COUNTER -lt 9 ]; do
echo $COUNTER
# Reset the board slightly after the upload starts (0.00..0.08 s stagger).
# NOTE(review): the trailing '1>&2' after '&' is parsed as a separate empty
# command with a redirect, i.e. a no-op -- confirm the intended redirection.
(sleep 0.0$COUNTER && /opt/openrov/linux/reset.sh) & 1>&2
avrdude -P /dev/ttyO1 -c arduino -D -v -p m328p -U flash:w:.build/uno/firmware.hex 2>&1
# /opt/openrov/linux/reset.sh 1>&2
# sleep 0.4
# echo $OUTPUT | grep "bytes of flash verified"
if [ $? -eq 0 ]
then
echo upload successfull! 1>&2
#echo $OUTPUT 1>&2
exit 0
fi
COUNTER=`expr $COUNTER + 1`
echo upload failed, trying again. 1>&2
# echo $OUTPUT 1>&2
# sleep 1
done
error_exit "Failed to upload after numerous tries. Aborting."
| true
|
e7ffd4ff95cf5ffba41a7f87cf1da2c62d8a7557
|
Shell
|
junxie01/RecieverFunctions
|
/Cheng_codes/Prep_RFs.sh
|
UTF-8
| 1,464
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
#RMS Feb 2017
#Following Cheng's workflow in preparing data for RF analysis
#Enter directory structure created by obspyDMT, make a new directory for the RFs, copy the
#SAC files here, filter at the desired passband.
# NOTE(review): despite the names, $freqmin/$freqmax are passed to SAC's bp
# command as the pole count (p) and pass count (n), not as corner
# frequencies (the corners are the literal 0.01/0.1) -- confirm intent.
freqmin=2
freqmax=4
for event in $( ls -d 20* ); do
echo In $event
cd $event
cd BH_VEL
rm -r rf
mkdir -p rf
cp *BHZ rf
cp *BHR rf
cp *BHT rf
cd rf
#-------------------------------------------------------------------------------------
#look at each sacfile, filter in the desired passband and cut around the P arrival
#(USER1 holds the predicted P arrival time; cut -60/+80 s around it)
for sacfile in $( ls *BH* ); do
echo $sacfile
parrival=`saclst USER1 f $sacfile | awk '{print $2}'`
parr_lower=`echo $parrival | awk -F" " '{print ($1-60)}'`
parr_upper=`echo $parrival | awk -F" " '{print ($1+80)}'`
#--------------------------------------------------------------------------------------
#In this case, we're limiting the upper passband to 1/freqmin seconds (typically 5 or 10?)
sac<<EOF
cut $parr_lower $parr_upper
read $sacfile
taper w 0.1
rmean
rtrend
bp co 0.01 0.1 p $freqmin n $freqmax
write $sacfile.filt
quit
EOF
rm $sacfile
done
#--------------------------------------------------
#Creates a list of unique station names, followed by the character 'n'
#This is needed as input to the program 'burgays'
#uses the uniq program, which outputs all lines exactly once (avoids repeats)
ls *BHZ.filt | awk -F. '{print $3}' | uniq > rstation
echo n >> rstation
cd ../../../
done
| true
|
e44234d1ff8ea68ce7273242e5ef5e0eb34dd8f6
|
Shell
|
jkelly467/dotfiles
|
/bash/bashrc
|
UTF-8
| 297
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Load the interactive profile only when running interactively ($PS1 is set).
[ -n "$PS1" ] && source ~/.bash_profile
#THIS MUST BE AT THE END OF THE FILE FOR SDKMAN TO WORK!!!
export SDKMAN_DIR="/Users/jkelly/.sdkman"
[[ -s "/Users/jkelly/.sdkman/bin/sdkman-init.sh" ]] && source "/Users/jkelly/.sdkman/bin/sdkman-init.sh"
# Put yarn's globally-installed binaries on PATH.
export PATH="$HOME/.yarn/bin:$PATH"
| true
|
e8697ca4faee02266e4d6e9005400d57e016f385
|
Shell
|
justintoo/rose-sh
|
/applications/claws_mail/claws_mail.sh
|
UTF-8
| 4,233
| 2.96875
| 3
|
[] |
no_license
|
# TOO1 (2/10/2014): We need to use gtk 2.x since Claws Mail 3.7.9+ does not support gtk 3.x,
#
# Claws mail bug report:
# http://www.thewildbeast.co.uk/claws-mail/bugzilla/show_bug.cgi?id=2371
# Build dependencies (overridable from the environment via the := default).
: ${CLAWS_MAIL_DEPENDENCIES:=curl gtk224 libetpan zlib}
# Default ./configure flags.  This is ONE multi-line default value inside a
# ${VAR:=...} expansion, so no comments may be placed before the closing '}'.
: ${CLAWS_MAIL_CONFIGURE_OPTIONS:=
--disable-networkmanager
--disable-jpilot
--disable-startup-notification
--enable-enchant
--disable-perl-plugin
--disable-python-plugin
--enable-gnutls
--enable-ipv6
--disable-maemo
--disable-pdf_viewer-plugin
--disable-gdata-plugin
--disable-bogofilter-plugin
--disable-bsfilter-plugin
--disable-clamd-plugin
--disable-notification-plugin
--disable-fancy-plugin
--disable-geolocation-plugin
}
#-------------------------------------------------------------------------------
download_claws_mail()
#-------------------------------------------------------------------------------
{
info "Downloading source code"
set -x
clone_repository "${application}" "${application}-src" || exit 1
cd "${application}-src/" || exit 1
git checkout origin/rhel6 || exit 1
set +x
}
#-------------------------------------------------------------------------------
install_deps_claws_mail()
#-------------------------------------------------------------------------------
{
install_deps ${CLAWS_MAIL_DEPENDENCIES} || fail "Could not install dependencies"
}
#-------------------------------------------------------------------------------
patch_claws_mail()
#-------------------------------------------------------------------------------
{
info "Patching not required"
}
#-------------------------------------------------------------------------------
configure_claws_mail__rose()
#-------------------------------------------------------------------------------
{
info "Configuring application for ROSE compiler='${ROSE_CC}'"
#-----------------------------------------------------------------------------
set -x
#-----------------------------------------------------------------------------
ACLOCAL_PATH="${ROSE_SH_DEPS_PREFIX}/share/aclocal:${ROSE_SH_DEPS_PREFIX}/share/libtool/libltdl:${ROSE_SH_DEPS_PREFIX}/share/libtool/config:${ACLOCAL_PATH}" \
./autogen.sh || exit 1
CC="${ROSE_CC}" \
CPPFLAGS="$CPPFLAGS" \
CFLAGS="$CFLAGS" \
LDFLAGS="$LDFLAGS" \
./configure \
--prefix="$(pwd)/install_tree" \
${CLAWS_MAIL_CONFIGURE_OPTIONS} \
|| exit 1
#-----------------------------------------------------------------------------
set +x
#-----------------------------------------------------------------------------
}
#-------------------------------------------------------------------------------
configure_claws_mail__gcc()
#-------------------------------------------------------------------------------
{
info "Configuring application for default compiler='${CC}'"
#-----------------------------------------------------------------------------
set -x
#-----------------------------------------------------------------------------
CC="${CC}" \
CPPFLAGS="$CPPFLAGS" \
CFLAGS="$CFLAGS" \
LDFLAGS="$LDFLAGS" \
./configure \
--prefix="$(pwd)/install_tree" \
${CLAWS_MAIL_CONFIGURE_OPTIONS} \
|| exit 1
#-----------------------------------------------------------------------------
set +x
#-----------------------------------------------------------------------------
}
#-------------------------------------------------------------------------------
compile_claws_mail()
#-------------------------------------------------------------------------------
{
info "Compiling application"
#-----------------------------------------------------------------------------
set -x
#-----------------------------------------------------------------------------
make -j${parallelism} || exit 1
make -j${parallelism} install || exit 1
#-----------------------------------------------------------------------------
set +x
#-----------------------------------------------------------------------------
}
| true
|
f802a0f6d12ef20e8bc3dc4a644dc43457a5e997
|
Shell
|
zaitrarrio/mingw-distro
|
/pcre.sh
|
UTF-8
| 754
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/sh
# Build a static PCRE 8.40 for x86_64 MinGW and pack the result with 7z.
# Helpers (extract_file, fail_with, $X_MAKE_JOBS) come from the sourced
# distro script.
# fix: use the POSIX '.' builtin — 'source' is a bashism and is not
# guaranteed to exist under #!/bin/sh (e.g. dash).
. ./0_append_distro_path.sh
extract_file pcre-8.40.tar
# Apply the local color patch before configuring.
patch -d /c/temp/gcc/pcre-8.40 -p1 < pcre-color.patch
cd /c/temp/gcc
mv pcre-8.40 src
mkdir build dest
cd build
../src/configure --build=x86_64-w64-mingw32 --host=x86_64-w64-mingw32 --target=x86_64-w64-mingw32 \
--prefix=/c/temp/gcc/dest --disable-shared --enable-utf --enable-unicode-properties --enable-pcre16 --enable-pcre32 \
"CFLAGS=-s -O3" "CXXFLAGS=-s -O3" || fail_with pcre 1 - EPIC FAIL
make $X_MAKE_JOBS all || fail_with pcre 2 - EPIC FAIL
make install || fail_with pcre 3 - EPIC FAIL
cd /c/temp/gcc
rm -rf build src
mv dest pcre-8.40
cd pcre-8.40
# Trim files we do not ship, and expose pcreposix.h as regex.h.
rm -rf bin/pcre-config lib/pkgconfig lib/*.la share
cp include/pcreposix.h include/regex.h
7z -mx0 a ../pcre-8.40.7z *
| true
|
53b736d4d20aefdb144cf33dcf854e7aff5a5abe
|
Shell
|
Gandalf-/ipfwd
|
/scripts/cpu_usage.sh
|
UTF-8
| 328
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Sample the CPU usage of the first process matching $1 every 0.5 s until
# interrupted (Ctrl-C), then print all samples and their average.

# On SIGINT: dump the collected samples and their mean, then exit.
finish() {
	echo
	echo "$results"
	awk '{s+=$1}END{print "average:",s/NR}' RS=" " <<< "$results"
	exit
}
trap finish INT

# fix: without this guard an empty pattern made pgrep match every process,
# so the script silently sampled an arbitrary PID.
if [[ -z "${1:-}" ]]; then
	echo "usage: $0 <process-name-pattern>" 1>&2
	exit 1
fi

results=""
while true; do
	pid="$(pgrep "$1" | head -n 1)"
	if [[ -n "$pid" ]]; then
		results="$results $(ps -p "$pid" -o '%cpu' | tail -n 1 | grep -o '[0-9.]*')"
	fi
	sleep 0.5
done
| true
|
76467ac8348ac52653d3d6981319f1428ed8c0bd
|
Shell
|
doumeki/grub4dos-build
|
/build-page.sh
|
UTF-8
| 1,454
| 3.359375
| 3
|
[
"MIT",
"TMate"
] |
permissive
|
#! /bin/bash
# Generate a Hexo-style markdown release page for a grub4dos build artifact:
# front-matter (title/size/date/commit/md5/per-binary md5 list) followed by
# the git changelog and source links.  Runs inside GitHub Actions and relies
# on GITHUB_SHA / GITHUB_SERVER_URL / GITHUB_REPOSITORY / COMMIT_RANGE.
COMMIT=${GITHUB_SHA:0:8}
VER=`cat grub4dos_version`
DATE=`date -u +%Y-%m-%d`
#DATE=`git log -1 --pretty=format:%ad --date=format:%Y-%m-%d --no-merges $GITHUB_SHA`
BIN=grub4dos-${VER}-${DATE}.7z
NAME=`basename $BIN`
PAGE=`pwd`/${NAME%.7z}.md
BASE_URI=$GITHUB_SERVER_URL/$GITHUB_REPOSITORY
echo title: $NAME > $PAGE
du -sh $BIN|awk '{printf "size: %s\n",$1}' >>$PAGE
echo date: `stat -c %y $BIN` >> $PAGE
echo commit: $COMMIT >>$PAGE
echo downlink: http://dl.grub4dos.chenall.net/${NAME} >>$PAGE
echo categories: $VER >>$PAGE
echo tags: $VER >>$PAGE
md5sum $BIN|awk '{printf "md5: %s\n",$1}' >>$PAGE
echo files: >>$PAGE
# MD5 information for the executable files: extract them flat into bin/ and
# list each file's checksum as an indented YAML mapping entry.
7z e -y -r -ssc- -obin $BIN grub.exe grldr ipxegrldr *.efi
pushd bin && md5sum *|awk '{printf " %s: %s\n",$2,$1}' >> $PAGE && popd
echo --- >>$PAGE
echo >>$PAGE
echo "### 更新信息(update log):" >>$PAGE
# Changelog for the commit range; fall back to the latest commit only when
# COMMIT_RANGE is empty/invalid (e.g. a forced push).
git log --pretty=format:" * [%ad %h@%an ]($BASE_URI/commit/%H) %w(0,4,6)%B" --no-merges --date=format:%Y-%m-%d $COMMIT_RANGE >>$PAGE || git log --pretty=format:" * [%ad %h@%an ]($BASE_URI/commit/%H) %w(0,4,6)%B" -1 --no-merges --date=format:%Y-%m-%d HEAD >>$PAGE
echo >>$PAGE
echo >>$PAGE
echo "### 对应源码(sources):" >>$PAGE
echo " [查看源码(Browse source)]($BASE_URI/tree/$GITHUB_SHA)" >>$PAGE
echo >>$PAGE
echo " [下载源码(Download ZIP)]($BASE_URI/archive/$GITHUB_SHA.zip)" >>$PAGE
echo ===========$PAGE================
cat $PAGE
echo ==============Success===========
| true
|
f2cafb797a5f54d79eee2e7d0f1125516773c059
|
Shell
|
PlumpMath/cpp-precompiler
|
/generate_grammar_parser.sh
|
UTF-8
| 676
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run ANTLR 4.6 to generate the Python3 CPP14 parser (with listener/visitor
# and a custom context superclass) into ${WORKSPACE}/generated, then make the
# output directory an importable package.
function generate_parser()
{
    java -jar ${WORKSPACE}/lib/antlr-4.6-complete.jar \
         -o ${WORKSPACE}/generated \
         -listener \
         -visitor \
         -DcontextSuperClass=PrettyPrintParserRuleContext \
         -Dlanguage=Python3 \
         -lib ${WORKSPACE} \
         ${WORKSPACE}/grammar/CPP14.g4
    touch ${WORKSPACE}/generated/__init__.py
}
function prepend_parser_import_statement()
{
    # Prepend the import of the custom rule-context superclass to the
    # generated parser so the -DcontextSuperClass reference resolves.
    local parser="${WORKSPACE}/generated/CPP14Parser.py"
    local tmp
    tmp=$(mktemp)
    mv "$parser" "$tmp"
    echo "from src.PrettyPrintParserRuleContext import PrettyPrintParserRuleContext" | cat - "$tmp" > "$parser"
    # fix: the temp copy of the parser used to be left behind in $TMPDIR.
    rm -f "$tmp"
}
# Entry point: generate the ANTLR parser, then inject the custom context import.
generate_parser
prepend_parser_import_statement
| true
|
8b95d6bf00275e30bd33806e26c79f043283f374
|
Shell
|
intel/stacks
|
/dlrs/deprecated/clearlinux/ml-compiler/scripts/install_mkl.sh
|
UTF-8
| 1,924
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Abort on errors, unset variables, and failures anywhere in a pipeline.
set -e
set -u
set -o pipefail
# MKL-DNN release coordinates and install prefix.
export MKL_VERSION=mklml_lnx_2019.0.5.20190502
export MKLDNN=v0.19
export MKL_ROOT=/usr/local/lib/mkl
# Clear Linux style optimization flags; the Skylake-AVX512 tuning is appended
# to C/C++ flags only (not Fortran).
export CFLAGS="-O3 -falign-functions=32 -fno-lto -fno-math-errno -fno-semantic-interposition -fno-trapping-math "
export CXXFLAGS="-O3 -falign-functions=32 -fno-lto -fno-math-errno -fno-semantic-interposition -fno-trapping-math "
export FCFLAGS="$CFLAGS "
export FFLAGS="$CFLAGS "
export CFLAGS="$CFLAGS -march=skylake-avx512 -m64 -pipe"
export CXXFLAGS="$CXXFLAGS -march=skylake-avx512 -m64 -pipe"
export GCC_IGNORE_WERROR=1
run()
{
	# Print a banner: separator, timestamp, then each message argument on its
	# own line, then another separator.
	echo "=============================================================="
	# fix: never use command output as the printf FORMAT string — a '%' in
	# the date output would be interpreted as a format directive.
	printf '%s -- ' "$(date)"
	printf '%s\n' "$@"
	echo "=============================================================="
}
# Download and unpack the MKL-ML release into MKL_ROOT (skipped when the
# directory already exists).  The :? expansions guard the rm/mv against an
# empty variable wiping the wrong path.
install_mkl()
{
  if [ ! -d ${MKL_ROOT} ]; then
    mkdir -p ${MKL_ROOT} \
    && wget https://github.com/intel/mkl-dnn/releases/download/"$MKLDNN"/"$MKL_VERSION".tgz \
    && tar -xzf "$MKL_VERSION".tgz -C ${MKL_ROOT} \
    && mv "${MKL_ROOT:?}"/"${MKL_VERSION:?}"/* ${MKL_ROOT} \
    && rm -rf "${MKL_ROOT:?}"/"${MKL_VERSION:?}" \
    && rm "${MKL_VERSION}.tgz"
  fi
  echo "MKL libs are in directory: ${MKL_ROOT}"
}
# Main: install and report elapsed time in whole minutes.
start="$(date +%s)"
run "Install mkl" && install_mkl
end="$(date +%s)"
runtime=$(((end-start)/60))
run "Done in : $runtime minute(s)"
| true
|
eba7e3bcc065c9731dfc583b9a196b587c658c05
|
Shell
|
Filipovici-Andrei/scripts
|
/run_acceptance.sh
|
UTF-8
| 1,704
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh -x
# Provision (if needed) a Windows 2019 beaker host, install the passing
# puppet-agent build plus the Facter 4 repo on it, run the acceptance tests,
# and diff the failing test cases against the known master_fails baseline.
# NOTE(review): the script uses the <<< herestring (a bashism) under
# #!/bin/sh — it relies on /bin/sh being bash (as on macOS).
echo "Checking for available host..."
if test -z "$HOST" || ! grep -q "$HOST" <<< "$(floaty list --active)"
then
	echo 'Getting new machine'
	sleep 2
	beaker init -h windows2019-64a -o config/aio/options.rb
	export HOST=`beaker provision | grep 'Using available host' | awk {'print $4'} | xargs`
	echo 'Running tests on ' $HOST
	sleep 2
	export BP_ROOT=/Users/andrei.filipovici/projects/beaker-puppet
	# fix: fetch the SHA BEFORE printing it (it used to be echoed while still
	# unset), and pass GET via --request — the bare word 'GET' was previously
	# treated by curl as an extra URL to fetch.
	export SHA=`curl --fail --silent --request GET --url http://builds.delivery.puppetlabs.net/passing-agent-SHAs/puppet-agent-master`
	echo 'Puppet Agent SHA is ' $SHA
	echo 'Starting pre-suite'
	sleep 2
	bundle exec beaker exec pre-suite --pre-suite $BP_ROOT/setup/common/000-delete-puppet-when-none.rb,$BP_ROOT/setup/aio/010_Install_Puppet_Agent.rb
	echo 'Setting Facter 4 repo on machine ' $HOST
	sleep 2
	# Remote setup: switch the agent to facter-ng and replace the bundled
	# facter-ng gem contents with a git checkout of master.
	ssh Administrator@$HOST << 'EOF'
cmd /c puppet config set facterng true
cd /cygdrive/c/Program\ Files/Puppet\ Labs/Puppet/bin
mv facter-ng.bat facter.bat
facter_ng_version=`cmd /c facter-ng --version | tr -d '\r'`
cd /cygdrive/c/Program\ Files/Puppet\ Labs/Puppet/puppet/lib/ruby/gems/2.5.0/gems/facter-ng-$facter_ng_version
git init
git remote add origin https://github.com/puppetlabs/facter-ng.git
git fetch
git reset --hard origin/master
EOF
fi
# ssh Administrator@$HOST << 'EOF'
#git fetch
#git reset --hard origin/FACT-2345
#EOF
echo 'Running tests on ' $HOST
current_time=$(date +"%F:%T")
log="$current_time.log"
fails="$current_time.fails"
beaker exec tests 2>&1 | tee $log
# Extract the "Failed Tests Cases" section and compare with the baseline.
sed -n '/Failed Tests Cases:/,/Skipped Tests Cases:/p' $log | grep 'Test Case' | awk {'print $3'} > $fails
diff --suppress-common-lines -y master_fails $fails
| true
|
3bf75c2167e8401d41c47573c44f917cb50b3894
|
Shell
|
nafeu/oavp
|
/tools/export-app.sh
|
UTF-8
| 1,108
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Export the oavp Processing sketch as a macOS application bundle, grant it
# microphone access, and install it into OAVP_BUILD_PATH.
BUILD_SCRIPT=./configure-build.sh

# fix: verify the build configuration exists BEFORE sourcing it — the old
# order sourced the file first, so the existence check could never fire.
if [[ ! -f "$BUILD_SCRIPT" ]]; then
  echo "Missing build configuration script at $BUILD_SCRIPT" 1>&2
  exit 1
fi
source "$BUILD_SCRIPT"

if [[ -z "$OAVP_PROJECT_PATH" ]]; then
  echo "Must provide OAVP_PROJECT_PATH in environment" 1>&2
  exit 1
fi

if [[ -z "$OAVP_BUILD_PATH" ]]; then
  echo "Must provide OAVP_BUILD_PATH in environment" 1>&2
  exit 1
fi

echo Accessing project source from: $OAVP_PROJECT_PATH
echo Exporting application to: $OAVP_BUILD_PATH

# Layout of the raw Processing export and the pieces we post-process.
OAVP_SOURCE=$OAVP_PROJECT_PATH/src
OAVP_RESOURCES=$OAVP_PROJECT_PATH/resources
RAW_EXPORT=$OAVP_SOURCE/application.macosx64
RAW_APP=$RAW_EXPORT/src.app
APP_CONTENTS=$RAW_APP/Contents
APP_PLIST=$APP_CONTENTS/Info.plist

processing-java --sketch=$OAVP_SOURCE --platform=macosx --export

rm -r $RAW_EXPORT/source
# Declare microphone usage so macOS grants the bundle audio-input permission.
plutil -insert NSMicrophoneUsageDescription -string "Oavp needs access to the microphone for audio visualization" $APP_PLIST
cp $OAVP_RESOURCES/sketch.icns $APP_CONTENTS/Resources/sketch.icns
mv $RAW_APP $OAVP_BUILD_PATH/oavp.app
rm -r $RAW_EXPORT
open $OAVP_BUILD_PATH
| true
|
0f68611539070e468a1a55b10bb2e91488256bbd
|
Shell
|
apple/servicetalk
|
/servicetalk-test-resources/src/main/resources/io/servicetalk/test/resources/generate-certs.sh
|
UTF-8
| 3,164
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright © 2018 Apple Inc. and the ServiceTalk project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -eu
# Generates throwaway TLS test material: a server CA + localhost server cert,
# a client CA + localhost client cert, PKCS#12 keystores for both, and a
# shared truststore containing the two CA certificates.
# NOTE(review): leaf serials come from $RANDOM (collisions possible) and the
# 30-day CAs sign 100-year leaves — acceptable for test fixtures only.
# Server
## Generate a new, self-signed root CA for the server
openssl req -new -x509 -days 30 -nodes -subj "/CN=ServiceTalkTestServerRoot" -newkey rsa:3072 -sha512 -keyout server_ca.key -out server_ca.pem
## Generate a certificate/key for the server to use for Hostname Verification via localhost
openssl req -new -keyout localhost_server_rsa.key -nodes -newkey rsa:3072 -subj "/CN=localhost" | \
openssl x509 -req -CAkey server_ca.key -CA server_ca.pem -days 36500 -set_serial $RANDOM -sha512 -out localhost_server.pem
openssl pkcs8 -topk8 -inform PEM -outform PEM -in localhost_server_rsa.key -nocrypt -out localhost_server.key
## Import server key in to a PKCS#12 keystore
rm -f localhost_server.p12
openssl pkcs12 -info -export -out localhost_server.p12 \
-name localhost_server -in localhost_server.pem -inkey localhost_server.key \
-CAfile server_ca.pem -caname root -chain \
-passout pass:changeit
## Clean up intermediate files
rm server_ca.key localhost_server_rsa.key
# Client
## Generate a new, self-signed root CA for the client
openssl req -new -x509 -days 30 -nodes -subj "/CN=ServiceTalkTestClientRoot" -newkey rsa:3072 -sha512 -keyout client_ca.key -out client_ca.pem
## Generate a certificate/key for the server to use for Hostname Verification via localhost
openssl req -new -keyout localhost_client_rsa.key -nodes -newkey rsa:3072 -subj "/CN=localhost" | \
openssl x509 -req -CAkey client_ca.key -CA client_ca.pem -days 36500 -set_serial $RANDOM -sha512 -out localhost_client.pem
openssl pkcs8 -topk8 -inform PEM -outform PEM -in localhost_client_rsa.key -nocrypt -out localhost_client.key
## Import client key in to a PKCS#12 keystore
rm -f localhost_client.p12
openssl pkcs12 -info -export -out localhost_client.p12 \
-name localhost_client -in localhost_client.pem -inkey localhost_client.key \
-CAfile client_ca.pem -caname root -chain \
-passout pass:changeit
## Clean up intermediate files
rm client_ca.key localhost_client_rsa.key
# Generate our trust store
rm -f truststore.p12
keytool -import -alias server_ca -file server_ca.pem -keystore truststore.p12 -storepass changeit -noprompt
keytool -import -alias client_ca -file client_ca.pem -keystore truststore.p12 -storepass changeit -noprompt
# Print the stores for visual inspection in the build log.
keytool -list -v -storepass changeit -keystore localhost_server.p12
keytool -list -v -storepass changeit -keystore localhost_client.p12
keytool -list -v -storepass changeit -keystore truststore.p12
| true
|
15b722920566ffa789fddf9bcca85260ba06e31d
|
Shell
|
OCHA-DAP/hdx-infrastructure
|
/scripts/devtools/ckan-db-get-snapshot.sh
|
UTF-8
| 1,167
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Fetch the latest CKAN database backup from the backup server via rsync.
# Requires root and passwordless SSH; server/user/path variables come from
# the sourced devtoolconfig.sh.
if [ $(id -u) -ne 0 ]; then
	echo "You need to be root to run me.";
	exit 1;
fi

# includes the config file to define YOUR specific parameters
# (ckan and cps location, branches etc)
. $(which devtoolconfig.sh)

ckan_db_backup="NON-EXISTENT-BACKUP"

# Determine the newest backup file name on the remote server and store it in
# the global ckan_db_backup.
function get_last_backup_name {
	echo -en "Getting the last backup name: "
	# fix: test rsync's own exit status — previously the status of the
	# trailing awk in the pipeline was checked, so rsync failures were
	# silently ignored.
	local listing
	listing=$(rsync -e "ssh -o PasswordAuthentication=no" --list-only $ckan_backup_user@$ckan_backup_server:$ckan_backup_dir/$ckan_backup_prefix*)
	if [ $? -ne 0 ]; then
		echo -en "failed.\n";
		exit 1;
	fi
	ckan_db_backup=$(printf '%s\n' "$listing" | tail -n 1 | awk '{ print $5 }')
	echo -en "$ckan_db_backup\n"
}

# Download the selected backup into the local temp directory.
function get_backup {
	mkdir -p $ckan_tmp_dir
	echo -en "Getting the last backup... "
	rsync -av --progress -e "ssh -o PasswordAuthentication=no" $ckan_backup_user@$ckan_backup_server:$ckan_backup_dir/$ckan_db_backup $ckan_tmp_dir/
	if [ $? -ne 0 ]; then
		echo -en "failed. \n";
		exit 2;
	fi
}

# main
get_last_backup_name;
get_backup;

# Only print the summary when the script was executed (not sourced).
if [[ $_ != $0 ]]; then
	echo "Script completed."
	echo "The backup file is: $ckan_tmp_dir/$ckan_db_backup."
	echo "Please do not forget to remove the file after you got it."
	echo "Thank you."
fi
# fix: curr_dir is never set in this script; the old unquoted 'cd $curr_dir'
# expanded to a bare 'cd' and jumped to $HOME.  Only cd when it is defined.
if [ -n "${curr_dir:-}" ]; then
	cd "$curr_dir"
fi
| true
|
0808e3af6a15ddaabe868ce731f024bceed582b7
|
Shell
|
sdrik/athome
|
/mythtv/bin/mythmove
|
UTF-8
| 1,567
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# mythmove %JOBID% %CHANID% %STARTTIME% BACKEND
# Move a MythTV recording (file + database record) to another backend over
# ssh/scp, verifying the copy with md5sum before deleting the local file.
# Helpers and variables (checkdb, checkjob, die, findfile, myexec, $log,
# ${dir}, ${file}, ${hostname}, ${storagegroup}, ${chanid}, ${starttime})
# are provided by mythjoblib.sh.
. /usr/local/lib/mythjoblib.sh
# NOTE(review): the usage line lists BACKEND last, but the backend is read
# from $1 here — presumably mythjoblib consumes the remaining job arguments
# after the shift; confirm against mythjoblib.sh.
backend="$1"
shift
checkdb
checkjob
[ "${hostname}" = "${backend}" ] && die 3 "Cannot move to myself !!"
which klist >/dev/null || die 1 "Missing command : klist"
which kinit >/dev/null || die 1 "Missing command : kinit"
which ssh >/dev/null || die 1 "Missing command : ssh"
which scp >/dev/null || die 1 "Missing command : scp"
which md5sum >/dev/null || die 1 "Missing command : md5sum"
findfile
# Obtain/refresh Kerberos credentials, then check the backend is reachable.
klist -t || kinit -t ${HOME}/.krb5.keytab || die 1 "Cannot start kerberos"
ssh ${backend} true || die 1 "Cannot log into ${backend}"
filesize=$(stat -c %s "${dir}/${file}")
found=
# Pick the first storage-group directory on the target backend with enough
# free space (free blocks * block size, evaluated from remote 'stat -f').
while read destdir
do
freespace=$(eval echo $(ssh ${backend} "stat -f -c '\$((%a*%S))' \"${destdir}\""))
[ ${filesize} -lt ${freespace} ] || continue
found=yes
break
done <<EOF
$(myexec "SELECT dirname FROM storagegroup WHERE hostname='${backend}' AND groupname='${storagegroup}'")
EOF
[ -n "${found}" ] || die 4 "No destination dir found on ${backend}"
$log "Copying file..."
scp "${dir}/${file}" ${backend}:"${destdir}" || die 5 "File copy failed"
$log "Verifying file..."
(cd "${dir}" && md5sum "${file}") | ssh ${backend} "cd \"${destdir}\" && md5sum -c --status" || die 5 "Copied file do not match original file"
myexec "UPDATE recorded SET hostname='${backend}' WHERE chanid='${chanid}' AND starttime='${starttime}'" || die 5 "Update of record failed"
# NOTE(review): "${dir}/${file}.*.png" is quoted, so the glob does NOT expand
# and rm receives the literal pattern — preview thumbnails with numeric
# infixes may be left behind; confirm whether the quoting is intentional.
rm -f "${dir}/${file}" "${dir}/${file}.png" "${dir}/${file}.*.png" "${dir}/${file}.old"
$log "Record moved to ${backend}"
| true
|
7e25ed46d96410ec4906306bebd0da4ff50940fc
|
Shell
|
arhangeldim/ok
|
/iterate.sh
|
UTF-8
| 1,322
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Re-encode every GIF in the current directory to MP4 and back to GIF using a
# two-pass palette, timing the whole run; used to compare dithering options.
# Results:
# GIF source = 73Mb
#
# 1) default(sierra) = 62sec (77Mb=68+8,8)
# 2) bayer 2 = 32sec (75Mb=66+8,8)
# 3) bayer 0 = 34sec (80Mb=71+8,8)
# 4) none = 30sec (62Mb=53+8,8)
ffmpeg_bin="/Users/dmirty/Downloads/ffmpeg"
palette="/tmp/palette.png"
filters="fps=10"
out_folder="out/"
# $1 - gif input, $2 - mp4 output: x264 baseline profile, capped bitrate.
gif_to_mp4() {
$ffmpeg_bin -v error -i $1 -r 25 -an -preset veryfast -c:v libx264 -crf 25 -maxrate 784K -profile:v baseline -pix_fmt yuv420p -threads 0 -movflags +faststart -y $2
}
# $1 - mp4 input
# $2 - dithering
# $3 - gif output
# Two-pass conversion: first generate an optimal palette, then apply it with
# the requested paletteuse/dither filter.
mp4_to_gif() {
$ffmpeg_bin -v error -i $1 -vf "$filters,palettegen" -y $palette
$ffmpeg_bin -v error -i $1 -i $palette -lavfi "$filters [x]; [x][1:v] $2" -y $3
}
# NOTE(review): gdate is GNU date from Homebrew coreutils (macOS) — this
# script will not run unmodified on systems that only have plain 'date'.
start=`gdate +%s`
for f in *.gif; do
if test -f "$f"
then
echo "$f"
gif_to_mp4 $f $out_folder$f".mp4"
# bayer dithering
#mp4_to_gif $out_folder$f".mp4" "paletteuse=dither=bayer:bayer_scale=3" $out_folder$f"_mp4.gif"
# no dithering
mp4_to_gif $out_folder$f".mp4" "paletteuse=dither=none" $out_folder$f"_mp4.gif"
# default = sierra error diffusion
#mp4_to_gif $out_folder$f".mp4" "paletteuse" $out_folder$f"_mp4.gif"
fi
done
end=`gdate +%s`
elapsed=$((end-start))
echo "Elapsed: $elapsed sec"
| true
|
b27cc1522648eba5d2d2cfd15bda79dee4080db7
|
Shell
|
admix/admix-osx-conf
|
/.zshrc
|
UTF-8
| 2,821
| 2.53125
| 3
|
[] |
no_license
|
# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh
eval $(thefuck --alias)
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
ZSH_THEME="spaceship"
SPACESHIP_VENV_COLOR=green
# Plugins
# fix: consecutive 'plugins=(...)' assignments each REPLACED the array, so
# only zsh-autosuggestions was ever loaded — declare them all at once.
plugins=(git kubectl zsh-autosuggestions)
# User configuration
export PATH="/Users/alexander/dev/upwork/mobile_dev/flutter/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:."
export GOROOT=/usr/local/opt/go/libexec
export GOPATH=$HOME/.go
export PATH=$PATH:$GOROOT/bin:$GOPATH/bin
export PATH="/usr/local/opt/openssl/bin:$PATH"
export LDFLAGS="-L/usr/local/opt/zlib/lib"
export CPPFLAGS="-I/usr/local/opt/zlib/include"
export PKG_CONFIG_PATH="/usr/local/opt/zlib/lib/pkgconfig"
# export MANPATH="/usr/local/man:$MANPATH"
source $ZSH/oh-my-zsh.sh
# misc aliases
alias ph="cd /Users/alexander/dev/elixr_labs/philter"
alias upwork="cd /Users/alexander/dev/upwork/"
alias em="cd /Users/alexander/dev/ExactMotion/"
source ~/.virtualenvs/py2default/bin/activate
alias py2="deactivate && source ~/.virtualenvs/py2default/bin/activate"
alias py3="deactivate && source ~/.virtualenvs/py3default/bin/activate"
alias ipython="ipython --profile=admix" # config -> vim /Users/alexander/.ipython/profile_admix/ipython_config.py
# Git aliases
alias gst="git status"
alias gl="git lg"
alias gco="git checkout"
alias gp="git pull"
alias gaa="git add --all"
alias gm="git merge"
# docker-compose aliases
alias dc="docker-compose"
alias dcb="docker-compose build"
alias dcu="docker-compose up"
alias dcud="docker-compose up -d"
alias dcdw="docker-compose down"
alias dcps="docker-compose ps"
alias dcl="docker-compose logs"
alias dcrm="docker-compose rm"
# Open an interactive shell inside the named container.
function dex() {
docker exec -it "$1" /bin/bash
echo "$1"
}
#docker misc
alias dprune="docker volume prune"
alias dprune-all="docker image prune -a"
# ExactMotion misc
export PYTHONPATH=$PYTHONPATH:/Users/alexander/dev/ExactMotion/egbgrr/api:/Users/alexander/dev/ExactMotion/egbgrr/database:/Users/alexander/dev/ExactMotion/egbgrr/tools:/Users/alexander/dev/ExactMotion/egbgrr/utils:/Users/alexander/dev/ExactMotion/egbgrr/processors
export CONFIG_PATH=/Users/alexander/dev/ExactMotion/egbgrr/.config.local.yaml
export BASEDIR=/Users/alexander/dev/ExactMotion/egbgrr/
export CODEDIR=api
alias kh="cd /Users/alexander/dev/ExactMotion/ETL_projects"
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
export PATH="/usr/local/opt/icu4c/bin:$PATH"
export PATH="/usr/local/opt/icu4c/sbin:$PATH"
# Set Spaceship ZSH as a prompt
autoload -U promptinit; promptinit
prompt spaceship
| true
|
59752f033094c1eaecb03b3c9d3d41166d3b4562
|
Shell
|
Snap-A/HS4-RPi-USB-Alsa
|
/combiner_alsa.sh
|
UTF-8
| 1,112
| 4.125
| 4
|
[] |
no_license
|
#! /usr/bin/env bash
###
# Apply adjustments to HomeSeer 'root' image
#
# - Adjust speaker setup to use USB speaker instead of internal one.
#
####
# Platform: Raspian RPi 32bit
####

# Print a short help text describing the expected argument.
function Usage()
{
  echo "Usage: $0 [-h] <output path>"
  echo ""
  echo "This script adjusts the HomeSeer for RPi root fs image with"
  echo "new or changed files. This is done to in order to switch the default ALSA output to USB."
}

if [ "$1" == "-h" ]; then
  Usage
  exit
fi

# fix: quote the expansion (it used to break on paths containing spaces) and
# exit non-zero when the required argument is missing.
if [ -z "${1}" ]; then
  echo "Usage: $0 [-h] <output path>"
  exit 1
fi

ROOT_FS=${1}

###
# We need these paths
OUT_IMG=${ROOT_FS}
INP_IMG=${PWD}
###
INP_ALSA=${INP_IMG}/alsa-base_USB.conf

######
# MAIN
######

###
# Sanity: the target must look like an extracted root filesystem.
# fix: report the failure via a non-zero exit status.
if [ ! -e "${OUT_IMG}/etc/modprobe.d" ]; then
  echo "The directory '${OUT_IMG}' seem invalid. Did find root filesystem"
  exit 1
fi

###
# Add modprobe order of ALSA devices: USB first
if [ ! -f "${OUT_IMG}/etc/modprobe.d/alsa-base.conf" ]; then
  echo "Setting ALSA device order: USB first"
  cp "${INP_ALSA}" "${OUT_IMG}/etc/modprobe.d/alsa-base.conf"
fi

echo "Syncing image..."
sync
sleep 2
echo "Done"
| true
|
4654e987deb3a4c517b18e9ce1be2cdb633ed1a4
|
Shell
|
Aniruddha-Deb/DebPMM
|
/DebPMMSetup.sh
|
UTF-8
| 1,268
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Scaffold the standard DebPMM project directory tree and template files.
# fix: the script used 'read -p' (not supported by dash's /bin/sh) and relied
# on echo expanding "\n", which dash and bash disagree on — run under bash
# and emit explicit newlines with printf so output is identical everywhere.
set -e

codename=''

# get project codename
read -p "Enter project codename: " codename
codename_caps=$(echo "$codename" | tr 'a-z' 'A-Z')

mkdir -p "$codename/Documents/Team/Mail/Sent"
printf '%s\nTEAM MEMBERS\n' "$codename_caps" > "$codename/Documents/Team/Team_members.txt"
mkdir -p "$codename/Documents/Team/Mail/Received"
mkdir -p "$codename/Documents/Team/WhatsApp"
mkdir -p "$codename/Documents/Project"
cd "$codename/Documents/Project"
mkdir "Planning" "Implementation" "Conclusion"

# Template for the initialization plan (fix: "OBEJCTIVE" typo corrected).
initialization="$codename_caps
INITIALIZATION PLAN
IDEA:
OBJECTIVE:
TARGET MARKET:
COMPETITORS:
TITLE:
"
echo "$initialization" > "Initialization.txt"

cd "Planning"
touch "Bill_of_materials.xlsx" "Time_cost.xlsx" "Implementation_plan.txt"
mkdir "Construction" "Architecture_plan"
printf '%s\nIMPLEMENTATION PLAN\n' "$codename_caps" > "Implementation_plan.txt"

cd "../Implementation"
mkdir "Receipts"

cd "../Conclusion"
printf '%s\nCONCLUSION REPORT\n' "$codename_caps" > "Conclusion.txt"
printf '%s\nTAKEAWAYS\n' "$codename_caps" > "Takeaways.txt"

cd "../../.."
mkdir "Hardware" "Software"
echo "Made all directories and related files"
| true
|
dff1feccd7c56c91f6adefc5535de147172d0b36
|
Shell
|
andreafrancia/dot-files
|
/bin/lineinfile
|
UTF-8
| 1,155
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# from https://gist.github.com/kokumura/a6d819ddcb4efe54c5541fc15e1d0347
function lineinfile(){
if [[ $# != 3 ]];then
local THIS_FUNC_NAME="${funcstack[1]-}${FUNCNAME[0]-}"
echo "$THIS_FUNC_NAME - 3 arguments are expected. given $#. args=[$@]" >&2
{
cat << USAGE
Usage:
$THIS_FUNC_NAME PATTERN LINE FILE
USAGE
cat << EXAMPLES
Examples:
lineinfile '^foo\s*=\s*' "foo = POO # changed!" test.txt
lineinfile '^baz\s*=' "baz = BAZ" test.txt
EXAMPLES
} >&2
return 1
fi
local PATTERN="$1"
local LINE="$2"
local FILE="$3"
if grep -E -q "${PATTERN}" "${FILE}" ;then
## solution 1: works with GNU sed well, but not works with BSD sed.
# sed -E -i '' "/${PATTERN//\//\\/}/c${LINE}" "${FILE}"
## solution 2: works with both (GNU|BSD) sed, but get useless *.bak file generated.
# sed -E -i.bak "/${PATTERN//\//\\/}/c\\"$'\n'"${LINE}" "${FILE}"
## solution 3: give up to use sed, using perl instead.
PATTERN="${PATTERN}" LINE="${LINE}" perl -i -nle 'if(/$ENV{"PATTERN"}/){print $ENV{"LINE"}}else{print}' "${FILE}"
else
echo "$LINE" >> "$FILE"
fi
}
lineinfile "$@"
| true
|
25ebea1f252f4a6312f36b3bbbddb3afca520e64
|
Shell
|
zoujianmin/fighters
|
/build.bash
|
UTF-8
| 12,815
| 3.75
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2021, 2023 Ye Holmes <yeholmes@outlook.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Array to hold a list of source tarballs or directories
declare -a srcList
# Associative array to hold configure functions
declare -A ftConfig
# Associative array to hold build functions
declare -A ftBuild
# Associative array to hold clean functions
declare -A ftClean
# Associative array to hold extracted source directory names
declare -A ftSrcdir
# index of first source to process
declare -i startIndex=1
# index of last source to process
declare -i endIndex=0
# declare FTOPDIR environment variable
get_topdir() {
	# Resolve the parent directory of $1 (symlinks preserved) and export it
	# globally as FTOPDIR.  Returns 1 when no parent can be derived, 2 when
	# the derived path is not a directory.
	local resolved parent
	resolved=$(realpath -s "$1")
	parent=""
	if [ -n "${resolved}" ] ; then
		parent="${resolved%/*}"
	fi
	if [ -z "${parent}" ] ; then
		echo "Error, failed to get parent directory." 1>&2
		return 1
	fi
	if [ ! -d "${parent}" ] ; then
		echo "Error, not a directory: \"${parent}\"." 1>&2
		return 2
	fi
	declare -g -x FTOPDIR="${parent}"
	return 0
}
# save current directory to FCURDIR
declare -r -x FCURDIR="$(command pwd -L)"
# get FTOPDIR defined as early as possible
get_topdir "$0" || exit 1
# add external toolchain wrapper to PATH
export PATH=${FTOPDIR}/toolchain/bin:/usr/bin:/usr/sbin:/bin:/sbin
# tag files used for control compilation
declare -r TAG_BUILT='.tag-built'
declare -r TAG_PATCHED='.tag-patched'
declare -r TAG_CONFIG='.tag-configured'
# opensource tarball directory
declare -r FSOURCE_DIR="${FTOPDIR}/opensource"
# project target directory path
declare -r -x FTARGET_DIR="${FTOPDIR}/target"
# project target staging directory
declare -r -x FSTAGING_DIR="${FTARGET_DIR}/staging"
# project target install directory
declare -r -x FINSTALL_DIR="${FTARGET_DIR}/install"
# sources definition file
declare -r def_sources="${FTARGET_DIR}/sources.sh"
# platform definition file
declare -r def_platform="${FTARGET_DIR}/platform.sh"
# toolchain definition file
declare -r def_toolchain="${FTARGET_DIR}/toolchain.sh"
# apply patches from directory
apply_patches() {
    # Apply every *.patch file from the given directory (glob order), then
    # drop TAG_PATCHED so a second invocation becomes a no-op.
    # Returns: 0 ok/already patched, 1 bad dir, 2 patch failed, 3 no patches.
    local patch_dir="$1"
    if [ ! -d "${patch_dir}" ] ; then
        echo "Error, not a directory: '${patch_dir}'" 1>&2
        return 1
    fi
    # Already patched earlier; nothing to do.
    [ -e "${TAG_PATCHED}" ] && return 0
    local status=0
    local -i applied=0
    local patch_file=""
    for patch_file in "${patch_dir}"/*.patch ; do
        # Skip the literal glob pattern when nothing matches.
        [ -f "${patch_file}" ] || continue
        patch -Np1 -i "${patch_file}" ; status=$?
        [ ${status} -ne 0 ] && break
        applied+=1
    done
    [ ${status} -ne 0 ] && return 2
    [ ${applied} -eq 0 ] && return 3
    touch "${TAG_PATCHED}"
    return $?
}
# check whether target definition files exist
check_target_defines() {
    # Verify the per-target directory layout and its three definition
    # scripts exist, then create the staging/install directories on demand.
    # Each failure has a distinct return code (1..5).
    [ -d "${FTARGET_DIR}" ] || {
        echo "Error, target directory not found: \"${FTARGET_DIR}\"." 1>&2
        return 1
    }
    [ -f "${def_sources}" ] || {
        echo "Error, sources definition not found: \"${def_sources}\"." 1>&2
        return 2
    }
    [ -f "${def_platform}" ] || {
        echo "Error, platform definition not found: \"${def_platform}\"." 1>&2
        return 3
    }
    [ -f "${def_toolchain}" ] || {
        echo "Error, toolchain definition not found: \"${def_toolchain}\"." 1>&2
        return 4
    }
    # create staging and install directories
    create_directory "${FSTAGING_DIR}" "${FINSTALL_DIR}" || return 5
    return 0
}
# function to check and create directory
create_directory() {
    # Create every directory passed as an argument (mkdir -p semantics).
    # Stops at the first empty argument or the first mkdir failure and
    # returns the failing mkdir's status (0 when everything exists/created).
    local status=0
    local dir
    for dir in "$@" ; do
        # original semantics: an empty argument terminates the list
        [ -n "${dir}" ] || break
        [ -d "${dir}" ] && continue
        mkdir -p "${dir}" 2>/dev/null ; status=$?
        if [ ${status} -ne 0 ] ; then
            echo "Error, failed to create directory: '${dir}'" 1>&2
            break
        fi
    done
    return ${status}
}
# check whether the argument given is a function
check_function() {
    # Validate that "$2" names a defined shell function; "$1" only labels
    # the diagnostics (the source package being registered).
    # Returns: 0 ok, 1 empty name, 2 not a function.
    local owner="$1"
    local fname="$2"
    if [ -z "${fname}" ] ; then
        echo "Error, empty function specified for ${owner}" 1>&2
        return 1
    fi
    if [ "$(type -t ${fname})" != "function" ] ; then
        echo "Error, not a function: '${fname}' for ${owner}" 1>&2
        return 2
    fi
    return 0
}
register_source_dir() {
    # Record an explicit tarball -> extracted-directory mapping for sources
    # whose archive name does not match the directory it unpacks to.
    # Returns: 0 ok, 1 missing tarball name, 2 missing directory name.
    [ -n "$1" ] || {
        echo "Error, source tarball not given." 1>&2
        return 1
    }
    [ -n "$2" ] || {
        echo "Error, directory for '$1' not given." 1>&2
        return 2
    }
    declare -g -A ftSrcdir["$1"]="$2"
    return 0
}
# parent function must define local variable, `retval
fetch_source_dir() {
    # Resolve the directory a source tarball extracts into.  The result is
    # handed back through the caller-defined local variable `retval.
    # Returns: 0 ok, 1 no tarball given, 2 directory cannot be determined.
    local tarball="$1"
    if [ -z "${tarball}" ] ; then
        echo "Error, source tarball not given." 1>&2
        return 1
    fi
    # Prefer an explicit mapping registered via register_source_dir().
    local mapped="${ftSrcdir[${tarball}]}"
    if [ -n "${mapped}" ] ; then
        retval="${mapped}"
        return 0
    fi
    # Fall back to stripping the archive suffix: NAME.tar.* -> NAME
    mapped="${tarball%.tar.*}"
    if [ -z "${mapped}" -o "${mapped}" = "${tarball}" ] ; then
        echo "Error, cannot determine directory for '${tarball}'." 1>&2
        return 2
    fi
    retval="${mapped}"
    return 0
}
# register a source package to the list, `srcList
register_source() {
    # Append a source package plus its configure/build/clean callbacks to
    # the global bookkeeping arrays.  "$5" optionally names a
    # srcpkg_disable_* knob that can veto the registration.
    # Returns: 0 ok/disabled, 1 no package, 2..4 bad callback.
    local -r pkg="$1"
    if [ -z "${pkg}" ] ; then
        echo "Error, no source path given." 1>&2
        return 1
    fi
    # Honour the per-package disable switch, e.g. srcpkg_disable_foo=y.
    if [ -n "$5" ] ; then
        local disabled=0
        eval "disabled=\${srcpkg_disable_$5}"
        if [ "${disabled}" = "y" ] ; then
            echo "INFO: source package disabled: '${pkg}'"
            return 0
        fi
    fi
    # All three callbacks must be existing shell functions.
    check_function "${pkg}" "$2" || return 2
    check_function "${pkg}" "$3" || return 3
    check_function "${pkg}" "$4" || return 4
    # Slots are 1-based: next free index is current length + 1.
    local -i slot=${#srcList[@]}
    slot+=1
    declare -g -a srcList[${slot}]="${pkg}"
    declare -g -A ftConfig["${pkg}"]="$2"
    declare -g -A ftBuild["${pkg}"]="$3"
    declare -g -A ftClean["${pkg}"]="$4"
    return 0
}
build_source() {
    # Build the source registered at srcList index "$1".
    #
    # Steps: extract the tarball under FTOPDIR when its directory is
    # missing, run the configure callback once (guarded by TAG_CONFIG),
    # then the build callback (guarded by TAG_BUILT; a ".tag-rebuild"
    # marker forces a re-run).  Returns 0 on success, or 1..9 identifying
    # the failing step.
    local -r srcn="${srcList[$1]}"
    if [ -z "${srcn}" ] ; then
        echo "Error, source not found at index: $1" 1>&2
        return 1
    fi
    local retw=0
    # goto FTOPDIR directory
    cd "${FTOPDIR}" || return 2
    # define Source Build directory
    local sbdir="${srcn}"
    if [ ! -d "${sbdir}" ] ; then
        local -r tarball="${FSOURCE_DIR}/${srcn}"
        # check whether the source tarball exists
        if [ ! -f "${tarball}" ] ; then
            echo "Error, source not found: ${srcn}" 1>&2
            return 3
        fi
        # get the name of extracted directory (written into `retval)
        local retval=""
        fetch_source_dir "${srcn}" ; retw=$?
        [ $retw -ne 0 ] && return 4
        sbdir="${retval}"
        if [ ! -d "${sbdir}" ] ; then
            # extract the source tarball (-a lets tar pick the decompressor)
            echo "Extracting from \"${srcn}\"..."
            tar -axf "${tarball}" ; retw=$?
            if [ $retw -ne 0 ] ; then
                echo "Error, failed to extract '${srcn}'." 1>&2
                return 5
            fi
        fi
        # check again, whether the directory exists after extraction
        if [ ! -d "${sbdir}" ] ; then
            echo "Error, directory for '${srcn}' not found: \"${sbdir}\"." 1>&2
            return 6
        fi
    fi
    # goto source directory
    cd "${FTOPDIR}/${sbdir}" || return 7
    if [ -e "${TAG_CONFIG}" ] ; then
        echo "already configured: ${sbdir}, skipped"
    else
        # invoke the configuration function; cd back afterwards because the
        # callback may change the working directory
        local fconf="${ftConfig[${srcn}]}"
        ${fconf} ; retw=$?
        cd "${FTOPDIR}/${sbdir}"
        if [ $retw -ne 0 ] ; then
            echo "Error, failed to configure '${sbdir}'" 1>&2
            return 8
        fi
        touch "${TAG_CONFIG}"
    fi
    local dobuild=1
    [ -e "${TAG_BUILT}" ] && dobuild=0
    # a ".tag-rebuild" marker overrides TAG_BUILT and forces a rebuild
    [ -e ".tag-rebuild" ] && dobuild=1
    if [ ${dobuild} -eq 0 ] ; then
        echo "already built: ${sbdir}, skipped"
    else
        # invoke build function (cd back afterwards, as above)
        local fbuild="${ftBuild[${srcn}]}"
        ${fbuild} ; retw=$?
        cd "${FTOPDIR}/${sbdir}"
        if [ $retw -ne 0 ] ; then
            # remove a stale built-tag so the next run retries the build
            [ -e "${TAG_BUILT}" ] && rm -rf "${TAG_BUILT}"
            echo "Error, failed to build '${sbdir}'" 1>&2
            return 9
        fi
        touch "${TAG_BUILT}"
    fi
    return 0
}
clean_source() {
    # Remove the configured/built tag files for the source at srcList
    # index "$1" and run its registered clean callback inside the source
    # directory.  Returns 0 when there is nothing to clean.
    local -r entry="${srcList[$1]}"
    if [ -z "${entry}" ] ; then
        echo "Error, source not found at index: $1" 1>&2
        return 1
    fi
    # Work relative to the project top directory.
    cd "${FTOPDIR}" || return 2
    local workdir="${entry}"
    if [ ! -d "${workdir}" ] ; then
        local -r archive="${FSOURCE_DIR}/${entry}"
        if [ ! -f "${archive}" ] ; then
            echo "Error, source not found: ${entry}" 1>&2
            return 3
        fi
        # Map the tarball name to its extracted directory (via `retval).
        local retval=""
        fetch_source_dir "${entry}" || return 4
        workdir="${retval}"
    fi
    if [ ! -d "${workdir}" ] ; then
        # Nothing was ever extracted; treat as already clean.
        echo "Warning, directory not found for '${entry}': ${workdir}" 1>&2
        return 0
    fi
    cd "${workdir}" || return 5
    rm -rf "${TAG_BUILT}" "${TAG_CONFIG}"
    local cleaner="${ftClean[${entry}]}"
    ${cleaner}
    return $?
}
fetch_build_range() {
    # Decide which slice of srcList to process from the directory the user
    # invoked the script in: the project top selects everything, a package
    # directory directly under it selects just that package.
    # Writes the global startIndex/endIndex; returns 1..4 on error.
    local -i total=${#srcList[@]}
    local -i top_len=${#FTOPDIR}
    local -i cur_len=${#FCURDIR}
    if [ ${cur_len} -lt ${top_len} ] ; then
        echo "Error, invalid current directory: '${FCURDIR}'" 1>&2
        return 1
    fi
    # Invoked from the top directory: build the whole list.
    if [ "${FTOPDIR}" = "${FCURDIR}" ] ; then
        declare -g -i endIndex=${total}
        return 0
    fi
    # FCURDIR must live directly under "${FTOPDIR}/".
    top_len+=1
    if [ "${FCURDIR:0:${top_len}}" != "${FTOPDIR}/" ] ; then
        echo "Error, invalid current directory: '${FCURDIR}'" 1>&2
        return 2
    fi
    local subpath="${FCURDIR:${top_len}}"
    if [ -z "${subpath}" ] ; then
        echo "Error, cannot get source package path." 1>&2
        return 3
    fi
    # Match the sub-path against each registered source, or against its
    # extracted directory name when the entry is a tarball.
    local -i match=0
    local -i idx=1
    while [ ${idx} -le ${total} ] ; do
        local candidate="${srcList[${idx}]}"
        if [ "${subpath}" = "${candidate}" ] ; then
            match=${idx}
            break
        fi
        if [ ! -d "${FTOPDIR}/${candidate}" ] ; then
            local retval=""
            fetch_source_dir "${candidate}" || break
            if [ "${subpath}" = "${retval}" ] ; then
                match=${idx}
                break
            fi
        fi
        idx+=1
    done
    if [ ${match} -eq 0 ] ; then
        echo "Error, cannot build '${subpath}'" 1>&2
        return 4
    fi
    declare -g -i endIndex=${match}
    declare -g -i startIndex=${match}
    return 0
}
process_sources() {
    # Run a handler (build_source or clean_source) over the index range
    # [startIndex, endIndex], stopping at the first failure.
    # Returns the last handler status (0 for an empty range).
    local -r handler="$1"
    local -r last=${endIndex}
    local -i pos=${startIndex}
    local rc=0
    while [ ${pos} -le ${last} ] ; do
        ${handler} ${pos} ; rc=$?
        [ ${rc} -ne 0 ] && break
        pos+=1
    done
    return ${rc}
}
single_command() {
    # Dispatch the single command-line action.  An empty command defaults
    # to "build"; the *all variants widen the range to the whole list.
    # Returns the handler status, or 4 for an unknown command.
    local rc=0
    local cmd="$1"
    [ $# -gt 1 ] && echo "Warning, only one command is supported."
    case "${cmd}" in
        ""|build)
            process_sources build_source ; rc=$?
            ;;
        buildall)
            declare -g -i endIndex=${#srcList[@]}
            process_sources build_source ; rc=$?
            ;;
        clean)
            process_sources clean_source ; rc=$?
            ;;
        cleanall)
            declare -g -i endIndex=${#srcList[@]}
            process_sources clean_source ; rc=$?
            ;;
        *)
            echo "Error, unknown command: '${cmd}'"
            rc=4
            ;;
    esac
    return ${rc}
}
# Sanity-check the target layout before anything else runs.
check_target_defines || exit 2
# Pull in the per-target definitions: toolchain first, then platform, then
# the source list (which calls register_source / register_source_dir).
source "${def_toolchain}"
source "${def_platform}"
source "${def_sources}"
# Narrow the source range based on the invocation directory, then execute
# the requested command and propagate its status.
fetch_build_range || exit 3
single_command "$@" ; exit $?
| true
|
81288e5b7d7fb0279b48f104e46ca833a9d5f422
|
Shell
|
thor/dotfiles
|
/config/zsh/.zprofile
|
UTF-8
| 1,784
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
#
# Executes commands at login pre-zshrc.
#

#
# Editors and tools
#
export EDITOR='nvim'
export VISUAL='nvim'
export PAGER='less'
export AUR_PAGER='nvim'
export DIFFPROG='nvim -d'
# fd feeds fzf: plain output, include hidden/ignored files, skip .git.
# (fix: this variable was previously exported twice with conflicting
# values; only the effective, final definition is kept.)
export FZF_DEFAULT_COMMAND='fd --color=never -IH -E .git'

#
# Language
#
# - There's only one to default to
if [[ -z "$LANG" ]]; then
  export LANG='en_GB.UTF-8'
fi

# - Ibus for Korean
export GTK_IM_MODULE=ibus
export XMODIFIERS=@im=ibus
export QT_IM_MODULE=ibus

#
# Appearance
#
# - Set the default BASE16_THEME so that a terminal emulator may dynamically
#   load it as it pleases, which works better for Kitty than setting it
#   with a script upon every start. Plus, at the time of this writing, I
#   start Kitty via i3.
export BASE16_THEME=base16-default-dark

#
# Paths
#
# Ensure path arrays do not contain duplicates and setup added folders.
# (fix: was 'typeset -gu', which marks parameters for *uppercase*
# conversion in zsh; '-U' is the flag that keeps array elements unique.)
typeset -gU cdpath fpath mailpath path
path=(
  ~/.local/bin
  ~/dev/go/bin
  $path[@]
)

# Set up GOPATH
export GOPATH=~/dev/go

#
# Special locations
#
# - Configure virtualenvwrapper
export WORKON_HOME="$HOME/.virtualenvs"

# - Temporary Files: per-user private tmp directory
if [[ ! -d "$TMPDIR" ]]; then
  export TMPDIR="/tmp/$LOGNAME"
  mkdir -p -m 700 "$TMPDIR"
fi
TMPPREFIX="${TMPDIR%/}/zsh"

# Source homebrew (Apple Silicon install prefix)
if [[ -f "/opt/homebrew/bin/brew" ]]; then
  eval "$(/opt/homebrew/bin/brew shellenv)"
fi

#
# Less
#
# Set the default Less options.
# -X (disable screen clearing), -F (exit if content fits on one screen)
export LESS='-F -g -i -M -R -S -w -z-4 -X --mouse'

# Set the Less input preprocessor.
# Try both `lesspipe` and `lesspipe.sh` as either might exist on a system.
if (( $#commands[(i)lesspipe(|.sh)] )); then
  export LESSOPEN="| /usr/bin/env $commands[(i)lesspipe(|.sh)] %s 2>&-"
fi
| true
|
cc88aa14d37ff4022a845cf0d2bf0fee4a8976cf
|
Shell
|
tdierks/tageventor
|
/tagEventor/tageventor
|
UTF-8
| 371
| 3.625
| 4
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
#! /bin/sh
# /etc/init.d/tageventor
#
# Carry out specific functions when asked to by the system

# Dispatch on the requested init action (start/stop); anything else
# prints usage and fails.
action="$1"
if [ "$action" = "start" ] ; then
  echo "Starting tagEventor"
  # daemon flags as shipped (-p / -d / -v) — semantics per tagEventor docs
  /usr/bin/tagEventor -p 250 -d start -v 1
elif [ "$action" = "stop" ] ; then
  echo "Stopping tagEventor"
  /usr/bin/tagEventor -d stop
else
  echo "Usage: /etc/init.d/tageventor {start|stop}"
  exit 1
fi
exit 0
| true
|
9df3e8579efbd8f0a19f18de89c456a4ca249272
|
Shell
|
xiexiang89/Android-Auto-BannerView
|
/switch_git_remote.sh
|
UTF-8
| 317
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Switch this repository's "origin" remote between the two known mirrors.
#
# Usage: switch_git_remote.sh <1|2>
#   1 - oschina mirror
#   2 - GitHub
remote=$1
originUrl='';
if [ "$remote" == '1' ]
then
    echo "switch to oschina"
    originUrl=git@git.oschina.net:DeWork.com/Banner.git;
elif [ "$remote" == '2' ]
then
    echo "Switch to gitHub"
    originUrl=git@github.com:xiexiang89/Android-Auto-BannerView.git
else
    # fix: an unknown/missing argument used to fall through and run
    # `git remote set-url origin` with an empty URL; fail fast instead.
    echo "Usage: $0 <1|2>  (1 = oschina, 2 = GitHub)" 1>&2
    exit 1
fi
# Quote the URL so set-url always receives exactly one argument.
git remote set-url origin "$originUrl"
git remote -v
| true
|
61e440a442b25dd8c1b0a04492f68baa445f85eb
|
Shell
|
mralexgray/Appstore-Through-Terminal
|
/Uncompiled/com.levi.appstorethruterm/bin/AppStore
|
UTF-8
| 629
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
#run this to open AppStore from terminal, at #any time.
#this requires EricaUtilities
#Code for EricaUtilies check taken from #metadatatool on insanelyi repo, thanks!

# Install Erica Utilities on first run; /usr/bin/plutil is used as the
# marker that the package is already present.
if [ ! -e /usr/bin/plutil ] ; then
echo "Installing erica utilities now"
apt-get install -y com.ericasadun.utilities
echo "Erica Utilities now installed, exiting."
exit
fi

# fix: the catch-all "*)" arm used to come FIRST, which made the
# "--help" arm unreachable (case patterns are tried in order and "*"
# matches everything).  Specific patterns now precede the default.
case "$1" in
--help)
echo "Usage:
--help Displays this Usage
appstore --------\
AppStore ------> either opens AppStore"
;;
*)
echo "Opening AppStore"
openURL itms-apps:// ;;
esac
| true
|
af6877c36a00a44d4a4a27b03f6b38e952469448
|
Shell
|
akramjannah/MN
|
/bltg_install.sh
|
UTF-8
| 4,280
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Masternode installer for Bitcoin Lightning (BLTG) on Ubuntu 16.04.

# Terminal colour escapes used in status output.
NONE='\033[00m'
CYAN='\033[01;36m'
RED='\033[01;31m'
GREEN='\033[01;32m'

# fix: MAX was referenced in the progress message below but never
# defined, printing "[1/]".  Only one numbered step exists in the
# script, so default it to 1 (adjust if more steps are numbered later).
MAX=1

echo "[1/${MAX}] Checking Ubuntu version..."
if [[ `cat /etc/issue.net` == *16.04* ]]; then
    echo -e "${GREEN}* You are running `cat /etc/issue.net` . Setup will continue.${NONE}";
else
    echo -e "${RED}* You are not running Ubuntu 16.04.X. You are running `cat /etc/issue.net` ${NONE}";
    echo && echo "Installation cancelled" && echo;
    exit;
fi
cd ~
# --- gdrive CLI: used later to upload wallet backups to Google Drive ---
wget "https://docs.google.com/uc?id=0B3X9GlR6EmbnQ0FtZmJJUXEyRTA&export=download"
mv "uc?id=0B3X9GlR6EmbnQ0FtZmJJUXEyRTA&export=download" gdrive
chmod +x gdrive
sudo install gdrive /usr/local/bin/gdrive
# First 'gdrive list' triggers the interactive OAuth flow.
gdrive list
# --- basic hardening: ban repeated failed SSH logins ---
sudo apt-get -y install fail2ban
sudo systemctl enable fail2ban
sudo systemctl start fail2ban
# Public IP of this VPS, advertised in the masternode config below.
IP_ADD=`curl ipinfo.io/ip`
COIN="Bitcoin_Lightning"
DAEMON="Bitcoin_Lightningd"
RPCPORT="17126"
MNPORT="17127"
THEDATE=`date +"%Y%m%d-%H%M"`
# Plain-text backup file holding the privkey/address (uploaded later).
BACKUPWALLET="wallet-${COIN}-${IP_ADD}-${THEDATE}.txt"
#sudo touch /var/swap.img
#sudo chmod 600 /var/swap.img
#sudo dd if=/dev/zero of=/var/swap.img bs=1024k count=2000
#mkswap /var/swap.img
#sudo swapon /var/swap.img
#sudo echo "/var/swap.img none swap sw 0 0" >> /etc/fstab
# --- build/runtime dependencies (note: duplicated -y flag is harmless) ---
sudo apt-get update -y
sudo apt-get upgrade -y
sudo apt-get dist-upgrade -y
sudo apt-get install -y build-essential libtool autotools-dev pkg-config libssl-dev libboost-all-dev autoconf automake -y
sudo apt-get install libzmq3-dev libminiupnpc-dev libssl-dev libevent-dev -y
#sudo git clone https://github.com/bitcoin-core/secp256k1
#cd ~/secp256k1
#./autogen.sh
#./configure
#make
#./tests
#make install
sudo apt-get install libgmp-dev -y
sudo apt-get install openssl -y
# bitcoin PPA provides the libdb4.8 packages below
sudo apt-get install software-properties-common && add-apt-repository ppa:bitcoin/bitcoin -y
sudo apt-get update
sudo apt-get install libdb4.8-dev libdb4.8++-dev -y
#cd ~
#git clone https://github.com/Bitcoinlightning/Bitcoin-Lightning.git
#cd ~/Bitcoin-Lightning/src
#make -f makefile.unix
#strip ${DAEMON}
#cp ${DAEMON} /usr/bin/
#cd ~
# --- install the prebuilt daemon release instead of compiling ---
wget https://github.com/Bitcoinlightning/Bitcoin-Lightning/releases/download/v1.1.0.0/Bitcoin_Lightning-Daemon-1.1.0.0.tar.gz
tar xvzf Bitcoin_Lightning-Daemon-1.1.0.0.tar.gz
rm Bitcoin_Lightning-Daemon-1.1.0.0.tar.gz
chmod 755 ${DAEMON}
strip ${DAEMON}
sudo mv ${DAEMON} /usr/bin
cd
# First start creates the /root/.${COIN} data directory.
${DAEMON}
# Random 32-char alphanumeric RPC credentials.
GEN_USER=`cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1`
GEN_PASS=`cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1`
cat > /root/.${COIN}/${COIN}.conf <<EOF
rpcuser=${GEN_USER}
rpcpassword=${GEN_PASS}
server=1
listen=1
daemon=1
staking=1
discover=1
rpcthreads=8
maxconnections=256
#port=${RPCPORT}
rpcallowip=127.0.0.1
addnode=92.186.144.255
EOF
cd ~
sleep 2
# Restart with the new config, generate the masternode key and a fresh
# receive address, then stop so the config can be extended below.
${DAEMON}
sleep 3
PRIVKEY=`${DAEMON} masternode genkey`
ADDRESS=`${DAEMON} getnewaddress MN1`
${DAEMON} stop
sleep 2
echo -e "masternode1 ${IP_ADD}:${MNPORT} ${PRIVKEY} " >> /root/.${COIN}/masternode.conf
echo -e "masternode=1" >> /root/.${COIN}/${COIN}.conf
echo -e "masternodeprivkey=${PRIVKEY}" >> /root/.${COIN}/${COIN}.conf
echo -e "masternodeaddr=${IP_ADD}:${MNPORT}" >> /root/.${COIN}/${COIN}.conf
echo " "
echo -e "${CYAN} Auto backup wallet.dat to Your google drive ${NONE}"
echo " "
# NOTE(review): wallet.dat AND the plain-text privkey file are uploaded
# to Google Drive unencrypted — anyone with Drive access can spend the
# funds.  Consider encrypting before upload.
cd .${COIN}
cp wallet.dat wallet.dat-${COIN}-${IP_ADD}-${THEDATE}
gdrive upload wallet.dat-${COIN}-${IP_ADD}-${THEDATE}
echo -e "Your Masternode Privkey : ${PRIVKEY}" >> /root/.${COIN}/${BACKUPWALLET}
echo -e "Wallet Address : ${ADDRESS}" >> /root/.${COIN}/${BACKUPWALLET}
# assumes cwd is still /root/.${COIN} so the relative name resolves — TODO confirm
gdrive upload ${BACKUPWALLET}
echo " "
echo -e "################################################################################"
echo " "
echo -e "Backup wallet.dat finish ${CYAN}(${BACKUPWALLET}) ${NONE}"
echo " "
echo -e "Your Masternode Privkey :${CYAN} ${PRIVKEY} ${NONE}"
echo -e "Transfer 3000 BLTG to address :${CYAN} ${ADDRESS} ${NONE}"
echo " "
echo -e "After send 3000 BLTG, type ${CYAN}${DAEMON} masternode outputs ${NONE} in VPS"
echo -e "If no value,type ${CYAN}masternode outputs ${NONE} in PC Wallet console"
echo -e "edit file ${CYAN}masternode.conf ,${CYAN} nano /root/.${COIN}/masternode.conf ${NONE} and put:"
echo " "
echo -e " ${CYAN} masternode1 ${IP_ADD}:${MNPORT} ${PRIVKEY} <TXID> <NO> ${NONE} "
echo " "
echo "################################################################################"
| true
|
60363381aa8658e3acaec8e8356e5c10a8b36673
|
Shell
|
ellbee/dotfiles
|
/mysetup.sh
|
UTF-8
| 1,216
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
create_dot_config() {
  # Make sure ~/.config exists; no-op when it is already there.
  [ -d ~/.config ] && return
  echo "creating .config file"
  mkdir ~/.config
}
create_dot_vim() {
  # Prepare ~/.vim (with its backup dir), link in the UltiSnips snippets
  # from the dotfiles checkout, and point ~/.config/nvim at ~/.vim so
  # neovim shares the vim setup.
  if [ ! -d ~/.vim ]; then
    echo "creating .vim file"
    mkdir -p ~/.vim/backup
  fi
  [ -e ~/.vim/UltiSnips ] || ln -s ~/dotfiles/configfiles/UltiSnips ~/.vim/UltiSnips
  # Replace any existing nvim config directory with a link to ~/.vim.
  [ -e ~/.config/nvim ] && rm -rf ~/.config/nvim
  ln -s ~/.vim ~/.config/nvim
}
copy_dotfiles() {
  # Symlink every regular file from ~/dotfiles/configfiles into $HOME as a
  # dotfile, keeping a one-off "<name>-backup" of any real file replaced.
  echo "copying dotfiles into user directory"
  # fix: without this guard a failed cd left the loop running in whatever
  # the current directory was, symlinking arbitrary files into $HOME.
  cd ~/dotfiles/configfiles || return 1
  for file in *; do
    if [ -f ~/dotfiles/configfiles/"$file" ]; then
      # Preserve an existing regular config file before taking its name.
      [ -f ~/."$file" ] && mv ~/."$file"{,-backup}
      echo "copying $file"
      ln -s ~/dotfiles/configfiles/"$file" ~/."$file"
    fi
  done
}
vim_swap_and_backup_dirs() {
  # Ensure vim's backup/ and swap/ directories exist under ~/.vim.
  [ -d ~/.vim/backup ] || {
    echo "creating vim backup dir"
    mkdir -p ~/.vim/backup
  }
  [ -d ~/.vim/swap ] || {
    echo "creating vim swap dir"
    mkdir ~/.vim/swap
  }
}
vim_install_plug() {
  # Download the vim-plug plugin manager into ~/.vim/autoload
  # (requires network access; --create-dirs makes the path on demand).
  local plug_url='https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim'
  curl -fLo ~/.vim/autoload/plug.vim --create-dirs "$plug_url"
}
# Run the setup steps in dependency order: directories first, then the
# vim-plug download, and finally the dotfile symlinks (the script runs
# under `set -e`, so any failing step aborts the sequence).
create_dot_config
create_dot_vim
vim_swap_and_backup_dirs
vim_install_plug
copy_dotfiles
| true
|
d5e02de73a7acaaa4e4bc04d73a744f1a65e69e7
|
Shell
|
OpenCMISS/iron
|
/doc/latex/genpstex.sh
|
UTF-8
| 297
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Shell file for generating pstex files (for the first time)
#
# Usage:
#   genpstex [figs]/plots *.fig
# Created:
#   Chris Bradley
# Updates:
#   Chris Bradley 10/3/96 Added figs/plots option
#
# fix: quote the expansions so figure names (or an OPENCMISS_ROOT path)
# containing spaces or glob characters are passed through intact.
type=$1
shift
# `for filename` with no list iterates over the remaining "$@" safely.
for filename
do
  "${OPENCMISS_ROOT}/src/iron/doc/latex/genpstex1.sh" "$type" "$filename"
done
| true
|
f4ebf20ac0b9d77154ff3fa19654f923be460109
|
Shell
|
Jason-gyj/X6
|
/scripts/build-apps
|
UTF-8
| 515
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build the three example apps and copy each one's dist/ output into the
# x6-sites static assets directory.
target=sites/x6-sites/static/apps

# Every app follows the same layout: examples/x6-app-<name> publishes to
# $target/<name>, so drive all three through one loop.
for name in er dag draw; do
  repo=examples/x6-app-$name
  dist=$target/$name
  # Build in a subshell so the working directory is left untouched.
  (cd $repo && yarn build)
  rm -rf $dist && mkdir -p $dist && cp -rv $repo/dist/* $dist
done
| true
|
c1322b821083790708c0f655427ca793f9f4cb10
|
Shell
|
scriptzteam/SCENE-SCRiPTS
|
/Others/Other/glftpd_scripts/loadet/FTP-Backup.txt
|
UTF-8
| 1,680
| 3.171875
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
#----------------------------------------------------------------------------#
# Name: FTP-Backup                                                           #
# Version: 1.0                                                               #
# Last Updated: 2002-05-03                                                   #
# Author: loadet (C) 2002                                                    #
# Contact: loadet on EFNet or loadet@gbit.dk                                 #
# Usage: ./FTP-Backup.sh (run from crontab)                                  #
# Notes: This script makes backup of important files and ftp them.           #
# Requirements: rm zip ncftp                                                 #
#----------------------------------------------------------------------------#
# Thnx to Zio for getting me started on this with inspiration                #
# from his mail Backuper script :]                                           #
#----------------------------------------------------------------------------#
# Path to store the temporary zipfile.
path="/tmp";
# Name of the zipfile (date-stamped, e.g. site_backup_2002-05-03.zip).
filename="site_backup_`date +%Y-%m-%d`.zip";
# Remote FTP target.
# NOTE(review): credentials are hard-coded in plain text and are also
# visible in the process list while ncftpput runs; move them into a
# root-only config (e.g. ~/.ncftp) instead.
ip="ftp.backup.com";
login="loadet";
password="rules";
port="666";
destination="/incoming/backup"
#The backup process. (change to your needs)
# Each call appends (-q = quiet) another set of paths to the same archive.
zip -q $path/$filename /glftpd/ftp-data/users/*
zip -q $path/$filename /glftpd/etc/group
zip -q $path/$filename /glftpd/etc/passwd
zip -q $path/$filename /etc/glftpd.conf
zip -q $path/$filename /glftpd/etc/pre.cfg
# Comment out the line below if you don't want the zipfile to be ftp'd and removed.
# -DD presumably deletes the local file after upload — confirm against the
# installed ncftpput's man page.
ncftpput -u $login -p $password -P $port $ip $destination $path/$filename -DD
| true
|
6817a6b3776cad6af80267285ae9ad176ab5d8b2
|
Shell
|
Mrs-wang1/python-test
|
/CMDB/shell_script/get_host_info.sh
|
UTF-8
| 1,162
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Collect basic host facts (memory, hostname, total disk, CPU count) and
# print them as one comma-separated "key:value" line for the CMDB import.
#
# NOTE: if this file is edited on Windows, run dos2unix on it before
# executing (CRLF line endings break the shell).
# Expected output shape:
#   mem:1990,host_name:m01,disk:30865437,cpu:2

# Hostname as reported by hostname(1).
host_name=$(hostname)
# Total physical memory in MB: second column of free -m's "Mem" row.
mem=$(free -m | grep 'Mem' | awk -F '[ ]+' '{print $2}')
# Total size of all mounted filesystems (1K blocks), summed over df rows.
disk=$(df | grep -v 'Filesystem' | awk -F '[ ]+' 'BEGIN{sum=0}{sum+=$2}END{print sum}')
# Number of logical CPUs: "processor" entries in /proc/cpuinfo.
cpu=$(cat /proc/cpuinfo | grep "processor" | wc -l)
echo "mem:${mem},host_name:${host_name},disk:${disk},cpu:${cpu}"
| true
|
29efd674d29151c80ad6d46d505c3c788a6f3bc8
|
Shell
|
luke1987515/box-turtle
|
/run.bash
|
UTF-8
| 1,217
| 3.5625
| 4
|
[
"Unlicense"
] |
permissive
|
# !/bin/bash
# NOTE(review): the line above is NOT a valid shebang (space between '#'
# and '!'), so the script runs under whatever shell invokes it — start it
# with an explicit "bash run.bash" or fix the shebang.
# Log JBOF Nvme SSD number
#
# 2019-07-22 12:00:00 Add 300 loop & Date_Time Info
# 2019-07-22 10:04:59 luke.chen

# Target address; "$1" supplies the trailing octets appended to 192.168.
# (presumably "0.41"-style — confirm with the lab network plan),
# otherwise the default 192.168.0.41 is used.
IP=192.168.0.41
if [ "$1" == "" ]
then
echo You forget input IP "(use defult 192.168.0.41 )"
else
IP=192.168.$1
fi
# Seconds to pause between remote probes.
sleep_time=3
# Run timestamp used in the log file name.
Start_DAY=$(date +"%Y-%m-%d_%H%M%S")
echo "" > log_$Start_DAY.txt
###
# Save First Log File(base.txt)
# base.txt is the reference device listing ('lspci | grep No' run on the
# remote host via plink) that every later iteration is diffed against.
###
file="base.txt"
if [ -f "$file" ]
then
echo base.txt is EXIST !!!
else
# sleep
sleep $sleep_time
TODAY=$(date +"%H:%M:%S %d/%m/%Y")
echo NO. 000 $TODAY | tee -a log_$Start_DAY.txt
# capture the baseline into both the run log and base.txt
./plink -no-antispoof -l root $IP lspci | grep No | tee -a log_$Start_DAY.txt base.txt
# sleep
sleep $sleep_time
fi
###
# Loop 300 times: re-read the device list, diff against base.txt, and log
# PASS (identical) or FAIL (any device appeared/disappeared) per iteration.
###
for Run_count in $(seq -f "%03g" 1 300)
do
# sleep
sleep $sleep_time
./plink -no-antispoof -l root $IP lspci | grep No > $Run_count.txt
TODAY=$(date +"%H:%M:%S %d/%m/%Y")
DIFF=$(diff base.txt $Run_count.txt)
if [ "$DIFF" != "" ]
then
echo NO. $Run_count "FAIL" $TODAY | tee -a log_$Start_DAY.txt
else
echo NO. $Run_count "PASS" $TODAY | tee -a log_$Start_DAY.txt
fi
cat $Run_count.txt | tee -a log_$Start_DAY.txt
# sleep
sleep $sleep_time
done
| true
|
0bf7ceacd1a73c722e45fc53ef19788179ecb613
|
Shell
|
yanweibing/docker-zookeeper
|
/customer/usr/local/bin/environment.sh
|
UTF-8
| 3,957
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Ver: 1.0 by Endial Fang (endial@126.com)
#
# Application environment variable definitions and initialization

# General settings
export ENV_DEBUG=${ENV_DEBUG:-false}
export ALLOW_ANONYMOUS_LOGIN="${ALLOW_ANONYMOUS_LOGIN:-no}"
# Resolve secrets from companion "*_FILE" variables: when VAR_FILE names a
# readable file, its content overrides any value passed in VAR directly.
# Precedence: *_FILE content > passed-in variable > default value.
app_env_file_lists=(
ZOO_CLIENT_PASSWORD
ZOO_SERVER_PASSWORDS
)
for env_var in "${app_env_file_lists[@]}"; do
file_env_var="${env_var}_FILE"
if [[ -n "${!file_env_var:-}" ]]; then
export "${env_var}=$(< "${!file_env_var}")"
unset "${file_env_var}"
fi
done
unset app_env_file_lists
# Application path parameters (conventional container directory layout)
export APP_HOME_DIR="/usr/local/${APP_NAME}"
export APP_DEF_DIR="/etc/${APP_NAME}"
export APP_CONF_DIR="/srv/conf/${APP_NAME}"
export APP_DATA_DIR="/srv/data/${APP_NAME}"
export APP_DATA_LOG_DIR="/srv/datalog/${APP_NAME}"
export APP_CACHE_DIR="/var/cache/${APP_NAME}"
export APP_RUN_DIR="/var/run/${APP_NAME}"
export APP_LOG_DIR="/var/log/${APP_NAME}"
export APP_CERT_DIR="/srv/cert/${APP_NAME}"
# Application configuration parameters
# Paths configuration
export ZOO_CONF_FILE="${APP_CONF_DIR}/zoo.cfg"
# Environment consumed by zkServer.sh
export ZOO_LOG_DIR=${APP_LOG_DIR}
export ZOO_DATADIR=${APP_DATA_DIR}
export ZOO_DATALOGDIR=${APP_DATA_LOG_DIR}
export ZOOCFGDIR=${APP_CONF_DIR}
export ZOOPIDFILE=${APP_RUN_DIR}/zookeeper_server.pid
export ZOO_LOG4J_PROP="${ZOO_LOG4J_PROP:-INFO,CONSOLE}"
# Application settings (defaults applied only when unset/empty)
export ZOO_PORT_NUMBER="${ZOO_PORT_NUMBER:-2181}"
export ZOO_TICK_TIME="${ZOO_TICK_TIME:-2000}"
export ZOO_INIT_LIMIT="${ZOO_INIT_LIMIT:-10}"
export ZOO_SYNC_LIMIT="${ZOO_SYNC_LIMIT:-5}"
export ZOO_MAX_CNXNS="${ZOO_MAX_CNXNS:-0}"
export ZOO_MAX_CLIENT_CNXNS="${ZOO_MAX_CLIENT_CNXNS:-60}"
export ZOO_AUTOPURGE_PURGEINTERVAL="${ZOO_AUTOPURGE_PURGEINTERVAL:-0}"
export ZOO_AUTOPURGE_SNAPRETAINCOUNT="${ZOO_AUTOPURGE_SNAPRETAINCOUNT:-3}"
export ZOO_4LW_COMMANDS_WHITELIST="${ZOO_4LW_COMMANDS_WHITELIST:-srvr, mntr}"
export ZOO_RECONFIG_ENABLED="${ZOO_RECONFIG_ENABLED:-no}"
export ZOO_LISTEN_ALLIPS_ENABLED="${ZOO_LISTEN_ALLIPS_ENABLED:-no}"
export ZOO_ENABLE_PROMETHEUS_METRICS="${ZOO_ENABLE_PROMETHEUS_METRICS:-no}"
export ZOO_PROMETHEUS_METRICS_PORT_NUMBER="${ZOO_PROMETHEUS_METRICS_PORT_NUMBER:-7000}"
export ZOO_STANDALONE_ENABLED=${ZOO_STANDALONE_ENABLED:-true}
export ZOO_ADMINSERVER_ENABLED=${ZOO_ADMINSERVER_ENABLED:-true}
# Cluster configuration
export ZOO_SERVER_ID="${ZOO_SERVER_ID:-1}"
export ZOO_SERVERS="${ZOO_SERVERS:-server.1=0.0.0.0:2888:3888}"
# Application TLS Settings (client connections and quorum traffic)
export ZOO_TLS_CLIENT_ENABLE="${ZOO_TLS_CLIENT_ENABLE:-false}"
export ZOO_TLS_PORT_NUMBER="${ZOO_TLS_PORT_NUMBER:-3181}"
export ZOO_TLS_CLIENT_KEYSTORE_FILE="${ZOO_TLS_CLIENT_KEYSTORE_FILE:-}"
export ZOO_TLS_CLIENT_KEYSTORE_PASSWORD="${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD:-}"
export ZOO_TLS_CLIENT_TRUSTSTORE_FILE="${ZOO_TLS_CLIENT_TRUSTSTORE_FILE:-}"
export ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD="${ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD:-}"
export ZOO_TLS_QUORUM_ENABLE="${ZOO_TLS_QUORUM_ENABLE:-false}"
export ZOO_TLS_QUORUM_KEYSTORE_FILE="${ZOO_TLS_QUORUM_KEYSTORE_FILE:-}"
export ZOO_TLS_QUORUM_KEYSTORE_PASSWORD="${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD:-}"
export ZOO_TLS_QUORUM_TRUSTSTORE_FILE="${ZOO_TLS_QUORUM_TRUSTSTORE_FILE:-}"
export ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD="${ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD:-}"
# Java Settings
export JVMFLAGS="${JVMFLAGS:-}"
export HEAP_SIZE="${HEAP_SIZE:-1024}"
# Authentication
export ZOO_ENABLE_AUTH="${ZOO_ENABLE_AUTH:-no}"
export ZOO_CLIENT_USER="${ZOO_CLIENT_USER:-}"
export ZOO_CLIENT_PASSWORD="${ZOO_CLIENT_PASSWORD:-}"
export ZOO_SERVER_USERS="${ZOO_SERVER_USERS:-}"
export ZOO_SERVER_PASSWORDS="${ZOO_SERVER_PASSWORDS:-}"
# Internal variables
export APP_PID_FILE="${ZOOPIDFILE}"
export APP_DAEMON_USER="${APP_NAME}"
export APP_DAEMON_GROUP="${APP_NAME}"
# Customization variables
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.