blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a4a6340efce7bd331968d3427cdd8ecabeafd95b | Shell | troykinsella/concourse-tasks | /semver/to_rubygem_version.sh | UTF-8 | 501 | 3.734375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e
VERSION_DIR=$PWD/$1
OUT=$PWD/$2
VERSION=$(cat ${VERSION_DIR}/number)
echo "Semver: ${VERSION}"
FINAL_PART=$(echo ${VERSION} | awk -F- '{print $1}')
PRE_PART=$(echo ${VERSION} | sed "s/^${FINAL_PART}//" | sed "s/^-//")
if [ -z "${PRE_PART}" ]; then
RG_VERSION="${VERSION}"
else
RG_PRE_PART=$(echo ${PRE_PART} | sed -E 's/[_\-](.)/\U\1/g' | tr -d '.')
RG_VERSION="${FINAL_PART}.${RG_PRE_PART}"
fi
echo "RubyGems: ${RG_VERSION}"
echo ${RG_VERSION} > ${OUT}/number
| true |
43f1aedfef56809d4b1e7b59e18536c3ae583179 | Shell | ccnic/Exercise1-Chris | /exercise1-chris | UTF-8 | 351 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# $1 means "the first argument passed to this script"
# cut -d " " means cut out the delimiter, which is a space
# -f 1 means only use field (column) 1
# sort | uniq means sort them to only show the unique permissions
# wc -l counts the words, or the number of different permissions
ls -l "$1" | cut -d ' ' -f 1 | sort | uniq | wc -l
| true |
bef374b87e59b30b79e75ed74a9555d353b7ade7 | Shell | iammehrabalam/Scripts | /Web Server Apache Setup/python_django_apache2.sh | UTF-8 | 2,266 | 3.59375 | 4 | [] | no_license | #!/bin/bash
if [ $(id -u) != 0 ]
then
echo "\033[1;37;40mThis script must be run as root user . \nGo back and try again as root user\033[0m"
exit 1
fi
sudo apt-get update
sudo apt-get install -y python-pip apache2 libapache2-mod-wsgi
echo '\033[1;37;40m Enter Project Name eg. myproject \033[0m'
read projectname
echo '\033[1;37;40m Enter full path of your project eg. /home/username/myproject/ \n make sure path start and end with / \033[0m'
read projectpath
echo '\033[1;37;40m'
echo $projectname'/static/ is your static file path press (y or n) \033[0m'
read choice
if [ "$choice" = "n" ]
then
echo '\033[1;37;40m Enter full Path of your static Directory \033[0m'
read staticpath
fi
echo '\033[1;37;40m Enter DNS name \033[0m'
read Dns
echo '\033[1;37;40m Enter Email Address \033[0m'
read Email
#sudo chown :www-data ~/myproject
virtualhost="
<VirtualHost *:80>\n \
ServerName $Dns\n \
ServerAlias www.$Dns\n \
ServerAdmin $Email\n \
Alias /static/ $staticpath \n \
<Directory $staticpath>\n \
Require all granted\n \
</Directory>\n \
WSGIDaemonProcess $projectname python-path=$projectpath:/usr/local/lib/python2.7/ \n \
WSGIProcessGroup $projectname\n \
WSGIScriptAlias / $projectpath$projectname/wsgi.py \n \
<Directory $projectpath$projectname/> \n \
<Files wsgi.py>\n \
Require all granted\n \
</Files>\n \
</Directory>\n \
#ErrorLog log file path\n \
#CustomLog access file path combined\n \
</VirtualHost>\n \
"
echo $virtualhost > /etc/apache2/sites-available/$projectname.conf
chown -R :www-data $projectpath
chmod -R 664 $projectpath
a2dissite $projectname.conf
a2ensite $projectname.conf
service apache2 restart
echo '\033[1;37;40m If running apache server at localhost make entry of DNS in /etc/hosts/ file \n if any permission denied error occur see log /var/log/apache2/error.log or access.log and change group of this file to www-data using chown :www-data filename\033[0m'
echo '\033[1;37;40m you can see ur project path here /etc/apache2/sites-available/'$projectname.conf
echo 'if something wrong edit it and save after that run this'
echo 'a2dissite '$projectname.conf
echo 'a2densite '$projectname.conf
echo 'and reload or restart apache2 server \033[0m' | true |
3e05e8b1c6e6ab3837215fb55b72ba695289a882 | Shell | jamieconnolly/pyenv-virtualenv-ext | /bin/pyenv-virtualenv-origin | UTF-8 | 532 | 3.6875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Usage: pyenv virtualenv-origin
# Summary: Explain how the current virtual Python environment is set
set -e
[ -n "$PYENV_DEBUG" ] && set -x
unset PYENV_VIRTUAL_ENV_ORIGIN
OLDIFS="$IFS"
IFS=$'\n' scripts=(`pyenv-hooks virtualenv-origin`)
IFS="$OLDIFS"
for script in "${scripts[@]}"; do
source "$script"
done
if [ -n "$PYENV_VIRTUAL_ENV_ORIGIN" ]; then
echo "$PYENV_VIRTUAL_ENV_ORIGIN"
elif [ -n "$PYENV_VIRTUAL_ENV" ]; then
echo "PYENV_VIRTUAL_ENV environment variable"
else
pyenv-virtualenv-file
fi
| true |
717b27f00d4bfa8b204cc517a0c231b759ae56bd | Shell | thomaslepoix/meta-remote-estei | /recipes-project/init-gpio/files/init-gpio.sh | UTF-8 | 1,409 | 3.296875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
### BEGIN INIT INFO
# Provides: init-gpio
# Required-Start: $remote_fs $syslog
# Required-Stop:
# Default-Start: 5
# Default-Stop: 0 1 6
# Short-Description: GPIO initialization for remote shield
### END INIT INFO
case "$1" in
start)
echo -n 'Setting GPIO... '
echo '2' >/sys/class/gpio/export
echo '3' >/sys/class/gpio/export
echo '4' >/sys/class/gpio/export
echo '14' >/sys/class/gpio/export
echo '17' >/sys/class/gpio/export
echo '22' >/sys/class/gpio/export
echo '23' >/sys/class/gpio/export
echo '27' >/sys/class/gpio/export
echo 'in' >/sys/class/gpio/gpio2/direction
echo 'in' >/sys/class/gpio/gpio3/direction
echo 'in' >/sys/class/gpio/gpio4/direction
echo 'in' >/sys/class/gpio/gpio17/direction
echo 'in' >/sys/class/gpio/gpio22/direction
echo 'in' >/sys/class/gpio/gpio23/direction
echo 'in' >/sys/class/gpio/gpio27/direction
echo 'out' >/sys/class/gpio/gpio14/direction
echo 'Done'
;;
stop)
echo -n 'Unsetting GPIO... '
echo '2' >/sys/class/gpio/unexport
echo '3' >/sys/class/gpio/unexport
echo '4' >/sys/class/gpio/unexport
echo '14' >/sys/class/gpio/unexport
echo '17' >/sys/class/gpio/unexport
echo '22' >/sys/class/gpio/unexport
echo '23' >/sys/class/gpio/unexport
echo '27' >/sys/class/gpio/unexport
echo 'Done'
;;
restart)
$0 stop
$0 start
;;
*)
echo "Usage : $0 {start|stop|restart}"
;;
esac
exit 0
| true |
a9fb5eadaba9ab862fc6863e6c5f5286ae3d2453 | Shell | karthink/n2wal | /n2wal | UTF-8 | 507 | 3.34375 | 3 | [] | no_license | #!/bin/bash
##
# CLI launcher to run the n2wal sync
set -eo pipefail
# Set the PATH for cron compatibility. This will work when there is a
# "system" emacs installed. Otherwise, a wrapper script might be
# necessary.
export PATH="$PATH:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin"
HERE=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
emacs --batch -l "$HERE/n2wal.el" -f n2wal-sync-feeds 2>&1 \
| while read -r line; do echo "[$(date)] $line"; done \
| tee -a ~/.n2wal.d/sync.log
| true |
979ba0976292e5e7859b1615a23c27e01446e8b9 | Shell | axsh/vnet-bootstrap | /bin/advance-vms-to-high-level-stage.sh | UTF-8 | 1,477 | 3.671875 | 4 | [] | no_license | #!/bin/bash
export SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "$0")")" && pwd -P)" || exit
source "$SCRIPT_DIR/../lib/processgroup-error-handler.source"
# TODO, do a pull from local git clones here??
source "$SCRIPT_DIR/../lib/shared-code.source"
[ -d ./lib/vnet-install-script ] || reportfail "expect to be run from grandparent dir of .../vnet-install-script/"
[ -d ./lib/c-dinkvm ] || reportfail "expect to be run from grandparent dir of .../c-dinkvm/"
config_path="$SCRIPT_DIR/../demo.config"
[ -f "$config_path" ] || reportfail "demo.config file must be created with ./bin/initialize-demo-configuration"
source "$config_path" # only read in here for MEM_{1,2,3,r} parameters
divider()
{
echo
echo =============================================================================
echo =============================================================================
echo
}
for s in $STAGES; do
high_level_stage="$s"
[ "$high_level_stage" = "$1" ] && break
done
[ "$high_level_stage" = "$1" ] || reportfail "First parameter must be one of: $STAGES"
shift
# default to all VMs
if [ "$*" = "" ]; then
vmlist=( 1 2 3 r )
else
vmlist=( "$@" )
fi
for i in "${vmlist[@]}"; do
case "$i" in
1 | 2 | 3 | r)
eval "./bin/advance-vms-to-low-level-step.sh \$${high_level_stage}_$i $i"
;;
*)
sleep 5 # increase the chance of other messages moving out of the way
echo "bad parameter: $1" 1>&2
sleep 5
;;
esac
done
| true |
f1a49ed50201d6bee47b998f46ffdf776a5a2ff9 | Shell | io7m/coreland-sysdeps | /sysdeps-remove | UTF-8 | 581 | 4.25 | 4 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/sh
PROGRAM_NAME="sysdeps-remove"
fatal()
{
echo "${PROGRAM_NAME}: fatal: $1" 1>&2
exit 1
}
error()
{
echo "${PROGRAM_NAME}: error: $1" 1>&2
FAILED=1
}
if [ $# -lt 1 ]
then
echo "${PROGRAM_NAME}: usage: module [modules ...]" 1>&2
exit 1
fi
FAILED=0
for MODULE in $@
do
MODULE_PATH="SYSDEPS/modules/${MODULE}"
if [ -d "${MODULE_PATH}" ]
then
rm -r "${MODULE_PATH}" || error "could not remove ${MODULE_PATH}"
else
error "module ${MODULE} does not exist"
fi
done
if [ ${FAILED} -eq 1 ]
then
fatal "one or more modules were not removed"
fi
| true |
c8bf70d58f0ee459de301c5eef3b79b9e42bf111 | Shell | andyleejordan/llvm | /mesos-llvm.sh | UTF-8 | 1,037 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
VERSION="2017-11-11"
mkdir /tmp/llvm
wget -O - https://releases.llvm.org/5.0.0/llvm-5.0.0.src.tar.xz | tar --strip-components=1 -xJ -C /tmp/llvm
git clone --depth 1 -b mesos_50 https://github.com/mesos/clang.git /tmp/llvm/tools/clang
git clone --depth 1 -b mesos_50 https://github.com/mesos/clang-tools-extra.git /tmp/llvm/tools/clang/tools/extra
source /opt/rh/devtoolset-4/enable
source /opt/rh/python27/enable
cmake -GNinja \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_C_FLAGS_RELEASE=-DNDEBUG \
-DCMAKE_CXX_FLAGS_RELEASE=-DNDEBUG \
-DCMAKE_INSTALL_PREFIX=/mesos-llvm/"${VERSION}" \
-DCMAKE_FIND_FRAMEWORK=LAST \
-DLLVM_BUILD_STATIC=ON \
-DLLVM_OPTIMIZED_TABLEGEN=ON \
-Wno-dev /tmp/llvm
cmake --build . --target clang-format
cmake -DCOMPONENT=clang-format -P cmake_install.cmake
ninja tools/clang/tools/extra/clang-tidy/install && \
ninja tools/clang/tools/extra/clang-apply-replacements/install && \
tar cf /install/mesos-llvm-"${VERSION}".linux.tar.gz /mesos-llvm
| true |
9cd2243a22ea1632f9f860d1e5acd101432fd135 | Shell | mreschke/logs.js | /forever | UTF-8 | 596 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# This will daemonize the server
# See https://github.com/nodejitsu/forever
# mReschke 2013-03-22
function start_service {
cd /tmp/logs.js/ && \
NODE_ENV=$environment forever \
start \
-a -l /dev/null \
-o /var/log/logs.js.log \
-e /var/log/logs.js-error.log \
./logs.js
}
function stop_service {
forever stop logs.js
}
if [ "$1" == "start" -o "$1" == "restart" ]; then
stop_service
start_service
elif [ "$1" == "stop" ]; then
stop_service
elif [ "$1" == "log" ]; then
tail -f /var/log/logs.js.log
else
echo "Use start, stop, or log to control this script"
fi
| true |
a9c117742c3eea0d6aca22de4ba6a57a82e53850 | Shell | ltfafei/myShell_Script | /Deploy_LAMP.sh | UTF-8 | 3,697 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Author:afei
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
# Centos7换源函数
Change_yum_repo(){
url1=http://mirrors.aliyun.com/repo/Centos-7.repo
url2=http://mirrors.163.com/.help/CentOS7-Base-163.repo
# 获取url响应状态码
code_st1=$(curl --connect-time 6 -I -s -o /dev/null -w %{http_code} ${url1})
code_st2=$(curl --connect-time 6 -I -s -o /dev/null -w %{http_code} ${url2})
mv -f /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
if [ $code_st1 -eq 200 ];then
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
elif [ $code_st1 != 200 -a $code_st2 -eq 200 ];then
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.163.com/.help/CentOS7-Base-163.repo
else
mv -f /etc/yum.repos.d/CentOS-Base.repo.bak /etc/yum.repos.d/CentOS-Base.repo
echo "无法下载yum源,换源失败"
fi
yum makecache
}
# 安装MySQL_5.7函数
Install_mysql(){
wget https://dev.mysql.com/get/mysql57-community-release-el7-11.noarch.rpm
rpm -ivh mysql57-community-release-el7-11.noarch.rpm
yum -y install mysql mysql-devel mysql-server
if echo $? == 0 ;then
echo "mysql5.7安装成功"
mysql -V
else
echo "mysql5.7安装失败!"
exit
fi
}
# 安装PHP_7.0函数
Install_PHP(){
yum update -y && yum -y install epel-release yum-utils
#rpm -Uvh https://mirror.webtatic.com/yum/el6/latest.rpm
yum -y install http://rpms.remirepo.net/enterprise/remi-release-7.rpm
yum install -y php php-common php-fpm php-opcache php-gd php-mysqlnd php-mbstring php-pecl-redis php-pecl-memcached php-devel --enablerepo remi-php70
if echo $? == 0 ;then
echo "php安装成功"
php -v
else
echo "php安装失败!"
exit
fi
}
Deploy_main(){
# 确认操作系统
Centos_ver=$(cat /etc/redhat-release | grep ' 7.' | grep -i centos)
if [ "$Centos_ver" ]; then
Change_yum_repo
else
echo "system is not Centos7,change yumrepo fail!"
fi
# 检测并卸载mariadb并安装mysql5.7
if [ "rpm -qa mariadb" ];then
yum remove mariadb* -y && yum autoremove -y
if [ "! rpm -qa mariadb" ];then
echo "mariadb卸载完成"
fi
else
echo "未检测到mariadb!"
fi
echo ""
echo "开始安装MySQL5.7"
Install_mysql
# 检测并卸载其他版本php并安装PHP7.0
if ! rpm -qa php;then
php_ver=$(php -v |grep 'PHP'|head -n 1 |awk '{print $2}')
if [ `expr $php_ver \<= 7.0.0` -eq 1 ];then
yum remove php* -y && yum autoremove -y
echo "低版本php卸载完成,开始安装PHP7.0"
Install_PHP
systemctl start php-fpm && systemctl enable php-fpm
else
echo "已存在PHP大于7.0版本,不需要重新安装"
fi
else
Install_PHP
fi
# 安装httpd
yum -y install httpd httpd-devel
#创建php测试页面并测试
echo "<?php phpinfo(); ?>" >> /var/www/html/index.php
sed -i '284a AddType application/x-http-php .php .phtml' /etc/httpd/conf/httpd.conf
systemctl restart httpd
ip_add=$(ip addr | grep -E -o '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | grep -E -v "^127\.|^255\.|^0\." | head -n 1)
url_stat=$(curl --connect-time 6 -I -s -o /dev/null -w %{http_code} http://${ip_add}/index.php)
if [ $url_stat -eq 200 ]; then
echo "==========================="
echo "PHP info页面测试成功"
echo "==========================="
else
echo "==========================="
echo "PHP info测试失败!"
echo "==========================="
fi
# 初始化Mysql数据库
systemctl start mysqld && systemctl enable mysqld
mysql_secure_installation
}
read -p "是否开始部署LAMP架构(y|n):" X
if [ $X == 'y' -o $X == 'Y' ];then
Deploy_main
else
echo "正在退出部署LAMP架构脚本..."
fi | true |
919128b750d810baedad87970ce8a41da58bd4ea | Shell | Dakesi95/ids-honeypots | /install-scripts/snort-config.sh | UTF-8 | 26,939 | 2.609375 | 3 | [] | no_license | #--------------------------------------------------
# VRT Rule Packages Snort.conf
#
# For more information visit us at:
# http://www.snort.org Snort Website
# http://vrt-blog.snort.org/ Sourcefire VRT Blog
#
# Mailing list Contact: snort-sigs@lists.sourceforge.net
# False Positive reports: fp@sourcefire.com
# Snort bugs: bugs@snort.org
#
# Compatible with Snort Versions:
# VERSIONS : 2.9.13
#
# Snort build options:
# OPTIONS : --enable-gre --enable-mpls --enable-targetbased --enable-ppm --enable-perfprofiling --enable-zlib --enable-active-response --enable-normalizer --enable-reload --enable-react --enable-flexresp3
#
# Additional information:
# This configuration file enables active response, to run snort in
# test mode -T you are required to supply an interface -i <interface>
# or test mode will fail to fully validate the configuration and
# exit with a FATAL error
#--------------------------------------------------
###################################################
# This file contains a sample snort configuration.
# You should take the following steps to create your own custom configuration:
#
# 1) Set the network variables.
# 2) Configure the decoder
# 3) Configure the base detection engine
# 4) Configure dynamic loaded libraries
# 5) Configure preprocessors
# 6) Configure output plugins
# 7) Customize your rule set
# 8) Customize preprocessor and decoder rule set
# 9) Customize shared object rule set
###################################################
###################################################
# Step #1: Set the network variables. For more information, see README.variables
###################################################
# Setup the network addresses you are protecting
ipvar HOME_NET 10.0.2.0/24
# Set up the external network addresses. Leave as "any" in most situations
ipvar EXTERNAL_NET any
# List of DNS servers on your network
ipvar DNS_SERVERS $HOME_NET
# List of SMTP servers on your network
ipvar SMTP_SERVERS $HOME_NET
# List of web servers on your network
ipvar HTTP_SERVERS $HOME_NET
# List of sql servers on your network
ipvar SQL_SERVERS $HOME_NET
# List of telnet servers on your network
ipvar TELNET_SERVERS $HOME_NET
# List of ssh servers on your network
ipvar SSH_SERVERS $HOME_NET
# List of ftp servers on your network
ipvar FTP_SERVERS $HOME_NET
# List of sip servers on your network
ipvar SIP_SERVERS $HOME_NET
# List of ports you run web servers on
portvar HTTP_PORTS [80,81,311,383,591,593,901,1220,1414,1741,1830,2301,2381,2809,3037,3128,3702,4343,4848,5250,6988,7000,7001,7144,7145,7510,7777,7779,8000,8008,8014,8028,8080,8085,8088,8090,8118,8123,8180,8181,8243,8280,8300,8800,8888,8899,9000,9060,9080,9090,9091,9443,9999,11371,34443,34444,41080,50002,55555]
# List of ports you want to look for SHELLCODE on.
portvar SHELLCODE_PORTS !80
# List of ports you might see oracle attacks on
portvar ORACLE_PORTS 1024:
# List of ports you want to look for SSH connections on:
portvar SSH_PORTS 22
# List of ports you run ftp servers on
portvar FTP_PORTS [21,2100,3535]
# List of ports you run SIP servers on
portvar SIP_PORTS [5060,5061,5600]
# List of file data ports for file inspection
portvar FILE_DATA_PORTS [$HTTP_PORTS,110,143]
# List of GTP ports for GTP preprocessor
portvar GTP_PORTS [2123,2152,3386]
# other variables, these should not be modified
ipvar AIM_SERVERS [64.12.24.0/23,64.12.28.0/23,64.12.161.0/24,64.12.163.0/24,64.12.200.0/24,205.188.3.0/24,205.188.5.0/24,205.188.7.0/24,205.188.9.0/24,205.188.153.0/24,205.188.179.0/24,205.188.248.0/24]
# Path to your rules files (this can be a relative path)
# Note for Windows users: You are advised to make this an absolute path,
# such as: c:\snort\rules
var RULE_PATH /etc/snort/rules
var SO_RULE_PATH /etc/snort/so_rules
var PREPROC_RULE_PATH /etc/snort/preproc_rules
# If you are using reputation preprocessor set these
# Currently there is a bug with relative paths, they are relative to where snort is
# not relative to snort.conf like the above variables
# This is completely inconsistent with how other vars work, BUG 89986
# Set the absolute path appropriately
var WHITE_LIST_PATH /etc/snort/rules/iplists
var BLACK_LIST_PATH /etc/snort/rules/iplists
###################################################
# Step #2: Configure the decoder. For more information, see README.decode
###################################################
# Stop generic decode events:
config disable_decode_alerts
# Stop Alerts on experimental TCP options
config disable_tcpopt_experimental_alerts
# Stop Alerts on obsolete TCP options
config disable_tcpopt_obsolete_alerts
# Stop Alerts on T/TCP alerts
config disable_tcpopt_ttcp_alerts
# Stop Alerts on all other TCPOption type events:
config disable_tcpopt_alerts
# Stop Alerts on invalid ip options
config disable_ipopt_alerts
# Alert if value in length field (IP, TCP, UDP) is greater th elength of the packet
# config enable_decode_oversized_alerts
# Same as above, but drop packet if in Inline mode (requires enable_decode_oversized_alerts)
# config enable_decode_oversized_drops
# Configure IP / TCP checksum mode
config checksum_mode: all
# Configure maximum number of flowbit references. For more information, see README.flowbits
# config flowbits_size: 64
# Configure ports to ignore
# config ignore_ports: tcp 21 6667:6671 1356
# config ignore_ports: udp 1:17 53
# Configure active response for non inline operation. For more information, see REAMDE.active
# config response: eth0 attempts 2
# Configure DAQ related options for inline operation. For more information, see README.daq
#
# config daq: <type>
# config daq_dir: <dir>
# config daq_mode: <mode>
# config daq_var: <var>
#
# <type> ::= pcap | afpacket | dump | nfq | ipq | ipfw
# <mode> ::= read-file | passive | inline
# <var> ::= arbitrary <name>=<value passed to DAQ
# <dir> ::= path as to where to look for DAQ module so's
# Configure specific UID and GID to run snort as after dropping privs. For more information see snort -h command line options
#
# config set_gid:
# config set_uid:
# Configure default snaplen. Snort defaults to MTU of in use interface. For more information see README
#
# config snaplen:
#
# Configure default bpf_file to use for filtering what traffic reaches snort. For more information see snort -h command line options (-F)
#
# config bpf_file:
#
# Configure default log directory for snort to log to. For more information see snort -h command line options (-l)
#
# config logdir:
###################################################
# Step #3: Configure the base detection engine. For more information, see README.decode
###################################################
# Configure PCRE match limitations
config pcre_match_limit: 3500
config pcre_match_limit_recursion: 1500
# Configure the detection engine See the Snort Manual, Configuring Snort - Includes - Config
config detection: search-method ac-split search-optimize max-pattern-len 20
# Configure the event queue. For more information, see README.event_queue
config event_queue: max_queue 8 log 5 order_events content_length
###################################################
## Configure GTP if it is to be used.
## For more information, see README.GTP
####################################################
# config enable_gtp
###################################################
# Per packet and rule latency enforcement
# For more information see README.ppm
###################################################
# Per Packet latency configuration
#config ppm: max-pkt-time 250, \
# fastpath-expensive-packets, \
# pkt-log
# Per Rule latency configuration
#config ppm: max-rule-time 200, \
# threshold 3, \
# suspend-expensive-rules, \
# suspend-timeout 20, \
# rule-log alert
###################################################
# Configure Perf Profiling for debugging
# For more information see README.PerfProfiling
###################################################
#config profile_rules: print all, sort avg_ticks
#config profile_preprocs: print all, sort avg_ticks
###################################################
# Configure protocol aware flushing
# For more information see README.stream5
###################################################
config paf_max: 16000
###################################################
# Step #4: Configure dynamic loaded libraries.
# For more information, see Snort Manual, Configuring Snort - Dynamic Modules
###################################################
# path to dynamic preprocessor libraries
dynamicpreprocessor directory /usr/local/lib/snort_dynamicpreprocessor/
# path to base preprocessor engine
dynamicengine /usr/local/lib/snort_dynamicengine/libsf_engine.so
# path to dynamic rules libraries
dynamicdetection directory /usr/local/lib/snort_dynamicrules
###################################################
# Step #5: Configure preprocessors
# For more information, see the Snort Manual, Configuring Snort - Preprocessors
###################################################
# GTP Control Channle Preprocessor. For more information, see README.GTP
# preprocessor gtp: ports { 2123 3386 2152 }
# Inline packet normalization. For more information, see README.normalize
# Does nothing in IDS mode
preprocessor normalize_ip4
preprocessor normalize_tcp: ips ecn stream
preprocessor normalize_icmp4
preprocessor normalize_ip6
preprocessor normalize_icmp6
# Target-based IP defragmentation. For more inforation, see README.frag3
preprocessor frag3_global: max_frags 65536
preprocessor frag3_engine: policy windows detect_anomalies overlap_limit 10 min_fragment_length 100 timeout 180
# Target-Based stateful inspection/stream reassembly. For more inforation, see README.stream5
preprocessor stream5_global: track_tcp yes, \
track_udp yes, \
track_icmp no, \
max_tcp 262144, \
max_udp 131072, \
max_active_responses 2, \
min_response_seconds 5
preprocessor stream5_tcp: log_asymmetric_traffic no, policy windows, \
detect_anomalies, require_3whs 180, \
overlap_limit 10, small_segments 3 bytes 150, timeout 180, \
ports client 21 22 23 25 42 53 79 109 110 111 113 119 135 136 137 139 143 \
161 445 513 514 587 593 691 1433 1521 1741 2100 3306 6070 6665 6666 6667 6668 6669 \
7000 8181 32770 32771 32772 32773 32774 32775 32776 32777 32778 32779, \
ports both 80 81 311 383 443 465 563 591 593 636 901 989 992 993 994 995 1220 1414 1830 2301 2381 2809 3037 3128 3702 4343 4848 5250 6988 7907 7000 7001 7144 7145 7510 7802 7777 7779 \
7801 7900 7901 7902 7903 7904 7905 7906 7908 7909 7910 7911 7912 7913 7914 7915 7916 \
7917 7918 7919 7920 8000 8008 8014 8028 8080 8085 8088 8090 8118 8123 8180 8243 8280 8300 8800 8888 8899 9000 9060 9080 9090 9091 9443 9999 11371 34443 34444 41080 50002 55555
preprocessor stream5_udp: timeout 180
# performance statistics. For more information, see the Snort Manual, Configuring Snort - Preprocessors - Performance Monitor
# preprocessor perfmonitor: time 300 file /var/snort/snort.stats pktcnt 10000
# HTTP normalization and anomaly detection. For more information, see README.http_inspect
preprocessor http_inspect: global iis_unicode_map unicode.map 1252 compress_depth 65535 decompress_depth 65535
preprocessor http_inspect_server: server default \
http_methods { GET POST PUT SEARCH MKCOL COPY MOVE LOCK UNLOCK NOTIFY POLL BCOPY BDELETE BMOVE LINK UNLINK OPTIONS HEAD DELETE TRACE TRACK CONNECT SOURCE SUBSCRIBE UNSUBSCRIBE PROPFIND PROPPATCH BPROPFIND BPROPPATCH RPC_CONNECT PROXY_SUCCESS BITS_POST CCM_POST SMS_POST RPC_IN_DATA RPC_OUT_DATA RPC_ECHO_DATA } \
chunk_length 500000 \
server_flow_depth 0 \
client_flow_depth 0 \
post_depth 65495 \
oversize_dir_length 500 \
max_header_length 750 \
max_headers 100 \
max_spaces 200 \
small_chunk_length { 10 5 } \
ports { 80 81 311 383 591 593 901 1220 1414 1741 1830 2301 2381 2809 3037 3128 3702 4343 4848 5250 6988 7000 7001 7144 7145 7510 7777 7779 8000 8008 8014 8028 8080 8085 8088 8090 8118 8123 8180 8181 8243 8280 8300 8800 8888 8899 9000 9060 9080 9090 9091 9443 9999 11371 34443 34444 41080 50002 55555 } \
non_rfc_char { 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 } \
enable_cookie \
extended_response_inspection \
inspect_gzip \
normalize_utf \
unlimited_decompress \
normalize_javascript \
apache_whitespace no \
ascii no \
bare_byte no \
directory no \
double_decode no \
iis_backslash no \
iis_delimiter no \
iis_unicode no \
multi_slash no \
utf_8 no \
u_encode yes \
webroot no
# ONC-RPC normalization and anomaly detection. For more information, see the Snort Manual, Configuring Snort - Preprocessors - RPC Decode
preprocessor rpc_decode: 111 32770 32771 32772 32773 32774 32775 32776 32777 32778 32779 no_alert_multiple_requests no_alert_large_fragments no_alert_incomplete
# Back Orifice detection.
preprocessor bo
# FTP / Telnet normalization and anomaly detection. For more information, see README.ftptelnet
preprocessor ftp_telnet: global inspection_type stateful encrypted_traffic no check_encrypted
preprocessor ftp_telnet_protocol: telnet \
ayt_attack_thresh 20 \
normalize ports { 23 } \
detect_anomalies
preprocessor ftp_telnet_protocol: ftp server default \
def_max_param_len 100 \
ports { 21 2100 3535 } \
telnet_cmds yes \
ignore_telnet_erase_cmds yes \
ftp_cmds { ABOR ACCT ADAT ALLO APPE AUTH CCC CDUP } \
ftp_cmds { CEL CLNT CMD CONF CWD DELE ENC EPRT } \
ftp_cmds { EPSV ESTA ESTP FEAT HELP LANG LIST LPRT } \
ftp_cmds { LPSV MACB MAIL MDTM MIC MKD MLSD MLST } \
ftp_cmds { MODE NLST NOOP OPTS PASS PASV PBSZ PORT } \
ftp_cmds { PROT PWD QUIT REIN REST RETR RMD RNFR } \
ftp_cmds { RNTO SDUP SITE SIZE SMNT STAT STOR STOU } \
ftp_cmds { STRU SYST TEST TYPE USER XCUP XCRC XCWD } \
ftp_cmds { XMAS XMD5 XMKD XPWD XRCP XRMD XRSQ XSEM } \
ftp_cmds { XSEN XSHA1 XSHA256 } \
alt_max_param_len 0 { ABOR CCC CDUP ESTA FEAT LPSV NOOP PASV PWD QUIT REIN STOU SYST XCUP XPWD } \
alt_max_param_len 200 { ALLO APPE CMD HELP NLST RETR RNFR STOR STOU XMKD } \
alt_max_param_len 256 { CWD RNTO } \
alt_max_param_len 400 { PORT } \
alt_max_param_len 512 { SIZE } \
chk_str_fmt { ACCT ADAT ALLO APPE AUTH CEL CLNT CMD } \
chk_str_fmt { CONF CWD DELE ENC EPRT EPSV ESTP HELP } \
chk_str_fmt { LANG LIST LPRT MACB MAIL MDTM MIC MKD } \
chk_str_fmt { MLSD MLST MODE NLST OPTS PASS PBSZ PORT } \
chk_str_fmt { PROT REST RETR RMD RNFR RNTO SDUP SITE } \
chk_str_fmt { SIZE SMNT STAT STOR STRU TEST TYPE USER } \
chk_str_fmt { XCRC XCWD XMAS XMD5 XMKD XRCP XRMD XRSQ } \
chk_str_fmt { XSEM XSEN XSHA1 XSHA256 } \
cmd_validity ALLO < int [ char R int ] > \
cmd_validity EPSV < [ { char 12 | char A char L char L } ] > \
cmd_validity MACB < string > \
cmd_validity MDTM < [ date nnnnnnnnnnnnnn[.n[n[n]]] ] string > \
cmd_validity MODE < char ASBCZ > \
cmd_validity PORT < host_port > \
cmd_validity PROT < char CSEP > \
cmd_validity STRU < char FRPO [ string ] > \
cmd_validity TYPE < { char AE [ char NTC ] | char I | char L [ number ] } >
preprocessor ftp_telnet_protocol: ftp client default \
max_resp_len 256 \
bounce yes \
ignore_telnet_erase_cmds yes \
telnet_cmds yes
# SMTP normalization and anomaly detection. For more information, see README.SMTP
preprocessor smtp: ports { 25 465 587 691 } \
inspection_type stateful \
b64_decode_depth 0 \
qp_decode_depth 0 \
bitenc_decode_depth 0 \
uu_decode_depth 0 \
log_mailfrom \
log_rcptto \
log_filename \
log_email_hdrs \
normalize cmds \
normalize_cmds { ATRN AUTH BDAT CHUNKING DATA DEBUG EHLO EMAL ESAM ESND ESOM ETRN EVFY } \
normalize_cmds { EXPN HELO HELP IDENT MAIL NOOP ONEX QUEU QUIT RCPT RSET SAML SEND SOML } \
normalize_cmds { STARTTLS TICK TIME TURN TURNME VERB VRFY X-ADAT X-DRCP X-ERCP X-EXCH50 } \
normalize_cmds { X-EXPS X-LINK2STATE XADR XAUTH XCIR XEXCH50 XGEN XLICENSE XQUE XSTA XTRN XUSR } \
max_command_line_len 512 \
max_header_line_len 1000 \
max_response_line_len 512 \
alt_max_command_line_len 260 { MAIL } \
alt_max_command_line_len 300 { RCPT } \
alt_max_command_line_len 500 { HELP HELO ETRN EHLO } \
alt_max_command_line_len 255 { EXPN VRFY ATRN SIZE BDAT DEBUG EMAL ESAM ESND ESOM EVFY IDENT NOOP RSET } \
alt_max_command_line_len 246 { SEND SAML SOML AUTH TURN ETRN DATA RSET QUIT ONEX QUEU STARTTLS TICK TIME TURNME VERB X-EXPS X-LINK2STATE XADR XAUTH XCIR XEXCH50 XGEN XLICENSE XQUE XSTA XTRN XUSR } \
valid_cmds { ATRN AUTH BDAT CHUNKING DATA DEBUG EHLO EMAL ESAM ESND ESOM ETRN EVFY } \
valid_cmds { EXPN HELO HELP IDENT MAIL NOOP ONEX QUEU QUIT RCPT RSET SAML SEND SOML } \
valid_cmds { STARTTLS TICK TIME TURN TURNME VERB VRFY X-ADAT X-DRCP X-ERCP X-EXCH50 } \
valid_cmds { X-EXPS X-LINK2STATE XADR XAUTH XCIR XEXCH50 XGEN XLICENSE XQUE XSTA XTRN XUSR } \
xlink2state { enabled }
# Portscan detection. For more information, see README.sfportscan
# preprocessor sfportscan: proto { all } memcap { 10000000 } sense_level { low }
# ARP spoof detection. For more information, see the Snort Manual - Configuring Snort - Preprocessors - ARP Spoof Preprocessor
# preprocessor arpspoof
# preprocessor arpspoof_detect_host: 192.168.40.1 f0:0f:00:f0:0f:00
# SSH anomaly detection. For more information, see README.ssh
preprocessor ssh: server_ports { 22 } \
autodetect \
max_client_bytes 19600 \
max_encrypted_packets 20 \
max_server_version_len 100 \
enable_respoverflow enable_ssh1crc32 \
enable_srvoverflow enable_protomismatch
# SMB / DCE-RPC normalization and anomaly detection. For more information, see README.dcerpc2
preprocessor dcerpc2: memcap 102400, events [co ]
preprocessor dcerpc2_server: default, policy WinXP, \
detect [smb [139,445], tcp 135, udp 135, rpc-over-http-server 593], \
autodetect [tcp 1025:, udp 1025:, rpc-over-http-server 1025:], \
smb_max_chain 3, smb_invalid_shares ["C$", "D$", "ADMIN$"]
# DNS anomaly detection. For more information, see README.dns
preprocessor dns: ports { 53 } enable_rdata_overflow
# SSL anomaly detection and traffic bypass. For more information, see README.ssl
preprocessor ssl: ports { 443 465 563 636 989 992 993 994 995 7801 7802 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 }, trustservers, noinspect_encrypted
# SDF sensitive data preprocessor. For more information see README.sensitive_data
preprocessor sensitive_data: alert_threshold 25
# SIP Session Initiation Protocol preprocessor. For more information see README.sip
preprocessor sip: max_sessions 40000, \
ports { 5060 5061 5600 }, \
methods { invite \
cancel \
ack \
bye \
register \
options \
refer \
subscribe \
update \
join \
info \
message \
notify \
benotify \
do \
qauth \
sprack \
publish \
service \
unsubscribe \
prack }, \
max_uri_len 512, \
max_call_id_len 80, \
max_requestName_len 20, \
max_from_len 256, \
max_to_len 256, \
max_via_len 1024, \
max_contact_len 512, \
max_content_len 2048
# IMAP preprocessor. For more information see README.imap
preprocessor imap: \
ports { 143 } \
b64_decode_depth 0 \
qp_decode_depth 0 \
bitenc_decode_depth 0 \
uu_decode_depth 0
# POP preprocessor. For more information see README.pop
preprocessor pop: \
ports { 110 } \
b64_decode_depth 0 \
qp_decode_depth 0 \
bitenc_decode_depth 0 \
uu_decode_depth 0
# Modbus preprocessor. For more information see README.modbus
preprocessor modbus: ports { 502 }
# DNP3 preprocessor. For more information see README.dnp3
preprocessor dnp3: ports { 20000 } \
memcap 262144 \
check_crc
# Reputation preprocessor. For more information see README.reputation
preprocessor reputation: \
memcap 500, \
priority whitelist, \
nested_ip inner, \
whitelist $WHITE_LIST_PATH/white_list.rules, \
blacklist $BLACK_LIST_PATH/black_list.rules
###################################################
# Step #6: Configure output plugins
# For more information, see Snort Manual, Configuring Snort - Output Modules
###################################################
# unified2
# Recommended for most installs
# output unified2: filename merged.log, limit 128, nostamp, mpls_event_types, vlan_event_types
# Additional configuration for specific types of installs
# output alert_unified2: filename snort.alert, limit 128, nostamp
# output log_unified2: filename snort.log, limit 128, nostamp
# syslog
# output alert_syslog: LOG_AUTH LOG_ALERT
# pcap
# output log_tcpdump: tcpdump.log
# metadata reference data. do not modify these lines
include classification.config
include reference.config
###################################################
# Step #7: Customize your rule set
# For more information, see Snort Manual, Writing Snort Rules
#
# NOTE: All categories are enabled in this conf file
###################################################
# site specific rules
include $RULE_PATH/local.rules
#include $RULE_PATH/app-detect.rules
#include $RULE_PATH/attack-responses.rules
#include $RULE_PATH/backdoor.rules
#include $RULE_PATH/bad-traffic.rules
#include $RULE_PATH/blacklist.rules
#include $RULE_PATH/botnet-cnc.rules
#include $RULE_PATH/browser-chrome.rules
#include $RULE_PATH/browser-firefox.rules
#include $RULE_PATH/browser-ie.rules
#include $RULE_PATH/browser-other.rules
#include $RULE_PATH/browser-plugins.rules
#include $RULE_PATH/browser-webkit.rules
#include $RULE_PATH/chat.rules
#include $RULE_PATH/content-replace.rules
#include $RULE_PATH/ddos.rules
#include $RULE_PATH/dns.rules
#include $RULE_PATH/dos.rules
#include $RULE_PATH/experimental.rules
#include $RULE_PATH/exploit-kit.rules
#include $RULE_PATH/exploit.rules
#include $RULE_PATH/file-executable.rules
#include $RULE_PATH/file-flash.rules
#include $RULE_PATH/file-identify.rules
#include $RULE_PATH/file-image.rules
#include $RULE_PATH/file-multimedia.rules
#include $RULE_PATH/file-office.rules
#include $RULE_PATH/file-other.rules
#include $RULE_PATH/file-pdf.rules
#include $RULE_PATH/finger.rules
#include $RULE_PATH/ftp.rules
#include $RULE_PATH/icmp-info.rules
#include $RULE_PATH/icmp.rules
#include $RULE_PATH/imap.rules
#include $RULE_PATH/indicator-compromise.rules
#include $RULE_PATH/indicator-obfuscation.rules
#include $RULE_PATH/indicator-shellcode.rules
#include $RULE_PATH/info.rules
#include $RULE_PATH/malware-backdoor.rules
#include $RULE_PATH/malware-cnc.rules
#include $RULE_PATH/malware-other.rules
#include $RULE_PATH/malware-tools.rules
#include $RULE_PATH/misc.rules
#include $RULE_PATH/multimedia.rules
#include $RULE_PATH/mysql.rules
#include $RULE_PATH/netbios.rules
#include $RULE_PATH/nntp.rules
#include $RULE_PATH/oracle.rules
#include $RULE_PATH/os-linux.rules
#include $RULE_PATH/os-other.rules
#include $RULE_PATH/os-solaris.rules
#include $RULE_PATH/os-windows.rules
#include $RULE_PATH/other-ids.rules
#include $RULE_PATH/p2p.rules
#include $RULE_PATH/phishing-spam.rules
#include $RULE_PATH/policy-multimedia.rules
#include $RULE_PATH/policy-other.rules
#include $RULE_PATH/policy.rules
#include $RULE_PATH/policy-social.rules
#include $RULE_PATH/policy-spam.rules
#include $RULE_PATH/pop2.rules
#include $RULE_PATH/pop3.rules
#include $RULE_PATH/protocol-finger.rules
#include $RULE_PATH/protocol-ftp.rules
#include $RULE_PATH/protocol-icmp.rules
#include $RULE_PATH/protocol-imap.rules
#include $RULE_PATH/protocol-pop.rules
#include $RULE_PATH/protocol-services.rules
#include $RULE_PATH/protocol-voip.rules
#include $RULE_PATH/pua-adware.rules
#include $RULE_PATH/pua-other.rules
#include $RULE_PATH/pua-p2p.rules
#include $RULE_PATH/pua-toolbars.rules
#include $RULE_PATH/rpc.rules
#include $RULE_PATH/rservices.rules
#include $RULE_PATH/scada.rules
#include $RULE_PATH/scan.rules
#include $RULE_PATH/server-apache.rules
#include $RULE_PATH/server-iis.rules
#include $RULE_PATH/server-mail.rules
#include $RULE_PATH/server-mssql.rules
#include $RULE_PATH/server-mysql.rules
#include $RULE_PATH/server-oracle.rules
#include $RULE_PATH/server-other.rules
#include $RULE_PATH/server-webapp.rules
#include $RULE_PATH/shellcode.rules
#include $RULE_PATH/smtp.rules
#include $RULE_PATH/snmp.rules
#include $RULE_PATH/specific-threats.rules
#include $RULE_PATH/spyware-put.rules
#include $RULE_PATH/sql.rules
#include $RULE_PATH/telnet.rules
#include $RULE_PATH/tftp.rules
#include $RULE_PATH/virus.rules
#include $RULE_PATH/voip.rules
#include $RULE_PATH/web-activex.rules
#include $RULE_PATH/web-attacks.rules
#include $RULE_PATH/web-cgi.rules
#include $RULE_PATH/web-client.rules
#include $RULE_PATH/web-coldfusion.rules
#include $RULE_PATH/web-frontpage.rules
#include $RULE_PATH/web-iis.rules
#include $RULE_PATH/web-misc.rules
#include $RULE_PATH/web-php.rules
#include $RULE_PATH/x11.rules
###################################################
# Step #8: Customize your preprocessor and decoder alerts
# For more information, see README.decoder_preproc_rules
###################################################
# decoder and preprocessor event rules
# include $PREPROC_RULE_PATH/preprocessor.rules
# include $PREPROC_RULE_PATH/decoder.rules
# include $PREPROC_RULE_PATH/sensitive-data.rules
###################################################
# Step #9: Customize your Shared Object Snort Rules
# For more information, see http://vrt-blog.snort.org/2009/01/using-vrt-certified-shared-object-rules.html
###################################################
# dynamic library rules
# include $SO_RULE_PATH/bad-traffic.rules
# include $SO_RULE_PATH/chat.rules
# include $SO_RULE_PATH/dos.rules
# include $SO_RULE_PATH/exploit.rules
# include $SO_RULE_PATH/icmp.rules
# include $SO_RULE_PATH/imap.rules
# include $SO_RULE_PATH/misc.rules
# include $SO_RULE_PATH/multimedia.rules
# include $SO_RULE_PATH/netbios.rules
# include $SO_RULE_PATH/nntp.rules
# include $SO_RULE_PATH/p2p.rules
# include $SO_RULE_PATH/smtp.rules
# include $SO_RULE_PATH/snmp.rules
# include $SO_RULE_PATH/specific-threats.rules
# include $SO_RULE_PATH/web-activex.rules
# include $SO_RULE_PATH/web-client.rules
# include $SO_RULE_PATH/web-iis.rules
# include $SO_RULE_PATH/web-misc.rules
# Event thresholding or suppression commands. See threshold.conf
include threshold.conf
| true |
f5a7958db927e7322993f460cc292d2456aa878d | Shell | SINHASantos/GraphEngine | /tools/build.sh | UTF-8 | 500 | 3.78125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Configure and build the repository with CMake (Release), using all cores.
# REPO_ROOT may be pre-set by the caller; otherwise derive it from this
# script's own location (its parent directory).
if [ -z "$REPO_ROOT" ]; then
    REPO_ROOT="$(readlink -f "$(dirname "$(readlink -f "$0")")/../")"
fi

# Tool checks. The original plain 'exit' here returned status 0 and hid
# the failure from callers; 'exit 1' reports it properly.
if ! command -v cmake > /dev/null 2>&1; then
    echo "error: cmake not found." 1>&2
    exit 1
fi
if ! command -v dotnet > /dev/null 2>&1; then
    echo "error: dotnet not found." 1>&2
    echo "see: https://www.microsoft.com/net/download/linux" 1>&2
    exit 1
fi

# Out-of-source build under $REPO_ROOT/build. The explicit path replaces
# the fragile "$_" (last argument of the previous command).
mkdir -p "$REPO_ROOT/build" && pushd "$REPO_ROOT/build" || exit
cmake "$REPO_ROOT" -DCMAKE_BUILD_TYPE=Release || exit
make -j "$(nproc)" || exit
popd
| true |
8c7ec5d279a2e1c05c7f7d5eb0213e34eb9224a7 | Shell | icebourg/nagios-checks | /check_redis_slave.sh | UTF-8 | 2,452 | 4.28125 | 4 | [] | no_license | #!/bin/bash
# check_redis_slave.sh
# Nagios check to ensure that this redis server is either a master with
# connected slaves or a slave with an up master. Anything else results in an
# error.
# Author: AJ Bourg (aj <at> ajbourg dot com)

# Print usage/help text.
usage()
{
cat << EOF
Usage:
   $0 -s [socket]
      Connect to redis via socket.
   $0 -i [redis ip] -p [port]
      Connect to redis via ip/host and port.
   $0 -h
      This help.

This script is intended to be called by nagios to check redis. We connect to
redis and check the role. If role:slave, we expect that the master has a status
of UP. If role:master, we expect connected_slaves to be >= 1. If we can't
connect to redis, that's an immediate critical warning.

redis-cli must be in your path.
EOF
}

SOCKET=
REDIS_IP=127.0.0.1
REDIS_PORT=6379

# Options
while getopts "s:i:p:h" OPTION
do
    case $OPTION in
        h)
            usage
            exit 1
            ;;
        s)
            SOCKET=$OPTARG
            ;;
        i)
            REDIS_IP=$OPTARG
            ;;
        p)
            REDIS_PORT=$OPTARG
            ;;
        ?)
            usage
            # Unknown option: exit 3 (Nagios UNKNOWN). The original bare
            # 'exit' returned 0 here, which Nagios would read as OK.
            exit 3
            ;;
    esac
done

# CONNECT_STRING is deliberately left unquoted at every call site so it
# word-splits into separate redis-cli arguments.
if [[ "$SOCKET" ]]
then
    CONNECT_STRING="-s $SOCKET "
else
    CONNECT_STRING="-h $REDIS_IP -p $REDIS_PORT "
fi

# See if we can even connect, or throw up a critical error.
if ! redis-cli $CONNECT_STRING PING &> /dev/null
then
    echo "CRITICAL: Unable to connect to Redis."
    exit 2
fi

# Fetch INFO once and read every field from this single snapshot. The
# original issued a separate INFO round trip per field, which was slower
# and could observe inconsistent replication state between calls.
INFO=$(redis-cli $CONNECT_STRING INFO)

# info_field NAME -> value of the "NAME:value" line, CR/LF stripped.
info_field()
{
    printf '%s\n' "$INFO" | grep "^$1" | cut -d':' -f2 | tr -d '\r\n'
}

ROLE=$(info_field role)

# We're a slave. Our master needs to be up.
if [[ "$ROLE" == "slave" ]]
then
    # Is the master up, and who is it?
    STATUS=$(info_field master_link_status)
    MASTER=$(info_field master_host)

    if [[ "$STATUS" == "up" ]]
    then
        echo "OK: master $MASTER is $STATUS."
        exit 0
    else
        echo "CRITICAL: master $MASTER is $STATUS."
        exit 2
    fi
fi

# We're a master. Need at least 1 connected slave.
if [[ "$ROLE" == "master" ]]
then
    SLAVES=$(info_field connected_slaves)

    if [[ $SLAVES -gt 0 ]]
    then
        echo "OK: We have $SLAVES slaves."
        exit 0
    else
        echo "CRITICAL: We have $SLAVES slaves."
        exit 2
    fi
fi

# If we made it this far, something weird is up man... report critical.
echo "CRITICAL: Role is $ROLE and I don't know what to do."
exit 2
| true |
2f1d22cddeba04dc088d48a5515d41a25f656d83 | Shell | bgruening/docker-build | /perlgd/build.sh | UTF-8 | 505 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Build a source tarball of Perl GD ${version} from CPAN inside a
# throwaway /build directory and drop it into /host (bind-mounted).
# 'set -e' replaces the original long '&&' chain and the redundant
# '|| false || exit' inside the download loop.
set -e

arch=x86_64              # informational only; not used below
pkg=perlgd               # informational only; not used below
version=2.56
build_deps=""            # extra apt packages needed for the build (none)
urls="
https://cpan.metacpan.org/authors/id/L/LD/LDS/GD-${version}.tar.gz
"
build=/build/

apt-get -qq update
# build_deps is a deliberately unquoted, whitespace-separated package list.
apt-get install --no-install-recommends -y $build_deps
mkdir "${build}"
cd "${build}"
for url in $urls; do
    wget "$url"
done
tar xfz "GD-${version}.tar.gz"
pwd
# bdf_scripts ship read-only; make them writable before re-packing.
chmod ugo+w GD-${version}/bdf_scripts/*
tar zcf "/host/GD-${version}.tar.gz" "GD-${version}/"
| true |
645186df7631d43ee2c55903829bd8504120a0c6 | Shell | PureStorage-OpenConnect/FlashStache | /start.sh | UTF-8 | 451 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Launch the monitoring services: clear the stale job queue, then start
# the web server, RQ worker and RQ scheduler in the background, all
# appending their output to the shared log file.
log="flasharray/flash_stache.log"

echo "Starting Monitoring Services."

# Clear out previously running jobs in the default queue.
echo "Resetting the job queue, you will need to disable/enable arrays to continue monitoring."
/usr/local/bin/rq empty failed

# Run a manage.py service detached, stdout+stderr appended to the log.
launch() {
    python manage.py "$@" &>> "$log" &
}

launch runserver 0.0.0.0:8080 -v0
launch rqworker default -v0
launch rqscheduler -v0 --interval 1
| true |
372ea67abb7090927ecd4043190c11b626919b7e | Shell | RyanGreenup/cadmus | /bin/rofi_find.sh | UTF-8 | 6,405 | 3.859375 | 4 | [] | no_license | #! /usr/bin/env bash
#
# Author: Ryan Greenup <ryan.greenup@protonmail.com>
# * Shell Settings
set -o errexit # abort on nonzero exitstatus
set -o nounset # abort on unbound variable
set -o pipefail # don't hide errors within pipes
# * Main Function
main() {
    check_for_dependencies    # abort up front if rofi/rg are missing
    setVars                   # readonly script vars; IFS = tab/newline only
    readFirstArgument "${@}"  # -h/--help/no-arg -> print help and exit 0
    rofi_over_Notes "${@}"    # run the picker inside the notes dir ($1)
}
# ** Helper Functions
# *** Check for Dependencies
# Abort (message on stderr, exit 1) if any command listed in the global
# DependArray is not installed.
check_for_dependencies () {
    local i
    # Quote the array expansion: the original unquoted ${DependArray[@]}
    # would word-split and glob each entry. 'local i' keeps the loop
    # variable out of the global namespace.
    for i in "${DependArray[@]}"; do
        command -v "$i" >/dev/null 2>&1 || { echo >&2 "I require $i but it's not installed. Aborting."; exit 1; }
    done
}
# **** List of Dependencies
declare -a DependArray=(
                       "rofi"   # menu/launcher that drives the picker UI
                       "rg"     # ripgrep, declared dependency for searching
                      )
# *** Set variables below main
setVars () {
    # Script name and directory, resolved once and frozen.
    readonly script_name=$(basename "${0}")
    readonly script_dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
    IFS=$'\t\n' # Split on newlines and tabs (but not on spaces)
}
# **** Print Help
Help () {
    # Print the styled usage banner (ANSI escapes via `echo -e`).
    # The commented-out section documents key bindings that are not
    # implemented in this script's rofi invocation.
    echo
    echo -e " \e[3m\e[1mNoteFind.sh \e[0m; Helpful Shell Scripts for Markdown Notes"
    echo -e " \e[1;31m--------------------------\e[0m "
    echo
    echo -e "   \e[3m\e[1m• Usage \e[0m "
    echo
    echo -e "      "${script_name}" [<path/to/notes>]"
    echo -e "      "${script_name}" [-h]"
    echo -e "      "${script_name}" [--help]"
    echo
    echo -e "     \e[3m By Design: No Options; No other Arguments\e[0m"
    echo
    # echo -e "   \e[3m\e[1m• Key Bindings\e[0m "
    # echo
    # echo
    # echo -e "   \e[1;91m \e[1m Binding \e[0m\e[0m   \e[1;34m┊┊┊ \e[0m Description  "
    # echo -e "  ..............\e[1;34m┊┊┊\e[0m........................................... "
    # echo -e "   \e[1;95m Ctrl - q    \e[0m \e[1;34m ┊┊┊ \e[0m \e[1m Search \e[0m with \e[0m\e[3mripgrep\e[0m"
    # echo -e "   \e[1;93m Ctrl - w    \e[0m \e[1;34m ┊┊┊ \e[0m \e[1m Copy \e[0m the Full Path to the Clipboard"
    # echo -e "   \e[1;93m Alt  - w    \e[0m \e[1;34m ┊┊┊ \e[0m \e[1m Copy \e[0m the Relative Path to the Clipboard"
    # echo -e "   \e[1;94m Alt  - e    \e[0m \e[1;34m ┊┊┊ \e[0m \e[1m Open \e[0m in Emacs"
    # echo -e "   \e[1;94m Alt  - v    \e[0m \e[1;34m ┊┊┊ \e[0m \e[1m Open \e[0m in VSCode"
    # echo -e "   \e[1;94m Ctrl - o    \e[0m \e[1;34m ┊┊┊ \e[0m \e[1m Open \e[0m in Default Program"
    # echo
    # echo -e "   \e[3m\e[1m• Compatability \e[0m "
    # echo
}
# *** Read First Argument
# Print help and exit 0 when the first argument is -h, --help, or absent;
# any other argument falls through untouched.
readFirstArgument () {
    case "${1:-}" in
        -h|--help|"")
            Help && exit 0
            ;;
    esac
}
# *** Skim and Grep, the important stuff
#
# Run the picker from the notes directory given as $1 and print the
# selected file's absolute path on stdout.
rofi_over_Notes () {
    ## Change directory if One was specified, exit if no directory exists
    ## ('set -o errexit' at the top aborts the script when cd fails).
    cd "${1}"
    FILE="$(RofiFind)"
    if [[ "$FILE" != "" ]]; then
        # Quote the selection and pass '--': note paths may contain
        # spaces or start with a dash (the original unquoted $FILE
        # broke on both).
        realpath -- "$FILE" && exit 0
    fi
    exit 0
}
# **** Skim with Grep
RofiFind () {
    ## Change directory if One was specified, exit if no directory exists

    # I took this bit from https://github.com/davatorium/rofi/issues/997
    # Not totally sure how it works but it does :shrug

    ## Set Variables
    local selected
    local string
    selected="${1:-}"
    # Cache directory for the HTML previews produced by PRINT_OUT.
    TEMP_DIR="/tmp/cadmus_rofi_preview"
    mkdir -p "${TEMP_DIR}"

    # schemes="$(fd '\.org$')" # TODO Only look at org-mode files (hmmmm)
    # Candidate notes: every org/markdown file under the current dir.
    schemes="$(find ./ -name '*\.org' -or -name '*\.md')"

    # Number of candidates; used below to wrap around at list edges.
    lines=$(printf '%s\n' "${schemes}" | wc -l)

    # -format 'd:s' makes rofi print "row-number:selected-text".
    # Exit codes: 0 = Enter, 1 = Escape, 10 = kb-custom-1 ("Up"),
    # 11 = kb-custom-2 ("Down").
    menu=$(printf '%s\n' "${schemes}" | rofi -matching fuzzy -location 1 -kb-row-up "" -kb-row-down "" -kb-custom-1 "Up" -kb-custom-2 "Down" -format 'd:s' -dmenu -selected-row $selected)
    exit_code=$?

    # Split "row:text" into the row number and the chosen entry.
    selected="${menu%:*}"
    string="${menu##*:}"

    # NOTE(review): on the Enter path (exit code 0) "call" is never set;
    # with 'set -o nounset' the recursive call below then aborts the
    # script — confirm this "preview and quit" behaviour is intended.
    case "${exit_code}" in
        "1") exit 0;;
        "0") PRINT_OUT "${string}" & disown;;
        "10")
            # "Up": preview the previous entry, wrapping to the last.
            if [[ $selected == "1" ]]; then
                foo_selected="${lines}"
                call="3"
            else
                foo_selected="$(echo -e $(( ${selected} - 1 )))";
                call=$(echo $(( ${selected} - 2 )))
            fi
            foo="$(printf '%s' "${schemes}" | sed -n "${foo_selected}"p)";
            PRINT_OUT "${foo}" & disown;;
        "11")
            # "Down": preview the next entry, wrapping to the first.
            if [[ "${selected}" -ge "${lines}" ]]; then
                foo_selected="1"
                call="0"
            else
                foo_selected="$(echo -e $(( ${selected} + 1 )))";
                call="${selected}"
            fi
            foo="$(printf '%s' "${schemes}" | sed -n "${foo_selected}"p)";
            PRINT_OUT "${foo}" & disown
    esac

    # Re-open the menu with the new row preselected; the recursion keeps
    # the picker alive until Enter or Escape ends it.
    RofiFind "${call}"
    exit 0
}
# **** Convert the File with Pandoc and Show in Browser
PRINT_OUT () {
    # Convert the note given as $1 to HTML (cached) and open it in a
    # browser in the background.
    FILEPATH="$(realpath ${1})"
    # NOTE(review): FILEPATH_NO_EXT and DIRECTORY are never used, and the
    # DIRECTORY expansion has a stray '}' appended — candidates for
    # deletion rather than repair.
    FILEPATH_NO_EXT="$(realpath ${1} | cut -f 1 -d '.')"
    DIRECTORY="$(dirname ${FILEPATH}})"
    NAME="$(basename ${@} | cut -f 1 -d '.')"
    BROWSER="chromium"

    # Simpler calls
    # pandoc -f org -t html "${FILEPATH}" --quiet | cat

    # Convert with pandoc into the preview cache, then open the result.
    function pandoc_browser() {
        #pandoc -f org -t html "${FILEPATH}" -A /home/ryan/Templates/CSS/gitOrgWrapped.css --mathjax -s --quiet -o "/dev/shm/${NAME}.html" && \
        pandoc -t html "${FILEPATH}" --extract-media="${TEMP_DIR}/media_${NAME}" -A /home/ryan/Templates/CSS/gitOrgWrapped.css --katex -s --quiet -o "${TEMP_DIR}/${NAME}.html" && \
        "${BROWSER}" "${TEMP_DIR}/${NAME}.html" > /dev/null & disown # Chromium is faster than firefox
    }

    ## By caching the export in /dev/shm/ chrome will just go back to the last tab (quicker)
    ## and pandoc won't reconvert unnecessarily (quicker)
    ## Given that most of the time is spent looking and reading this makes a lot of sense
    # Only re-run pandoc when the source is newer than the cached HTML.
    if [ "${FILEPATH}" -nt "${TEMP_DIR}/${NAME}.html" ]; then
        # The Live_Reload_JS lets me reload this, otherwise do not disown this process
        pandoc_browser & disown
    else
        "${BROWSER}" "${TEMP_DIR}/${NAME}.html" & disown
    fi

    # I tried this with org-ruby, no luck though for latex though
    # Org-ruby is much faster than pandoc
    # /home/ryan/.gem/ruby/2.7.0/bin/org-ruby "${FILEPATH}" -t html > /dev/shm/mpv.html
    # cat /home/ryan/Templates/mathjax >> /dev/shm/mpv.html
    # cat /home/ryan/Templates/CSS/gitOrgWrapped.css >> /dev/shm/mpv.html
    # chromium /dev/shm/mpv.html & disown
}
# * Call Main Function
main "${@}"
| true |
4d1ebc0730c2c08b97fbe6bf6b92b48dc5e4fc5d | Shell | zooyl/GiveThingsBack | /install.sh | UTF-8 | 2,370 | 3.5625 | 4 | [] | no_license | #!/usr/bin/env bash
# Interactive installer: creates a Python 3 virtualenv, installs the
# project requirements, and (on confirmation) migrates, collects static
# files, tests, seeds the database and starts the dev server.
echo "---------------------------------------------------"
echo "This script will install virtual environment,"
echo "Set it up and install packages to run this project"
echo "One key note:"
echo "It will not install Python, Postgresql and Docker"
echo "*docker-compose included in requirements*"
echo "---------------------------------------------------"
# -r keeps read from interpreting backslashes (the input is discarded;
# the original 'read -p' without -r mangled it).
read -r -p "Click ""ENTER"" continue."
echo "Installing virtual environment..."
sudo apt install virtualenv
echo "---------------------------------------------------"
echo "Creating virtual environment in current directory"
echo "---------------------------------------------------"
virtualenv -p python3 venv
# Use the venv's own pip so activation is not required for the install.
# pip install -r requirements.txt
venv/bin/pip install -r requirements.txt
source ./venv/bin/activate
echo "---------------------------------------------------"
echo "Installation completed"
echo "---------------------------------------------------"
echo "Make sure 'local_settings.py' is configured!"
echo "---------------------------------------------------"
read -r -p "Do you want to start server? [Y/n] " response
echo
response=${response,,}    # tolower
# A plain ENTER (empty response) counts as "yes".
if [[ $response =~ ^(yes|y| ) ]] || [[ -z $response ]]; then
    echo "---------------------------------------------------"
    echo "Making migrations"
    echo "---------------------------------------------------"
    python manage.py migrate
    echo "---------------------------------------------------"
    echo "Collecting static files"
    echo "---------------------------------------------------"
    python manage.py collectstatic
    echo "---------------------------------------------------"
    echo "Running Tests"
    echo "---------------------------------------------------"
    python manage.py test
    echo "---------------------------------------------------"
    echo "Populating database"
    echo "---------------------------------------------------"
    python manage.py loaddata sample.json
    echo "---------------------------------------------------"
    echo "Please configure e-mail service in settings.py"
    echo "otherwise you wont be able to register new users"
    echo "---------------------------------------------------"
    echo "Running Server"
    echo "By default there are two users:"
    echo "'super-user@gtb.com' with password 'Mkonjibhu1!'"
    echo "'wiyayoh@digital-email.com' with password 'Mkonjibhu1!'"
    python manage.py runserver
fi
if [[ $response =~ ^(no|n| ) ]] || [[ -z $response ]]; then
exit
fi | true |
38b4403d51f9bed687ae31980c003f9c00087636 | Shell | masterstevelu/cluster-tutorials | /jupyter/singularity_jupyter.sh | UTF-8 | 1,194 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# If a singularity_jupyter job is already running, re-print its tunnel
# instructions and stop.
exist_jupyter=$(qstat | grep singularity_jupyter | grep " R ")
if [[ $exist_jupyter == *"singularity_jupyter"* ]]; then
    # Job ids look like "1234.server"; split on '.' (intentional
    # word-splitting) to get the numeric id.
    exist_arr=(${exist_jupyter//./ })
    exist_jobid=${exist_arr[0]}
    echo "singularity_jupyter job exists!"
    cat "log/tunnel.$exist_jobid.rmdx-cluster.edu.com.cn"
    exit
fi
# get input
# Prompt until an existing image file is given. -r keeps backslashes
# literal; '|| exit 1' prevents an endless loop when stdin is closed.
while :; do
    read -r -p "Enter singularity image path: " image_path || exit 1
    echo "$image_path"
    # Quoted: the original unquoted [ -f $image_path ] broke on paths
    # containing spaces.
    if [ -f "$image_path" ]; then
        echo "Image exists!"
        break
    else
        echo "No image on the path : $image_path , try again!"
    fi
done
# Prompt until a numeric port in [9001, 9999] is given.
while :; do
    read -r -p "Enter a number between 9001 and 9999: " port_number || exit 1
    [[ $port_number =~ ^[0-9]+$ ]] || { echo "Enter a valid port_number"; continue; }
    if ((port_number >= 9001 && port_number <= 9999)); then
        echo "valid port_number"
        break
    else
        echo "number out of range, try again!"
    fi
done
# Submit the PBS job; the -v argument is quoted so the image path
# survives word splitting.
sub=$(qsub -v "IMAGE=$image_path,PORT=$port_number" submit_singularity_jupyter.pbs)
sub_arr=(${sub//./ })    # "1234.server" -> job id "1234"
jobid=${sub_arr[0]}
# Give the job a moment to start and write its log files.
sleep 6
if [ -f "log/tunnel.$jobid.rmdx-cluster.edu.com.cn" ]; then
    cat "log/tunnel.$jobid.rmdx-cluster.edu.com.cn"
fi
if [ -f log/port_in_use ]; then
    cat log/port_in_use
    rm log/port_in_use
fi
| true |
f725f0b0829fcf2fb40c0696f458f12796797604 | Shell | sofya-on/labora23 | /lab23.c | UTF-8 | 4,979 | 3.15625 | 3 | [] | no_license | #!/bin/bash
#include <stdio.h>
#include "vector.c"
#include "tree.c"
void KLP(TreeNode **node, const int level);
void countNodesOnLevels(TreeNode **node, Vector *v, const int h);
int max(int a, int b);
int treeDFS(TreeNode **node);
TreeNode *getNodeByPath(TreeNode **node, const char *path);
/*
 * Interactive console driver for the son/brother tree.
 *
 * Commands (read from stdin; node letters must be A..Z):
 *   +r X     create the root node X
 *   + X      add X as a son of the root
 *   +PATH X  add X at the node reached by PATH ('s' = first son,
 *            'b' = next brother), e.g. "+sb D"
 *   - X      remove the first node X found, with its whole subtree
 *   p        print the tree (indented pre-order)
 *   t        check whether level widths grow strictly monotonically
 *   h        print help; q quits
 * (User-facing strings are Russian, matching the built-in help text.)
 */
int main(void)
{
  int i, maxBFS;
  char cmd[255], arg;
  TreeNode *root = NULL, *tmpNode = NULL;
  Vector v;
  do
  {
    printf("Введите команду (h - справка):\n");
    /* NOTE(review): unbounded %s into cmd[255] can overflow on long
     * input — consider "%254s". */
    scanf("%s", cmd);
    if (cmd[0] == '+')
    {
      scanf(" %c", &arg); /* letter of the node to insert */
      if (cmd[1] == 'r')
      {
        /* "+r X": create the root once, for valid letters only. */
        if (root == NULL)
        {
          if (arg >= 'A' && arg <= 'Z')
          {
            treeAddNode(&root, arg - 'A');
            printf("Корень %c создан\n", arg);
          }
          else
            printf("Ошибка. Введена недопустимая буква\n");
        }
        else
          printf("Корень уже существует\n");
      }
      else if (root == NULL)
        printf("Корень не создан\n");
      else
      {
        /* "+PATH X": resolve the (possibly empty) path to the target
         * node, then attach the new son there. */
        tmpNode = root;
        if (cmd[1] != '\0')
          tmpNode = getNodeByPath(&root, &cmd[1]);
        if (tmpNode == NULL)
          printf("Ошибка. Такого пути не существует\n");
        else if (arg >= 'A' && arg <= 'Z')
        {
          if (treeAddNode(&tmpNode, arg - 'A') != NULL)
            printf("Узел %c добавлен к узлу %c\n", arg, tmpNode->_data + 'A');
        }
        else
          printf("Ошибка. Введена недопустимая буква\n");
      }
    }
    else if (cmd[0] == '-')
    {
      /* "- X": remove the first occurrence of X and its subtree. */
      scanf(" %c", &arg);
      if (arg >= 'A' && arg <= 'Z')
      {
        if (treeRemoveNode(&root, arg - 'A'))
          printf("Узел %c удален\n", arg);
        else
          printf("Узел %c не найден\n", arg);
      }
      else
        printf("Ошибка. Введена недопустимая буква\n");
    }
    else if (cmd[0] == 'p')
    {
      KLP(&root, 0); /* indented pre-order print */
    }
    else if (cmd[0] == 't')
    {
      if (root != NULL)
      {
        /* One counter per level: vector length = tree height. */
        vectorCreate(&v, treeDFS(&root));
        for (i = 0; i < vectorSize(&v); i++)
          vectorSave(&v, i, 0);
        countNodesOnLevels(&root, &v, 0);
        /* Flag if any level's width fails to strictly exceed the
         * previous one. */
        maxBFS = 0;
        int flag = 0;
        for (i = 0; i < vectorSize(&v); i++)
        {
          if (vectorLoad(&v, i) <= maxBFS)
            flag = 1;
          maxBFS = vectorLoad(&v, i);
        }
        if (flag == 1)
          printf("Моннотонность возрастания ширины уровня дерева не сохраняется\n");
        else
          printf("Моннотонность возрастания ширины уровня дерева сохраняется\n");
        vectorDestroy(&v);
      }
      else
        printf("Дерево пусто\n");
    }
    else if (cmd[0] == 'h')
    {
      printf("================================\n");
      printf("Список команд:\n");
      printf("+r CHAR - создать корень CHAR (A, B, ..., Z)\n");
      printf("+ CHAR - добавить сына CHAR к корню\n");
      printf("+PATH CHAR - добавить CHAR узел по заданому пути (s - сын, b - брат)\n");
      printf("- CHAR - удалить первый найденный узел CHAR и его поддерево\n");
      printf("p - распечатать дерево\n");
      printf("t - выполнить задание над деревом\n");
      printf("q - завершить программу\n");
      printf("================================\n");
    }
    else if (cmd[0] != 'q')
    {
      printf("Неизвестная команда\n");
    }
  }
  while (cmd[0] != 'q');
  treeDestroy(&root);
  return 0;
}
/* Pre-order print: the node, then its children (level + 1), then its
 * siblings (same level), indented two spaces per level. The "empty
 * tree" message can only fire on the initial call, since recursion
 * guards against NULL children/siblings. */
void KLP(TreeNode **node, const int level)
{
  if (*node == NULL)
  {
    printf("Дерево пусто\n");
    return;
  }
  /* "%*s" with an empty string prints level*2 leading spaces. */
  printf("%*s%c\n", level * 2, "", (*node)->_data + 'A');
  if ((*node)->_son != NULL)
    KLP(&(*node)->_son, level + 1);
  if ((*node)->_bro != NULL)
    KLP(&(*node)->_bro, level);
}
/* Total number of nodes reachable from *node: the node itself plus its
 * whole sibling chain and all descendants. Empty tree -> 0. */
int treeNodesCount(TreeNode **node)
{
  if (*node == NULL)
    return 0;
  /* The NULL base case makes explicit child guards unnecessary:
   * recursing into an empty branch simply contributes 0. */
  return 1 + treeNodesCount(&(*node)->_son) + treeNodesCount(&(*node)->_bro);
}
/* Accumulate per-level node counts: v[h] is incremented once for every
 * node at depth h under *node. Brothers stay on level h; sons descend
 * to h + 1. Assumes v was pre-sized to the tree height (see main's 't'
 * command). */
void countNodesOnLevels(TreeNode **node, Vector *v, const int h)
{
  int curH = 0;
  if (*node == NULL)
    return;
  curH = vectorLoad(v, h);
  vectorSave(v, h, curH + 1);
  if ((*node)->_bro)
    countNodesOnLevels(&(*node)->_bro, v, h);
  if ((*node)->_son)
    countNodesOnLevels(&(*node)->_son, v, h + 1);
}
/* Return the larger of two ints (b when they are equal, matching the
 * original ternary's behaviour). */
int max(int a, int b)
{
  if (a > b)
    return a;
  return b;
}
/* Height of the tree in levels: following a son adds a level, following
 * a brother does not. Empty tree -> 0. */
int treeDFS(TreeNode **node)
{
  if (*node == NULL)
    return 0;
  return max(1 + treeDFS(&(*node)->_son), treeDFS(&(*node)->_bro));
}
/* Walk PATH starting at *node: 's' steps to the first son, 'b' to the
 * next brother. Returns the node reached, or NULL when the path uses an
 * illegal character or steps off the tree. */
TreeNode *getNodeByPath(TreeNode **node, const char *path)
{
  TreeNode *cur = *node;
  const char *p;
  for (p = path; *p != '\0'; p++)
  {
    if (*p == 's')
      cur = cur->_son;
    else if (*p == 'b')
      cur = cur->_bro;
    else
      return NULL; /* illegal path character */
    if (cur == NULL)
      return NULL; /* stepped off the tree */
  }
  return cur;
}
| true |
bbe7873a3f976d26937813b66a59e99ce0a34463 | Shell | experteer/rbenv-script | /bin/rbenv-script | UTF-8 | 930 | 3.84375 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
#
# Usage: rbenv script
#
set -e
[ -n "$RBENV_DEBUG" ] && set -x
if [ "$1" = "--version" ] || [ "$1" = "-v" ]; then
echo "rbenv-script 0.0.1"
exit
fi
# Walk from directory $1 up towards the filesystem root, collecting every
# .rbenv-script / .rbenv-script.local found on the way. Each match is
# *prepended*, so the output lists outermost directories first, and the
# plain file before its .local override within one directory.
# Prints the newline-separated list; returns 1 when nothing was found.
traverse-rbenv-script-files() {
  local root="$1"
  local results=""
  while [ -n "$root" ]; do
    if [ -e "${root}/.rbenv-script.local" ]; then
      results="${root}/.rbenv-script.local"$'\n'"$results"
    fi
    if [ -e "${root}/.rbenv-script" ]; then
      results="${root}/.rbenv-script"$'\n'"$results"
    fi
    # Strip the last path component; the loop ends once this yields "".
    # NOTE(review): "/" itself is never inspected — confirm that is OK.
    root="${root%/*}"
  done
  if [ -n "$results" ]; then
    echo -n "$results"
  else
    return 1
  fi
}
# Emit the full list of script files: the global $RBENV_ROOT/script
# first, then files discovered upwards from $RBENV_DIR; when that yields
# nothing and $PWD differs from $RBENV_DIR, fall back to scanning $PWD.
find-rbenv-script-files() {
  if [ -e "${RBENV_ROOT}/script" ]; then
    echo "${RBENV_ROOT}/script"
  fi
  traverse-rbenv-script-files "$RBENV_DIR" ||
  [ "$RBENV_DIR" = "$PWD" ] || traverse-rbenv-script-files "$PWD"
}
# Print each discovered file, preceded by a "# <path>" header and
# followed by a blank line.
# NOTE(review): 'read' without -r mangles backslashes in file paths —
# consider 'read -r'.
while read file; do
  echo "# $file"
  cat "$file"; echo;
  echo
done < <( find-rbenv-script-files )
| true |
7570930107f48f330197a5c0f653c75e0348aeb1 | Shell | waynegraham/ivanhoe | /ivanhoe/scripts/automation/mkarchive.sh | UTF-8 | 2,982 | 4.1875 | 4 | [] | no_license | #!/bin/bash
#
# mkarchive.sh
# A tool to automate backups of Ivanhoe data
# by Ben Cummings
#
# get_property <name> <file>
# Extract the value of "name=value" from a properties file (after
# stripping '#' comments) into the global PROPERTY. Exits the script
# when the property is missing or empty.
get_property() {
    if [ $# -ne 2 ] ; then
        echo "Error: function [$0] needs two arguments"
        exit 255;
    fi

    prop_name=$1
    prop_file=$2

    # NOTE(review): the grep matches the name anywhere on the line, so a
    # property name that is a substring of another could pick the wrong
    # line — verify names are unambiguous before anchoring with '^'.
    PROPERTY=$(sed 's/#.*//g' "$prop_file" | grep "$prop_name" | gawk --field-separator = -- '{print $2;}')

    # -n with quotes: the original unquoted '[ $PROPERTY ]' produced a
    # test syntax error whenever the value contained whitespace.
    if [ -n "$PROPERTY" ] ; then
        return 0
    else
        echo "Error: could not get property [$prop_name] from [$prop_file]"
        exit 4
    fi
}
#
# SETUP AND ARGUMENT PROCESSING
#
# Show usage and leave (status 0) unless exactly two arguments were given.
if [ $# -ne 2 ] ; then
    cat << EOF
Usage: `basename $0` server_directory backup_directory
 e.g.: `basename $0` ./ivanhoe/dist/server ./backup
EOF
    exit 0
fi

IVANHOE_SERVER=$1
IVANHOE_BACKUP=$2

# Absolutize relative paths: prefix the current directory when the
# argument does not already begin with "/".
if [ -z `echo $IVANHOE_SERVER | egrep '^/'` ] ; then
    IVANHOE_SERVER="`pwd`/$IVANHOE_SERVER"
fi
if [ -z `echo $IVANHOE_BACKUP | egrep '^/'` ] ; then
    IVANHOE_BACKUP="`pwd`/$IVANHOE_BACKUP"
fi

# Both locations must exist and be directories.
if [ ! -d $IVANHOE_SERVER ] ; then
    echo "Error: [$IVANHOE_SERVER] must be a directory"
    exit 1
fi
if [ ! -d $IVANHOE_BACKUP ] ; then
    echo "Error: [$IVANHOE_BACKUP] must be a directory"
    exit 1
fi

# Day-of-month only: backups rotate monthly (day N of this month
# overwrites day N of last month).
DATE_STRING=`date +%d`
#
# DATA PROCESSING
#
# Work from inside the server directory so ./ivanhoe.properties resolves.
cd $IVANHOE_SERVER
if [ -e ./ivanhoe.properties ] ; then
    # The server name prefixes every backup file name.
    get_property "gameServerName" "./ivanhoe.properties"
    GAME_SERVER_NAME=$PROPERTY
    #
    # PART ONE:
    # SQL dump
    #
    # Pull the database connection settings from the properties file;
    # each get_property call exits the script on failure.
    get_property "dbName" "./ivanhoe.properties"
    DB_NAME=$PROPERTY
    get_property "dbHost" "./ivanhoe.properties"
    DB_HOST=$PROPERTY
    get_property "dbUser" "./ivanhoe.properties"
    DB_USER=$PROPERTY
    get_property "dbPass" "./ivanhoe.properties"
    DB_PASS=$PROPERTY
    if [ -z $DB_PASS ] ; then
        echo "Error: could not find required database fields in the properties file"
        exit 4
    fi
    DB_BACKUP_FILE="$IVANHOE_BACKUP/$GAME_SERVER_NAME.$DATE_STRING.sql"
    # This is insecure! FIXME: use 'expect' instead!
    # (The password is visible in the process list while mysqldump runs.)
    mysqldump -h$DB_HOST -u$DB_USER -p$DB_PASS --opt --databases $DB_NAME > $DB_BACKUP_FILE && \
    gzip -f $DB_BACKUP_FILE
    DB_BACKUP_FILE="$DB_BACKUP_FILE.gz"
    # gzip replaced the .sql with .sql.gz; a missing .gz means the dump
    # (or the compression) failed.
    if [ ! -f $DB_BACKUP_FILE ] ; then
        echo "Error: could not dump database."
        exit 2
    fi
    #
    # PART TWO:
    # Discourse field dump
    #
    DF_BACKUP_FILE="$IVANHOE_BACKUP/$GAME_SERVER_NAME.$DATE_STRING.discourse_field.tar"
    get_property "discourseFieldRoot" "./ivanhoe.properties"
    DF_ROOT=$PROPERTY
    tar cf $DF_BACKUP_FILE $DF_ROOT # *game[0-9]*-journal.html
    gzip -f $DF_BACKUP_FILE
    # NOTE(review): gzip already removed the .tar, so this rm is a
    # no-op — confirm before deleting the line.
    rm -f $DF_BACKUP_FILE
    DF_BACKUP_FILE="$DF_BACKUP_FILE.gz"
else
    echo "Error: Could not find the properties file."
    exit 2
fi
#
# REPORT
#
# ("succesfully" typo is user-visible output, left untouched here.)
cat << EOF
Backup completed succesfully. Generated files:
    SQL database dump:       [$DB_BACKUP_FILE]
    Discourse field archive: [$DF_BACKUP_FILE]
EOF
exit 0
| true |
7506c90d0486276c286bc2acba346da27c086e5b | Shell | barkpixels/Experimental_helpers | /ynh_chown/ynh_chown | UTF-8 | 3,330 | 4.34375 | 4 | [] | no_license | #!/bin/bash
# Set permissions on files and directories with chown
#
# Use find to apply permissions faster on very big directories.
#
# usage: ynh_chown --user=user [--group=group] --file="file_or_directory" [--recursive]
# | arg: -u, --user - Owner
# | arg: -g, --group - Owner group (Default same as --user)
# | arg: -f, --file - File or directory where permissions will be applied.
# | arg: -r, --recursive - Change permissions recursively
ynh_chown () {
    # Declare an array to define the options of this helper.
    local legacy_args=ugfr
    declare -Ar args_array=( [u]=user= [g]=group= [f]=file= [r]=recursive )
    local user
    local group
    local file
    local recursive
    # Manage arguments with getopts
    ynh_handle_getopts_args "$@"
    # Default the group to the user, and recursion to off.
    group="${group:-$user}"
    recursive=${recursive:-0}

    if [ $recursive -eq 1 ]
    then
        local ending_slash=""
        if [ -d "$file" ]
        then
            # NOTE(review): the trailing slash makes find dereference
            # $file when it is a symlink to a directory — confirm intent.
            ending_slash=/
        fi
        # With very big directories, find is way faster than chown itself.
        # Especially because find will check the permissions and applied chown only if the permissions aren't correct.
        # '\!' is used to have a negation on -user and -group.
        # ' -d '\n' ' force \n to be the delimiter of each entry instead of space. So xargs will handle correctly directories and files with spaces.
        # NOTE(review): the pipeline is one string and the else branch
        # escapes its quotes, suggesting ynh_exec_warn_less re-evaluates
        # its argument — confirm against the YunoHost helper.
        ynh_exec_warn_less "find \"$file$ending_slash\" \! -user $user -o \! -group $group | xargs --no-run-if-empty --delimiter='\n' chown --preserve-root $user:$group"
    else
        ynh_exec_warn_less chown $user:$group \"$file\"
    fi
}
# Set permissions on files and directories with chmod
#
# Use find to apply permissions faster on very big directories.
#
# usage: ynh_chmod --permissions=0755 --file="file_or_directory" [--recursive] [--type=file/dir]
# | arg: -p, --permissions - Permissions to apply with chmod.
# | arg: -f, --file - File or directory where permissions will be applied.
# | arg: -r, --recursive - Change permissions recursively
# | arg: -t, --type - Apply permissions only on regular files (file) or directories (dir)
ynh_chmod () {
    # Declare an array to define the options of this helper.
    local legacy_args=pfrt
    declare -Ar args_array=( [p]=permissions= [f]=file= [r]=recursive [t]=type= )
    local permissions
    local file
    local recursive
    local type
    # Manage arguments with getopts
    ynh_handle_getopts_args "$@"
    recursive=${recursive:-0}
    type="${type:-}"

    # Translate --type into a find predicate; any value other than
    # file/dir is reported and dropped (permissions then apply to both).
    if [ -n "$type" ] && [ "$type" != "file" ] && [ "$type" != "dir" ]
    then
        ynh_print_err --message="The value \"$type\" for --type is not recognized."
        type=""
    else
        if [ "$type" == "file" ]
        then
            type="-type f"
        elif [ "$type" == "dir" ]
        then
            type="-type d"
        fi
    fi

    if [ $recursive -eq 1 ]
    then
        local ending_slash=""
        if [ -d "$file" ]
        then
            # See ynh_chown: trailing slash affects symlinked directories.
            ending_slash=/
        fi
        # With very big directories, find is way faster than chmod itself.
        # Especially because find will check the permissions and applied chmod only if the permissions aren't correct.
        # '\!' is used to have a negation on -perm.
        # ' -d '\n' ' force \n to be the delimiter of each entry instead of space. So xargs will handle correctly directories and files with spaces.
        ynh_exec_warn_less "find \"$file$ending_slash\" $type \! -perm $permissions | xargs --no-run-if-empty --delimiter='\n' chmod --preserve-root $permissions"
    else
        ynh_exec_warn_less chmod $permissions \"$file\"
    fi
}
| true |
2e2b56089b864c091e29a2982bdadfd9457dbe61 | Shell | bitfree/zipkin | /docker/collector/kafka/docker-bin/docker-entrypoint | UTF-8 | 1,501 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
#
# Copyright 2015-2020 The OpenZipkin Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
#
# ENTRYPOINT script that starts ZooKeeper and then Kafka
set -eu

echo Starting ZooKeeper
# ZooKeeper runs in the background; Kafka below requires it to be up.
bin/kafka-run-class.sh -Dlog4j.configuration=file:config/log4j.properties org.apache.zookeeper.server.quorum.QuorumPeerMain config/zookeeper.properties &

# wait for ZooKeeper
# Poll with the "stat" four-letter command until port 2181 answers.
until echo stat | nc 127.0.0.1 2181
do
  sleep 1
done

# Internal docker producers and consumers use the normal hostname:9092, and outside docker the advertised host on port 19092
ADVERTISED_LISTENERS="advertised.listeners=PLAINTEXT://${HOSTNAME}:9092,PLAINTEXT_HOST://${KAFKA_ADVERTISED_HOST_NAME}:19092"

# Ensure the line is only added once in the config file.
CONFIG=config/server.properties
grep -qF -- "$ADVERTISED_LISTENERS" $CONFIG || echo "$ADVERTISED_LISTENERS" >> $CONFIG

echo Starting Kafka
# exec replaces this shell so Kafka becomes the container's main process
# and receives signals directly (clean shutdown).
exec bin/kafka-run-class.sh -name kafkaServer -Dlog4j.configuration=file:config/log4j.properties kafka.Kafka config/server.properties
| true |
bb2828723d0fbc2af14a9654c9155e4bbaa8bed2 | Shell | germs-lab/softwarecarpentry | /day2.testing/run_all_tests.sh | UTF-8 | 116 | 2.640625 | 3 | [
"LicenseRef-scancode-public-domain",
"CC-BY-4.0",
"MIT"
] | permissive | #!/bin/bash
# Run mean.py over every test_*.txt in the current directory.
# Globbing replaces the original `ls` parsing, which broke on file
# names containing spaces or glob characters.
for test_file in test_*.txt
do
    # With no matches the pattern stays literal; skip it.
    [ -e "$test_file" ] || continue
    echo "Current test file is $test_file"
    ./mean.py "$test_file"
done
| true |
8f190335264a71539e12704fd352ac1189a7d592 | Shell | razuos/curso-shellscript | /script3/relatorio.sh | UTF-8 | 1,134 | 4.09375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Define the function at the very top of the script so it can be used from
# anywhere below. If it were declared at the end of the file, the code above
# it could not call it.
gerar_relatorio() {
    # Each echo embeds a command's output via $(...) command substitution.
    echo "Data: $(date)"
    # Machine name, from the "hostname" command.
    echo "Nome da máquina: $(hostname)"
    # How long the machine has been running, from "uptime".
    echo "Saída do comando uptime: $(uptime)"
    # /proc/version holds the kernel/system version string.
    echo "Versão do sistema: $(cat /proc/version)"
    # /etc/shells lists the available login shells.
    echo "Shells disponíveis: $(cat /etc/shells)"
}
# Ask the user for an output file name and keep asking until a non-empty
# answer is given.
echo "Digite o nome do arquivo:"
flag="false"
arquivo=""
while [ "$flag" = "false" ]; do
    # -r keeps backslashes in the typed name literal.
    read -r input
    if [ -z "$input" ]; then
        echo "Você precisa digitar algo!"
    else
        flag="true"
        arquivo=$input
    fi
done
# Call the report function and redirect its output into the chosen file.
# The name is quoted so file names containing spaces work.
gerar_relatorio > "$arquivo"
# Restrict the report to owner read/write only (https://chmod-calculator.com).
chmod 600 "$arquivo"
# Show the resulting file permissions.
ls -la "$arquivo"
| true |
1f23af1370fc4a294d53c9fb8585312a150c199b | Shell | GandhiCloudLab/mcm-goldensignals-upro | /install/51-continous-trafic.sh | UTF-8 | 616 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Generate continuous HTTP load against the demo banking endpoints with
# ApacheBench (ab) until the user interrupts with CTRL+C.
while true
do
echo "Press CTRL+C to stop the script execution"
# Enter your desired command in this block.
ab -n 50 -c 3 http://upro-bankweb-upro-icam-appdomain.cloud/
ab -n 50 -c 3 http://upro-bankweb-upro-icam-appdomain.cloud/interest
ab -n 50 -c 3 http://upro-bankweb-upro-icam-appdomain.cloud/payment/2
ab -n 50 -c 3 http://upro-bankweb-upro-icam-appdomain.cloud/commissionRate/2
ab -n 50 http://upro-bankweb-upro-icam-appdomain.cloud/errorCheck
ab -n 10 http://upro-bankweb-upro-icam-appdomain.cloud/latencyCheck
# Pause 5 seconds between rounds (sleep also accepts suffixes such as 1m, 1h).
sleep 5
done
| true |
160dc4f829cc1103f93a24d6330d0afa8cbe83d4 | Shell | augustine829/Self_research | /trunk.devtools/.svn/pristine/16/160dc4f829cc1103f93a24d6330d0afa8cbe83d4.svn-base | UTF-8 | 4,711 | 4.25 | 4 | [] | no_license | #!/bin/bash
# Abort on errors and on use of unset variables.
set -eu
# Print the full usage/help text to stdout. The heredoc expands $log, so the
# output reflects the log-file name configured at the bottom of the script.
usage() {
    cat <<EOF
Usage:
    fetch-sdk [OPTION]... BRANCH[@REVISION] KITPRODUCT
This script builds and downloads an sdk/sdk++ for preferred branch from
http://kreatvkits.arrisi.com. Note that if no revision number is provided
you get latest _build_, not latest _revision_, meaning that we may match
an old revision if someone queued it. File $log
is used to track which sdks have been downloaded.
Options:
    BRANCH
            http://svn.arrisi.com/dev/bsg branch including
            'branches'/'tags'.
            Example: 'trunk', 'branches/DEV_123', 'tags/STABLE_1'
    REVISION
            The branch revision. Example: '123456'
    KITPRODUCT
            The kit product to be built and downloaded.
            Example: 'kreatv4-sdk++-alefnula'
    -h / --help
            Show this help text.
    -e / --edit
            Edit file telling which SDKs have been downloaded (this would
            be $log). This enables you to reset
            downloads, thus allowing you to perform them again.
    -d / --delete
            Delete local copy of sdk after download. SDK is however
            still marked as downloaded. Can be used to have SDK on
            built server side but not keeping the local copy of it.
    -l / --list
            List possible KITPRODUCTS for a specified BRANCH[@REVISION].
    -o / --output_path <path>
            Put downloaded SDKs in specified path instead of in
            current directory.
Examples:
    > fetch-sdk tags/STABLE_1 kreatv4-sdk++-alefnula
    > fetch-sdk trunk@123456 kreatv4-sdk++-st40
EOF
}
# Open the download log ($logfile) in the user's editor, falling back to
# emacs. ${EDITOR:-} is used because this script runs under 'set -u': a bare
# $EDITOR would abort with "unbound variable" when the user has none set.
edit() {
    if [[ -n "${EDITOR:-}" ]]; then
        # $EDITOR stays unquoted on purpose so values like "code -w" still work.
        $EDITOR "$logfile"
    else
        emacs -nw "$logfile"
    fi
}
# Point the user at the full usage text, then abort the whole script.
help() {
    echo "For usage information run: fetch-sdk -h"
    exit 1
}
# Query the kit server for a plain-text list of kit products available for
# the globals $branch@$revision; the listing is printed to stdout.
list() {
    curl -s "http://kreatvkits.arrisi.com/$branch@${revision}?return=plainlist"
}
# Download one kit product for $branch@$revision unless $logfile already
# records it as downloaded. Globals read: branch, revision, kitproduct,
# path, delete, logfile. On success a "branch,revision,kitproduct" line is
# appended to $logfile.
fetchsdk() {
    if [[ -z "$revision" ]]; then
        # No revision given: ask the build server for the latest built one.
        # -e is dropped temporarily because the wget|grep pipeline may fail.
        set +e
        revision=$(wget -o /dev/null -O - "http://svn.arrisi.com/build_remote.php?action=latest_build_rev&repo=dev&project=bsg&branch=$branch" | grep . || echo "N/A")
        set -e
        if [ "$revision" == "N/A" ]; then
            echo "Error: Could not find any built revision for $branch."
            exit 1
        fi
    fi
    # One log line per downloaded kit: "branch,revision,kitproduct".
    logentry="$branch,$revision,$kitproduct"
    touch $logfile
    # '|| true' keeps 'set -e' from aborting when the entry is not present.
    already_downloaded=$(grep "$logentry" -o $logfile || true)
    if [ -z "$already_downloaded" ];then
        curdir=$(pwd)
        cd $path
        echo "Trying to fetch $kitproduct for $branch@$revision..."
        if [[ -n $delete ]]; then
            # Delete mode: download, then parse the archive name out of
            # wget's verbose log and remove the local copy again.
            wget --content-disposition "http://kreatvkits.arrisi.com/$branch@${revision}/${kitproduct}" -nv 2>&1 | tee .download_log
            archive=$(cat .download_log |sed -e 's/\(.*\)"kreatv\(.\?\)-sdk\(.*\)"\(.*\)/kreatv\2-sdk\3/')
            echo "Deleting downloaded archive $archive"
            rm -f $archive
            rm -f .download_log
        else
            # On failure, show the valid product names and abort.
            wget --content-disposition "http://kreatvkits.arrisi.com/$branch@${revision}/${kitproduct}" \
                || (echo "Error: Could not find kit product '$kitproduct', possible values are:" \
                && list && exit 1)
        fi
        ### Download was successful, write to log:
        echo "$logentry" >> $logfile
        cd $curdir
    else
        echo "Kit product $kitproduct is already downloaded for $branch@$revision (according to file $logfile)."
        echo "Did nothing..."
        exit 0
    fi
}
branch=
kitproduct=
path=.
revision=
delete=
edit=
list=
log=.downloaded_sdks_in_current_dir
if [ "$#" -eq 0 ]; then
usage
exit 0
fi
while [ "$#" -gt 0 ]; do
case $1 in
-h|--help)
usage
exit 0
;;
-o|--output)
path=$2
shift
;;
-d|--delete)
delete=1
;;
-e|--edit)
edit=1
;;
-l|--list)
list=1
;;
*)
if [[ -z "$branch" ]]; then
read branch revision <<<$(IFS="@"; echo $1)
elif [[ -z "$kitproduct" ]]; then
kitproduct=$1
else
echo "Error: Too many arguments"
help
fi
;;
esac
shift
done
if [ ! -d "$path" ]; then
echo "Error: Directory $path does not exist"
help
fi
logfile=$path/$log
if [[ -n "$edit" ]]; then
edit
exit 0
fi
if [[ -z $branch ]]; then
echo "Error: Missing BRANCH argument."
help
fi
if [[ -n "$list" ]]; then
list
exit 0
fi
if [[ -z $kitproduct ]]; then
echo "Error: Missing KITPRODUCT argument."
help
fi
fetchsdk
| true |
099090bec47562d658388e4ea6a7787091882d0e | Shell | NPOpenSource/jass-plugin | /publsh.sh | UTF-8 | 504 | 3.53125 | 4 | [] | no_license | #!/bin/bash
set -e
versionfile="version.txt"
version="jass-plugin-0.0.1.vsix"
# Read the last line of $versionfile into the global 'version'.
# -r keeps backslashes literal; '|| [ -n "$line" ]' still picks up a final
# line that lacks a trailing newline.
readVersion(){
    while read -r line || [ -n "$line" ]
    do
        version=$line
    done < "$versionfile"
}
readVersion
echo "最后的版本号:"$version
vsce package
# Locate the .vsix produced by 'vsce package'. Iterate the glob directly
# instead of parsing `ls` output; the -e guard skips the literal pattern
# when no package file exists.
lastVersion=""
for element in *.vsix
do
    [ -e "$element" ] || continue
    lastVersion=$element
done
# Refuse to publish if the version in package.json was not bumped.
if [ "$lastVersion" == "$version" ];then
    echo "请修改package.json的版本号"
    exit 1
fi
vsce publish
echo "$lastVersion" > "$versionfile"
echo "当前版本号:"$lastVersion
| true |
dcfd2e760fb8bcdc43bba9eb793f32ce636f1e50 | Shell | kazuakiishiguro/dotfiles | /bash/.bashrc | UTF-8 | 4,836 | 3.515625 | 4 | [] | no_license | HISTSIZE=1000
# Keep a larger history on disk than in memory.
HISTFILESIZE=2000

# True (exit status 0) when the given name resolves to a runnable command.
_is_command() {
    command -v "$1" > /dev/null 2>&1
}
# arch
# CPU architecture (e.g. x86_64 or arm64); selects Homebrew/nvm paths below.
arch=`uname -m`
# os
# Coarse OS detection: 'osx' for macOS, 'debian' for Ubuntu, else 'unknown'.
platform='unknown'
if [[ `uname` == 'Darwin' ]]; then
    platform='osx'
elif [[ `uname -a` == *Ubuntu* ]]; then
    platform='debian'
    if _is_command Xorg; then
        # set xcape keymap
        source $HOME/.bin/start-xcape.sh
    fi
fi
# env
export PATH="$PATH:$HOME/.bin"
export PATH="/usr/local/sbin:$PATH"
export PATH="$HOME/.local/bin:$PATH"
# lang
# Default to UTF-8 only when no locale is configured yet.
if [[ -z "$LANG" ]]; then
    export LANG='en_US.UTF-8'
    export LC_ALL="en_US.UTF-8"
fi
# git branch
# Print the current Git branch as "(name)", or nothing outside a repo.
# sed deletes non-current branches (lines not starting with '*') and wraps
# the current one in parentheses; git errors are silenced for non-repo dirs.
function _git_branch () {
    # Get current Git branch
    git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/(\1)/'
}
# comand prompt
# Build PS1 per platform; the git branch is re-evaluated on every prompt via
# the escaped \$(_git_branch).
if [ "$platform" = osx ]; then
    if [ "$arch" == 'arm64' ]; then
        export PS1="[${arch}]\u@\h:\[\e[36m\]\w\[\e[32m\]\$(_git_branch)\[\e[0m\]\$ "
        # export brew path
        export PATH=/opt/homebrew/bin:$PATH
    else
        export PS1="\u@\h:\[\e[36m\]\w\[\e[32m\]\$(_git_branch)\[\e[0m\]\$ "
    fi
elif [ "$platform" = debian ]; then
    # set variable identifying the chroot you work in (used in the prompt below)
    if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
        debian_chroot=$(cat /etc/debian_chroot)
    fi
    # set a fancy prompt (non-color, unless we know we "want" color)
    case "$TERM" in
        xterm-color|*-256color) color_prompt=yes;;
    esac
    # uncomment for a colored prompt, if the terminal has the capability; turned
    # off by default to not distract the user: the focus in a terminal window
    # should be on the output of commands, not on the prompt
    #force_color_prompt=yes
    if [ -n "$force_color_prompt" ]; then
        if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
            # We have color support; assume it's compliant with Ecma-48
            # (ISO/IEC-6429). (Lack of such support is extremely rare, and such
            # a case would tend to support setf rather than setaf.)
            color_prompt=yes
        else
            color_prompt=
        fi
    fi
    if [ "$color_prompt" = yes ]; then
        PS1="${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ "
    else
        PS1="${debian_chroot:+($debian_chroot)}\u@\h:\w\$ "
    fi
    unset color_prompt force_color_prompt
    # If this is an xterm set the title to user@host:dir
    case "$TERM" in
    xterm*|rxvt*)
        PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
        ;;
    *)
        ;;
    esac
fi
# aliases
alias ls='ls --color=auto'
alias ll='ls -al'
# Bulk docker cleanup helpers; the $(...) expands when the alias runs.
alias dockerrm='docker rm $(docker ps -aq)'
alias dockerrmi='docker rmi $(docker images -aq)'
alias dockerstop='docker stop $(docker ps -aq)'
alias dockerkill='docker kill $(docker ps -aq)'
alias emacsmin='emacs -nw --no-init-file --no-site-file'
alias emacs='emacs -nw'
## check if it's arm64
if [ "$arch" == 'arm64' ]; then
    alias screen='/opt/homebrew/bin/screen'
else
    alias screen='/usr/local/bin/screen'
fi
# nvm
if [ -e "$HOME/bin" ];then
    export PATH="$HOME/bin:./node_modules/.bin:$PATH"
fi
if [ -e "$HOME/.nvm" ]; then
    export NVM_DIR="$HOME/.nvm"
    if [ -e "/usr/local/opt/nvm/nvm.sh" ]; then
        . "/usr/local/opt/nvm/nvm.sh"
    elif [ -e "$NVM_DIR/nvm.sh" ]; then
        . "$NVM_DIR/nvm.sh"
    fi
fi
if [ "$arch" == 'arm64' ]; then
    export NVM_DIR="$HOME/.nvm"
    if [ -e "/opt/homebrew/opt/nvm/nvm.sh" ]; then
        # This loads nvm
        [ -s "/opt/homebrew/opt/nvm/nvm.sh" ] && . "/opt/homebrew/opt/nvm/nvm.sh"
        # This loads nvm bash_completion
        [ -s "/opt/homebrew/opt/nvm/etc/bash_completion.d/nvm" ] && . "/opt/homebrew/opt/nvm/etc/bash_completion.d/nvm"
    fi
fi
# go
# NOTE(review): `which go` via backticks; `command -v go` would avoid the
# external dependency and deprecated tool.
if [ -x "`which go`" ]; then
    # I chose to download arm64 go binary from here: https://golang.org/doc/install?download=go1.16beta1.darwin-arm64.pkg
    if [ "$arch" == 'arm64' ]; then
        export GOPATH=/usr/local/go
    else
        export GOPATH=$HOME/.go
    fi
    export PATH=$PATH:$GOPATH/bin
fi
# rustc
if [ -e "$HOME/.cargo" ]; then
    export PATH="$HOME/.cargo/bin:$PATH"
fi
# Use sccache as the rustc compilation cache when it is installed.
if _is_command sccache; then
    export RUSTC_WRAPPER=`which sccache`
fi
# for macos only setting
if [ "$platform" = osx ]; then
    # dircolors
    export PATH="$(brew --prefix coreutils)/libexec/gnubin:$PATH"
    # LS_COLORS="di=01;36"
    # export LS_COLORS
    if [ -f "$HOME/.dircolors" ]; then
        if type dircolors > /dev/null 2>&1; then
            eval $(dircolors $HOME/.dircolors/dircolors.256dark)
        elif type gdircolors > /dev/null 2>&1; then
            eval $(gdircolors $HOME/.dircolors/dircolors.256dark)
        fi
    fi
fi
# for gpg sign
export GPG_TTY=$(tty)
# for mosh
export LC_ALL="en_US.UTF-8"
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
# rbenv
if [ -d "$HOME/.rbenv" ]; then
    export PATH=${HOME}/.rbenv/bin:${PATH} && \
    eval "$(rbenv init -)"
fi
| true |
0f24759f36796702392a336b5b76464a8cf1a24b | Shell | s0la/scripts | /drives/sh/61_GB.sh | UTF-8 | 314 | 2.859375 | 3 | [] | no_license | #!/bin/bash
# Mount the 61_GB partition if it is not already mounted. The command
# substitution is quoted: unquoted, a multi-word grep match would make
# '[ -z ... ]' fail with "too many arguments".
if [ -z "$(mount | grep "/media/sola/61_GB")" ]; then
    gksudo mount /dev/sda5 /media/sola/61_GB
fi
# Focus an existing file-manager window for the drive, or open a new one.
opened=$(wmctrl -l | grep '61_GB')
if [ "$opened" ]; then
    # The first 10 characters of the wmctrl line are the window id.
    wmctrl -i -a "${opened:0:10}"
else
    Documents/scripts/files/sh/thunar_opacity.sh /media/sola/61_GB
fi
Documents/scripts/general/sh/shift.sh 8 1 | true |
a1c244f49f73ae64b0a84b96afaf80273d49c6c0 | Shell | joelandman/nyble | /OS/centos7/gen_kd.bash | UTF-8 | 754 | 3.03125 | 3 | [] | no_license | #!/bin/bash -x
set -e
# Source kernel.data when present. An explicit 'if' is used instead of
# '[[ -e ... ]] && source' because, under 'set -e', the failed && list would
# abort the whole script when the file is merely absent.
if [[ -e kernel.data ]]; then
    . ./kernel.data
fi
pwd
echo TARGET = $TARGET
echo URL    = $KERNEL_URL
echo K VERS = $KERNEL_VERSION
echo KV     = $KV
echo NYBLE_KERNEL = $NK
echo DISTRO = $DISTRO
# Extract installed kernel versions from the target root into k.d,
# and a version without the ".el7" suffix into kv.d.
rpm -qa --installroot=${TARGET} | grep kernel | perl -lane 's/kernel-.*?(\d.*?).x86_64/$1/g;print' | sort | uniq > k.d
cat k.d | perl -lane 's/\.el7//g;print' > kv.d
# create the kernel.data
echo DISTRO=${DISTRO} > "${TARGET}/root/kernel.data2"
echo NK=${NK} >> "${TARGET}/root/kernel.data2"
echo KERNEL_URL=${KERNEL_URL} >> "${TARGET}/root/kernel.data2"
echo KERNEL_VERSION=$(cat k.d) >> "${TARGET}/root/kernel.data2"
echo KV=$(cat kv.d | perl -lane 's/^(\d+\.\d+)(.*?)$/$1/g;print') >> "${TARGET}/root/kernel.data2"
| true |
ddc966468620443a33d203e0c193b8ac127c93ec | Shell | n1ay/nodejs-iperf-server | /run.sh | UTF-8 | 2,846 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# Restart wpa, dhclient and the nodejs server every restart_timeout seconds.
restart_timeout=7200

# Detect the host OS once; the rest of the script dispatches on $sys_type.
if uname | grep -qi darwin
then
    sys_type='darwin'
else
    sys_type='linux'
fi
# macOS cleanup: kill screen sessions running node plus any node iperf
# servers, then prune dead screen sessions.
# NOTE(review): the ps|grep pipelines match by substring and may also catch
# the grep processes themselves; kill errors are intentionally unchecked.
clean_darwin() {
    echo "Running ${FUNCNAME[0]}..."
    ps -ef | grep -i screen | grep node | awk '{ print $2 }' | xargs kill -9
    ps -ef | grep node | grep iperf | awk '{ print$2 }' | xargs kill -9
    screen -wipe
    echo "${FUNCNAME[0]}: OK"
}
# Linux cleanup: additionally kill the wpa screen session and any
# wpa_supplicant processes (via sudo), then prune dead screen sessions.
clean_linux() {
    echo "Running ${FUNCNAME[0]}..."
    ps -ef | grep -i screen | grep wpa | awk '{ print $2 }' | xargs kill -9
    ps -ef | grep -i screen | grep node | awk '{ print $2 }' | xargs kill -9
    ps -ef | grep wpa_supp | awk '{ print $2 }' | xargs sudo kill -9
    ps -ef | grep node | grep iperf | awk '{ print$2 }' | xargs kill -9
    screen -wipe
    echo "${FUNCNAME[0]}: OK"
}
# Dispatch to the OS-specific cleanup routine, logging entry and exit.
clean() {
    printf 'Running %s...\n' "${FUNCNAME[0]}"
    "clean_${sys_type}"
    printf '%s: OK\n' "${FUNCNAME[0]}"
}
# Signal handler: run the OS-specific cleanup, then leave the script.
finish() {
    echo "Running ${FUNCNAME[0]}..."
    clean
    exit
}
#trap for cleaning after ctrl-c
trap finish INT
# Dispatch to the OS-specific startup routine, logging entry.
main() {
    printf 'Running %s...\n' "${FUNCNAME[0]}"
    "main_${sys_type}"
}
# Linux startup: launch wpa_supplicant in a detached screen session, poll
# wpa.log until EAPOL authentication succeeds, obtain a DHCP lease, then
# start the nodejs iperf server in a second screen session.
main_linux() {
    echo "Running ${FUNCNAME[0]}..."
    echo "Running wpa supplicant for wlan0 interface"
    clean
    # Truncate the wpa_supplicant debug log before starting.
    echo -n "" > wpa.log
    screen -S wpa -d -m sudo wpa_supplicant -i wlan0 -c /etc/wpa_supplicant/wpa_supplicant.conf -dd -f wpa.log
    auth=""
    while [ -z "$auth" ]
    do
        echo -n "Waiting for wpa supplicant to authenticate"
        # Poll the log once per second, up to 30 times per outer round.
        for i in $(seq 1 30)
        do
            if ! [ -z "$auth" ]
            then
                break
            fi
            auth=$(grep "EAPOL authentication completed - result=SUCCESS" wpa.log)
            sleep 1
            echo -n "."
        done
        echo ""
    done
    echo $auth
    sudo dhclient wlan0 -v
    echo -e "\nRunning nodejs server on $(ifconfig enp0s3 | grep "inet addr" | awk '{ print $2 }'):8080"
    cd nodejs-iperf-server
    screen -S node -d -m nodejs iperf_server.js
    cd ..
    echo "${FUNCNAME[0]}: OK"
}
# macOS startup: clean up, then start the nodejs iperf server in a detached
# screen session (no wifi handling is needed here).
main_darwin() {
    echo "Running ${FUNCNAME[0]}..."
    clean
    echo -e "\nRunning nodejs server on $(ifconfig en0 | grep "inet addr" | awk '{ print $2 }'):8080"
    cd nodejs-iperf-server
    screen -S node -d -m node iperf_server.js
    cd ..
}
# Command dispatch:
#   no args  -> supervisor loop: re-run this script with 'main' under a hard
#               timeout (SIGKILL after $restart_timeout), then clean and repeat
#   main     -> one startup cycle
#   clean|c  -> cleanup only
if [ $# -eq 0 ]
then
    echo "Detected $sys_type system"
    echo "Restarting everything every $restart_timeout seconds"
    while [ 1 ]
    do
        if [ "$sys_type" == "linux" ]
        then
            timeout --signal=9 $restart_timeout ./$(basename "$0") main
            sleep $restart_timeout
            echo "" > wpa.log
        else
            # macOS: GNU coreutils installs timeout as 'gtimeout'.
            gtimeout --signal=9 $restart_timeout ./$(basename "$0") main
            sleep $restart_timeout
        fi
        clean
    done
elif [ $# -eq 1 ] && [ "$1" == "main" ]
then
    echo "Calling main..."
    main
elif [ $# -eq 1 ] && ([ "$1" == "clean" ] || [ "$1" == "c" ])
then
    echo "Calling clean..."
    clean
else
    echo "Unsupported command. Exiting..."
fi
| true |
08356bfc3455552cb5c0de3074eb363cb0785b75 | Shell | tribblix/build | /create-illumos-aliases | UTF-8 | 356 | 3.453125 | 3 | [] | no_license | #!/bin/ksh
#
# catalog format is
# alias|name
#
# THOME may be preset in the environment; default to the Tribblix tree.
THOME=${THOME:-/packages/localsrc/Tribblix}
PKGDIR=${THOME}/illumos-pkgs-m18/pkgs
# A single command-line argument overrides the package directory.
case $# in
1)
    PKGDIR=$1
    ;;
esac
#
# Abort if the package directory cannot be entered; otherwise the glob
# below would silently run against whatever the current directory is.
cd "$PKGDIR" || exit 1
for ZPKG in *.zap
do
    # TRIBfoo.zap -> package name TRIBfoo, alias foo
    PNAME=${ZPKG%%.*}
    ANAME=${PNAME#TRIB}
    echo "${ANAME}|${PNAME}"
    case $ANAME in
    net-*)
        # net-foo packages get a second alias without the net- prefix.
        XNAME=${ANAME#net-}
        echo "${XNAME}|${PNAME}"
        ;;
    esac
done
| true |
eba6c3590589d80c4601890f0967384fc57e0d7a | Shell | raphaoo/roadiz | /samples/vagrant-solr-provisioning.sh | UTF-8 | 1,824 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
export DEBIAN_FRONTEND=noninteractive
# Apache Solr
SOLR_VERSION="5.3.1"
SOLR_MIRROR="http://apache.mirrors.ovh.net/ftp.apache.org/dist"
sudo apt-get -qq update;
# Install Apache Solr - based on article from Tomasz Muras - https://twitter.com/zabuch
# http://jmuras.com/blog/2012/setup-solr-4-tomcat-ubuntu-server-12-04-lts/
echo -e "\n--- Installing Open JDK ---\n"
sudo apt-get -qq -f -y install openjdk-7-jre-headless unzip > /dev/null 2>&1;
echo -e "\n--- Installing Apache Solr (may take a while, be patient) ---\n"
cd /tmp/
# BUGFIX: the quiet flag used to be a Unicode en-dash ("–q"), which wget
# would have treated as a URL argument instead of an option.
sudo wget -q --output-document=solr-$SOLR_VERSION.tgz $SOLR_MIRROR/lucene/solr/$SOLR_VERSION/solr-$SOLR_VERSION.tgz > /dev/null 2>&1
tar xzf solr-$SOLR_VERSION.tgz
sudo cp -fr solr-$SOLR_VERSION /opt/solr
sudo cp /opt/solr/bin/init.d/solr /etc/init.d/solr
# Run the init script as root rather than the 'solr' user.
sudo sed -i "s/RUNAS=solr/#RUNAS=solr/" /etc/init.d/solr
sudo mkdir -p /var/solr
sudo cp /opt/solr/bin/solr.in.sh /var/solr/solr.in.sh
sudo update-rc.d solr defaults > /dev/null 2>&1;
sudo update-rc.d solr enable > /dev/null 2>&1;
sudo service solr start > /dev/null 2>&1;
echo -e "\n--- Create a new Solr core called \"roadiz\" ---\n"
sudo /opt/solr/bin/solr create_core -c roadiz > /dev/null 2>&1;
echo -e "\n--- Restarting Solr server ---\n"
sudo service solr restart > /dev/null 2>&1;
##### CLEAN UP #####
sudo dpkg --configure -a > /dev/null 2>&1; # when upgrade or install doesnt run well (e.g. loss of connection) this may resolve quite a few issues
sudo apt-get autoremove -y > /dev/null 2>&1; # remove obsolete packages
echo -e "\n-----------------------------------------------------------"
echo -e "\n---------------- Your Solr server is ready ----------------"
echo -e "\n* Type http://localhost:8983/solr to use Apache Solr admin."
echo -e "\n-----------------------------------------------------------"
| true |
b7b45256eb4c8ed996711880925b328823056b59 | Shell | Helly1206/rpipoweroff | /install.sh | UTF-8 | 2,005 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# Install/uninstall helper for the rpipoweroff daemon.
#   (no args) install, -u uninstall, -h help, -d build .deb, -c clean.
NAME="rpipoweroff"
OPTDIR="/opt"
OPTLOC="$OPTDIR/$NAME"
DEBFOLDER="debian"
ETCDIR="/etc"
ETCLOC=$ETCDIR
SERVICEDIR="$ETCDIR/systemd/system"
SERVICESCRIPT="$NAME.service"
INSTALL="/usr/bin/install -c"
INSTALL_DATA="$INSTALL -m 644"
# Root is required because files go under /opt and /etc/systemd.
if [ "$EUID" -ne 0 ]
then
	echo "Please execute as root ('sudo install.sh' or 'sudo make install')"
	exit
fi
if [ "$1" == "-u" ] || [ "$1" == "-U" ]
then
	echo "$NAME uninstall script"
	echo "Uninstalling daemon $NAME"
	systemctl stop "$SERVICESCRIPT"
	systemctl disable "$SERVICESCRIPT"
	if [ -e "$SERVICEDIR/$SERVICESCRIPT" ]; then rm -f "$SERVICEDIR/$SERVICESCRIPT"; fi
	echo "Removing files"
	if [ -d "$OPTLOC" ]; then
		rm -rf "$OPTLOC"
	fi
elif [ "$1" == "-h" ] || [ "$1" == "-H" ]
then
	echo "Usage:"
	echo "  <no argument>: install $NAME"
	echo "  -u/ -U     : uninstall $NAME"
	echo "  -h/ -H     : this help file"
	echo "  -d/ -D     : build debian package"
	echo "  -c/ -C     : Cleanup compiled files in install folder"
elif [ "$1" == "-c" ] || [ "$1" == "-C" ]
then
	echo "$NAME Deleting compiled files in install folder"
	rm -f ./*.deb
	rm -rf "$DEBFOLDER"/$NAME
	rm -rf "$DEBFOLDER"/.debhelper
	rm -f "$DEBFOLDER"/files
	rm -f "$DEBFOLDER"/files.new
	rm -f "$DEBFOLDER"/$NAME.*
elif [ "$1" == "-d" ] || [ "$1" == "-D" ]
then
	echo "$NAME build debian package"
	fakeroot debian/rules clean binary
	mv ../*.deb .
else
	# Default action: copy the payload into /opt and optionally install
	# and start the systemd service.
	echo "$NAME install script"
	if [ ! -d "$OPTLOC" ]; then
		mkdir "$OPTLOC"
	fi
	cp -r ".$OPTLOC/." "$OPTLOC/"
	echo "Installing daemon $NAME"
	read -p "Do you want to install an automatic startup service for $NAME (Y/n)? " -n 1 -r
	echo    # (optional) move to a new line
	if [[ $REPLY =~ ^[Nn]$ ]]
	then
		echo "Skipping install automatic startup service for $NAME"
	else
		echo "Install automatic startup service for $NAME"
		$INSTALL_DATA ".$SERVICEDIR/$SERVICESCRIPT" "$SERVICEDIR/$SERVICESCRIPT"
		systemctl enable $SERVICESCRIPT
		systemctl start $SERVICESCRIPT
	fi
fi
| true |
70cc745c9cbf51d695f1aed22c6b648ce1847fe2 | Shell | Alexander-Serov/job-manager | /sbatch_one_job.sh | UTF-8 | 625 | 2.875 | 3 | [
"MIT"
] | permissive | #! /bin/bash
# Sbatch options
#SBATCH -J ito-guy
#SBATCH -p dedicated
#SBATCH --qos=dbc
### #SBATCH --qos=fast
#SBATCH --cpus-per-task=1
#SBATCH --mem-per-cpu=1200MB
#SBATCH --time=120
# Constants
logs_folder="./logs/"
args_file="arguments.dat"
# Pick line number $SLURM_ARRAY_TASK_ID out of the arguments file. The awk
# program is double-quoted on purpose so the shell expands the task id.
argument=$(awk "NR==${SLURM_ARRAY_TASK_ID}" "$args_file")
# Launch srun with these argument sequence
module load Python/3.6.0
echo "$argument"
# $argument is deliberately unquoted below so it word-splits into separate
# command-line arguments for main.py.
srun -o "${logs_folder}log_job_${SLURM_ARRAY_TASK_ID}.out" -e "${logs_folder}log_job_${SLURM_ARRAY_TASK_ID}.err" -J "${SLURM_ARRAY_TASK_ID}" python3 main.py $argument
| true |
6bbab7caae5234295d1bff7469298278cdf6178e | Shell | richnieh/phpcl_core_php8_developers | /lfc.sh | UTF-8 | 440 | 3.4375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Resolve vendor paths relative to the current working directory.
DIR=$(pwd)
TOOLS_DIR=$DIR/vendor/phpcl/lfc_tools
LFC_DIR=$DIR/vendor/linuxforphp/linuxforcomposer/bin
export USAGE="Usage: lfc.sh start|stop|deploy|creds"
# A sub-command is required.
if [[ -z "$1" ]]; then
    echo "$USAGE"
    exit 1
fi
if [[ "$1" = "start" || "$1" = "stop" || "$1" = "deploy" ]]; then
    php "$LFC_DIR/linuxforcomposer.phar" docker:run "$1"
elif [[ "$1" = "creds" ]]; then
    # $2..$6 stay unquoted on purpose: absent arguments must disappear
    # rather than be passed through as empty strings.
    php "$TOOLS_DIR/generate_creds.php" $2 $3 $4 $5 $6
else
    echo "$USAGE"
    exit 1
fi
| true |
908e7b1a29f0b0ba27ab4eacb2f96775ef658cb3 | Shell | ndsev/zserio | /scripts/show_parser_tree.sh | UTF-8 | 6,594 | 4.09375 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Directory containing this script, used to locate the shared helpers
# (stderr_echo, convert_to_absolute_path, ...). $(...) replaces the legacy
# backticks and "$0" is quoted so paths with spaces work.
SCRIPT_DIR=$(dirname "$0")
source "${SCRIPT_DIR}/common_tools.sh"
# Show parser tree using grun.
#
# Generates the ANTLR4 lexer/parser from the Zserio grammars, compiles the
# generated Java sources, and opens grun's token dump + parse-tree GUI for
# the given source file.
# Arguments: zserio source, parser rule, project root, build directory.
show_parser_tree()
{
    exit_if_argc_ne $# 4
    local PARAM_ZSERIO_SOURCE="${1}" ; shift
    local PARAM_PARSER_RULE="${1}" ; shift
    local ZSERIO_PROJECT_ROOT="${1}" ; shift
    local PARSER_TREE_BUILD_DIR="${1}" ; shift
    local ANTLR4_JAR="${ZSERIO_PROJECT_ROOT}/3rdparty/java/antlr-4.7.2-complete.jar"
    local ANTLR4_GRAMMAR_DIR="${ZSERIO_PROJECT_ROOT}/compiler/core/antlr"
    "${JAVA_BIN}" -jar "${ANTLR4_JAR}" "${ANTLR4_GRAMMAR_DIR}"/Zserio*.g4 -o "${PARSER_TREE_BUILD_DIR}"
    if [ $? -ne 0 ]; then
        return 1
    fi
    echo
    "${JAVAC_BIN}" "${PARSER_TREE_BUILD_DIR}"/Zserio*.java -cp .:"${ANTLR4_JAR}"
    if [ $? -ne 0 ]; then
        return 1
    fi
    echo
    # grun must run from the directory that holds the compiled parser classes.
    pushd "${PARSER_TREE_BUILD_DIR}" > /dev/null
    java -cp .:"${ANTLR4_JAR}" org.antlr.v4.gui.TestRig Zserio "${PARAM_PARSER_RULE}" -tokens -gui \
            "${PARAM_ZSERIO_SOURCE}"
    local GRUN_RESULT=$?
    popd > /dev/null
    return ${GRUN_RESULT}
}
# Set and check global variables used by this script.
#
# Resolves JAVA_BIN/JAVAC_BIN: JAVA_HOME wins when set, otherwise any
# user-provided value is kept, falling back to the bare command names.
# Returns 1 when either binary cannot be resolved to an existing file.
set_parser_tree_global_variables()
{
    # check java binary; ${JAVA_HOME:-} tolerates an unset variable
    if [ -n "${JAVA_HOME:-}" ] ; then
        JAVA_BIN="${JAVA_HOME}/bin/java"
    fi
    JAVA_BIN="${JAVA_BIN:-java}"
    # 'command -v' replaces the deprecated external 'which' and resolves
    # bare command names through PATH.
    if [ ! -f "$(command -v "${JAVA_BIN}")" ] ; then
        stderr_echo "Cannot find java! Set JAVA_HOME or JAVA_BIN environment variable."
        return 1
    fi

    # check javac binary
    if [ -n "${JAVA_HOME:-}" ] ; then
        JAVAC_BIN="${JAVA_HOME}/bin/javac"
    fi
    JAVAC_BIN="${JAVAC_BIN:-javac}"
    if [ ! -f "$(command -v "${JAVAC_BIN}")" ] ; then
        stderr_echo "Cannot find java compiler! Set JAVA_HOME or JAVAC_BIN environment variable."
        return 1
    fi

    return 0
}
# Print help on the environment variables used by this script.
# The heredoc is emitted verbatim to stdout.
print_parser_tree_help_env()
{
    cat << EOF
Uses the following environment variables:
    JAVAC_BIN    Java compiler executable to use. Default is "javac".
    JAVA_BIN     Java executable to use. Default is "java".
EOF
}
# Print help message.
# $0 inside the heredoc expands to the script invocation path.
print_help()
{
    cat << EOF
Description:
    Shows ANTLR4 parser tree.
Usage:
    $0 [-h] [-e] [-c] [-p] [-o <dir>] [-r rule] zserio_source
Arguments:
    -h, --help              Show this help.
    -e, --help-env          Show help for enviroment variables.
    -c, --clean             Clean output directory and exit.
    -p, --purge             Purge output directory before start.
    -o <dir>, --output-directory <dir>
                            Output directory where to store ANTLR4 outputs.
    -r <rule>, --parser-rule <rule>
                            Parser rule to accept (default is packageDeclaration).
    zserio_source           Zserio source for which to show ANTLR4 parser tree.
Examples:
    $0 test.zs
EOF
}
# Parse all command line arguments.
#
# The first five arguments are *names* of caller variables; results are
# written back into them via eval (out-parameters).
#
# Return codes:
# -------------
# 0 - Success. Arguments have been successfully parsed.
# 1 - Failure. Some arguments are wrong or missing.
# 2 - Help switch is present. Arguments after help switch have not been checked.
# 3 - Environment help switch is present. Arguments after help switch have not been checked.
parse_arguments()
{
    local NUM_OF_ARGS=5
    exit_if_argc_lt $# ${NUM_OF_ARGS}
    local PARAM_ZSERIO_SOURCE_OUT="$1"; shift
    local PARAM_OUT_DIR_OUT="$1"; shift
    local PARAM_PARSER_RULE_OUT="$1"; shift
    local SWITCH_CLEAN_OUT="$1"; shift
    local SWITCH_PURGE_OUT="$1"; shift
    eval ${SWITCH_CLEAN_OUT}=0
    eval ${SWITCH_PURGE_OUT}=0
    local NUM_PARAMS=0
    local PARAM_ARRAY=();
    local ARG="$1"
    while [ $# -ne 0 ] ; do
        case "${ARG}" in
            "-h" | "--help")
                return 2
                ;;
            "-e" | "--help-env")
                return 3
                ;;
            "-c" | "--clean")
                eval ${SWITCH_CLEAN_OUT}=1
                shift
                ;;
            "-p" | "--purge")
                eval ${SWITCH_PURGE_OUT}=1
                shift
                ;;
            "-o" | "--output-directory")
                eval ${PARAM_OUT_DIR_OUT}="$2"
                shift 2
                ;;
            "-r" | "--parser-rule")
                eval ${PARAM_PARSER_RULE_OUT}="$2"
                shift 2
                ;;
            "-"*)
                stderr_echo "Invalid switch '${ARG}'!"
                echo
                return 1
                ;;
            *)
                if [ ${NUM_PARAMS} -eq 1 ] ; then
                    # NOTE(review): ${PARAM} is never assigned anywhere in
                    # this function; the message probably meant ${ARG}.
                    stderr_echo "Invalid argument '${PARAM}'!"
                    echo
                    return 1
                fi
                eval ${PARAM_ZSERIO_SOURCE_OUT}="${ARG}"
                NUM_PARAMS=1
                shift
                ;;
        esac
        # NOTE(review): once all args are consumed "$1" is unset, so ARG
        # becomes empty; the loop then exits via the 'while' test above.
        ARG="$1"
    done
    if [[ ${!SWITCH_CLEAN_OUT} == 0 && ${NUM_PARAMS} == 0 ]] ; then
        stderr_echo "Zserio source is not specified!"
        echo
        return 1
    fi
    return 0
}
# Entry point: parse arguments, resolve Java tooling, prepare the build
# directory (optionally purging it), and show the parser tree unless only
# cleaning was requested.
main()
{
    # get the project root
    local ZSERIO_PROJECT_ROOT="${SCRIPT_DIR}/.."
    # parse command line arguments
    local PARAM_ZSERIO_SOURCE
    local PARAM_OUT_DIR="${ZSERIO_PROJECT_ROOT}"
    local PARAM_PARSER_RULE="packageDeclaration"
    local SWITCH_CLEAN
    local SWITCH_PURGE
    parse_arguments PARAM_ZSERIO_SOURCE PARAM_OUT_DIR PARAM_PARSER_RULE SWITCH_CLEAN SWITCH_PURGE "$@"
    local PARSE_RESULT=$?
    if [ ${PARSE_RESULT} -eq 2 ] ; then
        print_help
        return 0
    elif [ ${PARSE_RESULT} -eq 3 ] ; then
        print_parser_tree_help_env
        return 0
    elif [ ${PARSE_RESULT} -ne 0 ] ; then
        return 1
    fi
    # set global variables if needed
    set_parser_tree_global_variables
    if [ $? -ne 0 ] ; then
        return 1
    fi
    # purge if requested and then create build directory
    local PARSER_TREE_BUILD_DIR="${PARAM_OUT_DIR}/build/parser_tree"
    if [[ ${SWITCH_PURGE} == 1 || ${SWITCH_CLEAN} == 1 ]] ; then
        echo "Purging build directory."
        echo
        rm -rf "${PARSER_TREE_BUILD_DIR}/"
    fi
    mkdir -p "${PARSER_TREE_BUILD_DIR}"
    # show parser tree using grun
    if [[ ${SWITCH_CLEAN} == 0 ]] ; then
        # grun needs absolute paths
        convert_to_absolute_path "${PARAM_ZSERIO_SOURCE}" PARAM_ZSERIO_SOURCE
        convert_to_absolute_path "${ZSERIO_PROJECT_ROOT}" ZSERIO_PROJECT_ROOT
        show_parser_tree "${PARAM_ZSERIO_SOURCE}" "${PARAM_PARSER_RULE}" "${ZSERIO_PROJECT_ROOT}" \
            "${PARSER_TREE_BUILD_DIR}"
        if [ $? -ne 0 ] ; then
            return 1
        fi
    fi
    return 0
}
# call main function
main "$@"
| true |
bbb31e0110b5764ffdd31d0780624c0f9dba14e8 | Shell | Alee24/KaliLinuxMiniStarter | /Modules/downloads/Addons | UTF-8 | 1,117 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# These add-ons should be installed in your default browser.
# Loop forever showing the add-on links; option 99 returns to the installer.
while true
do
echo -e "\e[1;32m ======================================================================= \e[0m"
echo -e "\e[1;33m    Open and this Links on your Default Browser(iceweasel/firefox \e[0m)"
echo -e "\e[1;33m    Install the Addons and restart the browser \e[0m"
echo -e "\e[1;32m ======================================================================= \e[0m"
echo "Web Developer Add-on:"
echo "  https://addons.mozilla.org/en-US/firefox/addon/web-developer/"
echo
echo "Tamper Data:"
echo "  https://addons.mozilla.org/en-US/firefox/addon/tamper-data/"
echo
echo "Foxy Proxy:"
echo "  https://addons.mozilla.org/en-US/firefox/addon/foxyproxy-standard/"
echo
echo "User Agent Switcher:"
echo "  https://addons.mozilla.org/en-US/firefox/addon/user-agent-switcher/"
echo -e "\e[1;32m ======================================================================= \e[0m"
echo -e "\e[1;34m
    99)Exit
    \e[0m "
echo -e "\e[1;36mExit \e[0m "
# -r keeps backslashes in the typed choice literal.
read -r n
case "$n" in
99) /opt/scripts/Modules/downloads/install_tools;;
# BUGFIX: this arm used to be '*)invalid option;;', which tried to execute
# a nonexistent command named 'invalid' instead of printing a message.
*) echo "invalid option";;
esac
done
| true |
e173ae8f1becd5417d4e0a179de6c720aa9493b9 | Shell | joatmon08/2018-cisco-devnet-create | /tests/contract/fixtures/install-consul.sh | UTF-8 | 1,006 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# Exit 0 when every named package is already installed according to yum,
# exit 1 otherwise (including when yum itself is unavailable).
isinstalled() {
  yum list installed "$@" >/dev/null 2>&1 || return 1
}
# If the named systemd unit is already active, report it; otherwise try to
# start it. Note: "$@" inside the echo flattens all arguments into the
# message text.
function isactive {
  if systemctl is-active "$@" >/dev/null 2>&1; then
    echo "$@ IS ON"
  else
    systemctl start "$@"
  fi
}
# Provision a CentOS box: install Consul 1.0.6, register a systemd unit that
# runs it as a dev-mode server, and fetch its handler configuration.
yum -y update && yum -y upgrade
echo "=== INSTALLING CONSUL ==="
wget https://releases.hashicorp.com/consul/1.0.6/consul_1.0.6_linux_amd64.zip
unzip consul_1.0.6_linux_amd64.zip
mv consul /usr/bin/
echo "" > /tmp/consul_watch.log
# Write the systemd unit; $'...' lets the literal newlines in the quoted
# string form the multi-line unit file.
echo $'[Unit]
Description=consul
[Service]
ExecStart=/usr/bin/consul agent -config-file /opt/consul/config/config.json -server -dev -ui -client 0.0.0.0
[Install]
WantedBy=multi-user.target' > /etc/systemd/system/consul.service
echo "=== CONFIGURE CONSUL ==="
yum -y install git
mkdir -p /opt/consul/config && chmod 777 /opt/consul/config
git clone -b artifacts https://github.com/joatmon08/docker-consul-handler.git /opt/consul/config
chmod +x /opt/consul/config/handler
echo "=== INSTALLED CONSUL ==="
exit 0
| true |
13cf229f4be77f5ae0d0e0d97547980f82948825 | Shell | svallero/cloud-init-context | /cloud-init-files/CE/servercontext/delete-node | UTF-8 | 1,566 | 4.09375 | 4 | [] | no_license | #!/bin/bash
#
# delete-offline-node -- by Dario Berzano <dario.berzano@cern.ch>
#
# Script meant to be run on the CE by an unprivileged user to delete a node
# passed on stdin from the list of active nodes, or to set it offline; SSH keys
# for the node are removed as well.
#
# Symlinking to delete-node or offline-node sets operation mode.
#
# This script is primarily invoked remotely by onevm-wn-drain.rb.
#
# The unprivileged user should have the right to create and delete nodes:
#
# qmgr -c 'set server managers += unpriv_user@ce.host.com'
#
# This script is meant to be the only allowed command of a SSH public key of the
# unprivileged user.
#
# In addition the same user must have the right to run ssh-hosts-keys from the
# same directory of this very script as root via sudo, so edit sudoers properly,
# for instance:
#
# qmanager ALL = NOPASSWD: /full/path/to/ssh-hosts-keys
#
#
# Functions
#
# Main function
# Read a worker-node name from stdin and either delete it from the Torque
# queue (plus its SSH host keys) or mark it offline, depending on the name
# this script was invoked under (delete-node / offline-node symlink).
function Main() {
  # Read WN name; -r keeps backslashes in the node name literal.
  read -r NodeName
  # Delete or set offline?
  Base=$(basename "$0")
  if [ "$Base" == 'delete-node' ] ; then
    # Remove node from queues, if exists; ignore error value
    qmgr -c "delete node $NodeName"
    # Invoke script to manage keys (it should feature a lock/wait mechanism)
    local ScriptDir=$(dirname "$0")
    ScriptDir=$(cd "$ScriptDir" ; pwd)
    sudo "$ScriptDir/ssh-hosts-keys" delete "$NodeName"
  elif [ "$Base" == 'offline-node' ] ; then
    # Set node offline
    pbsnodes -o "$NodeName"
  else
    echo "Symlink $0 to delete-node or offline-node to decide operation" >&2
    exit 1
  fi
}
#
# Entry point
#
Main "$@"
| true |
b3a38b6aca57fc60192d03df3c33bb1e0651ac2c | Shell | Yuri6037/better-pop | /install_light_exclusive.sh | UTF-8 | 1,588 | 2.53125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Copyright 2020 Yuri6037
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Each cd is guarded so that the build — and especially the 'rm -rf' in the
# cleanup below — can never run against the wrong directory.
cd /tmp || exit 1
#Install requirements
sudo apt install sassc ninja-build meson
#Download
git clone https://github.com/Yuri6037/Pop-full-light
#Run system-wide install
cd Pop-full-light || exit 1
mkdir build
cd build || exit 1
meson ..
sudo ninja install
cd ..
#Run flatpak system-wide install
cd flatpak/org.gtk.Gtk3theme.Pop-full-light || exit 1
sudo flatpak install org.gtk.Gtk3theme.Pop-full-light.flatpak
cd ../../
#Cleanup
cd ..
sudo rm -rf Pop-full-light
#Uninstall requirements
sudo apt purge sassc ninja-build meson
| true |
bdd26eb6914c48ebc6520896837b7fef104523f9 | Shell | stephanwehner/vanlug-mailman3 | /make_installation_script_html.sh | UTF-8 | 550 | 3.09375 | 3 | [] | no_license | # Convert md file to html, using kramdown / https://kramdown.gettalong.org/
# Usage is simple:
# $ source make_installation_script_html.sh
kramdown debian-8-installation.md > debian-8-installation.html
ed -s debian-8-installation.html <<-END_TOP
0a
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<style>
pre { border: 1px solid #aaa; padding: 5px; background: #deeff5; width: 99%}
</style>
</head>
<body>
.
$a
w
END_TOP
cat >> debian-8-installation.html <<-END_BOTTOM
</body>
</html>
END_BOTTOM
echo "Wrote file debian-8-installation.html"
| true |
9e4b1beef7a8081d836af01fead733c69a832201 | Shell | mustafaoguzz/phpvms | /.travis/deploy_script.sh | UTF-8 | 4,303 | 3.71875 | 4 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
if [ "$TRAVIS" = "true" ]; then
cd $TRAVIS_BUILD_DIR
if test "$TRAVIS_TAG"; then
PKG_NAME=$TRAVIS_TAG
VERSION=$TRAVIS_TAG
# Pass in the tag as the version to write out
php artisan phpvms:version --write $VERSION
else
echo "On branch $TRAVIS_BRANCH"
if [ "$TRAVIS_BRANCH" != "master" ] && [ "$TRAVIS_BRANCH" != "dev" ]; then
echo "Not on valid branch, exiting"
exit 0
fi
# Write the version out but place the branch ID in there
# This is only for the dev branch
BASE_VERSION=$(php artisan phpvms:version --base-only)
# This now includes the pre-release version, so "-dev" by default
PKG_NAME=${BASE_VERSION}
# Don't pass in a version here, just write out the latest hash
php artisan phpvms:version --write >VERSION
VERSION=$(cat VERSION)
fi
echo "Version: $VERSION"
echo "Package name: $TAR_NAME"
FILE_NAME="phpvms-$PKG_NAME"
TAR_NAME="$FILE_NAME.tar.gz"
echo "Cleaning files"
rm -rf vendor
composer install --no-dev --prefer-dist --no-interaction --verbose
# Leftover individual files to delete
declare -a remove_files=(
.git
.github
.sass-cache
.idea
.travis
docker
_ide_helper.php
.dockerignore
.dpl
.editorconfig
.eslintignore
.eslintrc
.php_cs
.php_cs.cache
.phpstorm.meta.php
.styleci.yml
.phpunit.result.cache
env.php
intellij_style.xml
config.php
docker-compose.yml
Makefile
phpcs.xml
phpunit.xml
phpvms.iml
Procfile
phpstan.neon
node_modules
composer.phar
vendor/willdurand/geocoder/tests
)
for file in "${remove_files[@]}"; do
rm -rf $file
done
find ./vendor -type d -name ".git" -print0 | xargs rm -rf
find . -type d -name "sass-cache" -print0 | xargs rm -rf
# clear any app specific stuff that might have been loaded in
find bootstrap/cache -mindepth 1 -maxdepth 1 -not -name '.gitignore' -print0 -exec rm -rf {} +
find storage/app -mindepth 1 -maxdepth 1 -not -name '.gitignore' -not -name public -not -name import -print0 -exec rm -rf {} +
find storage/app/public -mindepth 1 -maxdepth 1 -not -name '.gitignore' -not -name avatars -not -name uploads -print0 -exec rm -rf {} +
find storage/app/public/avatars -mindepth 1 -not -name '.gitignore' -print0 -exec rm -rf {} +
find storage/app/public/uploads -mindepth 1 -not -name '.gitignore' -print0 -exec rm -rf {} +
find storage/debugbar -mindepth 1 -not -name '.gitignore' -print0 -exec rm -rf {} +
find storage/docker -mindepth 1 -not -name '.gitignore' -print0 -exec rm -rf {} +
find storage/framework/cache -mindepth 1 -not -name '.gitignore' -print0 -exec rm -rf {} +
find storage/framework/sessions -mindepth 1 -not -name '.gitignore' -print0 -exec rm -rf {} +
find storage/framework/views -mindepth 1 -not -name '.gitignore' -print0 -exec rm -rf {} +
find storage/logs -mindepth 1 -not -name '.gitignore' -print0 -exec rm -rf {} +
mkdir -p storage/app/public/avatars
mkdir -p storage/app/public/uploads
mkdir -p storage/framework/cache
mkdir -p storage/framework/sessions
mkdir -p storage/framework/views
# Regenerate the autoloader and classes
composer dump-autoload
make clean
cd /tmp
tar -czf $TAR_NAME -C $TRAVIS_BUILD_DIR/../ phpvms
sha256sum $TAR_NAME >"$TAR_NAME.sha256"
echo "Uploading to S3"
mkdir -p $TRAVIS_BUILD_DIR/build
cd $TRAVIS_BUILD_DIR/build
mv "/tmp/$TAR_NAME" "/tmp/$TAR_NAME.sha256" .
artifacts upload --target-paths "/" $TAR_NAME $TRAVIS_BUILD_DIR/VERSION $TAR_NAME.sha256
# Upload the version for a tagged release. Move to a version file in different
# tags. Within phpVMS, we have an option of which version to track in the admin
if test "$TRAVIS_TAG"; then
echo "Uploading release version file"
cp "$TRAVIS_BUILD_DIR/VERSION" release_version
artifacts upload --target-paths "/" release_version
else
echo "Uploading ${TRAVIS_BRANCH}_version file"
cp $TRAVIS_BUILD_DIR/VERSION ${TRAVIS_BRANCH}_version
artifacts upload --target-paths "/" ${TRAVIS_BRANCH}_version
fi
curl -X POST --data "{\"content\": \"A new build is available at http://downloads.phpvms.net/$TAR_NAME ($VERSION)\"}" -H "Content-Type: application/json" $DISCORD_WEBHOOK_URL
fi
| true |
5f2f537ea094849236f0aba9881a81bdfbb1e8d5 | Shell | arcdigital/ptsnaptest | /src/pterodactyl/scripts/manual-install | UTF-8 | 1,334 | 4.1875 | 4 | [] | no_license | #!/bin/sh
. $SNAP/utilities/php-utilities
. $SNAP/utilities/mysql-utilities
. $SNAP/utilities/pterodactyl-utilities
COMMAND="pterodactyl-panel.manual-install"
print_usage()
{
echo "Usage:"
echo " $COMMAND -h"
echo " Display this help message."
echo ""
echo " $COMMAND <username> <password>"
echo " Install Pterodactyl Panel, creating the admin user with the provided"
echo " credentials."
}
while getopts ":h" opt; do
case $opt in
h)
print_usage
exit 0
;;
\?)
echo "Invalid option: -$OPTARG" >&2
exit 1
;;
esac
done
shift $((OPTIND-1))
if [ $# != 2 ]; then
echo "Expected two parameters. Run '$COMMAND -h' for help." >&2
exit 1
fi
if [ $(id -u) -ne 0 ]; then
echo "This utility needs to run as root"
exit 1
fi
username=$1
password=$2
# We can't do anything until PHP and MySQL are up and running
wait_for_php
wait_for_mysql
# Now we can use 'occ maintenance:install'
mysql_pterodactyl_password="$(mysql_get_pterodactyl_password)"
if [ -n "$mysql_pterodactyl_password" ]; then
occ maintenance:install \
--database="mysql" \
--database-name="pterodactyl" \
--database-user="pterodactyl" \
--database-host="localhost:$MYSQL_SOCKET" \
--database-pass="$mysql_pterodactyl_password" \
--data-dir="$PTERODACTYL_DATA_DIR" \
--admin-user="$username" \
--admin-pass="$password"
fi
| true |
868c834d9a71cd4f5ac2e0597b3d508e4252ef9f | Shell | PagesjaunesMobile/SGDB-bitrise | /step.sh | UTF-8 | 851 | 3.5625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Launching SGBD-bitrise step
echo "*************************"
echo "****** SGBD-bitrise *****"
echo "*************************"
# Testing the type of SGBD choosed
# SGBD_DB_TARGET have to be initialiaze en the step.yml file
if [[ ${SGBD_DB_TARGET} == "MYSQL" ]]
then
echo " > The SGBD target choosed : ${SGBD_DB_TARGET}"
echo " > Recovery Environment Variables..."
echo " - Timestamp ${ISO_DATETIME}"
echo " - BuildVersion : ${PJ_BUNDLE_VERSION}"
echo " - BuildNumber : ${PJ_BUNDLE_BUILD_NUMBER}"
echo " - Git branch : ${BITRISE_GIT_BRANCH}"
echo " > Inserting data into database..."
php insert.php ${SGBD_DB_TARGET} ${PJ_BUNDLE_VERSION} ${PJ_BUNDLE_BUILD_NUMBER} ${BITRISE_GIT_BRANCH}
else
echo "There is no SGBD target choosed."
exit 1
fi | true |
9c82c6438b70d861850e08daab0368e9fed51207 | Shell | brettbeeson/pi-timolo | /source/rclone-tl-move-s3.sh | UTF-8 | 2,050 | 4.09375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# $1: cam-name (default = hostname)
progName=$(basename -- "$0")
if [ $# -eq 0 ]; then
tmv_cam=$(hostname)
elif [ $# -eq 1 ]; then
tmv_cam=$1
else
echo Wrong or no arguments supplied 1>&2
exit 2
fi
# ---------------------------------------
rcloneName="s3tmv" # Name of Remote Storage Service
syncRoot="/home/pi/tmv/daily-photos" # Root Folder to Start
remoteDir="tmv.brettbeeson.com.au/$tmv_cam/daily-photos" # Destination Folder on Remote
rcloneParam="move -L --s3-acl=public-read" # -L follow symlinks. other options Eg sync, copy, move
# ---------------------------------------
echo ----------- SETTINGS -------------
echo tmv_cam : $tmv_cam
echo lockFileCheck : $lockFileCheck
echo rcloneName : $rcloneName
echo syncRoot : $syncRoot
echo remoteDir : $remoteDir
echo rcloneParam : $rcloneParam # sync|copy|move
echo ---------------------------------
cd $syncRoot # Change to local rclone root folder
if pidof -o %PPID -x "$progName"; then
echo "WARN - $progName Already Running. Only One Allowed." 1>&2
else
if [ -f /usr/bin/rclone ]; then # Check if rclone installed
rclone version # Display rclone version
if [ ! -d "$syncRoot" ] ; then # Check if Local sync Folder Exists
echo ERROR : syncRoot="syncRoot" Does Not Exist. 1>&2
exit 1
fi
/usr/bin/rclone listremotes | grep "$rcloneName" # Check if remote storage name exists
if [ $? == 0 ]; then # Check if listremotes found anything
echo "INFO : /usr/bin/rclone $rcloneParam -v $syncRoot $rcloneName:$remoteDir"
/usr/bin/rclone $rcloneParam $syncRoot $rcloneName:$remoteDir
if [ ! $? -eq 0 ]; then
echo ERROR : rclone $rcloneParam Failed 1>&2
fi
else
echo ERROR : rcloneName=$rcloneName Does not Exist. Check rclone listremotes 2>&1
rclone listremotes
fi
else
echo "ERROR : /usr/bin/rclone Not Installed." 2>&1
fi
fi
| true |
0f5b0504b2cff01a6f96b728dd3ea4d94366b423 | Shell | o-ran-sc/nonrtric | /service-exposure/deploy_rapp.sh | UTF-8 | 2,074 | 3 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
#
# ============LICENSE_START=======================================================
# Copyright (C) 2022 Nordix Foundation.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# ============LICENSE_END=========================================================
#
export host=$(kubectl get nodes -o wide | tail -1 | sed s'/ */ /'g | cut -f6 -d' ')
if [ -z "$1" ]
then
echo "No argument supplied"
exit 1
fi
rapp=$1
echo "Deploying application..."
echo "------------------------"
curl http://$host:31570/install?chart=$rapp
echo "\n"
echo "Waiting for pod to start..."
echo "---------------------------"
kubectl wait deployment -n istio-nonrtric $rapp --for=condition=available --timeout=90s
echo ""
echo "Checking pod status..."
echo "----------------------"
kubectl get pods -n istio-nonrtric
#kubectl get pods --show-labels -n istio-nonrtric
#if [ "$rapp" == "rapp-helloworld-invoker1" ] || [ "$rapp" == "rapp-helloworld-invoker2" ]; then
if [ "$rapp" != "rapp-helloworld-provider" ]; then
echo ""
echo "Inspect the log for $rapp..."
echo "-----------------------------------------------"
kubectl logs -l app.kubernetes.io/name=$rapp -n istio-nonrtric
fi
if [ "$rapp" = "rapp-helloworld-invoker1" ]; then
echo ""
echo "Inspect the log for $rapp jwt sidecar..."
echo "-----------------------------------------------------------"
kubectl logs -l app.kubernetes.io/name=$rapp -c jwt-proxy -n istio-nonrtric
fi
| true |
8027c299319f44a99bd8367559dad4855981f69a | Shell | whiskaz/devbox | /dep.sh | UTF-8 | 489 | 2.515625 | 3 | [] | no_license | #!/bin/sh
## Constants
readonly PIP="9.0.3"
readonly ANSIBLE="2.7"
yum install -y epel-release git
yum install -y python2-pip
python -m pip install --disable-pip-version-check --upgrade --force-reinstall \
pip==${PIP}
python -m pip install --disable-pip-version-check --upgrade --force-reinstall \
setuptools
python -m pip install --disable-pip-version-check --upgrade --force-reinstall \
pyOpenSSL \
requests \
netaddr \
jmespath \
ansible==${1-${ANSIBLE}} | true |
77678844a8e04f20db4b70d33c5490ea0e4eb496 | Shell | turquoise-hexagon/dots | /wm/.local/bin/move | UTF-8 | 1,933 | 3.984375 | 4 | [
"0BSD"
] | permissive | #!/bin/sh
#
# move - move the focused window in a given direction
die() {
printf '%s\n' \
"${1:-usage : ${0##*/} <east|west|north|south>}" >&2
exit 1
}
json() {
arg=$1; shift
# filthy json parsing
var=${*#*\"$arg\":}
var=${var%%[,\}]*}
printf '%s\n' "$var"
}
readonly PERCENT=5
case $* in
east) f_dim=width ; t_dim=height; sign=+;;
west) f_dim=width ; t_dim=height; sign=-;;
north) f_dim=height; t_dim=width ; sign=-;;
south) f_dim=height; t_dim=width ; sign=+;;
*)
die
esac
case $(json state "$(bspc query -T -n)") in
'"tiled"')
# try to replace a preselection or to move the window normally
{
bspc node -n "$*.!automatic" ||
bspc node -s "$*.tiled"
} || {
node=$(bspc query -N -n)
while parent=$(bspc query -N -n "$node#@parent"); do
# compare dimensions to parent
[ \
$((
$(json "$t_dim" "$(bspc query -T -n "$parent")") >
$(json "$t_dim" "$(bspc query -T -n "$node")")
)) -eq 1 \
] && {
# climb up the tree using receptacles
bspc node "$parent" -p "$*" -i
bspc node "$node" -n \
"$(bspc query -N "$parent#@parent" -n .descendant_of.leaf.!window)"
bspc node "$parent" -B
break
}
node=$parent
done
}
;;
'"floating"')
# get a percentage of the resolution
: $((var = $(json "$f_dim" "$(bspc query -T -m)") * PERCENT / 100))
case $f_dim in
width) x=$sign$var; y=0;;
height) y=$sign$var; x=0;;
esac
bspc node -v "$x" "$y"
;;
*) exit 1
esac
cursor
: # fix exit status
| true |
51bb222560afb510249e918b4d78fb7cf105fa44 | Shell | yuri-tolstov/projects | /SDK/tilera/mde-4.2.0/examples/getting_started/hello_world_java/build.exec | UTF-8 | 295 | 3.484375 | 3 | [] | no_license | #!/bin/sh
BACKGROUND=0
if [ "$1" == "-background" ] ; then
BACKGROUND=1
shift
fi
COMMAND=$1
shift
ARGS="$*"
PWD=`pwd`
echo "build.exec: pwd = ${PWD}"
echo ${COMMAND} ${ARGS}
if [ ${BACKGROUND} -eq 1 ] ; then
${COMMAND} ${ARGS} &
else
${COMMAND} ${ARGS}
fi
echo "build.exec: Done."
| true |
99b60b17f19052887dd977d6b9b5946987c536cc | Shell | ew0s/Operating_Systems20 | /lab3-main/task6/handler.sh | UTF-8 | 322 | 3.546875 | 4 | [] | no_license | #!/bin/bash
result=1
command="+"
TERM()
{
echo "Finish"
exit 0
}
SIG1()
{
command="+"
}
SIG2()
{
command="*"
}
trap 'TERM' SIGTERM
trap 'SIG1' USR1
trap 'SIG2' USR2
while true;
do
case "$command" in
"+")
result=$(($result + 2))
;;
"*")
result=$(($result * 2))
;;
esac
echo $result
sleep 1
done
| true |
9bfb9eb0cf175ab99a7efee9603575d5c253cca8 | Shell | texierp/meta-warp7-distro | /recipes-mender/state-scripts/files/Sync_Enter_00 | UTF-8 | 377 | 3.140625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
log() {
echo "mender:$*" >&2
}
log "$(cat /etc/mender/artifact_info): $(basename "$0") was called!"
RC=0
UPD=`dbus-send --system \
--dest=org.mender.updater \
--print-reply \
--type=method_call \
/ \
local.ServiceQtUpdater.CMenderQtUpdater.syncEnterState | \
grep "byte" | \
awk '{print $2}'`
RC=$UPD
log "Returning $RC from $0 state script"
exit $RC
| true |
bd35cd98de9aeeeae2b4f92a0f4971715f00a3e6 | Shell | FauxFaux/debian-control | /m/movim/movim_0.14.1~rc2-2_all/postrm | UTF-8 | 1,641 | 3.46875 | 3 | [] | no_license | #!/bin/sh
set -e
if test -f /usr/share/debconf/confmodule; then
. /usr/share/debconf/confmodule
fi
# Do dbconfig-common removal stuff
if test -f /usr/share/dbconfig-common/dpkg/postrm; then
. /usr/share/dbconfig-common/dpkg/postrm
dbc_go movim "$@"
fi
# custom actions
case $1 in
remove)
;;
purge)
# unregister
rm -f /var/lib/movim/dbconfig.inc
if which ucf >/dev/null 2>&1; then
ucf --purge /var/lib/movim/dbconfig.inc
ucfr --purge movim /var/lib/movim/dbconfig.inc
fi
# remove state/dynamic data
rm -rf /var/lib/movim /var/log/movim /var/cache/movim
;;
upgrade|disappear|failed-upgrade|abort-install|abort-upgrade)
;;
*)
echo >&2 "postrm called with unknown subcommand '$1'"
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
# Automatically added by dh_installsystemd/11.5.4
if [ -d /run/systemd/system ]; then
systemctl --system daemon-reload >/dev/null || true
fi
# End automatically added section
# Automatically added by dh_installsystemd/11.5.4
if [ "$1" = "remove" ]; then
if [ -x "/usr/bin/deb-systemd-helper" ]; then
deb-systemd-helper mask 'movim.service' >/dev/null || true
fi
fi
if [ "$1" = "purge" ]; then
if [ -x "/usr/bin/deb-systemd-helper" ]; then
deb-systemd-helper purge 'movim.service' >/dev/null || true
deb-systemd-helper unmask 'movim.service' >/dev/null || true
fi
fi
# End automatically added section
# Automatically added by dh_installdebconf/11.5.4
if [ "$1" = purge ] && [ -e /usr/share/debconf/confmodule ]; then
. /usr/share/debconf/confmodule
db_purge
fi
# End automatically added section
exit 0
| true |
36d0036de58c7d79e9d76c789b2c54eef0f56693 | Shell | nesi/ARCS-systems | /dataFabricScripts/iRODS/utils/updateIrodsMapfile.sh | UTF-8 | 1,189 | 3.96875 | 4 | [] | no_license | #!/bin/sh
# updateIrodsMapfile.sh Updates the iRODS mapfile used by Griffin on "slave"
# servers; required for versions of iRODS below 2.3.
# Should be called at 30-min intervals by 'rods' cron.
# Graham Jenkins <graham@vpac.org> Rev: 20110124
# Usage, permissions
if [ ! -w "$1" ] ; then
( echo "Usage: `basename $0` mapfile"
echo " e.g.: `basename $0` /opt/griffin/irods-mapfile"
echo " Note: Designated file must exist and be writeable!" ) >&2; exit 2
elif ! touch $1.NEW ; then
logger -t `basename $0` "Can't write to: $1.NEW" exit 1
fi
# Generate the list
if ! `iquest --no-page "\"%s\" %s@%s" \
"select USER_DN,USER_NAME,USER_ZONE where USER_DN like '/%'" \
>$1.NEW 2>/dev/null` ; then
logger -t `basename $0` "iquest command failed!"; exit 1
fi
# Update the file if necessary
if ! `cmp -s $1.NEW $1`; then
mv -f $1.NEW $1 && logger -t `basename $0` "Updated: $1" && exit 0
logger -t `basename $0` "Update failed!"; exit 1
else
rm -f $1.NEW >/dev/null 2>&1; exit 0
fi
| true |
599cbf4c82c31cf3bb63900971fb013948e104e5 | Shell | hey-leon/prefs | /dots/.config/zsh/04_utils.sh | UTF-8 | 133 | 2.546875 | 3 | [] | no_license | if [[ -f $NVM_DIR/nvm.sh ]]; then
source $NVM_DIR/nvm.sh
fi
if [[ -f $RVM_DIR/scripts/rvm ]]; then
source $RVM_DIR/scripts/rvm
fi
| true |
60f4cc897f13066fb8652aa296fd6c9f76983595 | Shell | nicuveo/TOOLS_PP | /check/check.sh | UTF-8 | 718 | 3.421875 | 3 | [] | no_license | #! /usr/bin/env bash
for header in "$(dirname $0)"/../include/*/*/*.hh ; do
CODE=`cpp -D TOOLS_PP_UNIT_TESTS $header | tr -d ' ' | sed -n 's/^TEST://p'`
TESTS_N=0
TESTS_R=0
for line in $CODE; do
a=`echo $line | sed -n 's/\(.*\)==\(.*\)/_\1_/p'`
b=`echo $line | sed -n 's/\(.*\)==\(.*\)/_\2_/p'`
if [ "$a" == "$b" -a ! -z "$a" ] ; then
TESTS_R=$(( $TESTS_R + 1 ))
fi
TESTS_N=$(( $TESTS_N + 1 ))
done
printf "%-32s" $(basename "$header")
if [ "$TESTS_R" -eq "$TESTS_N" ] ; then
echo -ne "[\033[1;32mOK\033[0m]"
else
echo -ne "[\033[1;31mKO\033[0m]"
fi
printf " (%2s / %2s)" "$TESTS_R" "$TESTS_N"
echo
done
| true |
92005f65a64b33dbcc3582500a4290f9b1f92107 | Shell | MrMebelMan/Tinkering | /blackjack.sh | UTF-8 | 5,584 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# blackjack.sh
clear
declare DEBUG="OFF" RET="" PLAYER_SKIP="" BOT_SKIP=""
declare -ai DECK PLAYER_DECK BOT_DECK
declare -i GAME_GOAL=42 PLAYER_VALUE=0 BOT_VALUE=0 BOT_CAUTION_LEVEL=$(((RANDOM % 12)+1))
declare -i IN_PROGRESS=0 END=1 TAKE=0 NO_TAKE=1 BOT_DECISION=$NO_TAKE
declare -i GAME_STATE=$IN_PROGRESS DECK_CNT=0 DECK_SIZE=0 PLAYER_DECK_SIZE=0 BOT_DECK_SIZE=0
declare -A CARD_NAMES=( ['1']='Ace' ['2']='2' ['3']='3' ['4']='4' ['5']='5' ['6']='6'
['7']='7' ['8']='8' ['9']='9' ['10']='Jack' ['11']='Queen' ['12']='King' )
declare NAMES=( "Itzhak Weissman" "Lily Shots" "Serah Fields" "Sid Zilla"
"Max Gingerbeard" "John Sweeps" "Chocola Nakimato" "Hortensia Harrison" )
declare BOT_NAME=${NAMES[$((RANDOM%${#NAMES[@]}))]}
echo "Game goal: $GAME_GOAL."
echo "You are playing against $BOT_NAME."
if [ $BOT_CAUTION_LEVEL -gt 9 ]; then
echo "$BOT_NAME looks very nervous."
elif [ \( $BOT_CAUTION_LEVEL -gt 6 \) -a \( $BOT_CAUTION_LEVEL -le 9 \) ]; then
echo "$BOT_NAME seems to be very cautious."
elif [ \( $BOT_CAUTION_LEVEL -gt 3 \) -a \( $BOT_CAUTION_LEVEL -le 6 \) ]; then
echo "$BOT_NAME looks confident."
elif [ \( $BOT_CAUTION_LEVEL -gt 0 \) -a \( $BOT_CAUTION_LEVEL -le 3 \) ];then
echo "$BOT_NAME is feeling risky and adventurous!"
fi
echo
function shuffle_deck ()
{
TIMES=$((RANDOM%10+1))
echo "The dealer shuffles the deck $TIMES times without even looking what they're doing. Cool!"
echo
SHUF_CNT=$TIMES
while [ $((--SHUF_CNT)) -ge 0 ]
do
for i in $(seq 0 $((DECK_SIZE-1)))
do
POS1=$((RANDOM % DECK_SIZE))
POS2=$((RANDOM % DECK_SIZE))
[ $POS1 -eq $POS2 ] && continue
TMP=${DECK[$POS1]}
DECK[$POS1]=${DECK[$POS2]}
DECK[$POS2]=$TMP
done
done
}
function pop ()
{
DECK[$((DECK_SIZE-1))]=-1
((--DECK_SIZE))
}
function fill_deck ()
{
for CARD in {1..12}
do
for SUIT in {1..4}
do
DECK[$((DECK_CNT++))]=CARD
done
done
DECK_SIZE=${#DECK[@]}
}
function show_deck ()
{
for i in ${!DECK[*]}
do
[ \( ${DECK[$i]} -eq 1 \) -o \( ${DECK[$i]} -ge 10 \) ] && echo ${CARD_NAMES[${DECK[$i]}]} || echo ${DECK[$i]}
done
}
function get_card_name ()
{
if [ \( $1 -eq 1 \) -o \( $1 -ge 10 \) ]; then
RET=${CARD_NAMES[$1]}
else
RET=$1
fi
}
function player_hand_push ()
{
PLAYER_DECK[$((PLAYER_DECK_SIZE++))]=$1
[ \( $1 -eq 1 \) -a \( $((PLAYER_VALUE + 11)) -le $GAME_GOAL \) ] && PLAYER_VALUE=$((PLAYER_VALUE + 11)) || \
PLAYER_VALUE=$((PLAYER_VALUE + $1))
[ $PLAYER_VALUE -gt $GAME_GOAL ] && echo "Oh no! You have $PLAYER_VALUE points. $BOT_NAME wins!" && GAME_STATE=$END
}
function bot_hand_push ()
{
BOT_DECK[$((BOT_DECK_SIZE++))]=$1
[ \( $1 -eq 1 \) -a \( $((BOT_VALUE + 11)) -le $GAME_GOAL \) ] && BOT_VALUE=$((BOT_VALUE + 10)) || \
BOT_VALUE=$((BOT_VALUE+$1))
[ $BOT_VALUE -gt $GAME_GOAL ] && echo && echo "Oh YES! $BOT_NAME took too much! He has $BOT_VALUE points. You win!" && GAME_STATE=$END
}
function deal_player ()
{
TOP_CARD=${DECK[$((DECK_SIZE-1))]}; pop
get_card_name $TOP_CARD
echo "You received a card: $RET "
player_hand_push $TOP_CARD
}
function deal_bot ()
{
TOP_CARD=${DECK[$((DECK_SIZE-1))]}; pop
get_card_name $TOP_CARD
echo -n "The Dealer deals $BOT_NAME a card."
bot_hand_push $TOP_CARD
if [ $BOT_VALUE -eq $GAME_GOAL ]; then
echo " $BOT_NAME grins and rubs one's hands."
elif [ \( $BOT_VALUE -gt $((GAME_GOAL - (GAME_GOAL/8) )) \) -a \( $BOT_VALUE -lt $GAME_GOAL \) ]; then
echo " $BOT_NAME smiles."
fi
echo
}
function deal_starting_cards ()
{
deal_player
deal_player
deal_bot
deal_bot
}
function bot_decide ()
{
[ "$DEBUG" = "ON" ] && echo "DEBUG: \$BOT_VALUE = $BOT_VALUE, BOT_CAUTION_LEVEL = $BOT_CAUTION_LEVEL"
[ \( $BOT_VALUE -gt $(( GAME_GOAL - (BOT_CAUTION_LEVEL/2) )) \) -a \( $BOT_VALUE -ne $GAME_GOAL \) ] && echo -n 'After some hesitation, '
if [ $BOT_VALUE -gt $((GAME_GOAL - BOT_CAUTION_LEVEL)) ]; then
BOT_DECISION=$NO_TAKE
echo -n "$BOT_NAME decides to skip their turn"
[ $BOT_VALUE -eq $GAME_GOAL ] && echo " and smiles." || echo "."
BOT_SKIP="YES"
else
BOT_DECISION=$TAKE
echo "$BOT_NAME decides to take another card."
fi
}
function check_results ()
{
if [ $PLAYER_VALUE -gt $BOT_VALUE ]; then
echo "Congratulations! You win!"
echo "You: $PLAYER_VALUE points."
echo "$BOT_NAME: $BOT_VALUE points."
elif [ $BOT_VALUE -gt $PLAYER_VALUE ]; then
echo "Oh no, you lose!"
echo "You: $PLAYER_VALUE points"
echo "$BOT_NAME: $BOT_VALUE points."
elif [ $PLAYER_VALUE -eq $BOT_VALUE ]; then
echo "Draw! You both have $PLAYER_VALUE points!"
fi
GAME_STATE=$END
}
function show_hand ()
{
echo -n "Your hand: "
for i in $(seq 0 $((PLAYER_DECK_SIZE-1)))
do
echo -n "${CARD_NAMES[${PLAYER_DECK[$i]}]}, "
done | sed 's/, $//'
echo
echo
}
function run_game_cycle ()
{
read dummy
while [ $GAME_STATE != $END ]
do
clear
while :
do
show_hand
printf "1) Take another card.\n2) Skip this turn.\n3) Quit.\n\n"
read REPLY
if [ "$REPLY" = 1 ]; then
deal_player
break
elif [ "$REPLY" = 2 ]; then
PLAYER_SKIP="YES"
break
elif [ "$REPLY" = 3 ]; then
GAME_STATE=$END;
break
else
clear
fi
done
[ $GAME_STATE = $END ] && break
bot_decide
[ $BOT_DECISION = $TAKE ] && deal_bot
[ \( "$PLAYER_SKIP" = "YES" \) -a \( "$BOT_SKIP" = "YES" \) ] && check_results
read dummy
done
}
# Main
fill_deck
shuffle_deck
deal_starting_cards
run_game_cycle
exit 0
| true |
77a16736e1f633f1971351e977c8832dc333e28f | Shell | jensp/Arch-Linux-on-i586 | /extra/xulrunner/PKGBUILD | UTF-8 | 2,314 | 2.5625 | 3 | [] | no_license | # Maintainer: Jens Pranaitis <jens@chaox.net>
# Contributor: Alexander Baldeck <alexander@archlinux.org>
# Contributor: Jan de Groot <jgc@archlinux.org>
pkgname=xulrunner
pkgver=1.9.1.3
_ffoxver=3.5.3
pkgrel=2
pkgdesc="Mozilla Runtime Environment"
arch=(i586 i686 x86_64)
license=('MPL' 'GPL' 'LGPL')
depends=('gtk2>=2.16.5' 'gcc-libs>=4.4.1' 'libidl2>=0.8.13' 'mozilla-common' 'nss>=3.12.3.1' 'libxt' 'hunspell>=1.2.8' 'startup-notification>=0.10' 'mime-types' 'dbus-glib>=0.80' 'alsa-lib>=1.0.20')
makedepends=('zip' 'pkgconfig' 'diffutils' 'python' 'libgnomeui')
optdepends=('libgnomeui')
provides=(gecko-sdk)
replaces=(gecko-sdk)
url="http://wiki.mozilla.org/XUL:Xul_Runner"
source=(http://releases.mozilla.org/pub/mozilla.org/firefox/releases/${_ffoxver}/source/firefox-${_ffoxver}.source.tar.bz2
mozconfig
100-system-hunspell-corrections.patch
mozilla-pkgconfig.patch
fix-mozilla-launcher.patch
mozilla-ps-pdf-simplify-operators.patch
nsThreadUtils.patch
xulrunner-version.patch)
md5sums=('ca167e69180ab1230aea0763da270a95'
'ed5045e94a7ef2a2c7d0c8c54581cd63'
'5efd6772ed0ecf8eddec5d5650191d3c'
'd806ffcd9fa8d178ebfa9057c1221fbf'
'63eee2d1da3b43c9d604f2253f242f40'
'13dca58c04e62a8916691c63c5c492a0'
'ccc3a0672c783c043434f256fbd38c86'
'd8a3066a3d9039b6f375316ed763e8f8')
build() {
cd "${srcdir}/mozilla-1.9.1"
cp "${srcdir}/mozconfig" .mozconfig
#Upstream patch. Still not applied to 1.9.0.1
patch -Np1 -i "${srcdir}/mozilla-ps-pdf-simplify-operators.patch" || return 1
#fix build with system hunspell - gentoo
patch -Np0 -i "${srcdir}/100-system-hunspell-corrections.patch" || return 1
#fix libdir/sdkdir - fedora - with local modifications
patch -Np1 -i "${srcdir}/mozilla-pkgconfig.patch" || return 1
#Fix stub launcher - archlinux
patch -Np0 -i "${srcdir}/fix-mozilla-launcher.patch" || return 1
#Fix epiphany crashes - requires epiphany rebuild
patch -Np1 -i "${srcdir}/nsThreadUtils.patch" || return 1
#Force installation to the same path for every version
patch -Np1 -i "${srcdir}/xulrunner-version.patch" || return 1
export LDFLAGS="$LDFLAGS -Wl,-rpath,/usr/lib/xulrunner-1.9.1"
make -j1 -f client.mk build MOZ_MAKE_FLAGS="$MAKEFLAGS" || return 1
make -j1 DESTDIR="${pkgdir}" install || return 1
}
| true |
9d8a3c37a07fffd880bfd1f2f3511232196cec0d | Shell | ShalokShalom/apps | /scala/PKGBUILD | UTF-8 | 1,031 | 2.75 | 3 | [] | no_license |
pkgname=scala
pkgver=2.13.6
_pkgver=2.13.6
pkgrel=1
pkgdesc="Acronym for Scalable Language, running on the JVM. Java and Scala classes"
arch=('x86_64')
url="https://www.scala-lang.org"
license=('custom')
depends=('sh' 'java-environment')
source=("https://www.scala-lang.org/files/archive/scala-${_pkgver}.tgz")
md5sums=('6aabf8704dfd17ea9f1b297a8bcfb04c')
package() {
cd scala-${_pkgver}
install -d ${pkgdir}/usr/{bin,share} ${pkgdir}/usr/share/man/man1 ${pkgdir}/usr/share/scala/{bin,lib}
cp -r lib ${pkgdir}/usr/share/scala/
cp -r man ${pkgdir}/usr/share/
install -m 755 bin/{fsc,scala,scalac,scalap,scaladoc} ${pkgdir}/usr/share/scala/bin
install -D -m0644 doc/LICENSE.md ${pkgdir}/usr/share/licenses/${pkgname}/LICENSE
ln -s ../share/scala/bin/fsc ${pkgdir}/usr/bin/fsc
ln -s ../share/scala/bin/scala ${pkgdir}/usr/bin/scala
ln -s ../share/scala/bin/scalac ${pkgdir}/usr/bin/scalac
ln -s ../share/scala/bin/scalap ${pkgdir}/usr/bin/scalap
ln -s ../share/scala/bin/scaladoc ${pkgdir}/usr/bin/scaladoc
}
| true |
43cefeb133f9ce9aad7a99e21edf60bd2c0dc94b | Shell | ImperialCollegeLondon/q4C-2018 | /fourc/FC03_alignFullparal.sh | UTF-8 | 1,518 | 3.421875 | 3 | [] | no_license | #@ This code contains sample scripts for analysis of data as described in Melamed, Yaguchi et al.,
#@ "The human leukemia virus HTLV-1 alters the structure and transcription of host chromatin *in cis*" eLife, 2018
#@ FC03_alignFullparal - alignment of single read data (R1 and R2 aligned separately) against
# mixed reference of human + virus.
# input variables: $outputdir - <path/to/output/>
# $filename - <file/path/to/align>.fq.gz
# require adding samtools to path (e.g. version 0.1.19)
# requires creating an index from concatanated hg19, HTLV-1 genomes (AB513134 used here)
INDEX_PATH=<PATH/TO/BOWTIE/INDEX>
date
echo " "
echo $outputdir
echo $filename
## copy file to location
cp $filename .
gunzip *.gz
ls -l -h -R
#full reference ONLY, BOWTIE2 ONLY
for mate in *.fq
do
bamName=`echo $mate | sed 's#.fq#_full.bam#g' - `
samName=`echo $mate | sed 's#.fq#_full.sam#g' - `
echo "mate is "$mate
echo "output is "$bamName
if [ ! -f $outputdir$bamName".gz" ]; then
echo "using bowtie2"
bowtie2 \
-x $INDEX_PATH \
-k 100 \
-L 20 \
-p 20 \
--reorder \
-U $mate \
| samtools view -bS - > $bamName
samtools view -F 256 $bamName > $samName
gzip $samName
gzip $bamName
else
echo $bamName" already exists, skipping."
fi
done
date
ls -l -h -R
date
cp *.bam.gz $outputdir
cp *.sam.gz $outputdir
echo " done!" | true |
f433e0ea53c1c15c45957503435cf5b1c58e0069 | Shell | iomz/dotfiles | /dot_config/zsh/rc.d/20-alias.zsh | UTF-8 | 8,506 | 3.359375 | 3 | [
"Unlicense"
] | permissive | #!/usr/bin/env zsh
#
# Commands, funtions and aliases
## Always set aliases _last,_ so they don't get used in function definitions.
###
## type '-' to return to your previous dir.
#alias -- -='cd -q -'
#alias -- b='-'
## '--' signifies the end of options. Otherwise, '-=...' would be interpreted as
## a flag.
## These aliases enable us to paste example code into the terminal without the
## shell complaining about the pasted prompt symbol.
#alias %= \$=
## zmv lets you batch rename (or copy or link) files by using pattern matching.
## https://zsh.sourceforge.io/Doc/Release/User-Contributions.html#index-zmv
#autoload -Uz zmv
#alias zmv='zmv -Mv'
#alias zcp='zmv -Cv'
#alias zln='zmv -Lv'
## Note that, unlike with Bash, you do not need to inform Zsh's completion system
## of your aliases. It will figure them out automatically.
## Set $PAGER if it hasn't been set yet. We need it below.
## `:` is a builtin command that does nothing. We use it here to stop Zsh from
## evaluating the value of our $expansion as a command.
#: ${PAGER:=less}
## Associate file name .extensions with programs to open them.
## This lets you open a file just by typing its name and pressing enter.
## Note that the dot is implicit; `gz` below stands for files ending in .gz
#alias -s {css,gradle,html,js,json,md,patch,properties,txt,xml,yml}=$PAGER
#alias -s gz='gzip -l'
#alias -s {log,out}='tail -F'
## Use `< file` to quickly view the contents of any text file.
#READNULLCMD=$PAGER # Set the program to use for this.
#_error() { builtin print -P -- "%F{red}Error%f: %F{white}${1}%f" >&2; }
#_info() { builtin print -P -- "%F{green}==>%f %F{white}${1}%f"; }
# +────────────────+
# │ UTIL FUNCTIONS │
# +────────────────+
#_clone_if_absent() { [[ ! -d $1 ]] && git clone "$1" "$2/$(basename "$1" .git)"; }
#_edit() { ${EDITOR:-nvim} $1 }
#_mkfile() { builtin echo "#!/usr/bin/env ${2}" > "$3.$1" && chmod +x "$3.$1"; rehash; $EDITOR "$3.$1"; }
#_sys_update() { "$1" update && "$1" upgrade; }
#_goto() { [[ -e $1 ]] && builtin cd "$1" && { exa --all --long 2> /dev/null || command ls -lGo || _error "${1} not found" } }
# +────────────────+
# │ CODE DIRECTORY │
# +────────────────+
# ! [[ -d $CODEDIR ]] && command mkdir -p -- "${CODEDIR}"
# +─────────────────+
# │ SYSTEM SPECIFIC │
# +─────────────────+
#if [[ $OSTYPE =~ darwin* ]]; then
# _copy_cmd='pbcopy'
# alias readlink="greadlink"
# alias copy="$_copy_cmd <"
#fi
# +───────+
# │ FILES │
# +───────+
#alias bashly_edge='docker run --rm -it --user $(id -u):$(id -g) --volume "$PWD:/app" dannyben/bashly:edge'
#alias rmr="rm -rf --"
#alias tailf="less +F -R"
# +──────────────────+
# │ CONFIG SHORTCUTS │
# +──────────────────+
#emulate -L zsh
#setopt extendedglob
#typeset -A pairs=(
# ealiases 'zsh/rc.d/[0-9]*-alias.zsh' gignore 'git/ignore' gcfg 'git/config'
# nvplg "nvim/lua/plugins.lua" rcenv 'zsh/rc.d/[0-9]*-env.zsh' wezrc 'wezterm/wezterm.lua'
# tmuxrc 'tmux/tmux.conf' zic 'zsh/rc.d/[0-9]*-zinit.zsh' zrc 'zsh/.zshrc'
# brewrc "$DOTFILES/Brewfile"
#)
#for k v in ${(kv)pairs[@]}; do
# builtin alias $k="_edit ${XDG_CONFIG_HOME:-${HOME}/.config}/${v}" || true
#done
#alias zinstall='_edit $ZINIT[BIN_DIR]/zinit-install.zsh'
# +────────────────+
# │ HOME SHORTCUTS │
# +────────────────+
#for k v in hscfg '.hammerspoon/init.lua' sshrc '.ssh/config' zec '.zshenv' zpc '.zprofile'; do
# builtin alias -- $k="_edit ${HOME}/${v}" || true
#done
# +─────────────────+
# │ RELOAD COMMANDS │
# +─────────────────+
#alias nvcln='command rm -rf $HOME/.{local/share/nvim,config/nvim/plugin/packer_compiled.lua}'
#alias zicln='command rm -rf ${HOME}/.{local/share/{zinit,zsh},cache,config/{zinit,zsh/.{zcomp{cache,dump},zsh_sessions}}}'
#alias ziprune='zi delete --all --yes; ( exec zsh -il );'
#alias zrld='builtin exec $(which zsh) -il'
#alias zireset='builtin cd ${HOME}; unset _comp{_{assocs,dumpfile,options,setup},{auto,}s}; ziprune; zrld; cd -'
# +────────────+
# │ NAVIGATION │
# +────────────+
#typeset -A pairs=(
# .. '../' ... '../../' .... '../../../'
# bin '~/.local/bin' dl '~/Downloads' hsd '~/.hammerspoon'
# xch '~/.config' xdh '~/.local/share' zdd '$ZDOTDIR'
# zcf '$ZDOTDIR/rc.d'
#)
## rr '$(git rev-parse --show-toplevel)' zs ' '
#for k v in ${(kv)pairs[@]}; do
# builtin alias -- "$k"="_goto $v" || true
#done
# +─────+
# │ GIT │
# +─────+
#for k v in g 'git' gd 'git diff' gs 'git status' gsu 'git submodule update --merge --remote'; do
# builtin alias -- $k="$v" || true
#done
# +───────────────────+
# │ COMMAND SHORTCUTS │
# +───────────────────+
#alias auld='builtin autoload'
#alias me='builtin print -P "%F{blue}$(whoami)%f @ %F{cyan}$(uname -a)%f"'
#alias mk='make'
#alias zc='zinit compile'
#alias zht='hyperfine --warmup 100 --runs 10000 "/bin/ls"'
#alias zmld="builtin zmodload"
# +───────+
# │ MISC. │
# +───────+
#alias -- +x='chmod +x'
#alias -- \?='which'
#alias gen-passwd='openssl rand -base64 24'
#alias get-my-ip='curl ifconfig.co'
#alias get-env='print -lio $(env)'
#alias get-path='print -l ${(@s[:])PATH}'
#alias tmp-md='$EDITOR $(mktemp -t scratch.XXX.md)'
# +────────+
# │ PYTHON │
# +────────+
#alias http-serve='python3 -m http.server'
# +──────────────+
# │ NETWORK INFO │
# +──────────────+
#alias get-open-ports='sudo lsof -i -n -P | grep TCP'
#alias ping='ping -c 10'
# +───────────────+
# │ FILE CREATION │
# +───────────────+
#alias mkmd='{ F_NAME="$(cat -).md"; touch "$F_NAME"; _info "created: $F_NAME"; }<<<'
#alias mkpy='_mkfile py "python3"'
#alias mksh='_mkfile sh "bash"'
#alias mktxt='{ F_NAME="$(cat -).txt"; touch "$F_NAME"; _info "created: $F_NAME"; }<<<'
#alias mkzsh='_mkfile zsh "zsh"'
#alias mkcmd='{ F_NAME="$(cat -)"; touch "$F_NAME"; chmod +x $F_NAME; rehash; nvim $F_NAME }<<<'
## alias mkcd='{ local DIR_NAME="$(cat -)"; command mkdir -p -- "$DIR_NAME" && builtin cd -P -- $DIR_NAME }<<<'
# +─────────────────+
# │ FILE FORMATTING │
# +─────────────────+
#alias fmtbtysh='python3 -m beautysh --indent-size=2 --force-function-style=paronly'
#alias fmtlua='stylua -i'
#alias fmtmd='mdformat --number --wrap 100'
#alias fmtpy='python3 -m black'
#alias fmtsh='shfmt -bn -ci -i 2 -ln=bash -s -sr -w'
# +─────+
# │ SYS │
# +─────+
#alias wsys='echo OSTYPE=${OSTYPE} MACHTYPE=${MACHTYPE} CPUTYPE=${CPUTYPE} hardware=$(uname -m) processor=$(uname -p)'
# +────────+
# │ REMOTE │
# +────────+
#alias cp-dotfiles='rsync -azP $XDG_CONFIG_HOME/dotfiles/ devcloud:~/dotfiles'
#alias cp-hammerspoon='rsync -azP $HOME/.hammerspoon/ devcloud:~/hammerspoon'
#alias cp-nvim='rsync -azP $XDG_CONFIG_HOME/nvim/ devcloud:~/nvim'
# +─────────+
# │ DEFAULT │
# +─────────+
# Default command replacements: prefer modern tools when installed and fall
# back to the stock utilities otherwise. `whence` is zsh's builtin command
# lookup (similar to `command -v`).
if whence bat > /dev/null; then
  # NOTE(review): this exports an environment *variable* named "cat"; it does
  # not alias the cat command — confirm that is intentional.
  export cat="bat --color=always"
  # zeno
  export ZENO_GIT_CAT="bat --color=always"
fi
# exa (modern ls replacement); commented lines are kept alternatives.
if whence exa > /dev/null; then
  alias l='exa -blF'
  alias la='exa -abghilmu'
  #alias la="exa -alh --git --time-style long-iso"
  alias ll="exa -lh --git --time-style long-iso"
  #alias ll='exa -al'
  alias ls='exa --git --group-directories-first'
  #alias ls="exa"
  alias tree='exa --tree'
  # zeno
  export ZENO_GIT_TREE="exa --tree"
elif whence gls > /dev/null; then
  # GNU coreutils ls installed as gls (e.g. Homebrew on macOS).
  alias ls='gls --color=auto'
  alias ll='ls -lh'
  alias la='ls -alh'
else
  alias ll='ls -lh'
  alias la='ls -alh'
fi
# nvim: make v/vi/vim (and vimdiff) open Neovim.
if whence nvim > /dev/null; then
  for i (v vi vim); do
    alias $i="nvim"
  done
  alias vimdiff='nvim -d'
fi
# sed: prefer GNU sed where it is installed as gsed.
if whence gsed > /dev/null; then
  alias sed='gsed'
fi
| true |
57a59d49475e6603f43b8e7a5a6caa0cf20fa0c1 | Shell | ericfournier2/mugqic_utils | /generate_bigwig_chip.sh | UTF-8 | 1,590 | 3.625 | 4 | [] | no_license | #!/bin/bash
# Submit one deepTools bamCoverage job per aligned BAM found under a MUGQIC
# pipeline output directory, producing one RPKM-normalised bigWig track each.
# Jobs go to PBS/Torque (qsub) by default, or to SLURM (sbatch) with -l.
# Prints the colon-separated list of submitted job ids on stdout.
#
# Options:
#   -d|--mugqicdir DIR     pipeline output directory (default: .)
#   -s|--inputsuffix SUF   suffix of the input BAMs (default: .sorted.dup.bam)
#   -o|--outputsuffix SUF  extra suffix appended to the generated .bw names
#   -r|--rapid ID          allocation/account id (default: $RAP_ID)
#   -w|--overwrite         regenerate tracks even when the .bw already exists
#   -l|--slurm             submit with sbatch instead of qsub

# Set argument default values.
MUGQIC_DIR=.
INPUTSUFFIX=.sorted.dup.bam
OUTPUTSUFFIX=""
RAP=$RAP_ID
OVERWRITE=FALSE
# Bug fix: SLURM used to stay unset unless -l was given, which made the
# later [ $SLURM = TRUE ] test an error.
SLURM=FALSE

# Read command line arguments.
# Bug fix: the loop used "$# -gt 1", which silently ignored a valueless
# flag (-w or -l) passed as the last argument.
while [[ $# -gt 0 ]]
do
	key="$1"
	case $key in
	-d|--mugqicdir)
		MUGQIC_DIR=$2
		shift
		;;
	-s|--inputsuffix)
		INPUTSUFFIX=$2
		shift
		;;
	-o|--outputsuffix)
		OUTPUTSUFFIX=$2
		shift
		;;
	-r|--rapid)
		RAP=$2
		shift
		;;
	-w|--overwrite)
		OVERWRITE=TRUE
		;;
	-l|--slurm)
		SLURM=TRUE
		;;
	esac
	shift # past argument or value
done

jobids=""
for i in "$MUGQIC_DIR"/alignment/*/*"$INPUTSUFFIX"
do
	samplename=$(basename "$i" "$INPUTSUFFIX")
	# Skip tracks that already exist unless --overwrite was requested.
	if [ ! -e "$MUGQIC_DIR/tracks/$samplename.bw" ] || [ "$OVERWRITE" = "TRUE" ]
	then
		mkdir -p "$MUGQIC_DIR/jobs"
		script="$MUGQIC_DIR/jobs/$samplename.make_bigwig.sh"
		# Generated job script; the #PBS directives are ignored by sbatch.
		cat <<EOF > "$script"
#!/bin/bash
#PBS -N $script
#PBS -A $RAP
#PBS -l walltime=6:00:00
#PBS -l nodes=1:ppn=16

module load mugqic/python/2.7.12
bamCoverage -e 200 --binSize 5 -p 16 --normalizeUsingRPKM \
    -b $i \
    -o $MUGQIC_DIR/tracks/$samplename$OUTPUTSUFFIX.bw
EOF
		workdir=$(pwd)
		if [ "$SLURM" = "TRUE" ]
		then
			jobid=$(sbatch --time=6:00:00 --account="$RAP" -J "$script" -N 1 --mem=32G --mincpus 16 -o "$script.stdout" -e "$script.stderr" -D "$workdir" "$script")
			# sbatch prints "Submitted batch job <id>"; keep only the id.
			jobid=${jobid#Submitted batch job }
			jobids=$jobids:$jobid
		else
			jobids=$jobids:$(qsub "$script" -o "$script.stdout" -e "$script.stderr" -d "$workdir")
		fi
	fi
done
echo "$jobids"
| true |
0fc681c0e7b7cebae06c7d56ce63b958a47f2a9b | Shell | J-SbiN/bash_utils | /proxy-manager/functions/proxy-functions.sh | UTF-8 | 4,612 | 3.9375 | 4 | [] | no_license |
# Report the proxy-related variables visible to the current shell.
# Exported variables are listed first; when none are exported, any
# session-only (non-exported) proxy variables are shown instead.
function __print_proxy () {
    local pattern='(http_proxy|https_proxy|HTTP_PROXY|HTTPS_PROXY|no_proxy|NO_PROXY)'
    local exported
    local shell_only
    exported="$(env | grep -E "${pattern}" | sort)"
    shell_only="$(compgen -v | grep -E "${pattern}" | while read name; do echo ${name}=${!name};done)"
    if [ -n "${exported}" ]; then
        echo "Your proxy configuration is:"
        echo -e "${exported}"
    else
        echo "You have no proxy variables configured on your environment."
        if [ -n "${shell_only}" ]; then
            echo "But you do have the following variables in your session:"
            echo -e "${shell_only}"
        fi
    fi
}
# Export (or clear) the HTTP(S) proxy variables for the current shell session.
#
#   __set_proxy [-f|--config-file-path FILE] [--] [PROXY]
#
# PROXY is a bare host name; it is checked against the list file when that
# file exists. The literal values "null"/"unset" clear all proxy variables
# instead. Defaults can be overridden via DEFAULT_PROXY*, DEFAULT_SCHEME,
# DEFAULT_PORT, DEFAULT_NO_PROXY and PROXY_VARS_LIST.
function __set_proxy () {
    local default_proxy_file_path="${DEFAULT_PROXY_FILE_PATH:-"${HOME}/.parcelshop-tools/data/proxy-manager/proxys-list.lst"}"
    local default_proxy="${DEFAULT_PROXY:-"proxy.gls-group.eu"}"
    local default_scheme="${DEFAULT_SCHEME:-"http://"}"
    local default_port="${DEFAULT_PORT:-":8080"}"
    local default_no_proxy="${DEFAULT_NO_PROXY:-"localhost,127.0.0.1,::!"}"
    local default_vars_to_export="${PROXY_VARS_LIST:-"HTTP_PROXY HTTPS_PROXY NO_PROXY http_proxy https_proxy no_proxy"}"

    local proxy_file_path=""
    local proxy=""
    local scheme="${default_scheme}"
    local port="${default_port}"
    local avoid_proxy="${default_no_proxy}"
    local vars_to_export="${default_vars_to_export}"
    local tmp_file="${HOME}/.proxy.tmp"

    # parsing input
    while :; do
        case $1 in
            -h|-\?|--help|help)
                __set_proxy_help # Display a usage synopsis.
                return 0
                ;;
            -f|--config-file-path) # Takes an option argument; ensure it has been specified.
                if [ "$2" ]; then
                    proxy_file_path=${2}
                    shift
                else
                    echo 'ERROR: "--config_file_path" requires a non-empty option argument.'
                    return 1
                fi
                ;;
            --config-file-path=?*)
                proxy_file_path=${1#*=} # Delete everything up to "=" and assign the remainder.
                ;;
            --proxy_file_path=) # Handle the case of an empty --config_file_path=
                echo 'ERROR: "--config_file_path" requires a non-empty option argument.'
                ;;
            --) # End of all options.
                shift
                break
                ;;
            -?*)
                printf 'WARN: Unknown option (ignored): %s\n' "$1" >&2
                ;;
            *) # Default case: No more options, so break out of the loop.
                break
        esac
        shift
    done

    # First remaining positional argument is the proxy host.
    local proxy="${1}"

    # Input Validation
    if ! [ "${proxy_file_path}" ]; then
        proxy_file_path="${default_proxy_file_path}"
        echo -e "\e[33;1m[WARN]:\e[0m No proxys file provided... Using default '${proxy_file_path}'."
    fi
    if ! [ "${proxy}" ]; then
        proxy="${default_proxy}"
        echo -e "\e[33;1m[WARN]:\e[0m No proxy argument provided. Using default '${proxy}'."
    fi
    # "null"/"unset" (as substrings) mean: drop every proxy variable.
    if [[ "${proxy}" =~ (null|unset) ]]; then
        unset ${vars_to_export}
        echo -e "Your PROXY environment variables were \e[97;1mun\e[0mset."
        echo "Here are your current PROXY environment vars:"
        echo "$(env | grep -E '(^|_)(PROXY|proxy)')"
        return 0
    fi

    # Turn the list file's lines into one anchored alternation regex:
    # (^line1$|^line2$|...).
    local options="(^"
    options+="$(cat ${proxy_file_path} | sed ':a;N;$!ba;s/\n/$|^/g')"
    options+="$)"

    # export env vars
    if [[ -f ${proxy_file_path} ]] && [[ ! "${proxy}" =~ ${options} ]]; then
        echo -e "\e[33;1m[WARN]:\e[0m The proxy '${proxy}' is not listed on your file."
        echo "You are on your own. Proceeding..."
    fi

    # TODO: separate http from https
    http_proxy="${scheme}${proxy}${port}"
    https_proxy="${http_proxy}"
    HTTP_PROXY="${http_proxy}"
    HTTPS_PROXY="${http_proxy}"
    no_proxy="${avoid_proxy}"
    NO_PROXY="${avoid_proxy}"

    echo "export ${vars_to_export}" > "${tmp_file}" # Just a fancy way of making the variables exported in the shell session
    . "${tmp_file}" # (and not only inside the subshell running the function or the script)
    rm "${tmp_file}"
    __print_proxy
}
# Print the contents of the proxy list file.
# $1 - optional path to the list; falls back to $DEFAULT_PROXYS_FILE_PATH,
#      then to the bundled default location.
function __fetch_proxys () {
    local list_file="${1:-${DEFAULT_PROXYS_FILE_PATH:-"${HOME}/.parcelshop-tools/data/proxy-manager/proxys-list.lst"}}"
    cat "${list_file}"
}
bb7716f1d7bde6624768dbb75256f6a8555f894e | Shell | lcbiao45/myrm | /remove.sh | UTF-8 | 517 | 3.859375 | 4 | [] | no_license | ### 重定义rm命令 ###
# rm replacement: instead of deleting, move the operands into a
# timestamped trash directory under $MY_RM_HOME/.mytrash.

# Bug fix: fail fast when MY_RM_HOME is unset; the original silently fell
# back to the path "/.mytrash", which mkdir normally cannot create.
: "${MY_RM_HOME:?MY_RM_HOME must be set}"

# Trash (recycle bin) directory.
trash_path="${MY_RM_HOME}/.mytrash"
# Create it on first use.
if [ ! -d "$trash_path" ]; then
	mkdir -p "$trash_path"
fi

time=$(date "+%Y%m%d%H%M%S")

# Drop leading rm-style options (-f, -r, ...); only file operands are kept.
while [[ $1 == "-"* ]];do
	shift 1
done

# Invoked with options only (or nothing): just report that rm is wrapped.
if [ $# -eq 0 ];then
	echo "rm already replaced by mv ,MY_RM_HOME=$MY_RM_HOME"
	mv --version
	exit 0
fi

# Move the operands into a timestamped trash subdirectory ("--" protects
# operands whose names start with a dash).
mkdir -p "$trash_path/${time}_trash"
/bin/mv -- "$@" "$trash_path/${time}_trash"
| true |
03c4772e7c17271dc2b75b0a20439d4ca94c3f37 | Shell | mateus-n00b/mailer | /mailer.sh | UTF-8 | 2,132 | 3.515625 | 4 | [] | no_license | #!/bin/bash
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# mailer.sh E um simples programa para o envio de emails via smtp utilizando
# o modulo smtplib do python.
#
# Mateus Sousa, Agosto 2016
#
# Versao 1.0
#
# Licenca GPL
# TODO: adicionar mais servidores smtps. No lugar de 'ifs' use o 'case'!
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
zen='zenity'
PORT=
SERVER=
LOGIN=
MSG=
PASS=
TO=
# Send the message over SMTP+STARTTLS by generating and running a small
# Python (smtplib) program from the globals SERVER/PORT/LOGIN/PASS/TO/MSG.
# NOTE(review): the values are interpolated straight into Python source, so
# a single quote in any of them breaks (or injects into) the program.
send_mail(){
	foo="import smtplib;
import time;
var = smtplib.SMTP('"$SERVER"',"$PORT");
var.ehlo();
var.starttls();
var.login('"$LOGIN"','"$PASS"');
var.sendmail('"$LOGIN"','"$TO"','Subject: "$MSG"');
time.sleep(2);
var.quit()"
	foo=$(sed 's/^ //g' <<< $foo)
	python -c "$foo" 2> /tmp/error
	# Bug fix: the original tested for an EMPTY stderr file (-z), reporting
	# failure on success and success on failure. -s = file is non-empty.
	[ -s /tmp/error ] && $zen --error --width 200 --text "Somethings goes wrong! Try later." && exit 2
	$zen --info --text "Email send!" --width 200
}
# Interactive (zenity) flow: pick the account type, collect credentials and
# message, confirm, then delegate to send_mail.
main(){
	$zen --info --width 200 --text "Welcome to the client smtp"
	account=$($zen --entry --text "Insert the type of account. e.g gmail or hotmail" --width 400 --title "")
	if grep -i "gmail" <<< $account &> /dev/null
	then
		SERVER='smtp.gmail.com'
		PORT=25
	elif grep -i "hotmail" <<< $account &> /dev/null
	then
		SERVER='smtp.live.com'
		PORT=25
	else
		$zen --error --title "" --text "Invalid entry! Exiting..." --width 200
		exit 1
	fi
	LOGIN=$($zen --entry --text "Login" --title "" --width 200)
	[ -z "$LOGIN" ] && $zen --error --text "Invalid entry! Exiting..." && exit 2
	PASS=$($zen --password --text "Password")
	[ -z "$PASS" ] && $zen --error --text "Invalid entry! Exiting..." && exit 2
	TO=$($zen --entry --text "To" --title "" --width 200)
	[ -z "$TO" ] && $zen --error --text "Invalid entry! Exiting..." && exit 2
	MSG=$($zen --text-info --title "Mensagem" --editable)
	# Bug fix: validate the message that was just read ($MSG); the original
	# re-checked $TO here.
	[ -z "$MSG" ] && $zen --error --text "Invalid entry! Exiting..." && exit 2
	$zen --question
	# Bug fix: actually abort when the user answers "no"; the original only
	# showed "Ok. Exiting..." and then sent the mail anyway.
	if [ $? -ne 0 ]; then
		$zen --info --text "Ok. Exiting..." --title "" --width 200
		exit 0
	fi
	send_mail
}
################################### CALL THE MAIN #######################################
main
| true |
36ca51accd04f1165f00a936933f85c5efa4bb6b | Shell | aur-archive/pp-git | /PKGBUILD | UTF-8 | 984 | 3.046875 | 3 | [
"MIT"
] | permissive | # Maintainer: Josh VanderLinden <arch@cloudlery.com>
pkgname=pp-git
pkgver=20130125
pkgrel=1
pkgdesc="Simple wrapper around dd to show approximate progress during a dump"
arch=('any')
url="https://bitbucket.org/instarch/pp"
license=('MIT')
groups=()
depends=('coreutils' 'python2')
makedepends=('git')
source=('LICENSE')
md5sums=('3d9d229ee85c5a5bae93cff0033f66d9')
_gitroot=${url}
_gitname=pp

# Fetch (or refresh) the upstream repository, then build from a pristine
# clone so local edits in the cached checkout never leak into the package.
build() {
  cd "$srcdir"
  msg "Connecting to GIT server...."

  if [[ -d "$_gitname" ]]; then
    cd "$_gitname" && git pull origin
    msg "The local files are updated."
  else
    git clone "$_gitroot" "$_gitname"
  fi

  msg "GIT checkout done or server timeout"
  msg "Starting build..."

  rm -rf "$srcdir/$_gitname-build"
  git clone "$srcdir/$_gitname" "$srcdir/$_gitname-build"
  cd "$srcdir/$_gitname-build"
}

# Install the script as /usr/bin/pp together with its license file.
package() {
  cd "$srcdir/$_gitname-build"
  install -D pp.py -m755 "${pkgdir}/usr/bin/pp"
  install -D LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}

# vim:set ts=2 sw=2 et:
| true |
6847c36c233686c5df0f15821f14997e497a3490 | Shell | loki-dv/zabbix-collection | /thermal/thermal.sh | UTF-8 | 919 | 3.828125 | 4 | [] | no_license | #!/bin/bash
# Usage: thermal.sh arg
# arg is:
# * discovery - for discovery
# * discovered_ID - for get value
#
# Zabbix low-level-discovery helper around lm-sensors: "discovery" prints a
# JSON list of {#ID} macros (Core_1, Core_2, ...); any other argument is
# treated as one of those ids and its temperature (in °C) is printed.

# Usage
if [[ $# -eq 0 ]]; then
	printf "Usage: thermal.sh arg\narg is:\n* discovery - for discovery\n* discovered_ID - for get value\n"
	exit 0
fi

# Discovery: emit {"data":[{"{#ID}":"Core_1"},...]} — one entry per
# matching sensors line, numbered by line order (awk NR).
if [[ "$1" == "discovery" ]]; then
	it=0
	printf "{\"data\":[";
	for I in `sensors | grep -i '^Core [0-9]\|^temp[0-9]' | awk -F: '{print "Core_"NR}'`; do
		# Comma-separate every entry after the first.
		if [ "$it" -eq 1 ]; then printf ","; fi
		printf "{\"{#ID}\":\"$I\"}"
		it=1
	done;
	printf "]}";
	exit 0;
fi

# Get data for one id.
# Bug fix: match the id as a whole word (-w); the plain substring grep made
# "Core_1" also match "Core_10", "Core_11", ...
sensors | grep -i '^Core [0-9]\|^temp[0-9]' | awk -F: '{print "Core_"NR $2}' | grep -w -- "$1" | awk '{print $2}' | sed -e 's/\+//g' | sed -e 's/°C//g'
| true |
53ce6a9c1b5c2e8daa22442f763c7c74bd59903b | Shell | maheetha/Classy2017 | /intersect_cna_bedfile.sh | UTF-8 | 2,070 | 3.640625 | 4 | [] | no_license | #!/usr/bin/bash
<<COMMENTS
The purpose of this code is to intersect the copy number file and the bed file to come up with a single file
that has each gene mapped regions to sample ids, genes, and copy number polidy numbers.
INPUT: Copy Number File from TCGA Portal usually ending with *.seg.txt
SAMPLE FORMAT:
Sample Chromosome Start End Num_Probes Segment_Mean
TCGA-05-XXXX-01A-01D-XXXX-XX 1 3218610 8925112 3421 -0.545
OUTPUT: An intersected bed file with 9 regions
SAMPLE FORMAT: (no header in actual ending file)
CHR START END GENE CHR START END Num_Probes Segment_Mean SAMPLE
1 2985731 3355185 PRDM16 1 3218610 3225940 15 -0.182 TCGA-55-8090-01A-11D-2237-01
COMMENTS
while getopts hi:o:b: opt; do
case $opt in
i)
INPUT_FILE=$OPTARG
;;
o)
OUTPUT_FILE=$OPTARG
;;
b)
BED_FILE=$OPTARG
;;
h)
echo "This program has many mandatory and optional arguments:
Mandatory Arguments:
-i : input file
-b : bed file, with the first three columns chr, start, and end.
Optional Arguments:
-h : This help message
-o : if you have a selected output file name
"
exit 1
esac
done
shift $((OPTIND - 1))
if [[ -z ${INPUT_FILE+x} ]] || [[ -z ${BED_FILE+x} ]]
then
echo "Input or Bed File Missing"
fi
if [ -z ${OUTPUT_FILE+x} ]
then
OUTPUT_FILE=$INPUT_FILE.intersected
fi
######################################################################
### Rearrangement of files and intersection with bed file ######
### Note: Due to size of file, bedtools command might need #####
### submission to a cluster or higher memory node ##############
######################################################################
sed '1d' $INPUT_FILE > $INPUT_FILE.noheader
cut -f 2-6 $INPUT_FILE.noheader > $INPUT_FILE.first
cut -f 1 $INPUT_FILE.noheader > $INPUT_FILE.second
paste -d'\t' $INPUT_FILE.first $INPUT_FILE.second > $INPUT_FILE.rearranged
rm $INPUT_FILE.noheader $INPUT_FILE.first $INPUT_FILE.second
echo "bedtools intersect -a $BED_FILE -b $INPUT_FILE.rearranged -wa -wb > $OUTPUT_FILE" | qsub
done
| true |
13d357cf77c4f3961889f980a647781c2d253373 | Shell | Lewray/maven-s3-repo-uploader | /upload.sh | UTF-8 | 720 | 3.75 | 4 | [] | no_license | #!/bin/sh
# Print usage. printf is used for the embedded newlines because
# echo "\n" is only interpreted by some /bin/sh implementations.
display_usage() {
	echo "Upload the specified artifact to a S3 bucket."
	printf "\nUsage:\n%s groupId artifactId version packaging filePath [repositoryId] [repositoryUrl]\n" "$0"
}

# Bug fix: the help check compared $# (the argument COUNT) with "--help",
# so it could never match; it also must run before the count check to be
# reachable, and [[ ]] is a bashism under #!/bin/sh.
if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
	display_usage
	exit 0
fi

if [ $# -lt 5 ]; then
	display_usage
	exit 1
fi

command -v mvn >/dev/null 2>&1 || { echo >&2 "mvn is required but it's not installed. Aborting."; exit 1; }

# Optional environment file providing REPOSITORY_ID / REPOSITORY_URL
# ("source" is a bashism; "." is the portable equivalent).
if [ -f upload.env ]; then
	. ./upload.env
fi

# Explicit repository coordinates on the command line override upload.env.
if [ $# -eq 7 ]; then
	REPOSITORY_ID=$6
	REPOSITORY_URL=$7
fi

mvn deploy:deploy-file \
	-DgroupId="$1" \
	-DartifactId="$2" \
	-Dversion="$3" \
	-Dpackaging="$4" \
	-Dfile="$5" \
	-DrepositoryId="${REPOSITORY_ID}" \
	-Durl="${REPOSITORY_URL}"
| true |
54d89df09d6db689b75bdf1027fbcc4d3e9b512e | Shell | donnelll/codebase-ALPHA- | /adhoc_scripts2/nagios_eventhandlers/laxlxap01-wwwlogs | UTF-8 | 2,051 | 3.921875 | 4 | [] | no_license | #!/bin/sh
#
# Nagios event handler for the /home disk-space check on laxlxap01/02:
# frees space by clearing old wwwlogs via /root/scripts/jettis_wwwlogs.sh.
#
#   $1 = service state (OK / WARNING / UNKNOWN / CRITICAL)
#   $2 = state type (SOFT / HARD)
#   $3 = check attempt number
#
# Only two situations trigger the cleanup script:
#   - the 3rd SOFT CRITICAL attempt (the last retry before the state turns
#     HARD and contacts get notified; earlier attempts may be flukes),
#   - any HARD CRITICAL state (contacts have already been notified, so this
#     is a final attempt to self-heal).
# Every other state (OK, WARNING, UNKNOWN, early SOFT attempts) is a no-op.
case "$1/$2/$3" in
	CRITICAL/SOFT/3)
		echo -n "Running Jettis_wwwlogs.sh (3rd soft critical state)..."
		/root/scripts/jettis_wwwlogs.sh
		;;
	CRITICAL/HARD/*)
		echo -n "Running Jettis_wwwlogs.sh..."
		/root/scripts/jettis_wwwlogs.sh
		;;
esac
exit 0
| true |
7ec2419cf64c22dd8aac7c61f0e21c8db21961cd | Shell | a2gs/PAINEL | /scripts/listPIDs.sh | UTF-8 | 976 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | # Andre Augusto Giannotti Scota (a2gs)
# andre.scota@gmail.com
#
# PAINEL
#
# Apache License 2.0
#
# listPIDs.sh
# List all processes PIDs.
#
# Who     | When       | What
# --------+------------+----------------------------
# a2gs    | 13/08/2018 | Creation
#         |            |
#
#!/bin/bash
# NOTE(review): the shebang above is not the first line of the file, so it
# has no effect; the script relies on being invoked with an explicit shell.

# Running PAINEL processes (PID + command line).
echo '--- PROCESSOS NO AR -----------------------------------'
ps -C serv,servList,select_html -o pid,cmd | sed 's/^ *//' | column -t

# Report a serv lock file, if present, along with its contents.
if [ -f "$PAINEL_HOME/running/PIDs/servlock" ]; then
	echo '--- SERVLOCK ------------------------------------------'
	echo "There is a servlock [$PAINEL_HOME/running/PIDs/servlock] file! Content:"
	cat $PAINEL_HOME/running/PIDs/servlock
fi

# Listening TCP sockets belonging to the PAINEL PIDs.
# NOTE(review): $PROCS_PID_LIST is a newline-separated grep pattern list
# matched anywhere on the netstat line, so a port number equal to a PID
# would also match — confirm this is acceptable.
echo '--- PORTAS EM LISTNING --------------------------------'
PROCS_PID_LIST=$(ps -C serv,servList,select_html --no-headers -o pid,cmd | sed 's/^ *//' | cut -f1 -d ' ')
if [ -z "$PROCS_PID_LIST" ]; then
	echo "There are no listening processes running."
else
	netstat -nap --tcp --listening 2>/dev/null | grep "$PROCS_PID_LIST"
fi
| true |
60fa84952e9b00280ec9a4868b9e049e5a3d4254 | Shell | semaphoreci/toolbox | /release/install_in_tests.sh | UTF-8 | 748 | 3.578125 | 4 | [] | no_license | #!/bin/bash
prefix_cmd() {
local cmd=$@
if [ `whoami` == 'root' ]; then
`$@`
else
`sudo $@`
fi
}
# Before running this, you need to run release/create.sh
# Remove installed toolbox
prefix_cmd rm -rf ~/.toolbox
prefix_cmd rm -f $(which artifact)
prefix_cmd rm -f $(which spc)
prefix_cmd rm -f $(which when)
prefix_cmd rm -f $(which test-results)
prefix_cmd rm -f $(which enetwork)
cd ~
arch=""
case $(uname) in
Darwin)
tar -xvf /tmp/Darwin/darwin.tar -C /tmp
mv /tmp/toolbox ~/.toolbox
;;
Linux)
[[ "$(uname -m)" =~ "aarch" ]] && arch="-arm"
tar -xvf /tmp/"Linux${arch}"/"linux${arch}".tar -C /tmp
mv /tmp/toolbox ~/.toolbox
;;
esac
cd -
bash ~/.toolbox/install-toolbox
source ~/.toolbox/toolbox
| true |
64e369d5007cf3b54a5700c3e3abefe696e90009 | Shell | braoult/exercism | /bash/two-fer/two_fer.sh | UTF-8 | 372 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env bash
# The commented-out variant below (kept from the author) would trim
# surrounding whitespace from $1 before testing it, so a blank argument
# would fall back to "you" as well; the exercise spec does not require it
# (arguably it should, to avoid "One for , one for me.").
#str=$1
#str="${str#"${str%%[![:space:]]*}"}" # remove leading blanks
#str="${str%"${str##*[![:space:]]}"}" # remove trailing blanks
#[[ ${#str} = 0 ]] && str="you"
# Print the two-fer line, defaulting the name to "you" when $1 is unset
# or empty.
printf 'One for %s, one for me.\n' "${1:-you}"
| true |
96f483230a9af80b98d5ab26bc9903df07b06744 | Shell | friendbear/bashr_scripts_utf8 | /chap09/initscript_deb.sh | UTF-8 | 500 | 3.8125 | 4 | [] | no_license | #!/bin/sh
# initscript_deb.sh - init script for Debian GNU/Linux.
# Prints a progress message for the requested action, waits 10 seconds to
# simulate work, then reports completion.
case $1 in
	start)
		phase="startup"
		;;
	stop)
		phase="shutdown"
		;;
	*)
		echo "usage: initscript_deb.sh {start|stop}"
		exit 1
		;;
esac

echo -n "Running initscript_deb.sh for ${phase}..."
sleep 10
echo "done."
exit 0
| true |
bfa5c0b59fefbd855e2f1ec0054ee23880749522 | Shell | jinyun1tang/netcdf-c | /unit_test/run_s3sdk.sh | UTF-8 | 1,331 | 2.96875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# Exercise the netCDF S3 SDK wrapper (test_s3sdk) against a real bucket:
# write/read/size/list/search/delete a key under an isolated path, then
# clear that path on exit.
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh

set -e

#CMD="valgrind --leak-check=full"

URL="https://s3.us-east-1.amazonaws.com/unidata-zarr-test-data"

isolate "testdir_uts3sdk"

# Create an isolation path for S3; build on the isolation directory
S3ISODIR="$ISODIR"
S3ISOPATH="/netcdf-c"
S3ISOPATH="${S3ISOPATH}/$S3ISODIR"

# Remove everything this run created under the isolation key.
test_cleanup() {
${CMD} ${execdir}/../nczarr_test/s3util -u "${URL}" -k "${S3ISOPATH}" clear
}
if test "x$GITHUB_ACTIONS" != xtrue; then
trap test_cleanup EXIT
fi

THISDIR=`pwd`
cd $ISOPATH

${CMD} ${execdir}/test_s3sdk -u "${URL}" exists
${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}/test_s3sdk.txt" write
${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}/test_s3sdk.txt" read
${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}/test_s3sdk.txt" size
${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}" list
${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}" search
${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}/test_s3sdk.txt" delete
if test "x$FEATURE_LARGE_TESTS" = xyes ; then
${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}" longlist
fi

exit
# NOTE(review): everything below the unconditional "exit" above is dead
# code (the GitHub-Actions cleanup never runs) — confirm whether the exit
# should be conditional instead.
if test "x$GITHUB_ACTIONS" = xtrue; then
# Cleanup on exit
test_cleanup
fi
exit
| true |
62c548dbd5446a96d5948349bf9c6e8ac36ca231 | Shell | jjosealf94/MC | /Tareas/HW1/stardate.sh | UTF-8 | 1,561 | 3.640625 | 4 | [] | no_license | #!/bin/bash
#: Description : Takes a year and shows its important historical events plus
#:               information on stars whose light arrives in that year.
## Clean the screen
clear
## Download the files requiered
wget -q https://raw.githubusercontent.com/ComputoCienciasUniandes/MetodosComputacionales/master/homework/2015-V/HW1/worldhistory.tsv
wget -q https://raw.githubusercontent.com/ComputoCienciasUniandes/MetodosComputacionales/master/homework/2015-V/HW1/hyg.csv
## Build a divider
divider=#####################################
divider=$divider$divider
## Width of divider
totalwidth=60
## Print divider to match
printf "%$totalwidth.${totalwidth}s\n" "$divider"
echo
## Build a title
echo "StartDate" | figlet
## Print divider to match
printf "%$totalwidth.${totalwidth}s\n" "$divider"
echo
## Read input date and set the relative variable
## dateA/dateB bound the travel time (in years) of light emitted in year
## $1 that arrives during 2015.
echo $1 | figlet
idate=$1
dateA=$((2015-$idate))
dateB=$((2016-$idate))
##Formatted the historic information
## (sed strips the leading year + separator, 5 characters, from each line)
## NOTE(review): this is a plain substring grep, so e.g. "19" also matches
## any line containing 1914, 1923, ... — verify the intended behaviour.
grep $idate worldhistory.tsv | sed 's/^.....//g'
echo
## Print divider to match
printf "%$totalwidth.${totalwidth}s\n" "$divider"
##Print a message
echo "LOOK AT THE FOLLOWING STARS:"
##Formatted the star information
## Drop the 32-line header, then select stars whose distance (column 10)
## lies in [dateA, dateB) and print RA, DEC and the HIP id.
## NOTE(review): the comparison treats column 10 as light years — confirm
## the unit used by this hyg.csv export.
sed -i '1,32d' hyg.csv
awk -F "," '{if('$dateA' <= $10 && '$dateB' > $10 ) {print $8 "\t" $9 "\t" $2}}' hyg.csv > mostrar.csv
##Print the result
echo "RA/° DEC/° HIP No."
head -5 mostrar.csv
## Print divider to match
printf "%$totalwidth.${totalwidth}s\n" "$divider"
## remove the extra files
rm mostrar.csv
rm worldhistory.tsv
rm hyg.csv
18733cf9bd00c4ad57b3938e4c6ae68f2a5633b5 | Shell | KoreaHaos/hello_bash | /bash_scripts_a_2_z/i-if_and_if_else_examples/if_else.bash | UTF-8 | 338 | 3.5 | 4 | [] | no_license | # This is a basic if-else script
# run it with bash if_else.bash <optional argument>
#
# example :$ bash bash_scripts_a_2_z/i-if_and_if_else_examples/if_else.bash test
# product :test
#
passed_in_arg=$1

# Bug fix: quote the expansion — unquoted, an argument containing spaces
# made the [ -z ... ] test error out ("binary operator expected").
if [ -z "$passed_in_arg" ];
then
    echo "No argument passed to script"
else
    echo "Argument passed to script = $passed_in_arg"
fi
| true |
1fba75c35e7bc8f68f7d9b7653958548adda7492 | Shell | DanillodeSouza/sqs-lambda-go-example | /localstack_setup/create-aws-queue.sh | UTF-8 | 276 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Provision the example SQS queue on a LocalStack instance.
queue_name="example"

echo "Creating queue"
if awslocal sqs create-queue --queue-name "$queue_name"; then
  echo "Created"
else
  echo "Failed to create"
fi
echo "Sqs initialization completed"
| true |
ef4877d7a07b94d5b0b67459b752a36a8b81e34b | Shell | krao-test/go-httpbin | /bin/gcloud | UTF-8 | 450 | 3.125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
# A wrapper that executes the gcloud CLI in a docker container, to avoid
# requiring a local installation.
#
# Adapted from this helpful blog post:
# https://blog.scottlowe.org/2018/09/13/running-gcloud-cli-in-a-docker-container/

GCLOUD_SDK_TAG="312.0.0"

# Bug fix: pass "$@" rather than $*, so quoted arguments containing spaces
# reach gcloud intact; the volume paths are quoted for the same reason.
exec docker run \
  --rm -it \
  --workdir /code \
  -v "$PWD:/code" \
  -v "$HOME/.config/gcloud:/root/.config/gcloud" \
  google/cloud-sdk:"$GCLOUD_SDK_TAG" \
  gcloud "$@"
| true |
a1350648500d7b45f9a5d05dd2691affca1db61f | Shell | serverdensity/sd-agent-core-plugins | /.travis/dockerfiles/jammy/entrypoint.sh | UTF-8 | 757 | 3.484375 | 3 | [] | permissive | #!/bin/bash
# Build sd-agent source and binary packages for $RELEASE inside the
# container. Ubuntu code names land under /packages/ubuntu/..., everything
# else under /packages/debian/...

DEBIAN_PATH="/sd-agent/debian"
UBUNTU=(bionic focal jammy xenial trusty)

# Substring match of $RELEASE against the Ubuntu code-name list.
if [[ ${UBUNTU[*]} =~ ${RELEASE} ]]
then
    DISTRO="ubuntu"
else
    DISTRO="debian"
fi

sudo cp -a "${DEBIAN_PATH}/distros/${RELEASE}/." "${DEBIAN_PATH}"
sudo sed -i "s|trusty|$RELEASE|" "${DEBIAN_PATH}/changelog"
sudo dpkg-source -b /sd-agent

for ARCH in amd64 arm64; do
    # mkdir -p replaces the two existence checks and also creates parents.
    sudo mkdir -p "/packages/${DISTRO}/${RELEASE}/${ARCH}"
    # Bug fix: the update step referenced the undefined lowercase $arch,
    # so the per-architecture base tarball was never updated.
    pbuilder-dist "${RELEASE}" "${ARCH}" update
    pbuilder-dist "${RELEASE}" "${ARCH}" build \
        --buildresult "/packages/${DISTRO}/${RELEASE}/${ARCH}" *"${RELEASE}"*.dsc
done;
| true |
bc947a59cfbb4205c45dca438feb532a19ca7bd0 | Shell | PWmercy/Mercy-Digital-Arts | /Experiments/Testing.sh | UTF-8 | 191 | 2.84375 | 3 | [] | no_license | #!/bin/zsh
# shellcheck shell=bash
# Split command-substitution results on newlines only, so application
# names containing spaces survive the `ls` parsing below (zsh splits
# $(...) on IFS).
IFS=$'\n'
# CART
# List every entry one level inside each /Applications folder.
cd '/Applications' || exit
for appFolder in `ls -d *`; do
    for innerFolder in $appFolder/*; do
        echo $innerFolder
    done
done
| true |
581cdffbe5a71a4b92eb4d4426d0a0dc67ec37ce | Shell | Ptival/PeaCoq | /uw-cse-505/distrib/linux/setup.sh | UTF-8 | 455 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Download and set up the PeaCoq 0.2-alpha release from GitHub.

VERSION=0.2-alpha
DIR=PeaCoq-${VERSION}
TGZ=v${VERSION}.tar.gz

# Abort on the first failure: previously a failed download or cd let the
# remaining steps run against the wrong files/directory.
set -e

wget -N "https://github.com/Ptival/PeaCoq/archive/${TGZ}"
tar -xzvf "${TGZ}"
cd "${DIR}"
wget -N "https://github.com/Ptival/PeaCoq/releases/download/v${VERSION}/peacoq"
chmod +x peacoq
./setup.sh
cd ..

echo "==========================================="
echo "SETUP COMPLETE"
echo "==========================================="
echo "Now enter directory ${DIR} and run ./peacoq"
| true |
fd115ee0f5b508b2100e8633e39e38cd6434a8af | Shell | wickedwukong/cassandra | /cassandra/src/try-port.sh | UTF-8 | 355 | 3.9375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Poll host:port with netcat until a TCP connection succeeds or the given
# number of attempts is exhausted (2 seconds between attempts).
host=$1
port=$2
tries=$3

if [ -z "$tries" ]; then
  echo "Usage: $0 <host> <port> <tries>"
  exit 1
fi

for attempt in $(seq 1 "$tries")
do
  # Bug fix: the nc call was wrapped in backticks, which would execute
  # whatever nc printed instead of testing nc's exit status directly.
  if nc "$host" "$port" </dev/null &> /dev/null; then
    echo "Connected to $host:$port on attempt $attempt"
    exit
  fi
  sleep 2
done

echo "Unable to connect to $host:$port after $tries tries."
exit 1
| true |
3a46e1e3898fb20cfeea7e75ada3fbdc964b6bd8 | Shell | patrickdeyoreo/holbertonschool-system_engineering-devops | /0x05-processes_and_signals/7-highlander | UTF-8 | 202 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env bash
#
# Don't stop - not even for SIGTERM
#
# How it works:
#  - sleep is overridden with bash's builtin `read -t`, so the wait happens
#    inside the shell itself; with an external /bin/sleep the shell would
#    wait for the foreground child and delay running the TERM trap.
#  - `} <> <(:)` opens the function's stdin read/write on a process
#    substitution that never produces data, so `read` always just times out.
#  - `!` inverts read's timeout failure status, making sleep "succeed".
sleep() {
    ! read -rt "$1"
} <> <(:)
# On SIGTERM: taunt and keep going.
trap '
    echo "I am invincible!!!"
' TERM
while true
do
    echo 'To infinity and beyond'
    sleep 2
done
| true |
cfe13763e2fb6d9cfedb3f4af09d2f4c7ed63681 | Shell | jan-moeller/hseg | /build_run_external.sh | UTF-8 | 1,066 | 3.21875 | 3 | [] | no_license | #! /bin/bash
server=atcremers50
directory=/work/moellerj/external
echo " => Updating project on $server..."
rsync -a --info=progress2 cmake exec include lib net src test CMakeLists.txt "$server:$directory"
ssh -o StrictHostkeyChecking=no -x "$server" bash << 'EOF'
directory=/work/moellerj/external
build_type=RelWithDebInfo
build_threads=4
run_app=hseg_train
make_target=hseg_train
if [[ ! -d "$directory/build" ]] ; then
echo " => Build directory does not exist, creating new one."
mkdir -p "$directory/build"
cd "$directory/build"
cmake -DCMAKE_BUILD_TYPE="$build_type" ..
fi
if [[ ! -d "$directory/out" ]] ; then
mkdir "$directory/out"
fi
echo " => Building..."
cd "$directory/build" ; make "$make_target" -j"$build_threads"
if [[ $? -ne 0 ]] ; then
echo " => Make failed, try again."
cd "$directory/build" ; make "$make_target" -j"$build_threads"
if [[ $? -ne 0 ]] ; then
echo " => Make failed again :("
exit 1
fi
fi
echo " => Executing $run_app..."
exec "$directory/build/$run_app"
EOF | true |
0e3318e6fe5f1c3ff7ccc7be92085294decc565c | Shell | yangyixiaof/gitcrawler | /programprocessor/ngram-train-scripts/refine.sh | UTF-8 | 139 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# DESTRUCTIVE: recursively delete every file under the current directory
# except *.java and *.sh sources, then report how many files remain.
find . -type f -not \( -name '*.java' -or -name '*.sh' \) -delete
# Count the remaining regular files: lines of `ls -lR` starting with '-'
# (grep -c replaces the former grep | wc -l pipeline).
numberoffiles=$(ls -lR | grep -c "^-")
echo "$numberoffiles"
| true |
a288edd8b8e1ffac25a02420fac7a8a4e22c770f | Shell | hexming003/loragateway | /config_ip.sh | UTF-8 | 936 | 2.609375 | 3 | [] | no_license | #!/bin/sh
################################config ETH2########################################
MACADDR=86:43:C0:A8:01:E7
IPADDR=192.168.1.231
NETMASK=255.255.255.0
GW=192.168.1.1
DEV=eth2
echo "mac=$MACADDR"
echo "ip=$IPADDR"
echo "netmask=$NETMASK"
echo "gw=$GW"
ifconfig $DEV down &&
sleep 1 &&
sync
/MeterRoot/TestTool/gpio -w PA28 0 &&
sleep 1 &&
sync
/MeterRoot/TestTool/gpio -w PA28 1 &&
sleep 2 &&
sync
ifconfig $DEV hw ether $MACADDR $DEV
ifconfig $DEV $IPADDR up
sleep 1 &&
sync
ifconfig $DEV netmask $NETMASK
#route add default metric 10 gw $GW dev $DEV
#ifconfig UP BROADCAST MULTICAST MTU:1500
route add -net 224.0.0.0 netmask 224.0.0.0 dev $DEV
sleep 2 &&
route add default gw $GW dev $DEV
##################################################################################
| true |
a27acc89793ff5bab40befddc087d34565adaba8 | Shell | grassfishgmbh/QtPlayer-deps | /cef/2526/build-linux.sh | UTF-8 | 2,685 | 3.328125 | 3 | [] | no_license | CEF_BRANCH=2526
# Unmount leftovers from a previous run; ignore failures if not mounted.
sudo umount chroot/home/ubuntu/buildspace || true
if [ ! -d buildspace ]; then
    mkdir buildspace
    sudo chown 1000:1000 buildspace
fi
# Drop stale patch overrides from a previous run.
# NOTE(review): 'mkdir buildspace' below targets the already-existing
# parent rather than 'buildspace/overrides' — mkdir will fail here;
# confirm whether 'mkdir buildspace/overrides' was intended.
if [ -d buildspace/overrides ]; then
    sudo rm -rf buildspace/overrides
    sudo mkdir buildspace
    sudo chown 1000:1000 buildspace
fi
sudo umount chroot/run/shm || true
# Rebuild the Ubuntu 14.04 (trusty) chroot from scratch.
sudo rm -rf chroot
mkdir chroot
sudo debootstrap --variant=minbase --components=main,restricted,universe,multiverse --include=ca-certificates,ssl-cert,wget,python,python2.7,git,sudo,curl,file,cmake,lsb-release,libgtkglext1-dev --arch=amd64 trusty ./chroot http://de.archive.ubuntu.com/ubuntu/
sudo chroot chroot adduser --disabled-password --gecos "" ubuntu
sudo chroot --userspec 1000:1000 chroot mkdir /home/ubuntu/buildspace
sudo mount --bind buildspace chroot/home/ubuntu/buildspace
sudo cp -r patches-linux buildspace/overrides
# Root-level chroot setup: passwordless sudo for 'ubuntu' and a /dev/shm
# tmpfs entry (needed by the Chromium build).
sudo chroot chroot <<EOF
echo "ubuntu ALL = NOPASSWD: ALL" >> /etc/sudoers
echo "" >> /etc/fstab
echo "none /dev/shm tmpfs rw,nosuid,nodev,noexec 0 0" >> /etc/fstab
EOF
# Build CEF inside the chroot as the unprivileged user. The backslash in
# <<\EOF keeps the outer shell from expanding $-variables in this block.
sudo chroot --userspec 1000:1000 chroot <<\EOF
# print environment variables
set
sudo mount /dev/shm
CEF_BRANCH=2526
cd /home/ubuntu
# Run the script excluding unnecessary components.
# yes | sudo ./install-build-deps.sh --no-arm --no-chromeos-fonts --no-nacl --no-prompt
cd buildspace
git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
export PATH=/home/ubuntu/buildspace/depot_tools:$PATH
rm -f automate-git.py
wget https://bitbucket.org/chromiumembedded/cef/raw/master/tools/automate/automate-git.py
python2.7 automate-git.py --download-dir=/home/ubuntu/buildspace/cef_$CEF_BRANCH --depot-tools-dir=/home/ubuntu/buildspace/depot_tools --no-build --branch=$CEF_BRANCH
echo ttf-mscorefonts-installer msttcorefonts/accepted-mscorefonts-eula select true | debconf-set-selections
# Overlay the local patches onto the CEF checkout before building.
cp -r overrides/* cef_$CEF_BRANCH/chromium/src/cef/patch/
cd cef_$CEF_BRANCH
cd chromium/src
# Now install all the necessary build dependencies
yes | sudo build/install-build-deps.sh --no-arm --no-chromeos-fonts --no-nacl --no-prompt
sudo apt-get -y install libtool libvdpau-dev libvdpau1 libva1 libva-dev
export GYP_DEFINES="proprietary_codecs=1 ffmpeg_branding=Chrome clang=0 use_allocator=none"
export GYP_GENERATORS="ninja"
cd cef
./cef_create_projects.sh
cd ..
build/util/lastchange.py > build/util/LASTCHANGE
build/util/lastchange.py > build/util/LASTCHANGE.blink
rm -rf cef/binary_distrib/* || true
ninja -C out/Release cefclient chrome_sandbox
NINJAEXIT=$?
if [ "$NINJAEXIT" != "0" ]; then
	echo "Failed build with exit code $NINJAEXIT"
	exit 1
fi
cd cef/tools
./make_distrib.sh --allow-partial --ninja-build
EOF
| true |
9c7a8109038b0784dc14c94d377e24b9569fd181 | Shell | Thembahank/ubuntu-server-setup | /tests/unit-tests.sh | UTF-8 | 3,773 | 3.828125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Fail fast: abort the whole test run on the first unhandled error.
set -e

# Option parsing.
#   -s  skip the assertions that depend on the setup steps
#       (user creation, sudoers, SSH configuration).
# Fix: the usage message previously advertised [-v] [-r], which are not
# parsed; only -s exists.
while getopts "s" opt; do
    case $opt in
        s) SKIP_SETUP=true ;;
        *) echo "usage: $0 [-s]" >&2
           exit 1 ;;
    esac
done
# Echo the directory this script lives in; when the path derived from
# BASH_SOURCE is not an existing directory, fall back to $PWD.
getCurrentDir() {
    local script_dir="${BASH_SOURCE%/*}"
    [[ -d "${script_dir}" ]] || script_dir="$PWD"
    echo "${script_dir}"
}
# Resolve this script's directory so sibling files can be sourced
# regardless of the caller's working directory.
current_dir=$(getCurrentDir)
# Test framework (runUnitTests, assertEquals, assertContains, ...).
# shellcheck source=/dev/null
source "${current_dir}/lib/bunit.shl"
# Library under test (addUserAccount, setupUfw, createSwap, ...).
# shellcheck source=/dev/null
source "${current_dir}/../setupLibrary.sh"
# Name of the throwaway account the tests create and later delete.
test_user_account=testuser
# Read by the sourced libraries, not by this file.
# shellcheck disable=SC2034
VERBOSE_MODE="true"
### Unit Tests ###
# Create the throwaway test account (as a sudoer) before assertions run.
testSetup() {
    printf '%s\n' "Test Setup"
    addUserAccount "${test_user_account}" true
}
# The account must exist after setup: `id -u` exits 0 for a known user.
testUserAccountCreated() {
    local id_status
    id_status="$(id -u "${test_user_account}" >/dev/null 2>&1; echo $?)"
    assertEquals 0 "${id_status}"
}
# Unless setup was skipped (-s), the test user must hold full sudo rights.
testIfUserIsSudo() {
    [[ $SKIP_SETUP == true ]] && return 0
    local sudo_rules
    sudo_rules="$(sudo -l -U "${test_user_account}")"
    assertContains "(ALL : ALL) ALL" "${sudo_rules}"
}
# Installs a throwaway public key for the test user and verifies it is
# the content of authorized_keys. Skipped with -s.
function testAddingOfSSHKey() {
    if [[ $SKIP_SETUP != true ]]; then
        # Allow passwordless sudo so later sudo calls don't prompt.
        disableSudoPassword "${test_user_account}"
        local dummy_key="ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBGTO0tsVejssuaYR5R3Y/i73SppJAhme1dH7W2c47d4gOqB4izP0+fRLfvbz/tnXFz4iOP/H6eCV05hqUhF+KYRxt9Y8tVMrpDZR2l75o6+xSbUOMu6xN+uVF0T9XzKcxmzTmnV7Na5up3QM3DoSRYX/EP3utr2+zAqpJIfKPLdA74w7g56oYWI9blpnpzxkEd3edVJOivUkpZ4JoenWManvIaSdMTJXMy3MtlQhva+j9CgguyVbUkdzK9KKEuah+pFZvaugtebsU+bllPTB0nlXGIJk98Ie9ZtxuY3nCKneB+KjKiXrAvXUPCI9mWkYS/1rggpFmu3HbXBnWSUdf localuser@machine.local"
        addSSHKey "${test_user_account}" "${dummy_key}"
        local ssh_file
        ssh_file="$(sudo cat /home/${test_user_account}/.ssh/authorized_keys)"
        # NOTE(review): argument order here is (actual, expected), the
        # reverse of the other assertEquals call sites — confirm bunit's
        # expected signature.
        assertEquals "${ssh_file}" "${dummy_key}"
    fi
}
# Hardens sshd and checks that password authentication and root login
# are both disabled in the resulting config. Skipped with -s.
function testChangeSSHConfig() {
    if [[ $SKIP_SETUP != true ]]; then
        changeSSHConfig
        local ssh_config
        ssh_config="$(sudo cat /etc/ssh/sshd_config)"
        assertContains "PasswordAuthentication no" "${ssh_config}"
        assertContains "PermitRootLogin no" "${ssh_config}"
    fi
}
# Enables the firewall with the OpenSSH profile and checks its status.
function testUfw() {
    setupUfw
    local ufw_status
    ufw_status="$(sudo ufw status)"
    assertContains "Status: active" "${ufw_status}"
    assertContains "OpenSSH" "${ufw_status}"
}
# Creates /swapfile and verifies it exists and is an active swap device.
function testSwap() {
    createSwap
    assertContains "/swapfile" "$(ls -lh /swapfile)"
    assertContains "/swapfile" "$(sudo swapon -s)"
}
# Saves the current kernel swap tunables, applies 10/50, asserts they
# took effect, then restores the originals.
function testSwapSettings() {
    local swappiness
    local cache_pressure
    swappiness="$(cat /proc/sys/vm/swappiness)"
    cache_pressure="$(cat /proc/sys/vm/vfs_cache_pressure)"
    tweakSwapSettings 10 50
    assertEquals "10" "$(cat /proc/sys/vm/swappiness)"
    assertEquals "50" "$(cat /proc/sys/vm/vfs_cache_pressure)"
    tweakSwapSettings "${swappiness}" "${cache_pressure}"
}
# Round-trips the system timezone through America/New_York and restores
# the previous value afterwards.
function testTimezone() {
    local timezone
    timezone="$(cat /etc/timezone)"
    setTimezone "America/New_York"
    assertEquals "America/New_York" "$(cat /etc/timezone)"
    setTimezone "${timezone}"
}
# Configures time synchronization and checks timedatectl's sync flag.
# Ubuntu >= 18.04 reports "System clock synchronized", older releases
# report "NTP synchronized".
function testNTP() {
    configureNTP
    ubuntu_version="$(lsb_release -sr)"
    # NOTE(review): numeric bc comparison of YY.MM version strings works
    # for current Ubuntu numbering — confirm for future release schemes.
    if [[ $(bc -l <<< "${ubuntu_version} >= 18.04") -eq 1 ]]; then
        # Give timesyncd a moment to reach a synchronized state.
        sleep 2
        assertContains "System clock synchronized: yes" "$(timedatectl status)"
    else
        assertContains "NTP synchronized: yes" "$(timedatectl status)"
    fi
}
# Removes everything the tests created: the user, sudoers/SSH changes
# (unless setup was skipped), ufw rules, the swap file, and ntp.
function testTeardown () {
    echo "Test Teardown"
    deleteTestUser
    if [[ $SKIP_SETUP != true ]]; then
        revertSudoers
        revertSSHConfig
    fi
    revertUfw
    deleteSwap
    sudo apt-get --purge --assume-yes autoremove ntp
}
### Helper Functions ###
# Remove the test account from the sudo group, then delete it with its
# home directory.
function deleteTestUser() {
    sudo deluser ${test_user_account} sudo
    sudo deluser -f --remove-home ${test_user_account}
}
# Restore the sshd config from the .old backup made during the test.
function revertSSHConfig() {
    sudo cp /etc/ssh/sshd_config.old /etc/ssh/sshd_config
    sudo rm -rf /etc/ssh/sshd_config.old
}
# Undo the firewall changes made by setupUfw.
function revertUfw() {
    sudo ufw delete allow OpenSSH
    sudo ufw disable
}
# Deactivate and delete the swap file created by createSwap.
function deleteSwap() {
    sudo swapoff /swapfile
    sudo rm /swapfile
}
runUnitTests | true |
fbd734f575a7f2d51626b6a4fc1d448c473506ae | Shell | AAAI-DISIM-UnivAQ/DALI | /Examples/advanced/startmas.sh | UTF-8 | 1,824 | 3.375 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
#exec 1>/dev/null # @echo off
clear # cls
#title "MAS"
# Locations of the SICStus Prolog runtime and the DALI sources.
SICSTUS_HOME=/usr/local/sicstus4.6.0
MAIN_HOME=../..
DALI_HOME=../../src
CONF_DIR=conf
PROLOG="$SICSTUS_HOME/bin/sicstus"
# Pinging localhost 4 times doubles as a fixed delay (ported from the
# Windows batch version; see the trailing %WAIT% comments below).
WAIT="ping -c 4 127.0.0.1"
INSTANCES_HOME=mas/instances
TYPES_HOME=mas/types
BUILD_HOME=build
XTERM=xterm
# Clear previous build artifacts and agent state.
rm -rf tmp/*
rm -rf build/*
rm -f work/* # remove everything if you want to clear agent history
rm -rf conf/mas/*
# Build agents by creating a file with the instance name containing the type content for each instance.
for instance_filename in $INSTANCES_HOME/*.txt
do
	type=$(<$instance_filename) # agent type name is the content of the instance file
	type_filename="$TYPES_HOME/$type.txt"
	instance_base="${instance_filename##*/}" # e.g. 'mas/instances/agent1.txt' -> 'agent1.txt'
	echo $type_filename
	cat $type_filename >> "$BUILD_HOME/$instance_base"
done
cp $BUILD_HOME/*.txt work
# Start the DALI server in its own terminal, then the user console.
$XTERM -hold -e "$PROLOG -l $DALI_HOME/active_server_wi.pl --goal \"go(3010,'server.txt').\"" & #start /B "" "%PROLOG%" -l "%DALI_HOME%/active_server_wi.pl" --goal go(3010,'%daliH%/server.txt').
echo Server ready. Starting the MAS....
$WAIT > /dev/null # %WAIT% >nul
$XTERM -hold -e "$PROLOG -l $DALI_HOME/active_user_wi.pl --goal utente." & # start /B "" "%PROLOG%" -l "%DALI_HOME%/active_user_wi.pl" --goal utente.
echo Launching agents instances...
$WAIT > /dev/null # %WAIT% > nul
# Launch agents
# Each agent gets its configuration generated and then runs in its own
# xterm window.
for agent_filename in $BUILD_HOME/*
do
	agent_base="${agent_filename##*/}"
	echo "Agente: $agent_base"
	$XTERM -e "./conf/makeconf.sh $agent_base $DALI_HOME" &
	$XTERM -T "$agent_base" -hold -e "./conf/startagent.sh $agent_base $PROLOG $DALI_HOME" &
	sleep 2s
	$WAIT > /dev/null # %WAIT% >nul
done
echo MAS started.
echo Press a key to shutdown the MAS
read -p "$*"
echo Halting the MAS...
# Tear down every Prolog process and terminal started above.
killall sicstus
killall xterm
| true |
7f1eaf65254f35087435b2d6b17ec900fd68d622 | Shell | Contrast-Security-OSS/contrast-s2i-jre | /s2i/bin/run | UTF-8 | 934 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Abort the run script on the first failing command.
set -e
# Directory (relative to CWD) containing the built application JAR;
# overridable via the APP_TARGET environment variable.
APP_TARGET=${APP_TARGET:-target}
# Prefix log lines with the "[s2i]" tag so build output is identifiable.
s2i_echo() {
    printf '[s2i] %s\n' "$*"
}
s2i_echo "Starting Spring Boot application"
s2i_echo "APP_TARGET = $APP_TARGET"
# Only log JAVA_OPTS when the caller provided any.
if [[ ! -z $JAVA_OPTS ]]
then
  s2i_echo "JAVA_OPTS = $JAVA_OPTS"
fi
s2i_echo "Downloading Contrast Agent"
# Build the agent download URL from the TeamServer base URL and org id.
URL=$CONTRAST_URL
URL+="api/ng/"
URL+=$CONTRAST_ORGANIZATION_ID
URL+="/agents/default/java"
# set -e (above) aborts the script if the download fails or times out.
curl --max-time 15 $URL -H API-Key:$CONTRAST_API_KEY -H Authorization:$CONTRAST_AUTHORIZATION -o contrast.jar
s2i_echo "Searching for a valid JAR file (in $PWD/$APP_TARGET)"
# NOTE(review): if more than one JAR matches, EXECUTABLE holds several
# newline-separated paths and the java invocation below breaks — confirm
# builds produce exactly one JAR under $APP_TARGET.
EXECUTABLE=$(find $APP_TARGET -maxdepth 2 -name "*.jar")
if [[ -z $EXECUTABLE ]]
then
  s2i_echo "ERROR: No executeable JAR file found"
  exit 1
fi
s2i_echo "Found $PWD/$EXECUTABLE"
s2i_echo "Running application with Contrast"
# exec replaces this shell with the JVM (no intermediate shell process).
exec java -javaagent:contrast.jar -Dapplication.name=S2iDemo -Dcontrast.server=S2iDemoServer -Dcontrast.path=/s2idemo $JAVA_OPTS -jar $EXECUTABLE
| true |
b9552d16476cc22a803a6c983e0040867d8ded32 | Shell | oraclebase/vagrant | /containers/ol8/docker/scripts/root_setup.sh | UTF-8 | 2,975 | 3.203125 | 3 | [] | no_license | sh /vagrant/scripts/prepare_disks.sh
echo "******************************************************************************"
echo "Prepare Yum with the latest repos." `date`
echo "******************************************************************************"
# Ensure DNS resolution works inside the VM before any package installs.
echo "nameserver 8.8.8.8" >> /etc/resolv.conf
dnf install -y dnf-utils zip unzip git
dnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo
echo "******************************************************************************"
echo "Install Docker." `date`
echo "******************************************************************************"
dnf install -y docker-ce --nobest
#yum update -y
echo "******************************************************************************"
echo "Enable Docker." `date`
echo "******************************************************************************"
systemctl enable docker.service
systemctl start docker.service
systemctl status docker.service
echo "******************************************************************************"
echo "Create non-root docker user." `date`
echo "******************************************************************************"
groupadd -g 1042 docker_fg
useradd -G docker_fg docker_user
# Volume mount points for the various OL/DB/ORDS container combinations.
mkdir -p /u01/volumes/ol7_19_ords_tomcat
mkdir -p /u01/volumes/ol7_19_ords_db
mkdir -p /u01/volumes/ol7_183_ords_tomcat
mkdir -p /u01/volumes/ol7_183_ords_db
mkdir -p /u01/volumes/ol8_19_ords_tomcat
mkdir -p /u01/volumes/ol8_19_ords_db
mkdir -p /u01/volumes/ol8_183_ords_tomcat
mkdir -p /u01/volumes/ol8_183_ords_db
chown -R docker_user:docker_fg /u01
chmod -R 775 /u01/volumes
# setgid so files created in the volumes inherit the docker_fg group.
chmod -R g+s /u01/volumes
# Add users so host reports process ownership properly. Not required.
useradd -u 500 oracle
useradd -u 501 tomcat
# Let docker_user run docker without a password, and alias it via sudo.
echo "docker_user ALL=(ALL) NOPASSWD: /usr/bin/docker" >> /etc/sudoers
echo "alias docker=\"sudo /usr/bin/docker\"" >> /home/docker_user/.bash_profile
echo "******************************************************************************"
echo "Configure docker-compose." `date`
echo "******************************************************************************"
curl -L https://github.com/docker/compose/releases/download/1.25.5/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
echo "docker_user ALL=(ALL) NOPASSWD: /usr/local/bin/docker-compose" >> /etc/sudoers
echo "alias docker-compose=\"sudo /usr/local/bin/docker-compose\"" >> /home/docker_user/.bash_profile
echo "******************************************************************************"
echo "Copy setup files to the local disks." `date`
echo "******************************************************************************"
cp /vagrant/scripts/docker_user_setup.sh /home/docker_user/
chown docker_user:docker_user /home/docker_user/docker_user_setup.sh
chmod +x /home/docker_user/docker_user_setup.sh
# Run the per-user setup as docker_user.
sudo su - docker_user -c '/home/docker_user/docker_user_setup.sh'
| true |
e246672b023d8d9fee80f86b463565ea0ecd06e8 | Shell | gyatskov/scripts | /github/latest-release-url.sh | UTF-8 | 668 | 3.890625 | 4 | [] | no_license | #!/usr/bin/env bash
##
## @author Gennadij Yatskov (gennadij@yatskov.de)
##
## Extracts the URL of the latest binary
##
## Usage: $0 <github-project-name> <artifact-pattern>
## Example: latest-release-url.sh BurntSushi/ripgrep 'ripgrep_.+_amd64\.deb'
##
## Requirements:
## * curl
## * jq
# Query the GitHub API for the latest release of a project and print the
# download URL(s) of assets matching the given extended regex.
latest_release_url() {
    # GitHub project slug, e.g. 'BurntSushi/ripgrep'.
    local -r repo=$1
    # Pattern matched against asset URLs, e.g. 'ripgrep_.+_amd64\.deb'.
    local -r pattern=$2
    local -r api="https://api.github.com/repos/$repo/releases/latest"
    curl -sL "$api" \
        | jq -r '.assets[].browser_download_url' \
        | grep -E "$pattern"
}
# CLI entry point: forward the two positional arguments (see usage above).
_project="$1"
_artifact_pattern="$2"
latest_release_url "$_project" "$_artifact_pattern"
| true |
65739d9b47b1c897704eac963827e47780f88371 | Shell | Yuandong-Chen/js2cpp | /tnode.sh | UTF-8 | 203 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
##make
# Usage: tnode.sh <file.js> — translates the JavaScript file to C++ via
# js2cpp, compiles it once, then runs the cached binary.
if [ $# -lt 1 ]; then
    echo "usage: $0 <file.js>" >&2
    exit 1
fi
# Build the js2cpp translator itself on first use.
if [ ! -f ./js2cpp ]; then
make
fi
# Translate and compile only when no cached binary exists for this input.
# $1 is quoted everywhere so file names containing spaces work.
if [ ! -f "./$1.out" ]; then
./js2cpp < "$1" > "./$1.cpp"
g++ -std=c++11 "./$1.cpp" -o "$1.out"
echo ----------- END OF DEBUG INFO -----------
fi
"./$1.out"
| true |
b60078dce30f766aa724cc343ebf0a88c19d164d | Shell | tommoor/vagrant-heroku | /bootstrap.sh | UTF-8 | 461 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env bash
# Set timezone to UTC
echo "Etc/UTC" | sudo tee /etc/timezone
dpkg-reconfigure --frontend noninteractive tzdata
# update packages
apt-get update
# install rvm with ruby 1.9.3
# NOTE(review): piping a remote script straight into bash executes
# unverified code — confirm this is acceptable for this VM provisioner.
curl -L https://get.rvm.io | bash -s stable --rails --autolibs=enabled --ruby=1.9.3
# set rvm environment
source /home/vagrant/.rvm/scripts/rvm
# download latest source
# NOTE(review): cloning over SSH requires a deploy key inside the VM.
git clone git@github.com:sqwiggle/sqwiggle-web.git
# install bundle gems
cd sqwiggle-web && bundle | true |
b9c082ee0c74bf4e2be269409db248a23e3a0bf5 | Shell | swallace17/Dynamic-Theme-Building-Resources-iOS-7 | /Example Scripts/Detect Retina HD or Non-Retina SD.sh | UTF-8 | 1,386 | 3.9375 | 4 | [] | no_license | #!/bin/bash
#Integrating the following code into your script is an easy way of installing different files based on whether a device has a Retina HD display or a Non-Retina SD Display
##########################################################################################################################################################################
# Fixes: (1) bash rejects an if/else whose branch contains only comments,
# so ':' (the no-op builtin) keeps this template syntactically valid;
# (2) '-e' replaces the deprecated/ambiguous '-a' file-existence test.
if [ -e /Applications/Preferences.app/iconCache@2x.artwork ]
then
#Do Stuff for a Retina Device
:
else
#Do Stuff for a Non-Retina Device
:
fi
##########################################################################################################################################################################
#Notes
# The "-a" inside of the parentheticals basically translates to "If 'file x' is present on the system, then do whatever I descripe here, otherwise do this stuff here."
# In our case, we are checking if the file iconCache@2x.artwork is on the users system, inside the Preferences app. If it is, we know the device is a Retina display.
# Why check for iconCache@2x.artwork? Its just an arbitrary file that is present on all devices, which has different versions for Retina and Non-Retina devices. If the device were Non-Retina for instance, this file would be named iconCache.artwork. If the device is Retina, the suffix "@2x" will be on the end of the file name, letting us know the devcie has a Retina display. | true |
693fd25db1acc8c34c572f938e0ca49b8688435d | Shell | schoettl/rangit | /haskell/test-backupai/run_single_backup_simulation.sh | UTF-8 | 735 | 3.65625 | 4 | [] | no_license | #!/bin/bash
# Run it from its directory!
#
# Runs one backup-AI simulation: builds the initial train for the given
# path, feeds the AI's driving commands through the simulator, and
# appends the resulting position matrix to <resultfile>.txt.
usage="$0 <pathfile>:<trainfile>:<resultfile>"

# $1: error message
exitWithError() {
    echo "$1"
    exit 1
}

if [[ $# == 0 ]]; then
    exitWithError "usage: $usage"
fi

# Split the single colon-separated argument into its three parts.
# Fix: -r keeps backslashes in file names literal (it was missing).
IFS=':' read -r -a array <<< "$1"
pathFile="${array[0]}"
trainFile="${array[1]}"
resultFileNoExt="${array[2]}"
resultFileMatrix="$resultFileNoExt.txt"

initialTrain="$(../inittrain4path "$pathFile" < "$trainFile")"

echo "Processing: $pathFile and $trainFile" >&2
# Header lines identifying the inputs that produced this result matrix.
echo -e "# $pathFile\n# $trainFile" > "$resultFileMatrix"

# AI commands are tee'd to a .cmd.txt log while also driving the
# simulator, whose train states are converted to positions.
../backupai "$pathFile" <(echo "$initialTrain") \
    | tee "$resultFileNoExt.cmd.txt" \
    | ../simulation --print-interval=0.5 <(echo "$initialTrain") \
    | ../trains2positions >> "$resultFileMatrix"
| true |
62a034398822faa6feeb0e4d6df05ebaa19c642c | Shell | mcountryman/websocket | /test/websocket.test.sh | UTF-8 | 997 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# One-time test environment preparation: directories, symlinks into the
# gmod addon tree, and download of the bromsock binary module.
setup() {
  echo "Setting up testing environment.."
  # bin/ receives the binary module, autorun/ the test entry point.
  mkdir -p bin
  mkdir -p autorun
  # Symbolic link library to cwd
  ln -s ../websocket.lua ./websocket.lua
  # Symbolic link tests to autorun
  ln -s ./websocket.test.lua ./autorun/websocket.test.lua
  # Download bromsock
  echo "Downloading bromsock.."
  wget https://github.com/Bromvlieg/gm_bromsock/raw/master/Builds/gmsv_bromsock_linux_nossl_ubuntu.dll \
    -O bin/gmsv_bromsock_linux.dll
  echo "Download complete."
  echo "Setup complete."
}
# Launch the gmod server container interactively, mounting the library,
# the binary module, and the autorun tests into the expected paths.
run() {
  echo "Starting docker container."
  docker run \
    -P \
    -it \
    --rm \
    -v $(pwd):/gmod/garrysmod/addons/websocket/lua \
    -v $(pwd)/bin:/gmod/garrysmod/lua/bin \
    -v $(pwd)/autorun:/gmod/garrysmod/addons/websocket/lua/autorun \
    countmarvin/gmod-docker:latest
}
# Run one-time setup only when the bromsock module is missing.
# Fix: setup() saves the file as bin/gmsv_bromsock_linux.dll, but this
# check previously looked for bin/gm_bromsock_linux.dll, so setup
# (including the download) re-ran on every invocation.
if [ ! -f bin/gmsv_bromsock_linux.dll ]; then
  setup
fi
run | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.